author    Mike Pagano <mpagano@gentoo.org>  2024-01-01 08:46:43 -0500
committer Mike Pagano <mpagano@gentoo.org>  2024-01-01 08:46:43 -0500
commit    d06c13ad6787d24145cad16670840bd81a4418b8 (patch)
tree      fbb5125a572a6595f202553214017fab0e16be5f
parent    Linux patch 6.1.69 (diff)
download  linux-patches-d06c13ad6787d24145cad16670840bd81a4418b8.tar.gz
          linux-patches-d06c13ad6787d24145cad16670840bd81a4418b8.tar.bz2
          linux-patches-d06c13ad6787d24145cad16670840bd81a4418b8.zip
Linux patch 6.1.70 (tag: 6.1-78)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                 4
-rw-r--r--  1069_linux-6.1.70.patch  5394
2 files changed, 5398 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 3d9aed2d..f806635e 100644
--- a/0000_README
+++ b/0000_README
@@ -319,6 +319,10 @@ Patch: 1068_linux-6.1.69.patch
From: https://www.kernel.org
Desc: Linux 6.1.69
+Patch: 1069_linux-6.1.70.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.70
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1069_linux-6.1.70.patch b/1069_linux-6.1.70.patch
new file mode 100644
index 00000000..f22e31fe
--- /dev/null
+++ b/1069_linux-6.1.70.patch
@@ -0,0 +1,5394 @@
+diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+index ff317fd7c15bf..2e1fcff3c2801 100644
+--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
++++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+@@ -14,9 +14,11 @@ allOf:
+
+ properties:
+ compatible:
+- enum:
+- - fsl,imx23-ocotp
+- - fsl,imx28-ocotp
++ items:
++ - enum:
++ - fsl,imx23-ocotp
++ - fsl,imx28-ocotp
++ - const: fsl,ocotp
+
+ "#address-cells":
+ const: 1
+@@ -40,7 +42,7 @@ additionalProperties: false
+ examples:
+ - |
+ ocotp: efuse@8002c000 {
+- compatible = "fsl,imx28-ocotp";
++ compatible = "fsl,imx28-ocotp", "fsl,ocotp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x8002c000 0x2000>;
+diff --git a/Makefile b/Makefile
+index 9a3b34d2387fa..270593fcafdcd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 69
++SUBLEVEL = 70
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index 97ce0c4f1df7e..a79920ec461f0 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -144,7 +144,7 @@
+
+ l3-noc@44000000 {
+ compatible = "ti,dra7-l3-noc";
+- reg = <0x44000000 0x1000>,
++ reg = <0x44000000 0x1000000>,
+ <0x45000000 0x1000>;
+ interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
+index 59755b5a1ad7a..75091aa7269ae 100644
+--- a/arch/arm/mach-omap2/id.c
++++ b/arch/arm/mach-omap2/id.c
+@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
+
+ soc_dev_attr->machine = soc_name;
+ soc_dev_attr->family = omap_get_family();
++ if (!soc_dev_attr->family) {
++ kfree(soc_dev_attr);
++ return;
++ }
+ soc_dev_attr->revision = soc_rev;
+ soc_dev_attr->custom_attr_group = omap_soc_groups[0];
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
++ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return;
+ }
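
The two hunks above plug leaks on the id.c error paths: soc_dev_attr->family comes from kasprintf() inside omap_get_family(), so it must be freed both when that allocation fails and when soc_device_register() errors out. A minimal sketch of the same alloc/register/unwind ordering, with hypothetical example_* names:

    #include <linux/slab.h>
    #include <linux/sys_soc.h>

    static int example_soc_register(void)
    {
        struct soc_device_attribute *attr;
        struct soc_device *dev;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
            return -ENOMEM;

        attr->family = kasprintf(GFP_KERNEL, "OMAP%d", 4);
        if (!attr->family) {            /* allocation failed: free attr too */
            kfree(attr);
            return -ENOMEM;
        }

        dev = soc_device_register(attr);
        if (IS_ERR(dev)) {              /* unwind in reverse order */
            kfree(attr->family);
            kfree(attr);
            return PTR_ERR(dev);
        }
        return 0;
    }
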
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 6cc380a15eb76..de94515fb17c6 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -386,7 +386,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_timer_vcpu_terminate(vcpu);
+ kvm_pmu_vcpu_destroy(vcpu);
+-
++ kvm_vgic_vcpu_destroy(vcpu);
+ kvm_arm_vcpu_destroy(vcpu);
+ }
+
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index f2f3bf4a04b0b..0919e3b8f46ec 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+ vgic_v4_teardown(kvm);
+ }
+
+-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+@@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ vgic_flush_pending_lpis(vcpu);
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+- vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
++ vgic_unregister_redist_iodev(vcpu);
++ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++ }
+ }
+
+-static void __kvm_vgic_destroy(struct kvm *kvm)
++void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++{
++ struct kvm *kvm = vcpu->kvm;
++
++ mutex_lock(&kvm->slots_lock);
++ __kvm_vgic_vcpu_destroy(vcpu);
++ mutex_unlock(&kvm->slots_lock);
++}
++
++void kvm_vgic_destroy(struct kvm *kvm)
+ {
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+- lockdep_assert_held(&kvm->arch.config_lock);
++ mutex_lock(&kvm->slots_lock);
+
+ vgic_debug_destroy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+- kvm_vgic_vcpu_destroy(vcpu);
++ __kvm_vgic_vcpu_destroy(vcpu);
++
++ mutex_lock(&kvm->arch.config_lock);
+
+ kvm_vgic_dist_destroy(kvm);
+-}
+
+-void kvm_vgic_destroy(struct kvm *kvm)
+-{
+- mutex_lock(&kvm->arch.config_lock);
+- __kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->arch.config_lock);
++ mutex_unlock(&kvm->slots_lock);
+ }
+
+ /**
+@@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ type = VGIC_V3;
+ }
+
+- if (ret) {
+- __kvm_vgic_destroy(kvm);
++ if (ret)
+ goto out;
+- }
++
+ dist->ready = true;
+ dist_base = dist->vgic_dist_base;
+ mutex_unlock(&kvm->arch.config_lock);
+
+ ret = vgic_register_dist_iodev(kvm, dist_base, type);
+- if (ret) {
++ if (ret)
+ kvm_err("Unable to register VGIC dist MMIO regions\n");
+- kvm_vgic_destroy(kvm);
+- }
+- mutex_unlock(&kvm->slots_lock);
+- return ret;
+
++ goto out_slots;
+ out:
+ mutex_unlock(&kvm->arch.config_lock);
++out_slots:
+ mutex_unlock(&kvm->slots_lock);
++
++ if (ret)
++ kvm_vgic_destroy(kvm);
++
+ return ret;
+ }
+
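The vgic rework above flattens the teardown locking: kvm->slots_lock is now always taken outside kvm->arch.config_lock, and kvm_vgic_map_resources() funnels its error handling through a single unwind so that destruction runs with no locks held. A condensed sketch of that control flow (do_config() and register_iodevs() are hypothetical stand-ins for the real steps):

    /* Sketch: single-exit unwind with nested locks, teardown outside locks. */
    static int map_resources(struct kvm *kvm)
    {
        int ret;

        mutex_lock(&kvm->slots_lock);
        mutex_lock(&kvm->arch.config_lock);

        ret = do_config(kvm);           /* work needing both locks */
        if (ret)
            goto out;

        mutex_unlock(&kvm->arch.config_lock);

        ret = register_iodevs(kvm);     /* needs slots_lock only */
        goto out_slots;

    out:
        mutex_unlock(&kvm->arch.config_lock);
    out_slots:
        mutex_unlock(&kvm->slots_lock);

        if (ret)
            kvm_vgic_destroy(kvm);      /* takes both locks itself */
        return ret;
    }
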
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 188d2187eede9..871a45d4fc84c 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -820,7 +820,7 @@ out_unlock:
+ return ret;
+ }
+
+-static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 23e280fa0a16f..9f80a580ca771 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -229,6 +229,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
+ int vgic_v3_save_pending_tables(struct kvm *kvm);
+ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
+ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
+ bool vgic_v3_check_base(struct kvm *kvm);
+
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
+index 532c29ef03769..956ae0a01bad1 100644
+--- a/arch/riscv/include/asm/signal.h
++++ b/arch/riscv/include/asm/signal.h
+@@ -7,6 +7,6 @@
+ #include <uapi/asm/ptrace.h>
+
+ asmlinkage __visible
+-void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
++void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
+
+ #endif
+diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
+index b714ed0ef6885..9acf48e53a87f 100644
+--- a/arch/s390/include/asm/fpu/api.h
++++ b/arch/s390/include/asm/fpu/api.h
+@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
+ #define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
+
+ #define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
+-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
++#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
+
+ struct kernel_fpu;
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 46b7ee0ab01a4..6b8c93989aa31 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1015,8 +1015,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
+ } else {
+ local_irq_save(flags);
+ memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+ sync_core();
++ local_irq_restore(flags);
+
+ /*
+ * Could also do a CLFLUSH here to speed up CPU recovery; but
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 5e680e039d0e1..4686c1d9d0cfd 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -2553,3 +2553,49 @@ void bpf_jit_free(struct bpf_prog *prog)
+
+ bpf_prog_unlock_free(prog);
+ }
++
++void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++ struct bpf_prog *new, struct bpf_prog *old)
++{
++ u8 *old_addr, *new_addr, *old_bypass_addr;
++ int ret;
++
++ old_bypass_addr = old ? NULL : poke->bypass_addr;
++ old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
++ new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
++
++ /*
++ * On program loading or teardown, the program's kallsym entry
++ * might not be in place, so we use __bpf_arch_text_poke to skip
++ * the kallsyms check.
++ */
++ if (new) {
++ ret = __bpf_arch_text_poke(poke->tailcall_target,
++ BPF_MOD_JUMP,
++ old_addr, new_addr);
++ BUG_ON(ret < 0);
++ if (!old) {
++ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
++ BPF_MOD_JUMP,
++ poke->bypass_addr,
++ NULL);
++ BUG_ON(ret < 0);
++ }
++ } else {
++ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
++ BPF_MOD_JUMP,
++ old_bypass_addr,
++ poke->bypass_addr);
++ BUG_ON(ret < 0);
++ /* Let other CPUs finish executing the old program, so that
++ * they are never exposed to an invalid nop, stack unwind or
++ * nop state.
++ */
++ if (!ret)
++ synchronize_rcu();
++ ret = __bpf_arch_text_poke(poke->tailcall_target,
++ BPF_MOD_JUMP,
++ old_addr, NULL);
++ BUG_ON(ret < 0);
++ }
++}
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index 9b1ec5d8c99c8..a65fc2ae15b49 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -9,6 +9,7 @@ config XEN
+ select PARAVIRT_CLOCK
+ select X86_HV_CALLBACK_VECTOR
+ depends on X86_64 || (X86_32 && X86_PAE)
++ depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
+ depends on X86_LOCAL_APIC && X86_TSC
+ help
+ This is the Linux Xen port. Enabling this will allow the
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 426d0b42685a0..127e3ceb59799 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1777,14 +1777,43 @@ static const struct block_device_operations lo_fops = {
+ /*
+ * If max_loop is specified, create that many devices upfront.
+ * This also becomes a hard limit. If max_loop is not specified,
++ * the default isn't a hard limit (the default was changed from 0 by
++ * commit 85c50197716c to keep max_loop=0 working); in that case just
+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+ * init time. Loop devices can be requested on-demand with the
+ * /dev/loop-control interface, or be instantiated by accessing
+ * a 'dead' device node.
+ */
+ static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+-module_param(max_loop, int, 0444);
++
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
++static bool max_loop_specified;
++
++static int max_loop_param_set_int(const char *val,
++ const struct kernel_param *kp)
++{
++ int ret;
++
++ ret = param_set_int(val, kp);
++ if (ret < 0)
++ return ret;
++
++ max_loop_specified = true;
++ return 0;
++}
++
++static const struct kernel_param_ops max_loop_param_ops = {
++ .set = max_loop_param_set_int,
++ .get = param_get_int,
++};
++
++module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
+ MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
++#else
++module_param(max_loop, int, 0444);
++MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
++#endif
++
+ module_param(max_part, int, 0444);
+ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
+
+@@ -2089,14 +2118,18 @@ static void loop_remove(struct loop_device *lo)
+ put_disk(lo->lo_disk);
+ }
+
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+ static void loop_probe(dev_t dev)
+ {
+ int idx = MINOR(dev) >> part_shift;
+
+- if (max_loop && idx >= max_loop)
++ if (max_loop_specified && max_loop && idx >= max_loop)
+ return;
+ loop_add(idx);
+ }
++#else
++#define loop_probe NULL
++#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
+
+ static int loop_control_remove(int idx)
+ {
+@@ -2277,6 +2310,9 @@ module_exit(loop_exit);
+ static int __init max_loop_setup(char *str)
+ {
+ max_loop = simple_strtol(str, NULL, 0);
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
++ max_loop_specified = true;
++#endif
+ return 1;
+ }
+
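The loop changes above rely on a standard idiom for telling "the user explicitly passed max_loop=N" apart from "the compiled-in default is in effect": route the parameter through module_param_cb() with a custom kernel_param_ops whose setter records that it ran. A minimal sketch of the idiom, with hypothetical my_* names:

    #include <linux/moduleparam.h>

    static int my_limit = 8;            /* compiled-in default */
    static bool my_limit_specified;     /* true only if my_limit= was passed */

    static int my_limit_set(const char *val, const struct kernel_param *kp)
    {
        int ret = param_set_int(val, kp);   /* normal int parsing */

        if (ret < 0)
            return ret;
        my_limit_specified = true;          /* remember the explicit set */
        return 0;
    }

    static const struct kernel_param_ops my_limit_ops = {
        .set = my_limit_set,
        .get = param_get_int,
    };
    module_param_cb(my_limit, &my_limit_ops, &my_limit, 0444);
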
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c2f0f74193f0e..3fa74051f31b4 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -103,6 +103,9 @@ struct ublk_uring_cmd_pdu {
+ */
+ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+
++/* atomic RW with ubq->cancel_lock */
++#define UBLK_IO_FLAG_CANCELED 0x80000000
++
+ struct ublk_io {
+ /* userspace buffer address from io cmd */
+ __u64 addr;
+@@ -126,6 +129,7 @@ struct ublk_queue {
+ unsigned int max_io_sz;
+ bool force_abort;
+ unsigned short nr_io_ready; /* how many ios setup */
++ spinlock_t cancel_lock;
+ struct ublk_device *dev;
+ struct ublk_io ios[];
+ };
+@@ -1045,28 +1049,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+ return ubq->nr_io_ready == ubq->q_depth;
+ }
+
+-static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+-{
+- io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+-}
+-
+ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ {
+ int i;
+
+- if (!ublk_queue_ready(ubq))
+- return;
+-
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+- if (io->flags & UBLK_IO_FLAG_ACTIVE)
+- io_uring_cmd_complete_in_task(io->cmd,
+- ublk_cmd_cancel_cb);
+- }
++ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
++ bool done;
+
+- /* all io commands are canceled */
+- ubq->nr_io_ready = 0;
++ spin_lock(&ubq->cancel_lock);
++ done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
++ if (!done)
++ io->flags |= UBLK_IO_FLAG_CANCELED;
++ spin_unlock(&ubq->cancel_lock);
++
++ if (!done)
++ io_uring_cmd_done(io->cmd,
++ UBLK_IO_RES_ABORT, 0,
++ IO_URING_F_UNLOCKED);
++ }
++ }
+ }
+
+ /* Cancel all pending commands, must be called after del_gendisk() returns */
+@@ -1113,7 +1117,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ublk_wait_tagset_rqs_idle(ub);
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+- ublk_cancel_dev(ub);
+ /* we are going to release task_struct of ubq_daemon and resets
+ * ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
+ * Besides, monitor_work is not necessary in QUIESCED state since we have
+@@ -1136,6 +1139,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
+ __ublk_quiesce_dev(ub);
+ unlock:
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ }
+
+ static void ublk_unquiesce_dev(struct ublk_device *ub)
+@@ -1175,8 +1179,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ put_disk(ub->ub_disk);
+ ub->ub_disk = NULL;
+ unlock:
+- ublk_cancel_dev(ub);
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ cancel_delayed_work_sync(&ub->monitor_work);
+ }
+
+@@ -1353,6 +1357,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
+ void *ptr;
+ int size;
+
++ spin_lock_init(&ubq->cancel_lock);
+ ubq->flags = ub->dev_info.flags;
+ ubq->q_id = q_id;
+ ubq->q_depth = ub->dev_info.queue_depth;
+@@ -1882,8 +1887,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ int i;
+
+ WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
++
+ /* All old ioucmds have to be completed */
+- WARN_ON_ONCE(ubq->nr_io_ready);
++ ubq->nr_io_ready = 0;
+ /* old daemon is PF_EXITING, put it now */
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
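
The ublk fix above replaces the implicit "cancel once" guarantee (zeroing nr_io_ready) with an explicit per-io UBLK_IO_FLAG_CANCELED bit, read and set under ubq->cancel_lock so racing cancel paths complete each command exactly once. A condensed sketch of the test-and-set-under-spinlock logic from ublk_cancel_queue():

    /* Sketch: complete a command at most once, even with racing cancelers. */
    static void cancel_io_once(struct ublk_queue *ubq, struct ublk_io *io)
    {
        bool already_done;

        spin_lock(&ubq->cancel_lock);
        already_done = io->flags & UBLK_IO_FLAG_CANCELED;
        io->flags |= UBLK_IO_FLAG_CANCELED;     /* idempotent mark */
        spin_unlock(&ubq->cancel_lock);

        if (!already_done)
            io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
                              IO_URING_F_UNLOCKED);
    }
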
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 4415d850d698b..44dc91555aa00 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <asm/unaligned.h>
+
++#include <linux/atomic.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -44,6 +45,7 @@ struct vhci_data {
+ bool wakeup;
+ __u16 msft_opcode;
+ bool aosp_capable;
++ atomic_t initialized;
+ };
+
+ static int vhci_open_dev(struct hci_dev *hdev)
+@@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+- mutex_lock(&data->open_mutex);
+ skb_queue_tail(&data->readq, skb);
+- mutex_unlock(&data->open_mutex);
+
+- wake_up_interruptible(&data->read_wait);
++ if (atomic_read(&data->initialized))
++ wake_up_interruptible(&data->read_wait);
+ return 0;
+ }
+
+@@ -363,7 +364,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ skb_put_u8(skb, 0xff);
+ skb_put_u8(skb, opcode);
+ put_unaligned_le16(hdev->id, skb_put(skb, 2));
+- skb_queue_tail(&data->readq, skb);
++ skb_queue_head(&data->readq, skb);
++ atomic_inc(&data->initialized);
+
+ wake_up_interruptible(&data->read_wait);
+ return 0;
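
The vhci change above drops open_mutex from the TX path and instead gates reader wakeups on an atomic "initialized" flag: frames are always queued, but readers are only woken once the setup packet has been queued at the head of readq and the flag set. A condensed sketch of the publish ordering:

    /* Sketch: lock-free "device ready" publish in vhci. */
    static int tx_frame(struct vhci_data *data, struct sk_buff *skb)
    {
        skb_queue_tail(&data->readq, skb);      /* always safe to queue */

        if (atomic_read(&data->initialized))    /* wake only after setup */
            wake_up_interruptible(&data->read_wait);
        return 0;
    }

    static void publish_setup(struct vhci_data *data, struct sk_buff *hdr)
    {
        skb_queue_head(&data->readq, hdr);      /* header drains first */
        atomic_inc(&data->initialized);         /* now allow wakeups */
        wake_up_interruptible(&data->read_wait);
    }
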
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 59a2fe2448f17..15c6b85b125d4 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2174,13 +2174,23 @@ static int sysc_reset(struct sysc *ddata)
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+- /* Flush posted write */
++
++ /*
++ * Some devices need a delay before reading registers
++ * after reset. Presumably a srst_udelay is not needed
++ * for devices that use a rstctrl register reset.
++ */
++ if (ddata->cfg.srst_udelay)
++ fsleep(ddata->cfg.srst_udelay);
++
++ /*
++ * Flush posted write. For devices needing srst_udelay
++ * this should trigger an interconnect error if the
++ * srst_udelay value is needed but not configured.
++ */
+ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+- if (ddata->cfg.srst_udelay)
+- fsleep(ddata->cfg.srst_udelay);
+-
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
+
+diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
+index c22fcaa44a614..6b7d47a52b10a 100644
+--- a/drivers/gpio/gpio-dwapb.c
++++ b/drivers/gpio/gpio-dwapb.c
+@@ -283,13 +283,15 @@ static void dwapb_irq_enable(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+- val = dwapb_read(gpio, GPIO_INTEN);
+- val |= BIT(irqd_to_hwirq(d));
++ val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTEN, val);
++ val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
++ dwapb_write(gpio, GPIO_INTMASK, val);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+
+@@ -297,12 +299,14 @@ static void dwapb_irq_disable(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+- val = dwapb_read(gpio, GPIO_INTEN);
+- val &= ~BIT(irqd_to_hwirq(d));
++ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
++ dwapb_write(gpio, GPIO_INTMASK, val);
++ val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTEN, val);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 6ab1cf489d035..e40c93f0960b4 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -2444,10 +2444,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+ return 0;
+ }
+
+-/*
+- * gpio_ioctl() - ioctl handler for the GPIO chardev
+- */
+-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ struct gpio_chardev_data *cdev = file->private_data;
+ struct gpio_device *gdev = cdev->gdev;
+@@ -2484,6 +2481,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ }
+
++/*
++ * gpio_ioctl() - ioctl handler for the GPIO chardev
++ */
++static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct gpio_chardev_data *cdev = file->private_data;
++
++ return call_ioctl_locked(file, cmd, arg, cdev->gdev,
++ gpio_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6d5f3c5fb4a62..13e0b521e3dba 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5104,6 +5104,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
++ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
++ goto ffu;
++
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ clips = drm_plane_get_damage_clips(new_plane_state);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 848db8676adfd..46c2b991aa108 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
+ struct fixed31_32 v_scale_ratio;
+ enum dc_rotation_angle rotation;
+ bool mirror;
++ struct dc_stream_state *stream;
+ };
+
+ /* IPP related types */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d84579da64003..009b5861a3fec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3427,7 +3427,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+- .mirror = pipe_ctx->plane_state->horizontal_mirror
++ .mirror = pipe_ctx->plane_state->horizontal_mirror,
++ .stream = pipe_ctx->stream,
+ };
+ bool pipe_split_on = false;
+ bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+index 4566bc7abf17e..aa252dc263267 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+@@ -1075,8 +1075,16 @@ void hubp2_cursor_set_position(
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save necessary cursor info x, y position. w, h is saved in attribute func. */
+- hubp->cur_rect.x = src_x_offset + param->viewport.x;
+- hubp->cur_rect.y = src_y_offset + param->viewport.y;
++ if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
++ param->rotation != ROTATION_ANGLE_0) {
++ hubp->cur_rect.x = 0;
++ hubp->cur_rect.y = 0;
++ hubp->cur_rect.w = param->stream->timing.h_addressable;
++ hubp->cur_rect.h = param->stream->timing.v_addressable;
++ } else {
++ hubp->cur_rect.x = src_x_offset + param->viewport.x;
++ hubp->cur_rect.y = src_y_offset + param->viewport.y;
++ }
+ }
+
+ void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 53262f6bc40b0..72bec33e371f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -994,5 +994,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+
+ dcn20_prepare_bandwidth(dc, context);
++
++ dc_dmub_srv_p_state_delegate(dc,
++ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
+index 18f0a5ae3bacd..a502af0b6dd47 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic.c
++++ b/drivers/gpu/drm/i915/display/intel_atomic.c
+@@ -41,6 +41,7 @@
+ #include "intel_global_state.h"
+ #include "intel_hdcp.h"
+ #include "intel_psr.h"
++#include "intel_fb.h"
+ #include "skl_universal_plane.h"
+
+ /**
+@@ -302,198 +303,6 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
+ kfree(crtc_state);
+ }
+
+-static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+- int num_scalers_need, struct intel_crtc *intel_crtc,
+- const char *name, int idx,
+- struct intel_plane_state *plane_state,
+- int *scaler_id)
+-{
+- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+- int j;
+- u32 mode;
+-
+- if (*scaler_id < 0) {
+- /* find a free scaler */
+- for (j = 0; j < intel_crtc->num_scalers; j++) {
+- if (scaler_state->scalers[j].in_use)
+- continue;
+-
+- *scaler_id = j;
+- scaler_state->scalers[*scaler_id].in_use = 1;
+- break;
+- }
+- }
+-
+- if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+- "Cannot find scaler for %s:%d\n", name, idx))
+- return;
+-
+- /* set scaler mode */
+- if (plane_state && plane_state->hw.fb &&
+- plane_state->hw.fb->format->is_yuv &&
+- plane_state->hw.fb->format->num_planes > 1) {
+- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+- if (DISPLAY_VER(dev_priv) == 9) {
+- mode = SKL_PS_SCALER_MODE_NV12;
+- } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+- /*
+- * On gen11+'s HDR planes we only use the scaler for
+- * scaling. They have a dedicated chroma upsampler, so
+- * we don't need the scaler to upsample the UV plane.
+- */
+- mode = PS_SCALER_MODE_NORMAL;
+- } else {
+- struct intel_plane *linked =
+- plane_state->planar_linked_plane;
+-
+- mode = PS_SCALER_MODE_PLANAR;
+-
+- if (linked)
+- mode |= PS_PLANE_Y_SEL(linked->id);
+- }
+- } else if (DISPLAY_VER(dev_priv) >= 10) {
+- mode = PS_SCALER_MODE_NORMAL;
+- } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+- /*
+- * when only 1 scaler is in use on a pipe with 2 scalers
+- * scaler 0 operates in high quality (HQ) mode.
+- * In this case use scaler 0 to take advantage of HQ mode
+- */
+- scaler_state->scalers[*scaler_id].in_use = 0;
+- *scaler_id = 0;
+- scaler_state->scalers[0].in_use = 1;
+- mode = SKL_PS_SCALER_MODE_HQ;
+- } else {
+- mode = SKL_PS_SCALER_MODE_DYN;
+- }
+-
+- drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+- intel_crtc->pipe, *scaler_id, name, idx);
+- scaler_state->scalers[*scaler_id].mode = mode;
+-}
+-
+-/**
+- * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+- * @dev_priv: i915 device
+- * @intel_crtc: intel crtc
+- * @crtc_state: incoming crtc_state to validate and setup scalers
+- *
+- * This function sets up scalers based on staged scaling requests for
+- * a @crtc and its planes. It is called from crtc level check path. If request
+- * is a supportable request, it attaches scalers to requested planes and crtc.
+- *
+- * This function takes into account the current scaler(s) in use by any planes
+- * not being part of this atomic state
+- *
+- * Returns:
+- * 0 - scalers were setup succesfully
+- * error code - otherwise
+- */
+-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+- struct intel_crtc *intel_crtc,
+- struct intel_crtc_state *crtc_state)
+-{
+- struct drm_plane *plane = NULL;
+- struct intel_plane *intel_plane;
+- struct intel_plane_state *plane_state = NULL;
+- struct intel_crtc_scaler_state *scaler_state =
+- &crtc_state->scaler_state;
+- struct drm_atomic_state *drm_state = crtc_state->uapi.state;
+- struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
+- int num_scalers_need;
+- int i;
+-
+- num_scalers_need = hweight32(scaler_state->scaler_users);
+-
+- /*
+- * High level flow:
+- * - staged scaler requests are already in scaler_state->scaler_users
+- * - check whether staged scaling requests can be supported
+- * - add planes using scalers that aren't in current transaction
+- * - assign scalers to requested users
+- * - as part of plane commit, scalers will be committed
+- * (i.e., either attached or detached) to respective planes in hw
+- * - as part of crtc_commit, scaler will be either attached or detached
+- * to crtc in hw
+- */
+-
+- /* fail if required scalers > available scalers */
+- if (num_scalers_need > intel_crtc->num_scalers){
+- drm_dbg_kms(&dev_priv->drm,
+- "Too many scaling requests %d > %d\n",
+- num_scalers_need, intel_crtc->num_scalers);
+- return -EINVAL;
+- }
+-
+- /* walkthrough scaler_users bits and start assigning scalers */
+- for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+- int *scaler_id;
+- const char *name;
+- int idx;
+-
+- /* skip if scaler not required */
+- if (!(scaler_state->scaler_users & (1 << i)))
+- continue;
+-
+- if (i == SKL_CRTC_INDEX) {
+- name = "CRTC";
+- idx = intel_crtc->base.base.id;
+-
+- /* panel fitter case: assign as a crtc scaler */
+- scaler_id = &scaler_state->scaler_id;
+- } else {
+- name = "PLANE";
+-
+- /* plane scaler case: assign as a plane scaler */
+- /* find the plane that set the bit as scaler_user */
+- plane = drm_state->planes[i].ptr;
+-
+- /*
+- * to enable/disable hq mode, add planes that are using scaler
+- * into this transaction
+- */
+- if (!plane) {
+- struct drm_plane_state *state;
+-
+- /*
+- * GLK+ scalers don't have a HQ mode so it
+- * isn't necessary to change between HQ and dyn mode
+- * on those platforms.
+- */
+- if (DISPLAY_VER(dev_priv) >= 10)
+- continue;
+-
+- plane = drm_plane_from_index(&dev_priv->drm, i);
+- state = drm_atomic_get_plane_state(drm_state, plane);
+- if (IS_ERR(state)) {
+- drm_dbg_kms(&dev_priv->drm,
+- "Failed to add [PLANE:%d] to drm_state\n",
+- plane->base.id);
+- return PTR_ERR(state);
+- }
+- }
+-
+- intel_plane = to_intel_plane(plane);
+- idx = plane->base.id;
+-
+- /* plane on different crtc cannot be a scaler user of this crtc */
+- if (drm_WARN_ON(&dev_priv->drm,
+- intel_plane->pipe != intel_crtc->pipe))
+- continue;
+-
+- plane_state = intel_atomic_get_new_plane_state(intel_state,
+- intel_plane);
+- scaler_id = &plane_state->scaler_id;
+- }
+-
+- intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+- intel_crtc, name, idx,
+- plane_state, scaler_id);
+- }
+-
+- return 0;
+-}
+-
+ struct drm_atomic_state *
+ intel_atomic_state_alloc(struct drm_device *dev)
+ {
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
+index 1dc439983dd94..e506f6a873447 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic.h
++++ b/drivers/gpu/drm/i915/display/intel_atomic.h
+@@ -52,8 +52,4 @@ struct intel_crtc_state *
+ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc);
+
+-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+- struct intel_crtc *intel_crtc,
+- struct intel_crtc_state *crtc_state);
+-
+ #endif /* __INTEL_ATOMIC_H__ */
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 1777a12f2f421..fb8d1d63407a2 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -6481,6 +6481,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
+ return -EINVAL;
+ }
+
++ /*
++ * FIXME: Bigjoiner+async flip is busted currently.
++ * Remove this check once the issues are fixed.
++ */
++ if (new_crtc_state->bigjoiner_pipes) {
++ drm_dbg_kms(&i915->drm,
++ "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
++ crtc->base.base.id, crtc->base.name);
++ return -EINVAL;
++ }
++
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
+index 23d854bd73b77..c69a638796c62 100644
+--- a/drivers/gpu/drm/i915/display/intel_fb.c
++++ b/drivers/gpu/drm/i915/display/intel_fb.c
+@@ -1176,7 +1176,8 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
+ {
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+
+- return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR;
++ return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
++ intel_fb_uses_dpt(&fb->base);
+ }
+
+ static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
+@@ -1312,9 +1313,11 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int tile_width,
+ unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
+ {
++ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int stride_tiles;
+
+- if (IS_ALDERLAKE_P(to_i915(fb->base.dev)))
++ if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
++ src_stride_tiles < dst_stride_tiles)
+ stride_tiles = src_stride_tiles;
+ else
+ stride_tiles = dst_stride_tiles;
+@@ -1520,7 +1523,8 @@ static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_vi
+ memset(view, 0, sizeof(*view));
+ view->gtt.type = view_type;
+
+- if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
++ if (view_type == I915_GTT_VIEW_REMAPPED &&
++ (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14))
+ view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index 90f42f63128ec..0b74f91e865d0 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -337,6 +337,263 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ return 0;
+ }
+
++static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
++ int num_scalers_need, struct intel_crtc *intel_crtc,
++ const char *name, int idx,
++ struct intel_plane_state *plane_state,
++ int *scaler_id)
++{
++ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
++ int j;
++ u32 mode;
++
++ if (*scaler_id < 0) {
++ /* find a free scaler */
++ for (j = 0; j < intel_crtc->num_scalers; j++) {
++ if (scaler_state->scalers[j].in_use)
++ continue;
++
++ *scaler_id = j;
++ scaler_state->scalers[*scaler_id].in_use = 1;
++ break;
++ }
++ }
++
++ if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
++ "Cannot find scaler for %s:%d\n", name, idx))
++ return -EINVAL;
++
++ /* set scaler mode */
++ if (plane_state && plane_state->hw.fb &&
++ plane_state->hw.fb->format->is_yuv &&
++ plane_state->hw.fb->format->num_planes > 1) {
++ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
++
++ if (DISPLAY_VER(dev_priv) == 9) {
++ mode = SKL_PS_SCALER_MODE_NV12;
++ } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
++ /*
++ * On gen11+'s HDR planes we only use the scaler for
++ * scaling. They have a dedicated chroma upsampler, so
++ * we don't need the scaler to upsample the UV plane.
++ */
++ mode = PS_SCALER_MODE_NORMAL;
++ } else {
++ struct intel_plane *linked =
++ plane_state->planar_linked_plane;
++
++ mode = PS_SCALER_MODE_PLANAR;
++
++ if (linked)
++ mode |= PS_PLANE_Y_SEL(linked->id);
++ }
++ } else if (DISPLAY_VER(dev_priv) >= 10) {
++ mode = PS_SCALER_MODE_NORMAL;
++ } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
++ /*
++ * when only 1 scaler is in use on a pipe with 2 scalers
++ * scaler 0 operates in high quality (HQ) mode.
++ * In this case use scaler 0 to take advantage of HQ mode
++ */
++ scaler_state->scalers[*scaler_id].in_use = 0;
++ *scaler_id = 0;
++ scaler_state->scalers[0].in_use = 1;
++ mode = SKL_PS_SCALER_MODE_HQ;
++ } else {
++ mode = SKL_PS_SCALER_MODE_DYN;
++ }
++
++ /*
++ * FIXME: we should also check the scaler factors for pfit, so
++ * this shouldn't be tied directly to planes.
++ */
++ if (plane_state && plane_state->hw.fb) {
++ const struct drm_framebuffer *fb = plane_state->hw.fb;
++ const struct drm_rect *src = &plane_state->uapi.src;
++ const struct drm_rect *dst = &plane_state->uapi.dst;
++ int hscale, vscale, max_vscale, max_hscale;
++
++ /*
++ * FIXME: When two scalers are needed, but only one of
++ * them needs to downscale, we should make sure that
++ * the one that needs downscaling support is assigned
++ * as the first scaler, so we don't reject downscaling
++ * unnecessarily.
++ */
++
++ if (DISPLAY_VER(dev_priv) >= 14) {
++ /*
++ * On versions 14 and up, only the first
++ * scaler supports a vertical scaling factor
++ * of more than 1.0, while a horizontal
++ * scaling factor of 3.0 is supported.
++ */
++ max_hscale = 0x30000 - 1;
++ if (*scaler_id == 0)
++ max_vscale = 0x30000 - 1;
++ else
++ max_vscale = 0x10000;
++
++ } else if (DISPLAY_VER(dev_priv) >= 10 ||
++ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
++ max_hscale = 0x30000 - 1;
++ max_vscale = 0x30000 - 1;
++ } else {
++ max_hscale = 0x20000 - 1;
++ max_vscale = 0x20000 - 1;
++ }
++
++ /*
++ * FIXME: We should change the if-else block above to
++ * support HQ vs dynamic scaler properly.
++ */
++
++ /* Check if required scaling is within limits */
++ hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
++ vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
++
++ if (hscale < 0 || vscale < 0) {
++ drm_dbg_kms(&dev_priv->drm,
++ "Scaler %d doesn't support required plane scaling\n",
++ *scaler_id);
++ drm_rect_debug_print("src: ", src, true);
++ drm_rect_debug_print("dst: ", dst, false);
++
++ return -EINVAL;
++ }
++ }
++
++ drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
++ intel_crtc->pipe, *scaler_id, name, idx);
++ scaler_state->scalers[*scaler_id].mode = mode;
++
++ return 0;
++}
++
++/**
++ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
++ * @dev_priv: i915 device
++ * @intel_crtc: intel crtc
++ * @crtc_state: incoming crtc_state to validate and setup scalers
++ *
++ * This function sets up scalers based on staged scaling requests for
++ * a @crtc and its planes. It is called from crtc level check path. If request
++ * is a supportable request, it attaches scalers to requested planes and crtc.
++ *
++ * This function takes into account the current scaler(s) in use by any planes
++ * not being part of this atomic state
++ *
++ * Returns:
++ * 0 - scalers were setup successfully
++ * error code - otherwise
++ */
++int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
++ struct intel_crtc *intel_crtc,
++ struct intel_crtc_state *crtc_state)
++{
++ struct drm_plane *plane = NULL;
++ struct intel_plane *intel_plane;
++ struct intel_crtc_scaler_state *scaler_state =
++ &crtc_state->scaler_state;
++ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
++ struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
++ int num_scalers_need;
++ int i;
++
++ num_scalers_need = hweight32(scaler_state->scaler_users);
++
++ /*
++ * High level flow:
++ * - staged scaler requests are already in scaler_state->scaler_users
++ * - check whether staged scaling requests can be supported
++ * - add planes using scalers that aren't in current transaction
++ * - assign scalers to requested users
++ * - as part of plane commit, scalers will be committed
++ * (i.e., either attached or detached) to respective planes in hw
++ * - as part of crtc_commit, scaler will be either attached or detached
++ * to crtc in hw
++ */
++
++ /* fail if required scalers > available scalers */
++ if (num_scalers_need > intel_crtc->num_scalers) {
++ drm_dbg_kms(&dev_priv->drm,
++ "Too many scaling requests %d > %d\n",
++ num_scalers_need, intel_crtc->num_scalers);
++ return -EINVAL;
++ }
++
++ /* walkthrough scaler_users bits and start assigning scalers */
++ for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
++ struct intel_plane_state *plane_state = NULL;
++ int *scaler_id;
++ const char *name;
++ int idx, ret;
++
++ /* skip if scaler not required */
++ if (!(scaler_state->scaler_users & (1 << i)))
++ continue;
++
++ if (i == SKL_CRTC_INDEX) {
++ name = "CRTC";
++ idx = intel_crtc->base.base.id;
++
++ /* panel fitter case: assign as a crtc scaler */
++ scaler_id = &scaler_state->scaler_id;
++ } else {
++ name = "PLANE";
++
++ /* plane scaler case: assign as a plane scaler */
++ /* find the plane that set the bit as scaler_user */
++ plane = drm_state->planes[i].ptr;
++
++ /*
++ * to enable/disable hq mode, add planes that are using scaler
++ * into this transaction
++ */
++ if (!plane) {
++ struct drm_plane_state *state;
++
++ /*
++ * GLK+ scalers don't have a HQ mode so it
++ * isn't necessary to change between HQ and dyn mode
++ * on those platforms.
++ */
++ if (DISPLAY_VER(dev_priv) >= 10)
++ continue;
++
++ plane = drm_plane_from_index(&dev_priv->drm, i);
++ state = drm_atomic_get_plane_state(drm_state, plane);
++ if (IS_ERR(state)) {
++ drm_dbg_kms(&dev_priv->drm,
++ "Failed to add [PLANE:%d] to drm_state\n",
++ plane->base.id);
++ return PTR_ERR(state);
++ }
++ }
++
++ intel_plane = to_intel_plane(plane);
++ idx = plane->base.id;
++
++ /* plane on different crtc cannot be a scaler user of this crtc */
++ if (drm_WARN_ON(&dev_priv->drm,
++ intel_plane->pipe != intel_crtc->pipe))
++ continue;
++
++ plane_state = intel_atomic_get_new_plane_state(intel_state,
++ intel_plane);
++ scaler_id = &plane_state->scaler_id;
++ }
++
++ ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
++ intel_crtc, name, idx,
++ plane_state, scaler_id);
++ if (ret < 0)
++ return ret;
++ }
++
++ return 0;
++}
++
+ static int glk_coef_tap(int i)
+ {
+ return i % 7;
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
+index 0097d5d08e102..f040f6ac061f2 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.h
++++ b/drivers/gpu/drm/i915/display/skl_scaler.h
+@@ -8,17 +8,22 @@
+ #include <linux/types.h>
+
+ enum drm_scaling_filter;
++enum pipe;
+ struct drm_i915_private;
++struct intel_crtc;
+ struct intel_crtc_state;
+-struct intel_plane_state;
+ struct intel_plane;
+-enum pipe;
++struct intel_plane_state;
+
+ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+
+ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+
++int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
++ struct intel_crtc *intel_crtc,
++ struct intel_crtc_state *crtc_state);
++
+ void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+
+ void skl_program_plane_scaler(struct intel_plane *plane,
+@@ -26,4 +31,5 @@ void skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_plane_state *plane_state);
+ void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
++
+ #endif
+diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
+index b96ae15e0ad91..6d35bb3974818 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
++++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
+@@ -39,8 +39,13 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+ * The CHPN0001 ACPI device, which is used to describe the Chipone
+ * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+ */
+- {"CHPN0001", 0 },
+- { },
++ { "CHPN0001" },
++ /*
++ * The IDEA5002 ACPI device causes high interrupt usage and spurious
++ * wakeups from suspend.
++ */
++ { "IDEA5002" },
++ { }
+ };
+
+ /* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */
+@@ -115,9 +120,9 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
+ }
+
+ static const struct acpi_device_id i2c_hid_acpi_match[] = {
+- {"ACPI0C50", 0 },
+- {"PNP0C50", 0 },
+- { },
++ { "ACPI0C50" },
++ { "PNP0C50" },
++ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 6adf3b141316b..86daf791aa27c 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ if (!slave)
+ return 0;
+
+- command = readl(bus->base + ASPEED_I2C_CMD_REG);
++ /*
++ * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
++ * transfers with low enough latency between the nak/stop phase of the current
++ * command and the start/address phase of the following command that the
++ * interrupts are coalesced by the time we process them.
++ */
++ if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
++ irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
++ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
++ }
++
++ if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
++ bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
++ irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
++ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
++ }
++
++ /* Propagate any stop conditions to the slave implementation. */
++ if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
++ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
++ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
++ }
+
+- /* Slave was requested, restart state machine. */
++ /*
++ * Now that we've dealt with any potentially coalesced stop conditions,
++ * address any start conditions.
++ */
+ if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
+ irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
+ bus->slave_state = ASPEED_I2C_SLAVE_START;
+ }
+
+- /* Slave is not currently active, irq was for someone else. */
++ /*
++ * If the slave has been stopped and not started then slave interrupt
++ * handling is complete.
++ */
+ if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ return irq_handled;
+
++ command = readl(bus->base + ASPEED_I2C_CMD_REG);
+ dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
+ irq_status, command);
+
+@@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
+ }
+
+- /* Slave was asked to stop. */
+- if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
+- irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
+- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+- }
+- if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
+- bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
+- irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
+- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+- }
+-
+ switch (bus->slave_state) {
+ case ASPEED_I2C_SLAVE_READ_REQUESTED:
+ if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
+@@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ break;
+ case ASPEED_I2C_SLAVE_STOP:
+- i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
++ /* Stop event handling is done early. Unreachable. */
+ break;
+ case ASPEED_I2C_SLAVE_START:
+ /* Slave was just started. Waiting for the next event. */;
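
The i2c-aspeed rework above processes coalesced status bits in bus order rather than register-bit order: the stop/NAK of the previous transfer is consumed and propagated as I2C_SLAVE_STOP before the SLAVE_MATCH of the next transfer is acted on. A condensed sketch of the reordered handler (state constants as in the driver, locals abbreviated):

    /* Sketch: drain coalesced slave events in bus order, not bit order. */
    static u32 handle_slave_irq(struct aspeed_i2c_bus *bus, u32 sts)
    {
        u32 handled = 0;
        u8 value;

        /* 1) Stop/NAK of the previous transfer first ... */
        if (sts & ASPEED_I2CD_INTR_NORMAL_STOP) {
            handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
            bus->slave_state = ASPEED_I2C_SLAVE_STOP;
        }
        if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
            i2c_slave_event(bus->slave, I2C_SLAVE_STOP, &value);
            bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
        }

        /* 2) ... then the start/address match of the next one. */
        if (sts & ASPEED_I2CD_INTR_SLAVE_MATCH) {
            handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
            bus->slave_state = ASPEED_I2C_SLAVE_START;
        }
        return handled;
    }
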
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index 642c5c4895e37..3ac253a27dd97 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -671,8 +671,10 @@ static int tiadc_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, indio_dev);
+
+ err = tiadc_request_dma(pdev, adc_dev);
+- if (err && err == -EPROBE_DEFER)
++ if (err && err != -ENODEV) {
++ dev_err_probe(&pdev->dev, err, "DMA request failed\n");
+ goto err_dma;
++ }
+
+ return 0;
+
+diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
+index 8d4fc97d10059..2b7873e8a959b 100644
+--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
++++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
+@@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer;
+ int ret;
+
++ /*
++ * iio_triggered_buffer_cleanup() assumes that the buffer allocated here
++ * is assigned to indio_dev->buffer but this is only the case if this
++ * function is the first caller to iio_device_attach_buffer(). If
++ * indio_dev->buffer is already set then we can't proceed otherwise the
++ * cleanup function will try to free a buffer that was not allocated here.
++ */
++ if (indio_dev->buffer)
++ return -EADDRINUSE;
++
+ buffer = iio_kfifo_allocate();
+ if (!buffer) {
+ ret = -ENOMEM;
+diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+index 6633b35a94e69..9c9bc77003c7f 100644
+--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
++++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+@@ -15,8 +15,8 @@
+ /* Conversion times in us */
+ static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
+ 13000, 7000 };
+-static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
+- 5000, 8000 };
++static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
++ 3000, 8000 };
+ static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
+ 4100, 8220, 16440 };
+
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 86fbbe9040503..19a1ef5351d24 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -736,13 +736,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
+ chan->channel2, val);
+ mutex_unlock(&st->lock);
+- return IIO_VAL_INT;
++ return ret;
+ case IIO_ACCEL:
+ mutex_lock(&st->lock);
+ ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
+ chan->channel2, val);
+ mutex_unlock(&st->lock);
+- return IIO_VAL_INT;
++ return ret;
+
+ default:
+ return -EINVAL;
+diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
+index 13a66a8e3411f..e0c51189e329c 100644
+--- a/drivers/input/keyboard/ipaq-micro-keys.c
++++ b/drivers/input/keyboard/ipaq-micro-keys.c
+@@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
+ keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
+ keys->input->keycodesize * keys->input->keycodemax,
+ GFP_KERNEL);
++ if (!keys->codes)
++ return -ENOMEM;
++
+ keys->input->keycode = keys->codes;
+
+ __set_bit(EV_KEY, keys->input->evbit);
+diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
+index e79f5497948b8..9116f4248fd09 100644
+--- a/drivers/input/misc/soc_button_array.c
++++ b/drivers/input/misc/soc_button_array.c
+@@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
+ info->name = "power";
+ info->event_code = KEY_POWER;
+ info->wakeup = true;
++ } else if (upage == 0x01 && usage == 0xc6) {
++ info->name = "airplane mode switch";
++ info->event_type = EV_SW;
++ info->event_code = SW_RFKILL_ALL;
++ info->active_low = false;
+ } else if (upage == 0x01 && usage == 0xca) {
+ info->name = "rotation lock switch";
+ info->event_type = EV_SW;
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 0c6fc954e7296..1d9494f64a215 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -381,6 +381,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+ }
+ mutex_unlock(&icc_lock);
+
++ if (!node)
++ return ERR_PTR(-EINVAL);
++
+ if (IS_ERR(node))
+ return ERR_CAST(node);
+
+diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
+index 5cdb058fa0959..9c2dd40d9a559 100644
+--- a/drivers/interconnect/qcom/sm8250.c
++++ b/drivers/interconnect/qcom/sm8250.c
+@@ -551,6 +551,7 @@ static struct platform_driver qnoc_driver = {
+ .driver = {
+ .name = "qnoc-sm8250",
+ .of_match_table = qnoc_of_match,
++ .sync_state = icc_sync_state,
+ },
+ };
+ module_platform_driver(qnoc_driver);
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 382c5cc471952..100a6a236d92a 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1914,6 +1914,13 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
+
++void dm_bufio_client_reset(struct dm_bufio_client *c)
++{
++ drop_buffers(c);
++ flush_work(&c->shrink_work);
++}
++EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
++
+ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+ {
+ c->start = start;
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index fe7dad3ffa75f..77fcff82c82ac 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1763,11 +1763,12 @@ static void integrity_metadata(struct work_struct *w)
+ sectors_to_process = dio->range.n_sectors;
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++ struct bio_vec bv_copy = bv;
+ unsigned int pos;
+ char *mem, *checksums_ptr;
+
+ again:
+- mem = bvec_kmap_local(&bv);
++ mem = bvec_kmap_local(&bv_copy);
+ pos = 0;
+ checksums_ptr = checksums;
+ do {
+@@ -1776,7 +1777,7 @@ again:
+ sectors_to_process -= ic->sectors_per_block;
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+- } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
++ } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
+ kunmap_local(mem);
+
+ r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+@@ -1801,9 +1802,9 @@ again:
+ if (!sectors_to_process)
+ break;
+
+- if (unlikely(pos < bv.bv_len)) {
+- bv.bv_offset += pos;
+- bv.bv_len -= pos;
++ if (unlikely(pos < bv_copy.bv_len)) {
++ bv_copy.bv_offset += pos;
++ bv_copy.bv_len -= pos;
+ goto again;
+ }
+ }
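
The dm-integrity fix above stops mutating the bio_vec that __bio_for_each_segment() yields: advancing bv.bv_offset/bv_len in place corrupts the iterator's view on later passes, so the loop now advances a private copy. The shape of the safe pattern, as a sketch:

    /* Sketch: never advance the iterator's bio_vec in place; use a copy. */
    static void walk_segments(struct bio *bio)
    {
        struct bvec_iter iter;
        struct bio_vec bv;

        __bio_for_each_segment(bv, bio, iter, bio->bi_iter) {
            struct bio_vec bv_copy = bv;    /* private, mutable view */

            while (bv_copy.bv_len) {
                unsigned int done = min_t(unsigned int,
                                          bv_copy.bv_len, 512);
                /* ... process 'done' bytes at bv_copy.bv_offset ... */
                bv_copy.bv_offset += done;  /* advance the copy */
                bv_copy.bv_len -= done;     /* never touch 'bv' */
            }
        }
    }
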
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 4a0e15109997b..bb0e0a270f62a 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -597,6 +597,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &pmd->tm, &pmd->metadata_sm);
+ if (r < 0) {
++ pmd->tm = NULL;
++ pmd->metadata_sm = NULL;
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+@@ -605,6 +607,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ if (IS_ERR(pmd->data_sm)) {
+ DMERR("sm_disk_create failed");
+ r = PTR_ERR(pmd->data_sm);
++ pmd->data_sm = NULL;
+ goto bad_cleanup_tm;
+ }
+
+@@ -635,11 +638,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+
+ bad_cleanup_nb_tm:
+ dm_tm_destroy(pmd->nb_tm);
++ pmd->nb_tm = NULL;
+ bad_cleanup_data_sm:
+ dm_sm_destroy(pmd->data_sm);
++ pmd->data_sm = NULL;
+ bad_cleanup_tm:
+ dm_tm_destroy(pmd->tm);
++ pmd->tm = NULL;
+ dm_sm_destroy(pmd->metadata_sm);
++ pmd->metadata_sm = NULL;
+
+ return r;
+ }
+@@ -705,6 +712,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ sizeof(disk_super->metadata_space_map_root),
+ &pmd->tm, &pmd->metadata_sm);
+ if (r < 0) {
++ pmd->tm = NULL;
++ pmd->metadata_sm = NULL;
+ DMERR("tm_open_with_sm failed");
+ goto bad_unlock_sblock;
+ }
+@@ -714,6 +723,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ if (IS_ERR(pmd->data_sm)) {
+ DMERR("sm_disk_open failed");
+ r = PTR_ERR(pmd->data_sm);
++ pmd->data_sm = NULL;
+ goto bad_cleanup_tm;
+ }
+
+@@ -740,9 +750,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+
+ bad_cleanup_data_sm:
+ dm_sm_destroy(pmd->data_sm);
++ pmd->data_sm = NULL;
+ bad_cleanup_tm:
+ dm_tm_destroy(pmd->tm);
++ pmd->tm = NULL;
+ dm_sm_destroy(pmd->metadata_sm);
++ pmd->metadata_sm = NULL;
+ bad_unlock_sblock:
+ dm_bm_unlock(sblock);
+
+@@ -789,9 +802,13 @@ static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
+ bool destroy_bm)
+ {
+ dm_sm_destroy(pmd->data_sm);
++ pmd->data_sm = NULL;
+ dm_sm_destroy(pmd->metadata_sm);
++ pmd->metadata_sm = NULL;
+ dm_tm_destroy(pmd->nb_tm);
++ pmd->nb_tm = NULL;
+ dm_tm_destroy(pmd->tm);
++ pmd->tm = NULL;
+ if (destroy_bm)
+ dm_block_manager_destroy(pmd->bm);
+ }
+@@ -999,8 +1016,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ __func__, r);
+ }
+ pmd_write_unlock(pmd);
+- if (!pmd->fail_io)
+- __destroy_persistent_data_objects(pmd, true);
++ __destroy_persistent_data_objects(pmd, true);
+
+ kfree(pmd);
+ return 0;
+@@ -1875,53 +1891,29 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
+ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
+ {
+ int r = -EINVAL;
+- struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
+
+ /* fail_io is double-checked with pmd->root_lock held below */
+ if (unlikely(pmd->fail_io))
+ return r;
+
+- /*
+- * Replacement block manager (new_bm) is created and old_bm destroyed outside of
+- * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
+- * shrinker associated with the block manager's bufio client vs pmd root_lock).
+- * - must take shrinker_rwsem without holding pmd->root_lock
+- */
+- new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
+- THIN_MAX_CONCURRENT_LOCKS);
+-
+ pmd_write_lock(pmd);
+ if (pmd->fail_io) {
+ pmd_write_unlock(pmd);
+- goto out;
++ return r;
+ }
+-
+ __set_abort_with_changes_flags(pmd);
++
++ /* destroy data_sm/metadata_sm/nb_tm/tm */
+ __destroy_persistent_data_objects(pmd, false);
+- old_bm = pmd->bm;
+- if (IS_ERR(new_bm)) {
+- DMERR("could not create block manager during abort");
+- pmd->bm = NULL;
+- r = PTR_ERR(new_bm);
+- goto out_unlock;
+- }
+
+- pmd->bm = new_bm;
++ /* reset bm */
++ dm_block_manager_reset(pmd->bm);
++
++ /* rebuild data_sm/metadata_sm/nb_tm/tm */
+ r = __open_or_format_metadata(pmd, false);
+- if (r) {
+- pmd->bm = NULL;
+- goto out_unlock;
+- }
+- new_bm = NULL;
+-out_unlock:
+ if (r)
+ pmd->fail_io = true;
+ pmd_write_unlock(pmd);
+- dm_block_manager_destroy(old_bm);
+-out:
+- if (new_bm && !IS_ERR(new_bm))
+- dm_block_manager_destroy(new_bm);
+-
+ return r;
+ }
+
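
Aside: a unifying thread runs through the dm-thin-metadata changes above — every destroy is immediately followed by NULLing the pointer, and (further down in this patch) dm_sm_destroy()/dm_tm_destroy() become NULL-tolerant, kfree()-style. Together these make partial teardown, and the new destroy/reset/rebuild abort path, safe to run from any state. A minimal userspace sketch of the idiom with made-up resource types:

    #include <stdio.h>
    #include <stdlib.h>

    struct resource { int dummy; };

    /* NULL-tolerant destructor, in the spirit of kfree(NULL). */
    static void res_destroy(struct resource *r)
    {
        if (!r)
            return;
        free(r);
    }

    struct ctx { struct resource *a, *b; };

    /* Clears each pointer as it is released, so this can run again (or run
     * after a partially failed build) without double-freeing. */
    static void ctx_teardown(struct ctx *c)
    {
        res_destroy(c->a);
        c->a = NULL;
        res_destroy(c->b);
        c->b = NULL;
    }

    static int ctx_build(struct ctx *c)
    {
        c->a = malloc(sizeof(*c->a));
        c->b = malloc(sizeof(*c->b));
        if (!c->a || !c->b) {
            ctx_teardown(c);        /* safe whatever subset was allocated */
            return -1;
        }
        return 0;
    }

    /* Abort-and-rebuild in place, shaped like dm_pool_abort_metadata():
     * destroy the derived objects, then rebuild over the same lower layer. */
    static int ctx_abort_and_rebuild(struct ctx *c)
    {
        ctx_teardown(c);
        return ctx_build(c);
    }

    int main(void)
    {
        struct ctx c = { 0 };

        if (ctx_build(&c) == 0)
            printf("abort+rebuild: %d\n", ctx_abort_and_rebuild(&c));
        ctx_teardown(&c);
        return 0;
    }
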
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index 1f40100908d7c..2bbfbb704c751 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -415,6 +415,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
+ }
+ EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
+
++void dm_block_manager_reset(struct dm_block_manager *bm)
++{
++ dm_bufio_client_reset(bm->bufio);
++}
++EXPORT_SYMBOL_GPL(dm_block_manager_reset);
++
+ unsigned int dm_bm_block_size(struct dm_block_manager *bm)
+ {
+ return dm_bufio_get_block_size(bm->bufio);
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index 58a23b8ec1902..4371d85d3c258 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -35,6 +35,7 @@ struct dm_block_manager *dm_block_manager_create(
+ struct block_device *bdev, unsigned int block_size,
+ unsigned int max_held_per_thread);
+ void dm_block_manager_destroy(struct dm_block_manager *bm);
++void dm_block_manager_reset(struct dm_block_manager *bm);
+
+ unsigned int dm_bm_block_size(struct dm_block_manager *bm);
+ dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
+diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
+index a015cd11f6e97..85aa0a3974fe0 100644
+--- a/drivers/md/persistent-data/dm-space-map.h
++++ b/drivers/md/persistent-data/dm-space-map.h
+@@ -76,7 +76,8 @@ struct dm_space_map {
+
+ static inline void dm_sm_destroy(struct dm_space_map *sm)
+ {
+- sm->destroy(sm);
++ if (sm)
++ sm->destroy(sm);
+ }
+
+ static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 39885f8355847..557a3ecfe75a0 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -197,6 +197,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+ void dm_tm_destroy(struct dm_transaction_manager *tm)
+ {
++ if (!tm)
++ return;
++
+ if (!tm->is_clone)
+ wipe_shadow_table(tm);
+
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 5935be190b9e2..5f2a6fcba9670 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
+ err = -1;
+- goto failed;
++ goto free_buffer;
+ }
+
+ return 0;
++free_buffer:
++ kfree(tx_ring->tx_buffer);
++ tx_ring->tx_buffer = NULL;
+ failed:
+ if (adapter->ring_vir_addr != NULL) {
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
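
Aside: the atl1e hunk above is the classic goto-unwind shape — a new label is placed before the existing one so a late validation failure frees the most recently acquired resource and then falls through to the older cleanup. A small userspace sketch of the shape (invented names and sizes):

    #include <stdio.h>
    #include <stdlib.h>

    struct rings { char *ring, *tx_buffer; };

    static int setup(struct rings *r, size_t offset)
    {
        int err = -1;

        r->ring = malloc(64);
        if (!r->ring)
            goto failed;

        r->tx_buffer = malloc(32);
        if (!r->tx_buffer)
            goto free_ring;

        if (offset > 64)            /* validation that runs after both grabs */
            goto free_buffer;       /* must undo tx_buffer too, not only ring */

        return 0;

    free_buffer:
        free(r->tx_buffer);
        r->tx_buffer = NULL;
    free_ring:                      /* fall-through releases earlier grabs */
        free(r->ring);
        r->ring = NULL;
    failed:
        return err;
    }

    int main(void)
    {
        struct rings r = { 0 };

        if (setup(&r, 16) == 0) {
            free(r.tx_buffer);
            free(r.ring);
        }
        printf("oversized offset: %d\n", setup(&r, 128));
        return 0;
    }
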
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index bfddbff7bcdfb..28fb643d2917f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ {
+ struct otx2_nic *pfvf = netdev_priv(dev);
++ u8 old_pfc_en;
+ int err;
+
+- /* Save PFC configuration to interface */
++ old_pfc_en = pfvf->pfc_en;
+ pfvf->pfc_en = pfc->pfc_en;
+
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+@@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+- if (err)
++ if (err) {
++ pfvf->pfc_en = old_pfc_en;
+ return err;
++ }
+
+ process_pfc:
+ err = otx2_config_priority_flow_ctrl(pfvf);
+- if (err)
++ if (err) {
++ pfvf->pfc_en = old_pfc_en;
+ return err;
++ }
+
+ /* Request Per channel Bpids */
+ if (pfc->pfc_en)
+@@ -425,6 +430,12 @@ process_pfc:
+
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
++ if (pfc->pfc_en)
++ otx2_nix_config_bp(pfvf, false);
++
++ otx2_pfc_txschq_stop(pfvf);
++ pfvf->pfc_en = old_pfc_en;
++ otx2_config_priority_flow_ctrl(pfvf);
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
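
Aside: the otx2 change above is a rollback-on-failure pattern — snapshot the cached setting before overwriting it, and on any downstream error restore the snapshot and reprogram the hardware with it, so the software cache never diverges from device state. A compact sketch with a stand-in apply function (nothing here is the real driver API):

    #include <stdio.h>

    static unsigned char cached_pfc_en;          /* software copy of HW state */

    static int apply_to_hw(unsigned char val)    /* stand-in for the config call */
    {
        return val == 0xff ? -1 : 0;             /* pretend all-ones is rejected */
    }

    static int set_pfc(unsigned char new_pfc_en)
    {
        unsigned char old_pfc_en = cached_pfc_en;
        int err;

        cached_pfc_en = new_pfc_en;
        err = apply_to_hw(new_pfc_en);
        if (err) {
            cached_pfc_en = old_pfc_en;          /* roll back the cache... */
            apply_to_hw(old_pfc_en);             /* ...and reprogram the device */
        }
        return err;
    }

    int main(void)
    {
        set_pfc(0x0f);
        set_pfc(0xff);                            /* fails and rolls back */
        printf("pfc_en=0x%02x\n", cached_pfc_en); /* prints 0x0f */
        return 0;
    }
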
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index b3253e263ebc8..ac6a0785b10d8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -48,6 +48,25 @@
+ #define CREATE_TRACE_POINTS
+ #include "diag/cmd_tracepoint.h"
+
++struct mlx5_ifc_mbox_out_bits {
++ u8 status[0x8];
++ u8 reserved_at_8[0x18];
++
++ u8 syndrome[0x20];
++
++ u8 reserved_at_40[0x40];
++};
++
++struct mlx5_ifc_mbox_in_bits {
++ u8 opcode[0x10];
++ u8 uid[0x10];
++
++ u8 reserved_at_20[0x10];
++ u8 op_mod[0x10];
++
++ u8 reserved_at_40[0x40];
++};
++
+ enum {
+ CMD_IF_REV = 5,
+ };
+@@ -71,6 +90,26 @@ enum {
+ MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
+ };
+
++static u16 in_to_opcode(void *in)
++{
++ return MLX5_GET(mbox_in, in, opcode);
++}
++
++/* Returns true for opcodes that might be triggered very frequently and throttle
++ * the command interface. Limit their command slots usage.
++ */
++static bool mlx5_cmd_is_throttle_opcode(u16 op)
++{
++ switch (op) {
++ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
++ case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
++ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
++ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
++ return true;
++ }
++ return false;
++}
++
+ static struct mlx5_cmd_work_ent *
+ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
+ struct mlx5_cmd_msg *out, void *uout, int uout_size,
+@@ -92,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
+ ent->context = context;
+ ent->cmd = cmd;
+ ent->page_queue = page_queue;
++ ent->op = in_to_opcode(in->first.data);
+ refcount_set(&ent->refcnt, 1);
+
+ return ent;
+@@ -116,24 +156,27 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
+ return token;
+ }
+
+-static int cmd_alloc_index(struct mlx5_cmd *cmd)
++static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
+ {
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+- ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
+- if (ret < cmd->max_reg_cmds)
+- clear_bit(ret, &cmd->bitmask);
++ ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
++ if (ret < cmd->vars.max_reg_cmds) {
++ clear_bit(ret, &cmd->vars.bitmask);
++ ent->idx = ret;
++ cmd->ent_arr[ent->idx] = ent;
++ }
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+- return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
++ return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
+ }
+
+ static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
+ {
+ lockdep_assert_held(&cmd->alloc_lock);
+- set_bit(idx, &cmd->bitmask);
++ set_bit(idx, &cmd->vars.bitmask);
+ }
+
+ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
+@@ -152,7 +195,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
+
+ if (ent->idx >= 0) {
+ cmd_free_index(cmd, ent->idx);
+- up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
++ up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
+ }
+
+ cmd_free_ent(ent);
+@@ -162,7 +205,7 @@ out:
+
+ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+ {
+- return cmd->cmd_buf + (idx << cmd->log_stride);
++ return cmd->cmd_buf + (idx << cmd->vars.log_stride);
+ }
+
+ static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
+@@ -753,25 +796,6 @@ static int cmd_status_to_err(u8 status)
+ }
+ }
+
+-struct mlx5_ifc_mbox_out_bits {
+- u8 status[0x8];
+- u8 reserved_at_8[0x18];
+-
+- u8 syndrome[0x20];
+-
+- u8 reserved_at_40[0x40];
+-};
+-
+-struct mlx5_ifc_mbox_in_bits {
+- u8 opcode[0x10];
+- u8 uid[0x10];
+-
+- u8 reserved_at_20[0x10];
+- u8 op_mod[0x10];
+-
+- u8 reserved_at_40[0x40];
+-};
+-
+ void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+ {
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+@@ -789,7 +813,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
+ u16 opcode, op_mod;
+ u16 uid;
+
+- opcode = MLX5_GET(mbox_in, in, opcode);
++ opcode = in_to_opcode(in);
+ op_mod = MLX5_GET(mbox_in, in, op_mod);
+ uid = MLX5_GET(mbox_in, in, uid);
+
+@@ -801,7 +825,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
+ {
+ /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
+ if (err == -ENXIO) {
+- u16 opcode = MLX5_GET(mbox_in, in, opcode);
++ u16 opcode = in_to_opcode(in);
+ u32 syndrome;
+ u8 status;
+
+@@ -830,9 +854,9 @@ static void dump_command(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_work_ent *ent, int input)
+ {
+ struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+- u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
+ struct mlx5_cmd_mailbox *next = msg->next;
+ int n = mlx5_calc_cmd_blocks(msg);
++ u16 op = ent->op;
+ int data_only;
+ u32 offset = 0;
+ int dump_len;
+@@ -884,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev,
+ mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
+ }
+
+-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+-{
+- return MLX5_GET(mbox_in, in->first.data, opcode);
+-}
+-
+ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+
+ static void cb_timeout_handler(struct work_struct *work)
+@@ -906,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work)
+ /* Maybe got handled by eq recover ? */
+ if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
+ mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
+- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++ mlx5_command_str(ent->op), ent->op);
+ goto out; /* phew, already handled */
+ }
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
+- ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++ ent->idx, mlx5_command_str(ent->op), ent->op);
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
+
+ out:
+@@ -955,10 +974,10 @@ static void cmd_work_handler(struct work_struct *work)
+ cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+
+ complete(&ent->handling);
+- sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
++ sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
+ down(sem);
+ if (!ent->page_queue) {
+- alloc_ret = cmd_alloc_index(cmd);
++ alloc_ret = cmd_alloc_index(cmd, ent);
+ if (alloc_ret < 0) {
+ mlx5_core_err_rl(dev, "failed to allocate command entry\n");
+ if (ent->callback) {
+@@ -973,20 +992,18 @@ static void cmd_work_handler(struct work_struct *work)
+ up(sem);
+ return;
+ }
+- ent->idx = alloc_ret;
+ } else {
+- ent->idx = cmd->max_reg_cmds;
++ ent->idx = cmd->vars.max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+- clear_bit(ent->idx, &cmd->bitmask);
++ clear_bit(ent->idx, &cmd->vars.bitmask);
++ cmd->ent_arr[ent->idx] = ent;
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ }
+
+- cmd->ent_arr[ent->idx] = ent;
+ lay = get_inst(cmd, ent->idx);
+ ent->lay = lay;
+ memset(lay, 0, sizeof(*lay));
+ memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+- ent->op = be32_to_cpu(lay->in[0]) >> 16;
+ if (ent->in->next)
+ lay->in_ptr = cpu_to_be64(ent->in->next->dma);
+ lay->inlen = cpu_to_be32(ent->in->len);
+@@ -1099,12 +1116,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
+ */
+ if (wait_for_completion_timeout(&ent->done, timeout)) {
+ mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
+- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++ mlx5_command_str(ent->op), ent->op);
+ return;
+ }
+
+ mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
+- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++ mlx5_command_str(ent->op), ent->op);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
+@@ -1131,12 +1148,10 @@ out_err:
+
+ if (err == -ETIMEDOUT) {
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+- mlx5_command_str(msg_to_opcode(ent->in)),
+- msg_to_opcode(ent->in));
++ mlx5_command_str(ent->op), ent->op);
+ } else if (err == -ECANCELED) {
+ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+- mlx5_command_str(msg_to_opcode(ent->in)),
+- msg_to_opcode(ent->in));
++ mlx5_command_str(ent->op), ent->op);
+ }
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
+@@ -1170,7 +1185,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ u8 status = 0;
+ int err = 0;
+ s64 ds;
+- u16 op;
+
+ if (callback && page_queue)
+ return -EINVAL;
+@@ -1210,9 +1224,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+- op = MLX5_GET(mbox_in, in->first.data, opcode);
+- if (op < MLX5_CMD_OP_MAX) {
+- stats = &cmd->stats[op];
++ if (ent->op < MLX5_CMD_OP_MAX) {
++ stats = &cmd->stats[ent->op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+@@ -1220,7 +1233,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+- mlx5_command_str(op), ds);
++ mlx5_command_str(ent->op), ds);
+
+ out_free:
+ status = ent->status;
+@@ -1558,15 +1571,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+- for (i = 0; i < cmd->max_reg_cmds; i++)
+- down(&cmd->sem);
+- down(&cmd->pages_sem);
++ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++ down(&cmd->vars.sem);
++ down(&cmd->vars.pages_sem);
+
+ cmd->allowed_opcode = opcode;
+
+- up(&cmd->pages_sem);
+- for (i = 0; i < cmd->max_reg_cmds; i++)
+- up(&cmd->sem);
++ up(&cmd->vars.pages_sem);
++ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++ up(&cmd->vars.sem);
+ }
+
+ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
+@@ -1574,15 +1587,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+- for (i = 0; i < cmd->max_reg_cmds; i++)
+- down(&cmd->sem);
+- down(&cmd->pages_sem);
++ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++ down(&cmd->vars.sem);
++ down(&cmd->vars.pages_sem);
+
+ cmd->mode = mode;
+
+- up(&cmd->pages_sem);
+- for (i = 0; i < cmd->max_reg_cmds; i++)
+- up(&cmd->sem);
++ up(&cmd->vars.pages_sem);
++ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++ up(&cmd->vars.sem);
+ }
+
+ static int cmd_comp_notifier(struct notifier_block *nb,
+@@ -1641,7 +1654,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
+
+ /* there can be at most 32 command queues */
+ vector = vec & 0xffffffff;
+- for (i = 0; i < (1 << cmd->log_sz); i++) {
++ for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
+ if (test_bit(i, &vector)) {
+ ent = cmd->ent_arr[i];
+
+@@ -1730,7 +1743,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ /* wait for pending handlers to complete */
+ mlx5_eq_synchronize_cmd_irq(dev);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+- vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
++ vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
+ if (!vector)
+ goto no_trig;
+
+@@ -1739,14 +1752,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ * to guarantee pending commands will not get freed in the meanwhile.
+ * For that reason, it also has to be done inside the alloc_lock.
+ */
+- for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
++ for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
+ cmd_ent_get(cmd->ent_arr[i]);
+ vector |= MLX5_TRIGGERED_CMD_COMP;
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ mlx5_cmd_comp_handler(dev, vector, true);
+- for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
++ for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
+ cmd_ent_put(cmd->ent_arr[i]);
+ return;
+
+@@ -1759,22 +1772,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+- for (i = 0; i < cmd->max_reg_cmds; i++) {
+- while (down_trylock(&cmd->sem)) {
++ for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
++ while (down_trylock(&cmd->vars.sem)) {
+ mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
+
+- while (down_trylock(&cmd->pages_sem)) {
++ while (down_trylock(&cmd->vars.pages_sem)) {
+ mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+
+ /* Unlock cmdif */
+- up(&cmd->pages_sem);
+- for (i = 0; i < cmd->max_reg_cmds; i++)
+- up(&cmd->sem);
++ up(&cmd->vars.pages_sem);
++ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++ up(&cmd->vars.sem);
+ }
+
+ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+@@ -1817,7 +1830,7 @@ cache_miss:
+
+ static int is_manage_pages(void *in)
+ {
+- return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
++ return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
+ }
+
+ /* Notes:
+@@ -1828,8 +1841,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size, mlx5_cmd_cbk_t callback, void *context,
+ bool force_polling)
+ {
+- u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ struct mlx5_cmd_msg *inb, *outb;
++ u16 opcode = in_to_opcode(in);
++ bool throttle_op;
+ int pages_queue;
+ gfp_t gfp;
+ u8 token;
+@@ -1838,13 +1852,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+ return -ENXIO;
+
++ throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
++ if (throttle_op) {
++ /* atomic context may not sleep */
++ if (callback)
++ return -EINVAL;
++ down(&dev->cmd.vars.throttle_sem);
++ }
++
+ pages_queue = is_manage_pages(in);
+ gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
+
+ inb = alloc_msg(dev, in_size, gfp);
+ if (IS_ERR(inb)) {
+ err = PTR_ERR(inb);
+- return err;
++ goto out_up;
+ }
+
+ token = alloc_token(&dev->cmd);
+@@ -1878,6 +1900,9 @@ out_out:
+ mlx5_free_cmd_msg(dev, outb);
+ out_in:
+ free_msg(dev, inb);
++out_up:
++ if (throttle_op)
++ up(&dev->cmd.vars.throttle_sem);
+ return err;
+ }
+
+@@ -1952,8 +1977,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op
+ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+ {
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+- u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
++ u16 opcode = in_to_opcode(in);
+
+ return cmd_status_err(dev, err, opcode, op_mod, out);
+ }
+@@ -1998,8 +2023,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+ {
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+- u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
++ u16 opcode = in_to_opcode(in);
+
+ err = cmd_status_err(dev, err, opcode, op_mod, out);
+ return mlx5_cmd_check(dev, err, in, out);
+@@ -2051,7 +2076,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+
+ work->ctx = ctx;
+ work->user_callback = callback;
+- work->opcode = MLX5_GET(mbox_in, in, opcode);
++ work->opcode = in_to_opcode(in);
+ work->op_mod = MLX5_GET(mbox_in, in, op_mod);
+ work->out = out;
+ if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
+@@ -2187,16 +2212,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ goto err_free_pool;
+
+ cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
+- cmd->log_sz = cmd_l >> 4 & 0xf;
+- cmd->log_stride = cmd_l & 0xf;
+- if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
++ cmd->vars.log_sz = cmd_l >> 4 & 0xf;
++ cmd->vars.log_stride = cmd_l & 0xf;
++ if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
+ mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
+- 1 << cmd->log_sz);
++ 1 << cmd->vars.log_sz);
+ err = -EINVAL;
+ goto err_free_page;
+ }
+
+- if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
++ if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
+ mlx5_core_err(dev, "command queue size overflow\n");
+ err = -EINVAL;
+ goto err_free_page;
+@@ -2204,13 +2229,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+
+ cmd->state = MLX5_CMDIF_STATE_DOWN;
+ cmd->checksum_disabled = 1;
+- cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+- cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
++ cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
++ cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+
+- cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+- if (cmd->cmdif_rev > CMD_IF_REV) {
++ cmd->vars.cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
++ if (cmd->vars.cmdif_rev > CMD_IF_REV) {
+ mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
+- CMD_IF_REV, cmd->cmdif_rev);
++ CMD_IF_REV, cmd->vars.cmdif_rev);
+ err = -EOPNOTSUPP;
+ goto err_free_page;
+ }
+@@ -2220,8 +2245,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ for (i = 0; i < MLX5_CMD_OP_MAX; i++)
+ spin_lock_init(&cmd->stats[i].lock);
+
+- sema_init(&cmd->sem, cmd->max_reg_cmds);
+- sema_init(&cmd->pages_sem, 1);
++ sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
++ sema_init(&cmd->vars.pages_sem, 1);
++ sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ cmd_h = (u32)((u64)(cmd->dma) >> 32);
+ cmd_l = (u32)(cmd->dma);
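
Aside: the largest piece of the mlx5 hunk above is the new throttle semaphore — command-queue bookkeeping moves into cmd->vars, and opcodes that can be issued at a very high rate must now take a counting semaphore sized to half the command slots (DIV_ROUND_UP(max_reg_cmds, 2)) before executing, so they can never occupy every slot and starve other commands. A userspace sketch of the idea using POSIX semaphores; the opcode numbers are invented:

    #include <semaphore.h>
    #include <stdio.h>

    #define MAX_REG_CMDS 15

    static sem_t throttle_sem;

    static int is_throttle_opcode(int op)
    {
        return op == 100 || op == 101;   /* e.g. create/destroy general object */
    }

    static int cmd_exec(int op)
    {
        int throttled = is_throttle_opcode(op);

        if (throttled)
            sem_wait(&throttle_sem);     /* may sleep: forbidden for async
                                          * callers, which the kernel code
                                          * rejects with -EINVAL */

        /* ... build mailbox, post command, wait for completion ... */

        if (throttled)
            sem_post(&throttle_sem);     /* released on every exit path */
        return 0;
    }

    int main(void)
    {
        /* half the slots, rounded up, as in the patch */
        sem_init(&throttle_sem, 0, (MAX_REG_CMDS + 1) / 2);
        cmd_exec(100);
        cmd_exec(42);
        sem_destroy(&throttle_sem);
        puts("done");
        return 0;
    }
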
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+index bb95b40d25eb5..e0b0729e238c1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ int ret;
+
+ cmd = filp->private_data;
+- weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+- field = cmd->max_reg_cmds - weight;
++ weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
++ field = cmd->vars.max_reg_cmds - weight;
+ ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 374c0011a127b..3ba54ffa54bfe 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -691,7 +691,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+
+ while (block_timestamp > tracer->last_timestamp) {
+ /* Check block override if it's not the first block */
+- if (!tracer->last_timestamp) {
++ if (tracer->last_timestamp) {
+ u64 *ts_event;
+ /* To avoid block override be the HW in case of buffer
+ * wraparound, the time stamp of the previous block
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index be83ad9db82a4..e1283531e0b81 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
++ ft->g = NULL;
+ kvfree(in);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 4db0483c066a8..83bb0811e7741 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -300,6 +300,9 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ if (err)
+ goto destroy_neigh_entry;
+
++ e->encap_size = ipv4_encap_size;
++ e->encap_header = encap_header;
++
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+@@ -319,8 +322,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ goto destroy_neigh_entry;
+ }
+
+- e->encap_size = ipv4_encap_size;
+- e->encap_header = encap_header;
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+@@ -403,12 +404,16 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ if (err)
+ goto free_encap;
+
++ e->encap_size = ipv4_encap_size;
++ kfree(e->encap_header);
++ e->encap_header = encap_header;
++
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+- goto free_encap;
++ goto release_neigh;
+ }
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -422,10 +427,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ goto free_encap;
+ }
+
+- e->encap_size = ipv4_encap_size;
+- kfree(e->encap_header);
+- e->encap_header = encap_header;
+-
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+@@ -567,6 +568,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ if (err)
+ goto destroy_neigh_entry;
+
++ e->encap_size = ipv6_encap_size;
++ e->encap_header = encap_header;
++
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+@@ -586,8 +590,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ goto destroy_neigh_entry;
+ }
+
+- e->encap_size = ipv6_encap_size;
+- e->encap_header = encap_header;
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
+@@ -669,12 +671,16 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ if (err)
+ goto free_encap;
+
++ e->encap_size = ipv6_encap_size;
++ kfree(e->encap_header);
++ e->encap_header = encap_header;
++
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+- goto free_encap;
++ goto release_neigh;
+ }
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -688,10 +694,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ goto free_encap;
+ }
+
+- e->encap_size = ipv6_encap_size;
+- kfree(e->encap_header);
+- e->encap_header = encap_header;
+-
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
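
Aside: the IPv4 and IPv6 paths above get the same two-part fix — ownership of the freshly built encap header is transferred into the entry before the "neighbour not valid yet" early exit, and on the update paths that exit is retargeted from free_encap to release_neigh, since freeing a buffer the entry now points at would leave a dangling pointer for the later neigh-update path. A small sketch of the ownership-transfer shape (invented types, not the driver's structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { char *hdr; size_t len; };

    static int update_header(struct entry *e, int neigh_valid)
    {
        char *hdr = malloc(64);

        if (!hdr)
            return -1;            /* nothing transferred yet: safe to bail */

        free(e->hdr);             /* drop the previous header */
        e->hdr = hdr;             /* entry owns hdr from this point on */
        e->len = 64;

        if (!neigh_valid)
            return 0;             /* 'release_neigh': keep hdr for later use */

        /* ... program the offload; later failures must not free hdr
         * blindly, since e->hdr still points at it ... */
        return 0;
    }

    int main(void)
    {
        struct entry e = { 0 };

        update_header(&e, 0);
        printf("header kept, len=%zu\n", e.len);
        free(e.hdr);
        return 0;
    }
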
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index eeba91d9c5211..ceeb23f478e15 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+- if (count == sizeof(drvinfo->fw_version))
++ if (count >= sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 2653cb96c3105..5aeca9534f15a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -76,7 +76,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+- if (count == sizeof(drvinfo->fw_version))
++ if (count >= sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
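
Aside: the two ethtool hunks above fix the same truncation test — snprintf() returns the length the output *would* have had, so on truncation the return value can be any value >= the buffer size; comparing with == only catches an exact fit. A quick demonstration:

    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        int n = snprintf(buf, sizeof(buf), "%s", "hello, world");

        /* n is 12 here: the would-be length, not the stored length */
        if (n >= (int)sizeof(buf))
            printf("truncated: wanted %d bytes, kept \"%s\"\n", n, buf);
        return 0;
    }
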
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index d5c3173250309..3f68e3198aa64 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ req_list_size = max_list_size;
+ }
+
+- out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
++ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
+ req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+
+ out = kvzalloc(out_sz, GFP_KERNEL);
+diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
+index fecd43754cead..e5ec0a363aff8 100644
+--- a/drivers/net/ethernet/micrel/ks8851.h
++++ b/drivers/net/ethernet/micrel/ks8851.h
+@@ -350,6 +350,8 @@ union ks8851_tx_hdr {
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
++ * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR).
++ * @queued_len: Space required in hardware TX buffer for queued packets in txq.
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
+@@ -399,6 +401,7 @@ struct ks8851_net {
+ struct work_struct rxctrl_work;
+
+ struct sk_buff_head txq;
++ unsigned int queued_len;
+
+ struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index cfbc900d4aeb9..0bf13b38b8f5b 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ handled |= IRQ_RXPSI;
+
+ if (status & IRQ_TXI) {
+- handled |= IRQ_TXI;
++ unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+
+- /* no lock here, tx queue should have been stopped */
++ netif_dbg(ks, intr, ks->netdev,
++ "%s: txspace %d\n", __func__, tx_space);
+
+- /* update our idea of how much tx space is available to the
+- * system */
+- ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
++ spin_lock(&ks->statelock);
++ ks->tx_space = tx_space;
++ if (netif_queue_stopped(ks->netdev))
++ netif_wake_queue(ks->netdev);
++ spin_unlock(&ks->statelock);
+
+- netif_dbg(ks, intr, ks->netdev,
+- "%s: txspace %d\n", __func__, ks->tx_space);
++ handled |= IRQ_TXI;
+ }
+
+ if (status & IRQ_RXI)
+@@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ if (status & IRQ_LCI)
+ mii_check_link(&ks->mii);
+
+- if (status & IRQ_TXI)
+- netif_wake_queue(ks->netdev);
+-
+ return IRQ_HANDLED;
+ }
+
+@@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev)
+ ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
+ ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
+
++ ks->queued_len = 0;
+ netif_start_queue(ks->netdev);
+
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 70bc7253454f6..88e26c120b483 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -286,6 +286,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+ }
+
++/**
++ * calc_txlen - calculate size of message to send packet
++ * @len: Length of data
++ *
++ * Returns the size of the TXFIFO message needed to send
++ * this packet.
++ */
++static unsigned int calc_txlen(unsigned int len)
++{
++ return ALIGN(len + 4, 4);
++}
++
+ /**
+ * ks8851_rx_skb_spi - receive skbuff
+ * @ks: The device state
+@@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+ */
+ static void ks8851_tx_work(struct work_struct *work)
+ {
++ unsigned int dequeued_len = 0;
+ struct ks8851_net_spi *kss;
++ unsigned short tx_space;
+ struct ks8851_net *ks;
+ unsigned long flags;
+ struct sk_buff *txb;
+@@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work)
+ last = skb_queue_empty(&ks->txq);
+
+ if (txb) {
++ dequeued_len += calc_txlen(txb->len);
++
+ ks8851_wrreg16_spi(ks, KS_RXQCR,
+ ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrfifo_spi(ks, txb, last);
+@@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work)
+ }
+ }
+
++ tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
++
++ spin_lock(&ks->statelock);
++ ks->queued_len -= dequeued_len;
++ ks->tx_space = tx_space;
++ spin_unlock(&ks->statelock);
++
+ ks8851_unlock_spi(ks, &flags);
+ }
+
+@@ -346,18 +369,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
+ flush_work(&kss->tx_work);
+ }
+
+-/**
+- * calc_txlen - calculate size of message to send packet
+- * @len: Length of data
+- *
+- * Returns the size of the TXFIFO message needed to send
+- * this packet.
+- */
+-static unsigned int calc_txlen(unsigned int len)
+-{
+- return ALIGN(len + 4, 4);
+-}
+-
+ /**
+ * ks8851_start_xmit_spi - transmit packet using SPI
+ * @skb: The buffer to transmit
+@@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
+
+ spin_lock(&ks->statelock);
+
+- if (needed > ks->tx_space) {
++ if (ks->queued_len + needed > ks->tx_space) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ } else {
+- ks->tx_space -= needed;
++ ks->queued_len += needed;
+ skb_queue_tail(&ks->txq, skb);
+ }
+
+ spin_unlock(&ks->statelock);
+- schedule_work(&kss->tx_work);
++ if (ret == NETDEV_TX_OK)
++ schedule_work(&kss->tx_work);
+
+ return ret;
+ }
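
Aside: the ks8851 changes across the two files above replace optimistic accounting (decrement tx_space at queue time and hope the IRQ refreshes it) with explicit tracking — queued_len counts bytes committed but not yet written, the worker subtracts what it actually pushed and refreshes tx_space from the chip, and the IRQ wakes the queue under the same lock. A userspace sketch of the accounting, with made-up sizes:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t statelock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int tx_space = 6144;   /* last value read from KS_TXMIR */
    static unsigned int queued_len;        /* committed but not yet written */

    static unsigned int calc_txlen(unsigned int len)
    {
        return (len + 4 + 3) & ~3u;        /* header + pad to 4 bytes */
    }

    static int start_xmit(unsigned int pkt_len)
    {
        unsigned int needed = calc_txlen(pkt_len);
        int busy = 0;

        pthread_mutex_lock(&statelock);
        if (queued_len + needed > tx_space)
            busy = 1;                      /* stop the queue, retry later */
        else
            queued_len += needed;
        pthread_mutex_unlock(&statelock);
        return busy;
    }

    static void tx_work(unsigned int wrote, unsigned int hw_free)
    {
        pthread_mutex_lock(&statelock);
        queued_len -= wrote;               /* bytes actually pushed to the FIFO */
        tx_space = hw_free;                /* fresh KS_TXMIR readout */
        pthread_mutex_unlock(&statelock);
    }

    int main(void)
    {
        printf("queue 1500: busy=%d\n", start_xmit(1500));
        tx_work(calc_txlen(1500), 6144);
        printf("queued_len=%u tx_space=%u\n", queued_len, tx_space);
        return 0;
    }
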
+diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
+index fe4e7a7d9c0b5..8b6c4cc37c53c 100644
+--- a/drivers/net/ethernet/microsoft/Kconfig
++++ b/drivers/net/ethernet/microsoft/Kconfig
+@@ -19,6 +19,7 @@ config MICROSOFT_MANA
+ tristate "Microsoft Azure Network Adapter (MANA) support"
+ depends on PCI_MSI && X86_64
+ depends on PCI_HYPERV
++ select PAGE_POOL
+ help
+ This driver supports Microsoft Azure Network Adapter (MANA).
+ So far, the driver is only supported on X86_64.
+diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
+index 0066219bb0e89..6b95262dad904 100644
+--- a/drivers/net/ethernet/mscc/ocelot_stats.c
++++ b/drivers/net/ethernet/mscc/ocelot_stats.c
+@@ -216,10 +216,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
+- rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255];
+- rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511];
+- rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023];
+- rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
++ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
++ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
++ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
++ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
+ }
+
+ void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 4ea0e155bb0d5..5a1bf42ce1566 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -173,6 +173,7 @@ struct ax88179_data {
+ u8 in_pm;
+ u32 wol_supported;
+ u32 wolopts;
++ u8 disconnecting;
+ };
+
+ struct ax88179_int_data {
+@@ -208,6 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ {
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
++ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ BUG_ON(!dev);
+
+@@ -219,7 +221,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+- if (unlikely(ret < 0))
++ if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
+ netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+ index, ret);
+
+@@ -231,6 +233,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ {
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
++ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ BUG_ON(!dev);
+
+@@ -242,7 +245,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+- if (unlikely(ret < 0))
++ if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
+ netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+ index, ret);
+
+@@ -492,6 +495,20 @@ static int ax88179_resume(struct usb_interface *intf)
+ return usbnet_resume(intf);
+ }
+
++static void ax88179_disconnect(struct usb_interface *intf)
++{
++ struct usbnet *dev = usb_get_intfdata(intf);
++ struct ax88179_data *ax179_data;
++
++ if (!dev)
++ return;
++
++ ax179_data = dev->driver_priv;
++ ax179_data->disconnecting = 1;
++
++ usbnet_disconnect(intf);
++}
++
+ static void
+ ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+ {
+@@ -1906,7 +1923,7 @@ static struct usb_driver ax88179_178a_driver = {
+ .suspend = ax88179_suspend,
+ .resume = ax88179_resume,
+ .reset_resume = ax88179_resume,
+- .disconnect = usbnet_disconnect,
++ .disconnect = ax88179_disconnect,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+ };
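
Aside: the ax88179 hunk above quiets a teardown race rather than a functional bug — once the USB device is gone, every register access fails with -ENODEV, so .disconnect sets a flag before calling the generic teardown and the register helpers skip the warning for exactly that case. A sketch of the shape (errno plumbing simplified):

    #include <stdio.h>

    #define ENODEV 19

    static struct { int disconnecting; } priv;

    static void read_reg(int make_enodev)
    {
        int ret = make_enodev ? -ENODEV : 0;

        if (ret < 0 && !(ret == -ENODEV && priv.disconnecting))
            printf("warn: failed to read reg: %d\n", ret);
    }

    int main(void)
    {
        read_reg(1);              /* genuine failure: warns */
        priv.disconnecting = 1;   /* set in .disconnect before core teardown */
        read_reg(1);              /* expected during unplug: silent */
        return 0;
    }
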
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 39ab6526e6b85..796972f224326 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -3034,7 +3034,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ u32 i, r, j, rb_len = 0;
+
+- spin_lock(&rxq->lock);
++ spin_lock_bh(&rxq->lock);
+
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+
+@@ -3058,7 +3058,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+- spin_unlock(&rxq->lock);
++ spin_unlock_bh(&rxq->lock);
+
+ return rb_len;
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index eb7c87b344b8f..5b906dbb1096c 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4835,6 +4835,8 @@ static void nvme_fw_act_work(struct work_struct *work)
+ struct nvme_ctrl, fw_act_work);
+ unsigned long fw_act_timeout;
+
++ nvme_auth_stop(ctrl);
++
+ if (ctrl->mtfa)
+ fw_act_timeout = jiffies +
+ msecs_to_jiffies(ctrl->mtfa * 100);
+@@ -4890,7 +4892,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ * firmware activation.
+ */
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+- nvme_auth_stop(ctrl);
+ requeue = false;
+ queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index f71c6457e3509..2425d4813c3c5 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1033,6 +1033,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
+ }
+ };
+
++/*
++ * This lock class allows to tell lockdep that parent IRQ and children IRQ do
++ * not share the same class so it does not raise false positive
++ */
++static struct lock_class_key atmel_lock_key;
++static struct lock_class_key atmel_request_key;
++
+ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -1185,6 +1192,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
+ handle_simple_irq);
+ irq_set_chip_data(irq, atmel_pioctrl);
++ irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
+ dev_dbg(dev,
+ "atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
+ i, irq);
+diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+index 5b544fb7f3d88..3b18a03075f46 100644
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+@@ -489,7 +489,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+
+ nmaps = 0;
+ ngroups = 0;
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npinmux = of_property_count_u32_elems(child, "pinmux");
+ int npins = of_property_count_u32_elems(child, "pins");
+
+@@ -524,7 +524,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+ nmaps = 0;
+ ngroups = 0;
+ mutex_lock(&sfp->mutex);
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npins;
+ int i;
+
+diff --git a/drivers/reset/core.c b/drivers/reset/core.c
+index f0a076e94118f..92cc13ef3e566 100644
+--- a/drivers/reset/core.c
++++ b/drivers/reset/core.c
+@@ -807,6 +807,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
+ {
+ lockdep_assert_held(&reset_list_mutex);
+
++ if (IS_ERR_OR_NULL(rstc))
++ return;
++
+ kref_put(&rstc->refcnt, __reset_control_release);
+ }
+
+@@ -1017,11 +1020,8 @@ EXPORT_SYMBOL_GPL(reset_control_put);
+ void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+ {
+ mutex_lock(&reset_list_mutex);
+- while (num_rstcs--) {
+- if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
+- continue;
++ while (num_rstcs--)
+ __reset_control_put_internal(rstcs[num_rstcs].rstc);
+- }
+ mutex_unlock(&reset_list_mutex);
+ }
+ EXPORT_SYMBOL_GPL(reset_control_bulk_put);
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 7c6efde75da66..5e115e8b2ba46 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -1678,7 +1678,6 @@ struct aac_dev
+ u32 handle_pci_error;
+ bool init_reset;
+ u8 soft_reset_support;
+- u8 use_map_queue;
+ };
+
+ #define aac_adapter_interrupt(dev) \
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 013a9a334972e..25cee03d7f973 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
+ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+ {
+ struct fib *fibptr;
+- u32 blk_tag;
+- int i;
+
+- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+- i = blk_mq_unique_tag_to_tag(blk_tag);
+- fibptr = &dev->fibs[i];
++ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
+ /*
+ * Null out fields that depend on being zero at the start of
+ * each I/O
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index bff49b8ab057d..5ba5c18b77b46 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -19,7 +19,6 @@
+
+ #include <linux/compat.h>
+ #include <linux/blkdev.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/completion.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -506,15 +505,6 @@ common_config:
+ return 0;
+ }
+
+-static void aac_map_queues(struct Scsi_Host *shost)
+-{
+- struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+-
+- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+- aac->pdev, 0);
+- aac->use_map_queue = true;
+-}
+-
+ /**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+@@ -1499,7 +1489,6 @@ static struct scsi_host_template aac_driver_template = {
+ .bios_param = aac_biosparm,
+ .shost_groups = aac_host_groups,
+ .slave_configure = aac_slave_configure,
+- .map_queues = aac_map_queues,
+ .change_queue_depth = aac_change_queue_depth,
+ .sdev_groups = aac_dev_groups,
+ .eh_abort_handler = aac_eh_abort,
+@@ -1787,8 +1776,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ shost->max_lun = AAC_MAX_LUN;
+
+ pci_set_drvdata(pdev, shost);
+- shost->nr_hw_queues = aac->max_msix;
+- shost->host_tagset = 1;
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+@@ -1921,7 +1908,6 @@ static void aac_remove_one(struct pci_dev *pdev)
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+ aac_cancel_rescan_worker(aac);
+- aac->use_map_queue = false;
+ scsi_remove_host(shost);
+
+ __aac_shutdown(aac);
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 61949f3741886..11ef58204e96f 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
+ #endif
+
+ u16 vector_no;
+- struct scsi_cmnd *scmd;
+- u32 blk_tag;
+- struct Scsi_Host *shost = dev->scsi_host_ptr;
+- struct blk_mq_queue_map *qmap;
+
+ atomic_inc(&q->numpending);
+
+@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+ && dev->sa_firmware)
+ vector_no = aac_get_vector(dev);
+- else {
+- if (!fib->vector_no || !fib->callback_data) {
+- if (shost && dev->use_map_queue) {
+- qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+- vector_no = qmap->mq_map[raw_smp_processor_id()];
+- }
+- /*
+- * We hardcode the vector_no for
+- * reserved commands as a valid shost is
+- * absent during the init
+- */
+- else
+- vector_no = 0;
+- } else {
+- scmd = (struct scsi_cmnd *)fib->callback_data;
+- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+- vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
+- }
+- }
++ else
++ vector_no = fib->vector_no;
+
+ if (native_hba) {
+ if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 05ddbb9bb7d8a..451a58e0fd969 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -429,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+- struct sk_buff *tmp_skb;
+
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fcoe_packet_type);
+@@ -441,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ goto err;
+ }
+
+- tmp_skb = skb_share_check(skb, GFP_ATOMIC);
+- if (!tmp_skb)
+- goto err;
+-
+- skb = tmp_skb;
++ skb = skb_share_check(skb, GFP_ATOMIC);
++ if (!skb)
++ return -1;
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
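
Aside: the bnx2fc hunk above fixes a double free — skb_share_check() consumes the passed-in skb when it has to clone and the clone fails, so the old error path, which freed the skb again, must be bypassed; returning directly is the only safe exit. A userspace analogue of a helper that consumes its argument on failure:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int shared; };

    static struct buf *share_check(struct buf *b)
    {
        if (b->shared) {
            struct buf *clone = malloc(sizeof(*clone));

            if (clone)
                *clone = (struct buf){ .shared = 0 };
            free(b);               /* the original is consumed either way */
            return clone;          /* may be NULL */
        }
        return b;
    }

    static int rcv(struct buf *b)
    {
        b = share_check(b);
        if (!b)
            return -1;             /* do NOT 'goto err': b is already gone */

        /* ... process the frame ... */
        free(b);
        return 0;
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));

        b->shared = 0;
        printf("rcv=%d\n", rcv(b));
        return 0;
    }
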
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 02520f9123066..9a289d6f2e5ee 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1108,6 +1108,7 @@ retry:
+
+ scsi_log_send(scmd);
+ scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
++ scmd->flags |= SCMD_LAST;
+
+ /*
+ * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
+@@ -2402,6 +2403,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
+ scsi_init_command(dev, scmd);
+
+ scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
++ scmd->flags |= SCMD_LAST;
+ memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+
+ scmd->cmd_len = 0;
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index d89f92032c1c2..f691bce5c1477 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -943,7 +943,7 @@ static void margining_port_remove(struct tb_port *port)
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ if (parent)
+- debugfs_remove_recursive(debugfs_lookup("margining", parent));
++ debugfs_lookup_and_remove("margining", parent);
+
+ kfree(port->usb4->margining);
+ port->usb4->margining = NULL;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 05e28a5ce42b1..fe2173e37b061 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1033,9 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
+- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
+- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
+- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
+ { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e2099445db708..21a2b5a25fc09 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1568,9 +1568,9 @@
+ #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
+ #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
+ #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
+-#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
+-#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
+-#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
++#define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */
++#define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */
++#define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */
+ #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
+ #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
+ #define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7f2aa72d52e65..4adef92598709 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -272,6 +272,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_RM500Q 0x0800
+ #define QUECTEL_PRODUCT_RM520N 0x0801
+ #define QUECTEL_PRODUCT_EC200U 0x0901
++#define QUECTEL_PRODUCT_EG912Y 0x6001
+ #define QUECTEL_PRODUCT_EC200S_CN 0x6002
+ #define QUECTEL_PRODUCT_EC200A 0x6005
+ #define QUECTEL_PRODUCT_EM061K_LWW 0x6008
+@@ -1232,6 +1233,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ .driver_info = RSVD(3) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
+@@ -1244,6 +1246,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+@@ -2242,6 +2245,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */
++ .driver_info = RSVD(3) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 20dcbccb290b3..fd68204374f2c 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1305,6 +1305,17 @@ UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_INITIAL_READ10 ),
+
++/*
++ * Patch by Tasos Sahanidis <tasos@tasossah.com>
++ * This flash drive always shows up with write protect enabled
++ * during the first mode sense.
++ */
++UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100,
++ "Kingston",
++ "DT Ultimate G3",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_WP_DETECT),
++
+ /*
+ * This Pentax still camera is not conformant
+ * to the USB storage specification: -
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index 988c2ac7cecec..926cb1188eba6 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -409,10 +409,12 @@ static int afs_update_cell(struct afs_cell *cell)
+ if (ret == -ENOMEM)
+ goto out_wake;
+
+- ret = -ENOMEM;
+ vllist = afs_alloc_vlserver_list(0);
+- if (!vllist)
++ if (!vllist) {
++ if (ret >= 0)
++ ret = -ENOMEM;
+ goto out_wake;
++ }
+
+ switch (ret) {
+ case -ENODATA:
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index 91e804c70dd0a..9937993cf29dc 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -114,6 +114,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ struct afs_net *net = afs_d2net(dentry);
+ const char *name = dentry->d_name.name;
+ size_t len = dentry->d_name.len;
++ char *result = NULL;
+ int ret;
+
+ /* Names prefixed with a dot are R/W mounts. */
+@@ -131,9 +132,22 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ }
+
+ ret = dns_query(net->net, "afsdb", name, len, "srv=1",
+- NULL, NULL, false);
+- if (ret == -ENODATA || ret == -ENOKEY)
++ &result, NULL, false);
++ if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
+ ret = -ENOENT;
++ if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
++ struct dns_server_list_v1_header *v1 = (void *)result;
++
++ if (v1->hdr.zero == 0 &&
++ v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
++ v1->hdr.version == 1 &&
++ (v1->status != DNS_LOOKUP_GOOD &&
++ v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
++ return -ENOENT;
++
++ }
++
++ kfree(result);
+ return ret;
+ }
+
+@@ -252,20 +266,9 @@ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
+ return 1;
+ }
+
+-/*
+- * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
+- * sleep)
+- * - called from dput() when d_count is going to 0.
+- * - return 1 to request dentry be unhashed, 0 otherwise
+- */
+-static int afs_dynroot_d_delete(const struct dentry *dentry)
+-{
+- return d_really_is_positive(dentry);
+-}
+-
+ const struct dentry_operations afs_dynroot_dentry_operations = {
+ .d_revalidate = afs_dynroot_d_revalidate,
+- .d_delete = afs_dynroot_d_delete,
++ .d_delete = always_delete_dentry,
+ .d_release = afs_d_release,
+ .d_automount = afs_d_automount,
+ };
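
The dynroot change makes afs_probe_cell_name() actually inspect the payload handed back by dns_query(): a cell name only resolves if the upcall returned a well-formed v1 server list whose lookup status is good. A rough userspace sketch of that header check (the structs are abbreviated from the dns_resolver UAPI; treat the constants and layout as an approximation):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { DNS_PAYLOAD_IS_SERVER_LIST = 0 };
enum { DNS_LOOKUP_GOOD = 1, DNS_LOOKUP_GOOD_WITH_BAD = 2 };

struct dns_payload_header { uint8_t zero, content, version; };
struct dns_server_list_v1_header {
	struct dns_payload_header hdr;
	uint8_t source, status, nr_servers;
};

/* Accept only a well-formed v1 server list whose lookup succeeded. */
static bool server_list_usable(const void *data, size_t len)
{
	const struct dns_server_list_v1_header *v1 = data;

	if (len < sizeof(*v1))
		return false;
	return v1->hdr.zero == 0 &&
	       v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
	       v1->hdr.version == 1 &&
	       (v1->status == DNS_LOOKUP_GOOD ||
		v1->status == DNS_LOOKUP_GOOD_WITH_BAD);
}

int main(void)
{
	struct dns_server_list_v1_header h = {
		.hdr = { .zero = 0, .content = DNS_PAYLOAD_IS_SERVER_LIST,
			 .version = 1 },
		.status = DNS_LOOKUP_GOOD,
	};
	printf("usable: %d\n", server_list_usable(&h, sizeof(h)));
	return 0;
}
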
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index c2d70fc1698c0..fcbb598d8c85d 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -585,6 +585,7 @@ struct afs_volume {
+ #define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
+ #define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
+ #define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
++#define AFS_VOLUME_RM_TREE 7 /* - Set if volume removed from cell->volumes */
+ #ifdef CONFIG_AFS_FSCACHE
+ struct fscache_volume *cache; /* Caching cookie */
+ #endif
+@@ -1517,6 +1518,7 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
+ extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
+ extern int afs_activate_volume(struct afs_volume *);
+ extern void afs_deactivate_volume(struct afs_volume *);
++bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason);
+ extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
+ extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
+ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index f4937029dcd72..1c9144e3e83ac 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -32,8 +32,13 @@ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
+ } else if (p->vid > volume->vid) {
+ pp = &(*pp)->rb_right;
+ } else {
+- volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
+- goto found;
++ if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) {
++ volume = p;
++ goto found;
++ }
++
++ set_bit(AFS_VOLUME_RM_TREE, &p->flags);
++ rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes);
+ }
+ }
+
+@@ -56,7 +61,8 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
+ afs_volume_trace_remove);
+ write_seqlock(&cell->volume_lock);
+ hlist_del_rcu(&volume->proc_link);
+- rb_erase(&volume->cell_node, &cell->volumes);
++ if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags))
++ rb_erase(&volume->cell_node, &cell->volumes);
+ write_sequnlock(&cell->volume_lock);
+ }
+ }
+@@ -235,6 +241,20 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
+ _leave(" [destroyed]");
+ }
+
++/*
++ * Try to get a reference on a volume record.
++ */
++bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
++{
++ int r;
++
++ if (__refcount_inc_not_zero(&volume->ref, &r)) {
++ trace_afs_volume(volume->vid, r + 1, reason);
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * Get a reference on a volume record.
+ */
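
afs_try_get_volume() is the standard inc-not-zero idiom: a lookup that races with the last put must not resurrect a refcount that already reached zero; instead it fails, and the insert path swaps a fresh node into the tree while flagging the dying one (AFS_VOLUME_RM_TREE) so it is erased only once. A userspace analogue of the try-get using C11 atomics, as a sketch rather than the kernel's refcount_t implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int ref; };

/* Take a reference only if the object is not already being destroyed. */
static bool obj_try_get(struct obj *o)
{
	int r = atomic_load(&o->ref);

	while (r != 0) {
		if (atomic_compare_exchange_weak(&o->ref, &r, r + 1))
			return true;	/* reference taken */
	}
	return false;	/* refcount already hit zero: lookup loses the race */
}

int main(void)
{
	struct obj live = { .ref = 1 }, dying = { .ref = 0 };

	printf("live: %d dying: %d\n", obj_try_get(&live), obj_try_get(&dying));
	return 0;
}
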
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 4cd8e44cba4c5..b27795e13ff31 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2685,13 +2685,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+
+ spin_lock(&ctl->tree_lock);
+- /* Count initial region as zone_unusable until it gets activated. */
+ if (!used)
+ to_free = size;
+- else if (initial &&
+- test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
+- (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+- to_free = 0;
+ else if (initial)
+ to_free = block_group->zone_capacity;
+ else if (offset >= block_group->alloc_offset)
+@@ -2719,8 +2714,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ reclaimable_unusable = block_group->zone_unusable -
+ (block_group->length - block_group->zone_capacity);
+ /* All the region is now unusable. Mark it as unused and reclaim */
+- if (block_group->zone_unusable == block_group->length &&
+- block_group->alloc_offset) {
++ if (block_group->zone_unusable == block_group->length) {
+ btrfs_mark_bg_unused(block_group);
+ } else if (bg_reclaim_threshold &&
+ reclaimable_unusable >=
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 675dbed075d8e..99cb690da9893 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1574,19 +1574,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
+ return;
+
+ WARN_ON(cache->bytes_super != 0);
+-
+- /* Check for block groups never get activated */
+- if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
+- cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
+- !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
+- cache->alloc_offset == 0) {
+- unusable = cache->length;
+- free = 0;
+- } else {
+- unusable = (cache->alloc_offset - cache->used) +
+- (cache->length - cache->zone_capacity);
+- free = cache->zone_capacity - cache->alloc_offset;
+- }
++ unusable = (cache->alloc_offset - cache->used) +
++ (cache->length - cache->zone_capacity);
++ free = cache->zone_capacity - cache->alloc_offset;
+
+ /* We only need ->free_space in ALLOC_SEQ block groups */
+ cache->cached = BTRFS_CACHE_FINISHED;
+@@ -1882,7 +1872,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ {
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+- struct btrfs_space_info *space_info = block_group->space_info;
+ struct map_lookup *map;
+ struct btrfs_device *device;
+ u64 physical;
+@@ -1894,7 +1883,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ map = block_group->physical_map;
+
+- spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
+ ret = true;
+@@ -1923,14 +1911,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ /* Successfully activated all the zones */
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+- WARN_ON(block_group->alloc_offset != 0);
+- if (block_group->zone_unusable == block_group->length) {
+- block_group->zone_unusable = block_group->length - block_group->zone_capacity;
+- space_info->bytes_zone_unusable -= block_group->zone_capacity;
+- }
+ spin_unlock(&block_group->lock);
+- btrfs_try_granting_tickets(fs_info, space_info);
+- spin_unlock(&space_info->lock);
+
+ /* For the active block group list */
+ btrfs_get_block_group(block_group);
+@@ -1943,7 +1924,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ out_unlock:
+ spin_unlock(&block_group->lock);
+- spin_unlock(&space_info->lock);
+ return ret;
+ }
+
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 98a9cf5318731..a9681fecbd91f 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -63,6 +63,19 @@ struct fuse_forget_link {
+ struct fuse_forget_link *next;
+ };
+
++/* Submount lookup tracking */
++struct fuse_submount_lookup {
++ /** Refcount */
++ refcount_t count;
++
++ /** Unique ID, which identifies the inode between userspace
++ * and kernel */
++ u64 nodeid;
++
++ /** The request used for sending the FORGET message */
++ struct fuse_forget_link *forget;
++};
++
+ /** FUSE inode */
+ struct fuse_inode {
+ /** Inode data */
+@@ -155,6 +168,8 @@ struct fuse_inode {
+ */
+ struct fuse_inode_dax *dax;
+ #endif
++ /** Submount specific lookup tracking */
++ struct fuse_submount_lookup *submount_lookup;
+ };
+
+ /** FUSE inode state bits */
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index bc3c3e76c646d..f81000d968875 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -68,6 +68,24 @@ struct fuse_forget_link *fuse_alloc_forget(void)
+ return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
+ }
+
++static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
++{
++ struct fuse_submount_lookup *sl;
++
++ sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
++ if (!sl)
++ return NULL;
++ sl->forget = fuse_alloc_forget();
++ if (!sl->forget)
++ goto out_free;
++
++ return sl;
++
++out_free:
++ kfree(sl);
++ return NULL;
++}
++
+ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ {
+ struct fuse_inode *fi;
+@@ -83,6 +101,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ fi->attr_version = 0;
+ fi->orig_ino = 0;
+ fi->state = 0;
++ fi->submount_lookup = NULL;
+ mutex_init(&fi->mutex);
+ spin_lock_init(&fi->lock);
+ fi->forget = fuse_alloc_forget();
+@@ -113,6 +132,17 @@ static void fuse_free_inode(struct inode *inode)
+ kmem_cache_free(fuse_inode_cachep, fi);
+ }
+
++static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
++ struct fuse_submount_lookup *sl)
++{
++ if (!refcount_dec_and_test(&sl->count))
++ return;
++
++ fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
++ sl->forget = NULL;
++ kfree(sl);
++}
++
+ static void fuse_evict_inode(struct inode *inode)
+ {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+@@ -132,6 +162,11 @@ static void fuse_evict_inode(struct inode *inode)
+ fi->nlookup);
+ fi->forget = NULL;
+ }
++
++ if (fi->submount_lookup) {
++ fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
++ fi->submount_lookup = NULL;
++ }
+ }
+ if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+ WARN_ON(!list_empty(&fi->write_files));
+@@ -311,6 +346,13 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ fuse_dax_dontcache(inode, attr->flags);
+ }
+
++static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
++ u64 nodeid)
++{
++ sl->nodeid = nodeid;
++ refcount_set(&sl->count, 1);
++}
++
+ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
+ {
+ inode->i_mode = attr->mode & S_IFMT;
+@@ -368,12 +410,22 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ */
+ if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
+ S_ISDIR(attr->mode)) {
++ struct fuse_inode *fi;
++
+ inode = new_inode(sb);
+ if (!inode)
+ return NULL;
+
+ fuse_init_inode(inode, attr);
+- get_fuse_inode(inode)->nodeid = nodeid;
++ fi = get_fuse_inode(inode);
++ fi->nodeid = nodeid;
++ fi->submount_lookup = fuse_alloc_submount_lookup();
++ if (!fi->submount_lookup) {
++ iput(inode);
++ return NULL;
++ }
++ /* Sets nlookup = 1 on fi->submount_lookup->nlookup */
++ fuse_init_submount_lookup(fi->submount_lookup, nodeid);
+ inode->i_flags |= S_AUTOMOUNT;
+ goto done;
+ }
+@@ -396,11 +448,11 @@ retry:
+ iput(inode);
+ goto retry;
+ }
+-done:
+ fi = get_fuse_inode(inode);
+ spin_lock(&fi->lock);
+ fi->nlookup++;
+ spin_unlock(&fi->lock);
++done:
+ fuse_change_attributes(inode, attr, attr_valid, attr_version);
+
+ return inode;
+@@ -1439,6 +1491,8 @@ static int fuse_fill_super_submount(struct super_block *sb,
+ struct super_block *parent_sb = parent_fi->inode.i_sb;
+ struct fuse_attr root_attr;
+ struct inode *root;
++ struct fuse_submount_lookup *sl;
++ struct fuse_inode *fi;
+
+ fuse_sb_defaults(sb);
+ fm->sb = sb;
+@@ -1461,12 +1515,27 @@ static int fuse_fill_super_submount(struct super_block *sb,
+ * its nlookup should not be incremented. fuse_iget() does
+ * that, though, so undo it here.
+ */
+- get_fuse_inode(root)->nlookup--;
++ fi = get_fuse_inode(root);
++ fi->nlookup--;
++
+ sb->s_d_op = &fuse_dentry_operations;
+ sb->s_root = d_make_root(root);
+ if (!sb->s_root)
+ return -ENOMEM;
+
++ /*
++ * Grab the parent's submount_lookup pointer and take a
++ * reference on the shared nlookup from the parent. This is to
++ * prevent the last forget for this nodeid from getting
++ * triggered until all users have finished with it.
++ */
++ sl = parent_fi->submount_lookup;
++ WARN_ON(!sl);
++ if (sl) {
++ refcount_inc(&sl->count);
++ fi->submount_lookup = sl;
++ }
++
+ return 0;
+ }
+
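
The FUSE fix moves FORGET bookkeeping for a submount root into a small refcounted record shared between the parent inode and every submount superblock, so the FORGET for the nodeid fires exactly once, after the last user is gone. A stripped-down userspace sketch of that last-put-does-cleanup shape (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct submount_lookup {
	atomic_int count;
	uint64_t nodeid;
};

static struct submount_lookup *sl_alloc(uint64_t nodeid)
{
	struct submount_lookup *sl = malloc(sizeof(*sl));

	if (!sl)
		return NULL;
	atomic_init(&sl->count, 1);
	sl->nodeid = nodeid;
	return sl;
}

static void sl_get(struct submount_lookup *sl)
{
	atomic_fetch_add(&sl->count, 1);
}

static void sl_put(struct submount_lookup *sl)
{
	if (atomic_fetch_sub(&sl->count, 1) == 1) {
		/* kernel equivalent: fuse_queue_forget() for the nodeid */
		printf("FORGET nodeid %llu\n", (unsigned long long)sl->nodeid);
		free(sl);
	}
}

int main(void)
{
	struct submount_lookup *sl = sl_alloc(42);

	sl_get(sl);	/* submount shares the parent's record */
	sl_put(sl);	/* parent evicted: no FORGET yet */
	sl_put(sl);	/* last user gone: FORGET fires exactly once */
	return 0;
}
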
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 0acb455368f23..5df8d93233376 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -38,11 +38,13 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
+ #ifdef CONFIG_CIFS_DEBUG2
+ struct smb_hdr *smb = buf;
+
+- cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
+- smb->Command, smb->Status.CifsError,
+- smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
+- cifs_dbg(VFS, "smb buf %p len %u\n", smb,
+- server->ops->calc_smb_size(smb));
++ cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n",
++ smb->Command, smb->Status.CifsError, smb->Flags,
++ smb->Flags2, smb->Mid, smb->Pid, smb->WordCount);
++ if (!server->ops->check_message(buf, server->total_read, server)) {
++ cifs_dbg(VFS, "smb buf %p len %u\n", smb,
++ server->ops->calc_smb_size(smb));
++ }
+ #endif /* CONFIG_CIFS_DEBUG2 */
+ }
+
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 2e814eadd6aef..512ac9dea9787 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -513,7 +513,8 @@ struct smb_version_operations {
+ struct mid_q_entry **, char **, int *);
+ enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
+ enum securityEnum);
+- int (*next_header)(char *);
++ int (*next_header)(struct TCP_Server_Info *server, char *buf,
++ unsigned int *noff);
+ /* ioctl passthrough for query_info */
+ int (*ioctl_query_info)(const unsigned int xid,
+ struct cifs_tcon *tcon,
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 5b19918938346..f725a119ce312 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1225,7 +1225,12 @@ next_pdu:
+ server->total_read += length;
+
+ if (server->ops->next_header) {
+- next_offset = server->ops->next_header(buf);
++ if (server->ops->next_header(server, buf, &next_offset)) {
++ cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
++ __func__, next_offset);
++ cifs_reconnect(server, true);
++ continue;
++ }
+ if (next_offset)
+ server->pdu_size = next_offset;
+ }
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 31e06133acc3d..41290c12d0bcc 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -350,6 +350,10 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
+ cifs_dbg(VFS, "Length less than smb header size\n");
+ }
+ return -EIO;
++ } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
++ cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
++ __func__, smb->WordCount);
++ return -EIO;
+ }
+
+ /* otherwise, there is enough to get to the BCC */
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 1b3489a2f0db7..df03d80ab6d5f 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5196,17 +5196,22 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ NULL, 0, 0, false);
+ }
+
+-static int
+-smb2_next_header(char *buf)
++static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
++ unsigned int *noff)
+ {
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
+
+- if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
+- return sizeof(struct smb2_transform_hdr) +
+- le32_to_cpu(t_hdr->OriginalMessageSize);
+-
+- return le32_to_cpu(hdr->NextCommand);
++ if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
++ *noff = le32_to_cpu(t_hdr->OriginalMessageSize);
++ if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
++ return -EINVAL;
++ } else {
++ *noff = le32_to_cpu(hdr->NextCommand);
++ }
++ if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
++ return -EINVAL;
++ return 0;
+ }
+
+ static int
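
smb2_next_header() now range-checks the wire-controlled lengths instead of trusting them: the sum must not wrap, and a non-zero next offset must at least cover a full header. The kernel's check_add_overflow() wraps the compiler builtin used in this simplified stand-in:

#include <stdbool.h>
#include <stdio.h>

static bool next_offset_ok(unsigned int orig_msg_size,
			   unsigned int hdr_size,
			   unsigned int min_header,
			   unsigned int *noff)
{
	/* Reject lengths that wrap around... */
	if (__builtin_add_overflow(orig_msg_size, hdr_size, noff))
		return false;
	/* ...and non-zero offsets too small to hold a full header. */
	if (*noff && *noff < min_header)
		return false;
	return true;
}

int main(void)
{
	unsigned int noff;

	printf("%d\n", next_offset_ok(0xffffffffu, 52, 64, &noff)); /* 0 */
	printf("%d\n", next_offset_ok(4096, 52, 64, &noff));        /* 1 */
	return 0;
}
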
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 847d69d327c2a..05ff8a457a3d7 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -372,10 +372,15 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
+ {
+ /* BB eventually switch this to SMB2 specific small buf size */
+- if (smb2_command == SMB2_SET_INFO)
++ switch (smb2_command) {
++ case SMB2_SET_INFO:
++ case SMB2_QUERY_INFO:
+ *request_buf = cifs_buf_get();
+- else
++ break;
++ default:
+ *request_buf = cifs_small_buf_get();
++ break;
++ }
+ if (*request_buf == NULL) {
+ /* BB should we add a retry in here if not a writepage? */
+ return -ENOMEM;
+@@ -3523,8 +3528,13 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb2_query_info_req *req;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int total_len;
++ size_t len;
+ int rc;
+
++ if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
++ len > CIFSMaxBufSize))
++ return -EINVAL;
++
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ (void **) &req, &total_len);
+ if (rc)
+@@ -3546,7 +3556,7 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+
+ iov[0].iov_base = (char *)req;
+ /* 1 for Buffer */
+- iov[0].iov_len = total_len - 1 + input_len;
++ iov[0].iov_len = len;
+ return 0;
+ }
+
+@@ -3554,7 +3564,7 @@ void
+ SMB2_query_info_free(struct smb_rqst *rqst)
+ {
+ if (rqst && rqst->rq_iov)
+- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++ cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ }
+
+ static int
+@@ -5439,6 +5449,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+ return 0;
+ }
+
++static inline void free_qfs_info_req(struct kvec *iov)
++{
++ cifs_buf_release(iov->iov_base);
++}
++
+ int
+ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+@@ -5470,7 +5485,7 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+- cifs_small_buf_release(iov.iov_base);
++ free_qfs_info_req(&iov);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto posix_qfsinf_exit;
+@@ -5521,7 +5536,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+- cifs_small_buf_release(iov.iov_base);
++ free_qfs_info_req(&iov);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto qfsinf_exit;
+@@ -5588,7 +5603,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+- cifs_small_buf_release(iov.iov_base);
++ free_qfs_info_req(&iov);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto qfsattr_exit;
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 6b7d95b65f4b6..f4728e65d1bda 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
++ return;
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
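
The one-line ubifs fix matters because the code after the error branch links the node into the rbtree; without the early return, the just-freed old_idx would be inserted, a use-after-free. Distilled into a userspace sketch over a toy list:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *link; };

static void insert(struct node **head, struct node *n, int duplicate)
{
	if (duplicate) {
		fprintf(stderr, "old idx added twice!\n");
		free(n);
		return;		/* the missing statement the patch adds */
	}
	n->link = *head;
	*head = n;
}

int main(void)
{
	struct node *head = NULL;

	insert(&head, calloc(1, sizeof(struct node)), 0);
	insert(&head, calloc(1, sizeof(struct node)), 1); /* freed, not linked */
	printf("head=%p\n", (void *)head);
	return 0;
}
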
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1fba826f0acef..3ce9e39ecdb85 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2681,6 +2681,9 @@ enum bpf_text_poke_type {
+ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2);
+
++void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++ struct bpf_prog *new, struct bpf_prog *old);
++
+ void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+ int bpf_arch_text_invalidate(void *dst, size_t len);
+
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index b13be7ae2275e..e6941b239f449 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -8,6 +8,7 @@
+ #ifndef _DAMON_H_
+ #define _DAMON_H_
+
++#include <linux/completion.h>
+ #include <linux/mutex.h>
+ #include <linux/time64.h>
+ #include <linux/types.h>
+@@ -452,6 +453,8 @@ struct damon_ctx {
+ /* private: internal use only */
+ struct timespec64 last_aggregation;
+ struct timespec64 last_ops_update;
++ /* for waiting until the execution of the kdamond_fn is started */
++ struct completion kdamond_started;
+
+ /* public: */
+ struct task_struct *kdamond;
+diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
+index 1262d92ab88fc..2e71ca35942e9 100644
+--- a/include/linux/dm-bufio.h
++++ b/include/linux/dm-bufio.h
+@@ -37,6 +37,8 @@ dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ */
+ void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
++void dm_bufio_client_reset(struct dm_bufio_client *c);
++
+ /*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+diff --git a/include/linux/kasan.h b/include/linux/kasan.h
+index 6e6f0238d63cc..4603e6e30c0ea 100644
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -471,10 +471,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+
+ #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+-#ifdef CONFIG_KASAN
++#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+ void kasan_non_canonical_hook(unsigned long addr);
+-#else /* CONFIG_KASAN */
++#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+ static inline void kasan_non_canonical_hook(unsigned long addr) { }
+-#endif /* CONFIG_KASAN */
++#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+ #endif /* LINUX_KASAN_H */
+diff --git a/include/linux/key-type.h b/include/linux/key-type.h
+index 7d985a1dfe4af..5caf3ce823733 100644
+--- a/include/linux/key-type.h
++++ b/include/linux/key-type.h
+@@ -73,6 +73,7 @@ struct key_type {
+
+ unsigned int flags;
+ #define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
++#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
+
+ /* vet a description */
+ int (*vet_description)(const char *description);
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 3660ce6a93496..93ec34a94b724 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -282,18 +282,23 @@ struct mlx5_cmd_stats {
+ struct mlx5_cmd {
+ struct mlx5_nb nb;
+
++ /* members which needs to be queried or reinitialized each reload */
++ struct {
++ u16 cmdif_rev;
++ u8 log_sz;
++ u8 log_stride;
++ int max_reg_cmds;
++ unsigned long bitmask;
++ struct semaphore sem;
++ struct semaphore pages_sem;
++ struct semaphore throttle_sem;
++ } vars;
+ enum mlx5_cmdif_state state;
+ void *cmd_alloc_buf;
+ dma_addr_t alloc_dma;
+ int alloc_size;
+ void *cmd_buf;
+ dma_addr_t dma;
+- u16 cmdif_rev;
+- u8 log_sz;
+- u8 log_stride;
+- int max_reg_cmds;
+- int events;
+- u32 __iomem *vector;
+
+ /* protect command queue allocations
+ */
+@@ -303,11 +308,8 @@ struct mlx5_cmd {
+ */
+ spinlock_t token_lock;
+ u8 token;
+- unsigned long bitmask;
+ char wq_name[MLX5_CMD_WQ_MAX_NAME];
+ struct workqueue_struct *wq;
+- struct semaphore sem;
+- struct semaphore pages_sem;
+ int mode;
+ u16 allowed_opcode;
+ struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 583aebd8c1e01..5f8a534b65746 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -187,6 +187,7 @@ struct blocked_key {
+ struct smp_csrk {
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
++ u8 link_type;
+ u8 type;
+ u8 val[16];
+ };
+@@ -196,6 +197,7 @@ struct smp_ltk {
+ struct rcu_head rcu;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
++ u8 link_type;
+ u8 authenticated;
+ u8 type;
+ u8 enc_size;
+@@ -210,6 +212,7 @@ struct smp_irk {
+ bdaddr_t rpa;
+ bdaddr_t bdaddr;
+ u8 addr_type;
++ u8 link_type;
+ u8 val[16];
+ };
+
+@@ -217,6 +220,8 @@ struct link_key {
+ struct list_head list;
+ struct rcu_head rcu;
+ bdaddr_t bdaddr;
++ u8 bdaddr_type;
++ u8 link_type;
+ u8 type;
+ u8 val[HCI_LINK_KEY_SIZE];
+ u8 pin_len;
+diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
+index 4dfa6d7f83baa..cd104a1343e2d 100644
+--- a/include/trace/events/9p.h
++++ b/include/trace/events/9p.h
+@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u16, tag )
+- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
++ __dynamic_array(unsigned char, line,
++ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = pdu->id;
+ __entry->tag = pdu->tag;
+- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
++ memcpy(__get_dynamic_array(line), pdu->sdata,
++ __get_dynamic_array_len(line));
+ ),
+- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
++ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
+ (unsigned long)__entry->clnt, show_9p_op(__entry->type),
+- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
++ __entry->tag, __get_dynamic_array_len(line),
++ __get_dynamic_array(line))
+ );
+
+
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 832b2659e96e2..00f23febb9a7d 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -997,11 +997,16 @@ static void prog_array_map_poke_untrack(struct bpf_map *map,
+ mutex_unlock(&aux->poke_mutex);
+ }
+
++void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++ struct bpf_prog *new, struct bpf_prog *old)
++{
++ WARN_ON_ONCE(1);
++}
++
+ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ struct bpf_prog *old,
+ struct bpf_prog *new)
+ {
+- u8 *old_addr, *new_addr, *old_bypass_addr;
+ struct prog_poke_elem *elem;
+ struct bpf_array_aux *aux;
+
+@@ -1010,7 +1015,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+
+ list_for_each_entry(elem, &aux->poke_progs, list) {
+ struct bpf_jit_poke_descriptor *poke;
+- int i, ret;
++ int i;
+
+ for (i = 0; i < elem->aux->size_poke_tab; i++) {
+ poke = &elem->aux->poke_tab[i];
+@@ -1029,21 +1034,10 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ * activated, so tail call updates can arrive from here
+ * while JIT is still finishing its final fixup for
+ * non-activated poke entries.
+- * 3) On program teardown, the program's kallsym entry gets
+- * removed out of RCU callback, but we can only untrack
+- * from sleepable context, therefore bpf_arch_text_poke()
+- * might not see that this is in BPF text section and
+- * bails out with -EINVAL. As these are unreachable since
+- * RCU grace period already passed, we simply skip them.
+- * 4) Also programs reaching refcount of zero while patching
++ * 3) Also programs reaching refcount of zero while patching
+ * is in progress is okay since we're protected under
+ * poke_mutex and untrack the programs before the JIT
+- * buffer is freed. When we're still in the middle of
+- * patching and suddenly kallsyms entry of the program
+- * gets evicted, we just skip the rest which is fine due
+- * to point 3).
+- * 5) Any other error happening below from bpf_arch_text_poke()
+- * is a unexpected bug.
++ * buffer is freed.
+ */
+ if (!READ_ONCE(poke->tailcall_target_stable))
+ continue;
+@@ -1053,39 +1047,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ poke->tail_call.key != key)
+ continue;
+
+- old_bypass_addr = old ? NULL : poke->bypass_addr;
+- old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+- new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+-
+- if (new) {
+- ret = bpf_arch_text_poke(poke->tailcall_target,
+- BPF_MOD_JUMP,
+- old_addr, new_addr);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- if (!old) {
+- ret = bpf_arch_text_poke(poke->tailcall_bypass,
+- BPF_MOD_JUMP,
+- poke->bypass_addr,
+- NULL);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- }
+- } else {
+- ret = bpf_arch_text_poke(poke->tailcall_bypass,
+- BPF_MOD_JUMP,
+- old_bypass_addr,
+- poke->bypass_addr);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- /* let other CPUs finish the execution of program
+- * so that it will not possible to expose them
+- * to invalid nop, stack unwind, nop state
+- */
+- if (!ret)
+- synchronize_rcu();
+- ret = bpf_arch_text_poke(poke->tailcall_target,
+- BPF_MOD_JUMP,
+- old_addr, NULL);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- }
++ bpf_arch_poke_desc_update(poke, new, old);
+ }
+ }
+ }
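
prog_array_map_poke_run() now delegates the actual text poking to bpf_arch_poke_desc_update(), declared __weak so an architecture (x86's JIT supplies a strong version) can replace the generic WARN_ON_ONCE stub at link time. The same linker mechanism in miniature:

#include <stdio.h>

__attribute__((weak)) void arch_hook(void)
{
	printf("generic fallback\n");
}

/* A second translation unit with a strong definition, e.g.
 *     void arch_hook(void) { ... arch-specific poking ... }
 * would silently win at link time, just as the arch JIT does. */

int main(void)
{
	arch_hook();	/* prints "generic fallback" unless overridden */
	return 0;
}
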
+diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
+index 8d77526892f45..d944924cd1e1c 100644
+--- a/kernel/trace/synth_event_gen_test.c
++++ b/kernel/trace/synth_event_gen_test.c
+@@ -477,6 +477,17 @@ static int __init synth_event_gen_test_init(void)
+
+ ret = test_trace_synth_event();
+ WARN_ON(ret);
++
++ /* Disable when done */
++ trace_array_set_clr_event(gen_synth_test->tr,
++ "synthetic",
++ "gen_synth_test", false);
++ trace_array_set_clr_event(empty_synth_test->tr,
++ "synthetic",
++ "empty_synth_test", false);
++ trace_array_set_clr_event(create_synth_test->tr,
++ "synthetic",
++ "create_synth_test", false);
+ out:
+ return ret;
+ }
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 24f37bab8bc1f..fa1c197018551 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -2092,15 +2092,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+
+ /* Loop starting from the root node to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+- struct fwnode_handle *__fwnode =
+- fwnode_get_nth_parent(fwnode, depth);
++ /*
++ * Only get a reference for other nodes (i.e. parent nodes).
++ * fwnode refcount may be 0 here.
++ */
++ struct fwnode_handle *__fwnode = depth ?
++ fwnode_get_nth_parent(fwnode, depth) : fwnode;
+
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
+ default_str_spec);
+
+- fwnode_handle_put(__fwnode);
++ if (depth)
++ fwnode_handle_put(__fwnode);
+ }
+
+ return buf;
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 36d098d06c558..5db9bec8ae67c 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -383,6 +383,8 @@ struct damon_ctx *damon_new_ctx(void)
+ if (!ctx)
+ return NULL;
+
++ init_completion(&ctx->kdamond_started);
++
+ ctx->attrs.sample_interval = 5 * 1000;
+ ctx->attrs.aggr_interval = 100 * 1000;
+ ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
+@@ -519,11 +521,14 @@ static int __damon_start(struct damon_ctx *ctx)
+ mutex_lock(&ctx->kdamond_lock);
+ if (!ctx->kdamond) {
+ err = 0;
++ reinit_completion(&ctx->kdamond_started);
+ ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
+ nr_running_ctxs);
+ if (IS_ERR(ctx->kdamond)) {
+ err = PTR_ERR(ctx->kdamond);
+ ctx->kdamond = NULL;
++ } else {
++ wait_for_completion(&ctx->kdamond_started);
+ }
+ }
+ mutex_unlock(&ctx->kdamond_lock);
+@@ -1147,6 +1152,8 @@ static int kdamond_fn(void *data)
+
+ pr_debug("kdamond (%d) starts\n", current->pid);
+
++ complete(&ctx->kdamond_started);
++
+ if (ctx->ops.init)
+ ctx->ops.init(ctx);
+ if (ctx->callback.before_start && ctx->callback.before_start(ctx))
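
The DAMON fix is a startup handshake: __damon_start() must not return before kdamond_fn() has signalled the kdamond_started completion, otherwise a quick stop request can race with a worker that never initialized. A POSIX analogue of the same handshake using a condition variable in place of struct completion:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t started_cv = PTHREAD_COND_INITIALIZER;
static int started;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	started = 1;			/* complete(&ctx->kdamond_started) */
	pthread_cond_signal(&started_cv);
	pthread_mutex_unlock(&lock);
	/* ... main monitoring loop would run here ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&lock);	/* wait_for_completion(...) */
	while (!started)
		pthread_cond_wait(&started_cv, &lock);
	pthread_mutex_unlock(&lock);
	printf("worker is running; safe to expose it\n");
	pthread_join(t, NULL);
	return 0;
}
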
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index 66a37f177d231..5d9ae80df4954 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -523,8 +523,9 @@ void kasan_report_async(void)
+ }
+ #endif /* CONFIG_KASAN_HW_TAGS */
+
++#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+ /*
+- * With CONFIG_KASAN, accesses to bogus pointers (outside the high
++ * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
+ * canonical half of the address space) cause out-of-bounds shadow memory reads
+ * before the actual access. For addresses in the low canonical half of the
+ * address space, as well as most non-canonical addresses, that out-of-bounds
+@@ -560,3 +561,4 @@ void kasan_non_canonical_hook(unsigned long addr)
+ pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
+ orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
+ }
++#endif
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 0beb44f2fe1f0..f001582345052 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -407,6 +407,8 @@ int vlan_vids_add_by_dev(struct net_device *dev,
+ return 0;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
++ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++ continue;
+ err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
+ if (err)
+ goto unwind;
+@@ -417,6 +419,8 @@ unwind:
+ list_for_each_entry_continue_reverse(vid_info,
+ &vlan_info->vid_list,
+ list) {
++ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++ continue;
+ vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ }
+
+@@ -436,8 +440,11 @@ void vlan_vids_del_by_dev(struct net_device *dev,
+ if (!vlan_info)
+ return;
+
+- list_for_each_entry(vid_info, &vlan_info->vid_list, list)
++ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
++ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++ continue;
+ vlan_vid_del(dev, vid_info->proto, vid_info->vid);
++ }
+ }
+ EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index 4e3a2a1ffcb3f..0e6603b1ec906 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -394,6 +394,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ uint16_t *nwname = va_arg(ap, uint16_t *);
+ char ***wnames = va_arg(ap, char ***);
+
++ *wnames = NULL;
++
+ errcode = p9pdu_readf(pdu, proto_version,
+ "w", nwname);
+ if (!errcode) {
+@@ -403,6 +405,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ GFP_NOFS);
+ if (!*wnames)
+ errcode = -ENOMEM;
++ else
++ (*wnames)[0] = NULL;
+ }
+
+ if (!errcode) {
+@@ -414,8 +418,10 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ proto_version,
+ "s",
+ &(*wnames)[i]);
+- if (errcode)
++ if (errcode) {
++ (*wnames)[i] = NULL;
+ break;
++ }
+ }
+ }
+
+@@ -423,11 +429,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ if (*wnames) {
+ int i;
+
+- for (i = 0; i < *nwname; i++)
++ for (i = 0; i < *nwname; i++) {
++ if (!(*wnames)[i])
++ break;
+ kfree((*wnames)[i]);
++ }
++ kfree(*wnames);
++ *wnames = NULL;
+ }
+- kfree(*wnames);
+- *wnames = NULL;
+ }
+ }
+ break;
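
The 9p decoder fix is a partial-initialization cleanup pattern: slots are NULLed before they are filled, a failing element is NULLed before bailing out, and the error path frees entries only up to the first NULL, avoiding both leaks and frees of uninitialized pointers. A compact userspace rendition (fail_at simulates a decode error):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **read_names(int n, int fail_at)
{
	char **names = calloc(n, sizeof(*names));	/* every slot NULL */
	int i;

	if (!names)
		return NULL;
	for (i = 0; i < n; i++) {
		if (i == fail_at || !(names[i] = strdup("name")))
			goto err;	/* slot i is still NULL */
	}
	return names;
err:
	for (i = 0; i < n && names[i]; i++)	/* stop at first NULL */
		free(names[i]);
	free(names);
	return NULL;
}

int main(void)
{
	char **ok = read_names(3, -1);
	int i;

	printf("failed decode -> %p\n", (void *)read_names(3, 1));
	if (ok) {
		for (i = 0; i < 3; i++)
			free(ok[i]);
		free(ok);
	}
	return 0;
}
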
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 1c3c7ff5c3c66..f1b7510359e4b 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -264,11 +264,14 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
++ lock_sock(sk);
++
+ skb = skb_recv_datagram(sk, flags, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+- return 0;
++ err = 0;
+
++ release_sock(sk);
+ return err;
+ }
+
+@@ -294,6 +297,8 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+
+ skb_free_datagram(sk, skb);
+
++ release_sock(sk);
++
+ if (flags & MSG_TRUNC)
+ copied = skblen;
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index c86a45344fe28..dcb13c64e8e7c 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -515,6 +515,9 @@ static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
+ {
+ struct hci_rp_read_class_of_dev *rp = data;
+
++ if (WARN_ON(!hdev))
++ return HCI_ERROR_UNSPECIFIED;
++
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ if (rp->status)
+@@ -746,9 +749,23 @@ static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
+ } else {
+ conn->enc_key_size = rp->key_size;
+ status = 0;
++
++ if (conn->enc_key_size < hdev->min_enc_key_size) {
++ /* As slave role, the conn->state has been set to
++ * BT_CONNECTED and l2cap conn req might not be received
++ * yet, at this moment the l2cap layer almost does
++ * nothing with the non-zero status.
++ * So we also clear encrypt related bits, and then the
++ * handler of l2cap conn req will get the right secure
++ * state at a later time.
++ */
++ status = HCI_ERROR_AUTH_FAILURE;
++ clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
++ clear_bit(HCI_CONN_AES_CCM, &conn->flags);
++ }
+ }
+
+- hci_encrypt_cfm(conn, 0);
++ hci_encrypt_cfm(conn, status);
+
+ done:
+ hci_dev_unlock(hdev);
+@@ -2298,7 +2315,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+ return;
+ }
+
+- set_bit(HCI_INQUIRY, &hdev->flags);
++ if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
++ set_bit(HCI_INQUIRY, &hdev->flags);
+ }
+
+ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a7899857aee5d..4c5793053393f 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6493,6 +6493,14 @@ drop:
+ kfree_skb(skb);
+ }
+
++static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
++{
++ struct l2cap_cmd_rej_unk rej;
++
++ rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
++ l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
++}
++
+ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+ {
+@@ -6518,23 +6526,24 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+
+ if (len > skb->len || !cmd->ident) {
+ BT_DBG("corrupted command");
++ l2cap_sig_send_rej(conn, cmd->ident);
+ break;
+ }
+
+ err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
+ if (err) {
+- struct l2cap_cmd_rej_unk rej;
+-
+ BT_ERR("Wrong link type (%d)", err);
+-
+- rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+- l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+- sizeof(rej), &rej);
++ l2cap_sig_send_rej(conn, cmd->ident);
+ }
+
+ skb_pull(skb, len);
+ }
+
++ if (skb->len > 0) {
++ BT_DBG("corrupted command");
++ l2cap_sig_send_rej(conn, 0);
++ }
++
+ drop:
+ kfree_skb(skb);
+ }
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d2e8565d0b33f..6d631a2e60166 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2883,7 +2883,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_link_key_info *key = &cp->keys[i];
+
+- if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
++ /* Considering SMP over BREDR/LE, there is no need to check addr_type */
++ if (key->type > 0x08)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+@@ -7129,6 +7130,7 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+
+ for (i = 0; i < irk_count; i++) {
+ struct mgmt_irk_info *irk = &cp->irks[i];
++ u8 addr_type = le_addr_type(irk->addr.type);
+
+ if (hci_is_blocked_key(hdev,
+ HCI_BLOCKED_KEY_TYPE_IRK,
+@@ -7138,8 +7140,12 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ continue;
+ }
+
++ /* When using SMP over BR/EDR, the addr type should be set to BREDR */
++ if (irk->addr.type == BDADDR_BREDR)
++ addr_type = BDADDR_BREDR;
++
+ hci_add_irk(hdev, &irk->addr.bdaddr,
+- le_addr_type(irk->addr.type), irk->val,
++ addr_type, irk->val,
+ BDADDR_ANY);
+ }
+
+@@ -7220,6 +7226,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+ u8 type, authenticated;
++ u8 addr_type = le_addr_type(key->addr.type);
+
+ if (hci_is_blocked_key(hdev,
+ HCI_BLOCKED_KEY_TYPE_LTK,
+@@ -7254,8 +7261,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ continue;
+ }
+
++ /* When using SMP over BR/EDR, the addr type should be set to BREDR */
++ if (key->addr.type == BDADDR_BREDR)
++ addr_type = BDADDR_BREDR;
++
+ hci_add_ltk(hdev, &key->addr.bdaddr,
+- le_addr_type(key->addr.type), type, authenticated,
++ addr_type, type, authenticated,
+ key->val, key->enc_size, key->ediv, key->rand);
+ }
+
+@@ -9523,7 +9534,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+
+ ev.store_hint = persistent;
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+- ev.key.addr.type = BDADDR_BREDR;
++ ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+ ev.key.pin_len = key->pin_len;
+@@ -9574,7 +9585,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+ ev.store_hint = persistent;
+
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+- ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
++ ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.type = mgmt_ltk_type(key);
+ ev.key.enc_size = key->enc_size;
+ ev.key.ediv = key->ediv;
+@@ -9603,7 +9614,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
+
+ bacpy(&ev.rpa, &irk->rpa);
+ bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
+- ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
++ ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
+ memcpy(ev.irk.val, irk->val, sizeof(irk->val));
+
+ mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
+@@ -9632,7 +9643,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ ev.store_hint = persistent;
+
+ bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
+- ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
++ ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
+ ev.key.type = csrk->type;
+ memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
+
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 70663229b3cc9..ecb005bce65ac 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1058,6 +1058,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ }
+
+ if (smp->remote_irk) {
++ smp->remote_irk->link_type = hcon->type;
+ mgmt_new_irk(hdev, smp->remote_irk, persistent);
+
+ /* Now that user space can be considered to know the
+@@ -1072,24 +1073,28 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ }
+
+ if (smp->csrk) {
++ smp->csrk->link_type = hcon->type;
+ smp->csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->csrk, persistent);
+ }
+
+ if (smp->responder_csrk) {
++ smp->responder_csrk->link_type = hcon->type;
+ smp->responder_csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
+ }
+
+ if (smp->ltk) {
++ smp->ltk->link_type = hcon->type;
+ smp->ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->ltk, persistent);
+ }
+
+ if (smp->responder_ltk) {
++ smp->responder_ltk->link_type = hcon->type;
+ smp->responder_ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
+@@ -1109,6 +1114,8 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
+ smp->link_key, type, 0, &persistent);
+ if (key) {
++ key->link_type = hcon->type;
++ key->bdaddr_type = hcon->dst_type;
+ mgmt_new_link_key(hdev, key, persistent);
+
+ /* Don't keep debug keys around if the relevant
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0d5aa820fd830..0a5566b6f8a25 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3551,6 +3551,9 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ if (gso_segs > READ_ONCE(dev->gso_max_segs))
+ return features & ~NETIF_F_GSO_MASK;
+
++ if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
++ return features & ~NETIF_F_GSO_MASK;
++
+ if (!skb_shinfo(skb)->gso_type) {
+ skb_warn_bad_offload(skb);
+ return features & ~NETIF_F_GSO_MASK;
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 051aa71a8ad0f..30e7deff4c551 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -79,7 +79,7 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
+ remove_wait_queue(sk_sleep(sk), &wait);
+ sk->sk_write_pending--;
+ } while (!done);
+- return 0;
++ return done < 0 ? done : 0;
+ }
+ EXPORT_SYMBOL(sk_stream_wait_connect);
+
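
The sk_stream_wait_connect() change propagates a negative value out of the wait loop instead of flattening every exit to success. A toy version of the control flow (the wait step is a stand-in for sk_wait_event()):

#include <errno.h>
#include <stdio.h>

static int wait_step(int attempt)
{
	return attempt < 2 ? 0 : -ECONNRESET;	/* 0 means keep waiting */
}

static int wait_connect(void)
{
	int done, attempt = 0;

	do {
		done = wait_step(attempt++);
	} while (!done);
	return done < 0 ? done : 0;	/* was: return 0 */
}

int main(void)
{
	printf("wait_connect() = %d\n", wait_connect());
	return 0;
}
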
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 3aced951d5ab8..03f8f33dc134c 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -91,6 +91,7 @@ const struct cred *dns_resolver_cache;
+ static int
+ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ {
++ const struct dns_server_list_v1_header *v1;
+ const struct dns_payload_header *bin;
+ struct user_key_payload *upayload;
+ unsigned long derrno;
+@@ -122,6 +123,13 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ return -EINVAL;
+ }
+
++ v1 = (const struct dns_server_list_v1_header *)bin;
++ if ((v1->status != DNS_LOOKUP_GOOD &&
++ v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) {
++ if (prep->expiry == TIME64_MAX)
++ prep->expiry = ktime_get_real_seconds() + 1;
++ }
++
+ result_len = datalen;
+ goto store_result;
+ }
+@@ -314,7 +322,7 @@ static long dns_resolver_read(const struct key *key,
+
+ struct key_type key_type_dns_resolver = {
+ .name = "dns_resolver",
+- .flags = KEY_TYPE_NET_DOMAIN,
++ .flags = KEY_TYPE_NET_DOMAIN | KEY_TYPE_INSTANT_REAP,
+ .preparse = dns_resolver_preparse,
+ .free_preparse = dns_resolver_free_preparse,
+ .instantiate = generic_key_instantiate,
+diff --git a/net/ife/ife.c b/net/ife/ife.c
+index 13bbf8cb6a396..be05b690b9ef2 100644
+--- a/net/ife/ife.c
++++ b/net/ife/ife.c
+@@ -82,6 +82,7 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
+ if (unlikely(!pskb_may_pull(skb, total_pull)))
+ return NULL;
+
++ ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len);
+ skb_set_mac_header(skb, total_pull);
+ __skb_pull(skb, total_pull);
+ *metalen = ifehdrln - IFE_METAHDRLEN;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 2ca442f485132..a2c4866080bd7 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1694,10 +1694,10 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ lockdep_is_held(&local->sta_mtx));
+
+ /*
+- * If there are no changes, then accept a link that doesn't exist,
++ * If there are no changes, then accept a link that exist,
+ * unless it's a new link.
+ */
+- if (params->link_id < 0 && !new_link &&
++ if (params->link_id >= 0 && !new_link &&
+ !params->link_mac && !params->txpwr_set &&
+ !params->supported_rates_len &&
+ !params->ht_capa && !params->vht_capa &&
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index bd0b7c189adfa..711c3377f428b 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -1051,8 +1051,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ case WLAN_SP_MESH_PEERING_OPEN:
+ if (!matches_local)
+ event = OPN_RJCT;
+- if (!mesh_plink_free_count(sdata) ||
+- (sta->mesh->plid && sta->mesh->plid != plid))
++ else if (!mesh_plink_free_count(sdata) ||
++ (sta->mesh->plid && sta->mesh->plid != plid))
+ event = OPN_IGNR;
+ else
+ event = OPN_ACPT;
+@@ -1060,9 +1060,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ if (!matches_local)
+ event = CNF_RJCT;
+- if (!mesh_plink_free_count(sdata) ||
+- sta->mesh->llid != llid ||
+- (sta->mesh->plid && sta->mesh->plid != plid))
++ else if (!mesh_plink_free_count(sdata) ||
++ sta->mesh->llid != llid ||
++ (sta->mesh->plid && sta->mesh->plid != plid))
+ event = CNF_IGNR;
+ else
+ event = CNF_ACPT;
+@@ -1230,6 +1230,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
+ return;
+ }
+ elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL);
+- mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
+- kfree(elems);
++ if (elems) {
++ mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
++ kfree(elems);
++ }
+ }
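
The mesh_plink change is the classic missing else-if bug: with independent if statements, the second condition could overwrite a rejection with "ignore". Distilled to a few lines:

#include <stdio.h>

enum event { ACPT, IGNR, RJCT };

static enum event classify(int matches_local, int free_slots)
{
	enum event ev;

	if (!matches_local)
		ev = RJCT;
	else if (!free_slots)	/* was a bare "if" before the fix */
		ev = IGNR;
	else
		ev = ACPT;
	return ev;
}

int main(void)
{
	/* A mismatching peer with no free slots must be rejected, not ignored. */
	printf("%s\n", classify(0, 0) == RJCT ? "RJCT" : "not RJCT");
	return 0;
}
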
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 2cc95c8dc4c7b..f74baefd855d3 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -116,6 +116,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ ret = gpiod_direction_output(rfkill->reset_gpio, true);
++ if (ret)
++ return ret;
++
++ ret = gpiod_direction_output(rfkill->shutdown_gpio, true);
++ if (ret)
++ return ret;
++
+ rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
+ rfkill->type, &rfkill_gpio_ops,
+ rfkill);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 674937284b8d2..29b74a569e0b0 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -182,21 +182,47 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
+ */
+ static void rose_kill_by_device(struct net_device *dev)
+ {
+- struct sock *s;
++ struct sock *sk, *array[16];
++ struct rose_sock *rose;
++ bool rescan;
++ int i, cnt;
+
++start:
++ rescan = false;
++ cnt = 0;
+ spin_lock_bh(&rose_list_lock);
+- sk_for_each(s, &rose_list) {
+- struct rose_sock *rose = rose_sk(s);
++ sk_for_each(sk, &rose_list) {
++ rose = rose_sk(sk);
++ if (rose->device == dev) {
++ if (cnt == ARRAY_SIZE(array)) {
++ rescan = true;
++ break;
++ }
++ sock_hold(sk);
++ array[cnt++] = sk;
++ }
++ }
++ spin_unlock_bh(&rose_list_lock);
+
++ for (i = 0; i < cnt; i++) {
++ sk = array[i];
++ rose = rose_sk(sk);
++ lock_sock(sk);
++ spin_lock_bh(&rose_list_lock);
+ if (rose->device == dev) {
+- rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
++ rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+ if (rose->neighbour)
+ rose->neighbour->use--;
+ netdev_put(rose->device, &rose->dev_tracker);
+ rose->device = NULL;
+ }
++ spin_unlock_bh(&rose_list_lock);
++ release_sock(sk);
++ sock_put(sk);
++ cond_resched();
+ }
+- spin_unlock_bh(&rose_list_lock);
++ if (rescan)
++ goto start;
+ }
+
+ /*
+@@ -656,7 +682,10 @@ static int rose_release(struct socket *sock)
+ break;
+ }
+
++ spin_lock_bh(&rose_list_lock);
+ netdev_put(rose->device, &rose->dev_tracker);
++ rose->device = NULL;
++ spin_unlock_bh(&rose_list_lock);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
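
The rose_kill_by_device() rework is a common lock-ordering escape: collect matching sockets into a fixed batch with a reference held under the spinlock, drop the lock, then do the work that may sleep, and rescan if the batch overflowed. A toy userspace rendition over an integer table (the kernel's socket locking rules are assumed, not modeled):

#include <pthread.h>
#include <stdio.h>

#define BATCH 4

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int wants_kill[10] = { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 };

int main(void)
{
	int batch[BATCH], cnt, i, rescan, done = 0;

	do {
		rescan = 0;
		cnt = 0;
		pthread_mutex_lock(&table_lock);	/* "spin_lock_bh" */
		for (i = 0; i < 10; i++) {
			if (!wants_kill[i])
				continue;
			if (cnt == BATCH) {
				rescan = 1;	/* batch full: come back */
				break;
			}
			batch[cnt++] = i;	/* "sock_hold()" */
			wants_kill[i] = 0;
		}
		pthread_mutex_unlock(&table_lock);

		for (i = 0; i < cnt; i++) {	/* note batch[i], not batch[cnt] */
			printf("handling entry %d outside the lock\n", batch[i]);
			done++;
		}
	} while (rescan);
	printf("handled %d entries\n", done);
	return 0;
}
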
+diff --git a/net/wireless/certs/wens.hex b/net/wireless/certs/wens.hex
+new file mode 100644
+index 0000000000000..0d50369bede98
+--- /dev/null
++++ b/net/wireless/certs/wens.hex
+@@ -0,0 +1,87 @@
++/* Chen-Yu Tsai's regdb certificate */
++0x30, 0x82, 0x02, 0xa7, 0x30, 0x82, 0x01, 0x8f,
++0x02, 0x14, 0x61, 0xc0, 0x38, 0x65, 0x1a, 0xab,
++0xdc, 0xf9, 0x4b, 0xd0, 0xac, 0x7f, 0xf0, 0x6c,
++0x72, 0x48, 0xdb, 0x18, 0xc6, 0x00, 0x30, 0x0d,
++0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
++0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x0f, 0x31,
++0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x03,
++0x0c, 0x04, 0x77, 0x65, 0x6e, 0x73, 0x30, 0x20,
++0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x31,
++0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 0x18,
++0x0f, 0x32, 0x31, 0x32, 0x33, 0x31, 0x31, 0x30,
++0x37, 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a,
++0x30, 0x0f, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03,
++0x55, 0x04, 0x03, 0x0c, 0x04, 0x77, 0x65, 0x6e,
++0x73, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06,
++0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
++0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f,
++0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01,
++0x01, 0x00, 0xa9, 0x7a, 0x2c, 0x78, 0x4d, 0xa7,
++0x19, 0x2d, 0x32, 0x52, 0xa0, 0x2e, 0x6c, 0xef,
++0x88, 0x7f, 0x15, 0xc5, 0xb6, 0x69, 0x54, 0x16,
++0x43, 0x14, 0x79, 0x53, 0xb7, 0xae, 0x88, 0xfe,
++0xc0, 0xb7, 0x5d, 0x47, 0x8e, 0x1a, 0xe1, 0xef,
++0xb3, 0x90, 0x86, 0xda, 0xd3, 0x64, 0x81, 0x1f,
++0xce, 0x5d, 0x9e, 0x4b, 0x6e, 0x58, 0x02, 0x3e,
++0xb2, 0x6f, 0x5e, 0x42, 0x47, 0x41, 0xf4, 0x2c,
++0xb8, 0xa8, 0xd4, 0xaa, 0xc0, 0x0e, 0xe6, 0x48,
++0xf0, 0xa8, 0xce, 0xcb, 0x08, 0xae, 0x37, 0xaf,
++0xf6, 0x40, 0x39, 0xcb, 0x55, 0x6f, 0x5b, 0x4f,
++0x85, 0x34, 0xe6, 0x69, 0x10, 0x50, 0x72, 0x5e,
++0x4e, 0x9d, 0x4c, 0xba, 0x38, 0x36, 0x0d, 0xce,
++0x73, 0x38, 0xd7, 0x27, 0x02, 0x2a, 0x79, 0x03,
++0xe1, 0xac, 0xcf, 0xb0, 0x27, 0x85, 0x86, 0x93,
++0x17, 0xab, 0xec, 0x42, 0x77, 0x37, 0x65, 0x8a,
++0x44, 0xcb, 0xd6, 0x42, 0x93, 0x92, 0x13, 0xe3,
++0x39, 0x45, 0xc5, 0x6e, 0x00, 0x4a, 0x7f, 0xcb,
++0x42, 0x17, 0x2b, 0x25, 0x8c, 0xb8, 0x17, 0x3b,
++0x15, 0x36, 0x59, 0xde, 0x42, 0xce, 0x21, 0xe6,
++0xb6, 0xc7, 0x6e, 0x5e, 0x26, 0x1f, 0xf7, 0x8a,
++0x57, 0x9e, 0xa5, 0x96, 0x72, 0xb7, 0x02, 0x32,
++0xeb, 0x07, 0x2b, 0x73, 0xe2, 0x4f, 0x66, 0x58,
++0x9a, 0xeb, 0x0f, 0x07, 0xb6, 0xab, 0x50, 0x8b,
++0xc3, 0x8f, 0x17, 0xfa, 0x0a, 0x99, 0xc2, 0x16,
++0x25, 0xbf, 0x2d, 0x6b, 0x1a, 0xaa, 0xe6, 0x3e,
++0x5f, 0xeb, 0x6d, 0x9b, 0x5d, 0x4d, 0x42, 0x83,
++0x2d, 0x39, 0xb8, 0xc9, 0xac, 0xdb, 0x3a, 0x91,
++0x50, 0xdf, 0xbb, 0xb1, 0x76, 0x6d, 0x15, 0x73,
++0xfd, 0xc6, 0xe6, 0x6b, 0x71, 0x9e, 0x67, 0x36,
++0x22, 0x83, 0x79, 0xb1, 0xd6, 0xb8, 0x84, 0x52,
++0xaf, 0x96, 0x5b, 0xc3, 0x63, 0x02, 0x4e, 0x78,
++0x70, 0x57, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30,
++0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
++0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82,
++0x01, 0x01, 0x00, 0x24, 0x28, 0xee, 0x22, 0x74,
++0x7f, 0x7c, 0xfa, 0x6c, 0x1f, 0xb3, 0x18, 0xd1,
++0xc2, 0x3d, 0x7d, 0x29, 0x42, 0x88, 0xad, 0x82,
++0xa5, 0xb1, 0x8a, 0x05, 0xd0, 0xec, 0x5c, 0x91,
++0x20, 0xf6, 0x82, 0xfd, 0xd5, 0x67, 0x60, 0x5f,
++0x31, 0xf5, 0xbd, 0x88, 0x91, 0x70, 0xbd, 0xb8,
++0xb9, 0x8c, 0x88, 0xfe, 0x53, 0xc9, 0x54, 0x9b,
++0x43, 0xc4, 0x7a, 0x43, 0x74, 0x6b, 0xdd, 0xb0,
++0xb1, 0x3b, 0x33, 0x45, 0x46, 0x78, 0xa3, 0x1c,
++0xef, 0x54, 0x68, 0xf7, 0x85, 0x9c, 0xe4, 0x51,
++0x6f, 0x06, 0xaf, 0x81, 0xdb, 0x2a, 0x7b, 0x7b,
++0x6f, 0xa8, 0x9c, 0x67, 0xd8, 0xcb, 0xc9, 0x91,
++0x40, 0x00, 0xae, 0xd9, 0xa1, 0x9f, 0xdd, 0xa6,
++0x43, 0x0e, 0x28, 0x7b, 0xaa, 0x1b, 0xe9, 0x84,
++0xdb, 0x76, 0x64, 0x42, 0x70, 0xc9, 0xc0, 0xeb,
++0xae, 0x84, 0x11, 0x16, 0x68, 0x4e, 0x84, 0x9e,
++0x7e, 0x92, 0x36, 0xee, 0x1c, 0x3b, 0x08, 0x63,
++0xeb, 0x79, 0x84, 0x15, 0x08, 0x9d, 0xaf, 0xc8,
++0x9a, 0xc7, 0x34, 0xd3, 0x94, 0x4b, 0xd1, 0x28,
++0x97, 0xbe, 0xd1, 0x45, 0x75, 0xdc, 0x35, 0x62,
++0xac, 0x1d, 0x1f, 0xb7, 0xb7, 0x15, 0x87, 0xc8,
++0x98, 0xc0, 0x24, 0x31, 0x56, 0x8d, 0xed, 0xdb,
++0x06, 0xc6, 0x46, 0xbf, 0x4b, 0x6d, 0xa6, 0xd5,
++0xab, 0xcc, 0x60, 0xfc, 0xe5, 0x37, 0xb6, 0x53,
++0x7d, 0x58, 0x95, 0xa9, 0x56, 0xc7, 0xf7, 0xee,
++0xc3, 0xa0, 0x76, 0xf7, 0x65, 0x4d, 0x53, 0xfa,
++0xff, 0x5f, 0x76, 0x33, 0x5a, 0x08, 0xfa, 0x86,
++0x92, 0x5a, 0x13, 0xfa, 0x1a, 0xfc, 0xf2, 0x1b,
++0x8c, 0x7f, 0x42, 0x6d, 0xb7, 0x7e, 0xb7, 0xb4,
++0xf0, 0xc7, 0x83, 0xbb, 0xa2, 0x81, 0x03, 0x2d,
++0xd4, 0x2a, 0x63, 0x3f, 0xf7, 0x31, 0x2e, 0x40,
++0x33, 0x5c, 0x46, 0xbc, 0x9b, 0xc1, 0x05, 0xa5,
++0x45, 0x4e, 0xc3,
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index e1accacc6f233..ee980965a7cfb 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -297,6 +297,7 @@ struct cfg80211_cqm_config {
+ u32 rssi_hyst;
+ s32 last_rssi_event_value;
+ enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
++ bool use_range_api;
+ int n_rssi_thresholds;
+ s32 rssi_thresholds[];
+ };
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index b19b5acfaf3a9..70fb14b8bab07 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12574,10 +12574,6 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ int i, n, low_index;
+ int err;
+
+- /* RSSI reporting disabled? */
+- if (!cqm_config)
+- return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+-
+ /*
+ * Obtain current RSSI value if possible, if not and no RSSI threshold
+ * event has been received yet, we should receive an event after a
+@@ -12652,18 +12648,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+- if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+- if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+- return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+-
+- return rdev_set_cqm_rssi_config(rdev, dev,
+- thresholds[0], hysteresis);
+- }
+-
+- if (!wiphy_ext_feature_isset(&rdev->wiphy,
+- NL80211_EXT_FEATURE_CQM_RSSI_LIST))
+- return -EOPNOTSUPP;
+-
+ if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ n_thresholds = 0;
+
+@@ -12671,6 +12655,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ old = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+
++ /* if already disabled just succeed */
++ if (!n_thresholds && !old) {
++ err = 0;
++ goto unlock;
++ }
++
++ if (n_thresholds > 1) {
++ if (!wiphy_ext_feature_isset(&rdev->wiphy,
++ NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
++ !rdev->ops->set_cqm_rssi_range_config) {
++ err = -EOPNOTSUPP;
++ goto unlock;
++ }
++ } else {
++ if (!rdev->ops->set_cqm_rssi_config) {
++ err = -EOPNOTSUPP;
++ goto unlock;
++ }
++ }
++
+ if (n_thresholds) {
+ cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ n_thresholds),
+@@ -12685,13 +12689,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ memcpy(cqm_config->rssi_thresholds, thresholds,
+ flex_array_size(cqm_config, rssi_thresholds,
+ n_thresholds));
++ cqm_config->use_range_api = n_thresholds > 1 ||
++ !rdev->ops->set_cqm_rssi_config;
+
+ rcu_assign_pointer(wdev->cqm_config, cqm_config);
++
++ if (cqm_config->use_range_api)
++ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++ else
++ err = rdev_set_cqm_rssi_config(rdev, dev,
++ thresholds[0],
++ hysteresis);
+ } else {
+ RCU_INIT_POINTER(wdev->cqm_config, NULL);
++ /* if enabled as range also disable via range */
++ if (old->use_range_api)
++ err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
++ else
++ err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+ }
+
+- err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ if (err) {
+ rcu_assign_pointer(wdev->cqm_config, old);
+ kfree_rcu(cqm_config, rcu_head);
+@@ -18758,10 +18775,11 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+ wdev_lock(wdev);
+ cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+- if (!wdev->cqm_config)
++ if (!cqm_config)
+ goto unlock;
+
+- cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
++ if (cqm_config->use_range_api)
++ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+
+ rssi_level = cqm_config->last_rssi_event_value;
+ rssi_event = cqm_config->last_rssi_event_type;
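
The cfg80211/nl80211 hunks above move the choice between the two driver CQM ops to configuration time: a single threshold goes through set_cqm_rssi_config(), a threshold list requires the NL80211_EXT_FEATURE_CQM_RSSI_LIST feature plus set_cqm_rssi_range_config(), and the new use_range_api flag in struct cfg80211_cqm_config (see the core.h hunk) remembers which path was taken so that later updates, the notify worker, and the disable path all stay on the same op. Below is a minimal user-space model of that dispatch; the struct, its fields, and the printf calls standing in for the rdev ops are hypothetical names for illustration, not kernel API.

/* Sketch of the reworked nl80211 CQM dispatch; a user-space model,
 * not kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct rdev {
	bool has_single_op;   /* ->set_cqm_rssi_config present */
	bool has_range_op;    /* ->set_cqm_rssi_range_config present */
	bool has_rssi_list;   /* NL80211_EXT_FEATURE_CQM_RSSI_LIST set */
	bool configured;      /* wdev->cqm_config != NULL */
	bool use_range_api;   /* recorded at configuration time */
};

/* Mirrors the reordered checks in nl80211_set_cqm_rssi(). */
static int set_cqm_rssi(struct rdev *rdev, int n_thresholds)
{
	if (n_thresholds == 0 && !rdev->configured)
		return 0;                       /* already disabled: succeed */

	if (n_thresholds > 1) {
		if (!rdev->has_rssi_list || !rdev->has_range_op)
			return -EOPNOTSUPP;
	} else if (!rdev->has_single_op) {
		return -EOPNOTSUPP;
	}

	if (n_thresholds == 0) {                /* disable via the API in use */
		printf("%s(0, 0)\n", rdev->use_range_api ?
		       "set_cqm_rssi_range_config" : "set_cqm_rssi_config");
		rdev->configured = false;
		return 0;
	}

	rdev->use_range_api = n_thresholds > 1 || !rdev->has_single_op;
	rdev->configured = true;
	printf("configure via %s\n", rdev->use_range_api ?
	       "set_cqm_rssi_range_config" : "set_cqm_rssi_config");
	return 0;
}

int main(void)
{
	struct rdev r = { .has_single_op = true, .has_range_op = true,
			  .has_rssi_list = true };

	set_cqm_rssi(&r, 1);   /* single threshold -> classic op */
	set_cqm_rssi(&r, 3);   /* threshold list   -> range op */
	set_cqm_rssi(&r, 0);   /* disable via the op last used */
	return 0;
}

The same flag is what the cfg80211_cqm_rssi_notify_work() hunk consults: the recompute helper cfg80211_cqm_rssi_update() is only meaningful for the range API, so the worker now skips it on the single-threshold path.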
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index 3c90807476eb0..eaddaceda14ea 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -66,6 +66,19 @@ void key_schedule_gc(time64_t gc_at)
+ }
+ }
+
++/*
++ * Set the expiration time on a key.
++ */
++void key_set_expiry(struct key *key, time64_t expiry)
++{
++ key->expiry = expiry;
++ if (expiry != TIME64_MAX) {
++ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++ expiry += key_gc_delay;
++ key_schedule_gc(expiry);
++ }
++}
++
+ /*
+ * Schedule a dead links collection run.
+ */
+@@ -176,7 +189,6 @@ static void key_garbage_collector(struct work_struct *work)
+ static u8 gc_state; /* Internal persistent state */
+ #define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+ #define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+-#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
+ #define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+ #define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+ #define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+@@ -184,21 +196,17 @@ static void key_garbage_collector(struct work_struct *work)
+
+ struct rb_node *cursor;
+ struct key *key;
+- time64_t new_timer, limit;
++ time64_t new_timer, limit, expiry;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = ktime_get_real_seconds();
+- if (limit > key_gc_delay)
+- limit -= key_gc_delay;
+- else
+- limit = key_gc_delay;
+
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+- gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
++ gc_state |= KEY_GC_REAPING_LINKS;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+@@ -233,8 +241,11 @@ continue_scanning:
+ }
+ }
+
+- if (gc_state & KEY_GC_SET_TIMER) {
+- if (key->expiry > limit && key->expiry < new_timer) {
++ expiry = key->expiry;
++ if (expiry != TIME64_MAX) {
++ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++ expiry += key_gc_delay;
++ if (expiry > limit && expiry < new_timer) {
+ kdebug("will expire %x in %lld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+@@ -276,7 +287,7 @@ maybe_resched:
+ */
+ kdebug("pass complete");
+
+- if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
++ if (new_timer != TIME64_MAX) {
+ new_timer += key_gc_delay;
+ key_schedule_gc(new_timer);
+ }
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 3c1e7122076b9..ec2ec335b6133 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -174,6 +174,7 @@ extern unsigned key_gc_delay;
+ extern void keyring_gc(struct key *keyring, time64_t limit);
+ extern void keyring_restriction_gc(struct key *keyring,
+ struct key_type *dead_type);
++void key_set_expiry(struct key *key, time64_t expiry);
+ extern void key_schedule_gc(time64_t gc_at);
+ extern void key_schedule_gc_links(void);
+ extern void key_gc_keytype(struct key_type *ktype);
+@@ -222,10 +223,18 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
+ */
+ static inline bool key_is_dead(const struct key *key, time64_t limit)
+ {
++ time64_t expiry = key->expiry;
++
++ if (expiry != TIME64_MAX) {
++ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++ expiry += key_gc_delay;
++ if (expiry <= limit)
++ return true;
++ }
++
+ return
+ key->flags & ((1 << KEY_FLAG_DEAD) |
+ (1 << KEY_FLAG_INVALIDATED)) ||
+- (key->expiry > 0 && key->expiry <= limit) ||
+ key->domain_tag->removed;
+ }
+
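
Across the gc.c and internal.h hunks above, key->expiry switches its "no expiry" sentinel from 0 to TIME64_MAX, and the reap time is now derived on demand: keys whose type lacks KEY_TYPE_INSTANT_REAP get key_gc_delay seconds of grace before the garbage collector may touch them. The following standalone model shows the two derived quantities; the 300-second key_gc_delay is an assumed default (the value behind /proc/sys/kernel/keys/gc_delay), and the function names are illustrative, not the kernel's.

/* User-space model of the key_set_expiry()/key_is_dead() timing. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TIME64_MAX INT64_MAX
#define KEY_TYPE_INSTANT_REAP 0x01

static const int64_t key_gc_delay = 300;   /* assumed default, seconds */

/* When should the GC timer fire for a key with this expiry? */
static int64_t gc_deadline(int64_t expiry, unsigned int type_flags)
{
	if (expiry == TIME64_MAX)
		return TIME64_MAX;              /* permanent: never schedule */
	if (!(type_flags & KEY_TYPE_INSTANT_REAP))
		expiry += key_gc_delay;         /* grace period before reap */
	return expiry;
}

/* Mirrors the expiry half of the new key_is_dead(). */
static bool expired_at(int64_t expiry, unsigned int type_flags, int64_t now)
{
	int64_t deadline = gc_deadline(expiry, type_flags);

	return deadline != TIME64_MAX && deadline <= now;
}

int main(void)
{
	const int64_t now = 1000;

	printf("permanent key dead:  %d\n", expired_at(TIME64_MAX, 0, now));
	printf("expired, in grace:   %d\n", expired_at(900, 0, now));
	printf("expired, grace over: %d\n", expired_at(600, 0, now));
	printf("instant-reap key:    %d\n",
	       expired_at(900, KEY_TYPE_INSTANT_REAP, now));
	return 0;
}

This also explains why key_garbage_collector() no longer subtracts key_gc_delay from the scan limit, and why the KEY_GC_SET_TIMER state bit could be dropped: the delay is folded into each key's effective deadline instead of into the limit.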
+diff --git a/security/keys/key.c b/security/keys/key.c
+index c45afdd1dfbb4..e65240641ca57 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -294,6 +294,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ key->uid = uid;
+ key->gid = gid;
+ key->perm = perm;
++ key->expiry = TIME64_MAX;
+ key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
+
+@@ -463,10 +464,7 @@ static int __key_instantiate_and_link(struct key *key,
+ if (authkey)
+ key_invalidate(authkey);
+
+- if (prep->expiry != TIME64_MAX) {
+- key->expiry = prep->expiry;
+- key_schedule_gc(prep->expiry + key_gc_delay);
+- }
++ key_set_expiry(key, prep->expiry);
+ }
+ }
+
+@@ -606,8 +604,7 @@ int key_reject_and_link(struct key *key,
+ atomic_inc(&key->user->nikeys);
+ mark_key_instantiated(key, -error);
+ notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
+- key->expiry = ktime_get_real_seconds() + timeout;
+- key_schedule_gc(key->expiry + key_gc_delay);
++ key_set_expiry(key, ktime_get_real_seconds() + timeout);
+
+ if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ awaken = 1;
+@@ -722,16 +719,14 @@ found_kernel_type:
+
+ void key_set_timeout(struct key *key, unsigned timeout)
+ {
+- time64_t expiry = 0;
++ time64_t expiry = TIME64_MAX;
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ if (timeout > 0)
+ expiry = ktime_get_real_seconds() + timeout;
+-
+- key->expiry = expiry;
+- key_schedule_gc(key->expiry + key_gc_delay);
++ key_set_expiry(key, expiry);
+
+ up_write(&key->sem);
+ }
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index d0cde6685627f..4f4e2c1824f18 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -198,7 +198,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+
+ /* come up with a suitable timeout value */
+ expiry = READ_ONCE(key->expiry);
+- if (expiry == 0) {
++ if (expiry == TIME64_MAX) {
+ memcpy(xbuf, "perm", 5);
+ } else if (now >= expiry) {
+ memcpy(xbuf, "expd", 5);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a7c361e0daebe..a88ed60dcd96a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9735,6 +9735,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++ SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 4d3c3365488a2..d8259afc60b08 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -834,8 +834,9 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai)
+ static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp,
+ unsigned int jack_status)
+ {
+- if (hcp->jack && jack_status != hcp->jack_status) {
+- snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
++ if (jack_status != hcp->jack_status) {
++ if (hcp->jack)
++ snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
+ hcp->jack_status = jack_status;
+ }
+ }
+@@ -864,6 +865,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+
+ if (hcp->hcd.ops->hook_plugged_cb) {
+ hcp->jack = jack;
++
++ /*
++ * Report the initial jack status which may have been provided
++ * by the parent hdmi driver while the hpd hook was registered.
++ */
++ snd_soc_jack_report(jack, hcp->jack_status, SND_JACK_LINEOUT);
++
+ return 0;
+ }
+
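
The hdmi-codec hunks decouple caching the jack state from reporting it: hdmi_codec_jack_report() now records every status change even while no jack is registered, and hdmi_codec_set_jack() replays the cached status the moment a jack is attached, so a hotplug event that arrived before registration is no longer lost. A minimal model of that ordering, with hypothetical struct and function names:

/* Model of the cache-then-replay jack handling; names are illustrative. */
#include <stdio.h>

struct codec {
	int jack_registered;
	unsigned int jack_status;   /* cached even with no jack attached */
};

static void report(struct codec *c, unsigned int status)
{
	if (status != c->jack_status) {
		if (c->jack_registered)
			printf("snd_soc_jack_report(%u)\n", status);
		c->jack_status = status;    /* cache unconditionally */
	}
}

static void set_jack(struct codec *c)
{
	c->jack_registered = 1;
	/* replay whatever arrived before the jack existed */
	printf("snd_soc_jack_report(%u) [initial]\n", c->jack_status);
}

int main(void)
{
	struct codec c = { 0, 0 };

	report(&c, 1);      /* HPD fired before set_jack(): cached only */
	set_jack(&c);       /* initial report delivers the cached state */
	report(&c, 0);      /* subsequent changes are reported directly */
	return 0;
}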
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 6364d9be28fbb..cf1cd0460ad98 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -715,6 +715,9 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int ofs = sai->soc_data->reg_offset;
+
++ /* Clear xMR to avoid channel swap with mclk_with_tere enabled case */
++ regmap_write(sai->regmap, FSL_SAI_xMR(tx), 0);
++
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx, ofs),
+ FSL_SAI_CR3_TRCE_MASK, 0);
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index f458328f9ec42..33380cad3a735 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1385,7 +1385,7 @@ free_buf:
+
+ static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
+ {
+- msleep(2000);
++ msleep(4000);
+
+ return 0;
+ }
+@@ -1628,7 +1628,7 @@ int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
+ unsigned int id)
+ {
+ switch (id) {
+- case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
++ case USB_ID(0x07fd, 0x0008): /* MOTU M Series, 1st hardware version */
+ return snd_usb_motu_m_series_boot_quirk(dev);
+ }
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index ea6fc59e9f62f..e52d513009fb0 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2652,7 +2652,7 @@ backup_tests()
+ fi
+
+ if reset "mpc backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr 0 0 0
+@@ -2660,7 +2660,7 @@ backup_tests()
+ fi
+
+ if reset "mpc backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+@@ -2669,7 +2669,7 @@ backup_tests()
+ fi
+
+ if reset "mpc switch to backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ chk_join_nr 0 0 0
+@@ -2677,7 +2677,7 @@ backup_tests()
+ fi
+
+ if reset "mpc switch to backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
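
The selftest hunks above tighten the kallsyms lookup: matching "T mptcp_subflow_send_ack$" instead of the bare name requires a global text symbol, so a substring hit on some other symbol that merely ends in the same name can no longer flip the skip decision. The shape of that check is sketched below as a standalone /proc/kallsyms scanner; this is an illustration of what the pattern selects, not the selftest helper itself, which is a grep in shell.

/* Standalone /proc/kallsyms scanner: does a global text ("T") symbol
 * with exactly this name exist? */
#include <stdio.h>
#include <string.h>

static int kallsyms_has_text_symbol(const char *want)
{
	char line[512], addr[64], type[8], name[256];
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		/* a fourth [module] column may follow; it is ignored here */
		if (sscanf(line, "%63s %7s %255s", addr, type, name) != 3)
			continue;
		if (!strcmp(type, "T") && !strcmp(name, want)) {
			fclose(f);
			return 1;
		}
	}
	fclose(f);
	return 0;
}

int main(void)
{
	const char *sym = "mptcp_subflow_send_ack";

	printf("%s: %s\n", sym,
	       kallsyms_has_text_symbol(sym) ? "present as T" : "not found");
	return 0;
}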