diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.113-114.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.113-114.patch new file mode 100644 index 0000000000..b771fa25db --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.113-114.patch @@ -0,0 +1,5730 @@ +diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst +index 8209c7a7c3970e..fbc833841bef53 100644 +--- a/Documentation/arch/arm64/silicon-errata.rst ++++ b/Documentation/arch/arm64/silicon-errata.rst +@@ -187,6 +187,8 @@ stable kernels. + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-V3AE | #3312417 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | MMU-500 | #841119,826419 | N/A | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | MMU-600 | #1076982,1209401| N/A | +diff --git a/Documentation/networking/seg6-sysctl.rst b/Documentation/networking/seg6-sysctl.rst +index 07c20e470bafe6..1b6af4779be114 100644 +--- a/Documentation/networking/seg6-sysctl.rst ++++ b/Documentation/networking/seg6-sysctl.rst +@@ -25,6 +25,9 @@ seg6_require_hmac - INTEGER + + Default is 0. + ++/proc/sys/net/ipv6/seg6_* variables: ++==================================== ++ + seg6_flowlabel - INTEGER + Controls the behaviour of computing the flowlabel of outer + IPv6 header in case of SR T.encaps +diff --git a/Makefile b/Makefile +index ab277ff8764317..ad3952fb542d3a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 113 ++SUBLEVEL = 114 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 4ecba0690938c3..1be9f1f6b32005 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -1094,6 +1094,7 @@ config ARM64_ERRATUM_3194386 + * ARM Neoverse-V1 erratum 3324341 + * ARM Neoverse V2 erratum 3324336 + * ARM Neoverse-V3 erratum 3312417 ++ * ARM Neoverse-V3AE erratum 3312417 + + On affected cores "MSR SSBS, #0" instructions may not affect + subsequent speculative instructions, which may permit unexepected +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index d92a0203e5a93d..c279a0a9b3660e 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -93,6 +93,7 @@ + #define ARM_CPU_PART_NEOVERSE_V2 0xD4F + #define ARM_CPU_PART_CORTEX_A720 0xD81 + #define ARM_CPU_PART_CORTEX_X4 0xD82 ++#define ARM_CPU_PART_NEOVERSE_V3AE 0xD83 + #define ARM_CPU_PART_NEOVERSE_V3 0xD84 + #define ARM_CPU_PART_CORTEX_X925 0xD85 + #define ARM_CPU_PART_CORTEX_A725 0xD87 +@@ -180,6 +181,7 @@ + #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) + #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720) + #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) ++#define MIDR_NEOVERSE_V3AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE) + #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) + #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925) + #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725) +diff --git 
a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 463b48d0f92500..23dcd0c4aad36d 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -471,6 +471,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = { + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE), + {} + }; + #endif +diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c +index 297427ffc4e043..8a6ea7d2701887 100644 +--- a/arch/riscv/kernel/probes/kprobes.c ++++ b/arch/riscv/kernel/probes/kprobes.c +@@ -48,10 +48,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) + post_kprobe_handler(p, kcb, regs); + } + +-static bool __kprobes arch_check_kprobe(struct kprobe *p) ++static bool __kprobes arch_check_kprobe(unsigned long addr) + { +- unsigned long tmp = (unsigned long)p->addr - p->offset; +- unsigned long addr = (unsigned long)p->addr; ++ unsigned long tmp, offset; ++ ++ /* start iterating at the closest preceding symbol */ ++ if (!kallsyms_lookup_size_offset(addr, NULL, &offset)) ++ return false; ++ ++ tmp = addr - offset; + + while (tmp <= addr) { + if (tmp == addr) +@@ -70,7 +75,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) + if ((unsigned long)insn & 0x1) + return -EILSEQ; + +- if (!arch_check_kprobe(p)) ++ if (!arch_check_kprobe((unsigned long)p->addr)) + return -EILSEQ; + + /* copy instruction */ +diff --git a/block/bdev.c b/block/bdev.c +index 5a54977518eeae..a8357b72a27b86 100644 +--- a/block/bdev.c ++++ b/block/bdev.c +@@ -147,9 +147,26 @@ int set_blocksize(struct block_device *bdev, int size) + + /* Don't change the size if it is same as current */ + if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { ++ /* ++ * Flush and truncate the pagecache before we reconfigure the ++ * mapping geometry because folio sizes are variable now. If a ++ * reader has already allocated a folio whose size is smaller ++ * than the new min_order but invokes readahead after the new ++ * min_order becomes visible, readahead will think there are ++ * "zero" blocks per folio and crash. Take the inode and ++ * invalidation locks to avoid racing with ++ * read/write/fallocate. ++ */ ++ inode_lock(bdev->bd_inode); ++ filemap_invalidate_lock(bdev->bd_inode->i_mapping); ++ + sync_blockdev(bdev); ++ kill_bdev(bdev); ++ + bdev->bd_inode->i_blkbits = blksize_bits(size); + kill_bdev(bdev); ++ filemap_invalidate_unlock(bdev->bd_inode->i_mapping); ++ inode_unlock(bdev->bd_inode); + } + return 0; + } +diff --git a/block/blk-zoned.c b/block/blk-zoned.c +index 619ee41a51cc8c..644bfa1f6753ea 100644 +--- a/block/blk-zoned.c ++++ b/block/blk-zoned.c +@@ -401,6 +401,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, + op = REQ_OP_ZONE_RESET; + + /* Invalidate the page cache, including dirty pages. 
*/ ++ inode_lock(bdev->bd_inode); + filemap_invalidate_lock(bdev->bd_inode->i_mapping); + ret = blkdev_truncate_zone_range(bdev, mode, &zrange); + if (ret) +@@ -423,8 +424,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, + GFP_KERNEL); + + fail: +- if (cmd == BLKRESETZONE) ++ if (cmd == BLKRESETZONE) { + filemap_invalidate_unlock(bdev->bd_inode->i_mapping); ++ inode_unlock(bdev->bd_inode); ++ } + + return ret; + } +diff --git a/block/fops.c b/block/fops.c +index 7c257eb3564d0c..088143fa9ac9e1 100644 +--- a/block/fops.c ++++ b/block/fops.c +@@ -681,7 +681,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) + ret = direct_write_fallback(iocb, from, ret, + blkdev_buffered_write(iocb, from)); + } else { ++ /* ++ * Take i_rwsem and invalidate_lock to avoid racing with ++ * set_blocksize changing i_blkbits/folio order and punching ++ * out the pagecache. ++ */ ++ inode_lock_shared(bd_inode); + ret = blkdev_buffered_write(iocb, from); ++ inode_unlock_shared(bd_inode); + } + + if (ret > 0) +@@ -693,6 +700,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) + static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) + { + struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host); ++ struct inode *bd_inode = bdev->bd_inode; + loff_t size = bdev_nr_bytes(bdev); + loff_t pos = iocb->ki_pos; + size_t shorted = 0; +@@ -728,7 +736,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) + goto reexpand; + } + ++ /* ++ * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize ++ * changing i_blkbits/folio order and punching out the pagecache. ++ */ ++ inode_lock_shared(bd_inode); + ret = filemap_read(iocb, to, ret); ++ inode_unlock_shared(bd_inode); + + reexpand: + if (unlikely(shorted)) +@@ -771,6 +785,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, + if ((start | len) & (bdev_logical_block_size(bdev) - 1)) + return -EINVAL; + ++ inode_lock(inode); + filemap_invalidate_lock(inode->i_mapping); + + /* +@@ -811,6 +826,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, + + fail: + filemap_invalidate_unlock(inode->i_mapping); ++ inode_unlock(inode); + return error; + } + +diff --git a/block/ioctl.c b/block/ioctl.c +index 231537f79a8cb4..024767fa1e52d5 100644 +--- a/block/ioctl.c ++++ b/block/ioctl.c +@@ -114,6 +114,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, + end > bdev_nr_bytes(bdev)) + return -EINVAL; + ++ inode_lock(inode); + filemap_invalidate_lock(inode->i_mapping); + err = truncate_bdev_range(bdev, mode, start, end - 1); + if (err) +@@ -121,6 +122,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, + err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL); + fail: + filemap_invalidate_unlock(inode->i_mapping); ++ inode_unlock(inode); + return err; + } + +@@ -146,12 +148,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, + end > bdev_nr_bytes(bdev)) + return -EINVAL; + ++ inode_lock(bdev->bd_inode); + filemap_invalidate_lock(bdev->bd_inode->i_mapping); + err = truncate_bdev_range(bdev, mode, start, end - 1); + if (!err) + err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, + GFP_KERNEL); + filemap_invalidate_unlock(bdev->bd_inode->i_mapping); ++ inode_unlock(bdev->bd_inode); + return err; + } + +@@ -184,6 +188,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, + return 
-EINVAL; + + /* Invalidate the page cache, including dirty pages */ ++ inode_lock(inode); + filemap_invalidate_lock(inode->i_mapping); + err = truncate_bdev_range(bdev, mode, start, end); + if (err) +@@ -194,6 +199,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, + + fail: + filemap_invalidate_unlock(inode->i_mapping); ++ inode_unlock(inode); + return err; + } + +diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c +index f3db3fa91dd52e..08b78f56785329 100644 +--- a/drivers/accel/qaic/qaic_control.c ++++ b/drivers/accel/qaic/qaic_control.c +@@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev, + return -EINVAL; + remaining = in_trans->size - resources->xferred_dma_size; + if (remaining == 0) +- return 0; ++ return -EINVAL; + + if (check_add_overflow(xfer_start_addr, remaining, &end)) + return -EINVAL; +diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c +index f53c14fb74fda7..5a33cee9a3947f 100644 +--- a/drivers/base/power/runtime.c ++++ b/drivers/base/power/runtime.c +@@ -1552,6 +1552,32 @@ void pm_runtime_enable(struct device *dev) + } + EXPORT_SYMBOL_GPL(pm_runtime_enable); + ++static void pm_runtime_set_suspended_action(void *data) ++{ ++ pm_runtime_set_suspended(data); ++} ++ ++/** ++ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable. ++ * ++ * @dev: Device to handle. ++ */ ++int devm_pm_runtime_set_active_enabled(struct device *dev) ++{ ++ int err; ++ ++ err = pm_runtime_set_active(dev); ++ if (err) ++ return err; ++ ++ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev); ++ if (err) ++ return err; ++ ++ return devm_pm_runtime_enable(dev); ++} ++EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled); ++ + static void pm_runtime_disable_action(void *data) + { + pm_runtime_dont_use_autosuspend(data); +@@ -1574,6 +1600,24 @@ int devm_pm_runtime_enable(struct device *dev) + } + EXPORT_SYMBOL_GPL(devm_pm_runtime_enable); + ++static void pm_runtime_put_noidle_action(void *data) ++{ ++ pm_runtime_put_noidle(data); ++} ++ ++/** ++ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume. ++ * ++ * @dev: Device to handle. ++ */ ++int devm_pm_runtime_get_noresume(struct device *dev) ++{ ++ pm_runtime_get_noresume(dev); ++ ++ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev); ++} ++EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume); ++ + /** + * pm_runtime_forbid - Block runtime PM of a device. + * @dev: Device to handle. 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 1a2d227b7b7b96..4c21230aee460e 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -511,6 +511,8 @@ static const struct usb_device_id quirks_table[] = { + /* Realtek 8851BU Bluetooth devices */ + { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, ++ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK | ++ BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8852AE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK | +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c +index ea32bdf7cc24e0..4d96eed64fe0ce 100644 +--- a/drivers/cpufreq/cppc_cpufreq.c ++++ b/drivers/cpufreq/cppc_cpufreq.c +@@ -344,6 +344,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy) + return 0; + } + ++static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu) ++{ ++ unsigned int transition_latency_ns = cppc_get_transition_latency(cpu); ++ ++ if (transition_latency_ns == CPUFREQ_ETERNAL) ++ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC; ++ ++ return transition_latency_ns / NSEC_PER_USEC; ++} ++ + /* + * The PCC subspace describes the rate at which platform can accept commands + * on the shared PCC channel (including READs which do not count towards freq +@@ -366,12 +376,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) + return 10000; + } + } +- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; ++ return __cppc_cpufreq_get_transition_delay_us(cpu); + } + #else + static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) + { +- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; ++ return __cppc_cpufreq_get_transition_delay_us(cpu); + } + #endif + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index a1f35510d53955..c94e1cc7e3a73a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -2285,10 +2285,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) + int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, + struct kfd_vm_fault_info *mem) + { +- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { ++ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) { + *mem = *adev->gmc.vm_fault_info; +- mb(); /* make sure read happened */ +- atomic_set(&adev->gmc.vm_fault_info_updated, 0); ++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); + } + return 0; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +index c83445c2e37f3d..d358a08b5e0067 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +@@ -2012,7 +2012,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp) + } + + ret = psp_ta_load(psp, &psp->securedisplay_context.context); +- if (!ret) { ++ if (!ret && !psp->securedisplay_context.context.resp_status) { + psp->securedisplay_context.context.initialized = true; + mutex_init(&psp->securedisplay_context.mutex); + } else +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +index fd905889a4c63b..e2ee10a98640ac 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +@@ -1061,7 +1061,7 @@ static int gmc_v7_0_sw_init(void *handle) + GFP_KERNEL); + if 
(!adev->gmc.vm_fault_info) + return -ENOMEM; +- atomic_set(&adev->gmc.vm_fault_info_updated, 0); ++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); + + return 0; + } +@@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, + vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + VMID); + if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) +- && !atomic_read(&adev->gmc.vm_fault_info_updated)) { ++ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { + struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; + u32 protections = REG_GET_FIELD(status, + VM_CONTEXT1_PROTECTION_FAULT_STATUS, +@@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, + info->prot_read = protections & 0x8 ? true : false; + info->prot_write = protections & 0x10 ? true : false; + info->prot_exec = protections & 0x20 ? true : false; +- mb(); +- atomic_set(&adev->gmc.vm_fault_info_updated, 1); ++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); + } + + return 0; +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +index 0bebcdbb265807..ed268c5739eb8c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +@@ -1174,7 +1174,7 @@ static int gmc_v8_0_sw_init(void *handle) + GFP_KERNEL); + if (!adev->gmc.vm_fault_info) + return -ENOMEM; +- atomic_set(&adev->gmc.vm_fault_info_updated, 0); ++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); + + return 0; + } +@@ -1465,7 +1465,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, + vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + VMID); + if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) +- && !atomic_read(&adev->gmc.vm_fault_info_updated)) { ++ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { + struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; + u32 protections = REG_GET_FIELD(status, + VM_CONTEXT1_PROTECTION_FAULT_STATUS, +@@ -1481,8 +1481,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, + info->prot_read = protections & 0x8 ? true : false; + info->prot_write = protections & 0x10 ? true : false; + info->prot_exec = protections & 0x20 ? true : false; +- mb(); +- atomic_set(&adev->gmc.vm_fault_info_updated, 1); ++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); + } + + return 0; +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +index 53849fd3615f68..965ffcac17f860 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +@@ -5437,8 +5437,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, + thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + else if (hwmgr->pp_table_version == PP_TABLE_V0) +- thermal_data->max = data->thermal_temp_setting.temperature_shutdown * +- PP_TEMPERATURE_UNITS_PER_CENTIGRADES; ++ thermal_data->max = data->thermal_temp_setting.temperature_shutdown; + + thermal_data->sw_ctf_threshold = thermal_data->max; + +diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c +index 4d404f5ef87ebb..ea192c90b543ef 100644 +--- a/drivers/gpu/drm/bridge/lontium-lt9211.c ++++ b/drivers/gpu/drm/bridge/lontium-lt9211.c +@@ -120,8 +120,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx) + } + + /* Test for known Chip ID. 
*/ +- if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || +- chipid[2] != REG_CHIPID2_VALUE) { ++ if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) { + dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n", + chipid[0], chipid[1], chipid[2]); + return -EINVAL; +diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c +index 5f8e5e87d7cd63..0ed62cd51fb518 100644 +--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c ++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c +@@ -51,7 +51,6 @@ struct decon_context { + void __iomem *regs; + unsigned long irq_flags; + bool i80_if; +- bool suspended; + wait_queue_head_t wait_vsync_queue; + atomic_t wait_vsync_event; + +@@ -81,13 +80,30 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { + DRM_PLANE_TYPE_CURSOR, + }; + +-static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) ++/** ++ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync ++ * ++ * @ctx: display and enhancement controller context ++ * @win: window to protect registers for ++ * @protect: 1 to protect (disable updates) ++ */ ++static void decon_shadow_protect_win(struct decon_context *ctx, ++ unsigned int win, bool protect) + { +- struct decon_context *ctx = crtc->ctx; ++ u32 bits, val; + +- if (ctx->suspended) +- return; ++ bits = SHADOWCON_WINx_PROTECT(win); ++ ++ val = readl(ctx->regs + SHADOWCON); ++ if (protect) ++ val |= bits; ++ else ++ val &= ~bits; ++ writel(val, ctx->regs + SHADOWCON); ++} + ++static void decon_wait_for_vblank(struct decon_context *ctx) ++{ + atomic_set(&ctx->wait_vsync_event, 1); + + /* +@@ -100,25 +116,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) + DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n"); + } + +-static void decon_clear_channels(struct exynos_drm_crtc *crtc) ++static void decon_clear_channels(struct decon_context *ctx) + { +- struct decon_context *ctx = crtc->ctx; + unsigned int win, ch_enabled = 0; ++ u32 val; + + /* Check if any channel is enabled. 
*/ + for (win = 0; win < WINDOWS_NR; win++) { +- u32 val = readl(ctx->regs + WINCON(win)); ++ val = readl(ctx->regs + WINCON(win)); + + if (val & WINCONx_ENWIN) { ++ decon_shadow_protect_win(ctx, win, true); ++ + val &= ~WINCONx_ENWIN; + writel(val, ctx->regs + WINCON(win)); + ch_enabled = 1; ++ ++ decon_shadow_protect_win(ctx, win, false); + } + } + ++ val = readl(ctx->regs + DECON_UPDATE); ++ val |= DECON_UPDATE_STANDALONE_F; ++ writel(val, ctx->regs + DECON_UPDATE); ++ + /* Wait for vsync, as disable channel takes effect at next vsync */ + if (ch_enabled) +- decon_wait_for_vblank(ctx->crtc); ++ decon_wait_for_vblank(ctx); + } + + static int decon_ctx_initialize(struct decon_context *ctx, +@@ -126,7 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, + { + ctx->drm_dev = drm_dev; + +- decon_clear_channels(ctx->crtc); ++ decon_clear_channels(ctx); + + return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); + } +@@ -155,9 +179,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) + struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; + u32 val, clkdiv; + +- if (ctx->suspended) +- return; +- + /* nothing to do if we haven't set the mode yet */ + if (mode->htotal == 0 || mode->vtotal == 0) + return; +@@ -219,9 +240,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc) + struct decon_context *ctx = crtc->ctx; + u32 val; + +- if (ctx->suspended) +- return -EPERM; +- + if (!test_and_set_bit(0, &ctx->irq_flags)) { + val = readl(ctx->regs + VIDINTCON0); + +@@ -244,9 +262,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) + struct decon_context *ctx = crtc->ctx; + u32 val; + +- if (ctx->suspended) +- return; +- + if (test_and_clear_bit(0, &ctx->irq_flags)) { + val = readl(ctx->regs + VIDINTCON0); + +@@ -343,36 +358,11 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win) + writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); + } + +-/** +- * decon_shadow_protect_win() - disable updating values from shadow registers at vsync +- * +- * @ctx: display and enhancement controller context +- * @win: window to protect registers for +- * @protect: 1 to protect (disable updates) +- */ +-static void decon_shadow_protect_win(struct decon_context *ctx, +- unsigned int win, bool protect) +-{ +- u32 bits, val; +- +- bits = SHADOWCON_WINx_PROTECT(win); +- +- val = readl(ctx->regs + SHADOWCON); +- if (protect) +- val |= bits; +- else +- val &= ~bits; +- writel(val, ctx->regs + SHADOWCON); +-} +- + static void decon_atomic_begin(struct exynos_drm_crtc *crtc) + { + struct decon_context *ctx = crtc->ctx; + int i; + +- if (ctx->suspended) +- return; +- + for (i = 0; i < WINDOWS_NR; i++) + decon_shadow_protect_win(ctx, i, true); + } +@@ -392,9 +382,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, + unsigned int cpp = fb->format->cpp[0]; + unsigned int pitch = fb->pitches[0]; + +- if (ctx->suspended) +- return; +- + /* + * SHADOWCON/PRTCON register is used for enabling timing. 
+ * +@@ -482,9 +469,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, + unsigned int win = plane->index; + u32 val; + +- if (ctx->suspended) +- return; +- + /* protect windows */ + decon_shadow_protect_win(ctx, win, true); + +@@ -503,9 +487,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) + struct decon_context *ctx = crtc->ctx; + int i; + +- if (ctx->suspended) +- return; +- + for (i = 0; i < WINDOWS_NR; i++) + decon_shadow_protect_win(ctx, i, false); + exynos_crtc_handle_event(crtc); +@@ -533,9 +514,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc) + struct decon_context *ctx = crtc->ctx; + int ret; + +- if (!ctx->suspended) +- return; +- + ret = pm_runtime_resume_and_get(ctx->dev); + if (ret < 0) { + DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n"); +@@ -549,8 +527,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc) + decon_enable_vblank(ctx->crtc); + + decon_commit(ctx->crtc); +- +- ctx->suspended = false; + } + + static void decon_atomic_disable(struct exynos_drm_crtc *crtc) +@@ -558,9 +534,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc) + struct decon_context *ctx = crtc->ctx; + int i; + +- if (ctx->suspended) +- return; +- + /* + * We need to make sure that all windows are disabled before we + * suspend that connector. Otherwise we might try to scan from +@@ -570,8 +543,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc) + decon_disable_plane(crtc, &ctx->planes[i]); + + pm_runtime_put_sync(ctx->dev); +- +- ctx->suspended = true; + } + + static const struct exynos_drm_crtc_ops decon_crtc_ops = { +@@ -692,7 +663,6 @@ static int decon_probe(struct platform_device *pdev) + return -ENOMEM; + + ctx->dev = dev; +- ctx->suspended = true; + + i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); + if (i80_if_timings) +diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +index 97eadd08181d61..38fad14ffd4356 100644 +--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c ++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +@@ -1281,9 +1281,16 @@ static int ct_receive(struct intel_guc_ct *ct) + + static void ct_try_receive_message(struct intel_guc_ct *ct) + { ++ struct intel_guc *guc = ct_to_guc(ct); + int ret; + +- if (GEM_WARN_ON(!ct->enabled)) ++ if (!ct->enabled) { ++ GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress); ++ return; ++ } ++ ++ /* When interrupt disabled, message handling is not expected */ ++ if (!guc->interrupts.enabled) + return; + + ret = ct_receive(ct); +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +index e7136b7759cb33..c50aafa0ecdb61 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +@@ -230,6 +230,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) + if (ret) + DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); + ++ set_bit(GMU_STATUS_FW_START, &gmu->status); ++ + return ret; + } + +@@ -460,9 +462,10 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) + int ret; + u32 val; + +- gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); +- /* Wait for the register to finish posting */ +- wmb(); ++ if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status)) ++ return 0; ++ ++ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1)); + + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, + val & (1 << 1), 100, 10000); +@@ -489,6 +492,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) + int 
ret; + u32 val; + ++ if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status)) ++ return; ++ + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); + + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, +@@ -497,6 +503,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) + DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); + + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); ++ ++ set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status); + } + + static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) +@@ -617,8 +625,6 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) + /* ensure no writes happen before the uCode is fully written */ + wmb(); + +- a6xx_rpmh_stop(gmu); +- + err: + if (!IS_ERR_OR_NULL(pdcptr)) + iounmap(pdcptr); +@@ -755,22 +761,18 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); + } + +- if (state == GMU_WARM_BOOT) { +- ret = a6xx_rpmh_start(gmu); +- if (ret) +- return ret; +- } else { ++ /* Turn on register retention */ ++ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); ++ ++ ret = a6xx_rpmh_start(gmu); ++ if (ret) ++ return ret; ++ ++ if (state == GMU_COLD_BOOT) { + if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], + "GMU firmware is not loaded\n")) + return -ENOENT; + +- /* Turn on register retention */ +- gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); +- +- ret = a6xx_rpmh_start(gmu); +- if (ret) +- return ret; +- + ret = a6xx_gmu_fw_load(gmu); + if (ret) + return ret; +@@ -909,6 +911,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) + + /* Reset GPU core blocks */ + a6xx_gpu_sw_reset(gpu, true); ++ ++ a6xx_rpmh_stop(gmu); + } + + static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +index 236f81a43caa62..6a28ecaf8594e9 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +@@ -96,6 +96,12 @@ struct a6xx_gmu { + /* For power domain callback */ + struct notifier_block pd_nb; + struct completion pd_gate; ++ ++/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */ ++#define GMU_STATUS_FW_START 0 ++/* To track if PDC sleep seq was done */ ++#define GMU_STATUS_PDC_SLEEP 1 ++ unsigned long status; + }; + + static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 3664c1476a83ad..00bfc6f38f459d 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1209,14 +1209,16 @@ static int hw_init(struct msm_gpu *gpu) + /* Clear GBIF halt in case GX domain was not collapsed */ + if (adreno_is_a619_holi(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); ++ gpu_read(gpu, REG_A6XX_GBIF_HALT); ++ + gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0); +- /* Let's make extra sure that the GPU can access the memory.. */ +- mb(); ++ gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL); + } else if (a6xx_has_gbif(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); ++ gpu_read(gpu, REG_A6XX_GBIF_HALT); ++ + gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); +- /* Let's make extra sure that the GPU can access the memory.. 
*/ +- mb(); ++ gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT); + } + + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +index 0193d10867dd2f..97486eba01b7bf 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +@@ -984,7 +984,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, + return format; + + if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 || +- drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) { ++ drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) { + drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", + drm_rect_width(src) >> 16, drm_rect_height(src) >> 16, + drm_rect_width(dest), drm_rect_height(dest)); +diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c +index fa4652f2347189..4faa2108c0a73b 100644 +--- a/drivers/gpu/drm/scheduler/sched_main.c ++++ b/drivers/gpu/drm/scheduler/sched_main.c +@@ -783,13 +783,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, + dma_resv_assert_held(resv); + + dma_resv_for_each_fence(&cursor, resv, usage, fence) { +- /* Make sure to grab an additional ref on the added fence */ +- dma_fence_get(fence); +- ret = drm_sched_job_add_dependency(job, fence); +- if (ret) { +- dma_fence_put(fence); ++ /* ++ * As drm_sched_job_add_dependency always consumes the fence ++ * reference (even when it fails), and dma_resv_for_each_fence ++ * is not obtaining one, we need to grab one before calling. ++ */ ++ ret = drm_sched_job_add_dependency(job, dma_fence_get(fence)); ++ if (ret) + return ret; +- } + } + return 0; + } +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index f5c217ac4bfaa7..f073d5621050a1 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -622,7 +622,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage, + return; + } + +- if (value == 0 || value < dev->battery_min || value > dev->battery_max) ++ if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0) ++ return; ++ ++ if (value < dev->battery_min || value > dev->battery_max) + return; + + capacity = hidinput_scale_battery_capacity(dev, value); +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index a85581cd511fd3..b9e67b408a4b93 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -83,9 +83,8 @@ enum latency_mode { + HID_LATENCY_HIGH = 1, + }; + +-#define MT_IO_FLAGS_RUNNING 0 +-#define MT_IO_FLAGS_ACTIVE_SLOTS 1 +-#define MT_IO_FLAGS_PENDING_SLOTS 2 ++#define MT_IO_SLOTS_MASK GENMASK(7, 0) /* reserve first 8 bits for slot tracking */ ++#define MT_IO_FLAGS_RUNNING 32 + + static const bool mtrue = true; /* default for true */ + static const bool mfalse; /* default for false */ +@@ -161,7 +160,11 @@ struct mt_device { + struct mt_class mtclass; /* our mt device class */ + struct timer_list release_timer; /* to release sticky fingers */ + struct hid_device *hdev; /* hid_device we're attached to */ +- unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */ ++ unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_RUNNING) ++ * first 8 bits are reserved for keeping the slot ++ * states, this is fine because we only support up ++ * to 250 slots (MT_MAX_MAXCONTACT) ++ */ + __u8 inputmode_value; /* InputMode HID feature value */ + __u8 maxcontacts; + bool is_buttonpad; /* is this device a button pad? 
*/ +@@ -936,6 +939,7 @@ static void mt_release_pending_palms(struct mt_device *td, + + for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) { + clear_bit(slotnum, app->pending_palm_slots); ++ clear_bit(slotnum, &td->mt_io_flags); + + input_mt_slot(input, slotnum); + input_mt_report_slot_inactive(input); +@@ -967,12 +971,6 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app, + + app->num_received = 0; + app->left_button_state = 0; +- +- if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags)) +- set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); +- else +- clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); +- clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); + } + + static int mt_compute_timestamp(struct mt_application *app, __s32 value) +@@ -1147,7 +1145,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, + input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); + input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); + +- set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); ++ set_bit(slotnum, &td->mt_io_flags); ++ } else { ++ clear_bit(slotnum, &td->mt_io_flags); + } + + return 0; +@@ -1282,7 +1282,7 @@ static void mt_touch_report(struct hid_device *hid, + * defect. + */ + if (app->quirks & MT_QUIRK_STICKY_FINGERS) { +- if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) ++ if (td->mt_io_flags & MT_IO_SLOTS_MASK) + mod_timer(&td->release_timer, + jiffies + msecs_to_jiffies(100)); + else +@@ -1658,6 +1658,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) + case HID_CP_CONSUMER_CONTROL: + case HID_GD_WIRELESS_RADIO_CTLS: + case HID_GD_SYSTEM_MULTIAXIS: ++ case HID_DG_PEN: + /* already handled by hid core */ + break; + case HID_DG_TOUCHSCREEN: +@@ -1729,6 +1730,7 @@ static void mt_release_contacts(struct hid_device *hid) + for (i = 0; i < mt->num_slots; i++) { + input_mt_slot(input_dev, i); + input_mt_report_slot_inactive(input_dev); ++ clear_bit(i, &td->mt_io_flags); + } + input_mt_sync_frame(input_dev); + input_sync(input_dev); +@@ -1751,7 +1753,7 @@ static void mt_expired_timeout(struct timer_list *t) + */ + if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) + return; +- if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) ++ if (td->mt_io_flags & MT_IO_SLOTS_MASK) + mt_release_contacts(hdev); + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); + } +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h +index 809734e566e332..e289afcb43e3c6 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h +@@ -126,9 +126,9 @@ struct inv_icm42600_suspended { + * @suspended: suspended sensors configuration. + * @indio_gyro: gyroscope IIO device. + * @indio_accel: accelerometer IIO device. +- * @buffer: data transfer buffer aligned for DMA. +- * @fifo: FIFO management structure. + * @timestamp: interrupt timestamps. ++ * @fifo: FIFO management structure. ++ * @buffer: data transfer buffer aligned for DMA. 
+ */ + struct inv_icm42600_state { + struct mutex lock; +@@ -142,12 +142,12 @@ struct inv_icm42600_state { + struct inv_icm42600_suspended suspended; + struct iio_dev *indio_gyro; + struct iio_dev *indio_accel; +- u8 buffer[2] __aligned(IIO_DMA_MINALIGN); +- struct inv_icm42600_fifo fifo; + struct { + s64 gyro; + s64 accel; + } timestamp; ++ struct inv_icm42600_fifo fifo; ++ u8 buffer[2] __aligned(IIO_DMA_MINALIGN); + }; + + /* Virtual register addresses: @bank on MSB (4 upper bits), @address on LSB */ +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +index a1f055014cc652..99eb651743f80f 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +@@ -567,20 +567,12 @@ static void inv_icm42600_disable_vdd_reg(void *_data) + static void inv_icm42600_disable_vddio_reg(void *_data) + { + struct inv_icm42600_state *st = _data; +- const struct device *dev = regmap_get_device(st->map); +- int ret; +- +- ret = regulator_disable(st->vddio_supply); +- if (ret) +- dev_err(dev, "failed to disable vddio error %d\n", ret); +-} ++ struct device *dev = regmap_get_device(st->map); + +-static void inv_icm42600_disable_pm(void *_data) +-{ +- struct device *dev = _data; ++ if (pm_runtime_status_suspended(dev)) ++ return; + +- pm_runtime_put_sync(dev); +- pm_runtime_disable(dev); ++ regulator_disable(st->vddio_supply); + } + + int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, +@@ -677,16 +669,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, + return ret; + + /* setup runtime power management */ +- ret = pm_runtime_set_active(dev); ++ ret = devm_pm_runtime_set_active_enabled(dev); + if (ret) + return ret; +- pm_runtime_get_noresume(dev); +- pm_runtime_enable(dev); ++ + pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(dev); +- pm_runtime_put(dev); + +- return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev); ++ return ret; + } + EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600); + +@@ -697,17 +687,15 @@ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600); + static int inv_icm42600_suspend(struct device *dev) + { + struct inv_icm42600_state *st = dev_get_drvdata(dev); +- int ret; ++ int ret = 0; + + mutex_lock(&st->lock); + + st->suspended.gyro = st->conf.gyro.mode; + st->suspended.accel = st->conf.accel.mode; + st->suspended.temp = st->conf.temp_en; +- if (pm_runtime_suspended(dev)) { +- ret = 0; ++ if (pm_runtime_suspended(dev)) + goto out_unlock; +- } + + /* disable FIFO data streaming */ + if (st->fifo.on) { +@@ -739,10 +727,13 @@ static int inv_icm42600_resume(struct device *dev) + struct inv_icm42600_state *st = dev_get_drvdata(dev); + struct inv_sensors_timestamp *gyro_ts = iio_priv(st->indio_gyro); + struct inv_sensors_timestamp *accel_ts = iio_priv(st->indio_accel); +- int ret; ++ int ret = 0; + + mutex_lock(&st->lock); + ++ if (pm_runtime_suspended(dev)) ++ goto out_unlock; ++ + ret = inv_icm42600_enable_regulator_vddio(st); + if (ret) + goto out_unlock; +diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h +index 2810ebe9b5f75c..5a4676d5207935 100644 +--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h ++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h +@@ -361,7 +361,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe); + void mxc_isi_channel_put(struct mxc_isi_pipe 
*pipe); + void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe); + void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe); +-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass); ++int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe); + void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe); + + void mxc_isi_channel_config(struct mxc_isi_pipe *pipe, +diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c +index 19e80b95ffeaa3..ece352171b936d 100644 +--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c ++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c +@@ -589,7 +589,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe) + * + * TODO: Support secondary line buffer for downscaling YUV420 images. + */ +-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass) ++int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe) + { + /* Channel chaining requires both line and output buffer. */ + const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF +diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c +index cd6c52e9d158a7..81223d28ee56e8 100644 +--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c ++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c +@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data { + struct v4l2_pix_format_mplane format; + const struct mxc_isi_format_info *info; + u32 sequence; +- bool streaming; + }; + + struct mxc_isi_m2m_ctx { +@@ -236,6 +235,65 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2) + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + } + ++static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q) ++{ ++ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q); ++ const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format; ++ const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format; ++ const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info; ++ const struct mxc_isi_format_info *out_info = ctx->queues.out.info; ++ struct mxc_isi_m2m *m2m = ctx->m2m; ++ int ret; ++ ++ guard(mutex)(&m2m->lock); ++ ++ if (m2m->usage_count == INT_MAX) ++ return -EOVERFLOW; ++ ++ /* ++ * Acquire the pipe and initialize the channel with the first user of ++ * the M2M device. ++ */ ++ if (m2m->usage_count == 0) { ++ bool bypass = cap_pix->width == out_pix->width && ++ cap_pix->height == out_pix->height && ++ cap_info->encoding == out_info->encoding; ++ ++ ret = mxc_isi_channel_acquire(m2m->pipe, ++ &mxc_isi_m2m_frame_write_done, ++ bypass); ++ if (ret) ++ return ret; ++ ++ mxc_isi_channel_get(m2m->pipe); ++ } ++ ++ m2m->usage_count++; ++ ++ /* ++ * Allocate resources for the channel, counting how many users require ++ * buffer chaining. 
++ */ ++ if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) { ++ ret = mxc_isi_channel_chain(m2m->pipe); ++ if (ret) ++ goto err_deinit; ++ ++ m2m->chained_count++; ++ ctx->chained = true; ++ } ++ ++ return 0; ++ ++err_deinit: ++ if (--m2m->usage_count == 0) { ++ mxc_isi_channel_put(m2m->pipe); ++ mxc_isi_channel_release(m2m->pipe); ++ } ++ ++ return ret; ++} ++ + static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q, + unsigned int count) + { +@@ -265,6 +323,35 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q) + } + } + ++static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q) ++{ ++ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q); ++ struct mxc_isi_m2m *m2m = ctx->m2m; ++ ++ guard(mutex)(&m2m->lock); ++ ++ /* ++ * If the last context is this one, reset it to make sure the device ++ * will be reconfigured when streaming is restarted. ++ */ ++ if (m2m->last_ctx == ctx) ++ m2m->last_ctx = NULL; ++ ++ /* Free the channel resources if this is the last chained context. */ ++ if (ctx->chained && --m2m->chained_count == 0) ++ mxc_isi_channel_unchain(m2m->pipe); ++ ctx->chained = false; ++ ++ /* Turn off the light with the last user. */ ++ if (--m2m->usage_count == 0) { ++ mxc_isi_channel_disable(m2m->pipe); ++ mxc_isi_channel_put(m2m->pipe); ++ mxc_isi_channel_release(m2m->pipe); ++ } ++ ++ WARN_ON(m2m->usage_count < 0); ++} ++ + static const struct vb2_ops mxc_isi_m2m_vb2_qops = { + .queue_setup = mxc_isi_m2m_vb2_queue_setup, + .buf_init = mxc_isi_m2m_vb2_buffer_init, +@@ -272,8 +359,10 @@ static const struct vb2_ops mxc_isi_m2m_vb2_qops = { + .buf_queue = mxc_isi_m2m_vb2_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, ++ .prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming, + .start_streaming = mxc_isi_m2m_vb2_start_streaming, + .stop_streaming = mxc_isi_m2m_vb2_stop_streaming, ++ .unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming, + }; + + static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq, +@@ -483,136 +572,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh, + return 0; + } + +-static int mxc_isi_m2m_streamon(struct file *file, void *fh, +- enum v4l2_buf_type type) +-{ +- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh); +- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type); +- const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format; +- const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format; +- const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info; +- const struct mxc_isi_format_info *out_info = ctx->queues.out.info; +- struct mxc_isi_m2m *m2m = ctx->m2m; +- bool bypass; +- int ret; +- +- if (q->streaming) +- return 0; +- +- mutex_lock(&m2m->lock); +- +- if (m2m->usage_count == INT_MAX) { +- ret = -EOVERFLOW; +- goto unlock; +- } +- +- bypass = cap_pix->width == out_pix->width && +- cap_pix->height == out_pix->height && +- cap_info->encoding == out_info->encoding; +- +- /* +- * Acquire the pipe and initialize the channel with the first user of +- * the M2M device. +- */ +- if (m2m->usage_count == 0) { +- ret = mxc_isi_channel_acquire(m2m->pipe, +- &mxc_isi_m2m_frame_write_done, +- bypass); +- if (ret) +- goto unlock; +- +- mxc_isi_channel_get(m2m->pipe); +- } +- +- m2m->usage_count++; +- +- /* +- * Allocate resources for the channel, counting how many users require +- * buffer chaining. 
+- */ +- if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) { +- ret = mxc_isi_channel_chain(m2m->pipe, bypass); +- if (ret) +- goto deinit; +- +- m2m->chained_count++; +- ctx->chained = true; +- } +- +- /* +- * Drop the lock to start the stream, as the .device_run() operation +- * needs to acquire it. +- */ +- mutex_unlock(&m2m->lock); +- ret = v4l2_m2m_ioctl_streamon(file, fh, type); +- if (ret) { +- /* Reacquire the lock for the cleanup path. */ +- mutex_lock(&m2m->lock); +- goto unchain; +- } +- +- q->streaming = true; +- +- return 0; +- +-unchain: +- if (ctx->chained && --m2m->chained_count == 0) +- mxc_isi_channel_unchain(m2m->pipe); +- ctx->chained = false; +- +-deinit: +- if (--m2m->usage_count == 0) { +- mxc_isi_channel_put(m2m->pipe); +- mxc_isi_channel_release(m2m->pipe); +- } +- +-unlock: +- mutex_unlock(&m2m->lock); +- return ret; +-} +- +-static int mxc_isi_m2m_streamoff(struct file *file, void *fh, +- enum v4l2_buf_type type) +-{ +- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh); +- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type); +- struct mxc_isi_m2m *m2m = ctx->m2m; +- +- v4l2_m2m_ioctl_streamoff(file, fh, type); +- +- if (!q->streaming) +- return 0; +- +- mutex_lock(&m2m->lock); +- +- /* +- * If the last context is this one, reset it to make sure the device +- * will be reconfigured when streaming is restarted. +- */ +- if (m2m->last_ctx == ctx) +- m2m->last_ctx = NULL; +- +- /* Free the channel resources if this is the last chained context. */ +- if (ctx->chained && --m2m->chained_count == 0) +- mxc_isi_channel_unchain(m2m->pipe); +- ctx->chained = false; +- +- /* Turn off the light with the last user. */ +- if (--m2m->usage_count == 0) { +- mxc_isi_channel_disable(m2m->pipe); +- mxc_isi_channel_put(m2m->pipe); +- mxc_isi_channel_release(m2m->pipe); +- } +- +- WARN_ON(m2m->usage_count < 0); +- +- mutex_unlock(&m2m->lock); +- +- q->streaming = false; +- +- return 0; +-} +- + static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = { + .vidioc_querycap = mxc_isi_m2m_querycap, + +@@ -633,8 +592,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = { + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, + .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + +- .vidioc_streamon = mxc_isi_m2m_streamon, +- .vidioc_streamoff = mxc_isi_m2m_streamoff, ++ .vidioc_streamon = v4l2_m2m_ioctl_streamon, ++ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c +index 65d20e9bae69db..483523327c025f 100644 +--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c ++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c +@@ -851,7 +851,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe, + + /* Chain the channel if needed for wide resolutions. 
*/ + if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) { +- ret = mxc_isi_channel_chain(pipe, bypass); ++ ret = mxc_isi_channel_chain(pipe); + if (ret) + mxc_isi_channel_release(pipe); + } +diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c +index cdb28d6a092c6d..e5477775992e6e 100644 +--- a/drivers/net/can/m_can/m_can_platform.c ++++ b/drivers/net/can/m_can/m_can_platform.c +@@ -183,7 +183,7 @@ static void m_can_plat_remove(struct platform_device *pdev) + struct m_can_classdev *mcan_class = &priv->cdev; + + m_can_class_unregister(mcan_class); +- ++ pm_runtime_disable(mcan_class->dev); + m_can_class_free_dev(mcan_class->net); + } + +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c +index de616d6589c0bb..9bd61fd8e5013f 100644 +--- a/drivers/net/can/usb/gs_usb.c ++++ b/drivers/net/can/usb/gs_usb.c +@@ -286,11 +286,6 @@ struct gs_host_frame { + #define GS_MAX_RX_URBS 30 + #define GS_NAPI_WEIGHT 32 + +-/* Maximum number of interfaces the driver supports per device. +- * Current hardware only supports 3 interfaces. The future may vary. +- */ +-#define GS_MAX_INTF 3 +- + struct gs_tx_context { + struct gs_can *dev; + unsigned int echo_id; +@@ -321,7 +316,6 @@ struct gs_can { + + /* usb interface struct */ + struct gs_usb { +- struct gs_can *canch[GS_MAX_INTF]; + struct usb_anchor rx_submitted; + struct usb_device *udev; + +@@ -333,9 +327,11 @@ struct gs_usb { + + unsigned int hf_size_rx; + u8 active_channels; ++ u8 channel_cnt; + + unsigned int pipe_in; + unsigned int pipe_out; ++ struct gs_can *canch[] __counted_by(channel_cnt); + }; + + /* 'allocate' a tx context. +@@ -596,7 +592,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) + } + + /* device reports out of range channel id */ +- if (hf->channel >= GS_MAX_INTF) ++ if (hf->channel >= parent->channel_cnt) + goto device_detach; + + dev = parent->canch[hf->channel]; +@@ -696,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) + /* USB failure take down all interfaces */ + if (rc == -ENODEV) { + device_detach: +- for (rc = 0; rc < GS_MAX_INTF; rc++) { ++ for (rc = 0; rc < parent->channel_cnt; rc++) { + if (parent->canch[rc]) + netif_device_detach(parent->canch[rc]->netdev); + } +@@ -1246,6 +1242,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, + + netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */ + netdev->dev_id = channel; ++ netdev->dev_port = channel; + + /* dev setup */ + strcpy(dev->bt_const.name, KBUILD_MODNAME); +@@ -1457,17 +1454,19 @@ static int gs_usb_probe(struct usb_interface *intf, + icount = dconf.icount + 1; + dev_info(&intf->dev, "Configuring for %u interfaces\n", icount); + +- if (icount > GS_MAX_INTF) { ++ if (icount > type_max(parent->channel_cnt)) { + dev_err(&intf->dev, + "Driver cannot handle more that %u CAN interfaces\n", +- GS_MAX_INTF); ++ type_max(parent->channel_cnt)); + return -EINVAL; + } + +- parent = kzalloc(sizeof(*parent), GFP_KERNEL); ++ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL); + if (!parent) + return -ENOMEM; + ++ parent->channel_cnt = icount; ++ + init_usb_anchor(&parent->rx_submitted); + + usb_set_intfdata(intf, parent); +@@ -1528,7 +1527,7 @@ static void gs_usb_disconnect(struct usb_interface *intf) + return; + } + +- for (i = 0; i < GS_MAX_INTF; i++) ++ for (i = 0; i < parent->channel_cnt; i++) + if (parent->canch[i]) + gs_destroy_candev(parent->canch[i]); + +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c 
+index 34d45cebefb5d3..b4d57da71de2a1 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +@@ -1172,7 +1172,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) + + static int xgbe_phy_reset(struct xgbe_prv_data *pdata) + { +- pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + + return pdata->phy_if.phy_reset(pdata); +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +index 8345d439184ebe..63012119f2c8eb 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +@@ -1664,6 +1664,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) + pdata->phy.duplex = DUPLEX_FULL; + } + ++ pdata->phy_link = 0; + pdata->phy.link = 0; + + pdata->phy.pause_autoneg = pdata->pause_autoneg; +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index b3878975bd9c05..ea4973096aa287 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -5814,7 +5814,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) + u32 current_speed = SPEED_UNKNOWN; + u8 current_duplex = DUPLEX_UNKNOWN; + bool current_link_up = false; +- u32 local_adv, remote_adv, sgsr; ++ u32 local_adv = 0, remote_adv = 0, sgsr; + + if ((tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) && +@@ -5955,9 +5955,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) + else + current_duplex = DUPLEX_HALF; + +- local_adv = 0; +- remote_adv = 0; +- + if (bmcr & BMCR_ANENABLE) { + u32 common; + +diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c +index 1c3a5cf379cd03..72c97dcd0fee0e 100644 +--- a/drivers/net/ethernet/dlink/dl2k.c ++++ b/drivers/net/ethernet/dlink/dl2k.c +@@ -498,25 +498,34 @@ static int alloc_list(struct net_device *dev) + for (i = 0; i < RX_RING_SIZE; i++) { + /* Allocated fixed size of skbuff */ + struct sk_buff *skb; ++ dma_addr_t addr; + + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); + np->rx_skbuff[i] = skb; +- if (!skb) { +- free_list(dev); +- return -ENOMEM; +- } ++ if (!skb) ++ goto err_free_list; ++ ++ addr = dma_map_single(&np->pdev->dev, skb->data, ++ np->rx_buf_sz, DMA_FROM_DEVICE); ++ if (dma_mapping_error(&np->pdev->dev, addr)) ++ goto err_kfree_skb; + + np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma + + ((i + 1) % RX_RING_SIZE) * + sizeof(struct netdev_desc)); + /* Rubicon now supports 40 bits of addressing space. */ +- np->rx_ring[i].fraginfo = +- cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data, +- np->rx_buf_sz, DMA_FROM_DEVICE)); ++ np->rx_ring[i].fraginfo = cpu_to_le64(addr); + np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); + } + + return 0; ++ ++err_kfree_skb: ++ dev_kfree_skb(np->rx_skbuff[i]); ++ np->rx_skbuff[i] = NULL; ++err_free_list: ++ free_list(dev); ++ return -ENOMEM; + } + + static void rio_hw_init(struct net_device *dev) +diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h +index 5f08779c0e4e31..e177d1d58696aa 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/defines.h ++++ b/drivers/net/ethernet/intel/ixgbevf/defines.h +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Copyright(c) 1999 - 2018 Intel Corporation. */ ++/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ + + #ifndef _IXGBEVF_DEFINES_H_ + #define _IXGBEVF_DEFINES_H_ +@@ -16,6 +16,9 @@ + #define IXGBE_DEV_ID_X550_VF_HV 0x1564 + #define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + ++#define IXGBE_DEV_ID_E610_VF 0x57AD ++#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF ++ + #define IXGBE_VF_IRQ_CLEAR_MASK 7 + #define IXGBE_VF_MAX_TX_QUEUES 8 + #define IXGBE_VF_MAX_RX_QUEUES 8 +@@ -25,6 +28,7 @@ + + /* Link speed */ + typedef u32 ixgbe_link_speed; ++#define IXGBE_LINK_SPEED_UNKNOWN 0 + #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 + #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 + #define IXGBE_LINK_SPEED_100_FULL 0x0008 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c +index f804b35d79c726..83b3243f172c72 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c +@@ -271,6 +271,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + ++ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC)) ++ return -EOPNOTSUPP; ++ + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload"); + return -EINVAL; +@@ -400,6 +403,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + ++ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC)) ++ return; ++ + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; + +@@ -628,6 +634,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) + size_t size; + + switch (adapter->hw.api_version) { ++ case ixgbe_mbox_api_17: ++ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC)) ++ return; ++ break; + case ixgbe_mbox_api_14: + break; + default: +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +index 130cb868774c40..f31068e24e867f 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Copyright(c) 1999 - 2018 Intel Corporation. */ ++/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ + + #ifndef _IXGBEVF_H_ + #define _IXGBEVF_H_ +@@ -366,6 +366,13 @@ struct ixgbevf_adapter { + /* Interrupt Throttle Rate */ + u32 eitr_param; + ++ u32 pf_features; ++#define IXGBEVF_PF_SUP_IPSEC BIT(0) ++#define IXGBEVF_PF_SUP_ESX_MBX BIT(1) ++ ++#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \ ++ IXGBEVF_PF_SUP_ESX_MBX) ++ + struct ixgbevf_hw_stats stats; + + unsigned long state; +@@ -418,6 +425,8 @@ enum ixgbevf_boards { + board_X550EM_x_vf, + board_X550EM_x_vf_hv, + board_x550em_a_vf, ++ board_e610_vf, ++ board_e610_vf_hv, + }; + + enum ixgbevf_xcast_modes { +@@ -434,11 +443,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; + extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; + extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy; + extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; ++extern const struct ixgbevf_info ixgbevf_e610_vf_info; + + extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; + extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; + extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; + extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; ++extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info; + extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; + + /* needed by ethtool.c */ +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +index a44e4bd561421a..72b17a0f052c2b 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +@@ -1,5 +1,5 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Copyright(c) 1999 - 2018 Intel Corporation. */ ++/* Copyright(c) 1999 - 2024 Intel Corporation. */ + + /****************************************************************************** + Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code +@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; + + static char ixgbevf_copyright[] = +- "Copyright (c) 2009 - 2018 Intel Corporation."; ++ "Copyright (c) 2009 - 2024 Intel Corporation."; + + static const struct ixgbevf_info *ixgbevf_info_tbl[] = { + [board_82599_vf] = &ixgbevf_82599_vf_info, +@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, + [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, + [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, ++ [board_e610_vf] = &ixgbevf_e610_vf_info, ++ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info, + }; + + /* ixgbevf_pci_tbl - PCI Device ID Table +@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, ++ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID, ++ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf}, + /* required last entry */ + {0, } + }; +@@ -2270,10 +2275,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) + adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; + } + ++/** ++ * ixgbevf_set_features - Set features supported by PF ++ * @adapter: pointer to the adapter struct ++ * ++ * Negotiate with PF supported features and then set pf_features accordingly. 
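Elsewhere in this series the IPsec hooks consult this mask before touching hardware. The gating idiom, reduced to a sketch (feature bit and function names illustrative):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define VF_FEAT_IPSEC   BIT(0)  /* PF advertised IPsec offload */

/* Fail fast when the PF never negotiated the capability, so no
 * mailbox traffic or register writes happen on unsupported paths. */
static int vf_ipsec_add_sa(u32 pf_features)
{
        if (!(pf_features & VF_FEAT_IPSEC))
                return -EOPNOTSUPP;

        /* ... program the security association ... */
        return 0;
}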
++ */ ++static void ixgbevf_set_features(struct ixgbevf_adapter *adapter) ++{ ++ u32 *pf_features = &adapter->pf_features; ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; ++ ++ err = hw->mac.ops.negotiate_features(hw, pf_features); ++ if (err && err != -EOPNOTSUPP) ++ netdev_dbg(adapter->netdev, ++ "PF feature negotiation failed.\n"); ++ ++ /* Address also pre API 1.7 cases */ ++ if (hw->api_version == ixgbe_mbox_api_14) ++ *pf_features |= IXGBEVF_PF_SUP_IPSEC; ++ else if (hw->api_version == ixgbe_mbox_api_15) ++ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX; ++} ++ + static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + static const int api[] = { ++ ixgbe_mbox_api_17, ++ ixgbe_mbox_api_16, + ixgbe_mbox_api_15, + ixgbe_mbox_api_14, + ixgbe_mbox_api_13, +@@ -2293,7 +2324,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) + idx++; + } + +- if (hw->api_version >= ixgbe_mbox_api_15) { ++ ixgbevf_set_features(adapter); ++ ++ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) { + hw->mbx.ops.init_params(hw); + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, + sizeof(struct ixgbe_mbx_operations)); +@@ -2650,6 +2683,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: ++ case ixgbe_mbox_api_16: ++ case ixgbe_mbox_api_17: + if (adapter->xdp_prog && + hw->mac.max_tx_queues == rss) + rss = rss > 3 ? 2 : 1; +@@ -4644,6 +4679,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: ++ case ixgbe_mbox_api_16: ++ case ixgbe_mbox_api_17: + netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - + (ETH_HLEN + ETH_FCS_LEN); + break; +@@ -4694,6 +4731,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + case ixgbe_mac_X540_vf: + dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + break; ++ case ixgbe_mac_e610_vf: ++ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n"); ++ break; + case ixgbe_mac_82599_vf: + default: + dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); +diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h +index 835bbcc5cc8e63..a8ed23ee66aa84 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h ++++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h +@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ + ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */ ++ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */ ++ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ + }; +@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev { + + #define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + ++/* mailbox API, version 1.6 VF requests */ ++#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */ ++ ++/* mailbox API, version 1.7 VF requests */ ++#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF*/ ++ + /* length of permanent address message returned from PF */ + #define IXGBE_VF_PERMADDR_MSG_LEN 4 + /* word in permanent address message with the current multicast type */ +diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c 
b/drivers/net/ethernet/intel/ixgbevf/vf.c +index 1641d00d8ed35c..65257107dfc8a4 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/vf.c ++++ b/drivers/net/ethernet/intel/ixgbevf/vf.c +@@ -1,5 +1,5 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Copyright(c) 1999 - 2018 Intel Corporation. */ ++/* Copyright(c) 1999 - 2024 Intel Corporation. */ + + #include "vf.h" + #include "ixgbevf.h" +@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) + * is not supported for this device type. + */ + switch (hw->api_version) { ++ case ixgbe_mbox_api_17: ++ case ixgbe_mbox_api_16: + case ixgbe_mbox_api_15: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_13: +@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) + * or if the operation is not supported for this device type. + */ + switch (hw->api_version) { ++ case ixgbe_mbox_api_17: ++ case ixgbe_mbox_api_16: + case ixgbe_mbox_api_15: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_13: +@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: ++ case ixgbe_mbox_api_16: ++ case ixgbe_mbox_api_17: + break; + default: + return -EOPNOTSUPP; +@@ -624,6 +630,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) + return -EOPNOTSUPP; + } + ++/** ++ * ixgbevf_get_pf_link_state - Get PF's link status ++ * @hw: pointer to the HW structure ++ * @speed: link speed ++ * @link_up: indicate if link is up/down ++ * ++ * Ask PF to provide link_up state and speed of the link. ++ * ++ * Return: IXGBE_ERR_MBX in the case of mailbox error, ++ * -EOPNOTSUPP if the op is not supported or 0 on success. ++ */ ++static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up) ++{ ++ u32 msgbuf[3] = {}; ++ int err; ++ ++ switch (hw->api_version) { ++ case ixgbe_mbox_api_16: ++ case ixgbe_mbox_api_17: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE; ++ ++ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ++ ARRAY_SIZE(msgbuf)); ++ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) { ++ err = IXGBE_ERR_MBX; ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ /* No need to set @link_up to false as it will be done by ++ * ixgbe_check_mac_link_vf(). ++ */ ++ } else { ++ *speed = msgbuf[1]; ++ *link_up = msgbuf[2]; ++ } ++ ++ return err; ++} ++ ++/** ++ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver ++ * @hw: pointer to the HW structure ++ * @pf_features: bitmask of features supported by PF ++ * ++ * Return: IXGBE_ERR_MBX in the case of mailbox error, ++ * -EOPNOTSUPP if the op is not supported or 0 on success. 
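Both new requests, IXGBE_VF_GET_PF_LINK_STATE and IXGBE_VF_FEATURES_NEGOTIATE, share the same transport shape. A condensed sketch of that write-then-read-ack exchange; the transport helper and failure flag here are assumptions, not the driver's exact API:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

int vf_write_msg_read_ack(void *hw, u32 *msg, u16 len);  /* assumed */

#define VF_MSGTYPE_FAILURE      BIT(30)  /* illustrative NACK flag */

static int vf_mbx_request(void *hw, u32 *msg, u16 len)
{
        int err = vf_write_msg_read_ack(hw, msg, len);

        /* Treat a transport error and a PF-side NACK the same way,
         * leaving the caller to pick safe fallback values. */
        if (err || (msg[0] & VF_MSGTYPE_FAILURE))
                return -EIO;

        return 0;
}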
++ */ ++static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features) ++{ ++ u32 msgbuf[2] = {}; ++ int err; ++ ++ switch (hw->api_version) { ++ case ixgbe_mbox_api_17: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE; ++ msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES; ++ ++ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ++ ARRAY_SIZE(msgbuf)); ++ ++ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) { ++ err = IXGBE_ERR_MBX; ++ *pf_features = 0x0; ++ } else { ++ *pf_features = msgbuf[1]; ++ } ++ ++ return err; ++} ++ + /** + * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address + * @hw: pointer to the HW structure +@@ -658,6 +743,58 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + return err; + } + ++/** ++ * ixgbe_read_vflinks - Read VFLINKS register ++ * @hw: pointer to the HW structure ++ * @speed: link speed ++ * @link_up: indicate if link is up/down ++ * ++ * Get linkup status and link speed from the VFLINKS register. ++ */ ++static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up) ++{ ++ u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS); ++ ++ /* if link status is down no point in checking to see if PF is up */ ++ if (!(vflinks & IXGBE_LINKS_UP)) { ++ *link_up = false; ++ return; ++ } ++ ++ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs ++ * before the link status is correct ++ */ ++ if (hw->mac.type == ixgbe_mac_82599_vf) { ++ for (int i = 0; i < 5; i++) { ++ udelay(100); ++ vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS); ++ ++ if (!(vflinks & IXGBE_LINKS_UP)) { ++ *link_up = false; ++ return; ++ } ++ } ++ } ++ ++ /* We reached this point so there's link */ ++ *link_up = true; ++ ++ switch (vflinks & IXGBE_LINKS_SPEED_82599) { ++ case IXGBE_LINKS_SPEED_10G_82599: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ break; ++ case IXGBE_LINKS_SPEED_1G_82599: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ case IXGBE_LINKS_SPEED_100_82599: ++ *speed = IXGBE_LINK_SPEED_100_FULL; ++ break; ++ default: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ } ++} ++ + /** + * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub. 
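The settle loop in ixgbe_read_vflinks() above exists because SFP+ modules and DA cables on 82599 can misreport link state for up to 500 us. The bounded re-read idiom in isolation (register layout illustrative):

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define LINK_UP         BIT(30) /* illustrative "link up" bit */

static bool link_is_stable(void __iomem *links_reg)
{
        if (!(readl(links_reg) & LINK_UP))
                return false;

        /* Give a settling link up to 500 us to prove itself; report
         * down as soon as any re-read loses the bit. */
        for (int i = 0; i < 5; i++) {
                udelay(100);
                if (!(readl(links_reg) & LINK_UP))
                        return false;
        }

        return true;
}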
+ * @hw: unused +@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, + bool *link_up, + bool autoneg_wait_to_complete) + { ++ struct ixgbevf_adapter *adapter = hw->back; + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val = 0; +- u32 links_reg; + u32 in_msg = 0; + + /* If we were hit with a reset drop the link */ +@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, + if (!mac->get_link_status) + goto out; + +- /* if link status is down no point in checking to see if pf is up */ +- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); +- if (!(links_reg & IXGBE_LINKS_UP)) +- goto out; +- +- /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs +- * before the link status is correct +- */ +- if (mac->type == ixgbe_mac_82599_vf) { +- int i; +- +- for (i = 0; i < 5; i++) { +- udelay(100); +- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); +- +- if (!(links_reg & IXGBE_LINKS_UP)) +- goto out; +- } +- } +- +- switch (links_reg & IXGBE_LINKS_SPEED_82599) { +- case IXGBE_LINKS_SPEED_10G_82599: +- *speed = IXGBE_LINK_SPEED_10GB_FULL; +- break; +- case IXGBE_LINKS_SPEED_1G_82599: +- *speed = IXGBE_LINK_SPEED_1GB_FULL; +- break; +- case IXGBE_LINKS_SPEED_100_82599: +- *speed = IXGBE_LINK_SPEED_100_FULL; +- break; ++ if (hw->mac.type == ixgbe_mac_e610_vf) { ++ ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up); ++ if (ret_val) ++ goto out; ++ } else { ++ ixgbe_read_vflinks(hw, speed, link_up); ++ if (*link_up == false) ++ goto out; + } + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1)) { +- if (hw->api_version >= ixgbe_mbox_api_15) ++ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) + mac->get_link_status = false; + goto out; + } +@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: ++ case ixgbe_mbox_api_16: ++ case ixgbe_mbox_api_17: + break; + default: + return 0; +@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_check_mac_link_vf, + .negotiate_api_version = ixgbevf_negotiate_api_version_vf, ++ .negotiate_features = ixgbevf_negotiate_features_vf, + .set_rar = ixgbevf_set_rar_vf, + .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_update_xcast_mode, +@@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { + .mac = ixgbe_mac_x550em_a_vf, + .mac_ops = &ixgbevf_mac_ops, + }; ++ ++const struct ixgbevf_info ixgbevf_e610_vf_info = { ++ .mac = ixgbe_mac_e610_vf, ++ .mac_ops = &ixgbevf_mac_ops, ++}; ++ ++const struct ixgbevf_info ixgbevf_e610_vf_hv_info = { ++ .mac = ixgbe_mac_e610_vf, ++ .mac_ops = &ixgbevf_hv_mac_ops, ++}; +diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h +index b4eef5b6c172bd..4f19b8900c29a3 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/vf.h ++++ b/drivers/net/ethernet/intel/ixgbevf/vf.h +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Copyright(c) 1999 - 2018 Intel Corporation. */ ++/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ + + #ifndef __IXGBE_VF_H__ + #define __IXGBE_VF_H__ +@@ -26,6 +26,7 @@ struct ixgbe_mac_operations { + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); ++ int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features); + + /* Link */ + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); +@@ -54,6 +55,8 @@ enum ixgbe_mac_type { + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, + ixgbe_mac_x550em_a_vf, ++ ixgbe_mac_e610, ++ ixgbe_mac_e610_vf, + ixgbe_num_macs + }; + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 5af932a5e70c44..3b90f257e94f85 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -4919,8 +4919,9 @@ static int rtl8169_resume(struct device *device) + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + +- /* Reportedly at least Asus X453MA truncates packets otherwise */ +- if (tp->mac_version == RTL_GIGA_MAC_VER_37) ++ /* Some chip versions may truncate packets without this initialization */ ++ if (tp->mac_version == RTL_GIGA_MAC_VER_37 || ++ tp->mac_version == RTL_GIGA_MAC_VER_46) + rtl_init_rxcfg(tp); + + return rtl8169_runtime_resume(device); +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index ec5689cd240aaf..121f1c15c67936 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -1940,13 +1940,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { + .get_regs = lan78xx_get_regs, + }; + +-static void lan78xx_init_mac_address(struct lan78xx_net *dev) ++static int lan78xx_init_mac_address(struct lan78xx_net *dev) + { + u32 addr_lo, addr_hi; + u8 addr[6]; ++ int ret; ++ ++ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); ++ if (ret < 0) ++ return ret; + +- lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); +- lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); ++ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); ++ if (ret < 0) ++ return ret; + + addr[0] = addr_lo & 0xFF; + addr[1] = (addr_lo >> 8) & 0xFF; +@@ -1979,14 +1985,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) + (addr[2] << 16) | (addr[3] << 24); + addr_hi = addr[4] | (addr[5] << 8); + +- lan78xx_write_reg(dev, RX_ADDRL, addr_lo); +- lan78xx_write_reg(dev, RX_ADDRH, addr_hi); ++ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); ++ if (ret < 0) ++ return ret; ++ ++ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); ++ if (ret < 0) ++ return ret; + } + +- lan78xx_write_reg(dev, MAF_LO(0), addr_lo); +- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); ++ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); ++ if (ret < 0) ++ return ret; ++ ++ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); ++ if (ret < 0) ++ return ret; + + eth_hw_addr_set(dev->net, addr); ++ ++ return 0; + } + + /* MDIO read and write wrappers for phylib */ +@@ -2910,8 +2928,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) + } + } while (buf & HW_CFG_LRST_); + +- lan78xx_init_mac_address(dev); +- + /* save DEVID for later usage */ + ret = lan78xx_read_reg(dev, ID_REV, &buf); + if (ret < 0) +@@ -2920,6 +2936,10 @@ static int lan78xx_reset(struct lan78xx_net *dev) + dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16; + dev->chiprev = buf & ID_REV_CHIP_REV_MASK_; + ++ ret = lan78xx_init_mac_address(dev); ++ if (ret < 0) ++ return ret; ++ + /* Respond to the IN token with a NAK */ + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + if (ret 
< 0) +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index 1e85cfe524e875..386376ceeda25d 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -10104,7 +10104,12 @@ static int __init rtl8152_driver_init(void) + ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE); + if (ret) + return ret; +- return usb_register(&rtl8152_driver); ++ ++ ret = usb_register(&rtl8152_driver); ++ if (ret) ++ usb_deregister_device_driver(&rtl8152_cfgselector_driver); ++ ++ return ret; + } + + static void __exit rtl8152_driver_exit(void) +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 119afdfe4b91e9..57416bbf9344f0 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -131,12 +131,14 @@ void nvme_mpath_start_request(struct request *rq) + struct nvme_ns *ns = rq->q->queuedata; + struct gendisk *disk = ns->head->disk; + +- if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) { ++ if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) && ++ !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) { + atomic_inc(&ns->ctrl->nr_active); + nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE; + } + +- if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) ++ if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) || ++ (nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) + return; + + nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; +diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c +index f76a358e2b5b6f..753a4c615781f1 100644 +--- a/drivers/pci/controller/cadence/pci-j721e.c ++++ b/drivers/pci/controller/cadence/pci-j721e.c +@@ -48,6 +48,7 @@ enum link_status { + #define J721E_MODE_RC BIT(7) + #define LANE_COUNT(n) ((n) << 8) + ++#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0) + #define GENERATION_SEL_MASK GENMASK(1, 0) + + struct j721e_pcie { +@@ -225,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, + return ret; + } + ++static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie, ++ struct regmap *syscon) ++{ ++ struct device *dev = pcie->cdns_pcie->dev; ++ struct device_node *node = dev->of_node; ++ u32 mask = ACSPCIE_PAD_DISABLE_MASK; ++ struct of_phandle_args args; ++ u32 val; ++ int ret; ++ ++ ret = of_parse_phandle_with_fixed_args(node, ++ "ti,syscon-acspcie-proxy-ctrl", ++ 1, 0, &args); ++ if (ret) { ++ dev_err(dev, ++ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n"); ++ return ret; ++ } ++ ++ /* Clear PAD IO disable bits to enable refclk output */ ++ val = ~(args.args[0]); ++ ret = regmap_update_bits(syscon, 0, mask, val); ++ if (ret) { ++ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ + static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) + { + struct device *dev = pcie->cdns_pcie->dev; +@@ -246,6 +277,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) + if (!ret) + offset = args.args[0]; + ++ /* ++ * The PCIe Controller's registers have different "reset-values" ++ * depending on the "strap" settings programmed into the PCIEn_CTRL ++ * register within the CTRL_MMR memory-mapped register space. ++ * The registers latch onto a "reset-value" based on the "strap" ++ * settings sampled after the PCIe Controller is powered on. ++ * To ensure that the "reset-values" are sampled accurately, power ++ * off the PCIe Controller before programming the "strap" settings ++ * and power it on after that. 
The runtime PM APIs namely ++ * pm_runtime_put_sync() and pm_runtime_get_sync() will decrement and ++ * increment the usage counter respectively, causing GENPD to power off ++ * and power on the PCIe Controller. ++ */ ++ ret = pm_runtime_put_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "Failed to power off PCIe Controller\n"); ++ return ret; ++ } ++ + ret = j721e_pcie_set_mode(pcie, syscon, offset); + if (ret < 0) { + dev_err(dev, "Failed to set pci mode\n"); +@@ -264,7 +314,19 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) + return ret; + } + +- return 0; ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "Failed to power on PCIe Controller\n"); ++ return ret; ++ } ++ ++ /* Enable ACSPCIE refclk output if the optional property exists */ ++ syscon = syscon_regmap_lookup_by_phandle_optional(node, ++ "ti,syscon-acspcie-proxy-ctrl"); ++ if (!syscon) ++ return 0; ++ ++ return j721e_enable_acspcie_refclk(pcie, syscon); + } + + static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn, +diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c +index c7d3e248a59a20..ce2a7a6dab90c5 100644 +--- a/drivers/pci/controller/dwc/pcie-tegra194.c ++++ b/drivers/pci/controller/dwc/pcie-tegra194.c +@@ -1963,6 +1963,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) + return IRQ_HANDLED; + } + ++static void tegra_pcie_ep_init(struct dw_pcie_ep *ep) ++{ ++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep); ++ enum pci_barno bar; ++ ++ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) ++ dw_pcie_ep_reset_bar(pci, bar); ++}; ++ + static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) + { + /* Tegra194 supports only INTA */ +@@ -2036,6 +2045,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) + } + + static const struct dw_pcie_ep_ops pcie_ep_ops = { ++ .ep_init = tegra_pcie_ep_init, + .raise_irq = tegra_pcie_ep_raise_irq, + .get_features = tegra_pcie_ep_get_features, + }; +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c +index 449d42744d336f..300caafcfa1009 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -186,9 +186,15 @@ static ssize_t max_link_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct pci_dev *pdev = to_pci_dev(dev); ++ ssize_t ret; ++ ++ /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. 
*/ ++ pci_config_pm_runtime_get(pdev); ++ ret = sysfs_emit(buf, "%s\n", ++ pci_speed_string(pcie_get_speed_cap(pdev))); ++ pci_config_pm_runtime_put(pdev); + +- return sysfs_emit(buf, "%s\n", +- pci_speed_string(pcie_get_speed_cap(pdev))); ++ return ret; + } + static DEVICE_ATTR_RO(max_link_speed); + +diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c +index dddb66de6dba15..8d93a830ab8bff 100644 +--- a/drivers/phy/cadence/cdns-dphy.c ++++ b/drivers/phy/cadence/cdns-dphy.c +@@ -30,6 +30,7 @@ + + #define DPHY_CMN_SSM DPHY_PMA_CMN(0x20) + #define DPHY_CMN_SSM_EN BIT(0) ++#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1) + #define DPHY_CMN_TX_MODE_EN BIT(9) + + #define DPHY_CMN_PWM DPHY_PMA_CMN(0x40) +@@ -79,6 +80,7 @@ struct cdns_dphy_cfg { + u8 pll_ipdiv; + u8 pll_opdiv; + u16 pll_fbdiv; ++ u32 hs_clk_rate; + unsigned int nlanes; + }; + +@@ -99,6 +101,8 @@ struct cdns_dphy_ops { + void (*set_pll_cfg)(struct cdns_dphy *dphy, + const struct cdns_dphy_cfg *cfg); + unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy); ++ int (*wait_for_pll_lock)(struct cdns_dphy *dphy); ++ int (*wait_for_cmn_ready)(struct cdns_dphy *dphy); + }; + + struct cdns_dphy { +@@ -108,6 +112,8 @@ struct cdns_dphy { + struct clk *pll_ref_clk; + const struct cdns_dphy_ops *ops; + struct phy *phy; ++ bool is_configured; ++ bool is_powered; + }; + + /* Order of bands is important since the index is the band number. */ +@@ -154,6 +160,9 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy, + cfg->pll_ipdiv, + pll_ref_hz); + ++ cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv, ++ 2 * cfg->pll_opdiv * cfg->pll_ipdiv); ++ + return 0; + } + +@@ -191,6 +200,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy) + return dphy->ops->get_wakeup_time_ns(dphy); + } + ++static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy) ++{ ++ return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0; ++} ++ ++static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy) ++{ ++ return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0; ++} ++ + static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy) + { + /* Default wakeup time is 800 ns (in a simulated environment). 
*/ +@@ -232,7 +251,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy) + static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy, + const struct cdns_dphy_cfg *cfg) + { +- u32 status; + + /* + * set the PWM and PLL Byteclk divider settings to recommended values +@@ -249,13 +267,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy, + + writel(DPHY_TX_J721E_WIZ_LANE_RSTB, + dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL); +- +- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status, +- (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US); +- +- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status, +- (status & DPHY_TX_WIZ_O_CMN_READY), 0, +- POLL_TIMEOUT_US); + } + + static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div) +@@ -263,6 +274,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div) + writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ); + } + ++static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy) ++{ ++ u32 status; ++ ++ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status, ++ status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US); ++} ++ ++static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy) ++{ ++ u32 status; ++ ++ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status, ++ status & DPHY_TX_WIZ_O_CMN_READY, 0, ++ POLL_TIMEOUT_US); ++} ++ + /* + * This is the reference implementation of DPHY hooks. Specific integration of + * this IP may have to re-implement some of them depending on how they decided +@@ -278,6 +306,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = { + .get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns, + .set_pll_cfg = cdns_dphy_j721e_set_pll_cfg, + .set_psm_div = cdns_dphy_j721e_set_psm_div, ++ .wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock, ++ .wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready, + }; + + static int cdns_dphy_config_from_opts(struct phy *phy, +@@ -297,6 +327,7 @@ static int cdns_dphy_config_from_opts(struct phy *phy, + if (ret) + return ret; + ++ opts->hs_clk_rate = cfg->hs_clk_rate; + opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000; + + return 0; +@@ -334,21 +365,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode, + static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts) + { + struct cdns_dphy *dphy = phy_get_drvdata(phy); +- struct cdns_dphy_cfg cfg = { 0 }; +- int ret, band_ctrl; +- unsigned int reg; ++ int ret; + +- ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg); +- if (ret) +- return ret; ++ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg); ++ if (!ret) ++ dphy->is_configured = true; ++ ++ return ret; ++} ++ ++static int cdns_dphy_power_on(struct phy *phy) ++{ ++ struct cdns_dphy *dphy = phy_get_drvdata(phy); ++ int ret; ++ u32 reg; ++ ++ if (!dphy->is_configured || dphy->is_powered) ++ return -EINVAL; ++ ++ clk_prepare_enable(dphy->psm_clk); ++ clk_prepare_enable(dphy->pll_ref_clk); + + /* + * Configure the internal PSM clk divider so that the DPHY has a + * 1MHz clk (or something close). 
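The wait_for_pll_lock()/wait_for_cmn_ready() hooks added above are thin wrappers over readl_poll_timeout(). The shape of such a wrapper, with an illustrative offset, mask and timeout:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define PLL_CTRL        0x10    /* illustrative register offset */
#define PLL_LOCK        BIT(31) /* illustrative lock bit */
#define POLL_TIMEOUT_US 1000

/* Busy-poll PLL_CTRL until the lock bit asserts; readl_poll_timeout()
 * returns 0 on success or -ETIMEDOUT if the bit never appears. */
static int dphy_wait_pll_lock(void __iomem *regs)
{
        u32 status;

        return readl_poll_timeout(regs + PLL_CTRL, status,
                                  status & PLL_LOCK, 0, POLL_TIMEOUT_US);
}

Moving these waits out of set_pll_cfg() and into power_on() lets the driver report a lock failure to the caller instead of timing out silently during configuration.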
+ */ + ret = cdns_dphy_setup_psm(dphy); +- if (ret) +- return ret; ++ if (ret) { ++ dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret); ++ goto err_power_on; ++ } + + /* + * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes +@@ -363,40 +409,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts) + * Configure the DPHY PLL that will be used to generate the TX byte + * clk. + */ +- cdns_dphy_set_pll_cfg(dphy, &cfg); ++ cdns_dphy_set_pll_cfg(dphy, &dphy->cfg); + +- band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate); +- if (band_ctrl < 0) +- return band_ctrl; ++ ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate); ++ if (ret < 0) { ++ dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret); ++ goto err_power_on; ++ } + +- reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) | +- FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl); ++ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) | ++ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret); + writel(reg, dphy->regs + DPHY_BAND_CFG); + +- return 0; +-} ++ /* Start TX state machine. */ ++ reg = readl(dphy->regs + DPHY_CMN_SSM); ++ writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN, ++ dphy->regs + DPHY_CMN_SSM); + +-static int cdns_dphy_power_on(struct phy *phy) +-{ +- struct cdns_dphy *dphy = phy_get_drvdata(phy); ++ ret = cdns_dphy_wait_for_pll_lock(dphy); ++ if (ret) { ++ dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret); ++ goto err_power_on; ++ } + +- clk_prepare_enable(dphy->psm_clk); +- clk_prepare_enable(dphy->pll_ref_clk); ++ ret = cdns_dphy_wait_for_cmn_ready(dphy); ++ if (ret) { ++ dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n", ++ ret); ++ goto err_power_on; ++ } + +- /* Start TX state machine. */ +- writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN, +- dphy->regs + DPHY_CMN_SSM); ++ dphy->is_powered = true; + + return 0; ++ ++err_power_on: ++ clk_disable_unprepare(dphy->pll_ref_clk); ++ clk_disable_unprepare(dphy->psm_clk); ++ ++ return ret; + } + + static int cdns_dphy_power_off(struct phy *phy) + { + struct cdns_dphy *dphy = phy_get_drvdata(phy); ++ u32 reg; + + clk_disable_unprepare(dphy->pll_ref_clk); + clk_disable_unprepare(dphy->psm_clk); + ++ /* Stop TX state machine. */ ++ reg = readl(dphy->regs + DPHY_CMN_SSM); ++ writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM); ++ ++ dphy->is_powered = false; ++ + return 0; + } + +diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c +index f616059c5e1e4c..a1adfd077c15be 100644 +--- a/drivers/usb/gadget/function/f_acm.c ++++ b/drivers/usb/gadget/function/f_acm.c +@@ -11,12 +11,15 @@ + + /* #define VERBOSE_DEBUG */ + ++#include + #include + #include + #include + #include + #include + ++#include ++ + #include "u_serial.h" + + +@@ -612,6 +615,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) + struct usb_string *us; + int status; + struct usb_ep *ep; ++ struct usb_request *request __free(free_usb_request) = NULL; + + /* REVISIT might want instance-specific strings to help + * distinguish instances ... 
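The f_acm, f_ecm, f_ncm and f_rndis rewrites in this series all lean on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer annotated with __free() is released automatically on every early return, and no_free_ptr() hands ownership out on the success path, so the explicit "fail:" unwind labels disappear. A self-contained sketch of the idiom using a kfree()-based class (the gadget code uses its own free_usb_request class, taken here as a given):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

DEFINE_FREE(kfree_buf, void *, if (_T) kfree(_T))

struct ctx {
        void *buf;
};

static int ctx_setup(struct ctx *ctx, bool ok)
{
        void *buf __free(kfree_buf) = kzalloc(64, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        if (!ok)
                return -EINVAL; /* buf is kfree()d automatically here */

        ctx->buf = no_free_ptr(buf);    /* success: keep the allocation */
        return 0;
}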
+@@ -629,7 +633,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) + /* allocate instance-specific interface IDs, and patch descriptors */ + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + acm->ctrl_id = status; + acm_iad_descriptor.bFirstInterface = status; + +@@ -638,40 +642,38 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) + + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + acm->data_id = status; + + acm_data_interface_desc.bInterfaceNumber = status; + acm_union_desc.bSlaveInterface0 = status; + acm_call_mgmt_descriptor.bDataInterface = status; + +- status = -ENODEV; +- + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + acm->port.in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + acm->port.out = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + acm->notify = ep; + + /* allocate notification */ +- acm->notify_req = gs_alloc_req(ep, +- sizeof(struct usb_cdc_notification) + 2, +- GFP_KERNEL); +- if (!acm->notify_req) +- goto fail; ++ request = gs_alloc_req(ep, ++ sizeof(struct usb_cdc_notification) + 2, ++ GFP_KERNEL); ++ if (!request) ++ return -ENODEV; + +- acm->notify_req->complete = acm_cdc_notify_complete; +- acm->notify_req->context = acm; ++ request->complete = acm_cdc_notify_complete; ++ request->context = acm; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at +@@ -688,7 +690,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) + status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, + acm_ss_function, acm_ss_function); + if (status) +- goto fail; ++ return status; ++ ++ acm->notify_req = no_free_ptr(request); + + dev_dbg(&cdev->gadget->dev, + "acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n", +@@ -696,14 +700,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) + acm->port.in->name, acm->port.out->name, + acm->notify->name); + return 0; +- +-fail: +- if (acm->notify_req) +- gs_free_req(acm->notify, acm->notify_req); +- +- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); +- +- return status; + } + + static void acm_unbind(struct usb_configuration *c, struct usb_function *f) +diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c +index 2afc30de54ce2d..7bb63b9e3f78d9 100644 +--- a/drivers/usb/gadget/function/f_ecm.c ++++ b/drivers/usb/gadget/function/f_ecm.c +@@ -8,12 +8,15 @@ + + /* #define VERBOSE_DEBUG */ + ++#include + #include + #include + #include + #include + #include + ++#include ++ + #include "u_ether.h" + #include "u_ether_configfs.h" + #include "u_ecm.h" +@@ -678,6 +681,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + struct usb_ep *ep; + + struct f_ecm_opts *ecm_opts; ++ struct usb_request *request __free(free_usb_request) = NULL; + + if (!can_support_ecm(cdev->gadget)) + return -EINVAL; +@@ -711,7 +715,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + ecm->ctrl_id = status; + ecm_iad_descriptor.bFirstInterface = status; + +@@ -720,24 +724,22 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + + status = 
usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + ecm->data_id = status; + + ecm_data_nop_intf.bInterfaceNumber = status; + ecm_data_intf.bInterfaceNumber = status; + ecm_union_desc.bSlaveInterface0 = status; + +- status = -ENODEV; +- + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + ecm->port.in_ep = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + ecm->port.out_ep = ep; + + /* NOTE: a status/notification endpoint is *OPTIONAL* but we +@@ -746,20 +748,18 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + ecm->notify = ep; + +- status = -ENOMEM; +- + /* allocate notification request and buffer */ +- ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); +- if (!ecm->notify_req) +- goto fail; +- ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL); +- if (!ecm->notify_req->buf) +- goto fail; +- ecm->notify_req->context = ecm; +- ecm->notify_req->complete = ecm_notify_complete; ++ request = usb_ep_alloc_request(ep, GFP_KERNEL); ++ if (!request) ++ return -ENOMEM; ++ request->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL); ++ if (!request->buf) ++ return -ENOMEM; ++ request->context = ecm; ++ request->complete = ecm_notify_complete; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at +@@ -778,7 +778,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, + ecm_ss_function, ecm_ss_function); + if (status) +- goto fail; ++ return status; + + /* NOTE: all that is done without knowing or caring about + * the network link ... 
which is unavailable to this code +@@ -788,20 +788,12 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + ecm->port.open = ecm_open; + ecm->port.close = ecm_close; + ++ ecm->notify_req = no_free_ptr(request); ++ + DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n", + ecm->port.in_ep->name, ecm->port.out_ep->name, + ecm->notify->name); + return 0; +- +-fail: +- if (ecm->notify_req) { +- kfree(ecm->notify_req->buf); +- usb_ep_free_request(ecm->notify, ecm->notify_req); +- } +- +- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +- +- return status; + } + + static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item) +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index f5731d465cd7b3..7aad737901e833 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -11,6 +11,7 @@ + * Copyright (C) 2008 Nokia Corporation + */ + ++#include + #include + #include + #include +@@ -19,6 +20,7 @@ + #include + + #include ++#include + + #include "u_ether.h" + #include "u_ether_configfs.h" +@@ -1422,18 +1424,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + struct usb_ep *ep; + struct f_ncm_opts *ncm_opts; + ++ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; ++ struct usb_request *request __free(free_usb_request) = NULL; ++ + if (!can_support_ecm(cdev->gadget)) + return -EINVAL; + + ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); + + if (cdev->use_os_string) { +- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), +- GFP_KERNEL); +- if (!f->os_desc_table) ++ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL); ++ if (!os_desc_table) + return -ENOMEM; +- f->os_desc_n = 1; +- f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc; + } + + mutex_lock(&ncm_opts->lock); +@@ -1443,16 +1445,15 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + mutex_unlock(&ncm_opts->lock); + + if (status) +- goto fail; ++ return status; + + ncm_opts->bound = true; + + us = usb_gstrings_attach(cdev, ncm_strings, + ARRAY_SIZE(ncm_string_defs)); +- if (IS_ERR(us)) { +- status = PTR_ERR(us); +- goto fail; +- } ++ if (IS_ERR(us)) ++ return PTR_ERR(us); ++ + ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id; + ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id; + ncm_data_intf.iInterface = us[STRING_DATA_IDX].id; +@@ -1462,55 +1463,47 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + ncm->ctrl_id = status; + ncm_iad_desc.bFirstInterface = status; + + ncm_control_intf.bInterfaceNumber = status; + ncm_union_desc.bMasterInterface0 = status; + +- if (cdev->use_os_string) +- f->os_desc_table[0].if_id = +- ncm_iad_desc.bFirstInterface; +- + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + ncm->data_id = status; + + ncm_data_nop_intf.bInterfaceNumber = status; + ncm_data_intf.bInterfaceNumber = status; + ncm_union_desc.bSlaveInterface0 = status; + +- status = -ENODEV; +- + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + ncm->port.in_ep = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + ncm->port.out_ep = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc); + if (!ep) +- 
goto fail; ++ return -ENODEV; + ncm->notify = ep; + +- status = -ENOMEM; +- + /* allocate notification request and buffer */ +- ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); +- if (!ncm->notify_req) +- goto fail; +- ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL); +- if (!ncm->notify_req->buf) +- goto fail; +- ncm->notify_req->context = ncm; +- ncm->notify_req->complete = ncm_notify_complete; ++ request = usb_ep_alloc_request(ep, GFP_KERNEL); ++ if (!request) ++ return -ENOMEM; ++ request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL); ++ if (!request->buf) ++ return -ENOMEM; ++ request->context = ncm; ++ request->complete = ncm_notify_complete; + + /* + * support all relevant hardware speeds... we expect that when +@@ -1530,7 +1523,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, + ncm_ss_function, ncm_ss_function); + if (status) +- goto fail; ++ return status; + + /* + * NOTE: all that is done without knowing or caring about +@@ -1544,23 +1537,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); + ncm->task_timer.function = ncm_tx_timeout; + ++ if (cdev->use_os_string) { ++ os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc; ++ os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface; ++ f->os_desc_table = no_free_ptr(os_desc_table); ++ f->os_desc_n = 1; ++ } ++ ncm->notify_req = no_free_ptr(request); ++ + DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n", + ncm->port.in_ep->name, ncm->port.out_ep->name, + ncm->notify->name); + return 0; +- +-fail: +- kfree(f->os_desc_table); +- f->os_desc_n = 0; +- +- if (ncm->notify_req) { +- kfree(ncm->notify_req->buf); +- usb_ep_free_request(ncm->notify, ncm->notify_req); +- } +- +- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +- +- return status; + } + + static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item) +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c +index b47f99d17ee9a5..ef7e734511ab2e 100644 +--- a/drivers/usb/gadget/function/f_rndis.c ++++ b/drivers/usb/gadget/function/f_rndis.c +@@ -19,6 +19,8 @@ + + #include + ++#include ++ + #include "u_ether.h" + #include "u_ether_configfs.h" + #include "u_rndis.h" +@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + struct usb_ep *ep; + + struct f_rndis_opts *rndis_opts; ++ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; ++ struct usb_request *request __free(free_usb_request) = NULL; + + if (!can_support_rndis(c)) + return -EINVAL; +@@ -669,12 +673,9 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst); + + if (cdev->use_os_string) { +- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), +- GFP_KERNEL); +- if (!f->os_desc_table) ++ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL); ++ if (!os_desc_table) + return -ENOMEM; +- f->os_desc_n = 1; +- f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; + } + + rndis_iad_descriptor.bFunctionClass = rndis_opts->class; +@@ -692,16 +693,14 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + gether_set_gadget(rndis_opts->net, cdev->gadget); + status = gether_register_netdev(rndis_opts->net); + if (status) +- goto fail; ++ return status; + rndis_opts->bound = true; + } + + us = usb_gstrings_attach(cdev, 
rndis_strings, + ARRAY_SIZE(rndis_string_defs)); +- if (IS_ERR(us)) { +- status = PTR_ERR(us); +- goto fail; +- } ++ if (IS_ERR(us)) ++ return PTR_ERR(us); + rndis_control_intf.iInterface = us[0].id; + rndis_data_intf.iInterface = us[1].id; + rndis_iad_descriptor.iFunction = us[2].id; +@@ -709,36 +708,30 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + rndis->ctrl_id = status; + rndis_iad_descriptor.bFirstInterface = status; + + rndis_control_intf.bInterfaceNumber = status; + rndis_union_desc.bMasterInterface0 = status; + +- if (cdev->use_os_string) +- f->os_desc_table[0].if_id = +- rndis_iad_descriptor.bFirstInterface; +- + status = usb_interface_id(c, f); + if (status < 0) +- goto fail; ++ return status; + rndis->data_id = status; + + rndis_data_intf.bInterfaceNumber = status; + rndis_union_desc.bSlaveInterface0 = status; + +- status = -ENODEV; +- + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + rndis->port.in_ep = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + rndis->port.out_ep = ep; + + /* NOTE: a status/notification endpoint is, strictly speaking, +@@ -747,21 +740,19 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc); + if (!ep) +- goto fail; ++ return -ENODEV; + rndis->notify = ep; + +- status = -ENOMEM; +- + /* allocate notification request and buffer */ +- rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); +- if (!rndis->notify_req) +- goto fail; +- rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL); +- if (!rndis->notify_req->buf) +- goto fail; +- rndis->notify_req->length = STATUS_BYTECOUNT; +- rndis->notify_req->context = rndis; +- rndis->notify_req->complete = rndis_response_complete; ++ request = usb_ep_alloc_request(ep, GFP_KERNEL); ++ if (!request) ++ return -ENOMEM; ++ request->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL); ++ if (!request->buf) ++ return -ENOMEM; ++ request->length = STATUS_BYTECOUNT; ++ request->context = rndis; ++ request->complete = rndis_response_complete; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at +@@ -778,7 +769,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, + eth_ss_function, eth_ss_function); + if (status) +- goto fail; ++ return status; + + rndis->port.open = rndis_open; + rndis->port.close = rndis_close; +@@ -789,9 +780,18 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + if (rndis->manufacturer && rndis->vendorID && + rndis_set_param_vendor(rndis->params, rndis->vendorID, + rndis->manufacturer)) { +- status = -EINVAL; +- goto fail_free_descs; ++ usb_free_all_descriptors(f); ++ return -EINVAL; ++ } ++ ++ if (cdev->use_os_string) { ++ os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; ++ os_desc_table[0].if_id = rndis_iad_descriptor.bFirstInterface; ++ f->os_desc_table = no_free_ptr(os_desc_table); ++ f->os_desc_n = 1; ++ + } ++ rndis->notify_req = no_free_ptr(request); + + /* NOTE: all that is done without knowing or caring about + * the network link ... 
which is unavailable to this code +@@ -802,21 +802,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + rndis->port.in_ep->name, rndis->port.out_ep->name, + rndis->notify->name); + return 0; +- +-fail_free_descs: +- usb_free_all_descriptors(f); +-fail: +- kfree(f->os_desc_table); +- f->os_desc_n = 0; +- +- if (rndis->notify_req) { +- kfree(rndis->notify_req->buf); +- usb_ep_free_request(rndis->notify, rndis->notify_req); +- } +- +- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +- +- return status; + } + + void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net) +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index a4120a25428e5d..25bbb7a440ce28 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -194,6 +194,9 @@ struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, + + req = ep->ops->alloc_request(ep, gfp_flags); + ++ if (req) ++ req->ep = ep; ++ + trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM); + + return req; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 88ba277bc3a79e..2d6ccc21a8229f 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -985,7 +985,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl, + { + const u64 ra_pos = readahead_pos(ractl); + const u64 ra_end = ra_pos + readahead_length(ractl); +- const u64 em_end = em->start + em->ram_bytes; ++ const u64 em_end = em->start + em->len; + + /* No expansion for holes and inline extents. */ + if (em->block_start > EXTENT_MAP_LAST_BYTE) +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c +index 8efe3a9369df04..1fb635a9997600 100644 +--- a/fs/btrfs/free-space-tree.c ++++ b/fs/btrfs/free-space-tree.c +@@ -1108,14 +1108,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, + * If ret is 1 (no key found), it means this is an empty block group, + * without any extents allocated from it and there's no block group + * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree +- * because we are using the block group tree feature, so block group +- * items are stored in the block group tree. It also means there are no +- * extents allocated for block groups with a start offset beyond this +- * block group's end offset (this is the last, highest, block group). ++ * because we are using the block group tree feature (so block group ++ * items are stored in the block group tree) or this is a new block ++ * group created in the current transaction and its block group item ++ * was not yet inserted in the extent tree (that happens in ++ * btrfs_create_pending_block_groups() -> insert_block_group_item()). ++ * It also means there are no extents allocated for block groups with a ++ * start offset beyond this block group's end offset (this is the last, ++ * highest, block group). + */ +- if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE)) +- ASSERT(ret == 0); +- + start = block_group->start; + end = block_group->start + block_group->length; + while (ret == 0) { +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index 8cc1f4b832773e..b21b5e0f89054f 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -3877,6 +3877,7 @@ static noinline_for_stack struct inode *create_reloc_inode( + /* + * Mark start of chunk relocation that is cancellable. Check if the cancellation + * has been requested meanwhile and don't start in that case. ++ * NOTE: if this returns an error, reloc_chunk_end() must not be called. 
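The relocation fix above enforces a strict start/end pairing: a failed reloc_chunk_start() has already cleared the RUNNING bit itself, so the caller must skip reloc_chunk_end() on that path, which is why the error labels in btrfs_relocate_block_group() and btrfs_recover_relocation() move. The calling convention as a sketch (names illustrative):

struct fs_ctx;
int reloc_start(struct fs_ctx *fs);     /* assumed helpers */
int do_relocation(struct fs_ctx *fs);
void reloc_end(struct fs_ctx *fs);

static int relocate_one_group(struct fs_ctx *fs)
{
        int ret = reloc_start(fs);

        if (ret)
                return ret;     /* no reloc_end() after a failed start */

        ret = do_relocation(fs);
        reloc_end(fs);          /* pairs only with a successful start */
        return ret;
}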
+ * + * Return: + * 0 success +@@ -3893,10 +3894,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info) + + if (atomic_read(&fs_info->reloc_cancel_req) > 0) { + btrfs_info(fs_info, "chunk relocation canceled on start"); +- /* +- * On cancel, clear all requests but let the caller mark +- * the end after cleanup operations. +- */ ++ /* On cancel, clear all requests. */ ++ clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); + atomic_set(&fs_info->reloc_cancel_req, 0); + return -ECANCELED; + } +@@ -3905,9 +3904,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info) + + /* + * Mark end of chunk relocation that is cancellable and wake any waiters. ++ * NOTE: call only if a previous call to reloc_chunk_start() succeeded. + */ + static void reloc_chunk_end(struct btrfs_fs_info *fs_info) + { ++ ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)); + /* Requested after start, clear bit first so any waiters can continue */ + if (atomic_read(&fs_info->reloc_cancel_req) > 0) + btrfs_info(fs_info, "chunk relocation canceled during operation"); +@@ -4119,9 +4120,9 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) + if (err && rw) + btrfs_dec_block_group_ro(rc->block_group); + iput(rc->data_inode); ++ reloc_chunk_end(fs_info); + out_put_bg: + btrfs_put_block_group(bg); +- reloc_chunk_end(fs_info); + free_reloc_control(rc); + return err; + } +@@ -4311,8 +4312,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info) + err = ret; + out_unset: + unset_reloc_control(rc); +-out_end: + reloc_chunk_end(fs_info); ++out_end: + free_reloc_control(rc); + out: + free_reloc_roots(&reloc_roots); +diff --git a/fs/dax.c b/fs/dax.c +index 8c09578fa03573..e1451efaab1403 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -1578,7 +1578,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, + if (iov_iter_rw(iter) == WRITE) { + lockdep_assert_held_write(&iomi.inode->i_rwsem); + iomi.flags |= IOMAP_WRITE; +- } else { ++ } else if (!sb_rdonly(iomi.inode->i_sb)) { + lockdep_assert_held(&iomi.inode->i_rwsem); + } + +diff --git a/fs/dcache.c b/fs/dcache.c +index 4030c010a76820..74d49b2b3b6e22 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1861,6 +1861,8 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) + __dget_dlock(parent); + dentry->d_parent = parent; + list_add(&dentry->d_child, &parent->d_subdirs); ++ if (parent->d_flags & DCACHE_DISCONNECTED) ++ dentry->d_flags |= DCACHE_DISCONNECTED; + spin_unlock(&parent->d_lock); + + return dentry; +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 6b2d655c1cefcd..1e80a9b1d1263f 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -45,10 +45,10 @@ + * + * 1) epnested_mutex (mutex) + * 2) ep->mtx (mutex) +- * 3) ep->lock (rwlock) ++ * 3) ep->lock (spinlock) + * + * The acquire order is the one listed above, from 1 to 3. +- * We need a rwlock (ep->lock) because we manipulate objects ++ * We need a spinlock (ep->lock) because we manipulate objects + * from inside the poll callback, that might be triggered from + * a wake_up() that in turn might be called from IRQ context. 
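Because the wakeup callback can fire from hard-IRQ context, every ready-list update must take the lock with interrupts disabled and keep the critical section small and non-sleeping. A generic sketch of such an enqueue (not the epoll code itself; detached nodes are assumed to be kept list_del_init()ed so that list_empty() means "not queued"):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ready_lock);
static LIST_HEAD(ready_list);

/* Safe from process, softirq and hard-IRQ context alike: irqsave
 * prevents self-deadlock when an interrupt fires on this CPU while
 * the lock is held. */
static void enqueue_ready(struct list_head *item)
{
        unsigned long flags;

        spin_lock_irqsave(&ready_lock, flags);
        if (list_empty(item))           /* not already queued */
                list_add_tail(item, &ready_list);
        spin_unlock_irqrestore(&ready_lock, flags);
}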
+ * So we can't sleep inside the poll callback and hence we need +@@ -194,7 +194,7 @@ struct eventpoll { + struct list_head rdllist; + + /* Lock which protects rdllist and ovflist */ +- rwlock_t lock; ++ spinlock_t lock; + + /* RB tree root used to store monitored fd structs */ + struct rb_root_cached rbr; +@@ -206,7 +206,7 @@ struct eventpoll { + */ + struct epitem *ovflist; + +- /* wakeup_source used when ep_scan_ready_list is running */ ++ /* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */ + struct wakeup_source *ws; + + /* The user that created the eventpoll descriptor */ +@@ -625,10 +625,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist) + * in a lockless way. + */ + lockdep_assert_irqs_enabled(); +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + list_splice_init(&ep->rdllist, txlist); + WRITE_ONCE(ep->ovflist, NULL); +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + } + + static void ep_done_scan(struct eventpoll *ep, +@@ -636,7 +636,7 @@ static void ep_done_scan(struct eventpoll *ep, + { + struct epitem *epi, *nepi; + +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + /* + * During the time we spent inside the "sproc" callback, some + * other events might have been queued by the poll callback. +@@ -677,7 +677,7 @@ static void ep_done_scan(struct eventpoll *ep, + wake_up(&ep->wq); + } + +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + } + + static void epi_rcu_free(struct rcu_head *head) +@@ -757,10 +757,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) + + rb_erase_cached(&epi->rbn, &ep->rbr); + +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + if (ep_is_linked(epi)) + list_del_init(&epi->rdllink); +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + + wakeup_source_unregister(ep_wakeup_source(epi)); + /* +@@ -1018,7 +1018,7 @@ static int ep_alloc(struct eventpoll **pep) + return -ENOMEM; + + mutex_init(&ep->mtx); +- rwlock_init(&ep->lock); ++ spin_lock_init(&ep->lock); + init_waitqueue_head(&ep->wq); + init_waitqueue_head(&ep->poll_wait); + INIT_LIST_HEAD(&ep->rdllist); +@@ -1105,100 +1105,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, + } + #endif /* CONFIG_KCMP */ + +-/* +- * Adds a new entry to the tail of the list in a lockless way, i.e. +- * multiple CPUs are allowed to call this function concurrently. +- * +- * Beware: it is necessary to prevent any other modifications of the +- * existing list until all changes are completed, in other words +- * concurrent list_add_tail_lockless() calls should be protected +- * with a read lock, where write lock acts as a barrier which +- * makes sure all list_add_tail_lockless() calls are fully +- * completed. +- * +- * Also an element can be locklessly added to the list only in one +- * direction i.e. either to the tail or to the head, otherwise +- * concurrent access will corrupt the list. +- * +- * Return: %false if element has been already added to the list, %true +- * otherwise. +- */ +-static inline bool list_add_tail_lockless(struct list_head *new, +- struct list_head *head) +-{ +- struct list_head *prev; +- +- /* +- * This is simple 'new->next = head' operation, but cmpxchg() +- * is used in order to detect that same element has been just +- * added to the list from another CPU: the winner observes +- * new->next == new. 
+- */ +- if (!try_cmpxchg(&new->next, &new, head)) +- return false; +- +- /* +- * Initially ->next of a new element must be updated with the head +- * (we are inserting to the tail) and only then pointers are atomically +- * exchanged. XCHG guarantees memory ordering, thus ->next should be +- * updated before pointers are actually swapped and pointers are +- * swapped before prev->next is updated. +- */ +- +- prev = xchg(&head->prev, new); +- +- /* +- * It is safe to modify prev->next and new->prev, because a new element +- * is added only to the tail and new->next is updated before XCHG. +- */ +- +- prev->next = new; +- new->prev = prev; +- +- return true; +-} +- +-/* +- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way, +- * i.e. multiple CPUs are allowed to call this function concurrently. +- * +- * Return: %false if epi element has been already chained, %true otherwise. +- */ +-static inline bool chain_epi_lockless(struct epitem *epi) +-{ +- struct eventpoll *ep = epi->ep; +- +- /* Fast preliminary check */ +- if (epi->next != EP_UNACTIVE_PTR) +- return false; +- +- /* Check that the same epi has not been just chained from another CPU */ +- if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR) +- return false; +- +- /* Atomically exchange tail */ +- epi->next = xchg(&ep->ovflist, epi); +- +- return true; +-} +- + /* + * This is the callback that is passed to the wait queue wakeup + * mechanism. It is called by the stored file descriptors when they + * have events to report. +- * +- * This callback takes a read lock in order not to contend with concurrent +- * events from another file descriptor, thus all modifications to ->rdllist +- * or ->ovflist are lockless. Read lock is paired with the write lock from +- * ep_scan_ready_list(), which stops all list modifications and guarantees +- * that lists state is seen correctly. +- * +- * Another thing worth to mention is that ep_poll_callback() can be called +- * concurrently for the same @epi from different CPUs if poll table was inited +- * with several wait queues entries. Plural wakeup from different CPUs of a +- * single wait queue is serialized by wq.lock, but the case when multiple wait +- * queues are used should be detected accordingly. This is detected using +- * cmpxchg() operation. + */ + static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) + { +@@ -1209,7 +1119,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v + unsigned long flags; + int ewake = 0; + +- read_lock_irqsave(&ep->lock, flags); ++ spin_lock_irqsave(&ep->lock, flags); + + ep_set_busy_poll_napi_id(epi); + +@@ -1238,12 +1148,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v + * chained in ep->ovflist and requeued later on. + */ + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { +- if (chain_epi_lockless(epi)) ++ if (epi->next == EP_UNACTIVE_PTR) { ++ epi->next = READ_ONCE(ep->ovflist); ++ WRITE_ONCE(ep->ovflist, epi); + ep_pm_stay_awake_rcu(epi); ++ } + } else if (!ep_is_linked(epi)) { + /* In the usual case, add event to ready list. 
*/ +- if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) +- ep_pm_stay_awake_rcu(epi); ++ list_add_tail(&epi->rdllink, &ep->rdllist); ++ ep_pm_stay_awake_rcu(epi); + } + + /* +@@ -1276,7 +1189,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v + pwake++; + + out_unlock: +- read_unlock_irqrestore(&ep->lock, flags); ++ spin_unlock_irqrestore(&ep->lock, flags); + + /* We have to call this outside the lock */ + if (pwake) +@@ -1611,7 +1524,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, + } + + /* We have to drop the new item inside our item list to keep track of it */ +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + + /* record NAPI ID of new item if present */ + ep_set_busy_poll_napi_id(epi); +@@ -1628,7 +1541,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, + pwake++; + } + +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + + /* We have to call this outside the lock */ + if (pwake) +@@ -1692,7 +1605,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, + * list, push it inside. + */ + if (ep_item_poll(epi, &pt, 1)) { +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + if (!ep_is_linked(epi)) { + list_add_tail(&epi->rdllink, &ep->rdllist); + ep_pm_stay_awake(epi); +@@ -1703,7 +1616,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, + if (waitqueue_active(&ep->poll_wait)) + pwake++; + } +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + } + + /* We have to call this outside the lock */ +@@ -1792,7 +1705,7 @@ static int ep_send_events(struct eventpoll *ep, + * availability. At this point, no one can insert + * into ep->rdllist besides us. The epoll_ctl() + * callers are locked out by +- * ep_scan_ready_list() holding "mtx" and the ++ * ep_send_events() holding "mtx" and the + * poll callback will queue them in ep->ovflist. + */ + list_add_tail(&epi->rdllink, &ep->rdllist); +@@ -1936,7 +1849,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + init_wait(&wait); + wait.func = ep_autoremove_wake_function; + +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + /* + * Barrierless variant, waitqueue_active() is called under + * the same lock on wakeup ep_poll_callback() side, so it +@@ -1945,7 +1858,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + __set_current_state(TASK_INTERRUPTIBLE); + + /* +- * Do the final check under the lock. ep_scan_ready_list() ++ * Do the final check under the lock. 
ep_start/done_scan() + * plays with two lists (->rdllist and ->ovflist) and there + * is always a race when both lists are empty for short + * period of time although events are pending, so lock is +@@ -1955,7 +1868,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + if (!eavail) + __add_wait_queue_exclusive(&ep->wq, &wait); + +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + + if (!eavail) + timed_out = !schedule_hrtimeout_range(to, slack, +@@ -1970,7 +1883,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + eavail = 1; + + if (!list_empty_careful(&wait.entry)) { +- write_lock_irq(&ep->lock); ++ spin_lock_irq(&ep->lock); + /* + * If the thread timed out and is not on the wait queue, + * it means that the thread was woken up after its +@@ -1981,7 +1894,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + if (timed_out) + eavail = list_empty(&wait.entry); + __remove_wait_queue(&ep->wq, &wait); +- write_unlock_irq(&ep->lock); ++ spin_unlock_irq(&ep->lock); + } + } + } +diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c +index d1a2e662440178..32f59295cfa05a 100644 +--- a/fs/ext4/ext4_jbd2.c ++++ b/fs/ext4/ext4_jbd2.c +@@ -277,9 +277,16 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle, + bh, is_metadata, inode->i_mode, + test_opt(inode->i_sb, DATA_FLAGS)); + +- /* In the no journal case, we can just do a bforget and return */ ++ /* ++ * In the no journal case, we should wait for the ongoing buffer ++ * to complete and do a forget. ++ */ + if (!ext4_handle_valid(handle)) { +- bforget(bh); ++ if (bh) { ++ clear_buffer_dirty(bh); ++ wait_on_buffer(bh); ++ __bforget(bh); ++ } + return 0; + } + +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 91a9fa6f1ad4f0..563cd072642468 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -4944,6 +4944,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, + } + ei->i_flags = le32_to_cpu(raw_inode->i_flags); + ext4_set_inode_flags(inode, true); ++ /* Detect invalid flag combination - can't have both inline data and extents */ ++ if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) && ++ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { ++ ext4_error_inode(inode, function, line, 0, ++ "inode has both inline data and extents flags"); ++ ret = -EFSCORRUPTED; ++ goto bad_inode; ++ } + inode->i_blocks = ext4_inode_blocks(raw_inode, ei); + ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); + if (ext4_has_feature_64bit(sb)) +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 527f53bfe1b1f0..16a6c249580e3c 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2506,7 +2506,7 @@ static int parse_apply_sb_mount_options(struct super_block *sb, + struct ext4_fs_context *m_ctx) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +- char *s_mount_opts = NULL; ++ char s_mount_opts[65]; + struct ext4_fs_context *s_ctx = NULL; + struct fs_context *fc = NULL; + int ret = -ENOMEM; +@@ -2514,15 +2514,11 @@ static int parse_apply_sb_mount_options(struct super_block *sb, + if (!sbi->s_es->s_mount_opts[0]) + return 0; + +- s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, +- sizeof(sbi->s_es->s_mount_opts), +- GFP_KERNEL); +- if (!s_mount_opts) +- return ret; ++ strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts, sizeof(s_mount_opts)); + + fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); + if (!fc) +- goto out_free; ++ return -ENOMEM; + + s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); + if 
(!s_ctx) +@@ -2554,11 +2550,8 @@ static int parse_apply_sb_mount_options(struct super_block *sb, + ret = 0; + + out_free: +- if (fc) { +- ext4_fc_free(fc); +- kfree(fc); +- } +- kfree(s_mount_opts); ++ ext4_fc_free(fc); ++ kfree(fc); + return ret; + } + +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c +index fe23ebfc88ea14..f5252f3e840a5c 100644 +--- a/fs/f2fs/data.c ++++ b/fs/f2fs/data.c +@@ -1506,8 +1506,8 @@ static bool f2fs_map_blocks_cached(struct inode *inode, + struct f2fs_dev_info *dev = &sbi->devs[bidx]; + + map->m_bdev = dev->bdev; +- map->m_pblk -= dev->start_blk; + map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk); ++ map->m_pblk -= dev->start_blk; + } else { + map->m_bdev = inode->i_sb->s_bdev; + } +diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c +index 36b6cf2a3abba4..ebd326799f35ac 100644 +--- a/fs/hfsplus/unicode.c ++++ b/fs/hfsplus/unicode.c +@@ -40,6 +40,18 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1, + p1 = s1->unicode; + p2 = s2->unicode; + ++ if (len1 > HFSPLUS_MAX_STRLEN) { ++ len1 = HFSPLUS_MAX_STRLEN; ++ pr_err("invalid length %u has been corrected to %d\n", ++ be16_to_cpu(s1->length), len1); ++ } ++ ++ if (len2 > HFSPLUS_MAX_STRLEN) { ++ len2 = HFSPLUS_MAX_STRLEN; ++ pr_err("invalid length %u has been corrected to %d\n", ++ be16_to_cpu(s2->length), len2); ++ } ++ + while (1) { + c1 = c2 = 0; + +@@ -74,6 +86,18 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1, + p1 = s1->unicode; + p2 = s2->unicode; + ++ if (len1 > HFSPLUS_MAX_STRLEN) { ++ len1 = HFSPLUS_MAX_STRLEN; ++ pr_err("invalid length %u has been corrected to %d\n", ++ be16_to_cpu(s1->length), len1); ++ } ++ ++ if (len2 > HFSPLUS_MAX_STRLEN) { ++ len2 = HFSPLUS_MAX_STRLEN; ++ pr_err("invalid length %u has been corrected to %d\n", ++ be16_to_cpu(s2->length), len2); ++ } ++ + for (len = min(len1, len2); len > 0; len--) { + c1 = be16_to_cpu(*p1); + c2 = be16_to_cpu(*p2); +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index c2b8ad0b24c443..82bd29c170855a 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1649,6 +1649,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh) + int drop_reserve = 0; + int err = 0; + int was_modified = 0; ++ int wait_for_writeback = 0; + + if (is_handle_aborted(handle)) + return -EROFS; +@@ -1772,18 +1773,22 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh) + } + + /* +- * The buffer is still not written to disk, we should +- * attach this buffer to current transaction so that the +- * buffer can be checkpointed only after the current +- * transaction commits. ++ * The buffer has not yet been written to disk. We should ++ * either clear the buffer or ensure that the ongoing I/O ++ * is completed, and attach this buffer to current ++ * transaction so that the buffer can be checkpointed only ++ * after the current transaction commits. 
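A condensed sketch of the shape this comment describes, since wait_on_buffer() sleeps and therefore cannot run under a spinlock (the demo_* name and the bare lock parameter are illustrative, not the exact jbd2 flow):

/* Sketch: decide under the spinlock, sleep only after dropping it. */
static void demo_forget_buffer(struct buffer_head *bh, spinlock_t *state_lock)
{
	bool wait_for_writeback = false;

	spin_lock(state_lock);
	if (buffer_dirty(bh)) {
		clear_buffer_dirty(bh);	/* write-out may still be in flight */
		wait_for_writeback = true;
	}
	spin_unlock(state_lock);

	if (wait_for_writeback)
		wait_on_buffer(bh);	/* may sleep: spinlock dropped first */
}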
+ */ + clear_buffer_dirty(bh); ++ wait_for_writeback = 1; + __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); + spin_unlock(&journal->j_list_lock); + } + drop: + __brelse(bh); + spin_unlock(&jh->b_state_lock); ++ if (wait_for_writeback) ++ wait_on_buffer(bh); + jbd2_journal_put_journal_head(jh); + if (drop_reserve) { + /* no need to reserve log space for this block -bzzz */ +diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c +index 01d7fd108cf3df..59f119cce3dc61 100644 +--- a/fs/nfsd/blocklayout.c ++++ b/fs/nfsd/blocklayout.c +@@ -117,7 +117,6 @@ static __be32 + nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp, + struct iomap *iomaps, int nr_iomaps) + { +- loff_t new_size = lcp->lc_last_wr + 1; + struct iattr iattr = { .ia_valid = 0 }; + int error; + +@@ -127,9 +126,9 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp, + iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME; + iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime; + +- if (new_size > i_size_read(inode)) { ++ if (lcp->lc_size_chg) { + iattr.ia_valid |= ATTR_SIZE; +- iattr.ia_size = new_size; ++ iattr.ia_size = lcp->lc_newsize; + } + + error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps, +diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c +index 1ed2f691ebb908..dd35c472eb37de 100644 +--- a/fs/nfsd/blocklayoutxdr.c ++++ b/fs/nfsd/blocklayoutxdr.c +@@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr, + *p++ = cpu_to_be32(len); + *p++ = cpu_to_be32(1); /* we always return a single extent */ + +- p = xdr_encode_opaque_fixed(p, &b->vol_id, +- sizeof(struct nfsd4_deviceid)); ++ p = svcxdr_encode_deviceid4(p, &b->vol_id); + p = xdr_encode_hyper(p, b->foff); + p = xdr_encode_hyper(p, b->len); + p = xdr_encode_hyper(p, b->soff); +@@ -145,9 +144,7 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, + for (i = 0; i < nr_iomaps; i++) { + struct pnfs_block_extent bex; + +- memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid)); +- p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid)); +- ++ p = svcxdr_decode_deviceid4(p, &bex.vol_id); + p = xdr_decode_hyper(p, &bex.foff); + if (bex.foff & (block_size - 1)) { + dprintk("%s: unaligned offset 0x%llx\n", +diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c +index 4b5d998cbc2f44..f4e77859aa8599 100644 +--- a/fs/nfsd/export.c ++++ b/fs/nfsd/export.c +@@ -1071,28 +1071,62 @@ static struct svc_export *exp_find(struct cache_detail *cd, + return exp; + } + +-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp) ++/** ++ * check_xprtsec_policy - check if access to export is allowed by the ++ * xprtsec policy ++ * @exp: svc_export that is being accessed. ++ * @rqstp: svc_rqst attempting to access @exp. ++ * ++ * Helper function for check_nfsd_access(). Note that callers should be ++ * using check_nfsd_access() instead of calling this function directly. The ++ * one exception is fh_verify() since it has logic that may result in one ++ * or both of the helpers being skipped. 
++ * ++ * Return values: ++ * %nfs_ok if access is granted, or ++ * %nfserr_acces or %nfserr_wrongsec if access is denied ++ */ ++__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp) + { +- struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors; + struct svc_xprt *xprt = rqstp->rq_xprt; + + if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) { + if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) +- goto ok; ++ return nfs_ok; + } + if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) { + if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && + !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) +- goto ok; ++ return nfs_ok; + } + if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) { + if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && + test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) +- goto ok; ++ return nfs_ok; + } +- goto denied; + +-ok: ++ return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec; ++} ++ ++/** ++ * check_security_flavor - check if access to export is allowed by the ++ * xprtsec policy ++ * @exp: svc_export that is being accessed. ++ * @rqstp: svc_rqst attempting to access @exp. ++ * ++ * Helper function for check_nfsd_access(). Note that callers should be ++ * using check_nfsd_access() instead of calling this function directly. The ++ * one exception is fh_verify() since it has logic that may result in one ++ * or both of the helpers being skipped. ++ * ++ * Return values: ++ * %nfs_ok if access is granted, or ++ * %nfserr_acces or %nfserr_wrongsec if access is denied ++ */ ++__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp) ++{ ++ struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors; ++ + /* legacy gss-only clients are always OK: */ + if (exp->ex_client == rqstp->rq_gssclient) + return 0; +@@ -1117,10 +1151,20 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp) + if (nfsd4_spo_must_allow(rqstp)) + return 0; + +-denied: + return rqstp->rq_vers < 4 ? 
nfserr_acces : nfserr_wrongsec; + } + ++__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp) ++{ ++ __be32 status; ++ ++ status = check_xprtsec_policy(exp, rqstp); ++ if (status != nfs_ok) ++ return status; ++ ++ return check_security_flavor(exp, rqstp); ++} ++ + /* + * Uses rq_client and rq_gssclient to find an export; uses rq_client (an + * auth_unix client) if it's available and has secinfo information; +diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h +index ca9dc230ae3d0b..4a48b2ad56067f 100644 +--- a/fs/nfsd/export.h ++++ b/fs/nfsd/export.h +@@ -100,6 +100,8 @@ struct svc_expkey { + #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) + + int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp); ++__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp); ++__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp); + __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp); + + /* +diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c +index 3ca5304440ff0a..0bc52e6bec394d 100644 +--- a/fs/nfsd/flexfilelayout.c ++++ b/fs/nfsd/flexfilelayout.c +@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp, + return 0; + } + ++static __be32 ++nfsd4_ff_proc_layoutcommit(struct inode *inode, ++ struct nfsd4_layoutcommit *lcp) ++{ ++ return nfs_ok; ++} ++ + const struct nfsd4_layout_ops ff_layout_ops = { + .notify_types = + NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE, +@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = { + .encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo, + .proc_layoutget = nfsd4_ff_proc_layoutget, + .encode_layoutget = nfsd4_ff_encode_layoutget, ++ .proc_layoutcommit = nfsd4_ff_proc_layoutcommit, + }; +diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c +index bb205328e043da..223a10f37898ef 100644 +--- a/fs/nfsd/flexfilelayoutxdr.c ++++ b/fs/nfsd/flexfilelayoutxdr.c +@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr, + *p++ = cpu_to_be32(1); /* single mirror */ + *p++ = cpu_to_be32(1); /* single data server */ + +- p = xdr_encode_opaque_fixed(p, &fl->deviceid, +- sizeof(struct nfsd4_deviceid)); ++ p = svcxdr_encode_deviceid4(p, &fl->deviceid); + + *p++ = cpu_to_be32(1); /* efficiency */ + +diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c +index e8a80052cb1ba9..308214378fd352 100644 +--- a/fs/nfsd/nfs4layouts.c ++++ b/fs/nfsd/nfs4layouts.c +@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp, + + id->fsid_idx = fhp->fh_export->ex_devid_map->idx; + id->generation = device_generation; +- id->pad = 0; + return 0; + } + +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index e9c1271b7ecc39..836367d839bda1 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -2308,7 +2308,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, + const struct nfsd4_layout_seg *seg = &lcp->lc_seg; + struct svc_fh *current_fh = &cstate->current_fh; + const struct nfsd4_layout_ops *ops; +- loff_t new_size = lcp->lc_last_wr + 1; + struct inode *inode; + struct nfs4_layout_stateid *ls; + __be32 nfserr; +@@ -2324,18 +2323,20 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, + goto out; + inode = d_inode(current_fh->fh_dentry); + +- nfserr = nfserr_inval; +- if (new_size <= seg->offset) { +- dprintk("pnfsd: last write before layout segment\n"); +- goto out; +- } +- if (new_size > seg->offset + seg->length) { +- dprintk("pnfsd: last write beyond layout 
segment\n"); +- goto out; +- } +- if (!lcp->lc_newoffset && new_size > i_size_read(inode)) { +- dprintk("pnfsd: layoutcommit beyond EOF\n"); +- goto out; ++ lcp->lc_size_chg = false; ++ if (lcp->lc_newoffset) { ++ loff_t new_size = lcp->lc_last_wr + 1; ++ ++ nfserr = nfserr_inval; ++ if (new_size <= seg->offset) ++ goto out; ++ if (new_size > seg->offset + seg->length) ++ goto out; ++ ++ if (new_size > i_size_read(inode)) { ++ lcp->lc_size_chg = true; ++ lcp->lc_newsize = new_size; ++ } + } + + nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid, +@@ -2352,13 +2353,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, + /* LAYOUTCOMMIT does not require any serialization */ + mutex_unlock(&ls->ls_mutex); + +- if (new_size > i_size_read(inode)) { +- lcp->lc_size_chg = 1; +- lcp->lc_newsize = new_size; +- } else { +- lcp->lc_size_chg = 0; +- } +- + nfserr = ops->proc_layoutcommit(inode, lcp); + nfs4_put_stid(&ls->ls_stid); + out: +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 76dfbb99277f05..3eff780fd8da0a 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -566,18 +566,6 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp, + } + + #ifdef CONFIG_NFSD_PNFS +-static __be32 +-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp, +- struct nfsd4_deviceid *devid) +-{ +- __be32 *p; +- +- p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE); +- if (!p) +- return nfserr_bad_xdr; +- memcpy(devid, p, sizeof(*devid)); +- return nfs_ok; +-} + + static __be32 + nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp, +@@ -1733,7 +1721,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp, + __be32 status; + + memset(gdev, 0, sizeof(*gdev)); +- status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid); ++ status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid); + if (status) + return status; + if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0) +diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c +index c2495d98c18928..283c1a60c84603 100644 +--- a/fs/nfsd/nfsfh.c ++++ b/fs/nfsd/nfsfh.c +@@ -370,6 +370,16 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) + if (error) + goto out; + ++ /* ++ * NLM is allowed to bypass the xprtsec policy check because lockd ++ * doesn't support xprtsec. 
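A condensed sketch of how the split helpers compose; check_nfsd_access() above is exactly these two calls in order, and fh_verify() below skips only the xprtsec step for NLM (the demo_* wrapper and is_nlm flag are invented for illustration):

/* Sketch: two-step export access check, with NLM bypassing only xprtsec. */
static __be32 demo_export_access(struct svc_export *exp,
				 struct svc_rqst *rqstp, bool is_nlm)
{
	__be32 status;

	if (!is_nlm) {			/* lockd has no xprtsec support */
		status = check_xprtsec_policy(exp, rqstp);
		if (status != nfs_ok)
			return status;
	}
	return check_security_flavor(exp, rqstp);
}

The real fh_verify() hunk below additionally jumps past the flavor check via skip_pseudoflavor_check for pseudo-root dentries.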
++ */
++	if (!(access & NFSD_MAY_LOCK)) {
++		error = check_xprtsec_policy(exp, rqstp);
++		if (error)
++			goto out;
++	}
++
+ 	/*
+ 	 * pseudoflavor restrictions are not enforced on NLM,
+ 	 * which clients virtually always use auth_sys for,
+@@ -386,7 +396,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ 	    && exp->ex_path.dentry == dentry)
+ 		goto skip_pseudoflavor_check;
+ 
+-	error = check_nfsd_access(exp, rqstp);
++	error = check_security_flavor(exp, rqstp);
+ 	if (error)
+ 		goto out;
+ 
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 144e05efd14c35..a83a52b0f51816 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -459,9 +459,43 @@ struct nfsd4_reclaim_complete {
+ struct nfsd4_deviceid {
+ 	u64			fsid_idx;
+ 	u32			generation;
+-	u32			pad;
+ };
+ 
++static inline __be32 *
++svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
++{
++	__be64 *q = (__be64 *)p;
++
++	*q = (__force __be64)devid->fsid_idx;
++	p += 2;
++	*p++ = (__force __be32)devid->generation;
++	*p++ = xdr_zero;
++	return p;
++}
++
++static inline __be32 *
++svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
++{
++	__be64 *q = (__be64 *)p;
++
++	devid->fsid_idx = (__force u64)(*q);
++	p += 2;
++	devid->generation = (__force u32)(*p++);
++	p++;	/* NFSD does not use the remaining octets */
++	return p;
++}
++
++static inline __be32
++nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
++{
++	__be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
++
++	if (unlikely(!p))
++		return nfserr_bad_xdr;
++	svcxdr_decode_deviceid4(p, devid);
++	return nfs_ok;
++}
++
+ struct nfsd4_layout_seg {
+ 	u32			iomode;
+ 	u64			offset;
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index be41e26b782469..05fdbbc63e1f5f 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -680,8 +680,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ 	int blocksize;
+ 	int err;
+ 
+-	down_write(&nilfs->ns_sem);
+-
+ 	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
+ 	if (!blocksize) {
+ 		nilfs_err(sb, "unable to set blocksize");
+@@ -757,7 +755,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ 	set_nilfs_init(nilfs);
+ 	err = 0;
+  out:
+-	up_write(&nilfs->ns_sem);
+ 	return err;
+ 
+  failed_sbh:
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index e585e77cdc88e1..18dba873a1633f 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1571,15 +1571,13 @@ static int __init ocfs2_init(void)
+ 
+ 	ocfs2_set_locking_protocol();
+ 
+-	status = register_quota_format(&ocfs2_quota_format);
+-	if (status < 0)
+-		goto out3;
++	register_quota_format(&ocfs2_quota_format);
++
+ 	status = register_filesystem(&ocfs2_fs_type);
+ 	if (!status)
+ 		return 0;
+ 
+ 	unregister_quota_format(&ocfs2_quota_format);
+-out3:
+ 	debugfs_remove(ocfs2_debugfs_root);
+ 	ocfs2_free_mem_caches();
+ out2:
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 67562c78e57d53..42a7d0a71b22ee 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -163,13 +163,15 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
+ /* SLAB cache for dquot structures */
+ static struct kmem_cache *dquot_cachep;
+ 
+-int register_quota_format(struct quota_format_type *fmt)
++/* workqueue for quota_release_work */
++static struct workqueue_struct *quota_unbound_wq;
++
++void register_quota_format(struct quota_format_type *fmt)
+ {
+ 	spin_lock(&dq_list_lock);
+ 	fmt->qf_next = quota_formats;
+ 	quota_formats = fmt;
+ 	spin_unlock(&dq_list_lock);
+-	return 0;
+ }
+ EXPORT_SYMBOL(register_quota_format);
+ 
+@@ -892,7 +894,7 @@ void dqput(struct dquot *dquot)
+ 	put_releasing_dquots(dquot);
+ 	atomic_dec(&dquot->dq_count);
+ 	spin_unlock(&dq_list_lock);
+-	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
++	queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+ 
+@@ -3047,6 +3049,11 @@ static int __init dquot_init(void)
+ 	if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
+ 		panic("Cannot register dquot shrinker");
+ 
++	quota_unbound_wq = alloc_workqueue("quota_events_unbound",
++					   WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
++	if (!quota_unbound_wq)
++		panic("Cannot create quota_unbound_wq\n");
++
+ 	return 0;
+ }
+ fs_initcall(dquot_init);
+diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
+index a0db3f195e9511..8aaf4a501fc0d9 100644
+--- a/fs/quota/quota_v1.c
++++ b/fs/quota/quota_v1.c
+@@ -229,7 +229,8 @@ static struct quota_format_type v1_quota_format = {
+ 
+ static int __init init_v1_quota_format(void)
+ {
+-	return register_quota_format(&v1_quota_format);
++	register_quota_format(&v1_quota_format);
++	return 0;
+ }
+ 
+ static void __exit exit_v1_quota_format(void)
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index 7978ab671e0c6a..d73f4432927719 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -422,12 +422,9 @@ static struct quota_format_type v2r1_quota_format = {
+ 
+ static int __init init_v2_quota_format(void)
+ {
+-	int ret;
+-
+-	ret = register_quota_format(&v2r0_quota_format);
+-	if (ret)
+-		return ret;
+-	return register_quota_format(&v2r1_quota_format);
++	register_quota_format(&v2r0_quota_format);
++	register_quota_format(&v2r1_quota_format);
++	return 0;
+ }
+ 
+ static void __exit exit_v2_quota_format(void)
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 6c16c4f34d8824..e55852aa49f919 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2319,8 +2319,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+ 	tcon = tlink_tcon(tlink);
+ 	server = tcon->ses->server;
+ 
+-	if (!server->ops->rename)
+-		return -ENOSYS;
++	if (!server->ops->rename) {
++		rc = -ENOSYS;
++		goto do_rename_exit;
++	}
+ 
+ 	/* try path-based rename first */
+ 	rc = server->ops->rename(xid, tcon, from_dentry,
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index ad77952f6d810b..9b5aeafe220bd9 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -922,6 +922,14 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
+ 	char *data_end;
+ 	struct dfs_referral_level_3 *ref;
+ 
++	if (rsp_size < sizeof(*rsp)) {
++		cifs_dbg(VFS | ONCE,
++			 "%s: header is malformed (size is %u, must be %zu)\n",
++			 __func__, rsp_size, sizeof(*rsp));
++		rc = -EINVAL;
++		goto parse_DFS_referrals_exit;
++	}
++
+ 	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
+ 
+ 	if (*num_of_nodes < 1) {
+@@ -931,6 +939,15 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
+ 		goto parse_DFS_referrals_exit;
+ 	}
+ 
++	if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
++		cifs_dbg(VFS | ONCE,
++			 "%s: malformed buffer (size is %u, must be at least %zu)\n",
++			 __func__, rsp_size,
++			 sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
++		rc = -EINVAL;
++		goto parse_DFS_referrals_exit;
++	}
++
+ 	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
+ 	if (ref->VersionNumber != cpu_to_le16(3)) {
+ 		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
+diff 
--git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index ee6a6ba13f89c8..b02114b734dcdb 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -3072,8 +3072,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); + if (!utf16_path) { + rc = -ENOMEM; +- free_xid(xid); +- return ERR_PTR(rc); ++ goto put_tlink; + } + + oparms = (struct cifs_open_parms) { +@@ -3105,6 +3104,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); + } + ++put_tlink: + cifs_put_tlink(tlink); + free_xid(xid); + +@@ -3145,8 +3145,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen, + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); + if (!utf16_path) { + rc = -ENOMEM; +- free_xid(xid); +- return rc; ++ goto put_tlink; + } + + oparms = (struct cifs_open_parms) { +@@ -3167,6 +3166,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen, + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); + } + ++put_tlink: + cifs_put_tlink(tlink); + free_xid(xid); + return rc; +diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h +index c6c1844d444822..363501fc308aa3 100644 +--- a/fs/smb/server/ksmbd_netlink.h ++++ b/fs/smb/server/ksmbd_netlink.h +@@ -108,8 +108,9 @@ struct ksmbd_startup_request { + __u32 smb2_max_credits; /* MAX credits */ + __u32 smbd_max_io_size; /* smbd read write size */ + __u32 max_connections; /* Number of maximum simultaneous connections */ ++ __s8 bind_interfaces_only; + __u32 max_ip_connections; /* Number of maximum connection per ip address */ +- __u32 reserved[125]; /* Reserved room */ ++ __s8 reserved[499]; /* Reserved room */ + __u32 ifc_list_sz; /* interfaces list size */ + __s8 ____payload[]; + } __packed; +diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h +index d0744498ceed66..48bd203abb441f 100644 +--- a/fs/smb/server/server.h ++++ b/fs/smb/server/server.h +@@ -46,6 +46,7 @@ struct ksmbd_server_config { + unsigned int max_ip_connections; + + char *conf[SERVER_CONF_WORK_GROUP + 1]; ++ bool bind_interfaces_only; + }; + + extern struct ksmbd_server_config server_conf; +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 93c31feab3564c..9a58c5a6f9866f 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -38,6 +38,7 @@ + #include "mgmt/user_session.h" + #include "mgmt/ksmbd_ida.h" + #include "ndr.h" ++#include "transport_tcp.h" + + static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) + { +@@ -7790,6 +7791,9 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn, + if (netdev->type == ARPHRD_LOOPBACK) + continue; + ++ if (!ksmbd_find_netdev_name_iface_list(netdev->name)) ++ continue; ++ + flags = dev_get_flags(netdev); + if (!(flags & IFF_RUNNING)) + continue; +diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c +index 80581a7bc1bcc7..354f7144c59049 100644 +--- a/fs/smb/server/transport_ipc.c ++++ b/fs/smb/server/transport_ipc.c +@@ -327,6 +327,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) + ret = ksmbd_set_netbios_name(req->netbios_name); + ret |= ksmbd_set_server_string(req->server_string); + ret |= ksmbd_set_work_group(req->work_group); ++ server_conf.bind_interfaces_only = req->bind_interfaces_only; + ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req), + req->ifc_list_sz); + out: +diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c +index 
c43a465114289b..665d21d40e7a17 100644 +--- a/fs/smb/server/transport_tcp.c ++++ b/fs/smb/server/transport_tcp.c +@@ -551,30 +551,37 @@ static int create_socket(struct interface *iface) + return ret; + } + ++struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name) ++{ ++ struct interface *iface; ++ ++ list_for_each_entry(iface, &iface_list, entry) ++ if (!strcmp(iface->name, netdev_name)) ++ return iface; ++ return NULL; ++} ++ + static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event, + void *ptr) + { + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct interface *iface; +- int ret, found = 0; ++ int ret; + + switch (event) { + case NETDEV_UP: + if (netif_is_bridge_port(netdev)) + return NOTIFY_OK; + +- list_for_each_entry(iface, &iface_list, entry) { +- if (!strcmp(iface->name, netdev->name)) { +- found = 1; +- if (iface->state != IFACE_STATE_DOWN) +- break; +- ret = create_socket(iface); +- if (ret) +- return NOTIFY_OK; +- break; +- } ++ iface = ksmbd_find_netdev_name_iface_list(netdev->name); ++ if (iface && iface->state == IFACE_STATE_DOWN) { ++ ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n", ++ iface->name); ++ ret = create_socket(iface); ++ if (ret) ++ return NOTIFY_OK; + } +- if (!found && bind_additional_ifaces) { ++ if (!iface && bind_additional_ifaces) { + iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL)); + if (!iface) + return NOTIFY_OK; +@@ -584,19 +591,19 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event, + } + break; + case NETDEV_DOWN: +- list_for_each_entry(iface, &iface_list, entry) { +- if (!strcmp(iface->name, netdev->name) && +- iface->state == IFACE_STATE_CONFIGURED) { +- tcp_stop_kthread(iface->ksmbd_kthread); +- iface->ksmbd_kthread = NULL; +- mutex_lock(&iface->sock_release_lock); +- tcp_destroy_socket(iface->ksmbd_socket); +- iface->ksmbd_socket = NULL; +- mutex_unlock(&iface->sock_release_lock); +- +- iface->state = IFACE_STATE_DOWN; +- break; +- } ++ iface = ksmbd_find_netdev_name_iface_list(netdev->name); ++ if (iface && iface->state == IFACE_STATE_CONFIGURED) { ++ ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n", ++ iface->name); ++ tcp_stop_kthread(iface->ksmbd_kthread); ++ iface->ksmbd_kthread = NULL; ++ mutex_lock(&iface->sock_release_lock); ++ tcp_destroy_socket(iface->ksmbd_socket); ++ iface->ksmbd_socket = NULL; ++ mutex_unlock(&iface->sock_release_lock); ++ ++ iface->state = IFACE_STATE_DOWN; ++ break; + } + break; + } +@@ -665,18 +672,6 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz) + int sz = 0; + + if (!ifc_list_sz) { +- struct net_device *netdev; +- +- rtnl_lock(); +- for_each_netdev(&init_net, netdev) { +- if (netif_is_bridge_port(netdev)) +- continue; +- if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) { +- rtnl_unlock(); +- return -ENOMEM; +- } +- } +- rtnl_unlock(); + bind_additional_ifaces = 1; + return 0; + } +diff --git a/fs/smb/server/transport_tcp.h b/fs/smb/server/transport_tcp.h +index 5925ec5df47552..bf6a3d71f7a040 100644 +--- a/fs/smb/server/transport_tcp.h ++++ b/fs/smb/server/transport_tcp.h +@@ -8,6 +8,7 @@ + + int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz); + void ksmbd_free_transport(struct ksmbd_transport *kt); ++struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name); + int ksmbd_tcp_init(void); + void ksmbd_tcp_destroy(void); + +diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h +index 269573c828085f..e267fc9d3108f4 100644 +--- 
a/fs/xfs/libxfs/xfs_log_format.h
++++ b/fs/xfs/libxfs/xfs_log_format.h
+@@ -171,12 +171,40 @@ typedef struct xlog_rec_header {
+ 	__be32	  h_prev_block; /* block number to previous LR		:  4 */
+ 	__be32	  h_num_logops;	/* number of log operations in this LR	:  4 */
+ 	__be32	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+-	/* new fields */
++
++	/* fields added by the Linux port: */
+ 	__be32	  h_fmt;	/* format of log record			:  4 */
+ 	uuid_t	  h_fs_uuid;	/* uuid of FS				: 16 */
++
++	/* fields added for log v2: */
+ 	__be32	  h_size;	/* iclog size				:  4 */
++
++	/*
++	 * When h_size was added for log v2 support, it caused the structure
++	 * to have a different size on i386 vs all other architectures because
++	 * the sum of the sizes of the members is not aligned to that of the
++	 * largest __be64-sized member, and i386 has really odd struct
++	 * alignment rules.
++	 *
++	 * Due to the way the log headers are laid out on disk, that alone is
++	 * not a problem because the xlog_rec_header always sits alone in a
++	 * BBSIZE-sized area, and the rest of that area is padded with zeroes.
++	 * But xlog_cksum used to calculate the checksum based on the structure
++	 * size, and thus gives different checksums for i386 vs the rest.
++	 * We now do two checksum validation passes for both sizes to allow
++	 * moving v5 file systems with unclean logs between i386 and other
++	 * (little-endian) architectures.
++	 */
++	__u32	  h_pad0;
+ } xlog_rec_header_t;
+ 
++#ifdef __i386__
++#define XLOG_REC_SIZE		offsetofend(struct xlog_rec_header, h_size)
++#define XLOG_REC_SIZE_OTHER	sizeof(struct xlog_rec_header)
++#else
++#define XLOG_REC_SIZE		sizeof(struct xlog_rec_header)
++#define XLOG_REC_SIZE_OTHER	offsetofend(struct xlog_rec_header, h_size)
++#endif /* __i386__ */
++
+ typedef struct xlog_rec_ext_header {
+ 	__be32	  xh_cycle;	/* write cycle of log			:  4 */
+ 	__be32	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
+diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c
+index 822f5adf7f7cc9..b968f7bc202cfd 100644
+--- a/fs/xfs/scrub/reap.c
++++ b/fs/xfs/scrub/reap.c
+@@ -20,6 +20,7 @@
+ #include "xfs_ialloc_btree.h"
+ #include "xfs_rmap.h"
+ #include "xfs_rmap_btree.h"
++#include "xfs_refcount.h"
+ #include "xfs_refcount_btree.h"
+ #include "xfs_extent_busy.h"
+ #include "xfs_ag.h"
+@@ -376,9 +377,21 @@ xreap_agextent_iter(
+ 	if (crosslinked) {
+ 		trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp);
+ 
+-		rs->force_roll = true;
+-		return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
+-				*aglenp, rs->oinfo);
++		if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
++			/*
++			 * If we're unmapping CoW staging extents, remove the
++			 * records from the refcountbt, which will remove the
++			 * rmap record as well.
++			 */
++			xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
++			rs->force_roll = true;
++			return 0;
++		}
++
++		xfs_rmap_free_extent(sc->tp, sc->sa.pag->pag_agno, agbno,
++				*aglenp, rs->oinfo->oi_owner);
++		rs->deferred++;
++		return 0;
+ 	}
+ 
+ 	trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp);
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index a1650fc81382f9..d03976b8218027 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -1807,13 +1807,13 @@ xlog_cksum(
+ 	struct xlog		*log,
+ 	struct xlog_rec_header	*rhead,
+ 	char			*dp,
+-	int			size)
++	unsigned int		hdrsize,
++	unsigned int		size)
+ {
+ 	uint32_t		crc;
+ 
+ 	/* first generate the crc for the record header ... */
+-	crc = xfs_start_cksum_update((char *)rhead,
+-			sizeof(struct xlog_rec_header),
++	crc = xfs_start_cksum_update((char *)rhead, hdrsize,
+ 			offsetof(struct xlog_rec_header, h_crc));
+ 
+ 	/* ... then for additional cycle data for v2 logs ... */
+@@ -2077,7 +2077,7 @@ xlog_sync(
+ 
+ 	/* calculcate the checksum */
+ 	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
+-					    iclog->ic_datap, size);
++					    iclog->ic_datap, XLOG_REC_SIZE, size);
+ 	/*
+ 	 * Intentionally corrupt the log record CRC based on the error injection
+ 	 * frequency, if defined. This facilitates testing log recovery in the
+diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
+index e30c06ec20e33b..a183bfb36470ad 100644
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -503,8 +503,8 @@ xlog_recover_finish(
+ extern void
+ xlog_recover_cancel(struct xlog *);
+ 
+-extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
+-			    char *dp, int size);
++__le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
++			    char *dp, unsigned int hdrsize, unsigned int size);
+ 
+ extern struct kmem_cache *xfs_log_ticket_cache;
+ struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 60382eb4996105..f708bf4104b1a3 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2860,20 +2860,34 @@ xlog_recover_process(
+ 	int			pass,
+ 	struct list_head	*buffer_list)
+ {
+-	__le32			old_crc = rhead->h_crc;
+-	__le32			crc;
++	__le32			expected_crc = rhead->h_crc, crc, other_crc;
+ 
+-	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
++	crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE,
++			be32_to_cpu(rhead->h_len));
++
++	/*
++	 * Look at the end of the struct xlog_rec_header definition in
++	 * xfs_log_format.h for the gory details.
++	 */
++	if (expected_crc && crc != expected_crc) {
++		other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER,
++				be32_to_cpu(rhead->h_len));
++		if (other_crc == expected_crc) {
++			xfs_notice_once(log->l_mp,
++				"Fixing up incorrect CRC due to padding.");
++			crc = other_crc;
++		}
++	}
+ 
+ 	/*
+ 	 * Nothing else to do if this is a CRC verification pass. Just return
+ 	 * if this a record with a non-zero crc. Unfortunately, mkfs always
+-	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
+-	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
+-	 * know precisely what failed.
++	 * sets expected_crc to 0 so we must consider this valid even on v5
++	 * supers. Otherwise, return EFSBADCRC on failure so the callers up the
++	 * stack know precisely what failed.
+ 	 */
+ 	if (pass == XLOG_RECOVER_CRCPASS) {
+-		if (old_crc && crc != old_crc)
++		if (expected_crc && crc != expected_crc)
+ 			return -EFSBADCRC;
+ 		return 0;
+ 	}
+@@ -2884,11 +2898,11 @@ xlog_recover_process(
+ 	 * zero CRC check prevents warnings from being emitted when upgrading
+ 	 * the kernel from one that does not add CRCs by default.
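In concrete numbers, per the XFS_CHECK_STRUCT_SIZE() assert added below, sizeof(struct xlog_rec_header) is now 328 bytes on every architecture, while offsetofend(struct xlog_rec_header, h_size) is 324, the length that pre-h_pad0 i386 kernels effectively checksummed. A condensed sketch of the resulting fallback (the demo_* helper is illustrative; the real code above also handles the mkfs zero-CRC case):

/* Sketch: accept a CRC computed over either historical header size. */
static bool demo_crc_matches(struct xlog *log, struct xlog_rec_header *rhead,
			     char *dp, unsigned int len)
{
	if (xlog_cksum(log, rhead, dp, XLOG_REC_SIZE, len) == rhead->h_crc)
		return true;	/* native layout: 324 on i386, 328 elsewhere */
	return xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER, len) == rhead->h_crc;
}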
+ */ +- if (crc != old_crc) { +- if (old_crc || xfs_has_crc(log->l_mp)) { ++ if (crc != expected_crc) { ++ if (expected_crc || xfs_has_crc(log->l_mp)) { + xfs_alert(log->l_mp, + "log record CRC mismatch: found 0x%x, expected 0x%x.", +- le32_to_cpu(old_crc), ++ le32_to_cpu(expected_crc), + le32_to_cpu(crc)); + xfs_hex_dump(dp, 32); + } +diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h +index c4cc99b70dd303..618bf4f03f280f 100644 +--- a/fs/xfs/xfs_ondisk.h ++++ b/fs/xfs/xfs_ondisk.h +@@ -143,6 +143,8 @@ xfs_check_ondisk_structs(void) + XFS_CHECK_STRUCT_SIZE(struct xfs_rud_log_format, 16); + XFS_CHECK_STRUCT_SIZE(struct xfs_map_extent, 32); + XFS_CHECK_STRUCT_SIZE(struct xfs_phys_extent, 16); ++ XFS_CHECK_STRUCT_SIZE(struct xlog_rec_header, 328); ++ XFS_CHECK_STRUCT_SIZE(struct xlog_rec_ext_header, 260); + + XFS_CHECK_OFFSET(struct xfs_bui_log_format, bui_extents, 16); + XFS_CHECK_OFFSET(struct xfs_cui_log_format, cui_extents, 16); +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h +index bfecd9dcb55297..1f94fe8559a9a8 100644 +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -32,6 +32,9 @@ + */ + + #define CPUFREQ_ETERNAL (-1) ++ ++#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC ++ + #define CPUFREQ_NAME_LEN 16 + /* Print length for names. Extra 1 space for accommodating '\n' in prints */ + #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) +diff --git a/include/linux/mm.h b/include/linux/mm.h +index ba77f08900ca2e..fa5b11452ae627 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -315,7 +315,7 @@ extern unsigned int kobjsize(const void *objp); + #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ + #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ + #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ +-#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ ++#define VM_MERGEABLE BIT(31) /* KSM may merge identical pages */ + + #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS + #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 0511f6f9a4e6ad..e4338237a05454 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -1034,6 +1034,20 @@ static inline struct pci_driver *to_pci_driver(struct device_driver *drv) + .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 + ++/** ++ * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form ++ * @vend: the vendor name ++ * @dev: the 16 bit PCI Device ID ++ * @subvend: the 16 bit PCI Subvendor ID ++ * @subdev: the 16 bit PCI Subdevice ID ++ * ++ * Generate the pci_device_id struct layout for the specific PCI ++ * device/subdevice. Private data may follow the output. 
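For example, a hypothetical ID table using the macro defined just below (all numeric IDs are invented for illustration):

/* Hypothetical match table: one exact subsystem, then a catch-all entry. */
static const struct pci_device_id demo_pci_ids[] = {
	{ PCI_VDEVICE_SUB(INTEL, 0x1234, 0x5678, 0x9abc) },
	{ PCI_VDEVICE(INTEL, 0x1234) },	/* any subvendor/subdevice */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);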
++ */
++#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
++	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
++	.subvendor = (subvend), .subdevice = (subdev), 0, 0
++
+ /**
+  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
+  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 4b74f0f012a59f..c3a462ff1206e7 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -94,7 +94,9 @@ extern void pm_runtime_new_link(struct device *dev);
+ extern void pm_runtime_drop_link(struct device_link *link);
+ extern void pm_runtime_release_supplier(struct device_link *link);
+ 
++int devm_pm_runtime_set_active_enabled(struct device *dev);
+ extern int devm_pm_runtime_enable(struct device *dev);
++int devm_pm_runtime_get_noresume(struct device *dev);
+ 
+ /**
+  * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
+@@ -278,7 +280,9 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
+ static inline void pm_runtime_allow(struct device *dev) {}
+ static inline void pm_runtime_forbid(struct device *dev) {}
+ 
++static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
+ static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
++static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
+ 
+ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
+ static inline void pm_runtime_get_noresume(struct device *dev) {}
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 07071e64abf3d6..89a0d83ddad082 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -526,7 +526,7 @@ struct quota_info {
+ 	const struct quota_format_ops *ops[MAXQUOTAS];	/* Operations for each type */
+ };
+ 
+-int register_quota_format(struct quota_format_type *fmt);
++void register_quota_format(struct quota_format_type *fmt);
+ void unregister_quota_format(struct quota_format_type *fmt);
+ 
+ struct quota_module_name {
+diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
+index 75bda0783395a0..aa831e16c3d39d 100644
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -15,6 +15,7 @@
+ #ifndef __LINUX_USB_GADGET_H
+ #define __LINUX_USB_GADGET_H
+ 
++#include <linux/cleanup.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+@@ -32,6 +33,7 @@ struct usb_ep;
+ 
+ /**
+  * struct usb_request - describes one i/o request
++ * @ep: The associated endpoint set by usb_ep_alloc_request().
+  * @buf: Buffer used for data.  Always provide this; some controllers
+  *	only use PIO, or don't use DMA for some endpoints.
+  * @dma: DMA address corresponding to 'buf'.  If you don't set this
+@@ -97,6 +99,7 @@ struct usb_ep;
+  */
+ 
+ struct usb_request {
++	struct usb_ep		*ep;
+ 	void			*buf;
+ 	unsigned		length;
+ 	dma_addr_t		dma;
+@@ -289,6 +292,28 @@ static inline void usb_ep_fifo_flush(struct usb_ep *ep)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
++/**
++ * free_usb_request - frees a usb_request object and its buffer
++ * @req: the request being freed
++ *
++ * This helper function frees both the request's buffer and the request object
++ * itself by calling usb_ep_free_request(). Its signature is designed to be used
++ * with DEFINE_FREE() to enable automatic, scope-based cleanup for usb_request
++ * pointers.
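A sketch of the scope-based use that the helper and the DEFINE_FREE() hook-up just below enable (the demo_* names and endpoint setup are invented):

/* Sketch: the request is freed automatically on every early return. */
static int demo_prepare_request(struct usb_ep *ep, size_t len)
{
	struct usb_request *req __free(free_usb_request) =
		usb_ep_alloc_request(ep, GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->buf = kmalloc(len, GFP_KERNEL);
	if (!req->buf)
		return -ENOMEM;		/* req itself is auto-freed here */
	req->length = len;

	return demo_submit(ep, no_free_ptr(req));	/* keep it past this scope */
}

Storing the endpoint in req->ep (see the usb_ep_alloc_request() change above) is what lets free_usb_request() take a single argument, which DEFINE_FREE() requires.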
++ */ ++static inline void free_usb_request(struct usb_request *req) ++{ ++ if (!req) ++ return; ++ ++ kfree(req->buf); ++ usb_ep_free_request(req->ep, req); ++} ++ ++DEFINE_FREE(free_usb_request, struct usb_request *, free_usb_request(_T)) ++ ++/*-------------------------------------------------------------------------*/ ++ + struct usb_dcd_config_params { + __u8 bU1devExitLat; /* U1 Device exit Latency */ + #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h +index 006a61ddd36fa4..3d36794cb1899a 100644 +--- a/include/net/ip_tunnels.h ++++ b/include/net/ip_tunnels.h +@@ -489,6 +489,21 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, + int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, + int headroom, bool reply); + ++static inline void ip_tunnel_adj_headroom(struct net_device *dev, ++ unsigned int headroom) ++{ ++ /* we must cap headroom to some upperlimit, else pskb_expand_head ++ * will overflow header offsets in skb_headers_offset_update(). ++ */ ++ const unsigned int max_allowed = 512; ++ ++ if (headroom > max_allowed) ++ headroom = max_allowed; ++ ++ if (headroom > READ_ONCE(dev->needed_headroom)) ++ WRITE_ONCE(dev->needed_headroom, headroom); ++} ++ + int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); + + static inline int iptunnel_pull_offloads(struct sk_buff *skb) +diff --git a/kernel/padata.c b/kernel/padata.c +index 93cd7704ab63e6..9260ab0b39eb5d 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -290,7 +290,11 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd, + if (remove_object) { + list_del_init(&padata->list); + ++pd->processed; +- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); ++ /* When sequence wraps around, reset to the first CPU. */ ++ if (unlikely(pd->processed == 0)) ++ pd->cpu = cpumask_first(pd->cpumask.pcpu); ++ else ++ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); + } + + spin_unlock(&reorder->lock); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 1cf43e91ae9de8..58231999d929ea 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4829,7 +4829,7 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) + return cfs_rq->avg.load_avg; + } + +-static int newidle_balance(struct rq *this_rq, struct rq_flags *rf); ++static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf); + + static inline unsigned long task_util(struct task_struct *p) + { +@@ -5158,7 +5158,7 @@ attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} + static inline void + detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} + +-static inline int newidle_balance(struct rq *rq, struct rq_flags *rf) ++static inline int sched_balance_newidle(struct rq *rq, struct rq_flags *rf) + { + return 0; + } +@@ -8281,7 +8281,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) + if (rq->nr_running) + return 1; + +- return newidle_balance(rq, rf) != 0; ++ return sched_balance_newidle(rq, rf) != 0; + } + #endif /* CONFIG_SMP */ + +@@ -8528,21 +8528,21 @@ done: __maybe_unused; + return p; + + idle: +- if (!rf) +- return NULL; +- +- new_tasks = newidle_balance(rq, rf); ++ if (rf) { ++ new_tasks = sched_balance_newidle(rq, rf); + +- /* +- * Because newidle_balance() releases (and re-acquires) rq->lock, it is +- * possible for any higher priority task to appear. 
In that case we +- * must re-start the pick_next_entity() loop. +- */ +- if (new_tasks < 0) +- return RETRY_TASK; ++ /* ++ * Because sched_balance_newidle() releases (and re-acquires) ++ * rq->lock, it is possible for any higher priority task to ++ * appear. In that case we must re-start the pick_next_entity() ++ * loop. ++ */ ++ if (new_tasks < 0) ++ return RETRY_TASK; + +- if (new_tasks > 0) +- goto again; ++ if (new_tasks > 0) ++ goto again; ++ } + + /* + * rq is about to be idle, check if we need to update the +@@ -11542,7 +11542,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, + ld_moved = 0; + + /* +- * newidle_balance() disregards balance intervals, so we could ++ * sched_balance_newidle() disregards balance intervals, so we could + * repeatedly reach this code, which would lead to balance_interval + * skyrocketing in a short amount of time. Skip the balance_interval + * increase logic to avoid that. +@@ -12308,7 +12308,7 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { } + #endif /* CONFIG_NO_HZ_COMMON */ + + /* +- * newidle_balance is called by schedule() if this_cpu is about to become ++ * sched_balance_newidle is called by schedule() if this_cpu is about to become + * idle. Attempts to pull tasks from other CPUs. + * + * Returns: +@@ -12316,7 +12316,7 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { } + * 0 - failed, no new tasks + * > 0 - success, new (fair) tasks present + */ +-static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) ++static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) + { + unsigned long next_balance = jiffies + HZ; + int this_cpu = this_rq->cpu; +diff --git a/mm/shmem.c b/mm/shmem.c +index ecf1011cc3e296..2260def68090c7 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -4617,11 +4617,7 @@ void __init shmem_init(void) + shmem_init_inodecache(); + + #ifdef CONFIG_TMPFS_QUOTA +- error = register_quota_format(&shmem_quota_format); +- if (error < 0) { +- pr_err("Could not register quota format\n"); +- goto out3; +- } ++ register_quota_format(&shmem_quota_format); + #endif + + error = register_filesystem(&shmem_fs_type); +@@ -4650,7 +4646,6 @@ void __init shmem_init(void) + out2: + #ifdef CONFIG_TMPFS_QUOTA + unregister_quota_format(&shmem_quota_format); +-out3: + #endif + shmem_destroy_inodecache(); + shm_mnt = ERR_PTR(error); +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index b5d64cd3ab0a23..090403c8cc6c3d 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -567,20 +567,6 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, + return 0; + } + +-static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom) +-{ +- /* we must cap headroom to some upperlimit, else pskb_expand_head +- * will overflow header offsets in skb_headers_offset_update(). 
+- */ +- static const unsigned int max_allowed = 512; +- +- if (headroom > max_allowed) +- headroom = max_allowed; +- +- if (headroom > READ_ONCE(dev->needed_headroom)) +- WRITE_ONCE(dev->needed_headroom, headroom); +-} +- + void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + u8 proto, int tunnel_hlen) + { +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 560273e7f77365..88551db62ca29d 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2189,7 +2189,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, + u32 max_segs) + { + const struct inet_connection_sock *icsk = inet_csk(sk); +- u32 send_win, cong_win, limit, in_flight; ++ u32 send_win, cong_win, limit, in_flight, threshold; ++ u64 srtt_in_ns, expected_ack, how_far_is_the_ack; + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *head; + int win_divisor; +@@ -2251,9 +2252,19 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, + head = tcp_rtx_queue_head(sk); + if (!head) + goto send_now; +- delta = tp->tcp_clock_cache - head->tstamp; +- /* If next ACK is likely to come too late (half srtt), do not defer */ +- if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) ++ ++ srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us; ++ /* When is the ACK expected ? */ ++ expected_ack = head->tstamp + srtt_in_ns; ++ /* How far from now is the ACK expected ? */ ++ how_far_is_the_ack = expected_ack - tp->tcp_clock_cache; ++ ++ /* If next ACK is likely to come too late, ++ * ie in more than min(1ms, half srtt), do not defer. ++ */ ++ threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC); ++ ++ if ((s64)(how_far_is_the_ack - threshold) > 0) + goto send_now; + + /* Ok, it looks like it is advisable to defer. +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index d645d022ce7745..e635ddd41aba6c 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -1255,8 +1255,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, + */ + max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + + dst->header_len + t->hlen; +- if (max_headroom > READ_ONCE(dev->needed_headroom)) +- WRITE_ONCE(dev->needed_headroom, max_headroom); ++ ip_tunnel_adj_headroom(dev, max_headroom); + + err = ip6_tnl_encap(skb, t, &proto, fl6); + if (err) +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index d7dea82bcf5653..c7ee44bd320649 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -254,12 +254,9 @@ int tls_process_cmsg(struct sock *sk, struct msghdr *msg, + if (msg->msg_flags & MSG_MORE) + return -EINVAL; + +- rc = tls_handle_open_record(sk, msg->msg_flags); +- if (rc) +- return rc; +- + *record_type = *(unsigned char *)CMSG_DATA(cmsg); +- rc = 0; ++ ++ rc = tls_handle_open_record(sk, msg->msg_flags); + break; + default: + return -EINVAL; +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 435235a351e2f4..410e39e4b79fd2 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -1054,7 +1054,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, + if (ret == -EINPROGRESS) + num_async++; + else if (ret != -EAGAIN) +- goto send_end; ++ goto end; + } + } + +@@ -1112,8 +1112,11 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, + goto send_end; + tls_ctx->pending_open_record_frags = true; + +- if (sk_msg_full(msg_pl)) ++ if (sk_msg_full(msg_pl)) { + full_record = true; ++ sk_msg_trim(sk, msg_en, ++ msg_pl->sg.size + prot->overhead_size); ++ } + + if (full_record || 
eor)
+ goto copied;
+@@ -1149,6 +1152,13 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ } else if (ret != -EAGAIN)
+ goto send_end;
+ }
++
++ /* Transmit if any encryptions have completed */
++ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++ cancel_delayed_work(&ctx->tx_work.work);
++ tls_tx_records(sk, msg->msg_flags);
++ }
++
+ continue;
+ rollback_iter:
+ copied -= try_to_copy;
+@@ -1204,6 +1214,12 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ goto send_end;
+ }
+ }
++
++ /* Transmit if any encryptions have completed */
++ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++ cancel_delayed_work(&ctx->tx_work.work);
++ tls_tx_records(sk, msg->msg_flags);
++ }
+ }
+ 
+ continue;
+@@ -1223,9 +1239,10 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ goto alloc_encrypted;
+ }
+ 
++send_end:
+ if (!num_async) {
+- goto send_end;
+- } else if (num_zc) {
++ goto end;
++ } else if (num_zc || eor) {
+ int err;
+ 
+ /* Wait for pending encryptions to get completed */
+@@ -1242,7 +1259,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ tls_tx_records(sk, msg->msg_flags);
+ }
+ 
+-send_end:
++end:
+ ret = sk_stream_error(sk, msg->msg_flags, ret);
+ return copied > 0 ? copied : ret;
+ }
+@@ -1633,8 +1650,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 
+ if (unlikely(darg->async)) {
+ err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
+- if (err)
+- __skb_queue_tail(&ctx->async_hold, darg->skb);
++ if (err) {
++ err = tls_decrypt_async_wait(ctx);
++ darg->async = false;
++ }
+ return err;
+ }
+ 
+diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
+index c91a3c24f6070a..5416f21918e047 100644
+--- a/rust/bindings/bindings_helper.h
++++ b/rust/bindings/bindings_helper.h
+@@ -12,8 +12,10 @@
+ #include <linux/refcount.h>
+ #include <linux/wait.h>
+ #include <linux/sched.h>
++#include <linux/mm.h>
+ 
+ /* `bindgen` gets confused at certain things. */
+ const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
+ const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
+ const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
++const vm_flags_t BINDINGS_VM_MERGEABLE = VM_MERGEABLE;
+diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
+index 9bcbea04dac305..7d9078b94a8f08 100644
+--- a/rust/bindings/lib.rs
++++ b/rust/bindings/lib.rs
+@@ -51,3 +51,4 @@ mod bindings_helper {
+ 
+ pub const GFP_KERNEL: gfp_t = BINDINGS_GFP_KERNEL;
+ pub const __GFP_ZERO: gfp_t = BINDINGS___GFP_ZERO;
++pub const VM_MERGEABLE: vm_flags_t = BINDINGS_VM_MERGEABLE;
+diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
+index 775db3fc4959f5..ec10270c2cce3d 100644
+--- a/sound/firewire/amdtp-stream.h
++++ b/sound/firewire/amdtp-stream.h
+@@ -32,7 +32,7 @@
+ * allows 5 times as large as IEC 61883-6 defines.
+ * @CIP_HEADER_WITHOUT_EOH: Only for in-stream. CIP Header doesn't include
+ * valid EOH.
+- * @CIP_NO_HEADERS: a lack of headers in packets
++ * @CIP_NO_HEADER: a lack of headers in packets
+ * @CIP_UNALIGHED_DBC: Only for in-stream. The value of dbc is not alighed to
+ * the value of current SYT_INTERVAL; e.g. initial value is not zero.
+ * @CIP_UNAWARE_SYT: For outgoing packet, the value in SYT field of CIP is 0xffff. 
+diff --git a/sound/soc/codecs/idt821034.c b/sound/soc/codecs/idt821034.c
+index 2cc7b9166e695f..068a5448e273ea 100644
+--- a/sound/soc/codecs/idt821034.c
++++ b/sound/soc/codecs/idt821034.c
+@@ -548,14 +548,14 @@ static int idt821034_kctrl_mute_put(struct snd_kcontrol *kcontrol,
+ return ret;
+ }
+ 
+-static const DECLARE_TLV_DB_LINEAR(idt821034_gain_in, -6520, 1306);
+-#define IDT821034_GAIN_IN_MIN_RAW 1 /* -65.20 dB -> 10^(-65.2/20.0) * 1820 = 1 */
+-#define IDT821034_GAIN_IN_MAX_RAW 8191 /* 13.06 dB -> 10^(13.06/20.0) * 1820 = 8191 */
++static const DECLARE_TLV_DB_LINEAR(idt821034_gain_in, -300, 1300);
++#define IDT821034_GAIN_IN_MIN_RAW 1288 /* -3.0 dB -> 10^(-3.0/20.0) * 1820 = 1288 */
++#define IDT821034_GAIN_IN_MAX_RAW 8130 /* 13.0 dB -> 10^(13.0/20.0) * 1820 = 8130 */
+ #define IDT821034_GAIN_IN_INIT_RAW 1820 /* 0dB -> 10^(0/20) * 1820 = 1820 */
+ 
+-static const DECLARE_TLV_DB_LINEAR(idt821034_gain_out, -6798, 1029);
+-#define IDT821034_GAIN_OUT_MIN_RAW 1 /* -67.98 dB -> 10^(-67.98/20.0) * 2506 = 1*/
+-#define IDT821034_GAIN_OUT_MAX_RAW 8191 /* 10.29 dB -> 10^(10.29/20.0) * 2506 = 8191 */
++static const DECLARE_TLV_DB_LINEAR(idt821034_gain_out, -1300, 300);
++#define IDT821034_GAIN_OUT_MIN_RAW 561 /* -13.0 dB -> 10^(-13.0/20.0) * 2506 = 561 */
++#define IDT821034_GAIN_OUT_MAX_RAW 3540 /* 3.0 dB -> 10^(3.0/20.0) * 2506 = 3540 */
+ #define IDT821034_GAIN_OUT_INIT_RAW 2506 /* 0dB -> 10^(0/20) * 2506 = 2506 */
+ 
+ static const struct snd_kcontrol_new idt821034_controls[] = {
+diff --git a/sound/soc/codecs/nau8821.c b/sound/soc/codecs/nau8821.c
+index f307374834b557..48ed75c3a7db2b 100644
+--- a/sound/soc/codecs/nau8821.c
++++ b/sound/soc/codecs/nau8821.c
+@@ -26,7 +26,8 @@
+ #include <sound/tlv.h>
+ #include "nau8821.h"
+ 
+-#define NAU8821_JD_ACTIVE_HIGH BIT(0)
++#define NAU8821_QUIRK_JD_ACTIVE_HIGH BIT(0)
++#define NAU8821_QUIRK_JD_DB_BYPASS BIT(1)
+ 
+ static int nau8821_quirk;
+ static int quirk_override = -1;
+@@ -1030,12 +1031,17 @@ static bool nau8821_is_jack_inserted(struct regmap *regmap)
+ return active_high == is_high;
+ }
+ 
+-static void nau8821_int_status_clear_all(struct regmap *regmap)
++static void nau8821_irq_status_clear(struct regmap *regmap, int active_irq)
+ {
+- int active_irq, clear_irq, i;
++ int clear_irq, i;
+ 
+- /* Reset the intrruption status from rightmost bit if the corres-
+- * ponding irq event occurs.
++ if (active_irq) {
++ regmap_write(regmap, NAU8821_R11_INT_CLR_KEY_STATUS, active_irq);
++ return;
++ }
++
++ /* Reset the interruption status from rightmost bit if the
++ * corresponding irq event occurs.
+ */
+ regmap_read(regmap, NAU8821_R10_IRQ_STATUS, &active_irq);
+ for (i = 0; i < NAU8821_REG_DATA_LEN; i++) {
+@@ -1062,7 +1068,7 @@ static void nau8821_eject_jack(struct nau8821 *nau8821)
+ snd_soc_dapm_sync(dapm);
+ 
+ /* Clear all interruption status */
+- nau8821_int_status_clear_all(regmap);
++ nau8821_irq_status_clear(regmap, 0);
+ 
+ /* Enable the insertion interruption, disable the ejection inter-
+ * ruption, and then bypass de-bounce circuit. 
+@@ -1166,9 +1172,10 @@ static void nau8821_setup_inserted_irq(struct nau8821 *nau8821) + regmap_update_bits(regmap, NAU8821_R1D_I2S_PCM_CTRL2, + NAU8821_I2S_MS_MASK, NAU8821_I2S_MS_SLAVE); + +- /* Not bypass de-bounce circuit */ +- regmap_update_bits(regmap, NAU8821_R0D_JACK_DET_CTRL, +- NAU8821_JACK_DET_DB_BYPASS, 0); ++ /* Do not bypass de-bounce circuit */ ++ if (!(nau8821_quirk & NAU8821_QUIRK_JD_DB_BYPASS)) ++ regmap_update_bits(regmap, NAU8821_R0D_JACK_DET_CTRL, ++ NAU8821_JACK_DET_DB_BYPASS, 0); + + regmap_update_bits(regmap, NAU8821_R0F_INTERRUPT_MASK, + NAU8821_IRQ_EJECT_EN, 0); +@@ -1191,6 +1198,7 @@ static irqreturn_t nau8821_interrupt(int irq, void *data) + + if ((active_irq & NAU8821_JACK_EJECT_IRQ_MASK) == + NAU8821_JACK_EJECT_DETECTED) { ++ cancel_work_sync(&nau8821->jdet_work); + regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1, + NAU8821_MICDET_MASK, NAU8821_MICDET_DIS); + nau8821_eject_jack(nau8821); +@@ -1205,11 +1213,11 @@ static irqreturn_t nau8821_interrupt(int irq, void *data) + clear_irq = NAU8821_KEY_RELEASE_IRQ; + } else if ((active_irq & NAU8821_JACK_INSERT_IRQ_MASK) == + NAU8821_JACK_INSERT_DETECTED) { ++ cancel_work_sync(&nau8821->jdet_work); + regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1, + NAU8821_MICDET_MASK, NAU8821_MICDET_EN); + if (nau8821_is_jack_inserted(regmap)) { + /* detect microphone and jack type */ +- cancel_work_sync(&nau8821->jdet_work); + schedule_work(&nau8821->jdet_work); + /* Turn off insertion interruption at manual mode */ + regmap_update_bits(regmap, +@@ -1527,7 +1535,7 @@ static int nau8821_resume_setup(struct nau8821 *nau8821) + nau8821_configure_sysclk(nau8821, NAU8821_CLK_DIS, 0); + if (nau8821->irq) { + /* Clear all interruption status */ +- nau8821_int_status_clear_all(regmap); ++ nau8821_irq_status_clear(regmap, 0); + + /* Enable both insertion and ejection interruptions, and then + * bypass de-bounce circuit. 
+@@ -1848,7 +1856,23 @@ static const struct dmi_system_id nau8821_quirk_table[] = { + DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"), + DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P-V2"), + }, +- .driver_data = (void *)(NAU8821_JD_ACTIVE_HIGH), ++ .driver_data = (void *)(NAU8821_QUIRK_JD_ACTIVE_HIGH), ++ }, ++ { ++ /* Valve Steam Deck LCD */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Valve"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Jupiter"), ++ }, ++ .driver_data = (void *)(NAU8821_QUIRK_JD_DB_BYPASS), ++ }, ++ { ++ /* Valve Steam Deck OLED */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Valve"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"), ++ }, ++ .driver_data = (void *)(NAU8821_QUIRK_JD_DB_BYPASS), + }, + {} + }; +@@ -1890,9 +1914,12 @@ static int nau8821_i2c_probe(struct i2c_client *i2c) + + nau8821_check_quirks(); + +- if (nau8821_quirk & NAU8821_JD_ACTIVE_HIGH) ++ if (nau8821_quirk & NAU8821_QUIRK_JD_ACTIVE_HIGH) + nau8821->jkdet_polarity = 0; + ++ if (nau8821_quirk & NAU8821_QUIRK_JD_DB_BYPASS) ++ dev_dbg(dev, "Force bypassing jack detection debounce circuit\n"); ++ + nau8821_print_device_properties(nau8821); + + nau8821_reset_chip(nau8821->regmap); +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 7743ea983b1a81..9335bc20c56dfe 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -754,10 +754,16 @@ get_alias_quirk(struct usb_device *dev, unsigned int id) + */ + static int try_to_register_card(struct snd_usb_audio *chip, int ifnum) + { ++ struct usb_interface *iface; ++ + if (check_delayed_register_option(chip) == ifnum || +- chip->last_iface == ifnum || +- usb_interface_claimed(usb_ifnum_to_if(chip->dev, chip->last_iface))) ++ chip->last_iface == ifnum) ++ return snd_card_register(chip->card); ++ ++ iface = usb_ifnum_to_if(chip->dev, chip->last_iface); ++ if (iface && usb_interface_claimed(iface)) + return snd_card_register(chip->card); ++ + return 0; + } + +diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +index bb143de68875cc..e27d66b75fb1fc 100644 +--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c ++++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +@@ -144,11 +144,17 @@ static void test_parse_test_list_file(void) + if (!ASSERT_OK(ferror(fp), "prepare tmp")) + goto out_fclose; + ++ if (!ASSERT_OK(fsync(fileno(fp)), "fsync tmp")) ++ goto out_fclose; ++ + init_test_filter_set(&set); + +- ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"); ++ if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file")) ++ goto out_fclose; ++ ++ if (!ASSERT_EQ(set.cnt, 4, "test count")) ++ goto out_free_set; + +- ASSERT_EQ(set.cnt, 4, "test count"); + ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name"); + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count"); + ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name"); +@@ -158,8 +164,8 @@ static void test_parse_test_list_file(void) + ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name"); + ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name"); + ++out_free_set: + free_test_filter_set(&set); +- + out_fclose: + fclose(fp); + out_remove: diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.114-115.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.114-115.patch new file mode 100644 index 0000000000..36656e64a3 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.114-115.patch @@ -0,0 +1,3794 @@ +diff --git 
a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml +index 9ea1e4cd0709c9..69be6affa9b534 100644 +--- a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml ++++ b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml +@@ -85,13 +85,21 @@ required: + - reg + - "#address-cells" + - "#size-cells" +- - dma-ranges + - ranges + - clocks + - clock-names + - interrupts + - power-domains + ++allOf: ++ - if: ++ properties: ++ compatible: ++ const: fsl,imx8mp-dwc3 ++ then: ++ required: ++ - dma-ranges ++ + additionalProperties: false + + examples: +diff --git a/Makefile b/Makefile +index ad3952fb542d3a..85d8fa82569578 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 114 ++SUBLEVEL = 115 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +@@ -1358,11 +1358,11 @@ endif + + tools/: FORCE + $(Q)mkdir -p $(objtree)/tools +- $(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ ++ $(Q)$(MAKE) O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ + + tools/%: FORCE + $(Q)mkdir -p $(objtree)/tools +- $(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* ++ $(Q)$(MAKE) O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* + + # --------------------------------------------------------------------------- + # Kernel selftest +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 0212129b13d074..92e43b3a10df9b 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -184,7 +184,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) + static inline pte_t pte_mkwrite_novma(pte_t pte) + { + pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); +- pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); ++ if (pte_sw_dirty(pte)) ++ pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); + return pte; + } + +diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h +index e984af71df6bee..d86aa744cb8fc4 100644 +--- a/arch/m68k/include/asm/bitops.h ++++ b/arch/m68k/include/asm/bitops.h +@@ -329,12 +329,12 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) + #include + #else + +-static inline int find_first_zero_bit(const unsigned long *vaddr, +- unsigned size) ++static inline unsigned long find_first_zero_bit(const unsigned long *vaddr, ++ unsigned long size) + { + const unsigned long *p = vaddr; +- int res = 32; +- unsigned int words; ++ unsigned long res = 32; ++ unsigned long words; + unsigned long num; + + if (!size) +@@ -355,8 +355,9 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, + } + #define find_first_zero_bit find_first_zero_bit + +-static inline int find_next_zero_bit(const unsigned long *vaddr, int size, +- int offset) ++static inline unsigned long find_next_zero_bit(const unsigned long *vaddr, ++ unsigned long size, ++ unsigned long offset) + { + const unsigned long *p = vaddr + (offset >> 5); + int bit = offset & 31UL, res; +@@ -385,11 +386,12 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, + } + #define find_next_zero_bit find_next_zero_bit + +-static inline int find_first_bit(const unsigned long *vaddr, unsigned size) ++static inline unsigned long find_first_bit(const unsigned long *vaddr, ++ unsigned long size) + { + const unsigned long *p = vaddr; +- int res = 32; +- unsigned int words; ++ unsigned long res = 32; ++ unsigned long 
words; + unsigned long num; + + if (!size) +@@ -410,8 +412,9 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size) + } + #define find_first_bit find_first_bit + +-static inline int find_next_bit(const unsigned long *vaddr, int size, +- int offset) ++static inline unsigned long find_next_bit(const unsigned long *vaddr, ++ unsigned long size, ++ unsigned long offset) + { + const unsigned long *p = vaddr + (offset >> 5); + int bit = offset & 31UL, res; +diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c +index 21cb3ac1237b76..020c38e6c5de08 100644 +--- a/arch/mips/mti-malta/malta-setup.c ++++ b/arch/mips/mti-malta/malta-setup.c +@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = { + .name = "keyboard", + .start = 0x60, + .end = 0x6f, +- .flags = IORESOURCE_IO | IORESOURCE_BUSY ++ .flags = IORESOURCE_IO + }, + { + .name = "dma page reg", +diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c +index 8582ed9658447b..5308c761228173 100644 +--- a/arch/nios2/kernel/setup.c ++++ b/arch/nios2/kernel/setup.c +@@ -147,6 +147,20 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low, + *max_high = PFN_DOWN(memblock_end_of_DRAM()); + } + ++static void __init adjust_lowmem_bounds(void) ++{ ++ phys_addr_t block_start, block_end; ++ u64 i; ++ phys_addr_t memblock_limit = 0; ++ ++ for_each_mem_range(i, &block_start, &block_end) { ++ if (block_end > memblock_limit) ++ memblock_limit = block_end; ++ } ++ ++ memblock_set_current_limit(memblock_limit); ++} ++ + void __init setup_arch(char **cmdline_p) + { + console_verbose(); +@@ -160,6 +174,7 @@ void __init setup_arch(char **cmdline_p) + /* Keep a copy of command line */ + *cmdline_p = boot_command_line; + ++ adjust_lowmem_bounds(); + find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); + max_mapnr = max_low_pfn; + +diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h +index d0ee46de248eaf..74502f91ed9362 100644 +--- a/arch/powerpc/include/asm/pgtable.h ++++ b/arch/powerpc/include/asm/pgtable.h +@@ -20,18 +20,6 @@ struct mm_struct; + #include + #endif /* !CONFIG_PPC_BOOK3S */ + +-/* +- * Protection used for kernel text. We want the debuggers to be able to +- * set breakpoints anywhere, so don't write protect the kernel text +- * on platforms where such control is possible. +- */ +-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \ +- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +-#else +-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +-#endif +- + /* Make modules code happy. We don't set RO yet */ + #define PAGE_KERNEL_EXEC PAGE_KERNEL_X + +diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c +index 850783cfa9c730..1b1848761a0008 100644 +--- a/arch/powerpc/mm/book3s32/mmu.c ++++ b/arch/powerpc/mm/book3s32/mmu.c +@@ -204,7 +204,7 @@ void mmu_mark_initmem_nx(void) + + for (i = 0; i < nb - 1 && base < top;) { + size = bat_block_size(base, top); +- setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); ++ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); + base += size; + } + if (base < top) { +@@ -215,7 +215,7 @@ void mmu_mark_initmem_nx(void) + pr_warn("Some RW data is getting mapped X. 
" + "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); + } +- setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); ++ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); + base += size; + } + for (; i < nb; i++) +diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c +index 5c02fd08d61eff..69fac96c2dcd12 100644 +--- a/arch/powerpc/mm/pgtable_32.c ++++ b/arch/powerpc/mm/pgtable_32.c +@@ -109,7 +109,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) + p = memstart_addr + s; + for (; s < top; s += PAGE_SIZE) { + ktext = core_kernel_text(v); +- map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL); ++ map_kernel_page(v, p, ktext ? PAGE_KERNEL_X : PAGE_KERNEL); + v += PAGE_SIZE; + p += PAGE_SIZE; + } +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h +index 332a6bf72b1d54..987cfe87e78252 100644 +--- a/arch/riscv/include/asm/pgtable.h ++++ b/arch/riscv/include/asm/pgtable.h +@@ -618,6 +618,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) + return __pgprot(prot); + } + ++#define pgprot_dmacoherent pgprot_writecombine ++ + /* + * THP functions + */ +diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c +index 88732abecd0230..93e794d0e5231b 100644 +--- a/arch/riscv/kernel/cpu.c ++++ b/arch/riscv/kernel/cpu.c +@@ -61,10 +61,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo + return -ENODEV; + } + +- if (!of_device_is_available(node)) { +- pr_info("CPU with hartid=%lu is not available\n", *hart); ++ if (!of_device_is_available(node)) + return -ENODEV; +- } + + if (of_property_read_string(node, "riscv,isa-base", &isa)) + goto old_interface; +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c +index 2cb30d9c5b4ae7..e689e3981bd9e1 100644 +--- a/arch/x86/kernel/cpu/microcode/amd.c ++++ b/arch/x86/kernel/cpu/microcode/amd.c +@@ -184,7 +184,7 @@ static bool need_sha_check(u32 cur_rev) + } + + switch (cur_rev >> 8) { +- case 0x80012: return cur_rev <= 0x800126f; break; ++ case 0x80012: return cur_rev <= 0x8001277; break; + case 0x80082: return cur_rev <= 0x800820f; break; + case 0x83010: return cur_rev <= 0x830107c; break; + case 0x86001: return cur_rev <= 0x860010e; break; +diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c +index 3a6c069614eb84..976bdf15be22fd 100644 +--- a/arch/x86/kernel/cpu/resctrl/monitor.c ++++ b/arch/x86/kernel/cpu/resctrl/monitor.c +@@ -241,11 +241,15 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, + if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) + return -EINVAL; + ++ am = get_arch_mbm_state(hw_dom, rmid, eventid); ++ + ret = __rmid_read(rmid, eventid, &msr_val); +- if (ret) ++ if (ret) { ++ if (am && ret == -EINVAL) ++ am->prev_msr = 0; + return ret; ++ } + +- am = get_arch_mbm_state(hw_dom, rmid, eventid); + if (am) { + am->chunks += mbm_overflow_count(am->prev_msr, msr_val, + hw_res->mbm_width); +diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c +index 58b02e4b254b88..544964c9c530ae 100644 +--- a/drivers/acpi/acpica/tbprint.c ++++ b/drivers/acpi/acpica/tbprint.c +@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address, + { + struct acpi_table_header local_header; + ++#pragma GCC diagnostic push ++#if defined(__GNUC__) && __GNUC__ >= 11 ++#pragma GCC diagnostic ignored "-Wstringop-overread" ++#endif ++ + if (ACPI_COMPARE_NAMESEG(header->signature, 
ACPI_SIG_FACS)) { + + /* FACS only has signature and length fields */ +@@ -135,4 +140,5 @@ acpi_tb_print_table_header(acpi_physical_address address, + local_header.asl_compiler_id, + local_header.asl_compiler_revision)); + } ++#pragma GCC diagnostic pop + } +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 94f10c6eb336a5..e5096fcfad5760 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -846,17 +846,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, + } else { + if (!internal) + node->local_weak_refs++; +- if (!node->has_weak_ref && list_empty(&node->work.entry)) { +- if (target_list == NULL) { +- pr_err("invalid inc weak node for %d\n", +- node->debug_id); +- return -EINVAL; +- } +- /* +- * See comment above +- */ ++ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry)) + binder_enqueue_work_ilocked(&node->work, target_list); +- } + } + return 0; + } +diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c +index 2aa0c642529021..3a14ed36eb92d7 100644 +--- a/drivers/base/arch_topology.c ++++ b/drivers/base/arch_topology.c +@@ -326,7 +326,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) + * frequency (by keeping the initial capacity_freq_ref value). + */ + cpu_clk = of_clk_get(cpu_node, 0); +- if (!PTR_ERR_OR_ZERO(cpu_clk)) { ++ if (!IS_ERR_OR_NULL(cpu_clk)) { + per_cpu(capacity_freq_ref, cpu) = + clk_get_rate(cpu_clk) / HZ_PER_KHZ; + clk_put(cpu_clk); +diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c +index 7e2d1f0d903a6e..6e6bf8be00664f 100644 +--- a/drivers/base/devcoredump.c ++++ b/drivers/base/devcoredump.c +@@ -26,50 +26,46 @@ struct devcd_entry { + void *data; + size_t datalen; + /* +- * Here, mutex is required to serialize the calls to del_wk work between +- * user/kernel space which happens when devcd is added with device_add() +- * and that sends uevent to user space. User space reads the uevents, +- * and calls to devcd_data_write() which try to modify the work which is +- * not even initialized/queued from devcoredump. ++ * There are 2 races for which mutex is required. + * ++ * The first race is between device creation and userspace writing to ++ * schedule immediately destruction. + * ++ * This race is handled by arming the timer before device creation, but ++ * when device creation fails the timer still exists. + * +- * cpu0(X) cpu1(Y) ++ * To solve this, hold the mutex during device_add(), and set ++ * init_completed on success before releasing the mutex. + * +- * dev_coredump() uevent sent to user space +- * device_add() ======================> user space process Y reads the +- * uevents writes to devcd fd +- * which results into writes to ++ * That way the timer will never fire until device_add() is called, ++ * it will do nothing if init_completed is not set. The timer is also ++ * cancelled in that case. + * +- * devcd_data_write() +- * mod_delayed_work() +- * try_to_grab_pending() +- * del_timer() +- * debug_assert_init() +- * INIT_DELAYED_WORK() +- * schedule_delayed_work() +- * +- * +- * Also, mutex alone would not be enough to avoid scheduling of +- * del_wk work after it get flush from a call to devcd_free() +- * mentioned as below. +- * +- * disabled_store() +- * devcd_free() +- * mutex_lock() devcd_data_write() +- * flush_delayed_work() +- * mutex_unlock() +- * mutex_lock() +- * mod_delayed_work() +- * mutex_unlock() +- * So, delete_work flag is required. 
++ * The second race involves multiple parallel invocations of devcd_free();
++ * a deleted flag is added so only one of them can call the destructor.
+ */
+ struct mutex mutex;
+- bool delete_work;
++ bool init_completed, deleted;
+ struct module *owner;
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen);
+ void (*free)(void *data);
++ /*
++ * If nothing interferes and device_add() returns success,
++ * del_wk will destroy the device after the timer fires.
++ *
++ * Multiple userspace processes can interfere with the working of the timer:
++ * - Writing to the coredump will reschedule the timer to run immediately,
++ * if still armed.
++ *
++ * This is handled by using "if (cancel_delayed_work()) {
++ * schedule_delayed_work() }", to prevent re-arming after having
++ * been previously fired.
++ * - Writing to /sys/class/devcoredump/disabled will destroy the
++ * coredump synchronously.
++ * This is handled by using cancel_delayed_work_sync(), and then
++ * checking if the deleted flag is set with &devcd->mutex held.
++ */
+ struct delayed_work del_wk;
+ struct device *failing_dev;
+ };
+@@ -98,14 +94,27 @@ static void devcd_dev_release(struct device *dev)
+ kfree(devcd);
+ }
+ 
++static void __devcd_del(struct devcd_entry *devcd)
++{
++ devcd->deleted = true;
++ device_del(&devcd->devcd_dev);
++ put_device(&devcd->devcd_dev);
++}
++
+ static void devcd_del(struct work_struct *wk)
+ {
+ struct devcd_entry *devcd;
++ bool init_completed;
+ 
+ devcd = container_of(wk, struct devcd_entry, del_wk.work);
+ 
+- device_del(&devcd->devcd_dev);
+- put_device(&devcd->devcd_dev);
++ /* devcd->mutex serializes against dev_coredumpm_timeout */
++ mutex_lock(&devcd->mutex);
++ init_completed = devcd->init_completed;
++ mutex_unlock(&devcd->mutex);
++
++ if (init_completed)
++ __devcd_del(devcd);
+ }
+ 
+ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
+@@ -125,12 +134,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+ 
+- mutex_lock(&devcd->mutex);
+- if (!devcd->delete_work) {
+- devcd->delete_work = true;
+- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+- }
+- mutex_unlock(&devcd->mutex);
++ /*
++ * Although it's tempting to use mod_delayed_work() here,
++ * that will cause a reschedule if the timer already fired.
++ */
++ if (cancel_delayed_work(&devcd->del_wk))
++ schedule_delayed_work(&devcd->del_wk, 0);
+ 
+ return count;
+ }
+@@ -158,11 +167,21 @@ static int devcd_free(struct device *dev, void *data)
+ {
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+ 
++ /*
++ * To prevent a race with devcd_data_write(), cancel work and
++ * complete manually instead.
++ *
++ * We cannot rely on the return value of
++ * cancel_delayed_work_sync() here, because it might be in the
++ * middle of a cancel_delayed_work + schedule_delayed_work pair.
++ *
++ * devcd->mutex here guards against multiple parallel invocations
++ * of devcd_free(). 
++ */ ++ cancel_delayed_work_sync(&devcd->del_wk); + mutex_lock(&devcd->mutex); +- if (!devcd->delete_work) +- devcd->delete_work = true; +- +- flush_delayed_work(&devcd->del_wk); ++ if (!devcd->deleted) ++ __devcd_del(devcd); + mutex_unlock(&devcd->mutex); + return 0; + } +@@ -186,12 +205,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri + * put_device() <- last reference + * error = fn(dev, data) devcd_dev_release() + * devcd_free(dev, data) kfree(devcd) +- * mutex_lock(&devcd->mutex); + * + * +- * In the above diagram, It looks like disabled_store() would be racing with parallely +- * running devcd_del() and result in memory abort while acquiring devcd->mutex which +- * is called after kfree of devcd memory after dropping its last reference with ++ * In the above diagram, it looks like disabled_store() would be racing with parallelly ++ * running devcd_del() and result in memory abort after dropping its last reference with + * put_device(). However, this will not happens as fn(dev, data) runs + * with its own reference to device via klist_node so it is not its last reference. + * so, above situation would not occur. +@@ -352,7 +369,7 @@ void dev_coredumpm(struct device *dev, struct module *owner, + devcd->read = read; + devcd->free = free; + devcd->failing_dev = get_device(dev); +- devcd->delete_work = false; ++ devcd->deleted = false; + + mutex_init(&devcd->mutex); + device_initialize(&devcd->devcd_dev); +@@ -361,8 +378,14 @@ void dev_coredumpm(struct device *dev, struct module *owner, + atomic_inc_return(&devcd_count)); + devcd->devcd_dev.class = &devcd_class; + +- mutex_lock(&devcd->mutex); + dev_set_uevent_suppress(&devcd->devcd_dev, true); ++ ++ /* devcd->mutex prevents devcd_del() completing until init finishes */ ++ mutex_lock(&devcd->mutex); ++ devcd->init_completed = false; ++ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); ++ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); ++ + if (device_add(&devcd->devcd_dev)) + goto put_device; + +@@ -379,13 +402,20 @@ void dev_coredumpm(struct device *dev, struct module *owner, + + dev_set_uevent_suppress(&devcd->devcd_dev, false); + kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); +- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); +- schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); ++ ++ /* ++ * Safe to run devcd_del() now that we are done with devcd_dev. ++ * Alternatively we could have taken a ref on devcd_dev before ++ * dropping the lock. 
++ */
++ devcd->init_completed = true;
+ mutex_unlock(&devcd->mutex);
+ return;
+ put_device:
+- put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
++ cancel_delayed_work_sync(&devcd->del_wk);
++ put_device(&devcd->devcd_dev);
++
+ put_module:
+ module_put(owner);
+ free:
+diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
+index 393966c097405d..ef38e9ad98f62b 100644
+--- a/drivers/comedi/comedi_buf.c
++++ b/drivers/comedi/comedi_buf.c
+@@ -368,7 +368,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
+ unsigned int count = 0;
+ const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
+ 
+- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
++ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
+ async->munge_count += num_bytes;
+ return num_bytes;
+ }
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index 92f9c00ad5f9fb..cafe6eed3349f4 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -230,20 +230,17 @@ static unsigned int get_typical_interval(struct menu_device *data)
+ *
+ * This can deal with workloads that have long pauses interspersed
+ * with sporadic activity with a bunch of short pauses.
++ *
++ * However, if the number of remaining samples is too small to exclude
++ * any more outliers, allow the deepest available idle state to be
++ * selected because there are systems where the time spent by CPUs in
++ * deep idle states is correlated to the maximum frequency the CPUs
++ * can get to. On those systems, shallow idle states should be avoided
++ * unless there is a clear indication that the given CPU is most likely
++ * going to be woken up shortly.
+ */
+- if (divisor * 4 <= INTERVALS * 3) {
+- /*
+- * If there are sufficiently many data points still under
+- * consideration after the outliers have been eliminated,
+- * returning without a prediction would be a mistake because it
+- * is likely that the next interval will not exceed the current
+- * maximum, so return the latter in that case. 
+- */ +- if (divisor >= INTERVALS / 2) +- return max; +- ++ if (divisor * 4 <= INTERVALS * 3) + return UINT_MAX; +- } + + thresh = max - 1; + goto again; +diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h +index 6c223487121544..dc95652fff4009 100644 +--- a/drivers/firmware/arm_scmi/common.h ++++ b/drivers/firmware/arm_scmi/common.h +@@ -321,10 +321,28 @@ enum debug_counters { + SCMI_DEBUG_COUNTERS_LAST + }; + +-static inline void scmi_inc_count(atomic_t *arr, int stat) ++/** ++ * struct scmi_debug_info - Debug common info ++ * @top_dentry: A reference to the top debugfs dentry ++ * @name: Name of this SCMI instance ++ * @type: Type of this SCMI instance ++ * @is_atomic: Flag to state if the transport of this instance is atomic ++ * @counters: An array of atomic_c's used for tracking statistics (if enabled) ++ */ ++struct scmi_debug_info { ++ struct dentry *top_dentry; ++ const char *name; ++ const char *type; ++ bool is_atomic; ++ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; ++}; ++ ++static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat) + { +- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) +- atomic_inc(&arr[stat]); ++ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { ++ if (dbg) ++ atomic_inc(&dbg->counters[stat]); ++ } + } + + enum scmi_bad_msg { +diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c +index d1fd2e492909e5..fbe893734411c8 100644 +--- a/drivers/firmware/arm_scmi/driver.c ++++ b/drivers/firmware/arm_scmi/driver.c +@@ -102,22 +102,6 @@ struct scmi_protocol_instance { + + #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) + +-/** +- * struct scmi_debug_info - Debug common info +- * @top_dentry: A reference to the top debugfs dentry +- * @name: Name of this SCMI instance +- * @type: Type of this SCMI instance +- * @is_atomic: Flag to state if the transport of this instance is atomic +- * @counters: An array of atomic_c's used for tracking statistics (if enabled) +- */ +-struct scmi_debug_info { +- struct dentry *top_dentry; +- const char *name; +- const char *type; +- bool is_atomic; +- atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; +-}; +- + /** + * struct scmi_info - Structure representing a SCMI instance + * +@@ -643,6 +627,7 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) + hash_del(&xfer->node); + xfer->pending = false; + } ++ xfer->flags = 0; + hlist_add_head(&xfer->node, &minfo->free_xfers); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); +@@ -661,8 +646,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) + { + struct scmi_info *info = handle_to_scmi_info(handle); + +- xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; +- xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; + return __scmi_xfer_put(&info->tx_minfo, xfer); + } + +@@ -856,7 +839,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); +- scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); ++ scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED); + + return xfer; + } +@@ -884,7 +867,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) + msg_type, xfer_id, msg_hdr, xfer->state); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); +- scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); ++ scmi_inc_count(info->dbg, ERR_MSG_INVALID); + + + /* On error the refcount incremented above has to be dropped */ +@@ -930,7 
+913,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, + PTR_ERR(xfer)); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); +- scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); ++ scmi_inc_count(info->dbg, ERR_MSG_NOMEM); + + scmi_clear_channel(info, cinfo); + return; +@@ -946,7 +929,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, + trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, "NOTI", xfer->hdr.seq, + xfer->hdr.status, xfer->rx.buf, xfer->rx.len); +- scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); ++ scmi_inc_count(info->dbg, NOTIFICATION_OK); + + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, + xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); +@@ -1006,10 +989,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { + scmi_clear_channel(info, cinfo); + complete(xfer->async_done); +- scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); ++ scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK); + } else { + complete(&xfer->done); +- scmi_inc_count(info->dbg->counters, RESPONSE_OK); ++ scmi_inc_count(info->dbg, RESPONSE_OK); + } + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { +@@ -1117,7 +1100,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; +- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); ++ scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT); + } + } + +@@ -1142,7 +1125,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + "RESP" : "resp", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); +- scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); ++ scmi_inc_count(info->dbg, RESPONSE_POLLED_OK); + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + struct scmi_info *info = +@@ -1160,7 +1143,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + dev_err(dev, "timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; +- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); ++ scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT); + } + } + +@@ -1244,13 +1227,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph, + !is_transport_polling_capable(info->desc)) { + dev_warn_once(dev, + "Polling mode is not supported by transport.\n"); +- scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); ++ scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED); + return -EINVAL; + } + + cinfo = idr_find(&info->tx_idr, pi->proto->id); + if (unlikely(!cinfo)) { +- scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); ++ scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND); + return -EINVAL; + } + /* True ONLY if also supported by transport. 
*/
+@@ -1284,19 +1267,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
+ ret = info->desc->ops->send_message(cinfo, xfer);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to send message %d\n", ret);
+- scmi_inc_count(info->dbg->counters, SENT_FAIL);
++ scmi_inc_count(info->dbg, SENT_FAIL);
+ return ret;
+ }
+ 
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "CMND", xfer->hdr.seq,
+ xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
+- scmi_inc_count(info->dbg->counters, SENT_OK);
++ scmi_inc_count(info->dbg, SENT_OK);
+ 
+ ret = scmi_wait_for_message_response(cinfo, xfer);
+ if (!ret && xfer->hdr.status) {
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+- scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
++ scmi_inc_count(info->dbg, ERR_PROTOCOL);
+ }
+ 
+ if (info->desc->ops->mark_txdone)
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index ebd4e113dc2654..de051a085e63fa 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -1313,9 +1313,9 @@ config GPIO_KEMPLD
+ 
+ config GPIO_LJCA
+ tristate "INTEL La Jolla Cove Adapter GPIO support"
+- depends on MFD_LJCA
++ depends on USB_LJCA
+ select GPIOLIB_IRQCHIP
+- default MFD_LJCA
++ default USB_LJCA
+ help
+ Select this option to enable GPIO driver for the INTEL
+ La Jolla Cove Adapter (LJCA) board.
+diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
+index f03ccd0f534cff..f60872d1e8a580 100644
+--- a/drivers/gpio/gpio-104-idio-16.c
++++ b/drivers/gpio/gpio-104-idio-16.c
+@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
++ .max_register = 0x5,
+ .wr_table = &idio_16_wr_table,
+ .rd_table = &idio_16_rd_table,
+ .volatile_table = &idio_16_rd_table,
+diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
+index 87863f0230f5cf..c3a595c6f6c723 100644
+--- a/drivers/gpio/gpio-ljca.c
++++ b/drivers/gpio/gpio-ljca.c
+@@ -6,6 +6,7 @@
+ */
+ 
+ #include <linux/acpi.h>
++#include <linux/auxiliary_bus.h>
+ #include <linux/bitfield.h>
+ #include <linux/bitops.h>
+ #include <linux/dev_printk.h>
+@@ -13,19 +14,18 @@
+ #include <linux/gpio/driver.h>
+ #include <linux/irq.h>
+ #include <linux/kernel.h>
+-#include <linux/mfd/ljca.h>
+ #include <linux/module.h>
+-#include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
++#include <linux/usb/ljca.h>
+ 
+ /* GPIO commands */
+-#define LJCA_GPIO_CONFIG 1
+-#define LJCA_GPIO_READ 2
+-#define LJCA_GPIO_WRITE 3
+-#define LJCA_GPIO_INT_EVENT 4
+-#define LJCA_GPIO_INT_MASK 5
+-#define LJCA_GPIO_INT_UNMASK 6
++#define LJCA_GPIO_CONFIG 1
++#define LJCA_GPIO_READ 2
++#define LJCA_GPIO_WRITE 3
++#define LJCA_GPIO_INT_EVENT 4
++#define LJCA_GPIO_INT_MASK 5
++#define LJCA_GPIO_INT_UNMASK 6
+ 
+ #define LJCA_GPIO_CONF_DISABLE BIT(0)
+ #define LJCA_GPIO_CONF_INPUT BIT(1)
+@@ -36,89 +36,93 @@
+ #define LJCA_GPIO_CONF_INTERRUPT BIT(6)
+ #define LJCA_GPIO_INT_TYPE BIT(7)
+ 
+-#define LJCA_GPIO_CONF_EDGE FIELD_PREP(LJCA_GPIO_INT_TYPE, 1)
+-#define LJCA_GPIO_CONF_LEVEL FIELD_PREP(LJCA_GPIO_INT_TYPE, 0)
++#define LJCA_GPIO_CONF_EDGE FIELD_PREP(LJCA_GPIO_INT_TYPE, 1)
++#define LJCA_GPIO_CONF_LEVEL FIELD_PREP(LJCA_GPIO_INT_TYPE, 0)
+ 
+ /* Intentional overlap with PULLUP / PULLDOWN */
+-#define LJCA_GPIO_CONF_SET BIT(3)
+-#define LJCA_GPIO_CONF_CLR BIT(4)
++#define LJCA_GPIO_CONF_SET BIT(3)
++#define LJCA_GPIO_CONF_CLR BIT(4)
+ 
+-struct gpio_op {
++#define LJCA_GPIO_BUF_SIZE 60u
++
++struct ljca_gpio_op {
+ u8 index;
+ u8 value;
+ } __packed;
+ 
+-struct gpio_packet {
++struct ljca_gpio_packet {
+ u8 num;
+- struct gpio_op item[];
++ struct ljca_gpio_op item[] __counted_by(num);
+ } __packed;
+ 
+-#define LJCA_GPIO_BUF_SIZE 60
+ struct ljca_gpio_dev {
+- struct platform_device *pdev;
++ 
struct ljca_client *ljca; + struct gpio_chip gc; + struct ljca_gpio_info *gpio_info; + DECLARE_BITMAP(unmasked_irqs, LJCA_MAX_GPIO_NUM); + DECLARE_BITMAP(enabled_irqs, LJCA_MAX_GPIO_NUM); + DECLARE_BITMAP(reenable_irqs, LJCA_MAX_GPIO_NUM); ++ DECLARE_BITMAP(output_enabled, LJCA_MAX_GPIO_NUM); + u8 *connect_mode; +- /* mutex to protect irq bus */ ++ /* protect irq bus */ + struct mutex irq_lock; + struct work_struct work; +- /* lock to protect package transfer to Hardware */ ++ /* protect package transfer to hardware */ + struct mutex trans_lock; + + u8 obuf[LJCA_GPIO_BUF_SIZE]; + u8 ibuf[LJCA_GPIO_BUF_SIZE]; + }; + +-static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config) ++static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, ++ u8 config) + { +- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; ++ struct ljca_gpio_packet *packet = ++ (struct ljca_gpio_packet *)ljca_gpio->obuf; + int ret; + + mutex_lock(&ljca_gpio->trans_lock); ++ packet->num = 1; + packet->item[0].index = gpio_id; + packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id]; +- packet->num = 1; + +- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_CONFIG, packet, +- struct_size(packet, item, packet->num), NULL, NULL); ++ ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet, ++ struct_size(packet, item, packet->num), NULL, 0); + mutex_unlock(&ljca_gpio->trans_lock); +- return ret; ++ ++ return ret < 0 ? ret : 0; + } + + static int ljca_gpio_read(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id) + { +- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; +- struct gpio_packet *ack_packet = (struct gpio_packet *)ljca_gpio->ibuf; +- unsigned int ibuf_len = LJCA_GPIO_BUF_SIZE; ++ struct ljca_gpio_packet *ack_packet = ++ (struct ljca_gpio_packet *)ljca_gpio->ibuf; ++ struct ljca_gpio_packet *packet = ++ (struct ljca_gpio_packet *)ljca_gpio->obuf; + int ret; + + mutex_lock(&ljca_gpio->trans_lock); + packet->num = 1; + packet->item[0].index = gpio_id; +- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_READ, packet, +- struct_size(packet, item, packet->num), ljca_gpio->ibuf, &ibuf_len); +- if (ret) +- goto out_unlock; +- +- if (!ibuf_len || ack_packet->num != packet->num) { +- dev_err(&ljca_gpio->pdev->dev, "failed gpio_id:%u %u", gpio_id, ack_packet->num); +- ret = -EIO; ++ ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_READ, (u8 *)packet, ++ struct_size(packet, item, packet->num), ++ ljca_gpio->ibuf, LJCA_GPIO_BUF_SIZE); ++ ++ if (ret <= 0 || ack_packet->num != packet->num) { ++ dev_err(&ljca_gpio->ljca->auxdev.dev, ++ "read package error, gpio_id: %u num: %u ret: %d\n", ++ gpio_id, ack_packet->num, ret); ++ ret = ret < 0 ? ret : -EIO; + } +- +-out_unlock: + mutex_unlock(&ljca_gpio->trans_lock); +- if (ret) +- return ret; +- return ack_packet->item[0].value > 0; ++ ++ return ret < 0 ? 
ret : ack_packet->item[0].value > 0; + } + +-static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, +- int value) ++static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, int value) + { +- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; ++ struct ljca_gpio_packet *packet = ++ (struct ljca_gpio_packet *)ljca_gpio->obuf; + int ret; + + mutex_lock(&ljca_gpio->trans_lock); +@@ -126,10 +130,11 @@ static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, + packet->item[0].index = gpio_id; + packet->item[0].value = value & 1; + +- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_WRITE, packet, +- struct_size(packet, item, packet->num), NULL, NULL); ++ ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_WRITE, (u8 *)packet, ++ struct_size(packet, item, packet->num), NULL, 0); + mutex_unlock(&ljca_gpio->trans_lock); +- return ret; ++ ++ return ret < 0 ? ret : 0; + } + + static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset) +@@ -147,16 +152,24 @@ static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset, + + ret = ljca_gpio_write(ljca_gpio, offset, val); + if (ret) +- dev_err(chip->parent, "offset:%u val:%d set value failed %d\n", offset, val, ret); ++ dev_err(chip->parent, ++ "set value failed offset: %u val: %d ret: %d\n", ++ offset, val, ret); + } + +-static int ljca_gpio_direction_input(struct gpio_chip *chip, +- unsigned int offset) ++static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) + { + struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); + u8 config = LJCA_GPIO_CONF_INPUT | LJCA_GPIO_CONF_CLR; ++ int ret; + +- return gpio_config(ljca_gpio, offset, config); ++ ret = ljca_gpio_config(ljca_gpio, offset, config); ++ if (ret) ++ return ret; ++ ++ clear_bit(offset, ljca_gpio->output_enabled); ++ ++ return 0; + } + + static int ljca_gpio_direction_output(struct gpio_chip *chip, +@@ -166,14 +179,26 @@ static int ljca_gpio_direction_output(struct gpio_chip *chip, + u8 config = LJCA_GPIO_CONF_OUTPUT | LJCA_GPIO_CONF_CLR; + int ret; + +- ret = gpio_config(ljca_gpio, offset, config); ++ ret = ljca_gpio_config(ljca_gpio, offset, config); + if (ret) + return ret; + + ljca_gpio_set_value(chip, offset, val); ++ set_bit(offset, ljca_gpio->output_enabled); ++ + return 0; + } + ++static int ljca_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); ++ ++ if (test_bit(offset, ljca_gpio->output_enabled)) ++ return GPIO_LINE_DIRECTION_OUT; ++ ++ return GPIO_LINE_DIRECTION_IN; ++} ++ + static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset, + unsigned long config) + { +@@ -197,7 +222,8 @@ static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset, + return 0; + } + +-static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask, ++static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, ++ unsigned long *valid_mask, + unsigned int ngpios) + { + struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); +@@ -208,15 +234,18 @@ static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, unsigned long *vali + return 0; + } + +-static void ljca_gpio_irq_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask, ++static void ljca_gpio_irq_init_valid_mask(struct gpio_chip *chip, ++ unsigned long *valid_mask, + unsigned int ngpios) + { + ljca_gpio_init_valid_mask(chip, valid_mask, ngpios); + } + +-static int 
ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, bool enable) ++static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, ++ bool enable) + { +- struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; ++ struct ljca_gpio_packet *packet = ++ (struct ljca_gpio_packet *)ljca_gpio->obuf; + int ret; + + mutex_lock(&ljca_gpio->trans_lock); +@@ -224,18 +253,20 @@ static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, bool en + packet->item[0].index = gpio_id; + packet->item[0].value = 0; + +- ret = ljca_transfer(ljca_gpio->gpio_info->ljca, +- enable ? LJCA_GPIO_INT_UNMASK : LJCA_GPIO_INT_MASK, packet, +- struct_size(packet, item, packet->num), NULL, NULL); ++ ret = ljca_transfer(ljca_gpio->ljca, ++ enable ? LJCA_GPIO_INT_UNMASK : LJCA_GPIO_INT_MASK, ++ (u8 *)packet, struct_size(packet, item, packet->num), ++ NULL, 0); + mutex_unlock(&ljca_gpio->trans_lock); +- return ret; ++ ++ return ret < 0 ? ret : 0; + } + + static void ljca_gpio_async(struct work_struct *work) + { +- struct ljca_gpio_dev *ljca_gpio = container_of(work, struct ljca_gpio_dev, work); +- int gpio_id; +- int unmasked; ++ struct ljca_gpio_dev *ljca_gpio = ++ container_of(work, struct ljca_gpio_dev, work); ++ int gpio_id, unmasked; + + for_each_set_bit(gpio_id, ljca_gpio->reenable_irqs, ljca_gpio->gc.ngpio) { + clear_bit(gpio_id, ljca_gpio->reenable_irqs); +@@ -245,25 +276,19 @@ static void ljca_gpio_async(struct work_struct *work) + } + } + +-static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, int len) ++static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, ++ int len) + { +- const struct gpio_packet *packet = evt_data; ++ const struct ljca_gpio_packet *packet = evt_data; + struct ljca_gpio_dev *ljca_gpio = context; + int i; +- int irq; + + if (cmd != LJCA_GPIO_INT_EVENT) + return; + + for (i = 0; i < packet->num; i++) { +- irq = irq_find_mapping(ljca_gpio->gc.irq.domain, packet->item[i].index); +- if (!irq) { +- dev_err(ljca_gpio->gc.parent, "gpio_id %u does not mapped to IRQ yet\n", +- packet->item[i].index); +- return; +- } +- +- generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq); ++ generic_handle_domain_irq(ljca_gpio->gc.irq.domain, ++ packet->item[i].index); + set_bit(packet->item[i].index, ljca_gpio->reenable_irqs); + } + +@@ -299,18 +324,22 @@ static int ljca_irq_set_type(struct irq_data *irqd, unsigned int type) + ljca_gpio->connect_mode[gpio_id] = LJCA_GPIO_CONF_INTERRUPT; + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: +- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLUP); ++ ljca_gpio->connect_mode[gpio_id] |= ++ (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLUP); + break; + case IRQ_TYPE_LEVEL_LOW: +- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLDOWN); ++ ljca_gpio->connect_mode[gpio_id] |= ++ (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLDOWN); + break; + case IRQ_TYPE_EDGE_BOTH: + break; + case IRQ_TYPE_EDGE_RISING: +- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLUP); ++ ljca_gpio->connect_mode[gpio_id] |= ++ (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLUP); + break; + case IRQ_TYPE_EDGE_FALLING: +- ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLDOWN); ++ ljca_gpio->connect_mode[gpio_id] |= ++ (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLDOWN); + break; + default: + return -EINVAL; +@@ -332,15 +361,14 @@ static void ljca_irq_bus_unlock(struct irq_data *irqd) + struct gpio_chip *gc = 
irq_data_get_irq_chip_data(irqd); + struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc); + int gpio_id = irqd_to_hwirq(irqd); +- int enabled; +- int unmasked; ++ int enabled, unmasked; + + enabled = test_bit(gpio_id, ljca_gpio->enabled_irqs); + unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs); + + if (enabled != unmasked) { + if (unmasked) { +- gpio_config(ljca_gpio, gpio_id, 0); ++ ljca_gpio_config(ljca_gpio, gpio_id, 0); + ljca_enable_irq(ljca_gpio, gpio_id, true); + set_bit(gpio_id, ljca_gpio->enabled_irqs); + } else { +@@ -363,43 +391,48 @@ static const struct irq_chip ljca_gpio_irqchip = { + GPIOCHIP_IRQ_RESOURCE_HELPERS, + }; + +-static int ljca_gpio_probe(struct platform_device *pdev) ++static int ljca_gpio_probe(struct auxiliary_device *auxdev, ++ const struct auxiliary_device_id *aux_dev_id) + { ++ struct ljca_client *ljca = auxiliary_dev_to_ljca_client(auxdev); + struct ljca_gpio_dev *ljca_gpio; + struct gpio_irq_chip *girq; + int ret; + +- ljca_gpio = devm_kzalloc(&pdev->dev, sizeof(*ljca_gpio), GFP_KERNEL); ++ ljca_gpio = devm_kzalloc(&auxdev->dev, sizeof(*ljca_gpio), GFP_KERNEL); + if (!ljca_gpio) + return -ENOMEM; + +- ljca_gpio->gpio_info = dev_get_platdata(&pdev->dev); +- ljca_gpio->connect_mode = devm_kcalloc(&pdev->dev, ljca_gpio->gpio_info->num, +- sizeof(*ljca_gpio->connect_mode), GFP_KERNEL); ++ ljca_gpio->ljca = ljca; ++ ljca_gpio->gpio_info = dev_get_platdata(&auxdev->dev); ++ ljca_gpio->connect_mode = devm_kcalloc(&auxdev->dev, ++ ljca_gpio->gpio_info->num, ++ sizeof(*ljca_gpio->connect_mode), ++ GFP_KERNEL); + if (!ljca_gpio->connect_mode) + return -ENOMEM; + + mutex_init(&ljca_gpio->irq_lock); + mutex_init(&ljca_gpio->trans_lock); +- ljca_gpio->pdev = pdev; + ljca_gpio->gc.direction_input = ljca_gpio_direction_input; + ljca_gpio->gc.direction_output = ljca_gpio_direction_output; ++ ljca_gpio->gc.get_direction = ljca_gpio_get_direction; + ljca_gpio->gc.get = ljca_gpio_get_value; + ljca_gpio->gc.set = ljca_gpio_set_value; + ljca_gpio->gc.set_config = ljca_gpio_set_config; + ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask; + ljca_gpio->gc.can_sleep = true; +- ljca_gpio->gc.parent = &pdev->dev; ++ ljca_gpio->gc.parent = &auxdev->dev; + + ljca_gpio->gc.base = -1; + ljca_gpio->gc.ngpio = ljca_gpio->gpio_info->num; +- ljca_gpio->gc.label = ACPI_COMPANION(&pdev->dev) ? +- acpi_dev_name(ACPI_COMPANION(&pdev->dev)) : +- dev_name(&pdev->dev); ++ ljca_gpio->gc.label = ACPI_COMPANION(&auxdev->dev) ? 
++ acpi_dev_name(ACPI_COMPANION(&auxdev->dev)) : ++ dev_name(&auxdev->dev); + ljca_gpio->gc.owner = THIS_MODULE; + +- platform_set_drvdata(pdev, ljca_gpio); +- ljca_register_event_cb(ljca_gpio->gpio_info->ljca, ljca_gpio_event_cb, ljca_gpio); ++ auxiliary_set_drvdata(auxdev, ljca_gpio); ++ ljca_register_event_cb(ljca, ljca_gpio_event_cb, ljca_gpio); + + girq = &ljca_gpio->gc.irq; + gpio_irq_chip_set_chip(girq, &ljca_gpio_irqchip); +@@ -413,7 +446,7 @@ static int ljca_gpio_probe(struct platform_device *pdev) + INIT_WORK(&ljca_gpio->work, ljca_gpio_async); + ret = gpiochip_add_data(&ljca_gpio->gc, ljca_gpio); + if (ret) { +- ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca); ++ ljca_unregister_event_cb(ljca); + mutex_destroy(&ljca_gpio->irq_lock); + mutex_destroy(&ljca_gpio->trans_lock); + } +@@ -421,34 +454,33 @@ static int ljca_gpio_probe(struct platform_device *pdev) + return ret; + } + +-static int ljca_gpio_remove(struct platform_device *pdev) ++static void ljca_gpio_remove(struct auxiliary_device *auxdev) + { +- struct ljca_gpio_dev *ljca_gpio = platform_get_drvdata(pdev); ++ struct ljca_gpio_dev *ljca_gpio = auxiliary_get_drvdata(auxdev); + + gpiochip_remove(&ljca_gpio->gc); +- ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca); ++ ljca_unregister_event_cb(ljca_gpio->ljca); ++ cancel_work_sync(&ljca_gpio->work); + mutex_destroy(&ljca_gpio->irq_lock); + mutex_destroy(&ljca_gpio->trans_lock); +- return 0; + } + +-#define LJCA_GPIO_DRV_NAME "ljca-gpio" +-static const struct platform_device_id ljca_gpio_id[] = { +- { LJCA_GPIO_DRV_NAME, 0 }, +- { /* sentinel */ } ++static const struct auxiliary_device_id ljca_gpio_id_table[] = { ++ { "usb_ljca.ljca-gpio", 0 }, ++ { /* sentinel */ }, + }; +-MODULE_DEVICE_TABLE(platform, ljca_gpio_id); ++MODULE_DEVICE_TABLE(auxiliary, ljca_gpio_id_table); + +-static struct platform_driver ljca_gpio_driver = { +- .driver.name = LJCA_GPIO_DRV_NAME, ++static struct auxiliary_driver ljca_gpio_driver = { + .probe = ljca_gpio_probe, + .remove = ljca_gpio_remove, ++ .id_table = ljca_gpio_id_table, + }; +-module_platform_driver(ljca_gpio_driver); ++module_auxiliary_driver(ljca_gpio_driver); + +-MODULE_AUTHOR("Ye Xiang "); +-MODULE_AUTHOR("Wang Zhifeng "); +-MODULE_AUTHOR("Zhang Lixu "); ++MODULE_AUTHOR("Wentong Wu "); ++MODULE_AUTHOR("Zhifeng Wang "); ++MODULE_AUTHOR("Lixu Zhang "); + MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver"); + MODULE_LICENSE("GPL"); + MODULE_IMPORT_NS(LJCA); +diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c +index 44c0a21b1d1d9f..5827b18d190bdd 100644 +--- a/drivers/gpio/gpio-pci-idio-16.c ++++ b/drivers/gpio/gpio-pci-idio-16.c +@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = { + .reg_stride = 1, + .val_bits = 8, + .io_port = true, ++ .max_register = 0x7, + .wr_table = &idio_16_wr_table, + .rd_table = &idio_16_rd_table, + .volatile_table = &idio_16_rd_table, +diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c +index 79657910b79e64..d8a86e60cf8c1a 100644 +--- a/drivers/hwmon/sht3x.c ++++ b/drivers/hwmon/sht3x.c +@@ -288,24 +288,26 @@ static struct sht3x_data *sht3x_update_client(struct device *dev) + return data; + } + +-static int temp1_input_read(struct device *dev) ++static int temp1_input_read(struct device *dev, long *temp) + { + struct sht3x_data *data = sht3x_update_client(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + +- return data->temperature; ++ *temp = data->temperature; ++ return 0; + } + +-static int humidity1_input_read(struct 
device *dev) ++static int humidity1_input_read(struct device *dev, long *humidity) + { + struct sht3x_data *data = sht3x_update_client(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + +- return data->humidity; ++ *humidity = data->humidity; ++ return 0; + } + + /* +@@ -703,6 +705,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) + { + enum sht3x_limits index; ++ int ret; + + switch (type) { + case hwmon_chip: +@@ -717,10 +720,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: +- *val = temp1_input_read(dev); +- break; ++ return temp1_input_read(dev, val); + case hwmon_temp_alarm: +- *val = temp1_alarm_read(dev); ++ ret = temp1_alarm_read(dev); ++ if (ret < 0) ++ return ret; ++ *val = ret; + break; + case hwmon_temp_max: + index = limit_max; +@@ -745,10 +750,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, + case hwmon_humidity: + switch (attr) { + case hwmon_humidity_input: +- *val = humidity1_input_read(dev); +- break; ++ return humidity1_input_read(dev, val); + case hwmon_humidity_alarm: +- *val = humidity1_alarm_read(dev); ++ ret = humidity1_alarm_read(dev); ++ if (ret < 0) ++ return ret; ++ *val = ret; + break; + case hwmon_humidity_max: + index = limit_max; +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index a85442d74e0928..c110fb606dfb8b 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -383,6 +383,8 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, + } + spin_unlock(&fl->lock); + ++ dma_buf_put(buf); ++ + return ret; + } + +diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c +index 0159276656780d..00ed2147113e69 100644 +--- a/drivers/misc/lkdtm/fortify.c ++++ b/drivers/misc/lkdtm/fortify.c +@@ -44,6 +44,9 @@ static void lkdtm_FORTIFY_STR_MEMBER(void) + char *src; + + src = kmalloc(size, GFP_KERNEL); ++ if (!src) ++ return; ++ + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + +@@ -109,6 +112,9 @@ static void lkdtm_FORTIFY_MEM_MEMBER(void) + char *src; + + src = kmalloc(size, GFP_KERNEL); ++ if (!src) ++ return; ++ + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index 4adfa5af162f1d..fcfc6c7e6dc8af 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -120,6 +120,8 @@ + #define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ + #define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ + ++#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */ ++ + /* + * MEI HW Section + */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 93b98a7f4c7fd9..1a1df0390a40d0 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + ++ {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)}, ++ + /* required last entry */ + {0, } + }; +diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c +index 485d5ca399513c..988e33f2797046 100644 +--- a/drivers/most/most_usb.c ++++ b/drivers/most/most_usb.c +@@ -929,6 +929,10 @@ static void release_mdev(struct device *dev) + { + struct most_dev *mdev = to_mdev_from_dev(dev); + ++ kfree(mdev->busy_urbs); ++ kfree(mdev->cap); ++ kfree(mdev->conf); ++ 
kfree(mdev->ep_address); + kfree(mdev); + } + /** +@@ -1093,7 +1097,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id) + err_free_conf: + kfree(mdev->conf); + err_free_mdev: +- put_device(&mdev->dev); ++ kfree(mdev); + return ret; + } + +@@ -1121,13 +1125,6 @@ static void hdm_disconnect(struct usb_interface *interface) + if (mdev->dci) + device_unregister(&mdev->dci->dev); + most_deregister_interface(&mdev->iface); +- +- kfree(mdev->busy_urbs); +- kfree(mdev->cap); +- kfree(mdev->conf); +- kfree(mdev->ep_address); +- put_device(&mdev->dci->dev); +- put_device(&mdev->dev); + } + + static int hdm_suspend(struct usb_interface *interface, pm_message_t message) +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index f7ed129fc8110a..9aa328b958be47 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -2874,7 +2874,7 @@ static void bond_mii_monitor(struct work_struct *work) + { + struct bonding *bond = container_of(work, struct bonding, + mii_work.work); +- bool should_notify_peers = false; ++ bool should_notify_peers; + bool commit; + unsigned long delay; + struct slave *slave; +@@ -2886,30 +2886,33 @@ static void bond_mii_monitor(struct work_struct *work) + goto re_arm; + + rcu_read_lock(); ++ + should_notify_peers = bond_should_notify_peers(bond); + commit = !!bond_miimon_inspect(bond); +- if (bond->send_peer_notif) { +- rcu_read_unlock(); +- if (rtnl_trylock()) { +- bond->send_peer_notif--; +- rtnl_unlock(); +- } +- } else { +- rcu_read_unlock(); +- } + +- if (commit) { ++ rcu_read_unlock(); ++ ++ if (commit || bond->send_peer_notif) { + /* Race avoidance with bond_close cancel of workqueue */ + if (!rtnl_trylock()) { + delay = 1; +- should_notify_peers = false; + goto re_arm; + } + +- bond_for_each_slave(bond, slave, iter) { +- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); ++ if (commit) { ++ bond_for_each_slave(bond, slave, iter) { ++ bond_commit_link_state(slave, ++ BOND_SLAVE_NOTIFY_LATER); ++ } ++ bond_miimon_commit(bond); ++ } ++ ++ if (bond->send_peer_notif) { ++ bond->send_peer_notif--; ++ if (should_notify_peers) ++ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ++ bond->dev); + } +- bond_miimon_commit(bond); + + rtnl_unlock(); /* might sleep, hold no other locks */ + } +@@ -2917,13 +2920,6 @@ static void bond_mii_monitor(struct work_struct *work) + re_arm: + if (bond->params.miimon) + queue_delayed_work(bond->wq, &bond->mii_work, delay); +- +- if (should_notify_peers) { +- if (!rtnl_trylock()) +- return; +- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); +- rtnl_unlock(); +- } + } + + static int bond_upper_dev_walk(struct net_device *upper, +diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c +index 49cf9682b9254c..247d02447fc3f1 100644 +--- a/drivers/net/can/bxcan.c ++++ b/drivers/net/can/bxcan.c +@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb, + u32 id; + int i, j; + +- if (can_dropped_invalid_skb(ndev, skb)) ++ if (can_dev_dropped_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (bxcan_tx_busy(priv)) +diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c +index abe8dc051d94f1..77d165ed0d5341 100644 +--- a/drivers/net/can/dev/netlink.c ++++ b/drivers/net/can/dev/netlink.c +@@ -285,7 +285,9 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], + } + + if (data[IFLA_CAN_RESTART_MS]) { +- if (!priv->do_set_mode) { ++ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); ++ ++ if 
(restart_ms != 0 && !priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; +@@ -294,7 +296,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], + /* Do not allow changing restart delay while running */ + if (dev->flags & IFF_UP) + return -EBUSY; +- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); ++ priv->restart_ms = restart_ms; + } + + if (data[IFLA_CAN_RESTART]) { +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c +index 0d201a57d7e29e..dd9c50d3ec0f03 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c +@@ -1310,7 +1310,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, + } + + if (xdp_flags & ENA_XDP_REDIRECT) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + return work_done; + +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +index 81a99f4824d054..61bd2389ef4b54 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, + dma_addr_t addr; + + buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); +- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, +- DPAA2_ETH_TX_BUF_ALIGN); ++ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN); + if (aligned_start >= skb->head) + buffer_start = aligned_start; + else +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c +index 0c09d82dbf00d4..7accf3a3e9f0dd 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c +@@ -1246,6 +1246,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + /* next descriptor to process */ + i = rx_ring->next_to_clean; + ++ enetc_lock_mdio(); ++ + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd; + struct sk_buff *skb; +@@ -1281,7 +1283,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + rx_byte_cnt += skb->len + ETH_HLEN; + rx_frm_cnt++; + ++ enetc_unlock_mdio(); + napi_gro_receive(napi, skb); ++ enetc_lock_mdio(); + } + + rx_ring->next_to_clean = i; +@@ -1289,6 +1293,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + ++ enetc_unlock_mdio(); ++ + return rx_frm_cnt; + } + +@@ -1598,6 +1604,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + /* next descriptor to process */ + i = rx_ring->next_to_clean; + ++ enetc_lock_mdio(); ++ + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd, *orig_rxbd; + int orig_i, orig_cleaned_cnt; +@@ -1657,7 +1665,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + if (unlikely(!skb)) + goto out; + ++ enetc_unlock_mdio(); + napi_gro_receive(napi, skb); ++ enetc_lock_mdio(); + break; + case XDP_TX: + tx_ring = priv->xdp_tx_ring[rx_ring->index]; +@@ -1692,7 +1702,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + } + break; + case XDP_REDIRECT: ++ enetc_unlock_mdio(); + err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); ++ enetc_lock_mdio(); + if (unlikely(err)) { + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_failures++; +@@ -1712,8 +1724,11 @@ static int enetc_clean_rx_ring_xdp(struct 
enetc_bdr *rx_ring, + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + +- if (xdp_redirect_frm_cnt) +- xdp_do_flush_map(); ++ if (xdp_redirect_frm_cnt) { ++ enetc_unlock_mdio(); ++ xdp_do_flush(); ++ enetc_lock_mdio(); ++ } + + if (xdp_tx_frm_cnt) + enetc_update_tx_ring_tail(tx_ring); +@@ -1722,6 +1737,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - + rx_ring->xdp.xdp_tx_in_flight); + ++ enetc_unlock_mdio(); ++ + return rx_frm_cnt; + } + +@@ -1740,6 +1757,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; ++ enetc_unlock_mdio(); + + prog = rx_ring->xdp.prog; + if (prog) +@@ -1751,10 +1769,8 @@ static int enetc_poll(struct napi_struct *napi, int budget) + if (work_done) + v->rx_napi_work = true; + +- if (!complete) { +- enetc_unlock_mdio(); ++ if (!complete) + return budget; +- } + + napi_complete_done(napi, work_done); + +@@ -1763,6 +1779,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) + + v->rx_napi_work = false; + ++ enetc_lock_mdio(); + /* enable interrupts */ + enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h +index 860ecee302f1a6..dcf3e4b4e3f555 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.h ++++ b/drivers/net/ethernet/freescale/enetc/enetc.h +@@ -41,7 +41,7 @@ struct enetc_tx_swbd { + }; + + #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE +-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ ++#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1) + #define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ + #define ENETC_RXB_DMA_SIZE \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 8352d9b6469f2a..64cd72c1947837 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1904,7 +1904,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) + rxq->bd.cur = bdp; + + if (xdp_result & FEC_ENET_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + return pkt_received; + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +index 6a9b47b005d29b..99604379c87b6c 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +@@ -2398,7 +2398,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring, + void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) + { + if (xdp_res & I40E_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (xdp_res & I40E_XDP_TX) { + struct i40e_ring *xdp_ring = +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +index c8322fb6f2b37f..7e06373e14d98e 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c ++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +@@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, + struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx]; + + if (xdp_res & ICE_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (xdp_res & ICE_XDP_TX) { + if (static_branch_unlikely(&ice_xdp_locking_key)) +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index f245f3df40fcac..99876b765b08bc 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + } + + if (xdp_xmit & IXGBE_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (xdp_xmit & IXGBE_XDP_TX) { + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +index 7ef82c30e85712..9fdd19acf2242f 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +@@ -351,7 +351,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + } + + if (xdp_xmit & IXGBE_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (xdp_xmit & IXGBE_XDP_TX) { + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c +index 165f76d1231c19..2941721b65152e 100644 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -2520,7 +2520,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); + + if (ps.xdp_redirect) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (ps.rx_packets) + mvneta_update_stats(pp, &ps); +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index fce57faf345ce4..aabc39f7690f8e 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -4055,7 +4055,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + } + + if (xdp_ret & MVPP2_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (ps.rx_packets) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index aefe2af6f01d41..c843e6531449ba 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2221,7 +2221,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, + net_dim(ð->rx_dim, dim_sample); + + if (xdp_flush) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + return done; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 9cf33ae48c216f..455d02b6500d05 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -519,6 +519,12 @@ struct mlx5e_xdpsq { + struct mlx5e_channel *channel; + } ____cacheline_aligned_in_smp; + ++struct mlx5e_xdp_buff { ++ struct xdp_buff xdp; ++ struct mlx5_cqe64 *cqe; ++ struct mlx5e_rq *rq; ++}; ++ + struct mlx5e_ktls_resync_resp; + + struct mlx5e_icosq { +@@ -717,6 +723,7 @@ struct mlx5e_rq { + struct mlx5e_xdpsq *xdpsq; + DECLARE_BITMAP(flags, 8); + struct page_pool *page_pool; ++ struct mlx5e_xdp_buff mxbuf; + + /* AF_XDP zero-copy */ + struct xsk_buff_pool *xsk_pool; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +index dcd5db907f1028..9c22d64af68534 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +@@ -98,7 +98,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode) + 
return sizeof(struct mlx5_ksm) * 4; + } + WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode); +- return 0; ++ return 1; + } + + u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +index b723ff5e5249cf..13c7ed1bb37e93 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +@@ -895,7 +895,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) + mlx5e_xmit_xdp_doorbell(xdpsq); + + if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) { +- xdp_do_flush_map(); ++ xdp_do_flush(); + __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); + } + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +index ecfe93a479da8e..38e9ff6aa3aee2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +@@ -44,12 +44,6 @@ + (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \ + sizeof(struct mlx5_wqe_inline_seg)) + +-struct mlx5e_xdp_buff { +- struct xdp_buff xdp; +- struct mlx5_cqe64 *cqe; +- struct mlx5e_rq *rq; +-}; +- + /* XDP packets can be transmitted in different ways. On completion, we need to + * distinguish between them to clean up things in a proper way. + */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 8278395ee20a01..fcf7437174e189 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1697,17 +1697,17 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + + prog = rcu_dereference(rq->xdp_prog); + if (prog) { +- struct mlx5e_xdp_buff mxbuf; ++ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; + + net_prefetchw(va); /* xdp_frame data area */ + mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, +- cqe_bcnt, &mxbuf); +- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) ++ cqe_bcnt, mxbuf); ++ if (mlx5e_xdp_handle(rq, prog, mxbuf)) + return NULL; /* page/packet was consumed by XDP */ + +- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start; +- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta; +- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data; ++ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start; ++ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta; ++ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data; + } + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); +@@ -1726,11 +1726,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi + struct mlx5_cqe64 *cqe, u32 cqe_bcnt) + { + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; ++ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; + struct mlx5e_wqe_frag_info *head_wi = wi; + u16 rx_headroom = rq->buff.headroom; + struct mlx5e_frag_page *frag_page; + struct skb_shared_info *sinfo; +- struct mlx5e_xdp_buff mxbuf; + u32 frag_consumed_bytes; + struct bpf_prog *prog; + struct sk_buff *skb; +@@ -1750,8 +1750,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi + net_prefetch(va + rx_headroom); + + mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, +- frag_consumed_bytes, &mxbuf); +- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp); ++ frag_consumed_bytes, mxbuf); ++ sinfo = 
xdp_get_shared_info_from_buff(&mxbuf->xdp); + truesize = 0; + + cqe_bcnt -= frag_consumed_bytes; +@@ -1763,8 +1763,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi + + frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt); + +- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, +- wi->offset, frag_consumed_bytes); ++ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, ++ frag_page, wi->offset, ++ frag_consumed_bytes); + truesize += frag_info->frag_stride; + + cqe_bcnt -= frag_consumed_bytes; +@@ -1773,31 +1774,46 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi + } + + prog = rcu_dereference(rq->xdp_prog); +- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) { +- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { +- struct mlx5e_wqe_frag_info *pwi; ++ if (prog) { ++ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; ++ ++ if (mlx5e_xdp_handle(rq, prog, mxbuf)) { ++ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, ++ rq->flags)) { ++ struct mlx5e_wqe_frag_info *pwi; ++ ++ wi -= old_nr_frags - sinfo->nr_frags; ++ ++ for (pwi = head_wi; pwi < wi; pwi++) ++ pwi->frag_page->frags++; ++ } ++ return NULL; /* page/packet was consumed by XDP */ ++ } + +- for (pwi = head_wi; pwi < wi; pwi++) +- pwi->frag_page->frags++; ++ nr_frags_free = old_nr_frags - sinfo->nr_frags; ++ if (unlikely(nr_frags_free)) { ++ wi -= nr_frags_free; ++ truesize -= nr_frags_free * frag_info->frag_stride; + } +- return NULL; /* page/packet was consumed by XDP */ + } + +- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz, +- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, +- mxbuf.xdp.data_end - mxbuf.xdp.data, +- mxbuf.xdp.data - mxbuf.xdp.data_meta); ++ skb = mlx5e_build_linear_skb( ++ rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz, ++ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, ++ mxbuf->xdp.data_end - mxbuf->xdp.data, ++ mxbuf->xdp.data - mxbuf->xdp.data_meta); + if (unlikely(!skb)) + return NULL; + + skb_mark_for_recycle(skb); + head_wi->frag_page->frags++; + +- if (xdp_buff_has_frags(&mxbuf.xdp)) { ++ if (xdp_buff_has_frags(&mxbuf->xdp)) { + /* sinfo->nr_frags is reset by build_skb, calculate again. 
*/ + xdp_update_skb_shared_info(skb, wi - head_wi - 1, + sinfo->xdp_frags_size, truesize, +- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); ++ xdp_buff_is_frag_pfmemalloc( ++ &mxbuf->xdp)); + + for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++) + pwi->frag_page->frags++; +@@ -2003,11 +2019,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w + struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx]; + u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); + struct mlx5e_frag_page *head_page = frag_page; ++ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; + u32 frag_offset = head_offset; + u32 byte_cnt = cqe_bcnt; + struct skb_shared_info *sinfo; +- struct mlx5e_xdp_buff mxbuf; + unsigned int truesize = 0; ++ u32 pg_consumed_bytes; + struct bpf_prog *prog; + struct sk_buff *skb; + u32 linear_frame_sz; +@@ -2052,20 +2069,23 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w + } + } + +- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf); ++ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, ++ linear_data_len, mxbuf); + +- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp); ++ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp); + + while (byte_cnt) { + /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */ +- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); ++ pg_consumed_bytes = ++ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + truesize += pg_consumed_bytes; + else + truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); + +- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset, ++ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, ++ frag_page, frag_offset, + pg_consumed_bytes); + byte_cnt -= pg_consumed_bytes; + frag_offset = 0; +@@ -2073,10 +2093,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w + } + + if (prog) { +- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { ++ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; ++ u32 len; ++ ++ if (mlx5e_xdp_handle(rq, prog, mxbuf)) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { + struct mlx5e_frag_page *pfp; + ++ frag_page -= old_nr_frags - sinfo->nr_frags; ++ + for (pfp = head_page; pfp < frag_page; pfp++) + pfp->frags++; + +@@ -2086,10 +2111,20 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w + return NULL; /* page/packet was consumed by XDP */ + } + +- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, +- linear_frame_sz, +- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0, +- mxbuf.xdp.data - mxbuf.xdp.data_meta); ++ nr_frags_free = old_nr_frags - sinfo->nr_frags; ++ if (unlikely(nr_frags_free)) { ++ frag_page -= nr_frags_free; ++ truesize -= (nr_frags_free - 1) * PAGE_SIZE + ++ ALIGN(pg_consumed_bytes, ++ BIT(rq->mpwqe.log_stride_sz)); ++ } ++ ++ len = mxbuf->xdp.data_end - mxbuf->xdp.data; ++ ++ skb = mlx5e_build_linear_skb( ++ rq, mxbuf->xdp.data_hard_start, linear_frame_sz, ++ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len, ++ mxbuf->xdp.data - mxbuf->xdp.data_meta); + if (unlikely(!skb)) { + mlx5e_page_release_fragmented(rq, &wi->linear_page); + return NULL; +@@ -2099,29 +2134,34 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w + wi->linear_page.frags++; + mlx5e_page_release_fragmented(rq, &wi->linear_page); + +- if (xdp_buff_has_frags(&mxbuf.xdp)) { ++ 
if (xdp_buff_has_frags(&mxbuf->xdp)) { + struct mlx5e_frag_page *pagep; + + /* sinfo->nr_frags is reset by build_skb, calculate again. */ + xdp_update_skb_shared_info(skb, frag_page - head_page, + sinfo->xdp_frags_size, truesize, +- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); ++ xdp_buff_is_frag_pfmemalloc( ++ &mxbuf->xdp)); + + pagep = head_page; + do + pagep->frags++; + while (++pagep < frag_page); ++ ++ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len, ++ skb->data_len); ++ __pskb_pull_tail(skb, headlen); + } +- __pskb_pull_tail(skb, headlen); + } else { + dma_addr_t addr; + +- if (xdp_buff_has_frags(&mxbuf.xdp)) { ++ if (xdp_buff_has_frags(&mxbuf->xdp)) { + struct mlx5e_frag_page *pagep; + + xdp_update_skb_shared_info(skb, sinfo->nr_frags, + sinfo->xdp_frags_size, truesize, +- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); ++ xdp_buff_is_frag_pfmemalloc( ++ &mxbuf->xdp)); + + pagep = frag_page - sinfo->nr_frags; + do +@@ -2171,20 +2211,20 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + + prog = rcu_dereference(rq->xdp_prog); + if (prog) { +- struct mlx5e_xdp_buff mxbuf; ++ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; + + net_prefetchw(va); /* xdp_frame data area */ + mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, +- cqe_bcnt, &mxbuf); +- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { ++ cqe_bcnt, mxbuf); ++ if (mlx5e_xdp_handle(rq, prog, mxbuf)) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) + frag_page->frags++; + return NULL; /* page/packet was consumed by XDP */ + } + +- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start; +- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta; +- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data; ++ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start; ++ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta; ++ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data; + } + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); +diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c +index 5d9db8c2a5b437..45be6954d5aaea 100644 +--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c ++++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c +@@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget, + nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring); + + if (xdp_redir) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (tx_ring->wr_ptr_add) + nfp_net_tx_xmit_more_flush(tx_ring); +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index 58fdc4f8dd4835..085d81576f1a47 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -2038,15 +2038,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) + + skb_tx_timestamp(skb); + } +- /* Descriptor type must be set after all the above writes */ +- dma_wmb(); ++ + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; ++ /* When using multi-descriptors, DT_FEND needs to get written ++ * before DT_FSTART, but the compiler may reorder the memory ++ * writes in an attempt to optimize the code. ++ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART ++ * are written exactly in the order shown in the code. ++ * This is particularly important for cases where the DMA engine ++ * is already running when we are running this code. 
If the DMA ++ * sees DT_FSTART without the corresponding DT_FEND it will enter ++ * an error condition. ++ */ ++ dma_wmb(); + desc->die_dt = DT_FSTART; + } else { ++ /* Descriptor type must be set after all the above writes */ ++ dma_wmb(); + desc->die_dt = DT_FSINGLE; + } ++ ++ /* Before ringing the doorbell we need to make sure that the latest ++ * writes have been committed to memory, otherwise it could delay ++ * things until the doorbell is rang again. ++ * This is in replacement of the read operation mentioned in the HW ++ * manuals. ++ */ ++ dma_wmb(); + ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); + + priv->cur_tx[q] += num_tx_desc; +diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c +index 8d2d7ea2ebefc6..c9e17a8208a901 100644 +--- a/drivers/net/ethernet/sfc/efx_channels.c ++++ b/drivers/net/ethernet/sfc/efx_channels.c +@@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget) + + spent = efx_process_channel(channel, budget); + +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (spent < budget) { + if (efx_channel_has_rx_queue(channel) && +diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c +index 1776f7f8a7a90e..a7346e965bfe70 100644 +--- a/drivers/net/ethernet/sfc/siena/efx_channels.c ++++ b/drivers/net/ethernet/sfc/siena/efx_channels.c +@@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget) + + spent = efx_process_channel(channel, budget); + +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (spent < budget) { + if (efx_channel_has_rx_queue(channel) && +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c +index f358ea00319369..b834b129639f01 100644 +--- a/drivers/net/ethernet/socionext/netsec.c ++++ b/drivers/net/ethernet/socionext/netsec.c +@@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res, + u16 pkts) + { + if (xdp_res & NETSEC_XDP_REDIR) +- xdp_do_flush_map(); ++ xdp_do_flush(); + + if (xdp_res & NETSEC_XDP_TX) + netsec_xdp_ring_tx_db(priv, pkts); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +index d920a50dd16c7c..bab315517bad91 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -1565,14 +1565,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) + } + } else { + if (bsp_priv->clk_enabled) { ++ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) { ++ bsp_priv->ops->set_clock_selection(bsp_priv, ++ bsp_priv->clock_input, false); ++ } ++ + clk_bulk_disable_unprepare(bsp_priv->num_clks, + bsp_priv->clks); + clk_disable_unprepare(bsp_priv->clk_phy); + +- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) +- bsp_priv->ops->set_clock_selection(bsp_priv, +- bsp_priv->clock_input, false); +- + bsp_priv->clk_enabled = false; + } + } +diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c +index 0ec85635dfd60a..764ed298b57081 100644 +--- a/drivers/net/ethernet/ti/cpsw_priv.c ++++ b/drivers/net/ethernet/ti/cpsw_priv.c +@@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, + * particular hardware is sharing a common queue, so the + * incoming device might change per packet. 
+ */ +- xdp_do_flush_map(); ++ xdp_do_flush(); + break; + default: + bpf_warn_invalid_xdp_action(ndev, prog, act); +diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c +index 92add3daadbb18..278e6cb6f4d99a 100644 +--- a/drivers/net/usb/rtl8150.c ++++ b/drivers/net/usb/rtl8150.c +@@ -685,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, + rtl8150_t *dev = netdev_priv(netdev); + int count, res; + ++ /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */ ++ count = max(skb->len, ETH_ZLEN); ++ if (count % 64 == 0) ++ count++; ++ if (skb_padto(skb, count)) { ++ netdev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } ++ + netif_stop_queue(netdev); +- count = (skb->len < 60) ? 60 : skb->len; +- count = (count & 0x3f) ? count : count + 1; + dev->tx_skb = skb; + usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), + skb->data, count, write_bulk_callback, dev); +diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c +index 04031450d5feca..c3013059cca82a 100644 +--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c ++++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c +@@ -212,7 +212,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event) + return -EINVAL; + + hisi_pmu = to_hisi_pmu(event->pmu); +- if (event->attr.config > hisi_pmu->check_event) ++ if ((event->attr.config & HISI_EVENTID_MASK) > hisi_pmu->check_event) + return -EINVAL; + + if (hisi_pmu->on_cpu == -1) +diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h +index 92402aa69d70f5..67d1c3d3a41c0a 100644 +--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h ++++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h +@@ -43,7 +43,8 @@ + return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \ + } + +-#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) ++#define HISI_EVENTID_MASK GENMASK(7, 0) ++#define HISI_GET_EVENTID(ev) ((ev)->hw.config_base & HISI_EVENTID_MASK) + + #define HISI_PMU_EVTYPE_BITS 8 + #define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS) +diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c +index 6b374026cd4f44..3942aa49217696 100644 +--- a/drivers/s390/cio/device.c ++++ b/drivers/s390/cio/device.c +@@ -1318,23 +1318,34 @@ void ccw_device_schedule_recovery(void) + spin_unlock_irqrestore(&recovery_lock, flags); + } + +-static int purge_fn(struct device *dev, void *data) ++static int purge_fn(struct subchannel *sch, void *data) + { +- struct ccw_device *cdev = to_ccwdev(dev); +- struct ccw_dev_id *id = &cdev->private->dev_id; +- struct subchannel *sch = to_subchannel(cdev->dev.parent); ++ struct ccw_device *cdev; + +- spin_lock_irq(cdev->ccwlock); +- if (is_blacklisted(id->ssid, id->devno) && +- (cdev->private->state == DEV_STATE_OFFLINE) && +- (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { +- CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, +- id->devno); ++ spin_lock_irq(sch->lock); ++ if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv) ++ goto unlock; ++ ++ if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) ++ goto unlock; ++ ++ cdev = sch_get_cdev(sch); ++ if (cdev) { ++ if (cdev->private->state != DEV_STATE_OFFLINE) ++ goto unlock; ++ ++ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) ++ goto unlock; + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); +- css_sched_sch_todo(sch, SCH_TODO_UNREG); + atomic_set(&cdev->private->onoff, 0); + } +- spin_unlock_irq(cdev->ccwlock); ++ ++ css_sched_sch_todo(sch, SCH_TODO_UNREG); ++ 
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid, ++ sch->schib.pmcw.dev, cdev ? "" : " (no cdev)"); ++ ++unlock: ++ spin_unlock_irq(sch->lock); + /* Abort loop in case of pending signal. */ + if (signal_pending(current)) + return -EINTR; +@@ -1350,7 +1361,7 @@ static int purge_fn(struct device *dev, void *data) + int ccw_purge_blacklisted(void) + { + CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); +- bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); ++ for_each_subchannel_staged(purge_fn, NULL, NULL); + return 0; + } + +diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c +index bc6c086ddd43f4..731504ec7ef8b0 100644 +--- a/drivers/spi/spi-nxp-fspi.c ++++ b/drivers/spi/spi-nxp-fspi.c +@@ -665,6 +665,12 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f) + 0, POLL_TOUT, true); + if (ret) + dev_warn(f->dev, "DLL lock failed, please fix it!\n"); ++ ++ /* ++ * For ERR050272, DLL lock status bit is not accurate, ++ * wait for 4us more as a workaround. ++ */ ++ udelay(4); + } + + /* +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index eaf4a907380aa4..57b27f9ea1f034 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -653,7 +653,9 @@ static int dw8250_probe(struct platform_device *pdev) + if (IS_ERR(data->rst)) + return PTR_ERR(data->rst); + +- reset_control_deassert(data->rst); ++ err = reset_control_deassert(data->rst); ++ if (err) ++ return dev_err_probe(dev, err, "failed to deassert resets\n"); + + err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst); + if (err) +diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c +index 27430fdd9e761c..e79ee33d432c90 100644 +--- a/drivers/tty/serial/8250/8250_exar.c ++++ b/drivers/tty/serial/8250/8250_exar.c +@@ -33,6 +33,8 @@ + #define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db + #define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea + ++#define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018 ++ + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 + #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 + #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a +@@ -845,6 +847,12 @@ static const struct exar8250_board pbn_fastcom35x_8 = { + .exit = pci_xr17v35x_exit, + }; + ++static const struct exar8250_board pbn_adv_XR17V352 = { ++ .num_ports = 2, ++ .setup = pci_xr17v35x_setup, ++ .exit = pci_xr17v35x_exit, ++}; ++ + static const struct exar8250_board pbn_exar_XR17V4358 = { + .num_ports = 12, + .setup = pci_xr17v35x_setup, +@@ -914,6 +922,9 @@ static const struct pci_device_id exar_pci_tbl[] = { + USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x), + USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), + ++ /* ADVANTECH devices */ ++ EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352), ++ + /* Exar Corp. 
XR17C15[248] Dual/Quad/Octal UART */ + EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x), + EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x), +diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c +index 28f9a2679a20e8..c4a2a3e289c14e 100644 +--- a/drivers/tty/serial/8250/8250_mtk.c ++++ b/drivers/tty/serial/8250/8250_mtk.c +@@ -435,6 +435,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev) + while + (serial_in(up, MTK_UART_DEBUG0)); + ++ clk_disable_unprepare(data->uart_clk); + clk_disable_unprepare(data->bus_clk); + + return 0; +@@ -445,6 +446,7 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev) + struct mtk8250_data *data = dev_get_drvdata(dev); + + clk_prepare_enable(data->bus_clk); ++ clk_prepare_enable(data->uart_clk); + + return 0; + } +@@ -475,13 +477,13 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, + int dmacnt; + #endif + +- data->uart_clk = devm_clk_get(&pdev->dev, "baud"); ++ data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud"); + if (IS_ERR(data->uart_clk)) { + /* + * For compatibility with older device trees try unnamed + * clk when no baud clk can be found. + */ +- data->uart_clk = devm_clk_get(&pdev->dev, NULL); ++ data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(data->uart_clk)) { + dev_warn(&pdev->dev, "Can't get uart clock\n"); + return PTR_ERR(data->uart_clk); +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index c0fd8ab3fe8fc2..c322d0c1d965a8 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -464,6 +464,8 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Huawei 4G LTE module */ + { USB_DEVICE(0x12d1, 0x15bb), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, ++ { USB_DEVICE(0x12d1, 0x15c1), .driver_info = ++ USB_QUIRK_DISCONNECT_SUSPEND }, + { USB_DEVICE(0x12d1, 0x15c3), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, + +diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c +index ea106ad665a1fa..2deab4a6030d77 100644 +--- a/drivers/usb/gadget/legacy/raw_gadget.c ++++ b/drivers/usb/gadget/legacy/raw_gadget.c +@@ -620,8 +620,6 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, + return ERR_PTR(-EINVAL); + if (!usb_raw_io_flags_valid(io->flags)) + return ERR_PTR(-EINVAL); +- if (io->length > PAGE_SIZE) +- return ERR_PTR(-EINVAL); + if (get_from_user) + data = memdup_user(ptr + sizeof(*io), io->length); + else { +diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c +index 764657070883c1..bfd437269800cf 100644 +--- a/drivers/usb/host/xhci-dbgcap.c ++++ b/drivers/usb/host/xhci-dbgcap.c +@@ -1319,8 +1319,15 @@ int xhci_dbc_suspend(struct xhci_hcd *xhci) + if (!dbc) + return 0; + +- if (dbc->state == DS_CONFIGURED) ++ switch (dbc->state) { ++ case DS_ENABLED: ++ case DS_CONNECTED: ++ case DS_CONFIGURED: + dbc->resume_required = 1; ++ break; ++ default: ++ break; ++ } + + xhci_dbc_stop(dbc); + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index dca610369ca94a..e9d0d8591a01f2 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb); + #define QUECTEL_PRODUCT_EM05CN 0x0312 + #define QUECTEL_PRODUCT_EM05G_GR 0x0313 + #define QUECTEL_PRODUCT_EM05G_RS 0x0314 ++#define QUECTEL_PRODUCT_RG255C 0x0316 + #define QUECTEL_PRODUCT_EM12 0x0512 + #define QUECTEL_PRODUCT_RM500Q 0x0800 + #define 
QUECTEL_PRODUCT_RM520N 0x0801 +@@ -617,6 +618,7 @@ static void option_instat_callback(struct urb *urb); + #define UNISOC_VENDOR_ID 0x1782 + /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ + #define TOZED_PRODUCT_LT70C 0x4055 ++#define UNISOC_PRODUCT_UIS7720 0x4064 + /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ + #define LUAT_PRODUCT_AIR720U 0x4e00 + +@@ -1270,6 +1272,9 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) }, + + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, +@@ -1398,10 +1403,14 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */ + .driver_info = NCTRL(4) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */ ++ .driver_info = NCTRL(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */ + .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */ + .driver_info = NCTRL(4) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */ ++ .driver_info = NCTRL(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */ + .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ +@@ -2466,6 +2475,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ + .driver_info = NCTRL(1) }, +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index 5774b50eeaf7f7..2e39686e01c9e0 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -6636,9 +6636,9 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) + port->partner_desc.identity = &port->partner_ident; + port->port_type = port->typec_caps.type; + +- port->role_sw = usb_role_switch_get(port->dev); ++ port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); + if (!port->role_sw) +- port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); ++ port->role_sw = usb_role_switch_get(port->dev); + if (IS_ERR(port->role_sw)) { + err = PTR_ERR(port->role_sw); + goto out_destroy_wq; +diff --git a/fs/dlm/lockspace.c 
b/fs/dlm/lockspace.c +index 0455dddb0797c9..0b17657690d4d5 100644 +--- a/fs/dlm/lockspace.c ++++ b/fs/dlm/lockspace.c +@@ -802,7 +802,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) + + dlm_device_deregister(ls); + +- if (force < 3 && dlm_user_daemon_available()) ++ if (force != 3 && dlm_user_daemon_available()) + do_uevent(ls, 0); + + dlm_recoverd_stop(ls); +diff --git a/fs/exec.c b/fs/exec.c +index ee71a315cc51f5..a7dfac338a22c8 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -748,7 +748,7 @@ int setup_arg_pages(struct linux_binprm *bprm, + unsigned long stack_top, + int executable_stack) + { +- unsigned long ret; ++ int ret; + unsigned long stack_shift; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = bprm->vma; +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index 0b84284ece98fa..387d43aa89e3d3 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -634,7 +634,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, + goto out_err; + + err = -ENOMEM; +- ff = fuse_file_alloc(fm); ++ ff = fuse_file_alloc(fm, true); + if (!ff) + goto out_put_forget_req; + +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 952c99fcb636dc..2055af1ffaf339 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -55,7 +55,7 @@ struct fuse_release_args { + struct inode *inode; + }; + +-struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) ++struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) + { + struct fuse_file *ff; + +@@ -64,11 +64,13 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) + return NULL; + + ff->fm = fm; +- ff->release_args = kzalloc(sizeof(*ff->release_args), +- GFP_KERNEL_ACCOUNT); +- if (!ff->release_args) { +- kfree(ff); +- return NULL; ++ if (release) { ++ ff->release_args = kzalloc(sizeof(*ff->release_args), ++ GFP_KERNEL_ACCOUNT); ++ if (!ff->release_args) { ++ kfree(ff); ++ return NULL; ++ } + } + + INIT_LIST_HEAD(&ff->write_entry); +@@ -104,14 +106,14 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, + kfree(ra); + } + +-static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) ++static void fuse_file_put(struct fuse_file *ff, bool sync) + { + if (refcount_dec_and_test(&ff->count)) { +- struct fuse_args *args = &ff->release_args->args; ++ struct fuse_release_args *ra = ff->release_args; ++ struct fuse_args *args = (ra ? &ra->args : NULL); + +- if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) { +- /* Do nothing when client does not implement 'open' */ +- fuse_release_end(ff->fm, args, 0); ++ if (!args) { ++ /* Do nothing when server does not implement 'open' */ + } else if (sync) { + fuse_simple_request(ff->fm, args); + fuse_release_end(ff->fm, args, 0); +@@ -131,15 +133,16 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, + struct fuse_conn *fc = fm->fc; + struct fuse_file *ff; + int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; ++ bool open = isdir ? !fc->no_opendir : !fc->no_open; + +- ff = fuse_file_alloc(fm); ++ ff = fuse_file_alloc(fm, open); + if (!ff) + return ERR_PTR(-ENOMEM); + + ff->fh = 0; + /* Default for no-open */ + ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); +- if (isdir ? 
!fc->no_opendir : !fc->no_open) { ++ if (open) { + struct fuse_open_out outarg; + int err; + +@@ -147,11 +150,13 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, + if (!err) { + ff->fh = outarg.fh; + ff->open_flags = outarg.open_flags; +- + } else if (err != -ENOSYS) { + fuse_file_free(ff); + return ERR_PTR(err); + } else { ++ /* No release needed */ ++ kfree(ff->release_args); ++ ff->release_args = NULL; + if (isdir) + fc->no_opendir = 1; + else +@@ -273,7 +278,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) + } + + static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, +- unsigned int flags, int opcode) ++ unsigned int flags, int opcode, bool sync) + { + struct fuse_conn *fc = ff->fm->fc; + struct fuse_release_args *ra = ff->release_args; +@@ -291,6 +296,9 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, + + wake_up_interruptible_all(&ff->poll_wait); + ++ if (!ra) ++ return; ++ + ra->inarg.fh = ff->fh; + ra->inarg.flags = flags; + ra->args.in_numargs = 1; +@@ -300,6 +308,13 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, + ra->args.nodeid = ff->nodeid; + ra->args.force = true; + ra->args.nocreds = true; ++ ++ /* ++ * Hold inode until release is finished. ++ * From fuse_sync_release() the refcount is 1 and everything's ++ * synchronous, so we are fine with not doing igrab() here. ++ */ ++ ra->inode = sync ? NULL : igrab(&fi->inode); + } + + void fuse_file_release(struct inode *inode, struct fuse_file *ff, +@@ -309,14 +324,12 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, + struct fuse_release_args *ra = ff->release_args; + int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; + +- fuse_prepare_release(fi, ff, open_flags, opcode); ++ fuse_prepare_release(fi, ff, open_flags, opcode, false); + +- if (ff->flock) { ++ if (ra && ff->flock) { + ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; + ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); + } +- /* Hold inode until release is finished */ +- ra->inode = igrab(inode); + + /* + * Normally this will send the RELEASE request, however if +@@ -326,8 +339,14 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, + * Make the release synchronous if this is a fuseblk mount, + * synchronous RELEASE is allowed (and desirable) in this case + * because the server can be trusted not to screw up. ++ * ++ * Always use the asynchronous file put because the current thread ++ * might be the fuse server. This can happen if a process starts some ++ * aio and closes the fd before the aio completes. Since aio takes its ++ * own ref to the file, the IO completion has to drop the ref, which is ++ * how the fuse server can end up closing its clients' files. 
+ */ +- fuse_file_put(ff, ff->fm->fc->destroy, isdir); ++ fuse_file_put(ff, false); + } + + void fuse_release_common(struct file *file, bool isdir) +@@ -362,12 +381,8 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, + unsigned int flags) + { + WARN_ON(refcount_read(&ff->count) > 1); +- fuse_prepare_release(fi, ff, flags, FUSE_RELEASE); +- /* +- * iput(NULL) is a no-op and since the refcount is 1 and everything's +- * synchronous, we are fine with not doing igrab() here" +- */ +- fuse_file_put(ff, true, false); ++ fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); ++ fuse_file_put(ff, true); + } + EXPORT_SYMBOL_GPL(fuse_sync_release); + +@@ -924,7 +939,7 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, + put_page(page); + } + if (ia->ff) +- fuse_file_put(ia->ff, false, false); ++ fuse_file_put(ia->ff, false); + + fuse_io_free(ia); + } +@@ -1666,7 +1681,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) + __free_page(ap->pages[i]); + + if (wpa->ia.ff) +- fuse_file_put(wpa->ia.ff, false, false); ++ fuse_file_put(wpa->ia.ff, false); + + kfree(ap->pages); + kfree(wpa); +@@ -1914,7 +1929,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) + ff = __fuse_write_file_get(fi); + err = fuse_flush_times(inode, ff); + if (ff) +- fuse_file_put(ff, false, false); ++ fuse_file_put(ff, false); + + return err; + } +@@ -2312,7 +2327,7 @@ static int fuse_writepages(struct address_space *mapping, + fuse_writepages_send(&data); + } + if (data.ff) +- fuse_file_put(data.ff, false, false); ++ fuse_file_put(data.ff, false); + + kfree(data.orig_pages); + out: +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index 4ce1a6fdc94f03..aa12ff6de70686 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -1036,7 +1036,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, + */ + int fuse_open_common(struct inode *inode, struct file *file, bool isdir); + +-struct fuse_file *fuse_file_alloc(struct fuse_mount *fm); ++struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); + void fuse_file_free(struct fuse_file *ff); + void fuse_finish_open(struct inode *inode, struct file *file); + +diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c +index 34e9804e0f3601..e46f650b5e9c26 100644 +--- a/fs/hfs/bfind.c ++++ b/fs/hfs/bfind.c +@@ -21,7 +21,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) + + fd->tree = tree; + fd->bnode = NULL; +- ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); ++ ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + fd->search_key = ptr; +@@ -115,6 +115,12 @@ int hfs_brec_find(struct hfs_find_data *fd) + __be32 data; + int height, res; + ++ fd->record = -1; ++ fd->keyoffset = -1; ++ fd->keylength = -1; ++ fd->entryoffset = -1; ++ fd->entrylength = -1; ++ + tree = fd->tree; + if (fd->bnode) + hfs_bnode_put(fd->bnode); +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c +index 896396554bcc17..b01db1fae147cd 100644 +--- a/fs/hfs/brec.c ++++ b/fs/hfs/brec.c +@@ -179,6 +179,7 @@ int hfs_brec_remove(struct hfs_find_data *fd) + struct hfs_btree *tree; + struct hfs_bnode *node, *parent; + int end_off, rec_off, data_off, size; ++ int src, dst, len; + + tree = fd->tree; + node = fd->bnode; +@@ -208,10 +209,14 @@ int hfs_brec_remove(struct hfs_find_data *fd) + } + hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); + +- if (rec_off == end_off) +- goto skip; + size = fd->keylength + 
fd->entrylength; + ++ if (rec_off == end_off) { ++ src = fd->keyoffset; ++ hfs_bnode_clear(node, src, size); ++ goto skip; ++ } ++ + do { + data_off = hfs_bnode_read_u16(node, rec_off); + hfs_bnode_write_u16(node, rec_off + 2, data_off - size); +@@ -219,9 +224,23 @@ int hfs_brec_remove(struct hfs_find_data *fd) + } while (rec_off >= end_off); + + /* fill hole */ +- hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, +- data_off - fd->keyoffset - size); ++ dst = fd->keyoffset; ++ src = fd->keyoffset + size; ++ len = data_off - src; ++ ++ hfs_bnode_move(node, dst, src, len); ++ ++ src = dst + len; ++ len = data_off - src; ++ ++ hfs_bnode_clear(node, src, len); ++ + skip: ++ /* ++ * Remove the obsolete offset to free space. ++ */ ++ hfs_bnode_write_u16(node, end_off, 0); ++ + hfs_bnode_dump(node); + if (!fd->record) + hfs_brec_update_parent(fd); +diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c +index 8082eb01127cdf..bf811347bb07d3 100644 +--- a/fs/hfs/mdb.c ++++ b/fs/hfs/mdb.c +@@ -172,7 +172,7 @@ int hfs_mdb_get(struct super_block *sb) + pr_warn("continuing without an alternate MDB\n"); + } + +- HFS_SB(sb)->bitmap = kmalloc(8192, GFP_KERNEL); ++ HFS_SB(sb)->bitmap = kzalloc(8192, GFP_KERNEL); + if (!HFS_SB(sb)->bitmap) + goto out; + +diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c +index 901e83d65d2021..26ebac4c604242 100644 +--- a/fs/hfsplus/bfind.c ++++ b/fs/hfsplus/bfind.c +@@ -18,7 +18,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) + + fd->tree = tree; + fd->bnode = NULL; +- ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); ++ ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + fd->search_key = ptr; +@@ -158,6 +158,12 @@ int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare) + __be32 data; + int height, res; + ++ fd->record = -1; ++ fd->keyoffset = -1; ++ fd->keylength = -1; ++ fd->entryoffset = -1; ++ fd->entrylength = -1; ++ + tree = fd->tree; + if (fd->bnode) + hfs_bnode_put(fd->bnode); +diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c +index 14f4995588ff03..407d5152eb411e 100644 +--- a/fs/hfsplus/bnode.c ++++ b/fs/hfsplus/bnode.c +@@ -18,47 +18,6 @@ + #include "hfsplus_fs.h" + #include "hfsplus_raw.h" + +-static inline +-bool is_bnode_offset_valid(struct hfs_bnode *node, int off) +-{ +- bool is_valid = off < node->tree->node_size; +- +- if (!is_valid) { +- pr_err("requested invalid offset: " +- "NODE: id %u, type %#x, height %u, " +- "node_size %u, offset %d\n", +- node->this, node->type, node->height, +- node->tree->node_size, off); +- } +- +- return is_valid; +-} +- +-static inline +-int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) +-{ +- unsigned int node_size; +- +- if (!is_bnode_offset_valid(node, off)) +- return 0; +- +- node_size = node->tree->node_size; +- +- if ((off + len) > node_size) { +- int new_len = (int)node_size - off; +- +- pr_err("requested length has been corrected: " +- "NODE: id %u, type %#x, height %u, " +- "node_size %u, offset %d, " +- "requested_len %d, corrected_len %d\n", +- node->this, node->type, node->height, +- node->tree->node_size, off, len, new_len); +- +- return new_len; +- } +- +- return len; +-} + + /* Copy a specified range of bytes from the raw data of a node */ + void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) +diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c +index 9e1732a2b92a8c..fe6a54c4083c34 100644 +--- a/fs/hfsplus/btree.c ++++ b/fs/hfsplus/btree.c +@@ -393,6 +393,12 @@ struct 
hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) + len = hfs_brec_lenoff(node, 2, &off16); + off = off16; + ++ if (!is_bnode_offset_valid(node, off)) { ++ hfs_bnode_put(node); ++ return ERR_PTR(-EIO); ++ } ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + pagep = node->page + (off >> PAGE_SHIFT); + data = kmap_local_page(*pagep); +diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h +index 1473b04fc0f311..e67b35cb5ccc7a 100644 +--- a/fs/hfsplus/hfsplus_fs.h ++++ b/fs/hfsplus/hfsplus_fs.h +@@ -574,6 +574,48 @@ hfsplus_btree_lock_class(struct hfs_btree *tree) + return class; + } + ++static inline ++bool is_bnode_offset_valid(struct hfs_bnode *node, int off) ++{ ++ bool is_valid = off < node->tree->node_size; ++ ++ if (!is_valid) { ++ pr_err("requested invalid offset: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off); ++ } ++ ++ return is_valid; ++} ++ ++static inline ++int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) ++{ ++ unsigned int node_size; ++ ++ if (!is_bnode_offset_valid(node, off)) ++ return 0; ++ ++ node_size = node->tree->node_size; ++ ++ if ((off + len) > node_size) { ++ int new_len = (int)node_size - off; ++ ++ pr_err("requested length has been corrected: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, " ++ "requested_len %d, corrected_len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len, new_len); ++ ++ return new_len; ++ } ++ ++ return len; ++} ++ + /* compatibility */ + #define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) } + #define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec) +diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c +index 1986b4f18a9013..7e889820a63d0b 100644 +--- a/fs/hfsplus/super.c ++++ b/fs/hfsplus/super.c +@@ -67,13 +67,26 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) + if (!(inode->i_state & I_NEW)) + return inode; + +- INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list); +- spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock); +- mutex_init(&HFSPLUS_I(inode)->extents_lock); +- HFSPLUS_I(inode)->flags = 0; ++ atomic_set(&HFSPLUS_I(inode)->opencnt, 0); ++ HFSPLUS_I(inode)->first_blocks = 0; ++ HFSPLUS_I(inode)->clump_blocks = 0; ++ HFSPLUS_I(inode)->alloc_blocks = 0; ++ HFSPLUS_I(inode)->cached_start = U32_MAX; ++ HFSPLUS_I(inode)->cached_blocks = 0; ++ memset(HFSPLUS_I(inode)->first_extents, 0, sizeof(hfsplus_extent_rec)); ++ memset(HFSPLUS_I(inode)->cached_extents, 0, sizeof(hfsplus_extent_rec)); + HFSPLUS_I(inode)->extent_state = 0; ++ mutex_init(&HFSPLUS_I(inode)->extents_lock); + HFSPLUS_I(inode)->rsrc_inode = NULL; +- atomic_set(&HFSPLUS_I(inode)->opencnt, 0); ++ HFSPLUS_I(inode)->create_date = 0; ++ HFSPLUS_I(inode)->linkid = 0; ++ HFSPLUS_I(inode)->flags = 0; ++ HFSPLUS_I(inode)->fs_blocks = 0; ++ HFSPLUS_I(inode)->userflags = 0; ++ HFSPLUS_I(inode)->subfolders = 0; ++ INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list); ++ spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock); ++ HFSPLUS_I(inode)->phys_size = 0; + + if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || + inode->i_ino == HFSPLUS_ROOT_CNID) { +@@ -525,7 +538,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) + if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { + hfs_find_exit(&fd); + if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) { +- err = -EINVAL; ++ err = -EIO; + goto out_put_root; + } + inode = 
hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); +diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c +index 26655572975d3d..1aa7de55094cd9 100644 +--- a/fs/notify/fdinfo.c ++++ b/fs/notify/fdinfo.c +@@ -17,6 +17,7 @@ + #include "fanotify/fanotify.h" + #include "fdinfo.h" + #include "fsnotify.h" ++#include "../internal.h" + + #if defined(CONFIG_PROC_FS) + +@@ -50,7 +51,12 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode) + f.handle.handle_bytes = sizeof(f.pad); + size = f.handle.handle_bytes >> 2; + ++ if (!super_trylock_shared(inode->i_sb)) ++ return; ++ + ret = exportfs_encode_fid(inode, (struct fid *)f.handle.f_handle, &size); ++ up_read(&inode->i_sb->s_umount); ++ + if ((ret == FILEID_INVALID) || (ret < 0)) + return; + +diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c +index 1f9ed117e78b61..2f34074f0078bf 100644 +--- a/fs/ocfs2/move_extents.c ++++ b/fs/ocfs2/move_extents.c +@@ -868,6 +868,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh, + mlog_errno(ret); + goto out; + } ++ /* ++ * Invalidate extent cache after moving/defragging to prevent ++ * stale cached data with outdated extent flags. ++ */ ++ ocfs2_extent_map_trunc(inode, cpos); + + context->clusters_moved += alloc_size; + next: +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index 4bafb1adfb2235..6168c6d62b5e59 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -703,7 +703,7 @@ struct TCP_Server_Info { + bool nosharesock; + bool tcp_nodelay; + bool terminate; +- unsigned int credits; /* send no more requests at once */ ++ int credits; /* send no more requests at once */ + unsigned int max_credits; /* can override large 32000 default at mnt */ + unsigned int in_flight; /* number of requests on the wire to server */ + unsigned int max_in_flight; /* max number of requests that were on wire */ +diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c +index 354f7144c59049..36e1e52c30a0fb 100644 +--- a/fs/smb/server/transport_ipc.c ++++ b/fs/smb/server/transport_ipc.c +@@ -249,10 +249,16 @@ static void ipc_msg_handle_free(int handle) + + static int handle_response(int type, void *payload, size_t sz) + { +- unsigned int handle = *(unsigned int *)payload; ++ unsigned int handle; + struct ipc_msg_table_entry *entry; + int ret = 0; + ++ /* Prevent 4-byte read beyond declared payload size */ ++ if (sz < sizeof(unsigned int)) ++ return -EINVAL; ++ ++ handle = *(unsigned int *)payload; ++ + ipc_update_last_active(); + down_read(&ipc_msg_table_lock); + hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) { +diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c +index 31c1ac256e1be4..91e85a1a154fdf 100644 +--- a/fs/smb/server/transport_rdma.c ++++ b/fs/smb/server/transport_rdma.c +@@ -938,12 +938,15 @@ static int smb_direct_flush_send_list(struct smb_direct_transport *t, + struct smb_direct_sendmsg, + list); + ++ if (send_ctx->need_invalidate_rkey) { ++ first->wr.opcode = IB_WR_SEND_WITH_INV; ++ first->wr.ex.invalidate_rkey = send_ctx->remote_key; ++ send_ctx->need_invalidate_rkey = false; ++ send_ctx->remote_key = 0; ++ } ++ + last->wr.send_flags = IB_SEND_SIGNALED; + last->wr.wr_cqe = &last->cqe; +- if (is_last && send_ctx->need_invalidate_rkey) { +- last->wr.opcode = IB_WR_SEND_WITH_INV; +- last->wr.ex.invalidate_rkey = send_ctx->remote_key; +- } + + ret = smb_direct_post_send(t, &first->wr); + if (!ret) { +diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c +index 
a726fbba49e40a..9038e82fc6050c 100644 +--- a/fs/xfs/xfs_super.c ++++ b/fs/xfs/xfs_super.c +@@ -1230,16 +1230,25 @@ suffix_kstrtoint( + static inline void + xfs_fs_warn_deprecated( + struct fs_context *fc, +- struct fs_parameter *param, +- uint64_t flag, +- bool value) ++ struct fs_parameter *param) + { +- /* Don't print the warning if reconfiguring and current mount point +- * already had the flag set ++ /* ++ * Always warn about someone passing in a deprecated mount option. ++ * Previously we wouldn't print the warning if we were reconfiguring ++ * and current mount point already had the flag set, but that was not ++ * the right thing to do. ++ * ++ * Many distributions mount the root filesystem with no options in the ++ * initramfs and rely on mount -a to remount the root fs with the ++ * options in fstab. However, the old behavior meant that there would ++ * never be a warning about deprecated mount options for the root fs in ++ * /etc/fstab. On a single-fs system, that means no warning at all. ++ * ++ * Compounding this problem are distribution scripts that copy ++ * /proc/mounts to fstab, which means that we can't remove mount ++ * options unless we're 100% sure they have only ever been advertised ++ * in /proc/mounts in response to explicitly provided mount options. + */ +- if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && +- !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) +- return; + xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); + } + +@@ -1378,19 +1387,19 @@ xfs_fs_parse_param( + #endif + /* Following mount options will be removed in September 2025 */ + case Opt_ikeep: +- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true); ++ xfs_fs_warn_deprecated(fc, param); + parsing_mp->m_features |= XFS_FEAT_IKEEP; + return 0; + case Opt_noikeep: +- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false); ++ xfs_fs_warn_deprecated(fc, param); + parsing_mp->m_features &= ~XFS_FEAT_IKEEP; + return 0; + case Opt_attr2: +- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true); ++ xfs_fs_warn_deprecated(fc, param); + parsing_mp->m_features |= XFS_FEAT_ATTR2; + return 0; + case Opt_noattr2: +- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true); ++ xfs_fs_warn_deprecated(fc, param); + parsing_mp->m_features |= XFS_FEAT_NOATTR2; + return 0; + default: +diff --git a/io_uring/filetable.c b/io_uring/filetable.c +index 6e86e6188dbeeb..ff74d41d9e53c5 100644 +--- a/io_uring/filetable.c ++++ b/io_uring/filetable.c +@@ -62,7 +62,7 @@ void io_free_file_tables(struct io_file_table *table) + + static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, + u32 slot_index) +- __must_hold(&req->ctx->uring_lock) ++ __must_hold(&ctx->uring_lock) + { + struct io_fixed_file *file_slot; + int ret; +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c +index 958d4aa77dcad8..ca2e8153bd1cca 100644 +--- a/kernel/dma/debug.c ++++ b/kernel/dma/debug.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include + #include "debug.h" + +@@ -601,7 +602,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) + if (rc == -ENOMEM) { + pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); + global_disable = true; +- } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { ++ } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && ++ !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && ++ is_swiotlb_active(entry->dev))) { + err_printk(entry->dev, entry, + "cacheline tracking EEXIST, overlapping mappings 
aren't supported\n"); + } +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index f7cb505ab337a5..64634314a89cea 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -3435,11 +3435,9 @@ static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) + static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) + { + struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; +- struct cpumask *cpumask; + int cid; + + lockdep_assert_rq_held(rq); +- cpumask = mm_cidmask(mm); + cid = __this_cpu_read(pcpu_cid->cid); + if (mm_cid_is_valid(cid)) { + mm_cid_snapshot_time(rq, mm); +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 26c520d1af6e61..1613563132035b 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -4383,9 +4383,6 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, + int err; + u16 vid; + +- if (!netlink_capable(skb, CAP_NET_ADMIN)) +- return -EPERM; +- + if (!del_bulk) { + err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, + NULL, extack); +diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c +index 5c165218180588..f5a7d5a3875555 100644 +--- a/net/sctp/inqueue.c ++++ b/net/sctp/inqueue.c +@@ -169,13 +169,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) + chunk->head_skb = chunk->skb; + + /* skbs with "cover letter" */ +- if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) ++ if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) { ++ if (WARN_ON(!skb_shinfo(chunk->skb)->frag_list)) { ++ __SCTP_INC_STATS(dev_net(chunk->skb->dev), ++ SCTP_MIB_IN_PKT_DISCARDS); ++ sctp_chunk_free(chunk); ++ goto next_chunk; ++ } + chunk->skb = skb_shinfo(chunk->skb)->frag_list; +- +- if (WARN_ON(!chunk->skb)) { +- __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS); +- sctp_chunk_free(chunk); +- goto next_chunk; + } + } + +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index f95ac11a7e0de4..64790062cfa2eb 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -486,12 +486,26 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + goto err; + } + +- if (vsk->transport) { +- if (vsk->transport == new_transport) { +- ret = 0; +- goto err; +- } ++ if (vsk->transport && vsk->transport == new_transport) { ++ ret = 0; ++ goto err; ++ } + ++ /* We increase the module refcnt to prevent the transport unloading ++ * while there are open sockets assigned to it. ++ */ ++ if (!new_transport || !try_module_get(new_transport->module)) { ++ ret = -ENODEV; ++ goto err; ++ } ++ ++ /* It's safe to release the mutex after a successful try_module_get(). ++ * Whichever transport `new_transport` points at, it won't go away until ++ * the last module_put() below or in vsock_deassign_transport(). ++ */ ++ mutex_unlock(&vsock_register_mutex); ++ ++ if (vsk->transport) { + /* transport->release() must be called with sock lock acquired. + * This path can only be taken during vsock_connect(), where we + * have already held the sock lock. In the other cases, this +@@ -511,20 +525,6 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + vsk->peer_shutdown = 0; + } + +- /* We increase the module refcnt to prevent the transport unloading +- * while there are open sockets assigned to it. +- */ +- if (!new_transport || !try_module_get(new_transport->module)) { +- ret = -ENODEV; +- goto err; +- } +- +- /* It's safe to release the mutex after a successful try_module_get(). 
+- * Whichever transport `new_transport` points at, it won't go away until +- * the last module_put() below or in vsock_deassign_transport(). +- */ +- mutex_unlock(&vsock_register_mutex); +- + if (sk->sk_type == SOCK_SEQPACKET) { + if (!new_transport->seqpacket_allow || + !new_transport->seqpacket_allow(remote_cid)) { +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 9a907d8260c9c7..d30314532bb71d 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -3737,7 +3737,7 @@ endpoint_tests() + # subflow_rebuild_header is needed to support the implicit flag + # userspace pm type prevents add_addr + if reset "implicit EP" && +- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + pm_nl_set_limits $ns1 2 2 + pm_nl_set_limits $ns2 2 2 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal +@@ -3762,7 +3762,7 @@ endpoint_tests() + fi + + if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT && +- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + start_events + pm_nl_set_limits $ns1 0 3 + pm_nl_set_limits $ns2 0 3 +@@ -3910,7 +3910,7 @@ endpoint_tests() + + # flush and re-add + if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT && +- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then ++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + pm_nl_set_limits $ns1 0 2 + pm_nl_set_limits $ns2 1 2 + # broadcast IP: no packet for this address will be received on ns1 +diff --git a/tools/testing/selftests/net/sctp_hello.c b/tools/testing/selftests/net/sctp_hello.c +index f02f1f95d2275e..a04dac0b8027d9 100644 +--- a/tools/testing/selftests/net/sctp_hello.c ++++ b/tools/testing/selftests/net/sctp_hello.c +@@ -29,7 +29,6 @@ static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len + static int do_client(int argc, char *argv[]) + { + struct sockaddr_storage ss; +- char buf[] = "hello"; + int csk, ret, len; + + if (argc < 5) { +@@ -56,16 +55,10 @@ static int do_client(int argc, char *argv[]) + + set_addr(&ss, argv[3], argv[4], &len); + ret = connect(csk, (struct sockaddr *)&ss, len); +- if (ret < 0) { +- printf("failed to connect to peer\n"); ++ if (ret < 0) + return -1; +- } + +- ret = send(csk, buf, strlen(buf) + 1, 0); +- if (ret < 0) { +- printf("failed to send msg %d\n", ret); +- return -1; +- } ++ recv(csk, NULL, 0, 0); + close(csk); + + return 0; +@@ -75,7 +68,6 @@ int main(int argc, char *argv[]) + { + struct sockaddr_storage ss; + int lsk, csk, ret, len; +- char buf[20]; + + if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) { + printf("%s server|client ...\n", argv[0]); +@@ -125,11 +117,6 @@ int main(int argc, char *argv[]) + return -1; + } + +- ret = recv(csk, buf, sizeof(buf), 0); +- if (ret <= 0) { +- printf("failed to recv msg %d\n", ret); +- return -1; +- } + close(csk); + close(lsk); + +diff --git a/tools/testing/selftests/net/sctp_vrf.sh b/tools/testing/selftests/net/sctp_vrf.sh +index c721e952e5f301..667b211aa8a11c 100755 +--- a/tools/testing/selftests/net/sctp_vrf.sh ++++ b/tools/testing/selftests/net/sctp_vrf.sh +@@ -6,13 +6,11 @@ + # SERVER_NS + # CLIENT_NS2 (veth1) <---> (veth2) -> vrf_s2 + +-CLIENT_NS1="client-ns1" +-CLIENT_NS2="client-ns2" ++source lib.sh + CLIENT_IP4="10.0.0.1" + CLIENT_IP6="2000::1" + 
CLIENT_PORT=1234 + +-SERVER_NS="server-ns" + SERVER_IP4="10.0.0.2" + SERVER_IP6="2000::2" + SERVER_PORT=1234 +@@ -20,13 +18,11 @@ SERVER_PORT=1234 + setup() { + modprobe sctp + modprobe sctp_diag +- ip netns add $CLIENT_NS1 +- ip netns add $CLIENT_NS2 +- ip netns add $SERVER_NS ++ setup_ns CLIENT_NS1 CLIENT_NS2 SERVER_NS + +- ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null +- ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null +- ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null ++ ip net exec $CLIENT_NS1 sysctl -wq net.ipv6.conf.default.accept_dad=0 ++ ip net exec $CLIENT_NS2 sysctl -wq net.ipv6.conf.default.accept_dad=0 ++ ip net exec $SERVER_NS sysctl -wq net.ipv6.conf.default.accept_dad=0 + + ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1 + ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2 +@@ -66,19 +62,40 @@ setup() { + } + + cleanup() { +- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null +- ip netns del "$CLIENT_NS1" +- ip netns del "$CLIENT_NS2" +- ip netns del "$SERVER_NS" ++ wait_client $CLIENT_NS1 ++ wait_client $CLIENT_NS2 ++ stop_server ++ cleanup_ns $CLIENT_NS1 $CLIENT_NS2 $SERVER_NS + } + +-wait_server() { ++start_server() { + local IFACE=$1 + local CNT=0 + +- until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \ +- grep LISTEN | grep "$IFACE" 2>&1 >/dev/null; do +- [ $((CNT++)) = "20" ] && { RET=3; return $RET; } ++ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP $SERVER_PORT $IFACE & ++ disown ++ until ip netns exec $SERVER_NS ss -SlH | grep -q "$IFACE"; do ++ [ $((CNT++)) -eq 30 ] && { RET=3; return $RET; } ++ sleep 0.1 ++ done ++} ++ ++stop_server() { ++ local CNT=0 ++ ++ ip netns exec $SERVER_NS pkill sctp_hello ++ while ip netns exec $SERVER_NS ss -SaH | grep -q .; do ++ [ $((CNT++)) -eq 30 ] && break ++ sleep 0.1 ++ done ++} ++ ++wait_client() { ++ local CLIENT_NS=$1 ++ local CNT=0 ++ ++ while ip netns exec $CLIENT_NS ss -SaH | grep -q .; do ++ [ $((CNT++)) -eq 30 ] && break + sleep 0.1 + done + } +@@ -87,14 +104,12 @@ do_test() { + local CLIENT_NS=$1 + local IFACE=$2 + +- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null +- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ +- $SERVER_PORT $IFACE 2>&1 >/dev/null & +- disown +- wait_server $IFACE || return $RET ++ start_server $IFACE || return $RET + timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \ +- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null ++ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT + RET=$? 
++ wait_client $CLIENT_NS ++ stop_server + return $RET + } + +@@ -102,25 +117,21 @@ do_testx() { + local IFACE1=$1 + local IFACE2=$2 + +- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null +- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ +- $SERVER_PORT $IFACE1 2>&1 >/dev/null & +- disown +- wait_server $IFACE1 || return $RET +- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ +- $SERVER_PORT $IFACE2 2>&1 >/dev/null & +- disown +- wait_server $IFACE2 || return $RET ++ start_server $IFACE1 || return $RET ++ start_server $IFACE2 || return $RET + timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \ +- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null && \ ++ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT && \ + timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \ +- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null ++ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT + RET=$? ++ wait_client $CLIENT_NS1 ++ wait_client $CLIENT_NS2 ++ stop_server + return $RET + } + + testup() { +- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 2>&1 >/dev/null ++ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=1 + echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y " + do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; } + echo "[PASS]" +@@ -129,7 +140,7 @@ testup() { + do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; } + echo "[PASS]" + +- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 2>&1 >/dev/null ++ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=0 + echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N " + do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; } + echo "[PASS]" +@@ -166,7 +177,7 @@ testup() { + do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; } + echo "[PASS]" + +- echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N " ++ echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, Y " + do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; } + echo "[PASS]" + } diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.115-116.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.115-116.patch new file mode 100644 index 0000000000..ae809f2039 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.115-116.patch @@ -0,0 +1,1467 @@ +diff --git a/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd b/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd +index 5a775b8f654351..fc82aa4e54b005 100644 +--- a/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd ++++ b/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd +@@ -75,3 +75,13 @@ Description: + The default value is 1 (GNU Remote Debug command). + Other permissible value is 0 which is for vendor defined debug + target. ++ ++What: /sys/bus/pci/drivers/xhci_hcd/.../dbc_poll_interval_ms ++Date: February 2024 ++Contact: Mathias Nyman ++Description: ++ This attribute adjust the polling interval used to check for ++ DbC events. Unit is milliseconds. Accepted values range from 0 ++ up to 5000. The default value is 64 ms. ++ This polling interval is used while DbC is enabled but has no ++ active data transfers. 
+diff --git a/Makefile b/Makefile +index 85d8fa82569578..f28f1f9f5f4e6d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 115 ++SUBLEVEL = 116 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c +index 11c35cf45b4610..cb205f22096d78 100644 +--- a/arch/alpha/kernel/asm-offsets.c ++++ b/arch/alpha/kernel/asm-offsets.c +@@ -4,6 +4,7 @@ + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c +index f77deb7991757e..2978da85fcb65b 100644 +--- a/arch/arc/kernel/asm-offsets.c ++++ b/arch/arc/kernel/asm-offsets.c +@@ -2,6 +2,7 @@ + /* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c +index 219cbc7e5d134b..3840e1e22b7517 100644 +--- a/arch/arm/kernel/asm-offsets.c ++++ b/arch/arm/kernel/asm-offsets.c +@@ -7,6 +7,8 @@ + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ ++#define COMPILE_OFFSETS ++ + #include + #include + #include +diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c +index 5ff1942b04fcfd..ea2d740db81c52 100644 +--- a/arch/arm64/kernel/asm-offsets.c ++++ b/arch/arm64/kernel/asm-offsets.c +@@ -6,6 +6,7 @@ + * 2001-2002 Keith Owens + * Copyright (C) 2012 ARM Ltd. + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c +index d1e9035794733d..5525c8e7e1d9ea 100644 +--- a/arch/csky/kernel/asm-offsets.c ++++ b/arch/csky/kernel/asm-offsets.c +@@ -1,5 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c +index 03a7063f945614..50eea9fa6f1375 100644 +--- a/arch/hexagon/kernel/asm-offsets.c ++++ b/arch/hexagon/kernel/asm-offsets.c +@@ -8,6 +8,7 @@ + * + * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c +index 8da0726777edb4..110afd3cc8f348 100644 +--- a/arch/loongarch/kernel/asm-offsets.c ++++ b/arch/loongarch/kernel/asm-offsets.c +@@ -4,6 +4,8 @@ + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ ++#define COMPILE_OFFSETS ++ + #include + #include + #include +diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c +index 906d7323053744..67a1990f9d748f 100644 +--- a/arch/m68k/kernel/asm-offsets.c ++++ b/arch/m68k/kernel/asm-offsets.c +@@ -9,6 +9,7 @@ + * #defines from the assembly-language output. + */ + ++#define COMPILE_OFFSETS + #define ASM_OFFSETS_C + + #include +diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c +index 104c3ac5f30c88..b4b67d58e7f6ae 100644 +--- a/arch/microblaze/kernel/asm-offsets.c ++++ b/arch/microblaze/kernel/asm-offsets.c +@@ -7,6 +7,7 @@ + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c +index cb1045ebab0621..22c99a2cd5707b 100644 +--- a/arch/mips/kernel/asm-offsets.c ++++ b/arch/mips/kernel/asm-offsets.c +@@ -9,6 +9,8 @@ + * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. + */ ++#define COMPILE_OFFSETS ++ + #include + #include + #include +diff --git a/arch/nios2/kernel/asm-offsets.c b/arch/nios2/kernel/asm-offsets.c +index e3d9b7b6fb48aa..88190b503ce5de 100644 +--- a/arch/nios2/kernel/asm-offsets.c ++++ b/arch/nios2/kernel/asm-offsets.c +@@ -2,6 +2,7 @@ + /* + * Copyright (C) 2011 Tobias Klauser + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c +index 710651d5aaae10..3cc826f2216b10 100644 +--- a/arch/openrisc/kernel/asm-offsets.c ++++ b/arch/openrisc/kernel/asm-offsets.c +@@ -18,6 +18,7 @@ + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c +index 757816a7bd4b28..9abfe65492c65e 100644 +--- a/arch/parisc/kernel/asm-offsets.c ++++ b/arch/parisc/kernel/asm-offsets.c +@@ -13,6 +13,7 @@ + * Copyright (C) 2002 Randolph Chung + * Copyright (C) 2003 James Bottomley + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c +index 2affd30468bc4c..e2cee2f2ededdb 100644 +--- a/arch/powerpc/kernel/asm-offsets.c ++++ b/arch/powerpc/kernel/asm-offsets.c +@@ -8,6 +8,7 @@ + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c +index 6a992cba2f2876..e4589457e6085d 100644 +--- a/arch/riscv/kernel/asm-offsets.c ++++ b/arch/riscv/kernel/asm-offsets.c +@@ -3,6 +3,7 @@ + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c +index fa5f6885c74aa1..73a989dcfe2082 100644 +--- a/arch/s390/kernel/asm-offsets.c ++++ b/arch/s390/kernel/asm-offsets.c +@@ -4,6 +4,7 @@ + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ ++#define COMPILE_OFFSETS + + #define ASM_OFFSETS_C + +diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c +index a0322e8328456e..429b6a76314684 100644 +--- a/arch/sh/kernel/asm-offsets.c ++++ b/arch/sh/kernel/asm-offsets.c +@@ -8,6 +8,7 @@ + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c +index 5784f2df489a4d..f1e27a7f800f41 100644 +--- a/arch/sparc/kernel/asm-offsets.c ++++ b/arch/sparc/kernel/asm-offsets.c +@@ -10,6 +10,7 @@ + * + * On sparc, thread_info data is static and TI_XXX offsets are computed by hand. 
+ */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/arch/um/kernel/asm-offsets.c b/arch/um/kernel/asm-offsets.c +index 1fb12235ab9c84..a69873aa697f4f 100644 +--- a/arch/um/kernel/asm-offsets.c ++++ b/arch/um/kernel/asm-offsets.c +@@ -1 +1,3 @@ ++#define COMPILE_OFFSETS ++ + #include +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 315926ccea0fa3..ef1d3a5024ed4b 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1185,8 +1185,10 @@ static void __init retbleed_select_mitigation(void) + retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; + break; + default: +- if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) ++ if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { + pr_err(RETBLEED_INTEL_MSG); ++ retbleed_mitigation = RETBLEED_MITIGATION_NONE; ++ } + } + } + +@@ -1592,7 +1594,7 @@ spectre_v2_user_select_mitigation(void) + static const char * const spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", +- [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", ++ [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE", + [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", + [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", + [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", +@@ -3220,9 +3222,6 @@ static const char *spectre_bhi_state(void) + + static ssize_t spectre_v2_show_state(char *buf) + { +- if (spectre_v2_enabled == SPECTRE_V2_LFENCE) +- return sysfs_emit(buf, "Vulnerable: LFENCE\n"); +- + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); + +diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c +index da38de20ae598b..cfbced95e944a4 100644 +--- a/arch/xtensa/kernel/asm-offsets.c ++++ b/arch/xtensa/kernel/asm-offsets.c +@@ -11,6 +11,7 @@ + * + * Chris Zankel + */ ++#define COMPILE_OFFSETS + + #include + #include +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index 15f63452a9bec8..b01436d9ddaed8 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -306,6 +306,14 @@ DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 10); + DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 11); ++DEVICE_CHANNEL(ch12_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 12); ++DEVICE_CHANNEL(ch13_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 13); ++DEVICE_CHANNEL(ch14_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 14); ++DEVICE_CHANNEL(ch15_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 15); + + /* Total possible dynamic DIMM Label attribute file table */ + static struct attribute *dynamic_csrow_dimm_attr[] = { +@@ -321,6 +329,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { + &dev_attr_legacy_ch9_dimm_label.attr.attr, + &dev_attr_legacy_ch10_dimm_label.attr.attr, + &dev_attr_legacy_ch11_dimm_label.attr.attr, ++ &dev_attr_legacy_ch12_dimm_label.attr.attr, ++ &dev_attr_legacy_ch13_dimm_label.attr.attr, ++ &dev_attr_legacy_ch14_dimm_label.attr.attr, ++ &dev_attr_legacy_ch15_dimm_label.attr.attr, + NULL + }; + +@@ -349,6 +361,14 @@ DEVICE_CHANNEL(ch10_ce_count, S_IRUGO, + channel_ce_count_show, 
NULL, 10); + DEVICE_CHANNEL(ch11_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 11); ++DEVICE_CHANNEL(ch12_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 12); ++DEVICE_CHANNEL(ch13_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 13); ++DEVICE_CHANNEL(ch14_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 14); ++DEVICE_CHANNEL(ch15_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 15); + + /* Total possible dynamic ce_count attribute file table */ + static struct attribute *dynamic_csrow_ce_count_attr[] = { +@@ -364,6 +384,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { + &dev_attr_legacy_ch9_ce_count.attr.attr, + &dev_attr_legacy_ch10_ce_count.attr.attr, + &dev_attr_legacy_ch11_ce_count.attr.attr, ++ &dev_attr_legacy_ch12_ce_count.attr.attr, ++ &dev_attr_legacy_ch13_ce_count.attr.attr, ++ &dev_attr_legacy_ch14_ce_count.attr.attr, ++ &dev_attr_legacy_ch15_ce_count.attr.attr, + NULL + }; + +diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c +index 53b1eb876a1257..e978fd0898aaa6 100644 +--- a/drivers/gpio/gpio-idio-16.c ++++ b/drivers/gpio/gpio-idio-16.c +@@ -3,6 +3,7 @@ + * GPIO library for the ACCES IDIO-16 family + * Copyright (C) 2022 William Breathitt Gray + */ ++#include + #include + #include + #include +@@ -106,6 +107,7 @@ int devm_idio_16_regmap_register(struct device *const dev, + struct idio_16_data *data; + struct regmap_irq_chip *chip; + struct regmap_irq_chip_data *chip_data; ++ DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO); + + if (!config->parent) + return -EINVAL; +@@ -163,6 +165,9 @@ int devm_idio_16_regmap_register(struct device *const dev, + gpio_config.irq_domain = regmap_irq_get_domain(chip_data); + gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate; + ++ bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0)); ++ gpio_config.fixed_direction_output = fixed_direction_output; ++ + return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config)); + } + EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register); +diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c +index c08c8e528867ee..fd986afa7db5f0 100644 +--- a/drivers/gpio/gpio-regmap.c ++++ b/drivers/gpio/gpio-regmap.c +@@ -29,6 +29,12 @@ struct gpio_regmap { + unsigned int reg_clr_base; + unsigned int reg_dir_in_base; + unsigned int reg_dir_out_base; ++ unsigned long *fixed_direction_output; ++ ++#ifdef CONFIG_REGMAP_IRQ ++ int regmap_irq_line; ++ struct regmap_irq_chip_data *irq_chip_data; ++#endif + + int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, + unsigned int offset, unsigned int *reg, +@@ -117,6 +123,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip, + unsigned int base, val, reg, mask; + int invert, ret; + ++ if (gpio->fixed_direction_output) { ++ if (test_bit(offset, gpio->fixed_direction_output)) ++ return GPIO_LINE_DIRECTION_OUT; ++ else ++ return GPIO_LINE_DIRECTION_IN; ++ } ++ + if (gpio->reg_dat_base && !gpio->reg_set_base) + return GPIO_LINE_DIRECTION_IN; + if (gpio->reg_set_base && !gpio->reg_dat_base) +@@ -203,6 +216,7 @@ EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata); + */ + struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config) + { ++ struct irq_domain *irq_domain; + struct gpio_regmap *gpio; + struct gpio_chip *chip; + int ret; +@@ -274,12 +288,37 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config + chip->direction_output = gpio_regmap_direction_output; + } + ++ if (config->fixed_direction_output) { ++ gpio->fixed_direction_output = 
bitmap_alloc(chip->ngpio, ++ GFP_KERNEL); ++ if (!gpio->fixed_direction_output) { ++ ret = -ENOMEM; ++ goto err_free_gpio; ++ } ++ bitmap_copy(gpio->fixed_direction_output, ++ config->fixed_direction_output, chip->ngpio); ++ } ++ + ret = gpiochip_add_data(chip, gpio); + if (ret < 0) +- goto err_free_gpio; ++ goto err_free_bitmap; ++ ++#ifdef CONFIG_REGMAP_IRQ ++ if (config->regmap_irq_chip) { ++ gpio->regmap_irq_line = config->regmap_irq_line; ++ ret = regmap_add_irq_chip_fwnode(dev_fwnode(config->parent), config->regmap, ++ config->regmap_irq_line, config->regmap_irq_flags, ++ 0, config->regmap_irq_chip, &gpio->irq_chip_data); ++ if (ret) ++ goto err_free_bitmap; + +- if (config->irq_domain) { +- ret = gpiochip_irqchip_add_domain(chip, config->irq_domain); ++ irq_domain = regmap_irq_get_domain(gpio->irq_chip_data); ++ } else ++#endif ++ irq_domain = config->irq_domain; ++ ++ if (irq_domain) { ++ ret = gpiochip_irqchip_add_domain(chip, irq_domain); + if (ret) + goto err_remove_gpiochip; + } +@@ -288,6 +327,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config + + err_remove_gpiochip: + gpiochip_remove(chip); ++err_free_bitmap: ++ bitmap_free(gpio->fixed_direction_output); + err_free_gpio: + kfree(gpio); + return ERR_PTR(ret); +@@ -300,7 +341,13 @@ EXPORT_SYMBOL_GPL(gpio_regmap_register); + */ + void gpio_regmap_unregister(struct gpio_regmap *gpio) + { ++#ifdef CONFIG_REGMAP_IRQ ++ if (gpio->irq_chip_data) ++ regmap_del_irq_chip(gpio->regmap_irq_line, gpio->irq_chip_data); ++#endif ++ + gpiochip_remove(&gpio->gpio_chip); ++ bitmap_free(gpio->fixed_direction_output); + kfree(gpio); + } + EXPORT_SYMBOL_GPL(gpio_regmap_unregister); +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c +index 66a88bba8f15b8..b5f9a40f4a8196 100644 +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -329,8 +329,9 @@ struct sc16is7xx_one { + struct kthread_work reg_work; + struct kthread_delayed_work ms_work; + struct sc16is7xx_one_config config; +- bool irda_mode; + unsigned int old_mctrl; ++ u8 old_lcr; /* Value before EFR access. */ ++ bool irda_mode; + }; + + struct sc16is7xx_port { +@@ -355,10 +356,6 @@ static struct uart_driver sc16is7xx_uart = { + .nr = SC16IS7XX_MAX_DEVS, + }; + +-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit); +-static void sc16is7xx_stop_tx(struct uart_port *port); +- +-#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e))) + #define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e))) + + static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg) +@@ -416,6 +413,85 @@ static void sc16is7xx_power(struct uart_port *port, int on) + on ? 0 : SC16IS7XX_IER_SLEEP_BIT); + } + ++/* ++ * In an amazing feat of design, the Enhanced Features Register (EFR) ++ * shares the address of the Interrupt Identification Register (IIR). ++ * Access to EFR is switched on by writing a magic value (0xbf) to the ++ * Line Control Register (LCR). Any interrupt firing during this time will ++ * see the EFR where it expects the IIR to be, leading to ++ * "Unexpected interrupt" messages. ++ * ++ * Prevent this possibility by claiming a mutex while accessing the EFR, ++ * and claiming the same mutex from within the interrupt handler. This is ++ * similar to disabling the interrupt, but that doesn't work because the ++ * bulk of the interrupt processing is run as a workqueue job in thread ++ * context. 
++ */ ++static void sc16is7xx_efr_lock(struct uart_port *port) ++{ ++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); ++ ++ mutex_lock(&one->efr_lock); ++ ++ /* Backup content of LCR. */ ++ one->old_lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); ++ ++ /* Enable access to Enhanced register set */ ++ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_B); ++ ++ /* Disable cache updates when writing to EFR registers */ ++ regcache_cache_bypass(one->regmap, true); ++} ++ ++static void sc16is7xx_efr_unlock(struct uart_port *port) ++{ ++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); ++ ++ /* Re-enable cache updates when writing to normal registers */ ++ regcache_cache_bypass(one->regmap, false); ++ ++ /* Restore original content of LCR */ ++ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, one->old_lcr); ++ ++ mutex_unlock(&one->efr_lock); ++} ++ ++static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit) ++{ ++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev); ++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); ++ ++ lockdep_assert_held_once(&port->lock); ++ ++ one->config.flags |= SC16IS7XX_RECONF_IER; ++ one->config.ier_mask |= bit; ++ one->config.ier_val &= ~bit; ++ kthread_queue_work(&s->kworker, &one->reg_work); ++} ++ ++static void sc16is7xx_ier_set(struct uart_port *port, u8 bit) ++{ ++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev); ++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); ++ ++ lockdep_assert_held_once(&port->lock); ++ ++ one->config.flags |= SC16IS7XX_RECONF_IER; ++ one->config.ier_mask |= bit; ++ one->config.ier_val |= bit; ++ kthread_queue_work(&s->kworker, &one->reg_work); ++} ++ ++static void sc16is7xx_stop_tx(struct uart_port *port) ++{ ++ sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT); ++} ++ ++static void sc16is7xx_stop_rx(struct uart_port *port) ++{ ++ sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT); ++} ++ + static const struct sc16is7xx_devtype sc16is74x_devtype = { + .name = "SC16IS74X", + .nr_gpio = 0, +@@ -506,40 +582,6 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) + div /= prescaler; + } + +- /* In an amazing feat of design, the Enhanced Features Register shares +- * the address of the Interrupt Identification Register, and is +- * switched in by writing a magic value (0xbf) to the Line Control +- * Register. Any interrupt firing during this time will see the EFR +- * where it expects the IIR to be, leading to "Unexpected interrupt" +- * messages. +- * +- * Prevent this possibility by claiming a mutex while accessing the +- * EFR, and claiming the same mutex from within the interrupt handler. +- * This is similar to disabling the interrupt, but that doesn't work +- * because the bulk of the interrupt processing is run as a workqueue +- * job in thread context. +- */ +- mutex_lock(&one->efr_lock); +- +- lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); +- +- /* Open the LCR divisors for configuration */ +- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, +- SC16IS7XX_LCR_CONF_MODE_B); +- +- /* Enable enhanced features */ +- regcache_cache_bypass(one->regmap, true); +- sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, +- SC16IS7XX_EFR_ENABLE_BIT, +- SC16IS7XX_EFR_ENABLE_BIT); +- +- regcache_cache_bypass(one->regmap, false); +- +- /* Put LCR back to the normal mode */ +- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); +- +- mutex_unlock(&one->efr_lock); +- + /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. 
*/ + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, + SC16IS7XX_MCR_CLKSEL_BIT, +@@ -547,7 +589,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) + + mutex_lock(&one->efr_lock); + +- /* Open the LCR divisors for configuration */ ++ /* Backup LCR and access special register set (DLL/DLH) */ ++ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, + SC16IS7XX_LCR_CONF_MODE_A); + +@@ -557,7 +600,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) + sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256); + regcache_cache_bypass(one->regmap, false); + +- /* Put LCR back to the normal mode */ ++ /* Restore LCR and access to general register set */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + + mutex_unlock(&one->efr_lock); +@@ -889,42 +932,6 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws) + sc16is7xx_reconf_rs485(&one->port); + } + +-static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit) +-{ +- struct sc16is7xx_port *s = dev_get_drvdata(port->dev); +- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); +- +- lockdep_assert_held_once(&port->lock); +- +- one->config.flags |= SC16IS7XX_RECONF_IER; +- one->config.ier_mask |= bit; +- one->config.ier_val &= ~bit; +- kthread_queue_work(&s->kworker, &one->reg_work); +-} +- +-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit) +-{ +- struct sc16is7xx_port *s = dev_get_drvdata(port->dev); +- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); +- +- lockdep_assert_held_once(&port->lock); +- +- one->config.flags |= SC16IS7XX_RECONF_IER; +- one->config.ier_mask |= bit; +- one->config.ier_val |= bit; +- kthread_queue_work(&s->kworker, &one->reg_work); +-} +- +-static void sc16is7xx_stop_tx(struct uart_port *port) +-{ +- sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT); +-} +- +-static void sc16is7xx_stop_rx(struct uart_port *port) +-{ +- sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT); +-} +- + static void sc16is7xx_ms_proc(struct kthread_work *ws) + { + struct sc16is7xx_one *one = to_sc16is7xx_one(ws, ms_work.work); +@@ -1074,17 +1081,7 @@ static void sc16is7xx_set_termios(struct uart_port *port, + if (!(termios->c_cflag & CREAD)) + port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK; + +- /* As above, claim the mutex while accessing the EFR. 
*/ +- mutex_lock(&one->efr_lock); +- +- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, +- SC16IS7XX_LCR_CONF_MODE_B); +- + /* Configure flow control */ +- regcache_cache_bypass(one->regmap, true); +- sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]); +- sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]); +- + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + if (termios->c_cflag & CRTSCTS) { + flow |= SC16IS7XX_EFR_AUTOCTS_BIT | +@@ -1096,16 +1093,16 @@ static void sc16is7xx_set_termios(struct uart_port *port, + if (termios->c_iflag & IXOFF) + flow |= SC16IS7XX_EFR_SWFLOW1_BIT; + +- sc16is7xx_port_update(port, +- SC16IS7XX_EFR_REG, +- SC16IS7XX_EFR_FLOWCTRL_BITS, +- flow); +- regcache_cache_bypass(one->regmap, false); +- + /* Update LCR register */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + +- mutex_unlock(&one->efr_lock); ++ /* Update EFR registers */ ++ sc16is7xx_efr_lock(port); ++ sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]); ++ sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]); ++ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, ++ SC16IS7XX_EFR_FLOWCTRL_BITS, flow); ++ sc16is7xx_efr_unlock(port); + + /* Get baud rate generator configuration */ + baud = uart_get_baud_rate(port, termios, old, +diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c +index bfd437269800cf..4d975b26a185cf 100644 +--- a/drivers/usb/host/xhci-dbgcap.c ++++ b/drivers/usb/host/xhci-dbgcap.c +@@ -665,7 +665,8 @@ static int xhci_dbc_start(struct xhci_dbc *dbc) + return ret; + } + +- return mod_delayed_work(system_wq, &dbc->event_work, 1); ++ return mod_delayed_work(system_wq, &dbc->event_work, ++ msecs_to_jiffies(dbc->poll_interval)); + } + + static void xhci_dbc_stop(struct xhci_dbc *dbc) +@@ -854,6 +855,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) + { + dma_addr_t deq; + union xhci_trb *evt; ++ enum evtreturn ret = EVT_DONE; + u32 ctrl, portsc; + bool update_erdp = false; + +@@ -878,7 +880,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) + dev_info(dbc->dev, "DbC configured\n"); + portsc = readl(&dbc->regs->portsc); + writel(portsc, &dbc->regs->portsc); +- return EVT_GSER; ++ ret = EVT_GSER; ++ break; + } + + return EVT_DONE; +@@ -938,6 +941,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) + break; + case TRB_TYPE(TRB_TRANSFER): + dbc_handle_xfer_event(dbc, evt); ++ if (ret != EVT_GSER) ++ ret = EVT_XFER_DONE; + break; + default: + break; +@@ -956,7 +961,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) + lo_hi_writeq(deq, &dbc->regs->erdp); + } + +- return EVT_DONE; ++ return ret; + } + + static void xhci_dbc_handle_events(struct work_struct *work) +@@ -964,8 +969,11 @@ static void xhci_dbc_handle_events(struct work_struct *work) + enum evtreturn evtr; + struct xhci_dbc *dbc; + unsigned long flags; ++ unsigned int poll_interval; ++ unsigned long busypoll_timelimit; + + dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); ++ poll_interval = dbc->poll_interval; + + spin_lock_irqsave(&dbc->lock, flags); + evtr = xhci_dbc_do_handle_events(dbc); +@@ -981,13 +989,28 @@ static void xhci_dbc_handle_events(struct work_struct *work) + dbc->driver->disconnect(dbc); + break; + case EVT_DONE: ++ /* ++ * Set fast poll rate if there are pending out transfers, or ++ * a transfer was recently processed ++ */ ++ busypoll_timelimit = dbc->xfer_timestamp + ++ 
msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT); ++ ++ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) || ++ time_is_after_jiffies(busypoll_timelimit)) ++ poll_interval = 0; ++ break; ++ case EVT_XFER_DONE: ++ dbc->xfer_timestamp = jiffies; ++ poll_interval = 0; + break; + default: + dev_info(dbc->dev, "stop handling dbc events\n"); + return; + } + +- mod_delayed_work(system_wq, &dbc->event_work, 1); ++ mod_delayed_work(system_wq, &dbc->event_work, ++ msecs_to_jiffies(poll_interval)); + } + + static ssize_t dbc_show(struct device *dev, +@@ -1206,11 +1229,48 @@ static ssize_t dbc_bInterfaceProtocol_store(struct device *dev, + return size; + } + ++static ssize_t dbc_poll_interval_ms_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct xhci_dbc *dbc; ++ struct xhci_hcd *xhci; ++ ++ xhci = hcd_to_xhci(dev_get_drvdata(dev)); ++ dbc = xhci->dbc; ++ ++ return sysfs_emit(buf, "%u\n", dbc->poll_interval); ++} ++ ++static ssize_t dbc_poll_interval_ms_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct xhci_dbc *dbc; ++ struct xhci_hcd *xhci; ++ u32 value; ++ int ret; ++ ++ ret = kstrtou32(buf, 0, &value); ++ if (ret || value > DBC_POLL_INTERVAL_MAX) ++ return -EINVAL; ++ ++ xhci = hcd_to_xhci(dev_get_drvdata(dev)); ++ dbc = xhci->dbc; ++ ++ dbc->poll_interval = value; ++ ++ mod_delayed_work(system_wq, &dbc->event_work, 0); ++ ++ return size; ++} ++ + static DEVICE_ATTR_RW(dbc); + static DEVICE_ATTR_RW(dbc_idVendor); + static DEVICE_ATTR_RW(dbc_idProduct); + static DEVICE_ATTR_RW(dbc_bcdDevice); + static DEVICE_ATTR_RW(dbc_bInterfaceProtocol); ++static DEVICE_ATTR_RW(dbc_poll_interval_ms); + + static struct attribute *dbc_dev_attributes[] = { + &dev_attr_dbc.attr, +@@ -1218,6 +1278,7 @@ static struct attribute *dbc_dev_attributes[] = { + &dev_attr_dbc_idProduct.attr, + &dev_attr_dbc_bcdDevice.attr, + &dev_attr_dbc_bInterfaceProtocol.attr, ++ &dev_attr_dbc_poll_interval_ms.attr, + NULL + }; + +@@ -1242,6 +1303,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver * + dbc->idVendor = DBC_VENDOR_ID; + dbc->bcdDevice = DBC_DEVICE_REV; + dbc->bInterfaceProtocol = DBC_PROTOCOL; ++ dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT; + + if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE) + goto err; +diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h +index 2de0dc49a3e9f7..1fab3eb9c831d3 100644 +--- a/drivers/usb/host/xhci-dbgcap.h ++++ b/drivers/usb/host/xhci-dbgcap.h +@@ -93,7 +93,9 @@ struct dbc_ep { + + #define DBC_QUEUE_SIZE 16 + #define DBC_WRITE_BUF_SIZE 8192 +- ++#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */ ++#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */ ++#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */ + /* + * Private structure for DbC hardware state: + */ +@@ -139,6 +141,8 @@ struct xhci_dbc { + + enum dbc_state state; + struct delayed_work event_work; ++ unsigned int poll_interval; /* ms */ ++ unsigned long xfer_timestamp; + unsigned resume_required:1; + struct dbc_ep eps[2]; + +@@ -184,6 +188,7 @@ struct dbc_request { + enum evtreturn { + EVT_ERR = -1, + EVT_DONE, ++ EVT_XFER_DONE, + EVT_GSER, + EVT_DISC, + }; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index bb5f7911d473cb..7ad1734cbbfc91 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2080,10 +2080,10 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info, + + /* returns with log_tree_root freed on success */ + ret = 
btrfs_recover_log_trees(log_tree_root); ++ btrfs_put_root(log_tree_root); + if (ret) { + btrfs_handle_fs_error(fs_info, ret, + "Failed to recover log tree"); +- btrfs_put_root(log_tree_root); + return ret; + } + +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 8248113eb067fa..5e3d1a87b7e9da 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -4175,7 +4175,8 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, + } + + static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info, +- struct find_free_extent_ctl *ffe_ctl) ++ struct find_free_extent_ctl *ffe_ctl, ++ struct btrfs_space_info *space_info) + { + if (ffe_ctl->for_treelog) { + spin_lock(&fs_info->treelog_bg_lock); +@@ -4199,6 +4200,7 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info, + u64 avail = block_group->zone_capacity - block_group->alloc_offset; + + if (block_group_bits(block_group, ffe_ctl->flags) && ++ block_group->space_info == space_info && + avail >= ffe_ctl->num_bytes) { + ffe_ctl->hint_byte = block_group->start; + break; +@@ -4220,7 +4222,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info, + return prepare_allocation_clustered(fs_info, ffe_ctl, + space_info, ins); + case BTRFS_EXTENT_ALLOC_ZONED: +- return prepare_allocation_zoned(fs_info, ffe_ctl); ++ return prepare_allocation_zoned(fs_info, ffe_ctl, space_info); + default: + BUG(); + } +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index ee5ffeab85bb78..b1be3e0fe72823 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3051,9 +3051,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) + goto out; + } + +- if (btrfs_is_zoned(fs_info)) +- btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, +- ordered_extent->disk_num_bytes); ++ ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, ++ ordered_extent->disk_num_bytes); ++ if (ret) ++ goto out; + + if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { + truncated = true; +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c +index 7632d652a1257d..4a5a5ee360e579 100644 +--- a/fs/btrfs/scrub.c ++++ b/fs/btrfs/scrub.c +@@ -1271,8 +1271,7 @@ static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *d + * Slice is divided into intervals when the IO is submitted, adjust by + * bwlimit and maximum of 64 intervals. 
+ */ +- div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); +- div = min_t(u32, 64, div); ++ div = clamp(bwlimit / (16 * 1024 * 1024), 1, 64); + + /* Start new epoch, set deadline */ + now = ktime_get(); +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 3989cb19cdae70..20add63421b3d8 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -1796,7 +1796,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + } + /* see comments in should_cow_block() */ + set_bit(BTRFS_ROOT_FORCE_COW, &root->state); +- smp_wmb(); ++ smp_mb__after_atomic(); + + btrfs_set_root_node(new_root_item, tmp); + /* record when the snapshot was created in key.offset */ +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 4b53e19f7520fe..5512991b24faa8 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -2493,15 +2493,13 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, + int i; + int ret; + ++ if (level != 0) ++ return 0; ++ + ret = btrfs_read_extent_buffer(eb, &check); + if (ret) + return ret; + +- level = btrfs_header_level(eb); +- +- if (level != 0) +- return 0; +- + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; +@@ -7422,7 +7420,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) + + log_root_tree->log_root = NULL; + clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); +- btrfs_put_root(log_root_tree); + + return 0; + error: +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c +index 3622ba1d8e09f4..6e8b8c46ba18f6 100644 +--- a/fs/btrfs/zoned.c ++++ b/fs/btrfs/zoned.c +@@ -2263,16 +2263,17 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags) + return ret; + } + +-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length) ++int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length) + { + struct btrfs_block_group *block_group; + u64 min_alloc_bytes; + + if (!btrfs_is_zoned(fs_info)) +- return; ++ return 0; + + block_group = btrfs_lookup_block_group(fs_info, logical); +- ASSERT(block_group); ++ if (WARN_ON_ONCE(!block_group)) ++ return -ENOENT; + + /* No MIXED_BG on zoned btrfs. 
*/
+ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
+@@ -2289,6 +2290,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
+
+ out:
+ btrfs_put_block_group(block_group);
++ return 0;
+ }
+
+ static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
+diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
+index 448955641d1143..c18f31d3dc25f6 100644
+--- a/fs/btrfs/zoned.h
++++ b/fs/btrfs/zoned.h
+@@ -71,7 +71,7 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
+ bool btrfs_zone_activate(struct btrfs_block_group *block_group);
+ int btrfs_zone_finish(struct btrfs_block_group *block_group);
+ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
+-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
++int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
+ u64 length);
+ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ struct extent_buffer *eb);
+@@ -227,8 +227,11 @@ static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
+ return true;
+ }
+
+-static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
+- u64 logical, u64 length) { }
++static inline int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
++ u64 logical, u64 length)
++{
++ return 0;
++}
+
+ static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ struct extent_buffer *eb) { }
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 335e1ba5a23271..7ca75f8873799d 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -526,7 +526,7 @@ static inline void audit_log_kern_module(const char *name)
+
+ static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
+ {
+- if (!audit_dummy_context())
++ if (audit_enabled)
+ __audit_fanotify(response, friar);
+ }
+
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index f7f5a783da2aa8..b2342eebc8d226 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -15,7 +15,6 @@
+ # define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+ #endif
+
+-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+ #define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+ #define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+ #define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+diff --git a/include/linux/bits.h b/include/linux/bits.h
+index 7c0cf5031abe87..09e167bc453046 100644
+--- a/include/linux/bits.h
++++ b/include/linux/bits.h
+@@ -11,6 +11,7 @@
+ #define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
+ #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+ #define BITS_PER_BYTE 8
++#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+
+ /*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+@@ -18,17 +19,50 @@
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+ #if !defined(__ASSEMBLY__)
++
++/*
++ * Missing asm support
++ *
++ * GENMASK_U*() depend on BITS_PER_TYPE() which relies on sizeof(),
++ * something not available in asm. Nevertheless, fixed width integers are a C
++ * concept. Assembly code can rely on the long and long long versions instead.
++ */
++
+ #include <linux/build_bug.h>
++#include <linux/overflow.h>
+ #define GENMASK_INPUT_CHECK(h, l) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
+-#else
++
++/*
++ * Generate a mask for the specified type @t. 
Additional checks are made to
++ * guarantee the value returned fits in that type, relying on
++ * -Wshift-count-overflow compiler check to detect incompatible arguments.
++ * For example, all these create build errors or warnings:
++ *
++ * - GENMASK(15, 20): wrong argument order
++ * - GENMASK(72, 15): doesn't fit unsigned long
++ * - GENMASK_U32(33, 15): doesn't fit in a u32
++ */
++#define GENMASK_TYPE(t, h, l) \
++ ((t)(GENMASK_INPUT_CHECK(h, l) + \
++ (type_max(t) << (l) & \
++ type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
++
++#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
++#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
++#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
++#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
++
++#else /* defined(__ASSEMBLY__) */
++
+ /*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * disable the input check if that is the case.
+ */
+ #define GENMASK_INPUT_CHECK(h, l) 0
+-#endif
++
++#endif /* !defined(__ASSEMBLY__) */
+
+ #define __GENMASK(h, l) \
+ (((~UL(0)) - (UL(1) << (l)) + 1) & \
+diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
+index a9f7b7faf57b0d..cf55202aaec264 100644
+--- a/include/linux/gpio/regmap.h
++++ b/include/linux/gpio/regmap.h
+@@ -37,9 +37,18 @@ struct regmap;
+ * offset to a register/bitmask pair. If not
+ * given the default gpio_regmap_simple_xlate()
+ * is used.
++ * @fixed_direction_output:
++ * (Optional) Bitmap representing the fixed direction of
++ * the GPIO lines. Useful when there are GPIO lines with a
++ * fixed direction mixed together in the same register.
+ * @drvdata: (Optional) Pointer to driver specific data which is
+ * not used by gpio-remap but is provided "as is" to the
+ * driver callback(s).
++ * @regmap_irq_chip: (Optional) Pointer to a regmap_irq_chip structure. If
++ * set, a regmap-irq device will be created and the IRQ
++ * domain will be set accordingly.
++ * @regmap_irq_line: (Optional) The IRQ the device uses to signal interrupts.
++ * @regmap_irq_flags: (Optional) The IRQF_ flags to use for the interrupt.
+ *
+ * The ->reg_mask_xlate translates a given base address and GPIO offset to
+ * register and mask pair. 
The base address is one of the given register +@@ -77,6 +86,13 @@ struct gpio_regmap_config { + int reg_stride; + int ngpio_per_reg; + struct irq_domain *irq_domain; ++ unsigned long *fixed_direction_output; ++ ++#ifdef CONFIG_REGMAP_IRQ ++ struct regmap_irq_chip *regmap_irq_chip; ++ int regmap_irq_line; ++ unsigned long regmap_irq_flags; ++#endif + + int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, + unsigned int offset, unsigned int *reg, +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h +index 15960564e0c364..4d72d24b1f33e7 100644 +--- a/include/net/pkt_sched.h ++++ b/include/net/pkt_sched.h +@@ -112,7 +112,6 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, + struct netlink_ext_ack *extack); + void qdisc_put_rtab(struct qdisc_rate_table *tab); + void qdisc_put_stab(struct qdisc_size_table *tab); +-void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); + bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, + struct net_device *dev, struct netdev_queue *txq, + spinlock_t *root_lock, bool validate); +@@ -306,4 +305,28 @@ static inline bool tc_qdisc_stats_dump(struct Qdisc *sch, + return true; + } + ++static inline void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) ++{ ++ if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { ++ pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", ++ txt, qdisc->ops->id, qdisc->handle >> 16); ++ qdisc->flags |= TCQ_F_WARN_NONWC; ++ } ++} ++ ++static inline unsigned int qdisc_peek_len(struct Qdisc *sch) ++{ ++ struct sk_buff *skb; ++ unsigned int len; ++ ++ skb = sch->ops->peek(sch); ++ if (unlikely(skb == NULL)) { ++ qdisc_warn_nonwc("qdisc_peek_len", sch); ++ return 0; ++ } ++ len = qdisc_pkt_len(skb); ++ ++ return len; ++} ++ + #endif +diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c +index 1273be84392cfc..ee01cfcc35064a 100644 +--- a/kernel/events/callchain.c ++++ b/kernel/events/callchain.c +@@ -184,6 +184,10 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + struct perf_callchain_entry_ctx ctx; + int rctx; + ++ /* crosstask is not supported for user stacks */ ++ if (crosstask && user && !kernel) ++ return NULL; ++ + entry = get_callchain_entry(&rctx); + if (!entry) + return NULL; +@@ -200,18 +204,15 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + perf_callchain_kernel(&ctx, regs); + } + +- if (user) { ++ if (user && !crosstask) { + if (!user_mode(regs)) { +- if (current->mm) +- regs = task_pt_regs(current); +- else ++ if (current->flags & (PF_KTHREAD | PF_USER_WORKER)) + regs = NULL; ++ else ++ regs = task_pt_regs(current); + } + + if (regs) { +- if (crosstask) +- goto exit_put; +- + if (add_mark) + perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); + +@@ -219,7 +220,6 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + } + } + +-exit_put: + put_callchain_entry(rctx); + + return entry; +diff --git a/kernel/events/core.c b/kernel/events/core.c +index b73f5c44113d64..c9a3fb6fdb2f64 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6985,7 +6985,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user, + if (user_mode(regs)) { + regs_user->abi = perf_reg_abi(current); + regs_user->regs = regs; +- } else if (!(current->flags & PF_KTHREAD)) { ++ } else if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { + perf_get_regs_user(regs_user, regs); + } else { + regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; +@@ -7612,7 +7612,7 @@ static u64 
perf_virt_to_phys(u64 virt) + * Try IRQ-safe get_user_page_fast_only first. + * If failed, leave phys_addr as 0. + */ +- if (current->mm != NULL) { ++ if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { + struct page *p; + + pagefault_disable(); +@@ -7724,7 +7724,8 @@ struct perf_callchain_entry * + perf_callchain(struct perf_event *event, struct pt_regs *regs) + { + bool kernel = !event->attr.exclude_callchain_kernel; +- bool user = !event->attr.exclude_callchain_user; ++ bool user = !event->attr.exclude_callchain_user && ++ !(current->flags & (PF_KTHREAD | PF_USER_WORKER)); + /* Disallow cross-task user callchains. */ + bool crosstask = event->ctx->task && event->ctx->task != current; + const u32 max_stack = event->attr.sample_max_stack; +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 9d2c38421f7a2b..7fd6714f41fe79 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -619,6 +619,10 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + } + + subflow: ++ /* No need to try establishing subflows to remote id0 if not allowed */ ++ if (mptcp_pm_add_addr_c_flag_case(msk)) ++ goto exit; ++ + /* check if should create a new subflow */ + while (msk->pm.local_addr_used < local_addr_max && + msk->pm.subflows < subflows_max) { +@@ -650,6 +654,8 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) + __mptcp_subflow_connect(sk, &local.addr, &addrs[i]); + spin_lock_bh(&msk->pm.lock); + } ++ ++exit: + mptcp_pm_nl_check_work_pending(msk); + } + +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index a300e8c1b53aaa..b20dc987b907fc 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -599,16 +599,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, + qdisc_skb_cb(skb)->pkt_len = pkt_len; + } + +-void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) +-{ +- if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { +- pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", +- txt, qdisc->ops->id, qdisc->handle >> 16); +- qdisc->flags |= TCQ_F_WARN_NONWC; +- } +-} +-EXPORT_SYMBOL(qdisc_warn_nonwc); +- + static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) + { + struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c +index afcb83d469ff60..751b1e2c35b3f9 100644 +--- a/net/sched/sch_hfsc.c ++++ b/net/sched/sch_hfsc.c +@@ -835,22 +835,6 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) + } + } + +-static unsigned int +-qdisc_peek_len(struct Qdisc *sch) +-{ +- struct sk_buff *skb; +- unsigned int len; +- +- skb = sch->ops->peek(sch); +- if (unlikely(skb == NULL)) { +- qdisc_warn_nonwc("qdisc_peek_len", sch); +- return 0; +- } +- len = qdisc_pkt_len(skb); +- +- return len; +-} +- + static void + hfsc_adjust_levels(struct hfsc_class *cl) + { +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c +index c3f9a6375b4ea6..69fdbbbb3b6346 100644 +--- a/net/sched/sch_qfq.c ++++ b/net/sched/sch_qfq.c +@@ -1002,7 +1002,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg, + + if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ + list_del_init(&cl->alist); +- else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) { ++ else if (cl->deficit < qdisc_peek_len(cl->qdisc)) { + cl->deficit += agg->lmax; + list_move_tail(&cl->alist, &agg->active); + } +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 
d30314532bb71d..b9cc3d51dc2857 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3837,7 +3837,8 @@ endpoint_tests()
+
+ # remove and re-add
+ if reset_with_events "delete re-add signal" &&
+- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0
+ pm_nl_set_limits $ns1 0 3
+ pm_nl_set_limits $ns2 3 3
+ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.116-117.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.116-117.patch
new file mode 100644
index 0000000000..092d64e1a3
--- /dev/null
+++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.116-117.patch
@@ -0,0 +1,18627 @@
+diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
+index b26b5274eaaf14..622a7f28db1fd5 100644
+--- a/Documentation/admin-guide/cgroup-v2.rst
++++ b/Documentation/admin-guide/cgroup-v2.rst
+@@ -1532,6 +1532,15 @@ PAGE_SIZE multiple when read back.
+ collapsing an existing range of pages. This counter is not
+ present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
++ thp_swpout (npn)
++ Number of transparent hugepages which were swapped out in one piece
++ without splitting.
++
++ thp_swpout_fallback (npn)
++ Number of transparent hugepages which were split before swapout.
++ Usually because the kernel failed to allocate contiguous swap space
++ for the huge page.
++
+ memory.numa_stat
+ A read-only nested-keyed file which exists on non-root cgroups.
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 294d2ce29b7356..c1aafeb3babf98 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -14725,6 +14725,7 @@ NETDEVSIM
+ M: Jakub Kicinski <kuba@kernel.org>
+ S: Maintained
+ F: drivers/net/netdevsim/*
++F: tools/testing/selftests/drivers/net/netdevsim/*
+
+ NETEM NETWORK EMULATOR
+ M: Stephen Hemminger <stephen@networkplumber.org>
+diff --git a/Makefile b/Makefile
+index f28f1f9f5f4e6d..a849399cebf8fd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 116
++SUBLEVEL = 117
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+
+diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
+index f5a936496f0600..24981bba974d3f 100644
+--- a/arch/arc/include/asm/bitops.h
++++ b/arch/arc/include/asm/bitops.h
+@@ -133,6 +133,8 @@ static inline __attribute__ ((const)) int fls(unsigned int x)
+ */
+ static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
+ {
++ if (__builtin_constant_p(x))
++ return x ? 
BITS_PER_LONG - 1 - __builtin_clzl(x) : 0; + /* FLS insn has exactly same semantics as the API */ + return __builtin_arc_fls(x); + } +diff --git a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts +index ac44c745bdf8e6..a39a021a39107e 100644 +--- a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts ++++ b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts +@@ -55,8 +55,8 @@ &gmac0 { + mdio { + /delete-node/ switch@1e; + +- bcm54210e: ethernet-phy@0 { +- reg = <0>; ++ bcm54210e: ethernet-phy@25 { ++ reg = <25>; + }; + }; + }; +diff --git a/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts b/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts +index a3757b7daeda49..071a31191ec271 100644 +--- a/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts ++++ b/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts +@@ -502,6 +502,9 @@ magnetometer@e { + compatible = "asahi-kasei,ak8974"; + reg = <0xe>; + ++ interrupt-parent = <&gpio>; ++ interrupts = ; ++ + avdd-supply = <&vdd_3v3_sys>; + dvdd-supply = <&vdd_1v8_sys>; + +@@ -515,7 +518,7 @@ wm8903: audio-codec@1a { + reg = <0x1a>; + + interrupt-parent = <&gpio>; +- interrupts = ; ++ interrupts = ; + + gpio-controller; + #gpio-cells = <2>; +diff --git a/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts b/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts +index 5d4b29d765853e..6cc4c2f08b15dc 100644 +--- a/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts ++++ b/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts +@@ -259,7 +259,7 @@ &audmux { + pinctrl-0 = <&pinctrl_audmux>; + status = "okay"; + +- ssi2 { ++ mux-ssi2 { + fsl,audmux-port = <1>; + fsl,port-config = < + (IMX_AUDMUX_V2_PTCR_SYN | +@@ -271,7 +271,7 @@ IMX_AUDMUX_V2_PDCR_RXDSEL(2) + >; + }; + +- aud3 { ++ mux-aud3 { + fsl,audmux-port = <2>; + fsl,port-config = < + IMX_AUDMUX_V2_PTCR_SYN +diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig +index 847b7a0033569c..1f684e29cff2e3 100644 +--- a/arch/arm/crypto/Kconfig ++++ b/arch/arm/crypto/Kconfig +@@ -4,7 +4,7 @@ menu "Accelerated Cryptographic Algorithms for CPU (arm)" + + config CRYPTO_CURVE25519_NEON + tristate "Public key crypto: Curve25519 (NEON)" +- depends on KERNEL_MODE_NEON ++ depends on KERNEL_MODE_NEON && !CPU_BIG_ENDIAN + select CRYPTO_LIB_CURVE25519_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CURVE25519 + help +diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S +index 94dece1839af34..99aaf5cf896966 100644 +--- a/arch/arm/mach-at91/pm_suspend.S ++++ b/arch/arm/mach-at91/pm_suspend.S +@@ -689,6 +689,10 @@ sr_dis_exit: + bic tmp2, tmp2, #AT91_PMC_PLL_UPDT_ID + str tmp2, [pmc, #AT91_PMC_PLL_UPDT] + ++ /* save acr */ ++ ldr tmp2, [pmc, #AT91_PMC_PLL_ACR] ++ str tmp2, .saved_acr ++ + /* save div. */ + mov tmp1, #0 + ldr tmp2, [pmc, #AT91_PMC_PLL_CTRL0] +@@ -758,7 +762,7 @@ sr_dis_exit: + str tmp1, [pmc, #AT91_PMC_PLL_UPDT] + + /* step 2. */ +- ldr tmp1, =AT91_PMC_PLL_ACR_DEFAULT_PLLA ++ ldr tmp1, .saved_acr + str tmp1, [pmc, #AT91_PMC_PLL_ACR] + + /* step 3. 
*/ +@@ -1134,6 +1138,8 @@ ENDPROC(at91_pm_suspend_in_sram) + .word 0 + .saved_mckr: + .word 0 ++.saved_acr: ++ .word 0 + .saved_pllar: + .word 0 + .saved_sam9_lpr: +diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +index 6a02db4f073f29..a5426b82552ed8 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +@@ -482,6 +482,8 @@ &i2s0_8ch { + }; + + &i2s1_8ch { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>; + rockchip,trcm-sync-tx-only; + status = "okay"; + }; +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts +index 50c384aa253e40..8c3f9735e563b9 100644 +--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts ++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts +@@ -808,8 +808,8 @@ conf-tx { + pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59", + "MIO60", "MIO61", "MIO62", "MIO63"; + bias-disable; +- drive-strength = <4>; +- slew-rate = ; ++ drive-strength = <12>; ++ slew-rate = ; + }; + }; + +diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h +index 13b2462f3d8c9d..5faa97a87a9e2d 100644 +--- a/arch/loongarch/include/asm/hw_breakpoint.h ++++ b/arch/loongarch/include/asm/hw_breakpoint.h +@@ -134,13 +134,13 @@ static inline void hw_breakpoint_thread_switch(struct task_struct *next) + /* Determine number of BRP registers available. */ + static inline int get_num_brps(void) + { +- return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM; ++ return csr_read32(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM; + } + + /* Determine number of WRP registers available. */ + static inline int get_num_wrps(void) + { +- return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM; ++ return csr_read32(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM; + } + + #endif /* __KERNEL__ */ +diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h +index 29d9b12298bc84..fc51c6df7e998c 100644 +--- a/arch/loongarch/include/asm/pgtable.h ++++ b/arch/loongarch/include/asm/pgtable.h +@@ -448,6 +448,9 @@ static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) + + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) + { ++ if (pte_val(pte) & _PAGE_DIRTY) ++ pte_val(pte) |= _PAGE_MODIFIED; ++ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_PAGE_CHG_MASK)); + } +@@ -570,9 +573,11 @@ static inline struct page *pmd_page(pmd_t pmd) + + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) + { +- pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) | +- (pgprot_val(newprot) & ~_HPAGE_CHG_MASK); +- return pmd; ++ if (pmd_val(pmd) & _PAGE_DIRTY) ++ pmd_val(pmd) |= _PAGE_MODIFIED; ++ ++ return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) | ++ (pgprot_val(newprot) & ~_HPAGE_CHG_MASK)); + } + + static inline pmd_t pmd_mkinvalid(pmd_t pmd) +diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c +index 2b4b99b4e6c94e..d7291b8ea65aac 100644 +--- a/arch/loongarch/kernel/traps.c ++++ b/arch/loongarch/kernel/traps.c +@@ -1097,8 +1097,8 @@ static void configure_exception_vector(void) + tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE; + + csr_write64(eentry, LOONGARCH_CSR_EENTRY); +- csr_write64(eentry, LOONGARCH_CSR_MERRENTRY); +- csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY); ++ csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY); ++ csr_write64(__pa(tlbrentry), 
LOONGARCH_CSR_TLBRENTRY); + } + + void per_cpu_trap_init(int cpu) +diff --git a/arch/mips/boot/dts/lantiq/danube.dtsi b/arch/mips/boot/dts/lantiq/danube.dtsi +index 7a7ba66aa5349d..650400bd5725fa 100644 +--- a/arch/mips/boot/dts/lantiq/danube.dtsi ++++ b/arch/mips/boot/dts/lantiq/danube.dtsi +@@ -5,8 +5,12 @@ / { + compatible = "lantiq,xway", "lantiq,danube"; + + cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ + cpu@0 { + compatible = "mips,mips24Kc"; ++ reg = <0>; + }; + }; + +@@ -100,6 +104,8 @@ pci0: pci@e105400 { + 0x1000000 0 0x00000000 0xae00000 0 0x200000>; /* io space */ + reg = <0x7000000 0x8000 /* config space */ + 0xe105400 0x400>; /* pci bridge */ ++ ++ device_type = "pci"; + }; + }; + }; +diff --git a/arch/mips/boot/dts/lantiq/danube_easy50712.dts b/arch/mips/boot/dts/lantiq/danube_easy50712.dts +index c4d7aa5753b043..c9f7886f57b8ce 100644 +--- a/arch/mips/boot/dts/lantiq/danube_easy50712.dts ++++ b/arch/mips/boot/dts/lantiq/danube_easy50712.dts +@@ -4,6 +4,8 @@ + /include/ "danube.dtsi" + + / { ++ model = "Intel EASY50712"; ++ + chosen { + bootargs = "console=ttyLTQ0,115200 init=/etc/preinit"; + }; +@@ -94,7 +96,7 @@ ethernet@e180000 { + lantiq,tx-burst-length = <4>; + }; + +- stp0: stp@e100bb0 { ++ stp0: gpio@e100bb0 { + #gpio-cells = <2>; + compatible = "lantiq,gpio-stp-xway"; + gpio-controller; +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 4c72b59fdf98cc..492f375bf12903 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -466,7 +466,7 @@ void __init ltq_soc_init(void) + /* add our generic xway clocks */ + clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI); + clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT); +- clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP); ++ clkdev_add_pmu("1e100bb0.gpio", NULL, 1, 0, PMU_STP); + clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1); + clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA); + clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI); +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c +index 9ba4adc214af7c..cc8bedf410ea70 100644 +--- a/arch/powerpc/kernel/eeh_driver.c ++++ b/arch/powerpc/kernel/eeh_driver.c +@@ -334,7 +334,7 @@ static enum pci_ers_result eeh_report_error(struct eeh_dev *edev, + rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen); + + edev->in_error = true; +- pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE); ++ pci_uevent_ers(pdev, rc); + return rc; + } + +diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c +index 6b710ef9d9aef5..2114903ee3ad40 100644 +--- a/arch/riscv/kernel/cpu-hotplug.c ++++ b/arch/riscv/kernel/cpu-hotplug.c +@@ -61,6 +61,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) + + pr_notice("CPU%u: off\n", cpu); + ++ clear_tasks_mm_cpumask(cpu); + /* Verify from the firmware if the cpu is really stopped*/ + if (cpu_ops[cpu]->cpu_is_stopped) + ret = cpu_ops[cpu]->cpu_is_stopped(cpu); +diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S +index 1f90fee24a8ba8..11b2b1a3897df9 100644 +--- a/arch/riscv/kernel/entry.S ++++ b/arch/riscv/kernel/entry.S +@@ -82,7 +82,6 @@ _save_context: + la gp, __global_pointer$ + .option pop + move a0, sp /* pt_regs */ +- la ra, ret_from_exception + + /* + * MSB of cause differentiates between +@@ -91,7 +90,8 @@ _save_context: + bge s4, zero, 1f + + /* Handle interrupts */ +- tail do_irq ++ call do_irq ++ j ret_from_exception + 1: + /* Handle other exceptions */ + slli t0, s4, RISCV_LGPTR 
+@@ -99,11 +99,14 @@ _save_context: + la t2, excp_vect_table_end + add t0, t1, t0 + /* Check if exception code lies within bounds */ +- bgeu t0, t2, 1f +- REG_L t0, 0(t0) +- jr t0 +-1: +- tail do_trap_unknown ++ bgeu t0, t2, 3f ++ REG_L t1, 0(t0) ++2: jalr t1 ++ j ret_from_exception ++3: ++ ++ la t1, do_trap_unknown ++ j 2b + SYM_CODE_END(handle_exception) + ASM_NOKPROBE(handle_exception) + +@@ -171,6 +174,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception) + #else + sret + #endif ++SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL) + SYM_CODE_END(ret_from_exception) + ASM_NOKPROBE(ret_from_exception) + +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c +index f598e0eb3b0a04..ee269b1c99a190 100644 +--- a/arch/riscv/kernel/setup.c ++++ b/arch/riscv/kernel/setup.c +@@ -318,11 +318,14 @@ void __init setup_arch(char **cmdline_p) + /* Parse the ACPI tables for possible boot-time configuration */ + acpi_boot_table_init(); + ++ if (acpi_disabled) { + #if IS_ENABLED(CONFIG_BUILTIN_DTB) +- unflatten_and_copy_device_tree(); ++ unflatten_and_copy_device_tree(); + #else +- unflatten_device_tree(); ++ unflatten_device_tree(); + #endif ++ } ++ + misc_mem_init(); + + init_resources(); +diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c +index 10e311b2759d39..124ad8fe626fbe 100644 +--- a/arch/riscv/kernel/stacktrace.c ++++ b/arch/riscv/kernel/stacktrace.c +@@ -16,7 +16,24 @@ + + #ifdef CONFIG_FRAME_POINTER + +-extern asmlinkage void ret_from_exception(void); ++/* ++ * This disables KASAN checking when reading a value from another task's stack, ++ * since the other task could be running on another CPU and could have poisoned ++ * the stack in the meantime. ++ */ ++#define READ_ONCE_TASK_STACK(task, x) \ ++({ \ ++ unsigned long val; \ ++ unsigned long addr = x; \ ++ if ((task) == current) \ ++ val = READ_ONCE(addr); \ ++ else \ ++ val = READ_ONCE_NOCHECK(addr); \ ++ val; \ ++}) ++ ++extern asmlinkage void handle_exception(void); ++extern unsigned long ret_from_exception_end; + + static inline int fp_is_valid(unsigned long fp, unsigned long sp) + { +@@ -68,10 +85,12 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + fp = frame->ra; + pc = regs->ra; + } else { +- fp = frame->fp; +- pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra, ++ fp = READ_ONCE_TASK_STACK(task, frame->fp); ++ pc = READ_ONCE_TASK_STACK(task, frame->ra); ++ pc = ftrace_graph_ret_addr(current, &graph_idx, pc, + &frame->ra); +- if (pc == (unsigned long)ret_from_exception) { ++ if (pc >= (unsigned long)handle_exception && ++ pc < (unsigned long)&ret_from_exception_end) { + if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc))) + break; + +diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c +index e9090b38f8117c..52cc3d9380c084 100644 +--- a/arch/riscv/mm/ptdump.c ++++ b/arch/riscv/mm/ptdump.c +@@ -22,7 +22,7 @@ + #define pt_dump_seq_puts(m, fmt) \ + ({ \ + if (m) \ +- seq_printf(m, fmt); \ ++ seq_puts(m, fmt); \ + }) + + /* +diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c +index 16eb4cd11cbd67..5426dc2697f94e 100644 +--- a/arch/riscv/net/bpf_jit_comp64.c ++++ b/arch/riscv/net/bpf_jit_comp64.c +@@ -855,10 +855,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, + stack_size += 16; + + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); +- if (save_ret) { ++ if (save_ret) + stack_size += 16; /* Save both A5 (BPF R0) and A0 */ +- retval_off = stack_size; +- } ++ retval_off = 
stack_size; + + stack_size += nregs * 8; + args_off = stack_size; +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig +index bd4782f23f66df..e99dae26500d26 100644 +--- a/arch/s390/Kconfig ++++ b/arch/s390/Kconfig +@@ -128,7 +128,6 @@ config S390 + select ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_KERNEL_PMD_MKWRITE +- select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP + select BUILDTIME_TABLE_SORT + select CLONE_BACKWARDS2 + select DMA_OPS if PCI +diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h +index b248694e00247b..30e8e6baa5f856 100644 +--- a/arch/s390/include/asm/pci.h ++++ b/arch/s390/include/asm/pci.h +@@ -138,7 +138,6 @@ struct zpci_dev { + u8 has_resources : 1; + u8 is_physfn : 1; + u8 util_str_avail : 1; +- u8 irqs_registered : 1; + u8 reserved : 2; + unsigned int devfn; /* DEVFN part of the RID*/ + +diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c +index d969f36bf186f2..bd4172db263450 100644 +--- a/arch/s390/pci/pci_event.c ++++ b/arch/s390/pci/pci_event.c +@@ -83,6 +83,7 @@ static pci_ers_result_t zpci_event_notify_error_detected(struct pci_dev *pdev, + pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT; + + ers_res = driver->err_handler->error_detected(pdev, pdev->error_state); ++ pci_uevent_ers(pdev, ers_res); + if (ers_result_indicates_abort(ers_res)) + pr_info("%s: Automatic recovery failed after initial reporting\n", pci_name(pdev)); + else if (ers_res == PCI_ERS_RESULT_NEED_RESET) +@@ -173,7 +174,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev) + * is unbound or probed and that userspace can't access its + * configuration space while we perform recovery. + */ +- pci_dev_lock(pdev); ++ device_lock(&pdev->dev); + if (pdev->error_state == pci_channel_io_perm_failure) { + ers_res = PCI_ERS_RESULT_DISCONNECT; + goto out_unlock; +@@ -212,6 +213,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev) + ers_res = zpci_event_do_reset(pdev, driver); + + if (ers_res != PCI_ERS_RESULT_RECOVERED) { ++ pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT); + pr_err("%s: Automatic recovery failed; operator intervention is required\n", + pci_name(pdev)); + goto out_unlock; +@@ -220,8 +222,9 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev) + pr_info("%s: The device is ready to resume operations\n", pci_name(pdev)); + if (driver->err_handler->resume) + driver->err_handler->resume(pdev); ++ pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED); + out_unlock: +- pci_dev_unlock(pdev); ++ device_unlock(&pdev->dev); + + return ers_res; + } +diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c +index 84482a92133220..e73be96ce5fe64 100644 +--- a/arch/s390/pci/pci_irq.c ++++ b/arch/s390/pci/pci_irq.c +@@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev) + else + rc = zpci_set_airq(zdev); + +- if (!rc) +- zdev->irqs_registered = 1; +- + return rc; + } + +@@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev) + else + rc = zpci_clear_airq(zdev); + +- if (!rc) +- zdev->irqs_registered = 0; +- + return rc; + } + +@@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev) + { + struct zpci_dev *zdev = to_zpci(pdev); + +- if (!zdev->irqs_registered) +- zpci_set_irq(zdev); ++ zpci_set_irq(zdev); + return true; + } + +diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h +index 8fb09eec8c3e79..694ed081cf8d99 100644 +--- a/arch/sparc/include/asm/elf_64.h ++++ 
b/arch/sparc/include/asm/elf_64.h +@@ -58,6 +58,7 @@ + #define R_SPARC_7 43 + #define R_SPARC_5 44 + #define R_SPARC_6 45 ++#define R_SPARC_UA64 54 + + /* Bits present in AT_HWCAP, primarily for Sparc32. */ + #define HWCAP_SPARC_FLUSH 0x00000001 +diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h +index 9303270b22f3cf..f9d370324729e7 100644 +--- a/arch/sparc/include/asm/io_64.h ++++ b/arch/sparc/include/asm/io_64.h +@@ -250,19 +250,19 @@ void insl(unsigned long, void *, unsigned long); + #define insw insw + #define insl insl + +-static inline void readsb(void __iomem *port, void *buf, unsigned long count) ++static inline void readsb(const volatile void __iomem *port, void *buf, unsigned long count) + { + insb((unsigned long __force)port, buf, count); + } + #define readsb readsb + +-static inline void readsw(void __iomem *port, void *buf, unsigned long count) ++static inline void readsw(const volatile void __iomem *port, void *buf, unsigned long count) + { + insw((unsigned long __force)port, buf, count); + } + #define readsw readsw + +-static inline void readsl(void __iomem *port, void *buf, unsigned long count) ++static inline void readsl(const volatile void __iomem *port, void *buf, unsigned long count) + { + insl((unsigned long __force)port, buf, count); + } +diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c +index 66c45a2764bc89..a7780907fc2f96 100644 +--- a/arch/sparc/kernel/module.c ++++ b/arch/sparc/kernel/module.c +@@ -117,6 +117,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, + break; + #ifdef CONFIG_SPARC64 + case R_SPARC_64: ++ case R_SPARC_UA64: + location[0] = v >> 56; + location[1] = v >> 48; + location[2] = v >> 40; +diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c +index 277cea3d30eb59..8006a5bd578c27 100644 +--- a/arch/um/drivers/ssl.c ++++ b/arch/um/drivers/ssl.c +@@ -199,4 +199,7 @@ static int ssl_non_raw_setup(char *str) + return 1; + } + __setup("ssl-non-raw", ssl_non_raw_setup); +-__channel_help(ssl_non_raw_setup, "set serial lines to non-raw mode"); ++__uml_help(ssl_non_raw_setup, ++"ssl-non-raw\n" ++" Set serial lines to non-raw mode.\n\n" ++); +diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c +index 2fb7d53cf3338d..95e053b0a4bc08 100644 +--- a/arch/x86/entry/vsyscall/vsyscall_64.c ++++ b/arch/x86/entry/vsyscall/vsyscall_64.c +@@ -124,7 +124,12 @@ bool emulate_vsyscall(unsigned long error_code, + if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER) + return false; + +- if (!(error_code & X86_PF_INSTR)) { ++ /* ++ * Assume that faults at regs->ip are because of an ++ * instruction fetch. Return early and avoid ++ * emulation for faults during data accesses: ++ */ ++ if (address != regs->ip) { + /* Failed vsyscall read */ + if (vsyscall_mode == EMULATE) + return false; +@@ -136,13 +141,19 @@ bool emulate_vsyscall(unsigned long error_code, + return false; + } + ++ /* ++ * X86_PF_INSTR is only set when NX is supported. When ++ * available, use it to double-check that the emulation code ++ * is only being used for instruction fetches: ++ */ ++ if (cpu_feature_enabled(X86_FEATURE_NX)) ++ WARN_ON_ONCE(!(error_code & X86_PF_INSTR)); ++ + /* + * No point in checking CS -- the only way to get here is a user mode + * trap to a high address, which means that we're in 64-bit user code. 
+ */ + +- WARN_ON_ONCE(address != regs->ip); +- + if (vsyscall_mode == NONE) { + warn_bad_vsyscall(KERN_INFO, regs, + "vsyscall attempted with vsyscall=none"); +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c +index e689e3981bd9e1..f9b6e2043e6b2b 100644 +--- a/arch/x86/kernel/cpu/microcode/amd.c ++++ b/arch/x86/kernel/cpu/microcode/amd.c +@@ -210,10 +210,13 @@ static bool need_sha_check(u32 cur_rev) + case 0xaa001: return cur_rev <= 0xaa00116; break; + case 0xaa002: return cur_rev <= 0xaa00218; break; + case 0xb0021: return cur_rev <= 0xb002146; break; ++ case 0xb0081: return cur_rev <= 0xb008111; break; + case 0xb1010: return cur_rev <= 0xb101046; break; + case 0xb2040: return cur_rev <= 0xb204031; break; + case 0xb4040: return cur_rev <= 0xb404031; break; ++ case 0xb4041: return cur_rev <= 0xb404101; break; + case 0xb6000: return cur_rev <= 0xb600031; break; ++ case 0xb6080: return cur_rev <= 0xb608031; break; + case 0xb7000: return cur_rev <= 0xb700031; break; + default: break; + } +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c +index aaed20f46be4ce..175c14567cf142 100644 +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -757,6 +757,9 @@ void fpu__clear_user_states(struct fpu *fpu) + !fpregs_state_valid(fpu, smp_processor_id())) + os_xrstor_supervisor(fpu->fpstate); + ++ /* Ensure XFD state is in sync before reloading XSTATE */ ++ xfd_update_state(fpu->fpstate); ++ + /* Reset user states in registers. */ + restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); + +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index b8ab9ee5896c19..b5ef2c2899f2a4 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -1066,16 +1066,6 @@ static void kvm_wait(u8 *ptr, u8 val) + */ + void __init kvm_spinlock_init(void) + { +- /* +- * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an +- * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is +- * preferred over native qspinlock when vCPU is preempted. +- */ +- if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) { +- pr_info("PV spinlocks disabled, no host support\n"); +- return; +- } +- + /* + * Disable PV spinlocks and use native qspinlock when dedicated pCPUs + * are available. +@@ -1095,6 +1085,16 @@ void __init kvm_spinlock_init(void) + goto out; + } + ++ /* ++ * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an ++ * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is ++ * preferred over native qspinlock when vCPU is preempted. 
++ */ ++ if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) { ++ pr_info("PV spinlocks disabled, no host support\n"); ++ return; ++ } ++ + pr_info("PV spinlocks enabled\n"); + + __pv_init_lock_hash(); +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 0833f2c1a9d68b..99180d0af3ea61 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -3183,7 +3183,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) + if (data & DEBUGCTL_RESERVED_BITS) + return 1; + ++ if (svm_get_lbr_vmcb(svm)->save.dbgctl == data) ++ break; ++ + svm_get_lbr_vmcb(svm)->save.dbgctl = data; ++ vmcb_mark_dirty(svm->vmcb, VMCB_LBR); + svm_update_lbrv(vcpu); + break; + case MSR_VM_HSAVE_PA: +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index 07592eef253c21..0be138fbd0a058 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -1995,7 +1995,7 @@ st: if (is_imm8(insn->off)) + ctx->cleanup_addr = proglen; + + if (bpf_prog_was_classic(bpf_prog) && +- !capable(CAP_SYS_ADMIN)) { ++ !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) { + u8 *ip = image + addrs[i - 1]; + + if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index 64551b0aa51e66..75e9d5a9d707c4 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -848,14 +848,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + disk = ctx->bdev->bd_disk; + q = disk->queue; + +- /* +- * blkcg_deactivate_policy() requires queue to be frozen, we can grab +- * q_usage_counter to prevent concurrent with blkcg_deactivate_policy(). +- */ +- ret = blk_queue_enter(q, 0); +- if (ret) +- goto fail; +- ++ /* Prevent concurrent with blkcg_deactivate_policy() */ ++ mutex_lock(&q->blkcg_mutex); + spin_lock_irq(&q->queue_lock); + + if (!blkcg_policy_enabled(q, pol)) { +@@ -885,16 +879,16 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + /* Drop locks to do new blkg allocation with GFP_KERNEL. */ + spin_unlock_irq(&q->queue_lock); + +- new_blkg = blkg_alloc(pos, disk, GFP_KERNEL); ++ new_blkg = blkg_alloc(pos, disk, GFP_NOIO); + if (unlikely(!new_blkg)) { + ret = -ENOMEM; +- goto fail_exit_queue; ++ goto fail_exit; + } + + if (radix_tree_preload(GFP_KERNEL)) { + blkg_free(new_blkg); + ret = -ENOMEM; +- goto fail_exit_queue; ++ goto fail_exit; + } + + spin_lock_irq(&q->queue_lock); +@@ -922,7 +916,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + goto success; + } + success: +- blk_queue_exit(q); ++ mutex_unlock(&q->blkcg_mutex); + ctx->blkg = blkg; + return 0; + +@@ -930,9 +924,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + radix_tree_preload_end(); + fail_unlock: + spin_unlock_irq(&q->queue_lock); +-fail_exit_queue: +- blk_queue_exit(q); +-fail: ++fail_exit: ++ mutex_unlock(&q->blkcg_mutex); + /* + * If queue was bypassing, we should retry. Do so after a + * short msleep(). 
It isn't strictly necessary but queue +diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c +index 5b7d9a351133fe..33a4246dd73589 100644 +--- a/drivers/accel/habanalabs/common/memory.c ++++ b/drivers/accel/habanalabs/common/memory.c +@@ -2323,7 +2323,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size, + if (rc < 0) + goto destroy_pages; + npages = rc; +- rc = -EFAULT; ++ rc = -ENOMEM; + goto put_pages; + } + userptr->npages = npages; +diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c +index 056e2ef44afb50..ceed6cfe2f9190 100644 +--- a/drivers/accel/habanalabs/gaudi/gaudi.c ++++ b/drivers/accel/habanalabs/gaudi/gaudi.c +@@ -4173,10 +4173,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE); + ++#ifdef _HAS_DMA_MMAP_COHERENT ++ /* ++ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP ++ * so vm_insert_page() can handle it safely. Without this, the kernel ++ * may BUG_ON due to VM_PFNMAP. ++ */ ++ if (is_vmalloc_addr(cpu_addr)) ++ vm_flags_set(vma, VM_MIXEDMAP); ++ + rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, + (dma_addr - HOST_PHYS_BASE), size); + if (rc) + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); ++#else ++ ++ rc = remap_pfn_range(vma, vma->vm_start, ++ virt_to_phys(cpu_addr) >> PAGE_SHIFT, ++ size, vma->vm_page_prot); ++ if (rc) ++ dev_err(hdev->dev, "remap_pfn_range error %d", rc); ++ ++ #endif ++ + + return rc; + } +diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c +index 31c74ca70a2e5c..44b5678ea615c4 100644 +--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c ++++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c +@@ -2985,7 +2985,6 @@ static int gaudi2_early_init(struct hl_device *hdev) + rc = hl_fw_read_preboot_status(hdev); + if (rc) { + if (hdev->reset_on_preboot_fail) +- /* we are already on failure flow, so don't check if hw_fini fails. */ + hdev->asic_funcs->hw_fini(hdev, true, false); + goto pci_fini; + } +@@ -2997,6 +2996,13 @@ static int gaudi2_early_init(struct hl_device *hdev) + dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc); + goto pci_fini; + } ++ ++ rc = hl_fw_read_preboot_status(hdev); ++ if (rc) { ++ if (hdev->reset_on_preboot_fail) ++ hdev->asic_funcs->hw_fini(hdev, true, false); ++ goto pci_fini; ++ } + } + + return 0; +@@ -6339,6 +6345,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + VM_DONTCOPY | VM_NORESERVE); + + #ifdef _HAS_DMA_MMAP_COHERENT ++ /* ++ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP ++ * so vm_insert_page() can handle it safely. Without this, the kernel ++ * may BUG_ON due to VM_PFNMAP. 
++ */ ++ if (is_vmalloc_addr(cpu_addr)) ++ vm_flags_set(vma, VM_MIXEDMAP); + + rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size); + if (rc) +diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c +index 25b5368f37dde9..9ff00b65ae34bc 100644 +--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c ++++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c +@@ -2409,7 +2409,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa + WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0); + WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0); + WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8)); +- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24); ++ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41); + } + + return 0; +diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c +index a971770e24ff90..841db33e947560 100644 +--- a/drivers/acpi/acpi_video.c ++++ b/drivers/acpi/acpi_video.c +@@ -1952,8 +1952,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) + struct acpi_video_device *dev; + + mutex_lock(&video->device_list_lock); +- list_for_each_entry(dev, &video->video_device_list, entry) ++ list_for_each_entry(dev, &video->video_device_list, entry) { + acpi_video_dev_remove_notify_handler(dev); ++ cancel_delayed_work_sync(&dev->switch_brightness_work); ++ } + mutex_unlock(&video->device_list_lock); + + acpi_video_bus_stop_devices(video); +diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c +index a232746d150a75..6168597a96e6fe 100644 +--- a/drivers/acpi/acpica/dsmethod.c ++++ b/drivers/acpi/acpica/dsmethod.c +@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, + struct acpi_walk_state *next_walk_state = NULL; + union acpi_operand_object *obj_desc; + struct acpi_evaluate_info *info; +- u32 i; + + ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); + +@@ -546,14 +545,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, + * Delete the operands on the previous walkstate operand stack + * (they were copied to new objects) + */ +- for (i = 0; i < obj_desc->method.param_count; i++) { +- acpi_ut_remove_reference(this_walk_state->operands[i]); +- this_walk_state->operands[i] = NULL; +- } +- +- /* Clear the operand stack */ +- +- this_walk_state->num_operands = 0; ++ acpi_ds_clear_operands(this_walk_state); + + ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, + "**** Begin nested execution of [%4.4s] **** WalkState=%p\n", +diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c +index 906a7bfa448b31..c760e38df981f3 100644 +--- a/drivers/acpi/button.c ++++ b/drivers/acpi/button.c +@@ -603,8 +603,10 @@ static int acpi_button_add(struct acpi_device *device) + + input_set_drvdata(input, device); + error = input_register_device(input); +- if (error) ++ if (error) { ++ input_free_device(input); + goto err_remove_fs; ++ } + + switch (device->device_type) { + case ACPI_BUS_TYPE_POWER_BUTTON: +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index 10d531427ba77d..888c7838579a87 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -445,7 +445,7 @@ bool acpi_cpc_valid(void) + if (acpi_disabled) + return false; + +- for_each_present_cpu(cpu) { ++ for_each_online_cpu(cpu) { + cpc_ptr = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_ptr) + return false; +@@ -461,7 +461,7 @@ bool cppc_allow_fast_switch(void) + struct cpc_desc *cpc_ptr; + int cpu; + +- for_each_present_cpu(cpu) { ++ 
for_each_online_cpu(cpu) { + cpc_ptr = per_cpu(cpc_desc_ptr, cpu); + desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF]; + if (!CPC_IN_SYSTEM_MEMORY(desired_reg) && +@@ -1368,7 +1368,7 @@ bool cppc_perf_ctrs_in_pcc(void) + { + int cpu; + +- for_each_present_cpu(cpu) { ++ for_each_online_cpu(cpu) { + struct cpc_register_resource *ref_perf_reg; + struct cpc_desc *cpc_desc; + +diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c +index bba268ecd802f4..1d6450c139225d 100644 +--- a/drivers/acpi/numa/hmat.c ++++ b/drivers/acpi/numa/hmat.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + static u8 hmat_revision; + static int hmat_disable __initdata; +@@ -57,14 +58,20 @@ struct target_cache { + struct node_cache_attrs cache_attrs; + }; + ++enum { ++ NODE_ACCESS_CLASS_GENPORT_SINK = ACCESS_COORDINATE_MAX, ++ NODE_ACCESS_CLASS_MAX, ++}; ++ + struct memory_target { + struct list_head node; + unsigned int memory_pxm; + unsigned int processor_pxm; + struct resource memregions; +- struct node_hmem_attrs hmem_attrs[2]; ++ struct access_coordinate coord[NODE_ACCESS_CLASS_MAX]; + struct list_head caches; + struct node_cache_attrs cache_attrs; ++ u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE]; + bool registered; + }; + +@@ -119,8 +126,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm) + list_add_tail(&initiator->node, &initiators); + } + +-static __init void alloc_memory_target(unsigned int mem_pxm, +- resource_size_t start, resource_size_t len) ++static __init struct memory_target *alloc_target(unsigned int mem_pxm) + { + struct memory_target *target; + +@@ -128,7 +134,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm, + if (!target) { + target = kzalloc(sizeof(*target), GFP_KERNEL); + if (!target) +- return; ++ return NULL; + target->memory_pxm = mem_pxm; + target->processor_pxm = PXM_INVAL; + target->memregions = (struct resource) { +@@ -141,6 +147,19 @@ static __init void alloc_memory_target(unsigned int mem_pxm, + INIT_LIST_HEAD(&target->caches); + } + ++ return target; ++} ++ ++static __init void alloc_memory_target(unsigned int mem_pxm, ++ resource_size_t start, ++ resource_size_t len) ++{ ++ struct memory_target *target; ++ ++ target = alloc_target(mem_pxm); ++ if (!target) ++ return; ++ + /* + * There are potentially multiple ranges per PXM, so record each + * in the per-target memregions resource tree. 
+@@ -151,6 +170,18 @@ static __init void alloc_memory_target(unsigned int mem_pxm, + start, start + len, mem_pxm); + } + ++static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle) ++{ ++ struct memory_target *target; ++ ++ target = alloc_target(mem_pxm); ++ if (!target) ++ return; ++ ++ memcpy(target->gen_port_device_handle, handle, ++ ACPI_SRAT_DEVICE_HANDLE_SIZE); ++} ++ + static __init const char *hmat_data_type(u8 type) + { + switch (type) { +@@ -227,24 +258,24 @@ static void hmat_update_target_access(struct memory_target *target, + { + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: +- target->hmem_attrs[access].read_latency = value; +- target->hmem_attrs[access].write_latency = value; ++ target->coord[access].read_latency = value; ++ target->coord[access].write_latency = value; + break; + case ACPI_HMAT_READ_LATENCY: +- target->hmem_attrs[access].read_latency = value; ++ target->coord[access].read_latency = value; + break; + case ACPI_HMAT_WRITE_LATENCY: +- target->hmem_attrs[access].write_latency = value; ++ target->coord[access].write_latency = value; + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: +- target->hmem_attrs[access].read_bandwidth = value; +- target->hmem_attrs[access].write_bandwidth = value; ++ target->coord[access].read_bandwidth = value; ++ target->coord[access].write_bandwidth = value; + break; + case ACPI_HMAT_READ_BANDWIDTH: +- target->hmem_attrs[access].read_bandwidth = value; ++ target->coord[access].read_bandwidth = value; + break; + case ACPI_HMAT_WRITE_BANDWIDTH: +- target->hmem_attrs[access].write_bandwidth = value; ++ target->coord[access].write_bandwidth = value; + break; + default: + break; +@@ -290,11 +321,28 @@ static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc) + } + } + ++static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm, ++ u8 mem_hier, u8 type, u32 value) ++{ ++ struct memory_target *target = find_mem_target(tgt_pxm); ++ ++ if (mem_hier != ACPI_HMAT_MEMORY) ++ return; ++ ++ if (target && target->processor_pxm == init_pxm) { ++ hmat_update_target_access(target, type, value, ++ ACCESS_COORDINATE_LOCAL); ++ /* If the node has a CPU, update access 1 */ ++ if (node_state(pxm_to_node(init_pxm), N_CPU)) ++ hmat_update_target_access(target, type, value, ++ ACCESS_COORDINATE_CPU); ++ } ++} ++ + static __init int hmat_parse_locality(union acpi_subtable_headers *header, + const unsigned long end) + { + struct acpi_hmat_locality *hmat_loc = (void *)header; +- struct memory_target *target; + unsigned int init, targ, total_size, ipds, tpds; + u32 *inits, *targs, value; + u16 *entries; +@@ -335,15 +383,8 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header, + inits[init], targs[targ], value, + hmat_data_type_suffix(type)); + +- if (mem_hier == ACPI_HMAT_MEMORY) { +- target = find_mem_target(targs[targ]); +- if (target && target->processor_pxm == inits[init]) { +- hmat_update_target_access(target, type, value, 0); +- /* If the node has a CPU, update access 1 */ +- if (node_state(pxm_to_node(inits[init]), N_CPU)) +- hmat_update_target_access(target, type, value, 1); +- } +- } ++ hmat_update_target(targs[targ], inits[init], ++ mem_hier, type, value); + } + } + +@@ -490,6 +531,27 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header, + return 0; + } + ++static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_generic_affinity *ga = (void *)header; ++ ++ if (!ga) ++ return 
-EINVAL; ++ ++ if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED)) ++ return 0; ++ ++ /* Skip PCI device_handle for now */ ++ if (ga->device_handle_type != 0) ++ return 0; ++ ++ alloc_genport_target(ga->proximity_domain, ++ (u8 *)ga->device_handle); ++ ++ return 0; ++} ++ + static u32 hmat_initiator_perf(struct memory_target *target, + struct memory_initiator *initiator, + struct acpi_hmat_locality *hmat_loc) +@@ -582,28 +644,31 @@ static int initiators_to_nodemask(unsigned long *p_nodes) + return 0; + } + +-static void hmat_register_target_initiators(struct memory_target *target) ++static void hmat_update_target_attrs(struct memory_target *target, ++ unsigned long *p_nodes, int access) + { +- static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); + struct memory_initiator *initiator; +- unsigned int mem_nid, cpu_nid; ++ unsigned int cpu_nid; + struct memory_locality *loc = NULL; + u32 best = 0; +- bool access0done = false; + int i; + +- mem_nid = pxm_to_node(target->memory_pxm); ++ /* Don't update for generic port if there's no device handle */ ++ if (access == NODE_ACCESS_CLASS_GENPORT_SINK && ++ !(*(u16 *)target->gen_port_device_handle)) ++ return; ++ ++ bitmap_zero(p_nodes, MAX_NUMNODES); + /* +- * If the Address Range Structure provides a local processor pxm, link ++ * If the Address Range Structure provides a local processor pxm, set + * only that one. Otherwise, find the best performance attributes and +- * register all initiators that match. ++ * collect all initiators that match. + */ + if (target->processor_pxm != PXM_INVAL) { + cpu_nid = pxm_to_node(target->processor_pxm); +- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0); +- access0done = true; +- if (node_state(cpu_nid, N_CPU)) { +- register_memory_node_under_compute_node(mem_nid, cpu_nid, 1); ++ if (access == ACCESS_COORDINATE_LOCAL || ++ node_state(cpu_nid, N_CPU)) { ++ set_bit(target->processor_pxm, p_nodes); + return; + } + } +@@ -617,47 +682,10 @@ static void hmat_register_target_initiators(struct memory_target *target) + * We'll also use the sorting to prime the candidate nodes with known + * initiators. 
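/*
 * Illustrative aside, not part of the patch: the selection loop in this
 * hunk keeps a candidate bitmap and, whenever a strictly better value
 * appears, drops every candidate seen so far; candidates matching the
 * running best stay set. A userspace sketch of that winnowing, assuming
 * smaller values win (as for latency):
 */
#include <stdio.h>

#define NCAND 8

int main(void)
{
	const unsigned int value[NCAND] = { 9, 7, 7, 12, 7, 30, 7, 8 };
	unsigned char keep[NCAND];
	unsigned int best = 0;             /* 0 means "no value seen yet" */

	for (int i = 0; i < NCAND; i++) {
		keep[i] = 1;
		if (!best || value[i] < best) {
			best = value[i];
			/* new best: every earlier candidate loses */
			for (int j = 0; j < i; j++)
				keep[j] = 0;
		}
		if (value[i] != best)
			keep[i] = 0;       /* worse than best: drop */
	}

	/* Prints candidates 1, 2, 4 and 6, the ones tied at value 7. */
	for (int i = 0; i < NCAND; i++)
		if (keep[i])
			printf("candidate %d (value %u)\n", i, value[i]);
	return 0;
}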
+ */ +- bitmap_zero(p_nodes, MAX_NUMNODES); + list_sort(NULL, &initiators, initiator_cmp); + if (initiators_to_nodemask(p_nodes) < 0) + return; + +- if (!access0done) { +- for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { +- loc = localities_types[i]; +- if (!loc) +- continue; +- +- best = 0; +- list_for_each_entry(initiator, &initiators, node) { +- u32 value; +- +- if (!test_bit(initiator->processor_pxm, p_nodes)) +- continue; +- +- value = hmat_initiator_perf(target, initiator, +- loc->hmat_loc); +- if (hmat_update_best(loc->hmat_loc->data_type, value, &best)) +- bitmap_clear(p_nodes, 0, initiator->processor_pxm); +- if (value != best) +- clear_bit(initiator->processor_pxm, p_nodes); +- } +- if (best) +- hmat_update_target_access(target, loc->hmat_loc->data_type, +- best, 0); +- } +- +- for_each_set_bit(i, p_nodes, MAX_NUMNODES) { +- cpu_nid = pxm_to_node(i); +- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0); +- } +- } +- +- /* Access 1 ignores Generic Initiators */ +- bitmap_zero(p_nodes, MAX_NUMNODES); +- if (initiators_to_nodemask(p_nodes) < 0) +- return; +- + for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { + loc = localities_types[i]; + if (!loc) +@@ -667,7 +695,8 @@ static void hmat_register_target_initiators(struct memory_target *target) + list_for_each_entry(initiator, &initiators, node) { + u32 value; + +- if (!initiator->has_cpu) { ++ if (access == ACCESS_COORDINATE_CPU && ++ !initiator->has_cpu) { + clear_bit(initiator->processor_pxm, p_nodes); + continue; + } +@@ -681,14 +710,43 @@ static void hmat_register_target_initiators(struct memory_target *target) + clear_bit(initiator->processor_pxm, p_nodes); + } + if (best) +- hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1); ++ hmat_update_target_access(target, loc->hmat_loc->data_type, best, access); + } ++} ++ ++static void __hmat_register_target_initiators(struct memory_target *target, ++ unsigned long *p_nodes, ++ int access) ++{ ++ unsigned int mem_nid, cpu_nid; ++ int i; ++ ++ mem_nid = pxm_to_node(target->memory_pxm); ++ hmat_update_target_attrs(target, p_nodes, access); + for_each_set_bit(i, p_nodes, MAX_NUMNODES) { + cpu_nid = pxm_to_node(i); +- register_memory_node_under_compute_node(mem_nid, cpu_nid, 1); ++ register_memory_node_under_compute_node(mem_nid, cpu_nid, access); + } + } + ++static void hmat_update_generic_target(struct memory_target *target) ++{ ++ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); ++ ++ hmat_update_target_attrs(target, p_nodes, ++ NODE_ACCESS_CLASS_GENPORT_SINK); ++} ++ ++static void hmat_register_target_initiators(struct memory_target *target) ++{ ++ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); ++ ++ __hmat_register_target_initiators(target, p_nodes, ++ ACCESS_COORDINATE_LOCAL); ++ __hmat_register_target_initiators(target, p_nodes, ++ ACCESS_COORDINATE_CPU); ++} ++ + static void hmat_register_target_cache(struct memory_target *target) + { + unsigned mem_nid = pxm_to_node(target->memory_pxm); +@@ -701,7 +759,7 @@ static void hmat_register_target_cache(struct memory_target *target) + static void hmat_register_target_perf(struct memory_target *target, int access) + { + unsigned mem_nid = pxm_to_node(target->memory_pxm); +- node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access); ++ node_set_perf_attrs(mem_nid, &target->coord[access], access); + } + + static void hmat_register_target_devices(struct memory_target *target) +@@ -722,10 +780,32 @@ static void hmat_register_target_devices(struct memory_target *target) + } + } + +-static void 
hmat_register_target(struct memory_target *target) ++static void hmat_hotplug_target(struct memory_target *target) + { + int nid = pxm_to_node(target->memory_pxm); + ++ /* ++ * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP, ++ * "specific purpose", is applied to all the memory in a proximity ++ * domain leading to * the node being marked offline / unplugged, or if ++ * memory-only "hotplug" node is offline. ++ */ ++ if (nid == NUMA_NO_NODE || !node_online(nid)) ++ return; ++ ++ guard(mutex)(&target_lock); ++ if (target->registered) ++ return; ++ ++ hmat_register_target_initiators(target); ++ hmat_register_target_cache(target); ++ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL); ++ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU); ++ target->registered = true; ++} ++ ++static void hmat_register_target(struct memory_target *target) ++{ + /* + * Devices may belong to either an offline or online + * node, so unconditionally add them. +@@ -733,24 +813,17 @@ static void hmat_register_target(struct memory_target *target) + hmat_register_target_devices(target); + + /* +- * Skip offline nodes. This can happen when memory +- * marked EFI_MEMORY_SP, "specific purpose", is applied +- * to all the memory in a proximity domain leading to +- * the node being marked offline / unplugged, or if +- * memory-only "hotplug" node is offline. ++ * Register generic port perf numbers. The nid may not be ++ * initialized and is still NUMA_NO_NODE. + */ +- if (nid == NUMA_NO_NODE || !node_online(nid)) +- return; +- + mutex_lock(&target_lock); +- if (!target->registered) { +- hmat_register_target_initiators(target); +- hmat_register_target_cache(target); +- hmat_register_target_perf(target, 0); +- hmat_register_target_perf(target, 1); ++ if (*(u16 *)target->gen_port_device_handle) { ++ hmat_update_generic_target(target); + target->registered = true; + } + mutex_unlock(&target_lock); ++ ++ hmat_hotplug_target(target); + } + + static void hmat_register_targets(void) +@@ -776,10 +849,65 @@ static int hmat_callback(struct notifier_block *self, + if (!target) + return NOTIFY_OK; + +- hmat_register_target(target); ++ hmat_hotplug_target(target); + return NOTIFY_OK; + } + ++static int hmat_set_default_dram_perf(void) ++{ ++ int rc; ++ int nid, pxm; ++ struct memory_target *target; ++ struct access_coordinate *attrs; ++ ++ if (!default_dram_type) ++ return -EIO; ++ ++ for_each_node_mask(nid, default_dram_type->nodes) { ++ pxm = node_to_pxm(nid); ++ target = find_mem_target(pxm); ++ if (!target) ++ continue; ++ attrs = &target->coord[1]; ++ rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT"); ++ if (rc) ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int hmat_calculate_adistance(struct notifier_block *self, ++ unsigned long nid, void *data) ++{ ++ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); ++ struct memory_target *target; ++ struct access_coordinate *perf; ++ int *adist = data; ++ int pxm; ++ ++ pxm = node_to_pxm(nid); ++ target = find_mem_target(pxm); ++ if (!target) ++ return NOTIFY_OK; ++ ++ mutex_lock(&target_lock); ++ hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU); ++ mutex_unlock(&target_lock); ++ ++ perf = &target->coord[1]; ++ ++ if (mt_perf_to_adistance(perf, adist)) ++ return NOTIFY_OK; ++ ++ return NOTIFY_STOP; ++} ++ ++static struct notifier_block hmat_adist_nb __meminitdata = { ++ .notifier_call = hmat_calculate_adistance, ++ .priority = 100, ++}; ++ + static __init void hmat_free_structures(void) + { + struct memory_target *target, *tnext; +@@ 
-835,6 +963,13 @@ static __init int hmat_init(void) + ACPI_SRAT_TYPE_MEMORY_AFFINITY, + srat_parse_mem_affinity, 0) < 0) + goto out_put; ++ ++ if (acpi_table_parse_entries(ACPI_SIG_SRAT, ++ sizeof(struct acpi_table_srat), ++ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY, ++ srat_parse_genport_affinity, 0) < 0) ++ goto out_put; ++ + acpi_put_table(tbl); + + status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl); +@@ -862,8 +997,13 @@ static __init int hmat_init(void) + hmat_register_targets(); + + /* Keep the table and structures if the notifier may use them */ +- if (!hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI)) +- return 0; ++ if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI)) ++ goto out_put; ++ ++ if (!hmat_set_default_dram_perf()) ++ register_mt_adistance_algorithm(&hmat_adist_nb); ++ ++ return 0; + out_put: + hmat_free_structures(); + acpi_put_table(tbl); +diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c +index a44c0761fd1c06..848942bf883cb6 100644 +--- a/drivers/acpi/numa/srat.c ++++ b/drivers/acpi/numa/srat.c +@@ -140,7 +140,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) + struct acpi_srat_generic_affinity *p = + (struct acpi_srat_generic_affinity *)header; + +- if (p->device_handle_type == 0) { ++ if (p->device_handle_type == 1) { + /* + * For pci devices this may be the only place they + * are assigned a proximity domain +diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c +index eb8f2a1ce1388d..6efaeda01d166c 100644 +--- a/drivers/acpi/prmt.c ++++ b/drivers/acpi/prmt.c +@@ -150,15 +150,28 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) + th = &tm->handlers[cur_handler]; + + guid_copy(&th->guid, (guid_t *)handler_info->handler_guid); ++ ++ /* ++ * Print an error message if handler_address is NULL, the parse of VA also ++ * can be skipped. ++ */ ++ if (unlikely(!handler_info->handler_address)) { ++ pr_info("Skipping handler with NULL address for GUID: %pUL", ++ (guid_t *)handler_info->handler_guid); ++ continue; ++ } ++ + th->handler_addr = + (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address); + /* +- * Print a warning message if handler_addr is zero which is not expected to +- * ever happen. ++ * Print a warning message and skip the parse of VA if handler_addr is zero ++ * which is not expected to ever happen. + */ +- if (unlikely(!th->handler_addr)) ++ if (unlikely(!th->handler_addr)) { + pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->handler_address); ++ continue; ++ } + + th->static_data_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address); +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c +index 5898c3c8c2a7f2..d02b332744e641 100644 +--- a/drivers/acpi/property.c ++++ b/drivers/acpi/property.c +@@ -1286,6 +1286,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, + return NULL; + } + ++/* ++ * acpi_get_next_present_subnode - Return the next present child node handle ++ * @fwnode: Firmware node to find the next child node for. ++ * @child: Handle to one of the device's child nodes or a null handle. ++ * ++ * Like acpi_get_next_subnode(), but the device nodes returned by ++ * acpi_get_next_present_subnode() are guaranteed to be present. ++ * ++ * Returns: The fwnode handle of the next present sub-node. 
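/*
 * Illustrative aside, not part of the patch: the property.c change above
 * wraps a plain iterator so that entries failing a predicate are skipped
 * transparently, the same shape as acpi_get_next_present_subnode(). A
 * userspace sketch with hypothetical names (next_node, next_present_node):
 */
#include <stdio.h>
#include <stdbool.h>

#define NNODES 3

struct node { const char *name; bool present; };

static struct node nodes[NNODES] = {
	{ "dev0", true }, { "dev1", false }, { "dev2", true },
};

/* Plain iterator: NULL starts the walk, NULL return ends it. */
static struct node *next_node(struct node *cur)
{
	int i = cur ? (int)(cur - nodes) + 1 : 0;

	return i < NNODES ? &nodes[i] : NULL;
}

/* Filtered variant: keep advancing until the predicate holds or we run out. */
static struct node *next_present_node(struct node *cur)
{
	do {
		cur = next_node(cur);
	} while (cur && !cur->present);
	return cur;
}

int main(void)
{
	/* Prints dev0 and dev2; the absent dev1 never surfaces. */
	for (struct node *n = NULL; (n = next_present_node(n)); )
		printf("%s\n", n->name);
	return 0;
}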
++ */ ++static struct fwnode_handle * ++acpi_get_next_present_subnode(const struct fwnode_handle *fwnode, ++ struct fwnode_handle *child) ++{ ++ do { ++ child = acpi_get_next_subnode(fwnode, child); ++ } while (is_acpi_device_node(child) && ++ !acpi_device_is_present(to_acpi_device_node(child))); ++ ++ return child; ++} ++ + /** + * acpi_node_get_parent - Return parent fwnode of this fwnode + * @fwnode: Firmware node whose parent to get +@@ -1629,7 +1651,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode, + .property_read_string_array = \ + acpi_fwnode_property_read_string_array, \ + .get_parent = acpi_node_get_parent, \ +- .get_next_child_node = acpi_get_next_subnode, \ ++ .get_next_child_node = acpi_get_next_present_subnode, \ + .get_named_child_node = acpi_fwnode_get_named_child_node, \ + .get_name = acpi_fwnode_get_name, \ + .get_name_prefix = acpi_fwnode_get_name_prefix, \ +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index c0c5c5c58ae1e7..5b5986e10c2d7f 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -784,6 +784,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info, + static const char * const acpi_ignore_dep_ids[] = { + "PNP0D80", /* Windows-compatible System Power Management Controller */ + "INT33BD", /* Intel Baytrail Mailbox Device */ ++ "INTC10DE", /* Intel CVS LNL */ ++ "INTC10E0", /* Intel CVS ARL */ + "LATT2021", /* Lattice FW Update Client Driver */ + NULL + }; +diff --git a/drivers/base/node.c b/drivers/base/node.c +index 47960a34305d3f..2b398c8a0f06c3 100644 +--- a/drivers/base/node.c ++++ b/drivers/base/node.c +@@ -74,14 +74,14 @@ static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES); + * @dev: Device for this memory access class + * @list_node: List element in the node's access list + * @access: The access class rank +- * @hmem_attrs: Heterogeneous memory performance attributes ++ * @coord: Heterogeneous memory performance coordinates + */ + struct node_access_nodes { + struct device dev; + struct list_head list_node; + unsigned int access; + #ifdef CONFIG_HMEM_REPORTING +- struct node_hmem_attrs hmem_attrs; ++ struct access_coordinate coord; + #endif + }; + #define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev) +@@ -126,7 +126,7 @@ static void node_access_release(struct device *dev) + } + + static struct node_access_nodes *node_init_node_access(struct node *node, +- unsigned int access) ++ enum access_coordinate_class access) + { + struct node_access_nodes *access_node; + struct device *dev; +@@ -167,7 +167,7 @@ static ssize_t property##_show(struct device *dev, \ + char *buf) \ + { \ + return sysfs_emit(buf, "%u\n", \ +- to_access_nodes(dev)->hmem_attrs.property); \ ++ to_access_nodes(dev)->coord.property); \ + } \ + static DEVICE_ATTR_RO(property) + +@@ -187,11 +187,11 @@ static struct attribute *access_attrs[] = { + /** + * node_set_perf_attrs - Set the performance values for given access class + * @nid: Node identifier to be set +- * @hmem_attrs: Heterogeneous memory performance attributes ++ * @coord: Heterogeneous memory performance coordinates + * @access: The access class the for the given attributes + */ +-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, +- unsigned int access) ++void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, ++ enum access_coordinate_class access) + { + struct node_access_nodes *c; + struct node *node; +@@ -205,7 +205,7 @@ void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, + 
if (!c) + return; + +- c->hmem_attrs = *hmem_attrs; ++ c->coord = *coord; + for (i = 0; access_attrs[i] != NULL; i++) { + if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i], + "initiators")) { +@@ -689,7 +689,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid) + */ + int register_memory_node_under_compute_node(unsigned int mem_nid, + unsigned int cpu_nid, +- unsigned int access) ++ enum access_coordinate_class access) + { + struct node *init_node, *targ_node; + struct node_access_nodes *initiator, *target; +diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c +index 8075db788b39ad..6a864433cdd9bc 100644 +--- a/drivers/base/regmap/regmap-slimbus.c ++++ b/drivers/base/regmap/regmap-slimbus.c +@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, + if (IS_ERR(bus)) + return ERR_CAST(bus); + +- return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config, +- lock_key, lock_name); ++ return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name); + } + EXPORT_SYMBOL_GPL(__regmap_init_slimbus); + +@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, + if (IS_ERR(bus)) + return ERR_CAST(bus); + +- return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config, +- lock_key, lock_name); ++ return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name); + } + EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); + +diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c +index f9a3444753c2bb..97659b4792e696 100644 +--- a/drivers/bluetooth/btmtksdio.c ++++ b/drivers/bluetooth/btmtksdio.c +@@ -1257,6 +1257,12 @@ static void btmtksdio_cmd_timeout(struct hci_dev *hdev) + + sdio_claim_host(bdev->func); + ++ /* set drv_pmctrl if BT is closed before doing reset */ ++ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { ++ sdio_enable_func(bdev->func); ++ btmtksdio_drv_pmctrl(bdev); ++ } ++ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); + skb_queue_purge(&bdev->txq); + cancel_work_sync(&bdev->txrx_work); +@@ -1272,6 +1278,12 @@ static void btmtksdio_cmd_timeout(struct hci_dev *hdev) + goto err; + } + ++ /* set fw_pmctrl back if BT is closed after doing reset */ ++ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { ++ btmtksdio_fw_pmctrl(bdev); ++ sdio_disable_func(bdev->func); ++ } ++ + clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); + err: + sdio_release_host(bdev->func); +diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c +index 7f67e460f7f491..24dae5440c036c 100644 +--- a/drivers/bluetooth/btrtl.c ++++ b/drivers/bluetooth/btrtl.c +@@ -604,8 +604,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev, + len += entry->len; + } + +- if (!len) ++ if (!len) { ++ kvfree(ptr); + return -EPERM; ++ } + + *_buf = ptr; + return len; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 4c21230aee460e..74d264b64b5342 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -65,6 +65,7 @@ static struct usb_driver btusb_driver; + #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) + #define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26) + #define BTUSB_ACTIONS_SEMI BIT(27) ++#define BTUSB_BARROT BIT(28) + + static const struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ +@@ -770,6 +771,10 @@ static const struct usb_device_id quirks_table[] = { + { USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + ++ /* 
Barrot Technology Bluetooth devices */ ++ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT }, ++ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT }, ++ + /* Actions Semiconductor ATS2851 based devices */ + { USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI }, + +@@ -1167,6 +1172,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) + } + + if (!hci_skb_expect(skb)) { ++ /* Each chunk should correspond to at least 1 or more ++ * events so if there are still bytes left that doesn't ++ * constitute a new event this is likely a bug in the ++ * controller. ++ */ ++ if (count && count < HCI_EVENT_HDR_SIZE) { ++ bt_dev_warn(data->hdev, ++ "Unexpected continuation: %d bytes", ++ count); ++ count = 0; ++ } ++ + /* Complete frame */ + btusb_recv_event(data, skb); + skb = NULL; +@@ -4696,6 +4713,11 @@ static void btusb_disconnect(struct usb_interface *intf) + + hci_unregister_dev(hdev); + ++ if (data->oob_wake_irq) ++ device_init_wakeup(&data->udev->dev, false); ++ if (data->reset_gpio) ++ gpiod_put(data->reset_gpio); ++ + if (intf == data->intf) { + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); +@@ -4706,17 +4728,11 @@ static void btusb_disconnect(struct usb_interface *intf) + usb_driver_release_interface(&btusb_driver, data->diag); + usb_driver_release_interface(&btusb_driver, data->intf); + } else if (intf == data->diag) { +- usb_driver_release_interface(&btusb_driver, data->intf); + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); ++ usb_driver_release_interface(&btusb_driver, data->intf); + } + +- if (data->oob_wake_irq) +- device_init_wakeup(&data->udev->dev, false); +- +- if (data->reset_gpio) +- gpiod_put(data->reset_gpio); +- + hci_free_dev(hdev); + } + +diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c +index 2a5a27d713f8a0..e991d9e6248694 100644 +--- a/drivers/bluetooth/hci_bcsp.c ++++ b/drivers/bluetooth/hci_bcsp.c +@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) + struct bcsp_struct *bcsp = hu->priv; + const unsigned char *ptr; + ++ if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) ++ return -EUNATCH; ++ + BT_DBG("hu %p count %d rx_state %d rx_count %ld", + hu, count, bcsp->rx_state, bcsp->rx_count); + +diff --git a/drivers/char/misc.c b/drivers/char/misc.c +index 30178e20d962d4..8d8c4bcf07e1c2 100644 +--- a/drivers/char/misc.c ++++ b/drivers/char/misc.c +@@ -58,9 +58,8 @@ static LIST_HEAD(misc_list); + static DEFINE_MUTEX(misc_mtx); + + /* +- * Assigned numbers, used for dynamic minors ++ * Assigned numbers. 
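/*
 * Illustrative aside, not part of the patch: the btusb hunk above drops a
 * trailing fragment that is too short to start a new HCI event, instead of
 * letting it desynchronize the next frame. A userspace sketch of that
 * guard, using a hypothetical 2-byte header (opcode, payload length):
 */
#include <stdio.h>

#define HDR_SIZE 2

/* Consume whole frames; return how many trailing bytes were dropped. */
static int consume(const unsigned char *buf, int count)
{
	int dropped = 0;

	while (count > 0) {
		if (count < HDR_SIZE) {
			/* Leftover bytes cannot form a header: likely a
			 * controller bug, drop them rather than desync. */
			dropped = count;
			break;
		}
		int len = HDR_SIZE + buf[1];   /* header + payload */
		if (len > count)
			len = count;           /* truncated frame: take rest */
		printf("frame op=0x%02x len=%d\n", buf[0], len);
		buf += len;
		count -= len;
	}
	return dropped;
}

int main(void)
{
	/* One 0x0e event with a 1-byte payload, then a lone stray byte. */
	const unsigned char stream[] = { 0x0e, 0x01, 0xaa, 0x5b };

	printf("dropped %d byte(s)\n", consume(stream, sizeof(stream)));
	return 0;
}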
+ */ +-#define DYNAMIC_MINORS 128 /* like dynamic majors */ + static DEFINE_IDA(misc_minors_ida); + + static int misc_minor_alloc(int minor) +@@ -69,34 +68,17 @@ static int misc_minor_alloc(int minor) + + if (minor == MISC_DYNAMIC_MINOR) { + /* allocate free id */ +- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); +- if (ret >= 0) { +- ret = DYNAMIC_MINORS - ret - 1; +- } else { +- ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, +- MINORMASK, GFP_KERNEL); +- } ++ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, ++ MINORMASK, GFP_KERNEL); + } else { +- /* specific minor, check if it is in dynamic or misc dynamic range */ +- if (minor < DYNAMIC_MINORS) { +- minor = DYNAMIC_MINORS - minor - 1; +- ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL); +- } else if (minor > MISC_DYNAMIC_MINOR) { +- ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL); +- } else { +- /* case of non-dynamic minors, no need to allocate id */ +- ret = 0; +- } ++ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL); + } + return ret; + } + + static void misc_minor_free(int minor) + { +- if (minor < DYNAMIC_MINORS) +- ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1); +- else if (minor > MISC_DYNAMIC_MINOR) +- ida_free(&misc_minors_ida, minor); ++ ida_free(&misc_minors_ida, minor); + } + + #ifdef CONFIG_PROC_FS +@@ -150,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file) + break; + } + +- if (!new_fops) { ++ /* Only request module for fixed minor code */ ++ if (!new_fops && minor < MISC_DYNAMIC_MINOR) { + mutex_unlock(&misc_mtx); + request_module("char-major-%d-%d", MISC_MAJOR, minor); + mutex_lock(&misc_mtx); +@@ -162,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file) + new_fops = fops_get(iter->fops); + break; + } +- if (!new_fops) +- goto fail; + } + ++ if (!new_fops) ++ goto fail; ++ + /* + * Place the miscdevice in the file's + * private_data so it can be used by the +@@ -297,9 +281,11 @@ void misc_deregister(struct miscdevice *misc) + return; + + mutex_lock(&misc_mtx); +- list_del(&misc->list); ++ list_del_init(&misc->list); + device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor)); + misc_minor_free(misc->minor); ++ if (misc->minor > MISC_DYNAMIC_MINOR) ++ misc->minor = MISC_DYNAMIC_MINOR; + mutex_unlock(&misc_mtx); + } + EXPORT_SYMBOL(misc_deregister); +diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c +index 15c46489ba8501..4c87a0f789de1c 100644 +--- a/drivers/clk/at91/clk-master.c ++++ b/drivers/clk/at91/clk-master.c +@@ -580,6 +580,9 @@ clk_sama7g5_master_recalc_rate(struct clk_hw *hw, + { + struct clk_master *master = to_clk_master(hw); + ++ if (master->div == MASTER_PRES_MAX) ++ return DIV_ROUND_CLOSEST_ULL(parent_rate, 3); ++ + return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div)); + } + +diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c +index ff65f7b916f077..57d7aef1ea8658 100644 +--- a/drivers/clk/at91/clk-sam9x60-pll.c ++++ b/drivers/clk/at91/clk-sam9x60-pll.c +@@ -90,8 +90,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core) + + spin_lock_irqsave(core->lock, flags); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_ID_MSK, core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_ID_MSK, core->id); + regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val); + cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift; + cfrac = (val & 
core->layout->frac_mask) >> core->layout->frac_shift; +@@ -125,17 +125,17 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core) + udelay(10); + } + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, + AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL, + AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + while (!sam9x60_pll_ready(regmap, core->id)) + cpu_relax(); +@@ -161,8 +161,8 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw) + + spin_lock_irqsave(core->lock, flags); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_ID_MSK, core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_ID_MSK, core->id); + + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0); + +@@ -170,9 +170,9 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw) + regmap_update_bits(regmap, AT91_PMC_PLL_ACR, + AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + spin_unlock_irqrestore(core->lock, flags); + } +@@ -257,8 +257,8 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate, + + spin_lock_irqsave(core->lock, irqflags); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, +- core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, ++ core->id); + regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val); + cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift; + cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift; +@@ -270,18 +270,18 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate, + (frac->mul << core->layout->mul_shift) | + (frac->frac << core->layout->frac_shift)); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, + AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL, + AT91_PMC_PLL_CTRL0_ENLOCK | + AT91_PMC_PLL_CTRL0_ENPLL); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + while (!sam9x60_pll_ready(regmap, core->id)) + cpu_relax(); +@@ -333,7 +333,10 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = { + .restore_context = sam9x60_frac_pll_restore_context, + }; + +-/* This 
function should be called with spinlock acquired. */ ++/* This function should be called with spinlock acquired. ++ * Warning: this function must be called only if the same PLL ID was set in ++ * PLL_UPDT register previously. ++ */ + static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div, + bool enable) + { +@@ -345,9 +348,9 @@ static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div, + core->layout->div_mask | ena_msk, + (div << core->layout->div_shift) | ena_val); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + while (!sam9x60_pll_ready(regmap, core->id)) + cpu_relax(); +@@ -361,8 +364,8 @@ static int sam9x60_div_pll_set(struct sam9x60_pll_core *core) + unsigned int val, cdiv; + + spin_lock_irqsave(core->lock, flags); +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_ID_MSK, core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_ID_MSK, core->id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); + cdiv = (val & core->layout->div_mask) >> core->layout->div_shift; + +@@ -393,15 +396,15 @@ static void sam9x60_div_pll_unprepare(struct clk_hw *hw) + + spin_lock_irqsave(core->lock, flags); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_ID_MSK, core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_ID_MSK, core->id); + + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, + core->layout->endiv_mask, 0); + +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, +- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, +- AT91_PMC_PLL_UPDT_UPDATE | core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, ++ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, ++ AT91_PMC_PLL_UPDT_UPDATE | core->id); + + spin_unlock_irqrestore(core->lock, flags); + } +@@ -507,8 +510,8 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate, + div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1; + + spin_lock_irqsave(core->lock, irqflags); +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, +- core->id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, ++ core->id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); + cdiv = (val & core->layout->div_mask) >> core->layout->div_shift; + +@@ -563,8 +566,8 @@ static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier, + div->div = div->safe_div; + + spin_lock_irqsave(core.lock, irqflags); +- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, +- core.id); ++ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK, ++ core.id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); + cdiv = (val & core.layout->div_mask) >> core.layout->div_shift; + +diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c +index fdc8ccc586c994..ec1717ddaf275e 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c ++++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c +@@ -325,6 +325,13 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = { + .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents), + }; + ++static const struct sun6i_rtc_match_data sun55i_a523_rtc_ccu_data = { ++ .have_ext_osc32k = true, ++ .have_iosc_calibration = true, ++ .osc32k_fanout_parents = 
sun50i_r329_osc32k_fanout_parents, ++ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents), ++}; ++ + static const struct of_device_id sun6i_rtc_ccu_match[] = { + { + .compatible = "allwinner,sun50i-h616-rtc", +@@ -334,6 +341,10 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = { + .compatible = "allwinner,sun50i-r329-rtc", + .data = &sun50i_r329_rtc_ccu_data, + }, ++ { ++ .compatible = "allwinner,sun55i-a523-rtc", ++ .data = &sun55i_a523_rtc_ccu_data, ++ }, + {}, + }; + +diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c +index 85c50ea39e6da0..9269e6a0db6a4e 100644 +--- a/drivers/clk/ti/clk-33xx.c ++++ b/drivers/clk/ti/clk-33xx.c +@@ -258,6 +258,8 @@ static const char *enable_init_clks[] = { + "dpll_ddr_m2_ck", + "dpll_mpu_m2_ck", + "l3_gclk", ++ /* WKUP_DEBUGSS_CLKCTRL - disable fails, AM335x Errata Advisory 1.0.42 */ ++ "l3-aon-clkctrl:0000:0", + /* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */ + "l3-clkctrl:00bc:0", + "l4hs_gclk", +diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c +index 911c92146eca6d..8041a8f62d1fa4 100644 +--- a/drivers/clocksource/timer-vf-pit.c ++++ b/drivers/clocksource/timer-vf-pit.c +@@ -35,30 +35,30 @@ static unsigned long cycle_per_jiffy; + + static inline void pit_timer_enable(void) + { +- __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL); ++ writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL); + } + + static inline void pit_timer_disable(void) + { +- __raw_writel(0, clkevt_base + PITTCTRL); ++ writel(0, clkevt_base + PITTCTRL); + } + + static inline void pit_irq_acknowledge(void) + { +- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); ++ writel(PITTFLG_TIF, clkevt_base + PITTFLG); + } + + static u64 notrace pit_read_sched_clock(void) + { +- return ~__raw_readl(clksrc_base + PITCVAL); ++ return ~readl(clksrc_base + PITCVAL); + } + + static int __init pit_clocksource_init(unsigned long rate) + { + /* set the max load value and start the clock source counter */ +- __raw_writel(0, clksrc_base + PITTCTRL); +- __raw_writel(~0UL, clksrc_base + PITLDVAL); +- __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL); ++ writel(0, clksrc_base + PITTCTRL); ++ writel(~0UL, clksrc_base + PITLDVAL); ++ writel(PITTCTRL_TEN, clksrc_base + PITTCTRL); + + sched_clock_register(pit_read_sched_clock, 32, rate); + return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate, +@@ -76,7 +76,7 @@ static int pit_set_next_event(unsigned long delta, + * hardware requirement. 
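/*
 * Illustrative aside, not part of the patch: the vf-pit hunk here only
 * swaps __raw_writel() for writel(), but the sequence it sits in is worth
 * spelling out: per the "hardware requirement" comment, the counter must
 * be stopped before a new load value is written, and the timer counts from
 * LDVAL down to 0, so an N-cycle delay is programmed as N - 1. A sketch
 * with stubbed register writes standing in for the MMIO accessors:
 */
#include <stdio.h>

#define TCTRL_TEN 0x1   /* timer enable */
#define TCTRL_TIE 0x2   /* interrupt enable */

static unsigned int regs[2];                 /* [0] = TCTRL, [1] = LDVAL */

static void reg_write(int off, unsigned int val)
{
	regs[off] = val;
	printf("reg[%d] <= 0x%x\n", off, val);
}

static void set_next_event(unsigned long delta)
{
	reg_write(0, 0);                     /* disable before reloading */
	reg_write(1, (unsigned int)delta - 1); /* counts delta-1 .. 0 */
	reg_write(0, TCTRL_TEN | TCTRL_TIE); /* re-enable with IRQ */
}

int main(void)
{
	set_next_event(1000);
	return 0;
}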
+ */ + pit_timer_disable(); +- __raw_writel(delta - 1, clkevt_base + PITLDVAL); ++ writel(delta - 1, clkevt_base + PITLDVAL); + pit_timer_enable(); + + return 0; +@@ -125,8 +125,8 @@ static struct clock_event_device clockevent_pit = { + + static int __init pit_clockevent_init(unsigned long rate, int irq) + { +- __raw_writel(0, clkevt_base + PITTCTRL); +- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); ++ writel(0, clkevt_base + PITTCTRL); ++ writel(PITTFLG_TIF, clkevt_base + PITTFLG); + + BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, + "VF pit timer", &clockevent_pit)); +@@ -183,7 +183,7 @@ static int __init pit_timer_init(struct device_node *np) + cycle_per_jiffy = clk_rate / (HZ); + + /* enable the pit module */ +- __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); ++ writel(~PITMCR_MDIS, timer_base + PITMCR); + + ret = pit_clocksource_init(clk_rate); + if (ret) +diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c +index 4c57c6725c134c..1412d4617a04ad 100644 +--- a/drivers/cpufreq/longhaul.c ++++ b/drivers/cpufreq/longhaul.c +@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void) + struct cpufreq_policy *policy = cpufreq_cpu_get(0); + int i; + ++ if (unlikely(!policy)) ++ return; ++ + for (i = 0; i < numscales; i++) { + if (mults[i] == maxmult) { + struct cpufreq_freqs freqs; +diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c +index 39186008afbfdf..233c82a8340861 100644 +--- a/drivers/cpufreq/tegra186-cpufreq.c ++++ b/drivers/cpufreq/tegra186-cpufreq.c +@@ -132,13 +132,14 @@ static struct cpufreq_driver tegra186_cpufreq_driver = { + + static struct cpufreq_frequency_table *init_vhint_table( + struct platform_device *pdev, struct tegra_bpmp *bpmp, +- struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id) ++ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id, ++ int *num_rates) + { + struct cpufreq_frequency_table *table; + struct mrq_cpu_vhint_request req; + struct tegra_bpmp_message msg; + struct cpu_vhint_data *data; +- int err, i, j, num_rates = 0; ++ int err, i, j; + dma_addr_t phys; + void *virt; + +@@ -168,6 +169,7 @@ static struct cpufreq_frequency_table *init_vhint_table( + goto free; + } + ++ *num_rates = 0; + for (i = data->vfloor; i <= data->vceil; i++) { + u16 ndiv = data->ndiv[i]; + +@@ -178,10 +180,10 @@ static struct cpufreq_frequency_table *init_vhint_table( + if (i > 0 && ndiv == data->ndiv[i - 1]) + continue; + +- num_rates++; ++ (*num_rates)++; + } + +- table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table), ++ table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table), + GFP_KERNEL); + if (!table) { + table = ERR_PTR(-ENOMEM); +@@ -223,7 +225,9 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) + { + struct tegra186_cpufreq_data *data; + struct tegra_bpmp *bpmp; +- unsigned int i = 0, err; ++ unsigned int i = 0, err, edvd_offset; ++ int num_rates = 0; ++ u32 edvd_val, cpu; + + data = devm_kzalloc(&pdev->dev, + struct_size(data, clusters, TEGRA186_NUM_CLUSTERS), +@@ -246,10 +250,21 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) + for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) { + struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; + +- cluster->table = init_vhint_table(pdev, bpmp, cluster, i); ++ cluster->table = init_vhint_table(pdev, bpmp, cluster, i, &num_rates); + if (IS_ERR(cluster->table)) { + err = PTR_ERR(cluster->table); + goto put_bpmp; ++ } else if (!num_rates) { ++ err = -EINVAL; ++ goto 
put_bpmp; ++ } ++ ++ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) { ++ if (data->cpus[cpu].bpmp_cluster_id == i) { ++ edvd_val = cluster->table[num_rates - 1].driver_data; ++ edvd_offset = data->cpus[cpu].edvd_offset; ++ writel(edvd_val, data->regs + edvd_offset); ++ } + } + } + +diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c +index 737a026ef58a38..6704d610573ad6 100644 +--- a/drivers/cpuidle/cpuidle.c ++++ b/drivers/cpuidle/cpuidle.c +@@ -634,8 +634,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev) + static int __cpuidle_register_device(struct cpuidle_device *dev) + { + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); ++ unsigned int cpu = dev->cpu; + int i, ret; + ++ if (per_cpu(cpuidle_devices, cpu)) { ++ pr_info("CPU%d: cpuidle device already registered\n", cpu); ++ return -EEXIST; ++ } ++ + if (!try_module_get(drv->owner)) + return -EINVAL; + +@@ -647,7 +653,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + +- per_cpu(cpuidle_devices, dev->cpu) = dev; ++ per_cpu(cpuidle_devices, cpu) = dev; + list_add(&dev->device_list, &cpuidle_detected_devices); + + ret = cpuidle_coupled_register_device(dev); +diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c +index cafe6eed3349f4..8c591dde610232 100644 +--- a/drivers/cpuidle/governors/menu.c ++++ b/drivers/cpuidle/governors/menu.c +@@ -348,45 +348,50 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + if (s->exit_latency_ns > latency_req) + break; + +- if (s->target_residency_ns > predicted_ns) { +- /* +- * Use a physical idle state, not busy polling, unless +- * a timer is going to trigger soon enough. +- */ +- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && +- s->target_residency_ns <= data->next_timer_ns) { +- predicted_ns = s->target_residency_ns; +- idx = i; +- break; +- } +- if (predicted_ns < TICK_NSEC) +- break; +- +- if (!tick_nohz_tick_stopped()) { +- /* +- * If the state selected so far is shallow, +- * waking up early won't hurt, so retain the +- * tick in that case and let the governor run +- * again in the next iteration of the loop. +- */ +- predicted_ns = drv->states[idx].target_residency_ns; +- break; +- } ++ if (s->target_residency_ns <= predicted_ns) { ++ idx = i; ++ continue; ++ } ++ ++ /* ++ * Use a physical idle state, not busy polling, unless a timer ++ * is going to trigger soon enough or the exit latency of the ++ * idle state in question is greater than the predicted idle ++ * duration. ++ */ ++ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && ++ s->target_residency_ns <= data->next_timer_ns && ++ s->exit_latency_ns <= predicted_ns) { ++ predicted_ns = s->target_residency_ns; ++ idx = i; ++ break; ++ } + ++ if (predicted_ns < TICK_NSEC) ++ break; ++ ++ if (!tick_nohz_tick_stopped()) { + /* +- * If the state selected so far is shallow and this +- * state's target residency matches the time till the +- * closest timer event, select this one to avoid getting +- * stuck in the shallow one for too long. ++ * If the state selected so far is shallow, waking up ++ * early won't hurt, so retain the tick in that case and ++ * let the governor run again in the next iteration of ++ * the idle loop. 
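/*
 * Illustrative aside, not part of the patch: the menu-governor hunk above
 * restructures the loop, but the core rule it preserves is simple: pick
 * the deepest state whose exit latency fits the latency constraint and
 * whose target residency does not exceed the predicted idle time. A
 * stripped-down sketch of just that rule (the tick and polling special
 * cases handled in the real loop are deliberately omitted):
 */
#include <stdio.h>

struct state { long long exit_latency_ns, target_residency_ns; };

static int pick_state(const struct state *s, int n,
		      long long latency_req, long long predicted_ns)
{
	int idx = -1;

	for (int i = 0; i < n; i++) {
		if (s[i].exit_latency_ns > latency_req)
			break;                 /* too slow to wake up */
		if (s[i].target_residency_ns <= predicted_ns)
			idx = i;               /* deepest fit so far */
	}
	return idx;                            /* -1: fall back to state 0 */
}

int main(void)
{
	const struct state states[] = {
		{ 1000, 2000 }, { 50000, 100000 }, { 300000, 600000 },
	};

	/* Prints 1: state 2's residency exceeds the predicted 200us. */
	printf("%d\n", pick_state(states, 3, 400000, 200000));
	return 0;
}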
+ */ +- if (drv->states[idx].target_residency_ns < TICK_NSEC && +- s->target_residency_ns <= delta_tick) +- idx = i; +- +- return idx; ++ predicted_ns = drv->states[idx].target_residency_ns; ++ break; + } + +- idx = i; ++ /* ++ * If the state selected so far is shallow and this state's ++ * target residency matches the time till the closest timer ++ * event, select this one to avoid getting stuck in the shallow ++ * one for too long. ++ */ ++ if (drv->states[idx].target_residency_ns < TICK_NSEC && ++ s->target_residency_ns <= delta_tick) ++ idx = i; ++ ++ return idx; + } + + if (idx == -1) +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +index 9e093d44a06629..31f2b4adf3d9ae 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +@@ -264,7 +264,6 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req + goto theend_sgs; + } + +- chan->timeout = areq->cryptlen; + rctx->nr_sgs = ns; + rctx->nr_sgd = nd; + return 0; +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +index d4ccd5254280bc..eba186c5c62513 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +@@ -186,11 +186,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name) + mutex_unlock(&ce->mlock); + + wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete, +- msecs_to_jiffies(ce->chanlist[flow].timeout)); ++ msecs_to_jiffies(CE_DMA_TIMEOUT_MS)); + + if (ce->chanlist[flow].status == 0) { +- dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name, +- ce->chanlist[flow].timeout, flow); ++ dev_err(ce->dev, "DMA timeout for %s on flow %d\n", name, flow); + err = -EFAULT; + } + /* No need to lock for this read, the channel is locked so +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +index ebc857ed10e11e..8ee1e94335c376 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +@@ -457,8 +457,6 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) + else + cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j); + +- chan->timeout = areq->nbytes; +- + err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm)); + + dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +index 80815379f6fc55..b571d1d0c4c4ca 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +@@ -137,7 +137,6 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, + + cet->t_dst[0].addr = cpu_to_le32(dma_dst); + cet->t_dst[0].len = cpu_to_le32(todo / 4); +- ce->chanlist[flow].timeout = 2000; + + err = sun8i_ce_run_task(ce, 3, "PRNG"); + mutex_unlock(&ce->rnglock); +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +index 9c35f2a83eda85..630a0b84b494d8 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +@@ -79,7 +79,6 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa + + cet->t_dst[0].addr = cpu_to_le32(dma_dst); + cet->t_dst[0].len 
= cpu_to_le32(todo / 4); +- ce->chanlist[flow].timeout = todo; + + err = sun8i_ce_run_task(ce, 3, "TRNG"); + mutex_unlock(&ce->rnglock); +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +index 65cc1278ee1555..d817ce445f4a39 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +@@ -106,6 +106,7 @@ + #define MAX_SG 8 + + #define CE_MAX_CLOCKS 4 ++#define CE_DMA_TIMEOUT_MS 3000 + + #define MAXFLOW 4 + +@@ -195,7 +196,6 @@ struct sun8i_ce_flow { + struct completion complete; + int status; + dma_addr_t t_phy; +- int timeout; + struct ce_task *tl; + void *backup_iv; + void *bounce_iv; +diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c +index 247c568aa8dfe3..8ca0913d94abfe 100644 +--- a/drivers/crypto/aspeed/aspeed-acry.c ++++ b/drivers/crypto/aspeed/aspeed-acry.c +@@ -789,28 +789,24 @@ static int aspeed_acry_probe(struct platform_device *pdev) + err_engine_rsa_start: + crypto_engine_exit(acry_dev->crypt_engine_rsa); + clk_exit: +- clk_disable_unprepare(acry_dev->clk); + + return rc; + } + +-static int aspeed_acry_remove(struct platform_device *pdev) ++static void aspeed_acry_remove(struct platform_device *pdev) + { + struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev); + + aspeed_acry_unregister(acry_dev); + crypto_engine_exit(acry_dev->crypt_engine_rsa); + tasklet_kill(&acry_dev->done_task); +- clk_disable_unprepare(acry_dev->clk); +- +- return 0; + } + + MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches); + + static struct platform_driver aspeed_acry_driver = { + .probe = aspeed_acry_probe, +- .remove = aspeed_acry_remove, ++ .remove_new = aspeed_acry_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_acry_of_matches, +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c +index bdf367f3f67980..eb880f0435ee9f 100644 +--- a/drivers/crypto/caam/ctrl.c ++++ b/drivers/crypto/caam/ctrl.c +@@ -692,12 +692,12 @@ static int caam_ctrl_rng_init(struct device *dev) + */ + if (needs_entropy_delay_adjustment()) + ent_delay = 12000; +- if (!(ctrlpriv->rng4_sh_init || inst_handles)) { ++ if (!inst_handles) { + dev_info(dev, + "Entropy delay = %u\n", + ent_delay); + kick_trng(dev, ent_delay); +- ent_delay += 400; ++ ent_delay = ent_delay * 2; + } + /* + * if instantiate_rng(...) 
fails, the loop will rerun +diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c +index 203240e78f6adb..a2c6b28f4b84bc 100644 +--- a/drivers/crypto/hisilicon/qm.c ++++ b/drivers/crypto/hisilicon/qm.c +@@ -3750,10 +3750,12 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, + pdev = container_of(dev, struct pci_dev, dev); + if (pci_physfn(pdev) != qm->pdev) { + pci_err(qm->pdev, "the pdev input does not match the pf!\n"); ++ put_device(dev); + return -EINVAL; + } + + *fun_index = pdev->devfn; ++ put_device(dev); + + return 0; + } +diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c +index 4bd150d1441a02..473e1ab9b8baac 100644 +--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c ++++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c +@@ -1745,7 +1745,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) + if (sobj_hdr) + sobj_chunk_num = sobj_hdr->num_chunks; + +- mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) * ++ mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num), + sizeof(*mobj_hdr), GFP_KERNEL); + if (!mobj_hdr) + return -ENOMEM; +diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c +index 68236247059d13..9ae789d4aca7b6 100644 +--- a/drivers/dma/dw-edma/dw-edma-core.c ++++ b/drivers/dma/dw-edma/dw-edma-core.c +@@ -595,6 +595,25 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan, + return dw_edma_device_transfer(&xfer); + } + ++static void dw_hdma_set_callback_result(struct virt_dma_desc *vd, ++ enum dmaengine_tx_result result) ++{ ++ u32 residue = 0; ++ struct dw_edma_desc *desc; ++ struct dmaengine_result *res; ++ ++ if (!vd->tx.callback_result) ++ return; ++ ++ desc = vd2dw_edma_desc(vd); ++ if (desc) ++ residue = desc->alloc_sz - desc->xfer_sz; ++ ++ res = &vd->tx_result; ++ res->result = result; ++ res->residue = residue; ++} ++ + static void dw_edma_done_interrupt(struct dw_edma_chan *chan) + { + struct dw_edma_desc *desc; +@@ -608,6 +627,8 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan) + case EDMA_REQ_NONE: + desc = vd2dw_edma_desc(vd); + if (!desc->chunks_alloc) { ++ dw_hdma_set_callback_result(vd, ++ DMA_TRANS_NOERROR); + list_del(&vd->node); + vchan_cookie_complete(vd); + } +@@ -644,6 +665,7 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { ++ dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED); + list_del(&vd->node); + vchan_cookie_complete(vd); + } +diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c +index ca0ba1d462832d..8b215cbca1186b 100644 +--- a/drivers/dma/mv_xor.c ++++ b/drivers/dma/mv_xor.c +@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) + + dma_async_device_unregister(&mv_chan->dmadev); + +- dma_free_coherent(dev, MV_XOR_POOL_SIZE, ++ dma_free_wc(dev, MV_XOR_POOL_SIZE, + mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); + dma_unmap_single(dev, mv_chan->dummy_src_addr, + MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); +@@ -1163,7 +1163,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, + err_free_irq: + free_irq(mv_chan->irq, mv_chan); + err_free_dma: +- dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, ++ dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE, + mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); + err_unmap_dst: + dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr, +diff --git a/drivers/dma/sh/shdma-base.c 
b/drivers/dma/sh/shdma-base.c +index 588c5f409a8087..8d796504cb7f1a 100644 +--- a/drivers/dma/sh/shdma-base.c ++++ b/drivers/dma/sh/shdma-base.c +@@ -129,12 +129,25 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) + const struct shdma_ops *ops = sdev->ops; + dev_dbg(schan->dev, "Bring up channel %d\n", + schan->id); +- /* +- * TODO: .xfer_setup() might fail on some platforms. +- * Make it int then, on error remove chunks from the +- * queue again +- */ +- ops->setup_xfer(schan, schan->slave_id); ++ ++ ret = ops->setup_xfer(schan, schan->slave_id); ++ if (ret < 0) { ++ dev_err(schan->dev, "setup_xfer failed: %d\n", ret); ++ ++ /* Remove chunks from the queue and mark them as idle */ ++ list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) { ++ if (chunk->cookie == cookie) { ++ chunk->mark = DESC_IDLE; ++ list_move(&chunk->node, &schan->ld_free); ++ } ++ } ++ ++ schan->pm_state = SHDMA_PM_ESTABLISHED; ++ ret = pm_runtime_put(schan->dev); ++ ++ spin_unlock_irq(&schan->chan_lock); ++ return ret; ++ } + + if (schan->pm_state == SHDMA_PM_PENDING) + shdma_chan_xfer_ld_queue(schan); +diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c +index 00067b29e23223..d8210488dd40c7 100644 +--- a/drivers/dma/sh/shdmac.c ++++ b/drivers/dma/sh/shdmac.c +@@ -300,21 +300,30 @@ static bool sh_dmae_channel_busy(struct shdma_chan *schan) + return dmae_is_busy(sh_chan); + } + +-static void sh_dmae_setup_xfer(struct shdma_chan *schan, +- int slave_id) ++static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + ++ int ret = 0; + if (slave_id >= 0) { + const struct sh_dmae_slave_config *cfg = + sh_chan->config; + +- dmae_set_dmars(sh_chan, cfg->mid_rid); +- dmae_set_chcr(sh_chan, cfg->chcr); ++ ret = dmae_set_dmars(sh_chan, cfg->mid_rid); ++ if (ret < 0) ++ goto END; ++ ++ ret = dmae_set_chcr(sh_chan, cfg->chcr); ++ if (ret < 0) ++ goto END; ++ + } else { + dmae_init(sh_chan); + } ++ ++END: ++ return ret; + } + + /* +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c +index 233e58278943e0..4f8f87207b67b8 100644 +--- a/drivers/edac/altera_edac.c ++++ b/drivers/edac/altera_edac.c +@@ -1194,10 +1194,22 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device) + if (ret) + return ret; + +- /* Verify OCRAM has been initialized */ ++ /* ++ * Verify that OCRAM has been initialized. ++ * During a warm reset, OCRAM contents are retained, but the control ++ * and status registers are reset to their default values. Therefore, ++ * ECC must be explicitly re-enabled in the control register. ++ * Error condition: if INITCOMPLETEA is clear and ECC_EN is already set. 
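/*
 * Illustrative aside, not part of the patch: the altera_edac hunk below
 * turns "init not complete" from a hard failure into a warm-reset recovery
 * path, failing only when ECC was already enabled yet never initialized.
 * The decision table as a tiny sketch; enable_ecc() is a hypothetical
 * stand-in for the control-register write:
 */
#include <stdio.h>
#include <stdbool.h>

static void enable_ecc(void)
{
	puts("re-enabling ECC after warm reset");
}

/* Return 0 on success, -1 for the unrecoverable case. */
static int check_ocram(bool init_complete, bool ecc_enabled)
{
	if (init_complete)
		return 0;          /* cold boot path: nothing to do */
	if (!ecc_enabled) {
		enable_ecc();      /* warm reset cleared the ctrl register */
		return 0;
	}
	return -1;                 /* enabled but never initialized */
}

int main(void)
{
	/* Prints the recovery message, then "0 0 -1". */
	printf("%d %d %d\n", check_ocram(true, false),
	       check_ocram(false, false), check_ocram(false, true));
	return 0;
}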
++ */ + if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA, +- (base + ALTR_A10_ECC_INITSTAT_OFST))) +- return -ENODEV; ++ (base + ALTR_A10_ECC_INITSTAT_OFST))) { ++ if (!ecc_test_bits(ALTR_A10_ECC_EN, ++ (base + ALTR_A10_ECC_CTRL_OFST))) ++ ecc_set_bits(ALTR_A10_ECC_EN, ++ (base + ALTR_A10_ECC_CTRL_OFST)); ++ else ++ return -ENODEV; ++ } + + /* Enable IRQ on Single Bit Error */ + writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST)); +@@ -1367,7 +1379,7 @@ static const struct edac_device_prv_data a10_enetecc_data = { + .ue_set_mask = ALTR_A10_ECC_TDERRA, + .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST, + .ecc_irq_handler = altr_edac_a10_ecc_irq, +- .inject_fops = &altr_edac_a10_device_inject2_fops, ++ .inject_fops = &altr_edac_a10_device_inject_fops, + }; + + #endif /* CONFIG_EDAC_ALTERA_ETHERNET */ +@@ -1457,7 +1469,7 @@ static const struct edac_device_prv_data a10_usbecc_data = { + .ue_set_mask = ALTR_A10_ECC_TDERRA, + .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST, + .ecc_irq_handler = altr_edac_a10_ecc_irq, +- .inject_fops = &altr_edac_a10_device_inject2_fops, ++ .inject_fops = &altr_edac_a10_device_inject_fops, + }; + + #endif /* CONFIG_EDAC_ALTERA_USB */ +diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c +index 0317b614b68052..26b083ccc94b2c 100644 +--- a/drivers/extcon/extcon-adc-jack.c ++++ b/drivers/extcon/extcon-adc-jack.c +@@ -162,6 +162,8 @@ static int adc_jack_remove(struct platform_device *pdev) + { + struct adc_jack_data *data = platform_get_drvdata(pdev); + ++ if (data->wakeup_source) ++ device_init_wakeup(&pdev->dev, false); + free_irq(data->irq, data); + cancel_work_sync(&data->handler.work); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index d34037b85cf859..52c093e42531bd 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -1205,29 +1205,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector) + amdgpu_connector->use_digital = true; + } + ++/** ++ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock ++ * @adev: pointer to amdgpu_device ++ * ++ * Return: maximum supported HDMI (TMDS) pixel clock in KHz. ++ */ ++static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev) ++{ ++ if (adev->asic_type >= CHIP_POLARIS10) ++ return 600000; ++ else if (adev->asic_type >= CHIP_TONGA) ++ return 300000; ++ else ++ return 297000; ++} ++ ++/** ++ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors ++ * @connector: DRM connector to validate the mode on ++ * @mode: display mode to validate ++ * ++ * Validate the given display mode on DVI and HDMI connectors, including ++ * analog signals on DVI-I. ++ * ++ * Return: drm_mode_status indicating whether the mode is valid. 
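
The hisi_qm fix earlier in this series adds put_device() on both the mismatch and the success path, and the extcon change disables wakeup before teardown; the underlying rule is that a device lookup hands back a counted reference that every exit path must drop. A generic sketch with a hypothetical lookup contract (not the driver-core API itself):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct object { atomic_int refcount; };

    static void get_object(struct object *o) { atomic_fetch_add(&o->refcount, 1); }
    static void put_object(struct object *o) { atomic_fetch_sub(&o->refcount, 1); }

    /* Stands in for bus_find_device(): the lookup returns with a reference
     * held, so the caller owes exactly one put_object() on every path. */
    static int use_object(struct object *o, bool matches)
    {
        int ret = 0;

        get_object(o);                  /* reference taken by the lookup */
        if (!matches)
            ret = -EINVAL;              /* the buggy version returned here
                                         * without the put, leaking o */
        /* ... otherwise consume o ... */
        put_object(o);                  /* every path drops the reference */
        return ret;
    }

Folding the error and success paths into one exit, as above, is the easiest way to keep get/put balanced as the function grows.
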
++ */ + static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); ++ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev); ++ const int max_dvi_single_link_pixel_clock = 165000; ++ int max_digital_pixel_clock_khz; + + /* XXX check mode bandwidth */ + +- if (amdgpu_connector->use_digital && (mode->clock > 165000)) { +- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || +- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || +- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) { +- return MODE_OK; +- } else if (connector->display_info.is_hdmi) { +- /* HDMI 1.3+ supports max clock of 340 Mhz */ +- if (mode->clock > 340000) +- return MODE_CLOCK_HIGH; +- else +- return MODE_OK; +- } else { +- return MODE_CLOCK_HIGH; ++ if (amdgpu_connector->use_digital) { ++ switch (amdgpu_connector->connector_object_id) { ++ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A: ++ max_digital_pixel_clock_khz = max_hdmi_pixel_clock; ++ break; ++ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I: ++ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D: ++ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock; ++ break; ++ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I: ++ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D: ++ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B: ++ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2; ++ break; + } ++ ++ /* When the display EDID claims that it's an HDMI display, ++ * we use the HDMI encoder mode of the display HW, ++ * so we should verify against the max HDMI clock here. 
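
The connector hunk above trades a chain of per-connector special cases for one limit lookup followed by a single comparison. A compact sketch of that shape, with illustrative connector types and limits rather than the amdgpu definitions:

    /* Illustrative mode-validation sketch; values are TMDS-style limits. */
    enum conn_type { CONN_HDMI_A, CONN_DVI_SINGLE, CONN_DVI_DUAL };

    static int max_pixel_clock_khz(enum conn_type t, int max_hdmi_khz)
    {
        switch (t) {
        case CONN_HDMI_A:     return max_hdmi_khz;
        case CONN_DVI_SINGLE: return 165000;        /* single-link TMDS */
        case CONN_DVI_DUAL:   return 2 * 165000;    /* dual-link TMDS */
        }
        return 0;
    }

    /* Returns 0 when the mode fits, nonzero for a MODE_CLOCK_HIGH-style
     * rejection. */
    static int validate_mode_clock(int mode_clock_khz, enum conn_type t,
                                   int max_hdmi_khz, int is_hdmi_sink)
    {
        int limit = max_pixel_clock_khz(t, max_hdmi_khz);

        /* An HDMI sink is driven through the HDMI encoder regardless of
         * the physical connector, so the HDMI limit wins, as in the hunk
         * above. */
        if (is_hdmi_sink)
            limit = max_hdmi_khz;
        return mode_clock_khz > limit;
    }
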
++ */ ++ if (connector->display_info.is_hdmi) ++ max_digital_pixel_clock_khz = max_hdmi_pixel_clock; ++ ++ if (mode->clock > max_digital_pixel_clock_khz) ++ return MODE_CLOCK_HIGH; + } + + /* check against the max pixel clock */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +index 13c97ba7a820b4..05712d322024a2 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +@@ -286,7 +286,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, + } + } + +- if (!p->gang_size) { ++ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) { + ret = -EINVAL; + goto free_all_kdata; + } +@@ -690,7 +690,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, + */ + const s64 us_upper_bound = 200000; + +- if (!adev->mm_stats.log2_max_MBps) { ++ if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) { + *max_bytes = 0; + *max_vis_bytes = 0; + return; +@@ -1732,30 +1732,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, + { + struct amdgpu_device *adev = drm_to_adev(dev); + union drm_amdgpu_wait_fences *wait = data; +- uint32_t fence_count = wait->in.fence_count; +- struct drm_amdgpu_fence *fences_user; + struct drm_amdgpu_fence *fences; + int r; + + /* Get the fences from userspace */ +- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), +- GFP_KERNEL); +- if (fences == NULL) +- return -ENOMEM; +- +- fences_user = u64_to_user_ptr(wait->in.fences); +- if (copy_from_user(fences, fences_user, +- sizeof(struct drm_amdgpu_fence) * fence_count)) { +- r = -EFAULT; +- goto err_free_fences; +- } ++ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences), ++ wait->in.fence_count, ++ sizeof(struct drm_amdgpu_fence)); ++ if (IS_ERR(fences)) ++ return PTR_ERR(fences); + + if (wait->in.wait_all) + r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); + else + r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); + +-err_free_fences: + kfree(fences); + + return r; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 200b59318759da..b2a1dc193cb8f6 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -93,6 +93,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); + MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); + MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); + MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); ++MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin"); + + #define AMDGPU_RESUME_MS 2000 + #define AMDGPU_MAX_RETRY_LIMIT 2 +@@ -1939,6 +1940,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) + case CHIP_NAVI12: + chip_name = "navi12"; + break; ++ case CHIP_CYAN_SKILLFISH: ++ chip_name = "cyan_skillfish"; ++ break; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); +@@ -4109,6 +4113,10 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) + if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) + return 0; + ++ /* No need to evict when going to S5 through S4 callbacks */ ++ if (system_state == SYSTEM_POWER_OFF) ++ return 0; ++ + ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); + if (ret) + DRM_WARN("evicting device resources failed\n"); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +index b04d789bfd1005..2e492f779b54ce 100644 +--- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +@@ -1819,13 +1819,16 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): +- case IP_VERSION(11, 0, 8): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + break; ++ case IP_VERSION(11, 0, 8): ++ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) ++ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); ++ break; + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): + amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index 940411f8e99be0..b15ce4df747986 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -1983,6 +1983,11 @@ static const struct pci_device_id pciidlist[] = { + {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, + + /* CYAN_SKILLFISH */ ++ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, ++ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, ++ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, ++ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, ++ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + {0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +index 2ff2897fd1db6c..cd8fa1164d540e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +@@ -87,10 +87,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]); + } + +- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) ++ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) { ++ mutex_lock(&adev->jpeg.jpeg_pg_lock); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_GATE); +- else ++ mutex_unlock(&adev->jpeg.jpeg_pg_lock); ++ } else + schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +index 5797055b1148f7..1f0de6e717112e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +@@ -651,7 +651,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); + return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; + case AMDGPU_INFO_VRAM_USAGE: +- ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); ++ ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? ++ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0; + return copy_to_user(out, &ui64, min(size, 8u)) ? 
-EFAULT : 0; + case AMDGPU_INFO_VIS_VRAM_USAGE: + ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); +@@ -697,8 +698,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + mem.vram.usable_heap_size = adev->gmc.real_vram_size - + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; +- mem.vram.heap_usage = +- ttm_resource_manager_usage(vram_man); ++ mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? ++ ttm_resource_manager_usage(vram_man) : 0; + mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; + + mem.cpu_accessible_vram.total_heap_size = +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +index d358a08b5e0067..08886e0ee6428b 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +@@ -2015,8 +2015,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp) + if (!ret && !psp->securedisplay_context.context.resp_status) { + psp->securedisplay_context.context.initialized = true; + mutex_init(&psp->securedisplay_context.mutex); +- } else ++ } else { ++ /* don't try again */ ++ psp->securedisplay_context.context.bin_desc.size_bytes = 0; + return ret; ++ } + + mutex_lock(&psp->securedisplay_context.mutex); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +index 7cb4b4118335a6..5a4b1b625f0371 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +@@ -604,8 +604,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) + vf2pf_info->driver_cert = 0; + vf2pf_info->os_info.all = 0; + +- vf2pf_info->fb_usage = +- ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20; ++ vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? ++ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0; + vf2pf_info->fb_vis_usage = + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; + vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +index 35dc926f234e39..2e194aa608489d 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +@@ -1085,7 +1085,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, + svm_range_list_lock_and_flush_work(&p->svms, current->mm); + mutex_lock(&p->svms.lock); + mmap_write_unlock(current->mm); +- if (interval_tree_iter_first(&p->svms.objects, ++ ++ /* Skip a special case that allocates VRAM without VA, ++ * VA will be invalid of 0. 
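
The amdgpu_cs hunk above collapses kmalloc_array() + copy_from_user() + an error-unwind label into a single memdup_array_user() call, which bounds-checks the element count and reports failure as an ERR_PTR. A userspace analogue, assuming NULL plus errno in place of the kernel's ERR_PTR convention:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * One helper that sizes, allocates and copies, so callers cannot get
     * the three steps out of sync. In the kernel the copy step is
     * copy_from_user() and failures come back as ERR_PTR(-EOVERFLOW /
     * -ENOMEM / -EFAULT).
     */
    static void *memdup_array(const void *src, size_t n, size_t elem)
    {
        void *dst;

        if (elem && n > SIZE_MAX / elem) {
            errno = EOVERFLOW;          /* count * size would wrap */
            return NULL;
        }
        dst = malloc(n * elem);
        if (!dst)
            return NULL;
        return memcpy(dst, src, n * elem);
    }

Besides being shorter, this shape removes the window in which a caller could pass an unchecked user-controlled count into the allocation.
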
++ */ ++ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) && ++ interval_tree_iter_first(&p->svms.objects, + args->va_addr >> PAGE_SHIFT, + (args->va_addr + args->size - 1) >> PAGE_SHIFT)) { + pr_err("Address: 0x%llx already allocated by SVM\n", +@@ -2567,8 +2572,8 @@ static int criu_restore(struct file *filep, + pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n", + args->num_devices, args->num_bos, args->num_objects, args->priv_data_size); + +- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size || +- !args->num_devices || !args->num_bos) ++ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data || ++ !args->priv_data_size || !args->num_devices) + return -EINVAL; + + mutex_lock(&p->mutex); +@@ -3252,8 +3257,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) + int retcode = -EINVAL; + bool ptrace_attached = false; + +- if (nr >= AMDKFD_CORE_IOCTL_COUNT) ++ if (nr >= AMDKFD_CORE_IOCTL_COUNT) { ++ retcode = -ENOTTY; + goto err_i1; ++ } + + if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { + u32 amdkfd_size; +@@ -3266,8 +3273,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) + asize = amdkfd_size; + + cmd = ioctl->cmd; +- } else ++ } else { ++ retcode = -ENOTTY; + goto err_i1; ++ } + + dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 2786d47961e075..6af65db4de9479 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -1017,7 +1017,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) + } + + for (i = 0; i < kfd->num_nodes; i++) { +- node = kfd->nodes[i]; ++ /* Race if another thread in b/w ++ * kfd_cleanup_nodes and kfree(kfd), ++ * when kfd->nodes[i] = NULL ++ */ ++ if (kfd->nodes[i]) ++ node = kfd->nodes[i]; ++ else ++ return; ++ + spin_lock_irqsave(&node->interrupt_lock, flags); + + if (node->interrupts_active +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +index 27c9d5c43765af..b475c2ab9768af 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +@@ -111,7 +111,14 @@ + + #define KFD_KERNEL_QUEUE_SIZE 2048 + +-#define KFD_UNMAP_LATENCY_MS (4000) ++/* KFD_UNMAP_LATENCY_MS is the timeout CP waiting for SDMA preemption. One XCC ++ * can be associated to 2 SDMA engines. queue_preemption_timeout_ms is the time ++ * driver waiting for CP returning the UNMAP_QUEUE fence. Thus the math is ++ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + cp workload ++ * The format here makes CP workload 10% of total timeout ++ */ ++#define KFD_UNMAP_LATENCY_MS \ ++ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1) + + #define KFD_MAX_SDMA_QUEUES 128 + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +index 3168d6fb11e76b..9ba9732f0172ac 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +@@ -1709,6 +1709,29 @@ static int svm_range_validate_and_map(struct mm_struct *mm, + + next = min(vma->vm_end, end); + npages = (next - addr) >> PAGE_SHIFT; ++ /* HMM requires at least READ permissions. If provided with PROT_NONE, ++ * unmap the memory. 
If it's not already mapped, this is a no-op ++ * If PROT_WRITE is provided without READ, warn first then unmap ++ */ ++ if (!(vma->vm_flags & VM_READ)) { ++ unsigned long e, s; ++ ++ svm_range_lock(prange); ++ if (vma->vm_flags & VM_WRITE) ++ pr_debug("VM_WRITE without VM_READ is not supported"); ++ s = max(start, prange->start); ++ e = min(end, prange->last); ++ if (e >= s) ++ r = svm_range_unmap_from_gpus(prange, s, e, ++ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU); ++ svm_range_unlock(prange); ++ /* If unmap returns non-zero, we'll bail on the next for loop ++ * iteration, so just leave r and continue ++ */ ++ addr = next; ++ continue; ++ } ++ + WRITE_ONCE(p->svms.faulting_task, current); + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, + readonly, owner, NULL, +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 8421e5f0737bfc..faef07fdfd3027 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2993,6 +2993,7 @@ static int dm_resume(void *handle) + /* Do mst topology probing after resuming cached state*/ + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { ++ bool init = false; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; +@@ -3002,7 +3003,14 @@ static int dm_resume(void *handle) + aconnector->mst_root) + continue; + +- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); ++ scoped_guard(mutex, &aconnector->mst_mgr.lock) { ++ init = !aconnector->mst_mgr.mst_primary; ++ } ++ if (init) ++ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx, ++ aconnector->dc_link, false); ++ else ++ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); + } + drm_connector_list_iter_end(&iter); + +@@ -9243,6 +9251,8 @@ static void get_freesync_config_for_crtc( + } else { + config.state = VRR_STATE_INACTIVE; + } ++ } else { ++ config.state = VRR_STATE_UNSUPPORTED; + } + out: + new_crtc_state->freesync_config = config; +@@ -10838,7 +10848,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, + + dm_con_state = to_dm_connector_state(connector->state); + +- if (!adev->dm.freesync_module) ++ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version)) + goto update; + + /* Some eDP panels only have the refresh rate range info in DisplayID */ +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +index a5489fe6875f45..086f60e1dd1730 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +@@ -561,6 +561,7 @@ static void vg_clk_mgr_helper_populate_bw_params( + { + int i, j; + struct clk_bw_params *bw_params = clk_mgr->base.bw_params; ++ uint32_t max_dispclk = 0, max_dppclk = 0; + + j = -1; + +@@ -581,6 +582,15 @@ static void vg_clk_mgr_helper_populate_bw_params( + return; + } + ++ /* dispclk and dppclk can be max at any voltage, same number of levels for both */ ++ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS && ++ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) { ++ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); ++ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); ++ } else { ++ ASSERT(0); ++ } ++ + bw_params->clk_table.num_entries = j + 1; + + for 
(i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) { +@@ -588,11 +598,17 @@ static void vg_clk_mgr_helper_populate_bw_params( + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage; + bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage); ++ ++ /* Now update clocks we do read */ ++ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; ++ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; + } + bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage; + bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS); ++ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS); ++ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS); + + bw_params->vram_type = bios_info->memory_type; + bw_params->num_channels = bios_info->ma_channel_number; +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 640d010b52bec3..31c53491b83747 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -2068,6 +2068,18 @@ enum dc_status dc_commit_streams(struct dc *dc, + goto fail; + } + ++ /* ++ * If not already seamless, make transition seamless by inserting intermediate minimal transition ++ */ ++ if (dc->hwss.is_pipe_topology_transition_seamless && ++ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) { ++ res = commit_minimal_transition_state(dc, context); ++ if (res != DC_OK) { ++ BREAK_TO_DEBUGGER(); ++ goto fail; ++ } ++ } ++ + res = dc_commit_state_no_check(dc, context); + + for (i = 0; i < stream_count; i++) { +@@ -2940,6 +2952,9 @@ static void copy_stream_update_to_stream(struct dc *dc, + if (update->adaptive_sync_infopacket) + stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; + ++ if (update->avi_infopacket) ++ stream->avi_infopacket = *update->avi_infopacket; ++ + if (update->dither_option) + stream->dither_option = *update->dither_option; + +@@ -3146,7 +3161,8 @@ static void commit_planes_do_stream_update(struct dc *dc, + stream_update->vsp_infopacket || + stream_update->hfvsif_infopacket || + stream_update->adaptive_sync_infopacket || +- stream_update->vtem_infopacket) { ++ stream_update->vtem_infopacket || ++ stream_update->avi_infopacket) { + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); + +@@ -4229,6 +4245,7 @@ static bool full_update_required(struct dc *dc, + stream_update->hfvsif_infopacket || + stream_update->vtem_infopacket || + stream_update->adaptive_sync_infopacket || ++ stream_update->avi_infopacket || + stream_update->dpms_off || + stream_update->allow_freesync || + stream_update->vrr_active_variable || +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +index 2f25f7096c4d63..802c0e19d03b3c 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +@@ -150,7 +150,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) + 
+ case FAMILY_NV: + dc_version = DCN_VERSION_2_0; +- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) { ++ if (asic_id.chip_id == DEVICE_ID_NV_13FE || ++ asic_id.chip_id == DEVICE_ID_NV_143F || ++ asic_id.chip_id == DEVICE_ID_NV_13F9 || ++ asic_id.chip_id == DEVICE_ID_NV_13FA || ++ asic_id.chip_id == DEVICE_ID_NV_13FB || ++ asic_id.chip_id == DEVICE_ID_NV_13FC || ++ asic_id.chip_id == DEVICE_ID_NV_13DB) { + dc_version = DCN_VERSION_2_01; + break; + } +@@ -3219,8 +3225,14 @@ static void set_avi_info_frame( + unsigned int fr_ind = pipe_ctx->stream->timing.fr_index; + enum dc_timing_3d_format format; + ++ if (stream->avi_infopacket.valid) { ++ *info_packet = stream->avi_infopacket; ++ return; ++ } ++ + memset(&hdmi_info, 0, sizeof(union hdmi_info_packet)); + ++ + color_space = pipe_ctx->stream->output_color_space; + if (color_space == COLOR_SPACE_UNKNOWN) + color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? +diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c +index 3907eeff560ce7..0713a503f7f6d4 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c ++++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c +@@ -744,3 +744,8 @@ char *dce_version_to_string(const int version) + return "Unknown"; + } + } ++ ++bool dc_supports_vrr(const enum dce_version v) ++{ ++ return v >= DCE_VERSION_8_0; ++} +diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h +index d5b3e3a32cc6d4..ad020cc2463764 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h ++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h +@@ -197,6 +197,7 @@ struct dc_stream_state { + struct dc_info_packet hfvsif_infopacket; + struct dc_info_packet vtem_infopacket; + struct dc_info_packet adaptive_sync_infopacket; ++ struct dc_info_packet avi_infopacket; + uint8_t dsc_packed_pps[128]; + struct rect src; /* composition area */ + struct rect dst; /* stream addressable area */ +@@ -323,6 +324,8 @@ struct dc_stream_update { + struct dc_info_packet *hfvsif_infopacket; + struct dc_info_packet *vtem_infopacket; + struct dc_info_packet *adaptive_sync_infopacket; ++ struct dc_info_packet *avi_infopacket; ++ + bool *dpms_off; + bool integer_scaling_update; + bool *allow_freesync; +diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h +index d0eed3b4771e6f..f2ab2c42781a44 100644 +--- a/drivers/gpu/drm/amd/display/dc/dm_services.h ++++ b/drivers/gpu/drm/amd/display/dc/dm_services.h +@@ -294,4 +294,6 @@ void dm_dtn_log_end(struct dc_context *ctx, + + char *dce_version_to_string(const int version); + ++bool dc_supports_vrr(const enum dce_version v); ++ + #endif /* __DM_SERVICES_H__ */ +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +index 6ce90678b33c03..dc7435d5ef4df7 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +@@ -326,7 +326,7 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param + struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool); + struct clk_limit_table *clk_table = &bw_params->clk_table; + unsigned int i, closest_clk_lvl; +- int j; ++ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0; + + dc_assert_fp_enabled(); + +@@ -338,6 +338,15 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param + dcn3_01_soc.num_chans = 
bw_params->num_channels; + + ASSERT(clk_table->num_entries); ++ ++ /* Prepass to find max clocks independent of voltage level. */ ++ for (i = 0; i < clk_table->num_entries; ++i) { ++ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) ++ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; ++ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) ++ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; ++ } ++ + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { +@@ -353,8 +362,13 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + +- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; +- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; ++ /* Clocks independent of voltage level. */ ++ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : ++ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; ++ ++ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : ++ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; ++ + s[i].dram_bw_per_chan_gbps = + dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; +diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c +index c7a9e286a5d4d3..2d98f539a100ae 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c ++++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c +@@ -1113,6 +1113,11 @@ static bool detect_link_and_local_sink(struct dc_link *link, + if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && + !sink->edid_caps.edid_hdmi) + sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; ++ else if (dc_is_dvi_signal(sink->sink_signal) && ++ dc_is_dvi_signal(link->connector_signal) && ++ aud_support->hdmi_audio_native && ++ sink->edid_caps.edid_hdmi) ++ sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; + + if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) + dp_trace_init(link); +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +index 51e88efee11e4a..08c2f117241407 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +@@ -974,14 +974,19 @@ void repeater_training_done(struct dc_link *link, uint32_t offset) + static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) + { + uint8_t sink_status = 0; +- uint8_t i; ++ uint32_t i; ++ uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); ++ uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? 
lttpr_count * 300 : 10); ++ ++ // Each hop could theoretically take over 256ms (max 128b/132b AUX RD INTERVAL) ++ // To be safe, allow 300ms per LTTPR and 10ms for no LTTPR case + + /* clear training pattern set */ + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (encoding == DP_128b_132b_ENCODING) { + /* poll for intra-hop disable */ +- for (i = 0; i < 10; i++) { ++ for (i = 0; i < intra_hop_disable_time_ms; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; +diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h +index e317089cf6ee7f..b913d08a5e0380 100644 +--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h ++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h +@@ -213,6 +213,11 @@ enum { + #endif + #define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH + #define DEVICE_ID_NV_143F 0x143F ++#define DEVICE_ID_NV_13F9 0x13F9 ++#define DEVICE_ID_NV_13FA 0x13FA ++#define DEVICE_ID_NV_13FB 0x13FB ++#define DEVICE_ID_NV_13FC 0x13FC ++#define DEVICE_ID_NV_13DB 0x13DB + #define FAMILY_VGH 144 + #define DEVICE_ID_VGH_163F 0x163F + #define DEVICE_ID_VGH_1435 0x1435 +diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +index 2863dc65ffc6fb..e5f68b2b8def3b 100644 +--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c ++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +@@ -3485,6 +3485,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, + * for these GPUs to calculate bandwidth requirements. + */ + if (high_pixelclock_count) { ++ /* Work around flickering lines at the bottom edge ++ * of the screen when using a single 4K 60Hz monitor. ++ */ ++ disable_mclk_switching = true; ++ + /* On Oland, we observe some flickering when two 4K 60Hz + * displays are connected, possibly because voltage is too low. + * Raise the voltage by requiring a higher SCLK. 
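
The DP training hunk above widens the intra-hop poll loop from a fixed 10 iterations to a budget of 300 ms per LTTPR, since each repeater hop can legitimately take up to its own AUX RD INTERVAL. A self-contained sketch of that poll-with-scaled-budget pattern; the status stub stands in for a real DPCD read:

    #include <stdbool.h>
    #include <unistd.h>

    /* Stub standing in for a DPCD status read (core_link_read_dpcd()). */
    static bool intra_hop_in_progress(void)
    {
        return false;       /* pretend the sink reports idle immediately */
    }

    /*
     * The timeout budget scales with the number of repeaters instead of
     * being a fixed constant: 300 ms per LTTPR, 10 ms when there are none.
     */
    static bool wait_intra_hop_clear(unsigned int lttpr_count)
    {
        unsigned int budget_ms = lttpr_count ? lttpr_count * 300 : 10;
        unsigned int i;

        for (i = 0; i < budget_ms; i++) {
            if (!intra_hop_in_progress())
                return true;
            usleep(1000);   /* roughly 1 ms per pass, matching the budget */
        }
        return false;       /* timed out; the caller logs and moves on */
    }

Scaling the budget by topology rather than picking a larger fixed constant keeps the no-repeater case fast while still covering deep LTTPR chains.
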
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +index 5e43ad2b295641..e7e497b166b3eb 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; +- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ ++ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); + table->PCIeGenInterval = 1; + table->VRConfig = 0; + +diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +index 97d9802fe6731f..43458f1b0077dd 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; +- table->PCIeBootLinkLevel = 0; ++ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); + table->PCIeGenInterval = 1; + + result = iceland_populate_smc_svi2_config(hwmgr, table); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +index 0cdf3257b19b3b..a47898487de09b 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +@@ -2376,7 +2376,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu, + + ret = smu_cmn_get_metrics_table(smu, + &metrics, +- true); ++ false); + if (ret) + return ret; + +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +index b2256178014990..d1fd643e7a48c8 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +@@ -1737,7 +1737,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, + + ret = smu_cmn_get_metrics_table(smu, + &metrics, +- true); ++ false); + if (ret) + return ret; + +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +index c1962f1974c6fe..2c9612b5f15689 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +@@ -870,7 +870,7 @@ int smu_cmn_update_table(struct smu_context *smu, + table_index); + uint32_t table_size; + int ret = 0; +- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) ++ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) + return -EINVAL; + + table_size = smu_table->tables[table_index].size; +diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +index 89eed0668bfb24..ddfbb2009c8d38 100644 +--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c ++++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +@@ -833,7 +833,13 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) + + tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8, + phy_cfg->hs_clk_rate); +- reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period; ++ ++ /* ++ * Estimated time [in clock cycles] to perform LP->HS on D-PHY. 
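
In the i915 clock-utils hunk a little further below, div_u64_roundup(count * NSEC_PER_SEC, freq) becomes mul_u64_u32_div(count, NSEC_PER_SEC, freq): the 64-bit product count * NSEC_PER_SEC wraps once count exceeds about 1.8e10 ticks, a value a GT counter reaches quickly, while mul_u64_u32_div() uses a wider intermediate. A demonstration, assuming a GCC/Clang compiler that provides unsigned __int128:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Naive conversion: ticks * NSEC_PER_SEC wraps for ticks above
     * UINT64_MAX / NSEC_PER_SEC (about 1.8e10). */
    static uint64_t ticks_to_ns_naive(uint64_t ticks, uint32_t hz)
    {
        return (ticks * NSEC_PER_SEC) / hz;
    }

    /* 128-bit intermediate, which is what mul_u64_u32_div() provides. */
    static uint64_t ticks_to_ns_wide(uint64_t ticks, uint32_t hz)
    {
        return (uint64_t)(((unsigned __int128)ticks * NSEC_PER_SEC) / hz);
    }

    int main(void)
    {
        uint64_t ticks = 40000000000ULL;    /* ~35 min at 19.2 MHz */

        printf("naive: %llu\nwide:  %llu\n",
               (unsigned long long)ticks_to_ns_naive(ticks, 19200000),
               (unsigned long long)ticks_to_ns_wide(ticks, 19200000));
        return 0;
    }

The naive result is visibly wrong because the multiply wrapped before the divide ever ran; the wide version survives the full u64 tick range.
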
++ * It is not clear how to calculate this, so for now, ++ * set it to 1/10 of the total number of clocks in a line. ++ */ ++ reg_wakeup = dsi_cfg.htotal / nlanes / 10; + writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp), + dsi->regs + VID_DPHY_TIME); + +@@ -953,10 +959,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, + if (output->dev) + return -EBUSY; + +- /* We do not support burst mode yet. */ +- if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) +- return -ENOTSUPP; +- + /* + * The host <-> device link might be described using an OF-graph + * representation, in this case we extract the device of_node from +diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c +index 08bd5695ddae0f..0f3714aae60891 100644 +--- a/drivers/gpu/drm/bridge/display-connector.c ++++ b/drivers/gpu/drm/bridge/display-connector.c +@@ -363,7 +363,8 @@ static int display_connector_probe(struct platform_device *pdev) + if (conn->bridge.ddc) + conn->bridge.ops |= DRM_BRIDGE_OP_EDID + | DRM_BRIDGE_OP_DETECT; +- if (conn->hpd_gpio) ++ /* Detecting the monitor requires reading DPCD */ ++ if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort) + conn->bridge.ops |= DRM_BRIDGE_OP_DETECT; + if (conn->hpd_irq >= 0) + conn->bridge.ops |= DRM_BRIDGE_OP_HPD; +diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c +index 5d4b9cd077f7a6..e0ea3c661cb778 100644 +--- a/drivers/gpu/drm/drm_gem_atomic_helper.c ++++ b/drivers/gpu/drm/drm_gem_atomic_helper.c +@@ -301,7 +301,11 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state); + void __drm_gem_reset_shadow_plane(struct drm_plane *plane, + struct drm_shadow_plane_state *shadow_plane_state) + { +- __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); ++ if (shadow_plane_state) { ++ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); ++ } else { ++ __drm_atomic_helper_plane_reset(plane, NULL); ++ } + } + EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); + +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +index b13a17276d07cd..88385dc3b30d85 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +@@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, + u32 link_target, link_dwords; + bool switch_context = gpu->exec_state != exec_state; + bool switch_mmu_context = gpu->mmu_context != mmu_context; +- unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); ++ unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq); + bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; + bool has_blt = !!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); +diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +index 7c9be4fd1c8c44..7a950c1502b6e6 100644 +--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c ++++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +@@ -208,7 +208,7 @@ static u64 div_u64_roundup(u64 nom, u32 den) + + u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count) + { +- return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency); ++ return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency); + } + + u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count) +@@ -218,7 +218,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count) + + u64 
intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns) + { +- return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC); ++ return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC); + } + + u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns) +diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c +index 46e4a45e3c72ae..772594360d57e0 100644 +--- a/drivers/gpu/drm/i915/i915_vma.c ++++ b/drivers/gpu/drm/i915/i915_vma.c +@@ -1586,8 +1586,20 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + err_vma_res: + i915_vma_resource_free(vma_res); + err_fence: +- if (work) +- dma_fence_work_commit_imm(&work->base); ++ if (work) { ++ /* ++ * When pinning VMA to GGTT on CHV or BXT with VTD enabled, ++ * commit VMA binding asynchronously to avoid risk of lock ++ * inversion among reservation_ww locks held here and ++ * cpu_hotplug_lock acquired from stop_machine(), which we ++ * wrap around GGTT updates when running in those environments. ++ */ ++ if (i915_vma_is_ggtt(vma) && ++ intel_vm_no_concurrent_access_wa(vma->vm->i915)) ++ dma_fence_work_commit(&work->base); ++ else ++ dma_fence_work_commit_imm(&work->base); ++ } + err_rpm: + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); + +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +index f1f73c1e7b5cbf..83dcc475769b1e 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +@@ -655,10 +655,6 @@ static int mtk_drm_bind(struct device *dev) + for (i = 0; i < private->data->mmsys_dev_num; i++) + private->all_drm_private[i]->drm = NULL; + err_put_dev: +- for (i = 0; i < private->data->mmsys_dev_num; i++) { +- /* For device_find_child in mtk_drm_get_all_priv() */ +- put_device(private->all_drm_private[i]->dev); +- } + put_device(private->mutex_dev); + return ret; + } +@@ -666,18 +662,12 @@ static int mtk_drm_bind(struct device *dev) + static void mtk_drm_unbind(struct device *dev) + { + struct mtk_drm_private *private = dev_get_drvdata(dev); +- int i; + + /* for multi mmsys dev, unregister drm dev in mmsys master */ + if (private->drm_master) { + drm_dev_unregister(private->drm); + mtk_drm_kms_deinit(private->drm); + drm_dev_put(private->drm); +- +- for (i = 0; i < private->data->mmsys_dev_num; i++) { +- /* For device_find_child in mtk_drm_get_all_priv() */ +- put_device(private->all_drm_private[i]->dev); +- } + put_device(private->mutex_dev); + } + private->mtk_drm_bound = false; +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c +index f10d4cc6c2234f..32038cff273083 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c +@@ -21,9 +21,6 @@ + + static const u64 modifiers[] = { + DRM_FORMAT_MOD_LINEAR, +- DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | +- AFBC_FORMAT_MOD_SPLIT | +- AFBC_FORMAT_MOD_SPARSE), + DRM_FORMAT_MOD_INVALID, + }; + +@@ -71,26 +68,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) + { +- if (modifier == DRM_FORMAT_MOD_LINEAR) +- return true; +- +- if (modifier != DRM_FORMAT_MOD_ARM_AFBC( +- AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | +- AFBC_FORMAT_MOD_SPLIT | +- AFBC_FORMAT_MOD_SPARSE)) +- return false; +- +- if (format != DRM_FORMAT_XRGB8888 && +- format != DRM_FORMAT_ARGB8888 && +- format != DRM_FORMAT_BGRX8888 && +- format != DRM_FORMAT_BGRA8888 && +- format != DRM_FORMAT_ABGR8888 && +- format != 
DRM_FORMAT_XBGR8888 && +- format != DRM_FORMAT_RGB888 && +- format != DRM_FORMAT_BGR888) +- return false; +- +- return true; ++ return modifier == DRM_FORMAT_MOD_LINEAR; + } + + static void mtk_drm_plane_destroy_state(struct drm_plane *plane, +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +index c50aafa0ecdb61..e816ddcac2f8d5 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +@@ -693,6 +693,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) + return true; + } + ++#define NEXT_BLK(blk) \ ++ ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) ++ + static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) + { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); +@@ -723,7 +726,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) + + for (blk = (const struct block_header *) fw_image->data; + (const u8*) blk < fw_image->data + fw_image->size; +- blk = (const struct block_header *) &blk->data[blk->size >> 2]) { ++ blk = NEXT_BLK(blk)) { + if (blk->size == 0) + continue; + +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 00bfc6f38f459d..4654c0f362c7de 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1702,6 +1702,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) + /* Turn off the hangcheck timer to keep it from bothering us */ + del_timer(&gpu->hangcheck_timer); + ++ /* Turn off interrupts to avoid triggering recovery again */ ++ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, 0); ++ + kthread_queue_work(gpu->worker, &gpu->recover_work); + } + +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +index f72ce6a3c456d5..bed9867ced6d4e 100644 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +@@ -445,6 +445,10 @@ static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw) + if (pll_7nm->slave) + dsi_pll_enable_global_clk(pll_7nm->slave); + ++ writel(0x1, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL); ++ if (pll_7nm->slave) ++ writel(0x1, pll_7nm->slave->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL); ++ + error: + return rc; + } +@@ -793,6 +797,12 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy) + + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_phy_pll_save_state(phy); ++ /* ++ * Store also proper vco_current_rate, because its value will be used in ++ * dsi_7nm_pll_restore_state(). ++ */ ++ if (!dsi_pll_7nm_vco_recalc_rate(&pll_7nm->clk_hw, VCO_REF_CLK_RATE)) ++ pll_7nm->vco_current_rate = pll_7nm->phy->cfg->min_pll_rate; + + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c +index b9581feb24ccb7..a23b40b27b81bc 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c ++++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c +@@ -44,7 +44,7 @@ nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value) + bool space = false; + while (size >= 1 && bf->name) { + if (value & bf->mask) { +- int this = snprintf(data, size, "%s%s", ++ int this = scnprintf(data, size, "%s%s", + space ? 
" " : "", bf->name); + size -= this; + data += this; +diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c +index 245a1ef5278e88..6855fbb7e6a3bd 100644 +--- a/drivers/gpu/drm/scheduler/sched_entity.c ++++ b/drivers/gpu/drm/scheduler/sched_entity.c +@@ -163,26 +163,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity) + } + EXPORT_SYMBOL(drm_sched_entity_error); + ++static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, ++ struct dma_fence_cb *cb); ++ + static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) + { + struct drm_sched_job *job = container_of(wrk, typeof(*job), work); +- +- drm_sched_fence_scheduled(job->s_fence, NULL); +- drm_sched_fence_finished(job->s_fence, -ESRCH); +- WARN_ON(job->s_fence->parent); +- job->sched->ops->free_job(job); +-} +- +-/* Signal the scheduler finished fence when the entity in question is killed. */ +-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, +- struct dma_fence_cb *cb) +-{ +- struct drm_sched_job *job = container_of(cb, struct drm_sched_job, +- finish_cb); ++ struct dma_fence *f; + unsigned long index; + +- dma_fence_put(f); +- + /* Wait for all dependencies to avoid data corruptions */ + xa_for_each(&job->dependencies, index, f) { + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); +@@ -210,6 +199,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + dma_fence_put(f); + } + ++ drm_sched_fence_scheduled(job->s_fence, NULL); ++ drm_sched_fence_finished(job->s_fence, -ESRCH); ++ WARN_ON(job->s_fence->parent); ++ job->sched->ops->free_job(job); ++} ++ ++/* Signal the scheduler finished fence when the entity in question is killed. */ ++static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, ++ struct dma_fence_cb *cb) ++{ ++ struct drm_sched_job *job = container_of(cb, struct drm_sched_job, ++ finish_cb); ++ ++ dma_fence_put(f); ++ + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); + schedule_work(&job->work); + } +@@ -531,10 +535,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) + drm_sched_rq_remove_entity(entity->rq, entity); + entity->rq = rq; + } +- spin_unlock(&entity->rq_lock); + + if (entity->num_sched_list == 1) + entity->sched_list = NULL; ++ ++ spin_unlock(&entity->rq_lock); + } + + /** +diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c +index 1baa4ace12e150..356b7566013b1b 100644 +--- a/drivers/gpu/drm/tidss/tidss_crtc.c ++++ b/drivers/gpu/drm/tidss/tidss_crtc.c +@@ -91,7 +91,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, + struct dispc_device *dispc = tidss->dispc; + struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); + u32 hw_videoport = tcrtc->hw_videoport; +- const struct drm_display_mode *mode; ++ struct drm_display_mode *mode; + enum drm_mode_status ok; + + dev_dbg(ddev->dev, "%s\n", __func__); +@@ -108,6 +108,9 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, + return -EINVAL; + } + ++ if (drm_atomic_crtc_needs_modeset(crtc_state)) ++ drm_mode_set_crtcinfo(mode, 0); ++ + return dispc_vp_bus_check(dispc, hw_videoport, crtc_state); + } + +@@ -229,7 +232,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc, + tidss_runtime_get(tidss); + + r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport, +- mode->clock * 1000); ++ mode->crtc_clock * 1000); + if (r != 0) + return; + +diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c +index 355c64bafb82b8..8a702f4059b952 100644 +--- 
a/drivers/gpu/drm/tidss/tidss_dispc.c ++++ b/drivers/gpu/drm/tidss/tidss_dispc.c +@@ -1030,13 +1030,13 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, + + dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width); + +- hfp = mode->hsync_start - mode->hdisplay; +- hsw = mode->hsync_end - mode->hsync_start; +- hbp = mode->htotal - mode->hsync_end; ++ hfp = mode->crtc_hsync_start - mode->crtc_hdisplay; ++ hsw = mode->crtc_hsync_end - mode->crtc_hsync_start; ++ hbp = mode->crtc_htotal - mode->crtc_hsync_end; + +- vfp = mode->vsync_start - mode->vdisplay; +- vsw = mode->vsync_end - mode->vsync_start; +- vbp = mode->vtotal - mode->vsync_end; ++ vfp = mode->crtc_vsync_start - mode->crtc_vdisplay; ++ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; ++ vbp = mode->crtc_vtotal - mode->crtc_vsync_end; + + dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H, + FLD_VAL(hsw - 1, 7, 0) | +@@ -1078,8 +1078,8 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, + FLD_VAL(ivs, 12, 12)); + + dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN, +- FLD_VAL(mode->hdisplay - 1, 11, 0) | +- FLD_VAL(mode->vdisplay - 1, 27, 16)); ++ FLD_VAL(mode->crtc_hdisplay - 1, 11, 0) | ++ FLD_VAL(mode->crtc_vdisplay - 1, 27, 16)); + + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0); + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index b235e7cc41f3f8..92b3e44d022fea 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -3683,6 +3683,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, + + + cmd_id = header->id; ++ if (header->size > SVGA_CMD_MAX_DATASIZE) { ++ VMW_DEBUG_USER("SVGA3D command: %d is too big.\n", ++ cmd_id + SVGA_3D_CMD_BASE); ++ return -E2BIG; ++ } + *size = header->size + sizeof(SVGA3dCmdHeader); + + cmd_id -= SVGA_3D_CMD_BASE; +diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c +index a8d440f6e804fb..d971e339b5eac7 100644 +--- a/drivers/hid/hid-asus.c ++++ b/drivers/hid/hid-asus.c +@@ -1282,9 +1282,6 @@ static const struct hid_device_id asus_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2), + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, +- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, +- USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3), +- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR), + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, +@@ -1314,6 +1311,9 @@ static const struct hid_device_id asus_devices[] = { + * Note bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard + * part, while letting hid-multitouch.c handle the touchpad. 
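
The nouveau nvkm_snprintbf() hunk earlier in this series swaps snprintf() for scnprintf() because snprintf() returns the length the output would have had, so using that return value to advance a cursor ("size -= this; data += this;") walks past the buffer on truncation. A userspace illustration, emulating the kernel's scnprintf() semantics:

    #include <stdio.h>

    /*
     * scnprintf() returns the number of bytes actually written (excluding
     * the terminator), never more than size - 1; this emulation shows the
     * difference.
     */
    static int scnprintf_emul(char *buf, size_t size, const char *s)
    {
        int n;

        if (!size)
            return 0;
        n = snprintf(buf, size, "%s", s);
        if (n < 0)
            return 0;
        /* Clamp to what really landed in buf. */
        return (size_t)n >= size ? (int)(size - 1) : n;
    }

    int main(void)
    {
        char buf[8];
        int n = snprintf(buf, sizeof(buf), "0123456789");

        /* n == 10 even though only 7 chars plus NUL were stored. */
        printf("snprintf returned %d, stored \"%s\"\n", n, buf);
        printf("scnprintf-style:  %d\n",
               scnprintf_emul(buf, sizeof(buf), "0123456789"));
        return 0;
    }

In a loop that appends flag names into a fixed buffer, the snprintf() return value overshoots on the first truncated item and every later write lands out of bounds, which is exactly what the one-word fix prevents.
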
+ */ ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, ++ USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_Z13_FOLIO), ++ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) }, + { } +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 3f74633070b6ec..fbbab353f040a8 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -213,7 +213,7 @@ + #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822 + #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866 + #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6 +-#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3 0x1a30 ++#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_FOLIO 0x1a30 + #define USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR 0x18c6 + #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY 0x1abe + #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c +@@ -332,6 +332,9 @@ + #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500 + #define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff + ++#define USB_VENDOR_ID_COOLER_MASTER 0x2516 ++#define USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE 0x01b7 ++ + #define USB_VENDOR_ID_CORSAIR 0x1b1c + #define USB_DEVICE_ID_CORSAIR_K90 0x1b02 + #define USB_DEVICE_ID_CORSAIR_K70R 0x1b09 +@@ -1400,6 +1403,7 @@ + + #define USB_VENDOR_ID_VRS 0x0483 + #define USB_DEVICE_ID_VRS_DFP 0xa355 ++#define USB_DEVICE_ID_VRS_R295 0xa44c + + #define USB_VENDOR_ID_VTL 0x0306 + #define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c +index a1128c5315fffa..3c41f6841f7753 100644 +--- a/drivers/hid/hid-ntrig.c ++++ b/drivers/hid/hid-ntrig.c +@@ -142,13 +142,13 @@ static void ntrig_report_version(struct hid_device *hdev) + int ret; + char buf[20]; + struct usb_device *usb_dev = hid_to_usb_dev(hdev); +- unsigned char *data = kmalloc(8, GFP_KERNEL); ++ unsigned char *data __free(kfree) = kmalloc(8, GFP_KERNEL); + + if (!hid_is_usb(hdev)) + return; + + if (!data) +- goto err_free; ++ return; + + ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), + USB_REQ_CLEAR_FEATURE, +@@ -163,9 +163,6 @@ static void ntrig_report_version(struct hid_device *hdev) + hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n", + buf, data[2], data[3], data[4], data[5]); + } +- +-err_free: +- kfree(data); + } + + static ssize_t show_phys_width(struct device *dev, +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 64f9728018b885..75480ec3c15a2d 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -57,6 +57,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_COOLER_MASTER, USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS }, +@@ -206,6 +207,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_R295), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, +diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c +index 9859dad36495ad..eee05d668e361d 100644 +--- a/drivers/hid/hid-uclogic-params.c ++++ b/drivers/hid/hid-uclogic-params.c +@@ -1364,8 +1364,10 @@ static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev, + event_hook->hdev = hdev; + event_hook->size = ARRAY_SIZE(reconnect_event); + event_hook->event = kmemdup(reconnect_event, event_hook->size, GFP_KERNEL); +- if (!event_hook->event) ++ if (!event_hook->event) { ++ kfree(event_hook); + return -ENOMEM; ++ } + + list_add_tail(&event_hook->list, &p->event_hooks->list); + +diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c +index 1b49243adb16a5..abd700a101f46c 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c ++++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c +@@ -76,6 +76,13 @@ static int i2c_hid_acpi_get_descriptor(struct i2c_hid_acpi *ihid_acpi) + return hid_descriptor_address; + } + ++static void i2c_hid_acpi_restore_sequence(struct i2chid_ops *ops) ++{ ++ struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops); ++ ++ i2c_hid_acpi_get_descriptor(ihid_acpi); ++} ++ + static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops) + { + struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops); +@@ -96,6 +103,7 @@ static int i2c_hid_acpi_probe(struct i2c_client *client) + + ihid_acpi->adev = ACPI_COMPANION(dev); + ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail; ++ ihid_acpi->ops.restore_sequence = i2c_hid_acpi_restore_sequence; + + ret = i2c_hid_acpi_get_descriptor(ihid_acpi); + if (ret < 0) +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c +index 3dcdd3368b463e..172b783274201b 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c +@@ -937,6 +937,14 @@ static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid) + ihid->ops->shutdown_tail(ihid->ops); + } + ++static void i2c_hid_core_restore_sequence(struct i2c_hid *ihid) ++{ ++ if (!ihid->ops->restore_sequence) ++ return; ++ ++ ihid->ops->restore_sequence(ihid->ops); ++} ++ + static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff) + { + struct i2c_client *client = ihid->client; +@@ -1320,8 +1328,26 @@ static int i2c_hid_core_pm_resume(struct device *dev) + return i2c_hid_core_resume(ihid); + } + ++static int i2c_hid_core_pm_restore(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct i2c_hid *ihid = i2c_get_clientdata(client); ++ ++ if (ihid->is_panel_follower) ++ return 0; ++ ++ i2c_hid_core_restore_sequence(ihid); ++ ++ return i2c_hid_core_resume(ihid); ++} ++ + const struct dev_pm_ops i2c_hid_core_pm = { +- SYSTEM_SLEEP_PM_OPS(i2c_hid_core_pm_suspend, i2c_hid_core_pm_resume) ++ .suspend = pm_sleep_ptr(i2c_hid_core_pm_suspend), ++ .resume = 
pm_sleep_ptr(i2c_hid_core_pm_resume), ++ .freeze = pm_sleep_ptr(i2c_hid_core_pm_suspend), ++ .thaw = pm_sleep_ptr(i2c_hid_core_pm_resume), ++ .poweroff = pm_sleep_ptr(i2c_hid_core_pm_suspend), ++ .restore = pm_sleep_ptr(i2c_hid_core_pm_restore), + }; + EXPORT_SYMBOL_GPL(i2c_hid_core_pm); + +diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h +index 2c7b66d5caa0f9..1724a435c783aa 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.h ++++ b/drivers/hid/i2c-hid/i2c-hid.h +@@ -27,11 +27,13 @@ static inline u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product) + * @power_up: do sequencing to power up the device. + * @power_down: do sequencing to power down the device. + * @shutdown_tail: called at the end of shutdown. ++ * @restore_sequence: hibernation restore sequence. + */ + struct i2chid_ops { + int (*power_up)(struct i2chid_ops *ops); + void (*power_down)(struct i2chid_ops *ops); + void (*shutdown_tail)(struct i2chid_ops *ops); ++ void (*restore_sequence)(struct i2chid_ops *ops); + }; + + int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, +diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c +index ce2f14a62754ee..bc2197f1dfb7f5 100644 +--- a/drivers/hwmon/asus-ec-sensors.c ++++ b/drivers/hwmon/asus-ec-sensors.c +@@ -49,7 +49,7 @@ static char *mutex_path_override; + */ + #define ASUS_EC_MAX_BANK 3 + +-#define ACPI_LOCK_DELAY_MS 500 ++#define ACPI_LOCK_DELAY_MS 800 + + /* ACPI mutex for locking access to the EC for the firmware */ + #define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX" +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c +index 8d94ecc3cc468c..9844843bdfadcd 100644 +--- a/drivers/hwmon/dell-smm-hwmon.c ++++ b/drivers/hwmon/dell-smm-hwmon.c +@@ -1158,6 +1158,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = { + }, + .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490], + }, ++ { ++ .ident = "Dell OptiPlex 7040", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7040"), ++ }, ++ }, + { + .ident = "Dell Precision", + .matches = { +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c +index c906731c6c2d3e..dc82e33d59c5fc 100644 +--- a/drivers/hwmon/k10temp.c ++++ b/drivers/hwmon/k10temp.c +@@ -84,6 +84,13 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); + */ + #define AMD_I3255_STR "3255" + ++/* ++ * PCI Device IDs for AMD's Family 1Ah-based SOCs. ++ * Defining locally as IDs are not shared. 
++ */ ++#define PCI_DEVICE_ID_AMD_1AH_M50H_DF_F3 0x12cb ++#define PCI_DEVICE_ID_AMD_1AH_M90H_DF_F3 0x127b ++ + struct k10temp_data { + struct pci_dev *pdev; + void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); +@@ -545,7 +552,10 @@ static const struct pci_device_id k10temp_id_table[] = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M50H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) }, ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) }, ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M90H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + {} + }; +diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c +index dd85cf89f008a9..7c49fcf8641433 100644 +--- a/drivers/hwmon/sbtsi_temp.c ++++ b/drivers/hwmon/sbtsi_temp.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + /* + * SB-TSI registers only support SMBus byte data access. "_INT" registers are +@@ -29,8 +30,22 @@ + #define SBTSI_REG_TEMP_HIGH_DEC 0x13 /* RW */ + #define SBTSI_REG_TEMP_LOW_DEC 0x14 /* RW */ + ++/* ++ * Bit for reporting value with temperature measurement range. ++ * bit == 0: Use default temperature range (0C to 255.875C). ++ * bit == 1: Use extended temperature range (-49C to +206.875C). ++ */ ++#define SBTSI_CONFIG_EXT_RANGE_SHIFT 2 ++/* ++ * ReadOrder bit specifies the reading order of integer and decimal part of ++ * CPU temperature for atomic reads. If bit == 0, reading integer part triggers ++ * latching of the decimal part, so integer part should be read first. ++ * If bit == 1, read order should be reversed. ++ */ + #define SBTSI_CONFIG_READ_ORDER_SHIFT 5 + ++#define SBTSI_TEMP_EXT_RANGE_ADJ 49000 ++ + #define SBTSI_TEMP_MIN 0 + #define SBTSI_TEMP_MAX 255875 + +@@ -38,6 +53,8 @@ + struct sbtsi_data { + struct i2c_client *client; + struct mutex lock; ++ bool ext_range_mode; ++ bool read_order; + }; + + /* +@@ -74,23 +91,11 @@ static int sbtsi_read(struct device *dev, enum hwmon_sensor_types type, + { + struct sbtsi_data *data = dev_get_drvdata(dev); + s32 temp_int, temp_dec; +- int err; + + switch (attr) { + case hwmon_temp_input: +- /* +- * ReadOrder bit specifies the reading order of integer and +- * decimal part of CPU temp for atomic reads. If bit == 0, +- * reading integer part triggers latching of the decimal part, +- * so integer part should be read first. If bit == 1, read +- * order should be reversed. 
+- */ +- err = i2c_smbus_read_byte_data(data->client, SBTSI_REG_CONFIG); +- if (err < 0) +- return err; +- + mutex_lock(&data->lock); +- if (err & BIT(SBTSI_CONFIG_READ_ORDER_SHIFT)) { ++ if (data->read_order) { + temp_dec = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_DEC); + temp_int = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_INT); + } else { +@@ -122,6 +127,8 @@ static int sbtsi_read(struct device *dev, enum hwmon_sensor_types type, + return temp_dec; + + *val = sbtsi_reg_to_mc(temp_int, temp_dec); ++ if (data->ext_range_mode) ++ *val -= SBTSI_TEMP_EXT_RANGE_ADJ; + + return 0; + } +@@ -146,6 +153,8 @@ static int sbtsi_write(struct device *dev, enum hwmon_sensor_types type, + return -EINVAL; + } + ++ if (data->ext_range_mode) ++ val += SBTSI_TEMP_EXT_RANGE_ADJ; + val = clamp_val(val, SBTSI_TEMP_MIN, SBTSI_TEMP_MAX); + sbtsi_mc_to_reg(val, &temp_int, &temp_dec); + +@@ -203,6 +212,7 @@ static int sbtsi_probe(struct i2c_client *client) + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct sbtsi_data *data; ++ int err; + + data = devm_kzalloc(dev, sizeof(struct sbtsi_data), GFP_KERNEL); + if (!data) +@@ -211,8 +221,14 @@ static int sbtsi_probe(struct i2c_client *client) + data->client = client; + mutex_init(&data->lock); + +- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &sbtsi_chip_info, +- NULL); ++ err = i2c_smbus_read_byte_data(data->client, SBTSI_REG_CONFIG); ++ if (err < 0) ++ return err; ++ data->ext_range_mode = FIELD_GET(BIT(SBTSI_CONFIG_EXT_RANGE_SHIFT), err); ++ data->read_order = FIELD_GET(BIT(SBTSI_CONFIG_READ_ORDER_SHIFT), err); ++ ++ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, ++ &sbtsi_chip_info, NULL); + + return PTR_ERR_OR_ZERO(hwmon_dev); + } +diff --git a/drivers/hwmon/sy7636a-hwmon.c b/drivers/hwmon/sy7636a-hwmon.c +index ed110884786b48..a12fc0ce70e76e 100644 +--- a/drivers/hwmon/sy7636a-hwmon.c ++++ b/drivers/hwmon/sy7636a-hwmon.c +@@ -104,3 +104,4 @@ module_platform_driver(sy7636a_sensor_driver); + + MODULE_DESCRIPTION("SY7636A sensor driver"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:sy7636a-temperature"); +diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c +index 512d7b95b08e6f..2303ae19c602ea 100644 +--- a/drivers/iio/adc/imx93_adc.c ++++ b/drivers/iio/adc/imx93_adc.c +@@ -38,6 +38,7 @@ + #define IMX93_ADC_PCDR6 0x118 + #define IMX93_ADC_PCDR7 0x11c + #define IMX93_ADC_CALSTAT 0x39C ++#define IMX93_ADC_CALCFG0 0x3A0 + + /* ADC bit shift */ + #define IMX93_ADC_MCR_MODE_MASK BIT(29) +@@ -58,6 +59,8 @@ + #define IMX93_ADC_IMR_ECH_MASK BIT(0) + #define IMX93_ADC_PCDR_CDATA_MASK GENMASK(11, 0) + ++#define IMX93_ADC_CALCFG0_LDFAIL_MASK BIT(4) ++ + /* ADC status */ + #define IMX93_ADC_MSR_ADCSTATUS_IDLE 0 + #define IMX93_ADC_MSR_ADCSTATUS_POWER_DOWN 1 +@@ -145,7 +148,7 @@ static void imx93_adc_config_ad_clk(struct imx93_adc *adc) + + static int imx93_adc_calibration(struct imx93_adc *adc) + { +- u32 mcr, msr; ++ u32 mcr, msr, calcfg; + int ret; + + /* make sure ADC in power down mode */ +@@ -158,6 +161,11 @@ static int imx93_adc_calibration(struct imx93_adc *adc) + + imx93_adc_power_up(adc); + ++ /* Enable loading of calibrated values even in fail condition */ ++ calcfg = readl(adc->regs + IMX93_ADC_CALCFG0); ++ calcfg |= IMX93_ADC_CALCFG0_LDFAIL_MASK; ++ writel(calcfg, adc->regs + IMX93_ADC_CALCFG0); ++ + /* + * TODO: we use the default TSAMP/NRSMPL/AVGEN in MCR, + * can add the setting of these bit if need in future. 
+@@ -180,9 +188,13 @@ static int imx93_adc_calibration(struct imx93_adc *adc) + /* check whether calbration is success or not */ + msr = readl(adc->regs + IMX93_ADC_MSR); + if (msr & IMX93_ADC_MSR_CALFAIL_MASK) { ++ /* ++ * Only give a warning here; this means the noise of the ++ * reference voltage does not meet the requirement: ++ * ADC reference voltage Noise < 1.8V * 1/2^ENOB ++ * and the result of the ADC is not that accurate. ++ */ + dev_warn(adc->dev, "ADC calibration failed!\n"); +- imx93_adc_power_down(adc); +- return -EAGAIN; + } + + return 0; + } +diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c +index ad54ef79810905..602ed05552bfd4 100644 +--- a/drivers/iio/adc/spear_adc.c ++++ b/drivers/iio/adc/spear_adc.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -29,9 +30,9 @@ + + /* Bit definitions for SPEAR_ADC_STATUS */ + #define SPEAR_ADC_STATUS_START_CONVERSION BIT(0) +-#define SPEAR_ADC_STATUS_CHANNEL_NUM(x) ((x) << 1) ++#define SPEAR_ADC_STATUS_CHANNEL_NUM_MASK GENMASK(3, 1) + #define SPEAR_ADC_STATUS_ADC_ENABLE BIT(4) +-#define SPEAR_ADC_STATUS_AVG_SAMPLE(x) ((x) << 5) ++#define SPEAR_ADC_STATUS_AVG_SAMPLE_MASK GENMASK(8, 5) + #define SPEAR_ADC_STATUS_VREF_INTERNAL BIT(9) + + #define SPEAR_ADC_DATA_MASK 0x03ff +@@ -157,8 +158,8 @@ static int spear_adc_read_raw(struct iio_dev *indio_dev, + case IIO_CHAN_INFO_RAW: + mutex_lock(&st->lock); + +- status = SPEAR_ADC_STATUS_CHANNEL_NUM(chan->channel) | +- SPEAR_ADC_STATUS_AVG_SAMPLE(st->avg_samples) | ++ status = FIELD_PREP(SPEAR_ADC_STATUS_CHANNEL_NUM_MASK, chan->channel) | ++ FIELD_PREP(SPEAR_ADC_STATUS_AVG_SAMPLE_MASK, st->avg_samples) | + SPEAR_ADC_STATUS_START_CONVERSION | + SPEAR_ADC_STATUS_ADC_ENABLE; + if (st->vref_external == 0) +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +index 4a10b826d15a38..f1d4494c7d0082 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +@@ -161,6 +161,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_clear(fseg, FRMR_BLK_MODE); ++ hr_reg_clear(fseg, FRMR_BLOCK_SIZE); ++ hr_reg_clear(fseg, FRMR_ZBVA); + } + + static void set_atomic_seg(const struct ib_send_wr *wr, +@@ -335,9 +337,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, + int j = 0; + int i; + +- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, +- (*sge_ind) & (qp->sge.sge_cnt - 1)); +- + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE, + !!(wr->send_flags & IB_SEND_INLINE)); + if (wr->send_flags & IB_SEND_INLINE) +@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE, + (wr->send_flags & IB_SEND_SIGNALED) ?
1 : 0); + ++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, ++ curr_idx & (qp->sge.sge_cnt - 1)); ++ + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + if (msg_len != ATOMIC_WR_LEN) +@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, + owner_bit = + ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); + ++ /* RC and UD share the same DirectWQE field layout */ ++ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0; ++ + /* Corresponding to the QP type, wqe process separately */ + if (ibqp->qp_type == IB_QPT_RC) + ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit); +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c +index 0cad6fc7bf32c3..26784b296ffa6b 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c +@@ -654,7 +654,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev, + + hr_qp->sq.wqe_shift = ucmd->log_sq_stride; + hr_qp->sq.wqe_cnt = cnt; +- cap->max_send_sge = hr_qp->sq.max_gs; + + return 0; + } +@@ -736,7 +735,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, + + /* sync the parameters of kernel QP to user's configuration */ + cap->max_send_wr = cnt; +- cap->max_send_sge = hr_qp->sq.max_gs; + + return 0; + } +diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c +index c0bef11436b940..fa096557adc83b 100644 +--- a/drivers/infiniband/hw/irdma/pble.c ++++ b/drivers/infiniband/hw/irdma/pble.c +@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev, + static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct sd_pd_idx *idx) + { +- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; ++ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; + idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE); + idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD); + } +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c +index 38cecb28d322e4..29540b2b2373c8 100644 +--- a/drivers/infiniband/hw/irdma/verbs.c ++++ b/drivers/infiniband/hw/irdma/verbs.c +@@ -2077,6 +2077,7 @@ static int irdma_create_cq(struct ib_cq *ibcq, + spin_lock_init(&iwcq->lock); + INIT_LIST_HEAD(&iwcq->resize_list); + INIT_LIST_HEAD(&iwcq->cmpl_generated); ++ iwcq->cq_num = cq_num; + info.dev = dev; + ukinfo->cq_size = max(entries, 4); + ukinfo->cq_id = cq_num; +@@ -2115,8 +2116,6 @@ static int irdma_create_cq(struct ib_cq *ibcq, + goto cq_free_rsrc; + } + +- iwcq->iwpbl = iwpbl; +- iwcq->cq_mem_size = 0; + cqmr = &iwpbl->cq_mr; + + if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & +@@ -2131,7 +2130,6 @@ static int irdma_create_cq(struct ib_cq *ibcq, + err_code = -EPROTO; + goto cq_free_rsrc; + } +- iwcq->iwpbl_shadow = iwpbl_shadow; + cqmr_shadow = &iwpbl_shadow->cq_mr; + info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; + cqmr->split = true; +diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h +index 20297a14c9a61d..bb9ab945938e05 100644 +--- a/drivers/infiniband/hw/irdma/verbs.h ++++ b/drivers/infiniband/hw/irdma/verbs.h +@@ -113,21 +113,15 @@ struct irdma_mr { + struct irdma_cq { + struct ib_cq ibcq; + struct irdma_sc_cq sc_cq; +- u16 cq_head; +- u16 cq_size; +- u16 cq_num; ++ u32 cq_num; + bool user_mode; + atomic_t armed; + enum irdma_cmpl_notify last_notify; +- u32 polled_cmpls; +- u32 cq_mem_size; + struct irdma_dma_mem kmem; + struct irdma_dma_mem 
kmem_shadow; + struct completion free_cq; + refcount_t refcnt; + spinlock_t lock; /* for poll cq */ +- struct irdma_pbl *iwpbl; +- struct irdma_pbl *iwpbl_shadow; + struct list_head resize_list; + struct irdma_cq_poll_info cur_cqe; + struct list_head cmpl_generated; +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c +index 431cea41df2af1..1897619209f147 100644 +--- a/drivers/iommu/amd/init.c ++++ b/drivers/iommu/amd/init.c +@@ -840,11 +840,16 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) + + BUG_ON(iommu->cmd_buf == NULL); + +- entry = iommu_virt_to_phys(iommu->cmd_buf); +- entry |= MMIO_CMD_SIZE_512; +- +- memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, +- &entry, sizeof(entry)); ++ if (!is_kdump_kernel()) { ++ /* ++ * The command buffer is re-used for the kdump kernel, so ++ * setting the MMIO register is not required. ++ */ ++ entry = iommu_virt_to_phys(iommu->cmd_buf); ++ entry |= MMIO_CMD_SIZE_512; ++ memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, ++ &entry, sizeof(entry)); ++ } + + amd_iommu_reset_cmd_buffer(iommu); + } +@@ -893,10 +898,15 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu) + + BUG_ON(iommu->evt_buf == NULL); + +- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; +- +- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, +- &entry, sizeof(entry)); ++ if (!is_kdump_kernel()) { ++ /* ++ * The event buffer is re-used for the kdump kernel, so ++ * setting the MMIO register is not required. ++ */ ++ entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; ++ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, ++ &entry, sizeof(entry)); ++ } + + /* set head and tail to zero manually */ + writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); +diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c +index 0b892750842746..0ea450cbb77868 100644 +--- a/drivers/iommu/apple-dart.c ++++ b/drivers/iommu/apple-dart.c +@@ -121,6 +121,8 @@ + #define DART_T8110_ERROR_ADDR_LO 0x170 + #define DART_T8110_ERROR_ADDR_HI 0x174 + ++#define DART_T8110_ERROR_STREAMS 0x1c0 ++ + #define DART_T8110_PROTECT 0x200 + #define DART_T8110_UNPROTECT 0x204 + #define DART_T8110_PROTECT_LOCK 0x208 +@@ -1041,6 +1043,9 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev) + error, stream_idx, error_code, fault_name, addr); + + writel(error, dart->regs + DART_T8110_ERROR); ++ for (int i = 0; i < BITS_TO_U32(dart->num_streams); i++) ++ writel(U32_MAX, dart->regs + DART_T8110_ERROR_STREAMS + 4 * i); ++ + return IRQ_HANDLED; + } + +diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c +index 1f925285104eee..aa9cfcb6039e24 100644 +--- a/drivers/iommu/intel/debugfs.c ++++ b/drivers/iommu/intel/debugfs.c +@@ -562,17 +562,11 @@ DEFINE_SHOW_ATTRIBUTE(ir_translation_struct); + static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu, + struct dmar_drhd_unit *drhd) + { +- int ret; +- + seq_printf(m, "IOMMU: %s Register Base Address: %llx\n", + iommu->name, drhd->reg_base_addr); + +- ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE); +- if (ret < 0) +- seq_puts(m, "Failed to get latency snapshot"); +- else +- seq_puts(m, debug_buf); +- seq_puts(m, "\n"); ++ dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE); ++ seq_printf(m, "%s\n", debug_buf); + } + + static int latency_show(struct seq_file *m, void *v) +diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c +index 94ee70ac38e301..ae64e1123f2571 100644 +--- a/drivers/iommu/intel/perf.c ++++ b/drivers/iommu/intel/perf.c
+@@ -113,7 +113,7 @@ static char *latency_type_names[] = { + " svm_prq" + }; + +-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) ++void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) + { + struct latency_statistic *lstat = iommu->perf_statistic; + unsigned long flags; +@@ -122,7 +122,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) + memset(str, 0, size); + + for (i = 0; i < COUNTS_NUM; i++) +- bytes += snprintf(str + bytes, size - bytes, ++ bytes += scnprintf(str + bytes, size - bytes, + "%s", latency_counter_names[i]); + + spin_lock_irqsave(&latency_lock, flags); +@@ -130,7 +130,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) + if (!dmar_latency_enabled(iommu, i)) + continue; + +- bytes += snprintf(str + bytes, size - bytes, ++ bytes += scnprintf(str + bytes, size - bytes, + "\n%s", latency_type_names[i]); + + for (j = 0; j < COUNTS_NUM; j++) { +@@ -156,11 +156,9 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) + break; + } + +- bytes += snprintf(str + bytes, size - bytes, ++ bytes += scnprintf(str + bytes, size - bytes, + "%12lld", val); + } + } + spin_unlock_irqrestore(&latency_lock, flags); +- +- return bytes; + } +diff --git a/drivers/iommu/intel/perf.h b/drivers/iommu/intel/perf.h +index fd6db8049d1a77..1e481e9e4ad04d 100644 +--- a/drivers/iommu/intel/perf.h ++++ b/drivers/iommu/intel/perf.h +@@ -41,7 +41,7 @@ void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type); + bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type); + void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, + u64 latency); +-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size); ++void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size); + #else + static inline int + dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type) +@@ -65,9 +65,8 @@ dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 laten + { + } + +-static inline int ++static inline void + dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) + { +- return 0; + } + #endif /* CONFIG_DMAR_PERF */ +diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c +index f058405c5fbb66..6bd37343061e00 100644 +--- a/drivers/iommu/iommufd/io_pagetable.c ++++ b/drivers/iommu/iommufd/io_pagetable.c +@@ -488,7 +488,8 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start, + struct iopt_area *area; + unsigned long unmapped_bytes = 0; + unsigned int tries = 0; +- int rc = -ENOENT; ++ /* If there are no mapped entries then success */ ++ int rc = 0; + + /* + * The domains_rwsem must be held in read mode any time any area->pages +@@ -552,8 +553,6 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start, + + down_write(&iopt->iova_rwsem); + } +- if (unmapped_bytes) +- rc = 0; + + out_unlock_iova: + up_write(&iopt->iova_rwsem); +@@ -590,13 +589,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, + + int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped) + { +- int rc; +- +- rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped); + /* If the IOVAs are empty then unmap all succeeds */ +- if (rc == -ENOENT) +- return 0; +- return rc; ++ return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped); + } + + /* The caller must always free all the 
nodes in the allowed_iova rb_root. */ +diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c +index 0407e2b758ef43..18bbbeef5cccd2 100644 +--- a/drivers/iommu/iommufd/ioas.c ++++ b/drivers/iommu/iommufd/ioas.c +@@ -317,6 +317,10 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd) + &unmapped); + if (rc) + goto out_put; ++ if (!unmapped) { ++ rc = -ENOENT; ++ goto out_put; ++ } + } + + cmd->length = unmapped; +diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c +index 683e8721e3b498..4bce4758241718 100644 +--- a/drivers/irqchip/irq-gic-v2m.c ++++ b/drivers/irqchip/irq-gic-v2m.c +@@ -179,14 +179,19 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + { + msi_alloc_info_t *info = args; + struct v2m_data *v2m = NULL, *tmp; +- int hwirq, offset, i, err = 0; ++ int hwirq, i, err = 0; ++ unsigned long offset; ++ unsigned long align_mask = nr_irqs - 1; + + spin_lock(&v2m_lock); + list_for_each_entry(tmp, &v2m_nodes, entry) { +- offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis, +- get_count_order(nr_irqs)); +- if (offset >= 0) { ++ unsigned long align_off = tmp->spi_start - (tmp->spi_start & ~align_mask); ++ ++ offset = bitmap_find_next_zero_area_off(tmp->bm, tmp->nr_spis, 0, ++ nr_irqs, align_mask, align_off); ++ if (offset < tmp->nr_spis) { + v2m = tmp; ++ bitmap_set(v2m->bm, offset, nr_irqs); + break; + } + } +diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c +index 9b35492fb6be9e..1e9fcea1826f8f 100644 +--- a/drivers/irqchip/irq-loongson-pch-lpc.c ++++ b/drivers/irqchip/irq-loongson-pch-lpc.c +@@ -198,8 +198,13 @@ int __init pch_lpc_acpi_init(struct irq_domain *parent, + goto iounmap_base; + } + +- priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT, +- &pch_lpc_domain_ops, priv); ++ /* ++ * The LPC interrupt controller is a legacy i8259-compatible device, ++ * which requires a static 1:1 mapping for IRQs 0-15. ++ * Use irq_domain_create_legacy to establish this static mapping early. 
++ */ ++ priv->lpc_domain = irq_domain_create_legacy(irq_handle, LPC_COUNT, 0, 0, ++ &pch_lpc_domain_ops, priv); + if (!priv->lpc_domain) { + pr_err("Failed to create IRQ domain\n"); + goto free_irq_handle; +diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c +index 627beae9649a21..84cb9cda365ada 100644 +--- a/drivers/irqchip/irq-riscv-intc.c ++++ b/drivers/irqchip/irq-riscv-intc.c +@@ -149,7 +149,8 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain, + static const struct irq_domain_ops riscv_intc_domain_ops = { + .map = riscv_intc_domain_map, + .xlate = irq_domain_xlate_onecell, +- .alloc = riscv_intc_domain_alloc ++ .alloc = riscv_intc_domain_alloc, ++ .free = irq_domain_free_irqs_top, + }; + + static struct fwnode_handle *riscv_intc_hwnode(void) +diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c +index 2d20cf9d84cead..a8f5cfad16f7de 100644 +--- a/drivers/irqchip/irq-sifive-plic.c ++++ b/drivers/irqchip/irq-sifive-plic.c +@@ -176,12 +176,14 @@ static int plic_set_affinity(struct irq_data *d, + if (cpu >= nr_cpu_ids) + return -EINVAL; + +- plic_irq_disable(d); ++ /* Invalidate the original routing entry */ ++ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + ++ /* Setting the new routing entry if irq is enabled */ + if (!irqd_irq_disabled(d)) +- plic_irq_enable(d); ++ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); + + return IRQ_SET_MASK_OK_DONE; + } +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c +index b82b89888a5e04..c55438e1f67801 100644 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c +@@ -1903,13 +1903,13 @@ setup_instance(struct hfcsusb *hw, struct device *parent) + mISDN_freebchannel(&hw->bch[1]); + mISDN_freebchannel(&hw->bch[0]); + mISDN_freedchannel(&hw->dch); +- kfree(hw); + return err; + } + + static int + hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) + { ++ int err; + struct hfcsusb *hw; + struct usb_device *dev = interface_to_usbdev(intf); + struct usb_host_interface *iface = intf->cur_altsetting; +@@ -2100,20 +2100,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) + if (!hw->ctrl_urb) { + pr_warn("%s: No memory for control urb\n", + driver_info->vend_name); +- kfree(hw); +- return -ENOMEM; ++ err = -ENOMEM; ++ goto err_free_hw; + } + + pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n", + hw->name, __func__, driver_info->vend_name, + conf_str[small_match], ifnum, alt_used); + +- if (setup_instance(hw, dev->dev.parent)) +- return -EIO; ++ if (setup_instance(hw, dev->dev.parent)) { ++ err = -EIO; ++ goto err_free_urb; ++ } + + hw->intf = intf; + usb_set_intfdata(hw->intf, hw); + return 0; ++ ++err_free_urb: ++ usb_free_urb(hw->ctrl_urb); ++err_free_hw: ++ kfree(hw); ++ return err; + } + + /* function called when an active device is removed */ +diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig +index 53b443be5a59ee..d4082a86fcedb5 100644 +--- a/drivers/media/i2c/Kconfig ++++ b/drivers/media/i2c/Kconfig +@@ -27,7 +27,7 @@ config VIDEO_IR_I2C + + menuconfig VIDEO_CAMERA_SENSOR + bool "Camera sensor devices" +- depends on MEDIA_CAMERA_SUPPORT && I2C ++ depends on MEDIA_CAMERA_SUPPORT && I2C && HAVE_CLK + select MEDIA_CONTROLLER + select V4L2_FWNODE + select VIDEO_V4L2_SUBDEV_API +diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c 
+index 114ac0c263fb2b..ecb0e7b1f2a5fc 100644 +--- a/drivers/media/i2c/adv7180.c ++++ b/drivers/media/i2c/adv7180.c +@@ -356,32 +356,27 @@ static inline struct adv7180_state *to_state(struct v4l2_subdev *sd) + static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) + { + struct adv7180_state *state = to_state(sd); +- int err = mutex_lock_interruptible(&state->mutex); +- if (err) +- return err; +- +- if (state->streaming) { +- err = -EBUSY; +- goto unlock; +- } ++ int ret; + +- err = adv7180_set_video_standard(state, +- ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM); +- if (err) +- goto unlock; ++ guard(mutex)(&state->mutex); + +- msleep(100); +- __adv7180_status(state, NULL, std); ++ /* ++ * We can't sample the standard if the device is streaming as that would ++ * interfere with the capture session as the VID_SEL reg is touched. ++ */ ++ if (state->streaming) ++ return -EBUSY; + +- err = v4l2_std_to_adv7180(state->curr_norm); +- if (err < 0) +- goto unlock; ++ /* Set the standard to autodetect PAL B/G/H/I/D, NTSC J or SECAM */ ++ ret = adv7180_set_video_standard(state, ++ ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM); ++ if (ret) ++ return ret; + +- err = adv7180_set_video_standard(state, err); ++ /* Allow some time for the autodetection to run. */ ++ msleep(100); + +-unlock: +- mutex_unlock(&state->mutex); +- return err; ++ return __adv7180_status(state, NULL, std); + } + + static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input, +@@ -803,12 +798,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd, + ret = adv7180_mbus_fmt(sd, &format->format); + + if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) { +- if (state->field != format->format.field) { +- state->field = format->format.field; +- adv7180_set_power(state, false); +- adv7180_set_field_mode(state); +- adv7180_set_power(state, true); +- } ++ state->field = format->format.field; + } else { + framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0); + *framefmt = format->format; +@@ -1568,6 +1558,8 @@ static int adv7180_suspend(struct device *dev) + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct adv7180_state *state = to_state(sd); + ++ guard(mutex)(&state->mutex); ++ + return adv7180_set_power(state, false); + } + +@@ -1581,6 +1573,8 @@ static int adv7180_resume(struct device *dev) + if (ret < 0) + return ret; + ++ guard(mutex)(&state->mutex); ++ + ret = adv7180_set_power(state, state->powered); + if (ret) + return ret; +diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c +index b37a2aaf8ac047..a8026f0f980f98 100644 +--- a/drivers/media/i2c/ir-kbd-i2c.c ++++ b/drivers/media/i2c/ir-kbd-i2c.c +@@ -321,9 +321,9 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol, + + static int ir_key_poll(struct IR_i2c *ir) + { +- enum rc_proto protocol; +- u32 scancode; +- u8 toggle; ++ enum rc_proto protocol = 0; ++ u32 scancode = 0; ++ u8 toggle = 0; + int rc; + + dev_dbg(&ir->rc->dev, "%s\n", __func__); +diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c +index 365ce568458360..9bd204911651ea 100644 +--- a/drivers/media/i2c/og01a1b.c ++++ b/drivers/media/i2c/og01a1b.c +@@ -676,7 +676,7 @@ static void og01a1b_update_pad_format(const struct og01a1b_mode *mode, + { + fmt->width = mode->width; + fmt->height = mode->height; +- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; ++ fmt->code = MEDIA_BUS_FMT_Y10_1X10; + fmt->field = V4L2_FIELD_NONE; + } + +@@ -867,7 +867,7 @@ static int og01a1b_enum_mbus_code(struct v4l2_subdev *sd, + if (code->index > 0) + return -EINVAL; + +- 
code->code = MEDIA_BUS_FMT_SGRBG10_1X10; ++ code->code = MEDIA_BUS_FMT_Y10_1X10; + + return 0; + } +@@ -879,7 +879,7 @@ static int og01a1b_enum_frame_size(struct v4l2_subdev *sd, + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + +- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) ++ if (fse->code != MEDIA_BUS_FMT_Y10_1X10) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; +diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c +index 637da4df69011d..4d15fa6ac311e6 100644 +--- a/drivers/media/i2c/ov08x40.c ++++ b/drivers/media/i2c/ov08x40.c +@@ -2643,7 +2643,7 @@ static int ov08x40_set_ctrl_hflip(struct ov08x40 *ov08x, u32 ctrl_val) + + return ov08x40_write_reg(ov08x, OV08X40_REG_MIRROR, + OV08X40_REG_VALUE_08BIT, +- ctrl_val ? val | BIT(2) : val & ~BIT(2)); ++ ctrl_val ? val & ~BIT(2) : val | BIT(2)); + } + + static int ov08x40_set_ctrl_vflip(struct ov08x40 *ov08x, u32 ctrl_val) +diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c +index 8f346d7da9c8de..269a799ec046c6 100644 +--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c ++++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c +@@ -148,14 +148,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream) + + s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM]; + +- v4l2_fh_init(&item.fh, &s->vdev); + item.itv = itv; + item.type = s->type; + + /* See if the stream is available */ + if (ivtv_claim_stream(&item, item.type)) { + /* No, it's already in use */ +- v4l2_fh_exit(&item.fh); + snd_ivtv_unlock(itvsc); + return -EBUSY; + } +diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h +index ce3a7ca51736e5..df2dcef1af3f01 100644 +--- a/drivers/media/pci/ivtv/ivtv-driver.h ++++ b/drivers/media/pci/ivtv/ivtv-driver.h +@@ -322,6 +322,7 @@ struct ivtv_queue { + }; + + struct ivtv; /* forward reference */ ++struct ivtv_open_id; + + struct ivtv_stream { + /* These first four fields are always set, even if the stream +@@ -331,7 +332,7 @@ struct ivtv_stream { + const char *name; /* name of the stream */ + int type; /* stream type */ + +- struct v4l2_fh *fh; /* pointer to the streaming filehandle */ ++ struct ivtv_open_id *id; /* pointer to the streaming ivtv_open_id */ + spinlock_t qlock; /* locks access to the queues */ + unsigned long s_flags; /* status flags, see above */ + int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */ +diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c +index 4202c3a47d33e6..7ed0d2d85253e5 100644 +--- a/drivers/media/pci/ivtv/ivtv-fileops.c ++++ b/drivers/media/pci/ivtv/ivtv-fileops.c +@@ -38,16 +38,16 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type) + + if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { + /* someone already claimed this stream */ +- if (s->fh == &id->fh) { ++ if (s->id == id) { + /* yes, this file descriptor did. So that's OK. */ + return 0; + } +- if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI || ++ if (s->id == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI || + type == IVTV_ENC_STREAM_TYPE_VBI)) { + /* VBI is handled already internally, now also assign + the file descriptor to this stream for external + reading of the stream. 
*/ +- s->fh = &id->fh; ++ s->id = id; + IVTV_DEBUG_INFO("Start Read VBI\n"); + return 0; + } +@@ -55,7 +55,7 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type) + IVTV_DEBUG_INFO("Stream %d is busy\n", type); + return -EBUSY; + } +- s->fh = &id->fh; ++ s->id = id; + if (type == IVTV_DEC_STREAM_TYPE_VBI) { + /* Enable reinsertion interrupt */ + ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); +@@ -93,7 +93,7 @@ void ivtv_release_stream(struct ivtv_stream *s) + struct ivtv *itv = s->itv; + struct ivtv_stream *s_vbi; + +- s->fh = NULL; ++ s->id = NULL; + if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) && + test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { + /* this stream is still in use internally */ +@@ -125,7 +125,7 @@ void ivtv_release_stream(struct ivtv_stream *s) + /* was already cleared */ + return; + } +- if (s_vbi->fh) { ++ if (s_vbi->id) { + /* VBI stream still claimed by a file descriptor */ + return; + } +@@ -349,7 +349,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co + size_t tot_written = 0; + int single_frame = 0; + +- if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) { ++ if (atomic_read(&itv->capturing) == 0 && s->id == NULL) { + /* shouldn't happen */ + IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name); + return -EIO; +@@ -819,7 +819,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end) + id->type == IVTV_ENC_STREAM_TYPE_VBI) && + test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { + /* Also used internally, don't stop capturing */ +- s->fh = NULL; ++ s->id = NULL; + } + else { + ivtv_stop_v4l2_encode_stream(s, gop_end); +@@ -903,7 +903,7 @@ int ivtv_v4l2_close(struct file *filp) + v4l2_fh_exit(fh); + + /* Easy case first: this stream was never claimed by us */ +- if (s->fh != &id->fh) ++ if (s->id != id) + goto close_done; + + /* 'Unclaim' this stream */ +diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c +index e39bf64c5c715b..404335e5aff4ec 100644 +--- a/drivers/media/pci/ivtv/ivtv-irq.c ++++ b/drivers/media/pci/ivtv/ivtv-irq.c +@@ -305,7 +305,7 @@ static void dma_post(struct ivtv_stream *s) + ivtv_process_vbi_data(itv, buf, 0, s->type); + s->q_dma.bytesused += buf->bytesused; + } +- if (s->fh == NULL) { ++ if (s->id == NULL) { + ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); + return; + } +@@ -330,7 +330,7 @@ static void dma_post(struct ivtv_stream *s) + set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); + } + +- if (s->fh) ++ if (s->id) + wake_up(&s->waitq); + } + +diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c +index 61d27b63b99d47..75084ba8f93b63 100644 +--- a/drivers/media/platform/amphion/vpu_v4l2.c ++++ b/drivers/media/platform/amphion/vpu_v4l2.c +@@ -693,8 +693,6 @@ static int vpu_v4l2_release(struct vpu_inst *inst) + + v4l2_ctrl_handler_free(&inst->ctrl_handler); + mutex_destroy(&inst->lock); +- v4l2_fh_del(&inst->fh); +- v4l2_fh_exit(&inst->fh); + + call_void_vop(inst, cleanup); + +@@ -763,6 +761,8 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst) + + return 0; + error: ++ v4l2_fh_del(&inst->fh); ++ v4l2_fh_exit(&inst->fh); + vpu_inst_put(inst); + return ret; + } +@@ -782,6 +782,9 @@ int vpu_v4l2_close(struct file *file) + call_void_vop(inst, release); + vpu_inst_unlock(inst); + ++ v4l2_fh_del(&inst->fh); ++ v4l2_fh_exit(&inst->fh); ++ + vpu_inst_unregister(inst); + vpu_inst_put(inst); + +diff --git a/drivers/media/platform/verisilicon/hantro_drv.c 
b/drivers/media/platform/verisilicon/hantro_drv.c +index 1874c976081f8e..35833ee8beb517 100644 +--- a/drivers/media/platform/verisilicon/hantro_drv.c ++++ b/drivers/media/platform/verisilicon/hantro_drv.c +@@ -910,6 +910,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid) + vpu->decoder = func; + v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD); + v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD); ++ v4l2_disable_ioctl(vfd, VIDIOC_G_SELECTION); ++ v4l2_disable_ioctl(vfd, VIDIOC_S_SELECTION); + } + + video_set_drvdata(vfd, vpu); +diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c +index db145519fc5d38..52009c5b0db4b5 100644 +--- a/drivers/media/platform/verisilicon/hantro_v4l2.c ++++ b/drivers/media/platform/verisilicon/hantro_v4l2.c +@@ -655,8 +655,7 @@ static int vidioc_g_selection(struct file *file, void *priv, + struct hantro_ctx *ctx = fh_to_ctx(priv); + + /* Crop only supported on source. */ +- if (!ctx->is_encoder || +- sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) ++ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (sel->target) { +@@ -688,8 +687,7 @@ static int vidioc_s_selection(struct file *file, void *priv, + struct vb2_queue *vq; + + /* Crop only supported on source. */ +- if (!ctx->is_encoder || +- sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) ++ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + /* Change not allowed if the queue is streaming. */ +diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c +index f4deca8894e0f7..bb4aabb08c06ef 100644 +--- a/drivers/media/rc/imon.c ++++ b/drivers/media/rc/imon.c +@@ -650,12 +650,15 @@ static int send_packet(struct imon_context *ictx) + smp_rmb(); /* ensure later readers know we're not busy */ + pr_err_ratelimited("error submitting urb(%d)\n", retval); + } else { +- /* Wait for transmission to complete (or abort) */ +- retval = wait_for_completion_interruptible( +- &ictx->tx.finished); +- if (retval) { ++ /* Wait for transmission to complete (or abort or timeout) */ ++ retval = wait_for_completion_interruptible_timeout(&ictx->tx.finished, 10 * HZ); ++ if (retval <= 0) { + usb_kill_urb(ictx->tx_urb); + pr_err_ratelimited("task interrupted\n"); ++ if (retval < 0) ++ ictx->tx.status = retval; ++ else ++ ictx->tx.status = -ETIMEDOUT; + } + + ictx->tx.busy = false; +@@ -1754,14 +1757,6 @@ static void usb_rx_callback_intf0(struct urb *urb) + if (!ictx) + return; + +- /* +- * if we get a callback before we're done configuring the hardware, we +- * can't yet process the data, as there's nowhere to send it, but we +- * still need to submit a new rx URB to avoid wedging the hardware +- */ +- if (!ictx->dev_present_intf0) +- goto out; +- + switch (urb->status) { + case -ENOENT: /* usbcore unlink successful! 
*/ + return; +@@ -1770,16 +1765,29 @@ static void usb_rx_callback_intf0(struct urb *urb) + break; + + case 0: +- imon_incoming_packet(ictx, urb, intfnum); ++ /* ++ * if we get a callback before we're done configuring the hardware, we ++ * can't yet process the data, as there's nowhere to send it, but we ++ * still need to submit a new rx URB to avoid wedging the hardware ++ */ ++ if (ictx->dev_present_intf0) ++ imon_incoming_packet(ictx, urb, intfnum); + break; + ++ case -ECONNRESET: ++ case -EILSEQ: ++ case -EPROTO: ++ case -EPIPE: ++ dev_warn(ictx->dev, "imon %s: status(%d)\n", ++ __func__, urb->status); ++ return; ++ + default: + dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", + __func__, urb->status); + break; + } + +-out: + usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC); + } + +@@ -1795,14 +1803,6 @@ static void usb_rx_callback_intf1(struct urb *urb) + if (!ictx) + return; + +- /* +- * if we get a callback before we're done configuring the hardware, we +- * can't yet process the data, as there's nowhere to send it, but we +- * still need to submit a new rx URB to avoid wedging the hardware +- */ +- if (!ictx->dev_present_intf1) +- goto out; +- + switch (urb->status) { + case -ENOENT: /* usbcore unlink successful! */ + return; +@@ -1811,16 +1811,29 @@ static void usb_rx_callback_intf1(struct urb *urb) + break; + + case 0: +- imon_incoming_packet(ictx, urb, intfnum); ++ /* ++ * if we get a callback before we're done configuring the hardware, we ++ * can't yet process the data, as there's nowhere to send it, but we ++ * still need to submit a new rx URB to avoid wedging the hardware ++ */ ++ if (ictx->dev_present_intf1) ++ imon_incoming_packet(ictx, urb, intfnum); + break; + ++ case -ECONNRESET: ++ case -EILSEQ: ++ case -EPROTO: ++ case -EPIPE: ++ dev_warn(ictx->dev, "imon %s: status(%d)\n", ++ __func__, urb->status); ++ return; ++ + default: + dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", + __func__, urb->status); + break; + } + +-out: + usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC); + } + +diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c +index 9f2947af33aa7c..880981e1c507e1 100644 +--- a/drivers/media/rc/redrat3.c ++++ b/drivers/media/rc/redrat3.c +@@ -422,7 +422,7 @@ static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3) + static int redrat3_enable_detector(struct redrat3_dev *rr3) + { + struct device *dev = rr3->dev; +- u8 ret; ++ int ret; + + ret = redrat3_send_cmd(RR3_RC_DET_ENABLE, rr3); + if (ret != 0) +diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c +index 29bc63021c5aae..6fb3550811a283 100644 +--- a/drivers/media/tuners/xc4000.c ++++ b/drivers/media/tuners/xc4000.c +@@ -1087,12 +1087,12 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type, + + static void xc_debug_dump(struct xc4000_priv *priv) + { +- u16 adc_envelope; ++ u16 adc_envelope = 0; + u32 freq_error_hz = 0; +- u16 lock_status; ++ u16 lock_status = 0; + u32 hsync_freq_hz = 0; +- u16 frame_lines; +- u16 quality; ++ u16 frame_lines = 0; ++ u16 quality = 0; + u16 signal = 0; + u16 noise = 0; + u8 hw_majorversion = 0, hw_minorversion = 0; +diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c +index ec9a3cd4784e1f..a28481edd22ed0 100644 +--- a/drivers/media/tuners/xc5000.c ++++ b/drivers/media/tuners/xc5000.c +@@ -622,14 +622,14 @@ static int xc5000_fwupload(struct dvb_frontend *fe, + + static void xc_debug_dump(struct xc5000_priv *priv) + { +- u16 adc_envelope; ++ u16 adc_envelope = 0; + u32 freq_error_hz = 0; +- 
u16 lock_status; ++ u16 lock_status = 0; + u32 hsync_freq_hz = 0; +- u16 frame_lines; +- u16 quality; +- u16 snr; +- u16 totalgain; ++ u16 frame_lines = 0; ++ u16 quality = 0; ++ u16 snr = 0; ++ u16 totalgain = 0; + u8 hw_majorversion = 0, hw_minorversion = 0; + u8 fw_majorversion = 0, fw_minorversion = 0; + u16 fw_buildversion = 0; +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index c7cee6b185264d..a4b9a7ca4e50f9 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -165,13 +165,26 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev, + + static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) + { +- struct uvc_streaming *stream; ++ struct uvc_streaming *stream, *last_stream; ++ unsigned int count = 0; + + list_for_each_entry(stream, &dev->streams, list) { ++ count += 1; ++ last_stream = stream; + if (stream->header.bTerminalLink == id) + return stream; + } + ++ /* ++ * If the streaming entity is referenced by an invalid ID, notify the ++ * user and use heuristics to guess the correct entity. ++ */ ++ if (count == 1 && id == UVC_INVALID_ENTITY_ID) { ++ dev_warn(&dev->intf->dev, ++ "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one."); ++ return last_stream; ++ } ++ + return NULL; + } + +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c +index e0895e979e35b3..65af43201b6933 100644 +--- a/drivers/memstick/core/memstick.c ++++ b/drivers/memstick/core/memstick.c +@@ -367,7 +367,9 @@ int memstick_set_rw_addr(struct memstick_dev *card) + { + card->next_request = h_memstick_set_rw_addr; + memstick_new_req(card->host); +- wait_for_completion(&card->mrq_complete); ++ if (!wait_for_completion_timeout(&card->mrq_complete, ++ msecs_to_jiffies(500))) ++ card->current_mrq.error = -ETIMEDOUT; + + return card->current_mrq.error; + } +@@ -401,7 +403,9 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host) + + card->next_request = h_memstick_read_dev_id; + memstick_new_req(host); +- wait_for_completion(&card->mrq_complete); ++ if (!wait_for_completion_timeout(&card->mrq_complete, ++ msecs_to_jiffies(500))) ++ card->current_mrq.error = -ETIMEDOUT; + + if (card->current_mrq.error) + goto err_out; +diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c +index d715cf9a9e6883..7b829bfe04bc7b 100644 +--- a/drivers/mfd/da9063-i2c.c ++++ b/drivers/mfd/da9063-i2c.c +@@ -37,9 +37,13 @@ enum da9063_page_sel_buf_fmt { + DA9063_PAGE_SEL_BUF_SIZE, + }; + ++enum da9063_page_sel_msgs { ++ DA9063_PAGE_SEL_MSG = 0, ++ DA9063_PAGE_SEL_CNT, ++}; ++ + enum da9063_paged_read_msgs { +- DA9063_PAGED_READ_MSG_PAGE_SEL = 0, +- DA9063_PAGED_READ_MSG_REG_SEL, ++ DA9063_PAGED_READ_MSG_REG_SEL = 0, + DA9063_PAGED_READ_MSG_DATA, + DA9063_PAGED_READ_MSG_CNT, + }; +@@ -65,10 +69,21 @@ static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr, + (page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK; + + /* Write reg address, page selection */ +- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr; +- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0; +- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE; +- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf; ++ xfer[DA9063_PAGE_SEL_MSG].addr = client->addr; ++ xfer[DA9063_PAGE_SEL_MSG].flags = 0; ++ xfer[DA9063_PAGE_SEL_MSG].len = DA9063_PAGE_SEL_BUF_SIZE; ++ xfer[DA9063_PAGE_SEL_MSG].buf = 
page_sel_buf; ++ ++ ret = i2c_transfer(client->adapter, xfer, DA9063_PAGE_SEL_CNT); ++ if (ret < 0) { ++ dev_err(&client->dev, "Page switch failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (ret != DA9063_PAGE_SEL_CNT) { ++ dev_err(&client->dev, "Page switch failed to complete\n"); ++ return -EIO; ++ } + + /* Select register address */ + xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr; +diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c +index bdbd5bfc971456..2f74a8c644a32a 100644 +--- a/drivers/mfd/madera-core.c ++++ b/drivers/mfd/madera-core.c +@@ -456,7 +456,7 @@ int madera_dev_init(struct madera *madera) + struct device *dev = madera->dev; + unsigned int hwid; + int (*patch_fn)(struct madera *) = NULL; +- const struct mfd_cell *mfd_devs; ++ const struct mfd_cell *mfd_devs = NULL; + int n_devs = 0; + int i, ret; + +@@ -670,7 +670,7 @@ int madera_dev_init(struct madera *madera) + goto err_reset; + } + +- if (!n_devs) { ++ if (!n_devs || !mfd_devs) { + dev_err(madera->dev, "Device ID 0x%x not a %s\n", hwid, + madera->type_name); + ret = -ENODEV; +diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c +index fe018bedab9837..7e2ca397588250 100644 +--- a/drivers/mfd/stmpe-i2c.c ++++ b/drivers/mfd/stmpe-i2c.c +@@ -137,3 +137,4 @@ module_exit(stmpe_exit); + + MODULE_DESCRIPTION("STMPE MFD I2C Interface Driver"); + MODULE_AUTHOR("Rabin Vincent "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c +index 9c3cf58457a7db..be6a84a3062cce 100644 +--- a/drivers/mfd/stmpe.c ++++ b/drivers/mfd/stmpe.c +@@ -1485,6 +1485,9 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum) + + void stmpe_remove(struct stmpe *stmpe) + { ++ if (stmpe->domain) ++ irq_domain_remove(stmpe->domain); ++ + if (!IS_ERR(stmpe->vio) && regulator_is_enabled(stmpe->vio)) + regulator_disable(stmpe->vio); + if (!IS_ERR(stmpe->vcc) && regulator_is_enabled(stmpe->vcc)) +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c +index 597b00e8c9539d..cffacf4434b55f 100644 +--- a/drivers/mmc/host/renesas_sdhi_core.c ++++ b/drivers/mmc/host/renesas_sdhi_core.c +@@ -220,7 +220,11 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host, + clk &= ~0xff; + } + +- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); ++ clock = clk & CLK_CTL_DIV_MASK; ++ if (clock != 0xff) ++ host->mmc->actual_clock /= (1 << (ffs(clock) + 1)); ++ ++ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clock); + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); + +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index c2144a3efb308e..74234ee5f60894 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -81,6 +81,7 @@ + #define CORE_IO_PAD_PWR_SWITCH_EN BIT(15) + #define CORE_IO_PAD_PWR_SWITCH BIT(16) + #define CORE_HC_SELECT_IN_EN BIT(18) ++#define CORE_HC_SELECT_IN_SDR50 (4 << 19) + #define CORE_HC_SELECT_IN_HS400 (6 << 19) + #define CORE_HC_SELECT_IN_MASK (7 << 19) + +@@ -1133,6 +1134,10 @@ static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host) + { + struct mmc_ios *ios = &host->mmc->ios; + ++ if (ios->timing == MMC_TIMING_UHS_SDR50 && ++ host->flags & SDHCI_SDR50_NEEDS_TUNING) ++ return true; ++ + /* + * Tuning is required for SDR104, HS200 and HS400 cards and + * if clock frequency is greater than 100MHz in these modes. 
+@@ -1201,6 +1206,8 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) + struct mmc_ios ios = host->mmc->ios; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); ++ const struct sdhci_msm_offset *msm_offset = msm_host->offset; ++ u32 config; + + if (!sdhci_msm_is_tuning_needed(host)) { + msm_host->use_cdr = false; +@@ -1217,6 +1224,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) + */ + msm_host->tuning_done = 0; + ++ if (ios.timing == MMC_TIMING_UHS_SDR50 && ++ host->flags & SDHCI_SDR50_NEEDS_TUNING) { ++ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); ++ config &= ~CORE_HC_SELECT_IN_MASK; ++ config |= CORE_HC_SELECT_IN_EN | CORE_HC_SELECT_IN_SDR50; ++ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); ++ } ++ + /* + * For HS400 tuning in HS200 timing requires: + * - select MCLK/2 in VENDOR_SPEC +diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c +index a0524127ca073d..4c6fa92a95a6e5 100644 +--- a/drivers/mmc/host/sdhci-of-dwcmshc.c ++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c +@@ -54,7 +54,7 @@ + #define DLL_TXCLK_TAPNUM_DEFAULT 0x10 + #define DLL_TXCLK_TAPNUM_90_DEGREES 0xA + #define DLL_TXCLK_TAPNUM_FROM_SW BIT(24) +-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8 ++#define DLL_STRBIN_TAPNUM_DEFAULT 0x4 + #define DLL_STRBIN_TAPNUM_FROM_SW BIT(24) + #define DLL_STRBIN_DELAY_NUM_SEL BIT(26) + #define DLL_STRBIN_DELAY_NUM_OFFSET 16 +diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c +index fd6890a03d5571..0e21d443078e41 100644 +--- a/drivers/mtd/nand/onenand/onenand_samsung.c ++++ b/drivers/mtd/nand/onenand/onenand_samsung.c +@@ -906,7 +906,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) + err = devm_request_irq(&pdev->dev, r->start, + s5pc110_onenand_irq, + IRQF_SHARED, "onenand", +- &onenand); ++ onenand); + if (err) { + dev_err(&pdev->dev, "failed to get irq\n"); + return err; +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index b00bac4686773d..ffe8db7c2f1f40 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -349,11 +349,11 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) + * frames should be flooded or not. 
+ */ + b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); +- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; ++ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC; + b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); + } else { + b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); +- mgmt |= B53_IP_MCAST_25; ++ mgmt |= B53_IP_MC; + b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); + } + } +@@ -1215,6 +1215,10 @@ static void b53_force_port_config(struct b53_device *dev, int port, + else + reg &= ~PORT_OVERRIDE_FULL_DUPLEX; + ++ reg &= ~(0x3 << GMII_PO_SPEED_S); ++ if (is5301x(dev) || is58xx(dev)) ++ reg &= ~PORT_OVERRIDE_SPEED_2000M; ++ + switch (speed) { + case 2000: + reg |= PORT_OVERRIDE_SPEED_2000M; +@@ -1233,6 +1237,11 @@ static void b53_force_port_config(struct b53_device *dev, int port, + return; + } + ++ if (is5325(dev)) ++ reg &= ~PORT_OVERRIDE_LP_FLOW_25; ++ else ++ reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW); ++ + if (rx_pause) { + if (is5325(dev)) + reg |= PORT_OVERRIDE_LP_FLOW_25; +@@ -1807,7 +1816,7 @@ static int b53_arl_search_wait(struct b53_device *dev) + do { + b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); + if (!(reg & ARL_SRCH_STDN)) +- return 0; ++ return -ENOENT; + + if (reg & ARL_SRCH_VLID) + return 0; +diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h +index 3179fe58de6b62..38e2d60dab7d59 100644 +--- a/drivers/net/dsa/b53/b53_regs.h ++++ b/drivers/net/dsa/b53/b53_regs.h +@@ -104,8 +104,7 @@ + + /* IP Multicast control (8 bit) */ + #define B53_IP_MULTICAST_CTRL 0x21 +-#define B53_IP_MCAST_25 BIT(0) +-#define B53_IPMC_FWD_EN BIT(1) ++#define B53_IP_MC BIT(0) + #define B53_UC_FWD_EN BIT(6) + #define B53_MC_FWD_EN BIT(7) + +diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c +index 5b139f2206b6ef..48cf9d300bbf56 100644 +--- a/drivers/net/dsa/dsa_loop.c ++++ b/drivers/net/dsa/dsa_loop.c +@@ -378,13 +378,10 @@ static struct mdio_driver dsa_loop_drv = { + + static void dsa_loop_phydevs_unregister(void) + { +- unsigned int i; +- +- for (i = 0; i < NUM_FIXED_PHYS; i++) +- if (!IS_ERR(phydevs[i])) { ++ for (int i = 0; i < NUM_FIXED_PHYS; i++) { ++ if (!IS_ERR(phydevs[i])) + fixed_phy_unregister(phydevs[i]); +- phy_device_free(phydevs[i]); +- } ++ } + } + + static int __init dsa_loop_init(void) +diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c +index 59134d117846d1..a11f2c1aabacab 100644 +--- a/drivers/net/dsa/microchip/ksz9477.c ++++ b/drivers/net/dsa/microchip/ksz9477.c +@@ -1087,9 +1087,15 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds) + } + } + ++#define RESV_MCAST_CNT 8 ++ ++static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 }; ++ + int ksz9477_enable_stp_addr(struct ksz_device *dev) + { ++ u8 i, ports, update; + const u32 *masks; ++ bool override; + u32 data; + int ret; + +@@ -1098,23 +1104,87 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev) + /* Enable Reserved multicast table */ + ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true); + +- /* Set the Override bit for forwarding BPDU packet to CPU */ +- ret = ksz_write32(dev, REG_SW_ALU_VAL_B, +- ALU_V_OVERRIDE | BIT(dev->cpu_port)); +- if (ret < 0) +- return ret; ++ /* The reserved multicast address table has 8 entries. Each entry has ++ * a default value of which port to forward. It is assumed the host ++ * port is the last port in most of the switches, but that is not the ++ * case for KSZ9477 or maybe KSZ9897. 
For LAN937X family the default ++ * port is port 5, the first RGMII port. It is okay for LAN9370, a ++ * 5-port switch, but may not be correct for the other 8-port ++ * versions. It is necessary to update the whole table to forward to ++ * the right ports. ++ * Furthermore PTP messages can use a reserved multicast address and ++ * the host will not receive them if this table is not correct. ++ */ ++ for (i = 0; i < RESV_MCAST_CNT; i++) { ++ data = reserved_mcast_map[i] << ++ dev->info->shifts[ALU_STAT_INDEX]; ++ data |= ALU_STAT_START | ++ masks[ALU_STAT_DIRECT] | ++ masks[ALU_RESV_MCAST_ADDR] | ++ masks[ALU_STAT_READ]; ++ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); ++ if (ret < 0) ++ return ret; + +- data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE]; ++ /* wait to be finished */ ++ ret = ksz9477_wait_alu_sta_ready(dev); ++ if (ret < 0) ++ return ret; + +- ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); +- if (ret < 0) +- return ret; ++ ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data); ++ if (ret < 0) ++ return ret; + +- /* wait to be finished */ +- ret = ksz9477_wait_alu_sta_ready(dev); +- if (ret < 0) { +- dev_err(dev->dev, "Failed to update Reserved Multicast table\n"); +- return ret; ++ override = false; ++ ports = data & dev->port_mask; ++ switch (i) { ++ case 0: ++ case 6: ++ /* Change the host port. */ ++ update = BIT(dev->cpu_port); ++ override = true; ++ break; ++ case 2: ++ /* Change the host port. */ ++ update = BIT(dev->cpu_port); ++ break; ++ case 4: ++ case 5: ++ case 7: ++ /* Skip the host port. */ ++ update = dev->port_mask & ~BIT(dev->cpu_port); ++ break; ++ default: ++ update = ports; ++ break; ++ } ++ if (update != ports || override) { ++ data &= ~dev->port_mask; ++ data |= update; ++ /* Set Override bit to receive frame even when port is ++ * closed. ++ */ ++ if (override) ++ data |= ALU_V_OVERRIDE; ++ ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data); ++ if (ret < 0) ++ return ret; ++ ++ data = reserved_mcast_map[i] << ++ dev->info->shifts[ALU_STAT_INDEX]; ++ data |= ALU_STAT_START | ++ masks[ALU_STAT_DIRECT] | ++ masks[ALU_RESV_MCAST_ADDR] | ++ masks[ALU_STAT_WRITE]; ++ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); ++ if (ret < 0) ++ return ret; ++ ++ /* wait to be finished */ ++ ret = ksz9477_wait_alu_sta_ready(dev); ++ if (ret < 0) ++ return ret; ++ } + } + + return 0; +diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h +index d0886ed984c578..c3ad8ce707f854 100644 +--- a/drivers/net/dsa/microchip/ksz9477_reg.h ++++ b/drivers/net/dsa/microchip/ksz9477_reg.h +@@ -2,7 +2,7 @@ + /* + * Microchip KSZ9477 register definitions + * +- * Copyright (C) 2017-2024 Microchip Technology Inc. ++ * Copyright (C) 2017-2025 Microchip Technology Inc. 
+ */ + + #ifndef __KSZ9477_REGS_H +@@ -422,7 +422,6 @@ + + #define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1) + #define ALU_STAT_START BIT(7) +-#define ALU_RESV_MCAST_ADDR BIT(1) + + #define REG_SW_ALU_VAL_A 0x0420 + +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c +index 997c225dfba4de..cff83a8fb7d28a 100644 +--- a/drivers/net/dsa/microchip/ksz_common.c ++++ b/drivers/net/dsa/microchip/ksz_common.c +@@ -437,6 +437,8 @@ static const u16 ksz9477_regs[] = { + static const u32 ksz9477_masks[] = { + [ALU_STAT_WRITE] = 0, + [ALU_STAT_READ] = 1, ++ [ALU_STAT_DIRECT] = 0, ++ [ALU_RESV_MCAST_ADDR] = BIT(1), + [P_MII_TX_FLOW_CTRL] = BIT(5), + [P_MII_RX_FLOW_CTRL] = BIT(3), + }; +@@ -464,6 +466,8 @@ static const u8 ksz9477_xmii_ctrl1[] = { + static const u32 lan937x_masks[] = { + [ALU_STAT_WRITE] = 1, + [ALU_STAT_READ] = 2, ++ [ALU_STAT_DIRECT] = BIT(3), ++ [ALU_RESV_MCAST_ADDR] = BIT(2), + [P_MII_TX_FLOW_CTRL] = BIT(5), + [P_MII_RX_FLOW_CTRL] = BIT(3), + }; +diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h +index a4de58847deab3..0e51f2277381db 100644 +--- a/drivers/net/dsa/microchip/ksz_common.h ++++ b/drivers/net/dsa/microchip/ksz_common.h +@@ -255,6 +255,8 @@ enum ksz_masks { + DYNAMIC_MAC_TABLE_TIMESTAMP, + ALU_STAT_WRITE, + ALU_STAT_READ, ++ ALU_STAT_DIRECT, ++ ALU_RESV_MCAST_ADDR, + P_MII_TX_FLOW_CTRL, + P_MII_RX_FLOW_CTRL, + }; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +index bbe8657f6545b3..404b433f1bc08e 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +@@ -917,9 +917,9 @@ static void bnxt_ptp_free(struct bnxt *bp) + if (ptp->ptp_clock) { + ptp_clock_unregister(ptp->ptp_clock); + ptp->ptp_clock = NULL; +- kfree(ptp->ptp_info.pin_config); +- ptp->ptp_info.pin_config = NULL; + } ++ kfree(ptp->ptp_info.pin_config); ++ ptp->ptp_info.pin_config = NULL; + } + + int bnxt_ptp_init(struct bnxt *bp) +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index b836ab2a649a2b..7593255e6e53d0 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -281,9 +281,9 @@ static void macb_set_hwaddr(struct macb *bp) + u32 bottom; + u16 top; + +- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); ++ bottom = get_unaligned_le32(bp->dev->dev_addr); + macb_or_gem_writel(bp, SA1B, bottom); +- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); ++ top = get_unaligned_le16(bp->dev->dev_addr + 4); + macb_or_gem_writel(bp, SA1T, top); + + if (gem_has_ptp(bp)) { +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 64cd72c1947837..ee0306ab977148 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1776,6 +1776,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) + ndev->stats.rx_packets++; + pkt_len = fec16_to_cpu(bdp->cbd_datlen); + ndev->stats.rx_bytes += pkt_len; ++ if (fep->quirks & FEC_QUIRK_HAS_RACC) ++ ndev->stats.rx_bytes -= 2; + + index = fec_enet_get_bd_index(bdp, &rxq->bd); + page = rxq->rx_skb_info[index].page; +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +index 789f72d1067f8a..2fa64099e8be2f 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ++++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +@@ -9346,8 +9346,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) + /* this command reads phy id and register at the same time */ + fallthrough; + case SIOCGMIIREG: +- data->val_out = hclge_read_phy_reg(hdev, data->reg_num); +- return 0; ++ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out); + + case SIOCSMIIREG: + return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +index 80079657afebe0..b8dbf932caf942 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +@@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev) + phy_stop(phydev); + } + +-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) ++int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val) + { + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; +@@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) + req->reg_addr = cpu_to_le16(reg_addr); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); +- if (ret) ++ if (ret) { + dev_err(&hdev->pdev->dev, + "failed to read phy reg, ret = %d.\n", ret); ++ return ret; ++ } + +- return le16_to_cpu(req->reg_val); ++ *val = le16_to_cpu(req->reg_val); ++ return 0; + } + + int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +index 4200d0b6d9317b..21d434c82475b3 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle); + void hclge_mac_disconnect_phy(struct hnae3_handle *handle); + void hclge_mac_start_phy(struct hclge_dev *hdev); + void hclge_mac_stop_phy(struct hclge_dev *hdev); +-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); ++int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val); + int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); + + #endif +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c +index f51a63fca513e9..1f919a50c76535 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c +@@ -447,17 +447,16 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + /** + * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues + * @q: pointer to the ring of hardware statistics queue +- * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function invalidates the index values for the queues so any updates that + * may have happened are ignored and the base for the queue stats is reset. 
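The @idx parameter removed here was never used; what remains is an invalidate step that the rebind helpers later in this patch pair with a fresh update, so the current hardware counters become the new base. A sketch of that calling pattern, using the signatures from these fm10k hunks:

/* Re-base all queue counters, as fm10k_rebind_hw_stats_pf()/_vf() below do. */
static void rebind_queue_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
{
	fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}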
+ **/ +-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) ++void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count) + { + u32 i; + +- for (i = 0; i < count; i++, idx++, q++) { ++ for (i = 0; i < count; i++, q++) { + q->rx_stats_idx = 0; + q->tx_stats_idx = 0; + } +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h +index 4c48fb73b3e78c..13fca6a91a01bd 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h +@@ -43,6 +43,6 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count); + #define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0) +-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count); ++void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count); + s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready); + #endif /* _FM10K_COMMON_H_ */ +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +index aed5e0bf6313e9..b51b6003ad0320 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +@@ -1510,7 +1510,7 @@ static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + fm10k_unbind_hw_stats_32b(&stats->nodesc_drop); + + /* Unbind Queue Statistics */ +- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); ++ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_pf(hw, stats); +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +index 7fb1961f292101..6861a0bdc14e18 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +@@ -465,7 +465,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) + { + /* Unbind Queue Statistics */ +- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); ++ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_vf(hw, stats); +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index e1a68fb5e9fff0..e846246261b940 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -8765,7 +8765,7 @@ static int ice_create_q_channels(struct ice_vsi *vsi) + list_add_tail(&ch->list, &vsi->ch_list); + vsi->tc_map_vsi[i] = ch->ch_vsi; + dev_dbg(ice_pf_to_dev(pf), +- "successfully created channel: VSI %pK\n", ch->ch_vsi); ++ "successfully created channel: VSI %p\n", ch->ch_vsi); + } + return 0; + +diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h +index b2f5c9fe01492d..a41e210a310d19 100644 +--- a/drivers/net/ethernet/intel/ice/ice_trace.h ++++ b/drivers/net/ethernet/intel/ice/ice_trace.h +@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(ice_tx_template, + __entry->buf = buf; + __assign_str(devname, ring->netdev->name);), + +- TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname), ++ TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname), + __entry->ring, __entry->desc, __entry->buf) + ); + +@@ -158,7 +158,7 @@ DECLARE_EVENT_CLASS(ice_rx_template, + __entry->desc = desc; + __assign_str(devname, ring->netdev->name);), + +- 
TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname), ++ TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname), + __entry->ring, __entry->desc) + ); + DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq, +@@ -182,7 +182,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template, + __entry->skb = skb; + __assign_str(devname, ring->netdev->name);), + +- TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname), ++ TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname), + __entry->ring, __entry->desc, __entry->skb) + ); + +@@ -205,7 +205,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template, + __entry->skb = skb; + __assign_str(devname, ring->netdev->name);), + +- TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname), ++ TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname), + __entry->skb, __entry->ring) + ); + +@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(ice_tx_tstamp_template, + TP_fast_assign(__entry->skb = skb; + __entry->idx = idx;), + +- TP_printk("skb %pK idx %d", ++ TP_printk("skb %p idx %d", + __entry->skb, __entry->idx) + ); + #define DEFINE_TX_TSTAMP_OP_EVENT(name) \ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +index 8705cffc747ffb..29e633e6dd3f04 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +@@ -587,32 +587,55 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct mlx5_core_dev *mdev = priv->mdev; + u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; +- __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB); ++ __u64 upper_limit_mbps; ++ __u64 upper_limit_gbps; + int i; ++ struct { ++ int scale; ++ const char *units_str; ++ } units[] = { ++ [MLX5_100_MBPS_UNIT] = { ++ .scale = 100, ++ .units_str = "Mbps", ++ }, ++ [MLX5_GBPS_UNIT] = { ++ .scale = 1, ++ .units_str = "Gbps", ++ }, ++ }; + + memset(max_bw_value, 0, sizeof(max_bw_value)); + memset(max_bw_unit, 0, sizeof(max_bw_unit)); ++ upper_limit_mbps = 255 * MLX5E_100MB; ++ upper_limit_gbps = 255 * MLX5E_1GB; + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + if (!maxrate->tc_maxrate[i]) { + max_bw_unit[i] = MLX5_BW_NO_LIMIT; + continue; + } +- if (maxrate->tc_maxrate[i] < upper_limit_mbps) { ++ if (maxrate->tc_maxrate[i] <= upper_limit_mbps) { + max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], + MLX5E_100MB); + max_bw_value[i] = max_bw_value[i] ? 
max_bw_value[i] : 1; + max_bw_unit[i] = MLX5_100_MBPS_UNIT; +- } else { ++ } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) { + max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], + MLX5E_1GB); + max_bw_unit[i] = MLX5_GBPS_UNIT; ++ } else { ++ netdev_err(netdev, ++ "tc_%d maxrate %llu Kbps exceeds limit %llu\n", ++ i, maxrate->tc_maxrate[i], ++ upper_limit_gbps); ++ return -EINVAL; + } + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { +- netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n", +- __func__, i, max_bw_value[i]); ++ netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i, ++ max_bw_value[i] * units[max_bw_unit[i]].scale, ++ units[max_bw_unit[i]].units_str); + } + + return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 54379297a7489e..ccd2ebfd267375 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -1839,12 +1839,12 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, + if (!size_read) + return i; + +- if (size_read == -EINVAL) +- return -EINVAL; + if (size_read < 0) { +- netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n", +- __func__, size_read); +- return i; ++ NL_SET_ERR_MSG_FMT_MOD( ++ extack, ++ "Query module eeprom by page failed, read %u bytes, err %d\n", ++ i, size_read); ++ return size_read; + } + + i += size_read; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index fcf7437174e189..1d586451900b8d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -2321,7 +2321,10 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) + { + int nr_frags = skb_shinfo(skb)->nr_frags; + +- return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; ++ if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE) ++ return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE; ++ else ++ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; + } + + static void +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index 4b96ad657145b8..1c69244e00d75e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -1339,16 +1339,13 @@ static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt) + } + + static void fec_set_block_stats(struct mlx5e_priv *priv, ++ int mode, + struct ethtool_fec_stats *fec_stats) + { + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); +- int mode = fec_active_mode(mdev); +- +- if (mode == MLX5E_FEC_NOFEC) +- return; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); +@@ -1389,11 +1386,14 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv, + void mlx5e_stats_fec_get(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats) + { +- if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) ++ int mode = fec_active_mode(priv->mdev); ++ ++ if (mode == MLX5E_FEC_NOFEC || ++ !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) + return; + + fec_set_corrected_bits_total(priv, fec_stats); +- 
fec_set_block_stats(priv, fec_stats); ++ fec_set_block_stats(priv, mode, fec_stats); + } + + #define PPORT_ETH_EXT_OFF(c) \ +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +index 06811c60d598e1..df10a0b68a08e6 100644 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +@@ -294,7 +294,7 @@ static void lan966x_stats_update(struct lan966x *lan966x) + { + int i, j; + +- mutex_lock(&lan966x->stats_lock); ++ spin_lock(&lan966x->stats_lock); + + for (i = 0; i < lan966x->num_phys_ports; i++) { + uint idx = i * lan966x->num_stats; +@@ -310,7 +310,7 @@ static void lan966x_stats_update(struct lan966x *lan966x) + } + } + +- mutex_unlock(&lan966x->stats_lock); ++ spin_unlock(&lan966x->stats_lock); + } + + static int lan966x_get_sset_count(struct net_device *dev, int sset) +@@ -365,7 +365,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev, + + idx = port->chip_port * lan966x->num_stats; + +- mutex_lock(&lan966x->stats_lock); ++ spin_lock(&lan966x->stats_lock); + + mac_stats->FramesTransmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_UC] + +@@ -424,7 +424,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev, + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + +- mutex_unlock(&lan966x->stats_lock); ++ spin_unlock(&lan966x->stats_lock); + } + + static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = { +@@ -450,7 +450,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev, + + idx = port->chip_port * lan966x->num_stats; + +- mutex_lock(&lan966x->stats_lock); ++ spin_lock(&lan966x->stats_lock); + + rmon_stats->undersize_pkts = + lan966x->stats[idx + SYS_COUNT_RX_SHORT] + +@@ -508,7 +508,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev, + lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526]; + +- mutex_unlock(&lan966x->stats_lock); ++ spin_unlock(&lan966x->stats_lock); + + *ranges = lan966x_rmon_ranges; + } +@@ -614,7 +614,7 @@ void lan966x_stats_get(struct net_device *dev, + + idx = port->chip_port * lan966x->num_stats; + +- mutex_lock(&lan966x->stats_lock); ++ spin_lock(&lan966x->stats_lock); + + stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT]; +@@ -696,7 +696,7 @@ void lan966x_stats_get(struct net_device *dev, + + stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL]; + +- mutex_unlock(&lan966x->stats_lock); ++ spin_unlock(&lan966x->stats_lock); + } + + int lan966x_stats_init(struct lan966x *lan966x) +@@ -712,7 +712,7 @@ int lan966x_stats_init(struct lan966x *lan966x) + return -ENOMEM; + + /* Init stats worker */ +- mutex_init(&lan966x->stats_lock); ++ spin_lock_init(&lan966x->stats_lock); + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(lan966x->dev)); + lan966x->stats_queue = create_singlethread_workqueue(queue_name); +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +index b424e75fd40c46..5466f14e000ce5 100644 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +@@ -1263,7 +1263,6 @@ static int lan966x_probe(struct platform_device *pdev) + + cancel_delayed_work_sync(&lan966x->stats_work); + destroy_workqueue(lan966x->stats_queue); +- mutex_destroy(&lan966x->stats_lock); 
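The lan966x hunks around here convert stats_lock from a mutex to a spinlock; the likely motivation (inferred from the conversion, not stated in the diff) is that some of these paths can run in atomic context, where a mutex may sleep. The resulting idiom, as a self-contained sketch:

#include <linux/spinlock.h>
#include <linux/types.h>

struct stats_ctx {
	spinlock_t lock;	/* protects @counter */
	u64 counter;
};

static void stats_ctx_init(struct stats_ctx *s)
{
	/* Unlike mutexes, spinlocks have no destroy counterpart, which is
	 * why the mutex_destroy() calls here are simply dropped. */
	spin_lock_init(&s->lock);
}

static u64 stats_ctx_read(struct stats_ctx *s)
{
	u64 v;

	spin_lock(&s->lock);	/* never sleeps: safe in atomic context */
	v = s->counter;
	spin_unlock(&s->lock);

	return v;
}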
+ + debugfs_remove_recursive(lan966x->debugfs_root); + +@@ -1281,7 +1280,6 @@ static int lan966x_remove(struct platform_device *pdev) + + cancel_delayed_work_sync(&lan966x->stats_work); + destroy_workqueue(lan966x->stats_queue); +- mutex_destroy(&lan966x->stats_lock); + + lan966x_mac_purge_entries(lan966x); + lan966x_mdb_deinit(lan966x); +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +index 5a16d76eb000d6..1f5a18b205bf20 100644 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +@@ -347,8 +347,8 @@ struct lan966x { + const struct lan966x_stat_layout *stats_layout; + u32 num_stats; + +- /* workqueue for reading stats */ +- struct mutex stats_lock; ++ /* lock for reading stats */ ++ spinlock_t stats_lock; + u64 *stats; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c +index a4414f63c9b1cc..c266f903ea8a6f 100644 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c +@@ -403,11 +403,11 @@ static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x, + u32 counter; + + id = id & 0xff; /* counter limit */ +- mutex_lock(&lan966x->stats_lock); ++ spin_lock(&lan966x->stats_lock); + lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG); + counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) + + lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS)); +- mutex_unlock(&lan966x->stats_lock); ++ spin_unlock(&lan966x->stats_lock); + if (counter) + admin->cache.counter = counter; + } +@@ -417,14 +417,14 @@ static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x, + { + id = id & 0xff; /* counter limit */ + +- mutex_lock(&lan966x->stats_lock); ++ spin_lock(&lan966x->stats_lock); + lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG); + lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES)); + lan_wr(admin->cache.counter, lan966x, + SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)); + lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES)); + lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS)); +- mutex_unlock(&lan966x->stats_lock); ++ spin_unlock(&lan966x->stats_lock); + } + + static void lan966x_vcap_cache_write(struct net_device *dev, +diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig +index f58c506bda228c..15b27fc57aeddb 100644 +--- a/drivers/net/ethernet/microchip/sparx5/Kconfig ++++ b/drivers/net/ethernet/microchip/sparx5/Kconfig +@@ -3,7 +3,7 @@ config SPARX5_SWITCH + depends on NET_SWITCHDEV + depends on HAS_IOMEM + depends on OF +- depends on ARCH_SPARX5 || COMPILE_TEST ++ depends on ARCH_SPARX5 || ARCH_LAN969X || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL + depends on BRIDGE || BRIDGE=n + select PHYLINK +diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig +index 93d9df55b361a9..01811924c4db43 100644 +--- a/drivers/net/ethernet/realtek/Kconfig ++++ b/drivers/net/ethernet/realtek/Kconfig +@@ -58,7 +58,7 @@ config 8139TOO + config 8139TOO_PIO + bool "Use PIO instead of MMIO" + default y +- depends on 8139TOO ++ depends on 8139TOO && !NO_IOPORT_MAP + help + This instructs the driver to use programmed I/O ports (PIO) instead + of PCI shared memory (MMIO). 
This can possibly solve some problems +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 3b90f257e94f85..f4353ccb1b87fb 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -3362,7 +3362,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + +- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); ++ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); +@@ -3467,7 +3467,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + +- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); ++ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); +@@ -3660,7 +3660,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); +- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); ++ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index 0c0fd68ded423d..4597c4e9b297bc 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -2360,6 +2360,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev, + return 0; + } + ++#ifdef CONFIG_PM_SLEEP + static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) + { + struct sh_eth_private *mdp = netdev_priv(ndev); +@@ -2386,6 +2387,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) + + return 0; + } ++#endif + + static const struct ethtool_ops sh_eth_ethtool_ops = { + .get_regs_len = sh_eth_get_regs_len, +@@ -2401,8 +2403,10 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { + .set_ringparam = sh_eth_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, ++#ifdef CONFIG_PM_SLEEP + .get_wol = sh_eth_get_wol, + .set_wol = sh_eth_set_wol, ++#endif + }; + + /* network device open function */ +diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c +index c3e2b4a21d1055..3b08e36e1ef87a 100644 +--- a/drivers/net/ethernet/sfc/mae.c ++++ b/drivers/net/ethernet/sfc/mae.c +@@ -1101,6 +1101,9 @@ void efx_mae_remove_mport(void *desc, void *arg) + kfree(mport); + } + ++/* ++ * Takes ownership of @desc, even if it returns an error ++ */ + static int efx_mae_process_mport(struct efx_nic *efx, + struct mae_mport_desc *desc) + { +@@ -1111,6 +1114,7 @@ static int efx_mae_process_mport(struct efx_nic *efx, + if (!IS_ERR_OR_NULL(mport)) { + netif_err(efx, drv, efx->net_dev, + "mport with id %u does exist!!!\n", desc->mport_id); ++ kfree(desc); + return -EEXIST; + } + +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c +index cb590db625e837..90a8eb517033e6 100644 +--- a/drivers/net/ethernet/smsc/smsc911x.c 
++++ b/drivers/net/ethernet/smsc/smsc911x.c +@@ -2162,10 +2162,20 @@ static const struct net_device_ops smsc911x_netdev_ops = { + static void smsc911x_read_mac_address(struct net_device *dev) + { + struct smsc911x_data *pdata = netdev_priv(dev); +- u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); +- u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); ++ u32 mac_high16, mac_low32; + u8 addr[ETH_ALEN]; + ++ mac_high16 = smsc911x_mac_read(pdata, ADDRH); ++ mac_low32 = smsc911x_mac_read(pdata, ADDRL); ++ ++ /* The first mac_read in some setups can incorrectly read 0. Re-read it ++ * to get the full MAC if this is observed. ++ */ ++ if (mac_high16 == 0) { ++ SMSC_TRACE(pdata, probe, "Re-read MAC ADDRH\n"); ++ mac_high16 = smsc911x_mac_read(pdata, ADDRH); ++ } ++ + addr[0] = (u8)(mac_low32); + addr[1] = (u8)(mac_low32 >> 8); + addr[2] = (u8)(mac_low32 >> 16); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index f3155d69a013c6..56a61599d0b6f3 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3516,7 +3516,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) + { + struct stmmac_priv *priv = netdev_priv(dev); + enum request_irq_err irq_err; +- cpumask_t cpu_mask; + int irq_idx = 0; + char *int_name; + int ret; +@@ -3628,9 +3627,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) + irq_idx = i; + goto irq_error; + } +- cpumask_clear(&cpu_mask); +- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); +- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); ++ irq_set_affinity_hint(priv->rx_irq[i], ++ cpumask_of(i % num_online_cpus())); + } + + /* Request Tx MSI irq */ +@@ -3653,9 +3651,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) + irq_idx = i; + goto irq_error; + } +- cpumask_clear(&cpu_mask); +- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); +- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); ++ irq_set_affinity_hint(priv->tx_irq[i], ++ cpumask_of(i % num_online_cpus())); + } + + return 0; +@@ -5527,7 +5524,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + +- if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb)) ++ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) || ++ (status & csum_none)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; +@@ -7901,7 +7899,14 @@ int stmmac_resume(struct device *dev) + stmmac_free_tx_skbufs(priv); + stmmac_clear_descriptors(priv, &priv->dma_conf); + +- stmmac_hw_setup(ndev, false); ++ ret = stmmac_hw_setup(ndev, false); ++ if (ret < 0) { ++ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); ++ mutex_unlock(&priv->lock); ++ rtnl_unlock(); ++ return ret; ++ } ++ + stmmac_init_coalesce(priv); + stmmac_set_rx_mode(ndev); + +diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c +index 663a8988d27a73..7feb991a95924f 100644 +--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c ++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c +@@ -1667,7 +1667,8 @@ int wx_sw_init(struct wx *wx) + wx->oem_svid = pdev->subsystem_vendor; + wx->oem_ssid = pdev->subsystem_device; + wx->bus.device = PCI_SLOT(pdev->devfn); +- wx->bus.func = PCI_FUNC(pdev->devfn); ++ wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID, ++ rd32(wx, WX_CFG_PORT_ST)); + + if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { + 
wx->subsystem_vendor_id = pdev->subsystem_vendor; +diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h +index 0fef9dfdd9a6b7..ee95706b446306 100644 +--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h ++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h +@@ -65,6 +65,8 @@ + #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) + #define WX_CFG_PORT_CTL_QINQ BIT(2) + #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ ++#define WX_CFG_PORT_ST 0x14404 ++#define WX_CFG_PORT_ST_LANID GENMASK(9, 8) + #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) + #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ + +@@ -363,8 +365,6 @@ enum WX_MSCA_CMD_value { + #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) + #define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +-#define WX_CFG_PORT_ST 0x14404 +- + /******************* Receive Descriptor bit definitions **********************/ + #define WX_RXD_STAT_DD BIT(0) /* Done */ + #define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c +index 6ed38a3cdd734e..6cecdfa8631d72 100644 +--- a/drivers/net/hamradio/6pack.c ++++ b/drivers/net/hamradio/6pack.c +@@ -121,8 +121,6 @@ struct sixpack { + + struct timer_list tx_t; + struct timer_list resync_t; +- refcount_t refcnt; +- struct completion dead; + spinlock_t lock; + }; + +@@ -359,42 +357,13 @@ static void sp_bump(struct sixpack *sp, char cmd) + + /* ----------------------------------------------------------------------- */ + +-/* +- * We have a potential race on dereferencing tty->disc_data, because the tty +- * layer provides no locking at all - thus one cpu could be running +- * sixpack_receive_buf while another calls sixpack_close, which zeroes +- * tty->disc_data and frees the memory that sixpack_receive_buf is using. The +- * best way to fix this is to use a rwlock in the tty struct, but for now we +- * use a single global rwlock for all ttys in ppp line discipline. +- */ +-static DEFINE_RWLOCK(disc_data_lock); +- +-static struct sixpack *sp_get(struct tty_struct *tty) +-{ +- struct sixpack *sp; +- +- read_lock(&disc_data_lock); +- sp = tty->disc_data; +- if (sp) +- refcount_inc(&sp->refcnt); +- read_unlock(&disc_data_lock); +- +- return sp; +-} +- +-static void sp_put(struct sixpack *sp) +-{ +- if (refcount_dec_and_test(&sp->refcnt)) +- complete(&sp->dead); +-} +- + /* + * Called by the TTY driver when there's room for more data. If we have + * more packets to send, we send them here. 
+ */ + static void sixpack_write_wakeup(struct tty_struct *tty) + { +- struct sixpack *sp = sp_get(tty); ++ struct sixpack *sp = tty->disc_data; + int actual; + + if (!sp) +@@ -406,7 +375,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty) + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + sp->tx_enable = 0; + netif_wake_queue(sp->dev); +- goto out; ++ return; + } + + if (sp->tx_enable) { +@@ -414,9 +383,6 @@ static void sixpack_write_wakeup(struct tty_struct *tty) + sp->xleft -= actual; + sp->xhead += actual; + } +- +-out: +- sp_put(sp); + } + + /* ----------------------------------------------------------------------- */ +@@ -436,7 +402,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp, + if (!count) + return; + +- sp = sp_get(tty); ++ sp = tty->disc_data; + if (!sp) + return; + +@@ -452,7 +418,6 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp, + } + sixpack_decode(sp, cp, count1); + +- sp_put(sp); + tty_unthrottle(tty); + } + +@@ -567,8 +532,6 @@ static int sixpack_open(struct tty_struct *tty) + + spin_lock_init(&sp->lock); + spin_lock_init(&sp->rxlock); +- refcount_set(&sp->refcnt, 1); +- init_completion(&sp->dead); + + /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ + +@@ -650,19 +613,11 @@ static void sixpack_close(struct tty_struct *tty) + { + struct sixpack *sp; + +- write_lock_irq(&disc_data_lock); + sp = tty->disc_data; +- tty->disc_data = NULL; +- write_unlock_irq(&disc_data_lock); + if (!sp) + return; + +- /* +- * We have now ensured that nobody can start using ap from now on, but +- * we have to wait for all existing users to finish. +- */ +- if (!refcount_dec_and_test(&sp->refcnt)) +- wait_for_completion(&sp->dead); ++ tty->disc_data = NULL; + + /* We must stop the queue to avoid potentially scribbling + * on the free buffers. 
The sp->dead completion is not sufficient +@@ -686,7 +641,7 @@ static void sixpack_close(struct tty_struct *tty) + static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg) + { +- struct sixpack *sp = sp_get(tty); ++ struct sixpack *sp = tty->disc_data; + struct net_device *dev; + unsigned int tmp, err; + +@@ -738,8 +693,6 @@ static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd, + err = tty_mode_ioctl(tty, cmd, arg); + } + +- sp_put(sp); +- + return err; + } + +diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c +index d5b05e8032199a..ca35a50bb64053 100644 +--- a/drivers/net/ipvlan/ipvlan_l3s.c ++++ b/drivers/net/ipvlan/ipvlan_l3s.c +@@ -224,5 +224,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port) + + dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; + ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); +- dev->l3mdev_ops = NULL; + } +diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c +index 7eb32ebb846d86..15b72203a2584e 100644 +--- a/drivers/net/mdio/of_mdio.c ++++ b/drivers/net/mdio/of_mdio.c +@@ -440,6 +440,5 @@ void of_phy_deregister_fixed_link(struct device_node *np) + fixed_phy_unregister(phydev); + + put_device(&phydev->mdio.dev); /* of_phy_find_device() */ +- phy_device_free(phydev); /* fixed_phy_register() */ + } + EXPORT_SYMBOL(of_phy_deregister_fixed_link); +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c +index e397e7d642d925..a778379108ef13 100644 +--- a/drivers/net/phy/dp83867.c ++++ b/drivers/net/phy/dp83867.c +@@ -774,6 +774,14 @@ static int dp83867_config_init(struct phy_device *phydev) + return ret; + } + ++ /* Although the DP83867 reports EEE capability through the ++ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature ++ * is not actually implemented in hardware. ++ */ ++ linkmode_zero(phydev->supported_eee); ++ linkmode_zero(phydev->advertising_eee); ++ phydev->eee_enabled = false; ++ + if (phy_interface_is_rgmii(phydev) || + phydev->interface == PHY_INTERFACE_MODE_SGMII) { + val = phy_read(phydev, MII_DP83867_PHYCTRL); +diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c +index aef739c20ac4d5..4694fb3eaa2ff8 100644 +--- a/drivers/net/phy/fixed_phy.c ++++ b/drivers/net/phy/fixed_phy.c +@@ -329,6 +329,7 @@ void fixed_phy_unregister(struct phy_device *phy) + phy_device_remove(phy); + of_node_put(phy->mdio.dev.of_node); + fixed_phy_del(phy->mdio.addr); ++ phy_device_free(phy); + } + EXPORT_SYMBOL_GPL(fixed_phy_unregister); + +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index eba652a4c1d887..760d0d2f791ca1 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -1802,6 +1802,43 @@ static int marvell_resume(struct phy_device *phydev) + return err; + } + ++/* m88e1510_resume ++ * ++ * The 88e1510 PHY has an erratum where the phy downshift counter is not cleared ++ * after phy being suspended(BMCR_PDOWN set) and then later resumed(BMCR_PDOWN ++ * cleared). This can cause the link to intermittently downshift to a lower speed. ++ * ++ * Disabling and re-enabling the downshift feature clears the counter, allowing ++ * the PHY to retry gigabit link negotiation up to the programmed retry count ++ * before downshifting. This behavior has been observed on copper links. 
++ */ ++static int m88e1510_resume(struct phy_device *phydev) ++{ ++ int err; ++ u8 cnt = 0; ++ ++ err = marvell_resume(phydev); ++ if (err < 0) ++ return err; ++ ++ /* read downshift counter value */ ++ err = m88e1011_get_downshift(phydev, &cnt); ++ if (err < 0) ++ return err; ++ ++ if (cnt) { ++ /* downshift disabled */ ++ err = m88e1011_set_downshift(phydev, 0); ++ if (err < 0) ++ return err; ++ ++ /* downshift enabled, with previous counter value */ ++ err = m88e1011_set_downshift(phydev, cnt); ++ } ++ ++ return err; ++} ++ + static int marvell_aneg_done(struct phy_device *phydev) + { + int retval = phy_read(phydev, MII_M1011_PHY_STATUS); +@@ -3506,7 +3543,7 @@ static struct phy_driver marvell_drivers[] = { + .handle_interrupt = marvell_handle_interrupt, + .get_wol = m88e1318_get_wol, + .set_wol = m88e1318_set_wol, +- .resume = marvell_resume, ++ .resume = m88e1510_resume, + .suspend = marvell_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index f1fac89721ed93..7da30a6752beef 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -81,8 +81,11 @@ int mdiobus_register_device(struct mdio_device *mdiodev) + return err; + + err = mdiobus_register_reset(mdiodev); +- if (err) ++ if (err) { ++ gpiod_put(mdiodev->reset_gpio); ++ mdiodev->reset_gpio = NULL; + return err; ++ } + + /* Assert the reset signal */ + mdio_device_reset(mdiodev, 1); +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index df54c137c5f5f8..cf171bdd667aa8 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -1482,6 +1482,19 @@ void phy_state_machine(struct work_struct *work) + } + break; + case PHY_HALTED: ++ if (phydev->link) { ++ if (phydev->autoneg == AUTONEG_ENABLE) { ++ phydev->speed = SPEED_UNKNOWN; ++ phydev->duplex = DUPLEX_UNKNOWN; ++ } ++ if (phydev->master_slave_state != ++ MASTER_SLAVE_STATE_UNSUPPORTED) ++ phydev->master_slave_state = ++ MASTER_SLAVE_STATE_UNKNOWN; ++ phydev->mdix = ETH_TP_MDI_INVALID; ++ linkmode_zero(phydev->lp_advertising); ++ } ++ fallthrough; + case PHY_ERROR: + if (phydev->link) { + phydev->link = 0; +diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c +index 0516ef71a64cb0..f70b8ab0ccd264 100644 +--- a/drivers/net/usb/asix_devices.c ++++ b/drivers/net/usb/asix_devices.c +@@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf) + int i; + unsigned long gpio_bits = dev->driver_info->data; + +- usbnet_get_endpoints(dev,intf); ++ ret = usbnet_get_endpoints(dev, intf); ++ if (ret) ++ goto out; + + /* Toggle the GPIOs in a manufacturer/model specific way */ + for (i = 2; i >= 0; i--) { +@@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) + + dev->driver_priv = priv; + +- usbnet_get_endpoints(dev, intf); ++ ret = usbnet_get_endpoints(dev, intf); ++ if (ret) ++ return ret; + + /* Maybe the boot loader passed the MAC address via device tree */ + if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { +@@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) + int ret; + u8 buf[ETH_ALEN] = {0}; + +- usbnet_get_endpoints(dev,intf); ++ ret = usbnet_get_endpoints(dev, intf); ++ if (ret) ++ return ret; + + /* Get the MAC address */ + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index eba755b584a459..73df808978b207 
100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -192,6 +192,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + if (!skbn) + return 0; + ++ /* Raw IP packets don't have a MAC header, but other subsystems ++ * (like xfrm) may still access MAC header offsets, so they must ++ * be initialized. ++ */ ++ skb_reset_mac_header(skbn); ++ + switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) { + case 0x40: + skbn->protocol = htons(ETH_P_IP); +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c +index fd6b5865ac5135..e6a1864f03f948 100644 +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -1650,6 +1650,8 @@ void usbnet_disconnect (struct usb_interface *intf) + net = dev->net; + unregister_netdev (net); + ++ cancel_work_sync(&dev->kevent); ++ + while ((urb = usb_get_from_anchor(&dev->deferred))) { + dev_kfree_skb(urb->context); + kfree(urb->sg); +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 5a949f9446a8ed..caae11ba2f8bed 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -591,17 +591,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, + goto ok; + } + +- /* +- * Verify that we can indeed put this data into a skb. +- * This is here to handle cases when the device erroneously +- * tries to receive more than is possible. This is usually +- * the case of a broken device. +- */ +- if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { +- net_dbg_ratelimited("%s: too much data\n", skb->dev->name); +- dev_kfree_skb(skb); +- return NULL; +- } + BUG_ON(offset >= PAGE_SIZE); + while (len) { + unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); +@@ -1344,9 +1333,19 @@ static struct sk_buff *receive_big(struct net_device *dev, + struct virtnet_rq_stats *stats) + { + struct page *page = buf; +- struct sk_buff *skb = +- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); ++ struct sk_buff *skb; ++ ++ /* Make sure that len does not exceed the size allocated in ++ * add_recvbuf_big. ++ */ ++ if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) { ++ pr_debug("%s: rx error: len %u exceeds allocated size %lu\n", ++ dev->name, len, ++ (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE); ++ goto err; ++ } + ++ skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); + u64_stats_add(&stats->bytes, len - vi->hdr_len); + if (unlikely(!skb)) + goto err; +@@ -1822,22 +1821,28 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + return; + } + +- /* 1. Save the flags early, as the XDP program might overwrite them. ++ /* About the flags below: ++ * 1. Save the flags early, as the XDP program might overwrite them. + * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID + * stay valid after XDP processing. + * 2. XDP doesn't work with partially checksummed packets (refer to + * virtnet_xdp_set()), so packets marked as + * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing. 
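The reason the flags read moves into each branch (visible in the hunk continuing below): in big-packet mode the ring cookie is a bare struct page rather than a pointer at the header, so the header, and with it the flags byte, must first be located via page_address(). Folded into one hypothetical helper, under the driver's own names, the dispatch looks like this (the helper itself does not exist in the driver):

static u8 rx_hdr_flags(struct virtnet_info *vi, void *buf)
{
	void *hdr = buf;

	/* Mergeable and small buffers already point at the header; a
	 * big-packets buffer is a page cookie that must be mapped first. */
	if (vi->big_packets && !vi->mergeable_rx_bufs)
		hdr = page_address((struct page *)buf);

	return ((struct virtio_net_common_hdr *)hdr)->hdr.flags;
}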
+ */ +- flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; + +- if (vi->mergeable_rx_bufs) ++ if (vi->mergeable_rx_bufs) { ++ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; + skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, + stats); +- else if (vi->big_packets) ++ } else if (vi->big_packets) { ++ void *p = page_address((struct page *)buf); ++ ++ flags = ((struct virtio_net_common_hdr *)p)->hdr.flags; + skb = receive_big(dev, vi, rq, buf, len, stats); +- else ++ } else { ++ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); ++ } + + if (unlikely(!skb)) + return; +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 655fb5cdf01f86..233e5c619fa4ee 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + #include "hif.h" + #include "core.h" +@@ -287,8 +288,15 @@ static int ath10k_send_key(struct ath10k_vif *arvif, + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + if (cmd == DISABLE_KEY) { +- arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; +- arg.key_data = NULL; ++ if (flags & WMI_KEY_GROUP) { ++ /* Not all hardware handles group-key deletion operation ++ * correctly. Replace the key with a junk value to invalidate it. ++ */ ++ get_random_bytes(key->key, key->keylen); ++ } else { ++ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; ++ arg.key_data = NULL; ++ } + } + + return ath10k_wmi_vdev_install_key(arvif->ar, &arg); +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c +index 340502c47a10d6..c7c96d210061d1 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi.c ++++ b/drivers/net/wireless/ath/ath10k/wmi.c +@@ -1763,32 +1763,33 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, + + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) + { +- unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ; + unsigned long time_left, i; + +- /* Sometimes the PCI HIF doesn't receive interrupt +- * for the service ready message even if the buffer +- * was completed. PCIe sniffer shows that it's +- * because the corresponding CE ring doesn't fires +- * it. Workaround here by polling CE rings. Since +- * the message could arrive at any time, continue +- * polling until timeout. +- */ +- do { ++ time_left = wait_for_completion_timeout(&ar->wmi.service_ready, ++ WMI_SERVICE_READY_TIMEOUT_HZ); ++ if (!time_left) { ++ /* Sometimes the PCI HIF doesn't receive interrupt ++ * for the service ready message even if the buffer ++ * was completed. PCIe sniffer shows that it's ++ * because the corresponding CE ring doesn't fires ++ * it. Workaround here by polling CE rings once. 
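Rewritten linearly, the retry flow above is: block on the service-ready completion once; on timeout, kick every copy-engine ring in case the interrupt was lost, then block one final time. A condensed sketch equivalent to the hunk continuing below:

static int wait_service_ready(struct ath10k *ar)
{
	unsigned long left;
	int i;

	left = wait_for_completion_timeout(&ar->wmi.service_ready,
					   WMI_SERVICE_READY_TIMEOUT_HZ);
	if (left)
		return 0;

	/* Possible lost interrupt: poll each CE ring once, then retry. */
	for (i = 0; i < CE_COUNT; i++)
		ath10k_hif_send_complete_check(ar, i, 1);

	left = wait_for_completion_timeout(&ar->wmi.service_ready,
					   WMI_SERVICE_READY_TIMEOUT_HZ);

	return left ? 0 : -ETIMEDOUT;
}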
++ */ ++ ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); ++ + for (i = 0; i < CE_COUNT; i++) + ath10k_hif_send_complete_check(ar, i, 1); + +- /* The 100 ms granularity is a tradeoff considering scheduler +- * overhead and response latency +- */ + time_left = wait_for_completion_timeout(&ar->wmi.service_ready, +- msecs_to_jiffies(100)); +- if (time_left) +- return 0; +- } while (time_before(jiffies, timeout)); ++ WMI_SERVICE_READY_TIMEOUT_HZ); ++ if (!time_left) { ++ ath10k_warn(ar, "polling timed out\n"); ++ return -ETIMEDOUT; ++ } + +- ath10k_warn(ar, "failed to receive service ready completion\n"); +- return -ETIMEDOUT; ++ ath10k_warn(ar, "service ready completion received, continuing normally\n"); ++ } ++ ++ return 0; + } + + int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) +@@ -1936,6 +1937,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) + if (cmd_id == WMI_CMD_UNSUPPORTED) { + ath10k_warn(ar, "wmi command %d is not supported by firmware\n", + cmd_id); ++ dev_kfree_skb_any(skb); + return ret; + } + +diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c +index 3a340cb2b205f4..355424baeedde2 100644 +--- a/drivers/net/wireless/ath/ath11k/core.c ++++ b/drivers/net/wireless/ath/ath11k/core.c +@@ -707,42 +707,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { + static const struct dmi_system_id ath11k_pm_quirk_table[] = { + { + .driver_data = (void *)ATH11K_PM_WOW, +- .matches = { ++ .matches = { /* X13 G4 AMD #1 */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21J3"), ++ }, ++ }, ++ { ++ .driver_data = (void *)ATH11K_PM_WOW, ++ .matches = { /* X13 G4 AMD #2 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21J4"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, +- .matches = { ++ .matches = { /* T14 G4 AMD #1 */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21K3"), ++ }, ++ }, ++ { ++ .driver_data = (void *)ATH11K_PM_WOW, ++ .matches = { /* T14 G4 AMD #2 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K4"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, +- .matches = { ++ .matches = { /* P14s G4 AMD #1 */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21K5"), ++ }, ++ }, ++ { ++ .driver_data = (void *)ATH11K_PM_WOW, ++ .matches = { /* P14s G4 AMD #2 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K6"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, +- .matches = { ++ .matches = { /* T16 G2 AMD #1 */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21K7"), ++ }, ++ }, ++ { ++ .driver_data = (void *)ATH11K_PM_WOW, ++ .matches = { /* T16 G2 AMD #2 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K8"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, +- .matches = { ++ .matches = { /* P16s G2 AMD #1 */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21K9"), ++ }, ++ }, ++ { ++ .driver_data = (void *)ATH11K_PM_WOW, ++ .matches = { /* P16s G2 AMD #2 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21KA"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, +- .matches = { ++ .matches = { /* T14s G4 AMD #1 */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "21F8"), ++ }, ++ }, ++ { ++ .driver_data = (void *)ATH11K_PM_WOW, ++ .matches = { /* T14s G4 AMD #2 */ + 
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21F9"), + }, +diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c +index 31dbabc9eaf330..16687223bdcba6 100644 +--- a/drivers/net/wireless/ath/ath11k/wmi.c ++++ b/drivers/net/wireless/ath/ath11k/wmi.c +@@ -5841,6 +5841,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, + dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + + info = IEEE80211_SKB_CB(msdu); ++ memset(&info->status, 0, sizeof(info->status)); ++ info->status.rates[0].idx = -1; ++ + if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && + !tx_compl_param->status) { + info->flags |= IEEE80211_TX_STAT_ACK; +diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h +index 61f765432516bb..284032db0b98d2 100644 +--- a/drivers/net/wireless/ath/ath12k/dp.h ++++ b/drivers/net/wireless/ath/ath12k/dp.h +@@ -162,7 +162,7 @@ struct ath12k_pdev_dp { + #define DP_REO_REINJECT_RING_SIZE 32 + #define DP_RX_RELEASE_RING_SIZE 1024 + #define DP_REO_EXCEPTION_RING_SIZE 128 +-#define DP_REO_CMD_RING_SIZE 128 ++#define DP_REO_CMD_RING_SIZE 256 + #define DP_REO_STATUS_RING_SIZE 2048 + #define DP_RXDMA_BUF_RING_SIZE 4096 + #define DP_RXDMA_REFILL_RING_SIZE 2048 +diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c +index e1db6e69d22076..010413bfdb1412 100644 +--- a/drivers/net/wireless/ath/ath12k/mac.c ++++ b/drivers/net/wireless/ath/ath12k/mac.c +@@ -4743,23 +4743,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb) + wake_up(&ar->txmgmt_empty_waitq); + } + +-int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) ++static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id) + { +- struct sk_buff *msdu = skb; ++ struct sk_buff *msdu; + struct ieee80211_tx_info *info; +- struct ath12k *ar = ctx; +- struct ath12k_base *ab = ar->ab; + + spin_lock_bh(&ar->txmgmt_idr_lock); +- idr_remove(&ar->txmgmt_idr, buf_id); ++ msdu = idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); +- dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, ++ ++ if (!msdu) ++ return; ++ ++ dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, + DMA_TO_DEVICE); + + info = IEEE80211_SKB_CB(msdu); + memset(&info->status, 0, sizeof(info->status)); + +- ath12k_mgmt_over_wmi_tx_drop(ar, skb); ++ ath12k_mgmt_over_wmi_tx_drop(ar, msdu); ++} ++ ++int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) ++{ ++ struct ath12k *ar = ctx; ++ ++ ath12k_mac_tx_mgmt_free(ar, buf_id); + + return 0; + } +@@ -4768,17 +4777,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) + { + struct ieee80211_vif *vif = ctx; + struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); +- struct sk_buff *msdu = skb; + struct ath12k *ar = skb_cb->ar; +- struct ath12k_base *ab = ar->ab; + +- if (skb_cb->vif == vif) { +- spin_lock_bh(&ar->txmgmt_idr_lock); +- idr_remove(&ar->txmgmt_idr, buf_id); +- spin_unlock_bh(&ar->txmgmt_idr_lock); +- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, +- DMA_TO_DEVICE); +- } ++ if (skb_cb->vif == vif) ++ ath12k_mac_tx_mgmt_free(ar, buf_id); + + return 0; + } +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index e883cf80f506d1..c7f62226ebbcc5 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -5594,8 +5594,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n", + *cookie, le16_to_cpu(action_frame->len), freq); + +- ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), +- af_params); ++ ack = brcmf_p2p_send_action_frame(vif->ifp, af_params); + + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, + GFP_KERNEL); +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +index d4492d02e4ea12..33f27c83c33ebc 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +@@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, + /** + * brcmf_p2p_tx_action_frame() - send action frame over fil. + * ++ * @ifp: interface to transmit on. + * @p2p: p2p info struct for vif. + * @af_params: action frame data/info. + * +@@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, + * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action + * frame is transmitted. + */ +-static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, ++static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp, ++ struct brcmf_p2p_info *p2p, + struct brcmf_fil_af_params_le *af_params) + { + struct brcmf_pub *drvr = p2p->cfg->pub; +- struct brcmf_cfg80211_vif *vif; +- struct brcmf_p2p_action_frame *p2p_af; + s32 err = 0; + + brcmf_dbg(TRACE, "Enter\n"); +@@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, + clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); + clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + +- /* check if it is a p2p_presence response */ +- p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data; +- if (p2p_af->subtype == P2P_AF_PRESENCE_RSP) +- vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; +- else +- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; +- +- err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, ++ err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params, + sizeof(*af_params)); + if (err) { + bphy_err(drvr, " sending action frame has failed\n"); +@@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell, + /** + * brcmf_p2p_send_action_frame() - send action frame . + * +- * @cfg: driver private data for cfg80211 interface. +- * @ndev: net device to transmit on. ++ * @ifp: interface to transmit on. + * @af_params: configuration data for action frame. 
+ */ +-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, +- struct net_device *ndev, ++bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp, + struct brcmf_fil_af_params_le *af_params) + { ++ struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_p2p_info *p2p = &cfg->p2p; +- struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_fil_action_frame_le *action_frame; + struct brcmf_config_af_params config_af_params; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; +@@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, + if (af_params->channel) + msleep(P2P_AF_RETRY_DELAY_TIME); + +- ack = !brcmf_p2p_tx_action_frame(p2p, af_params); ++ ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params); + tx_retry++; + dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell, + dwell_jiffies); +@@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, + + WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx); + +- init_completion(&p2p->send_af_done); + INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); + init_completion(&p2p->afx_hdl.act_frm_scan); + init_completion(&p2p->wait_next_af); +@@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced) + pri_ifp = brcmf_get_ifp(cfg->pub, 0); + p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; + ++ init_completion(&p2p->send_af_done); ++ + if (p2pdev_forced) { + err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL); + if (IS_ERR(err_ptr)) { +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +index d2ecee565bf2e2..d3137ebd715825 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +@@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, + int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, + void *data); +-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, +- struct net_device *ndev, ++bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp, + struct brcmf_fil_af_params_le *af_params); + bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, + struct brcmf_bss_info_le *bi); +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +index 15b7d22d3639f2..ae7a01c7ce36d6 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +@@ -135,6 +135,8 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, + if (is_mt7922(phy->mt76->dev)) { + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; ++ he_cap_elem->phy_cap_info[4] |= ++ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; + he_cap_elem->phy_cap_info[8] |= + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; +diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c +index 375a3d6f4b384b..fa81a1c30704bd 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c +@@ -730,7 +730,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, + eht_cap->has_eht = true; + + eht_cap_elem->mac_cap_info[0] = +- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + 
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454, + IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); +diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c +index 832a427279b40a..df4248744d87a0 100644 +--- a/drivers/net/wireless/realtek/rtw88/sdio.c ++++ b/drivers/net/wireless/realtek/rtw88/sdio.c +@@ -143,6 +143,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr, + + static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr) + { ++ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) && ++ !rtw_sdio_is_bus_addr(addr)) ++ return false; ++ + return !rtw_sdio_is_sdio30_supported(rtwdev) || + rtw_sdio_is_bus_addr(addr); + } +diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c +index f5f48f7e6d26e3..1214e7dcc81249 100644 +--- a/drivers/net/wireless/virtual/mac80211_hwsim.c ++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c +@@ -6189,14 +6189,15 @@ static struct genl_family hwsim_genl_family __ro_after_init = { + .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), + }; + +-static void remove_user_radios(u32 portid) ++static void remove_user_radios(u32 portid, int netgroup) + { + struct mac80211_hwsim_data *entry, *tmp; + LIST_HEAD(list); + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { +- if (entry->destroy_on_close && entry->portid == portid) { ++ if (entry->destroy_on_close && entry->portid == portid && ++ entry->netgroup == netgroup) { + list_move(&entry->list, &list); + rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, + hwsim_rht_params); +@@ -6221,7 +6222,7 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, + if (state != NETLINK_URELEASE) + return NOTIFY_DONE; + +- remove_user_radios(notify->portid); ++ remove_user_radios(notify->portid, hwsim_net_get_netgroup(notify->net)); + + if (notify->portid == hwsim_net_get_wmediumd(notify->net)) { + printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" +diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c +index b640aa0bf45e61..5e79cfce8649c1 100644 +--- a/drivers/ntb/hw/epf/ntb_hw_epf.c ++++ b/drivers/ntb/hw/epf/ntb_hw_epf.c +@@ -49,6 +49,7 @@ + #define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */ + + enum pci_barno { ++ NO_BAR = -1, + BAR_0, + BAR_1, + BAR_2, +@@ -57,16 +58,26 @@ enum pci_barno { + BAR_5, + }; + ++enum epf_ntb_bar { ++ BAR_CONFIG, ++ BAR_PEER_SPAD, ++ BAR_DB, ++ BAR_MW1, ++ BAR_MW2, ++ BAR_MW3, ++ BAR_MW4, ++ NTB_BAR_NUM, ++}; ++ ++#define NTB_EPF_MAX_MW_COUNT (NTB_BAR_NUM - BAR_MW1) ++ + struct ntb_epf_dev { + struct ntb_dev ntb; + struct device *dev; + /* Mutex to protect providing commands to NTB EPF */ + struct mutex cmd_lock; + +- enum pci_barno ctrl_reg_bar; +- enum pci_barno peer_spad_reg_bar; +- enum pci_barno db_reg_bar; +- enum pci_barno mw_bar; ++ const enum pci_barno *barno_map; + + unsigned int mw_count; + unsigned int spad_count; +@@ -85,17 +96,6 @@ struct ntb_epf_dev { + + #define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb) + +-struct ntb_epf_data { +- /* BAR that contains both control region and self spad region */ +- enum pci_barno ctrl_reg_bar; +- /* BAR that contains peer spad region */ +- enum pci_barno peer_spad_reg_bar; +- /* BAR that contains Doorbell region and Memory window '1' */ +- enum pci_barno db_reg_bar; +- /* BAR that contains memory windows*/ +- enum pci_barno mw_bar; +-}; +- + static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command, + u32 argument) 
+ { +@@ -144,7 +144,7 @@ static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx) + return -EINVAL; + } + +- return idx + 2; ++ return ndev->barno_map[BAR_MW1 + idx]; + } + + static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx) +@@ -413,7 +413,9 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + return -EINVAL; + } + +- bar = idx + ndev->mw_bar; ++ bar = ntb_epf_mw_to_bar(ndev, idx); ++ if (bar < 0) ++ return bar; + + mw_size = pci_resource_len(ntb->pdev, bar); + +@@ -455,7 +457,9 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + if (idx == 0) + offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET); + +- bar = idx + ndev->mw_bar; ++ bar = ntb_epf_mw_to_bar(ndev, idx); ++ if (bar < 0) ++ return bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar) + offset; +@@ -560,6 +564,11 @@ static int ntb_epf_init_dev(struct ntb_epf_dev *ndev) + ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT); + ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT); + ++ if (ndev->mw_count > NTB_EPF_MAX_MW_COUNT) { ++ dev_err(dev, "Unsupported MW count: %u\n", ndev->mw_count); ++ return -EINVAL; ++ } ++ + return 0; + } + +@@ -596,14 +605,15 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev, + dev_warn(&pdev->dev, "Cannot DMA highmem\n"); + } + +- ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0); ++ ndev->ctrl_reg = pci_iomap(pdev, ndev->barno_map[BAR_CONFIG], 0); + if (!ndev->ctrl_reg) { + ret = -EIO; + goto err_pci_regions; + } + +- if (ndev->peer_spad_reg_bar) { +- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0); ++ if (ndev->barno_map[BAR_PEER_SPAD] != ndev->barno_map[BAR_CONFIG]) { ++ ndev->peer_spad_reg = pci_iomap(pdev, ++ ndev->barno_map[BAR_PEER_SPAD], 0); + if (!ndev->peer_spad_reg) { + ret = -EIO; + goto err_pci_regions; +@@ -614,7 +624,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev, + ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz; + } + +- ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0); ++ ndev->db_reg = pci_iomap(pdev, ndev->barno_map[BAR_DB], 0); + if (!ndev->db_reg) { + ret = -EIO; + goto err_pci_regions; +@@ -659,12 +669,7 @@ static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev) + static int ntb_epf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) + { +- enum pci_barno peer_spad_reg_bar = BAR_1; +- enum pci_barno ctrl_reg_bar = BAR_0; +- enum pci_barno db_reg_bar = BAR_2; +- enum pci_barno mw_bar = BAR_2; + struct device *dev = &pdev->dev; +- struct ntb_epf_data *data; + struct ntb_epf_dev *ndev; + int ret; + +@@ -675,18 +680,10 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev, + if (!ndev) + return -ENOMEM; + +- data = (struct ntb_epf_data *)id->driver_data; +- if (data) { +- peer_spad_reg_bar = data->peer_spad_reg_bar; +- ctrl_reg_bar = data->ctrl_reg_bar; +- db_reg_bar = data->db_reg_bar; +- mw_bar = data->mw_bar; +- } ++ ndev->barno_map = (const enum pci_barno *)id->driver_data; ++ if (!ndev->barno_map) ++ return -EINVAL; + +- ndev->peer_spad_reg_bar = peer_spad_reg_bar; +- ndev->ctrl_reg_bar = ctrl_reg_bar; +- ndev->db_reg_bar = db_reg_bar; +- ndev->mw_bar = mw_bar; + ndev->dev = dev; + + ntb_epf_init_struct(ndev, pdev); +@@ -730,30 +727,36 @@ static void ntb_epf_pci_remove(struct pci_dev *pdev) + ntb_epf_deinit_pci(ndev); + } + +-static const struct ntb_epf_data j721e_data = { +- .ctrl_reg_bar = BAR_0, +- .peer_spad_reg_bar = BAR_1, +- .db_reg_bar = BAR_2, +- .mw_bar = BAR_2, ++static const enum pci_barno 
j721e_map[NTB_BAR_NUM] = { ++ [BAR_CONFIG] = BAR_0, ++ [BAR_PEER_SPAD] = BAR_1, ++ [BAR_DB] = BAR_2, ++ [BAR_MW1] = BAR_2, ++ [BAR_MW2] = BAR_3, ++ [BAR_MW3] = BAR_4, ++ [BAR_MW4] = BAR_5 + }; + +-static const struct ntb_epf_data mx8_data = { +- .ctrl_reg_bar = BAR_0, +- .peer_spad_reg_bar = BAR_0, +- .db_reg_bar = BAR_2, +- .mw_bar = BAR_4, ++static const enum pci_barno mx8_map[NTB_BAR_NUM] = { ++ [BAR_CONFIG] = BAR_0, ++ [BAR_PEER_SPAD] = BAR_0, ++ [BAR_DB] = BAR_2, ++ [BAR_MW1] = BAR_4, ++ [BAR_MW2] = BAR_5, ++ [BAR_MW3] = NO_BAR, ++ [BAR_MW4] = NO_BAR + }; + + static const struct pci_device_id ntb_epf_pci_tbl[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), + .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, +- .driver_data = (kernel_ulong_t)&j721e_data, ++ .driver_data = (kernel_ulong_t)j721e_map, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809), + .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, +- .driver_data = (kernel_ulong_t)&mx8_data, ++ .driver_data = (kernel_ulong_t)mx8_map, + }, + { }, + }; +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 13221cc0d17d43..78d00e25a17909 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -4414,8 +4414,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) + * checking that they started once before, hence are reconnecting back. + */ + if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && +- nvme_discovery_ctrl(ctrl)) ++ nvme_discovery_ctrl(ctrl)) { ++ if (!ctrl->kato) { ++ nvme_stop_keep_alive(ctrl); ++ ctrl->kato = NVME_DEFAULT_KATO; ++ nvme_start_keep_alive(ctrl); ++ } + nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); ++ } + + if (ctrl->queue_count > 1) { + nvme_queue_scan(ctrl); +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c +index 3e0da2422b3343..bf9ab07257642c 100644 +--- a/drivers/nvme/host/fc.c ++++ b/drivers/nvme/host/fc.c +@@ -3024,11 +3024,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) + + ++ctrl->ctrl.nr_reconnects; + +- if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) ++ spin_lock_irqsave(&ctrl->rport->lock, flags); ++ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { ++ spin_unlock_irqrestore(&ctrl->rport->lock, flags); + return -ENODEV; ++ } + +- if (nvme_fc_ctlr_active_on_rport(ctrl)) ++ if (nvme_fc_ctlr_active_on_rport(ctrl)) { ++ spin_unlock_irqrestore(&ctrl->rport->lock, flags); + return -ENOTUNIQ; ++ } ++ spin_unlock_irqrestore(&ctrl->rport->lock, flags); + + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: create association : host wwpn 0x%016llx " +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c +index a15e764bae35b7..188b9f1bdaca14 100644 +--- a/drivers/nvme/target/fc.c ++++ b/drivers/nvme/target/fc.c +@@ -1090,6 +1090,14 @@ nvmet_fc_delete_assoc_work(struct work_struct *work) + static void + nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) + { ++ int terminating; ++ ++ terminating = atomic_xchg(&assoc->terminating, 1); ++ ++ /* if already terminating, do nothing */ ++ if (terminating) ++ return; ++ + nvmet_fc_tgtport_get(assoc->tgtport); + if (!queue_work(nvmet_wq, &assoc->del_work)) + nvmet_fc_tgtport_put(assoc->tgtport); +@@ -1209,13 +1217,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) + { + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + unsigned long flags; +- int i, terminating; +- +- terminating = atomic_xchg(&assoc->terminating, 1); +- +- /* if already terminating, do nothing */ +- if (terminating) +- return; ++ int i; 
+ + spin_lock_irqsave(&tgtport->lock, flags); + list_del_rcu(&assoc->a_list); +diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c +index 741e10a575ec75..675b7ea6ff7844 100644 +--- a/drivers/pci/controller/cadence/pcie-cadence-host.c ++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c +@@ -452,7 +452,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); + +- if (pcie->ops->cpu_addr_fixup) ++ if (pcie->ops && pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | +diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c +index 4251fac5e31065..a1b66dbfc10f82 100644 +--- a/drivers/pci/controller/cadence/pcie-cadence.c ++++ b/drivers/pci/controller/cadence/pcie-cadence.c +@@ -90,7 +90,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); + + /* Set the CPU address */ +- if (pcie->ops->cpu_addr_fixup) ++ if (pcie->ops && pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | +@@ -120,7 +120,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + } + + /* Set the CPU address */ +- if (pcie->ops->cpu_addr_fixup) ++ if (pcie->ops && pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | +diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h +index 9efb71cbe6996c..d2c310b7fc99bd 100644 +--- a/drivers/pci/controller/cadence/pcie-cadence.h ++++ b/drivers/pci/controller/cadence/pcie-cadence.h +@@ -494,7 +494,7 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) + + static inline int cdns_pcie_start_link(struct cdns_pcie *pcie) + { +- if (pcie->ops->start_link) ++ if (pcie->ops && pcie->ops->start_link) + return pcie->ops->start_link(pcie); + + return 0; +@@ -502,13 +502,13 @@ static inline int cdns_pcie_start_link(struct cdns_pcie *pcie) + + static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie) + { +- if (pcie->ops->stop_link) ++ if (pcie->ops && pcie->ops->stop_link) + pcie->ops->stop_link(pcie); + } + + static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie) + { +- if (pcie->ops->link_up) ++ if (pcie->ops && pcie->ops->link_up) + return pcie->ops->link_up(pcie); + + return true; +diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c +index 717af1b757f0a5..46b12e157bebe9 100644 +--- a/drivers/pci/controller/dwc/pcie-designware.c ++++ b/drivers/pci/controller/dwc/pcie-designware.c +@@ -927,9 +927,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci) + char name[6]; + int ret; + +- if (pci->edma.nr_irqs == 1) +- return 0; +- else if (pci->edma.nr_irqs > 1) ++ if (pci->edma.nr_irqs > 1) + return pci->edma.nr_irqs != ch_cnt ? 
-EINVAL : 0; + + ret = platform_get_irq_byname_optional(pdev, "dma"); +diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c +index b8becc7257cda8..0f1e431bbfc20a 100644 +--- a/drivers/pci/p2pdma.c ++++ b/drivers/pci/p2pdma.c +@@ -351,7 +351,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, + pages_free: + devm_memunmap_pages(&pdev->dev, pgmap); + pgmap_free: +- devm_kfree(&pdev->dev, pgmap); ++ devm_kfree(&pdev->dev, p2p_pgmap); + return error; + } + EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource); +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index 1705d2d0ed1268..11a90b55c1873c 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -1598,7 +1598,7 @@ static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env) + return 0; + } + +-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) ++#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) || defined(CONFIG_S390) + /** + * pci_uevent_ers - emit a uevent during recovery path of PCI device + * @pdev: PCI device undergoing error recovery +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index df7f7e2ed0064a..9a3f6bb60eb4d8 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -1257,6 +1257,11 @@ int pci_power_up(struct pci_dev *dev) + return -EIO; + } + ++ if (pci_dev_is_disconnected(dev)) { ++ dev->current_state = PCI_D3cold; ++ return -EIO; ++ } ++ + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + if (PCI_POSSIBLE_ERROR(pmcsr)) { + pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n", +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index d67ea16e69e6ac..30a5f809ee798d 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -2712,6 +2712,7 @@ static void quirk_disable_msi(struct pci_dev *dev) + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RDC, 0x1031, quirk_disable_msi); + + /* + * The APC bridge device in AMD 780 family northbridges has some random +@@ -3824,7 +3825,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset); + */ + static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev) + { +- if (pdev->is_hotplug_bridge && ++ if (pdev->is_pciehp && + (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C || + pdev->revision <= 1)) + pdev->no_msi = 1; +diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c +index 8d93a830ab8bff..a24df36e5c3766 100644 +--- a/drivers/phy/cadence/cdns-dphy.c ++++ b/drivers/phy/cadence/cdns-dphy.c +@@ -145,7 +145,7 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy, + + dlane_bps = opts->hs_clk_rate; + +- if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL) ++ if (dlane_bps > 2500000000UL || dlane_bps < 80000000UL) + return -EINVAL; + else if (dlane_bps >= 1250000000) + cfg->pll_opdiv = 1; +@@ -155,6 +155,8 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy, + cfg->pll_opdiv = 4; + else if (dlane_bps >= 160000000) + cfg->pll_opdiv = 8; ++ else if (dlane_bps >= 80000000) ++ cfg->pll_opdiv = 16; + + cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv * + cfg->pll_ipdiv, +diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c +index 683b19bc411a84..59546301a6b9ee 100644 +--- 
a/drivers/phy/renesas/r8a779f0-ether-serdes.c ++++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c +@@ -49,6 +49,13 @@ static void r8a779f0_eth_serdes_write32(void __iomem *addr, u32 offs, u32 bank, + iowrite32(data, addr + offs); + } + ++static u32 r8a779f0_eth_serdes_read32(void __iomem *addr, u32 offs, u32 bank) ++{ ++ iowrite32(bank, addr + R8A779F0_ETH_SERDES_BANK_SELECT); ++ ++ return ioread32(addr + offs); ++} ++ + static int + r8a779f0_eth_serdes_reg_wait(struct r8a779f0_eth_serdes_channel *channel, + u32 offs, u32 bank, u32 mask, u32 expected) +@@ -261,6 +268,7 @@ static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel + *channel) + { + int ret; ++ u32 val; + + ret = r8a779f0_eth_serdes_chan_setting(channel); + if (ret) +@@ -274,6 +282,26 @@ static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel + + r8a779f0_eth_serdes_write32(channel->addr, 0x03d0, 0x380, 0x0000); + ++ val = r8a779f0_eth_serdes_read32(channel->addr, 0x00c0, 0x180); ++ r8a779f0_eth_serdes_write32(channel->addr, 0x00c0, 0x180, val | BIT(8)); ++ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0100, 0x180, BIT(0), 1); ++ if (ret) ++ return ret; ++ r8a779f0_eth_serdes_write32(channel->addr, 0x00c0, 0x180, val & ~BIT(8)); ++ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0100, 0x180, BIT(0), 0); ++ if (ret) ++ return ret; ++ ++ val = r8a779f0_eth_serdes_read32(channel->addr, 0x0144, 0x180); ++ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, val | BIT(4)); ++ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0180, 0x180, BIT(0), 1); ++ if (ret) ++ return ret; ++ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, val & ~BIT(4)); ++ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0180, 0x180, BIT(0), 0); ++ if (ret) ++ return ret; ++ + return r8a779f0_eth_serdes_monitor_linkup(channel); + } + +diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c +index 98c92d6c482fef..279e19e7546b66 100644 +--- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c ++++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c +@@ -87,10 +87,11 @@ struct dphy_reg { + u32 offset; + u32 mask; + u32 shift; ++ u8 valid; + }; + + #define PHY_REG(_offset, _width, _shift) \ +- { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, } ++ { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, .valid = 1, } + + static const struct dphy_reg rk1808_grf_dphy_regs[] = { + [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK1808_GRF_PD_VI_CON_OFFSET, 4, 0), +@@ -145,7 +146,7 @@ static inline void write_grf_reg(struct rockchip_inno_csidphy *priv, + const struct dphy_drv_data *drv_data = priv->drv_data; + const struct dphy_reg *reg = &drv_data->grf_regs[index]; + +- if (reg->offset) ++ if (reg->valid) + regmap_write(priv->grf, reg->offset, + HIWORD_UPDATE(value, reg->mask, reg->shift)); + } +diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c +index 152c35bce8ecc0..94b11a23829f65 100644 +--- a/drivers/pinctrl/pinctrl-keembay.c ++++ b/drivers/pinctrl/pinctrl-keembay.c +@@ -1606,7 +1606,8 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc) + * being part of 8 (hw maximum) globally unique muxes. 
+ */ + kpc->nfuncs = 0; +- keembay_funcs = kcalloc(kpc->npins * 8, sizeof(*keembay_funcs), GFP_KERNEL); ++ keembay_funcs = devm_kcalloc(kpc->dev, kpc->npins * 8, ++ sizeof(*keembay_funcs), GFP_KERNEL); + if (!keembay_funcs) + return -ENOMEM; + +@@ -1637,7 +1638,9 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc) + } + + /* Reallocate memory based on actual number of functions */ +- new_funcs = krealloc(keembay_funcs, kpc->nfuncs * sizeof(*new_funcs), GFP_KERNEL); ++ new_funcs = devm_krealloc_array(kpc->dev, keembay_funcs, ++ kpc->nfuncs, sizeof(*new_funcs), ++ GFP_KERNEL); + if (!new_funcs) { + kfree(keembay_funcs); + return -ENOMEM; +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c +index 6c670203b3ac28..7684039be10cb2 100644 +--- a/drivers/pinctrl/pinctrl-single.c ++++ b/drivers/pinctrl/pinctrl-single.c +@@ -587,8 +587,10 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev, + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_BIAS_PULL_UP: +- if (arg) ++ if (arg) { + pcs_pinconf_clear_bias(pctldev, pin); ++ data = pcs->read(pcs->base + offset); ++ } + fallthrough; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + data &= ~func->conf[i].mask; +diff --git a/drivers/pmdomain/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c +index d62a776c89a121..e592f819c8fa07 100644 +--- a/drivers/pmdomain/apple/pmgr-pwrstate.c ++++ b/drivers/pmdomain/apple/pmgr-pwrstate.c +@@ -306,6 +306,7 @@ static int apple_pmgr_ps_probe(struct platform_device *pdev) + } + + static const struct of_device_id apple_pmgr_ps_of_match[] = { ++ { .compatible = "apple,t8103-pmgr-pwrstate" }, + { .compatible = "apple,pmgr-pwrstate" }, + {} + }; +diff --git a/drivers/pmdomain/samsung/exynos-pm-domains.c b/drivers/pmdomain/samsung/exynos-pm-domains.c +index 9b502e8751d18c..0f065748f9ec07 100644 +--- a/drivers/pmdomain/samsung/exynos-pm-domains.c ++++ b/drivers/pmdomain/samsung/exynos-pm-domains.c +@@ -92,13 +92,14 @@ static const struct of_device_id exynos_pm_domain_of_match[] = { + { }, + }; + +-static const char *exynos_get_domain_name(struct device_node *node) ++static const char *exynos_get_domain_name(struct device *dev, ++ struct device_node *node) + { + const char *name; + + if (of_property_read_string(node, "label", &name) < 0) + name = kbasename(node->full_name); +- return kstrdup_const(name, GFP_KERNEL); ++ return devm_kstrdup_const(dev, name, GFP_KERNEL); + } + + static int exynos_pd_probe(struct platform_device *pdev) +@@ -115,15 +116,13 @@ static int exynos_pd_probe(struct platform_device *pdev) + if (!pd) + return -ENOMEM; + +- pd->pd.name = exynos_get_domain_name(np); ++ pd->pd.name = exynos_get_domain_name(dev, np); + if (!pd->pd.name) + return -ENOMEM; + + pd->base = of_iomap(np, 0); +- if (!pd->base) { +- kfree_const(pd->pd.name); ++ if (!pd->base) + return -ENODEV; +- } + + pd->pd.power_off = exynos_pd_power_off; + pd->pd.power_on = exynos_pd_power_on; +diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c +index 190e8a4cfa97f4..0c993780d3ef29 100644 +--- a/drivers/power/supply/qcom_battmgr.c ++++ b/drivers/power/supply/qcom_battmgr.c +@@ -29,8 +29,9 @@ enum qcom_battmgr_variant { + #define NOTIF_BAT_PROPERTY 0x30 + #define NOTIF_USB_PROPERTY 0x32 + #define NOTIF_WLS_PROPERTY 0x34 +-#define NOTIF_BAT_INFO 0x81 + #define NOTIF_BAT_STATUS 0x80 ++#define NOTIF_BAT_INFO 0x81 ++#define NOTIF_BAT_CHARGING_STATE 0x83 + + #define BATTMGR_BAT_INFO 0x9 + +@@ -940,12 +941,14 @@ static void qcom_battmgr_notification(struct 
qcom_battmgr *battmgr, + } + + notification = le32_to_cpu(msg->notification); ++ notification &= 0xff; + switch (notification) { + case NOTIF_BAT_INFO: + battmgr->info.valid = false; + fallthrough; + case NOTIF_BAT_STATUS: + case NOTIF_BAT_PROPERTY: ++ case NOTIF_BAT_CHARGING_STATE: + power_supply_changed(battmgr->bat_psy); + break; + case NOTIF_USB_PROPERTY: +@@ -975,7 +978,8 @@ static void qcom_battmgr_sc8280xp_strcpy(char *dest, const char *src) + + static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry) + { +- if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN)) ++ if ((!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN)) || ++ (!strncmp(chemistry, "OOI", BATTMGR_CHEMISTRY_LEN))) + return POWER_SUPPLY_TECHNOLOGY_LION; + if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN)) + return POWER_SUPPLY_TECHNOLOGY_LIPO; +diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c +index f4adde4492707b..d405c3657ce93c 100644 +--- a/drivers/power/supply/sbs-charger.c ++++ b/drivers/power/supply/sbs-charger.c +@@ -154,8 +154,7 @@ static const struct regmap_config sbs_regmap = { + .val_format_endian = REGMAP_ENDIAN_LITTLE, /* since based on SMBus */ + }; + +-static const struct power_supply_desc sbs_desc = { +- .name = "sbs-charger", ++static const struct power_supply_desc sbs_default_desc = { + .type = POWER_SUPPLY_TYPE_MAINS, + .properties = sbs_properties, + .num_properties = ARRAY_SIZE(sbs_properties), +@@ -165,9 +164,20 @@ static const struct power_supply_desc sbs_desc = { + static int sbs_probe(struct i2c_client *client) + { + struct power_supply_config psy_cfg = {}; ++ struct power_supply_desc *sbs_desc; + struct sbs_info *chip; + int ret, val; + ++ sbs_desc = devm_kmemdup(&client->dev, &sbs_default_desc, ++ sizeof(*sbs_desc), GFP_KERNEL); ++ if (!sbs_desc) ++ return -ENOMEM; ++ ++ sbs_desc->name = devm_kasprintf(&client->dev, GFP_KERNEL, "sbs-%s", ++ dev_name(&client->dev)); ++ if (!sbs_desc->name) ++ return -ENOMEM; ++ + chip = devm_kzalloc(&client->dev, sizeof(struct sbs_info), GFP_KERNEL); + if (!chip) + return -ENOMEM; +@@ -191,7 +201,7 @@ static int sbs_probe(struct i2c_client *client) + return dev_err_probe(&client->dev, ret, "Failed to get device status\n"); + chip->last_state = val; + +- chip->power_supply = devm_power_supply_register(&client->dev, &sbs_desc, &psy_cfg); ++ chip->power_supply = devm_power_supply_register(&client->dev, sbs_desc, &psy_cfg); + if (IS_ERR(chip->power_supply)) + return dev_err_probe(&client->dev, PTR_ERR(chip->power_supply), + "Failed to register power supply\n"); +diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c +index 0682bb340221ab..d252d018bfe77e 100644 +--- a/drivers/ptp/ptp_clock.c ++++ b/drivers/ptp/ptp_clock.c +@@ -83,6 +83,9 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp + return -EBUSY; + } + ++ if (!timespec64_valid_settod(tp)) ++ return -EINVAL; ++ + return ptp->info->settime64(ptp->info, tp); + } + +@@ -113,7 +116,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) + ops = ptp->info; + + if (tx->modes & ADJ_SETOFFSET) { +- struct timespec64 ts; ++ struct timespec64 ts, ts2; + ktime_t kt; + s64 delta; + +@@ -126,6 +129,14 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) + if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + ++ /* Make sure the offset is valid */ ++ err = ptp_clock_gettime(pc, &ts2); ++ if (err) ++ return err; ++ ts2 = timespec64_add(ts2, ts); ++ 
if (!timespec64_valid_settod(&ts2)) ++ return -EINVAL; ++ + kt = timespec64_to_ktime(ts); + delta = ktime_to_ns(kt); + err = ops->adjtime(ops, delta); +diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c +index 55130efae9b8b6..4477bdeb9e60f5 100644 +--- a/drivers/regulator/fixed.c ++++ b/drivers/regulator/fixed.c +@@ -286,6 +286,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) + ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev), + "Failed to register regulator: %ld\n", + PTR_ERR(drvdata->dev)); ++ gpiod_put(cfg.ena_gpiod); + return ret; + } + +diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c +index 769c6d6d6a7316..58d5b85e58cdad 100644 +--- a/drivers/remoteproc/qcom_q6v5.c ++++ b/drivers/remoteproc/qcom_q6v5.c +@@ -164,6 +164,11 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data) + { + struct qcom_q6v5 *q6v5 = data; + ++ if (q6v5->handover_issued) { ++ dev_err(q6v5->dev, "Handover signaled, but it already happened\n"); ++ return IRQ_HANDLED; ++ } ++ + if (q6v5->handover) + q6v5->handover(q6v5); + +diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c +index 36a55f7ffa64d5..c39bd2bf2c1e78 100644 +--- a/drivers/remoteproc/wkup_m3_rproc.c ++++ b/drivers/remoteproc/wkup_m3_rproc.c +@@ -148,7 +148,9 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev) + return -ENODEV; + } + +- pm_runtime_enable(&pdev->dev); ++ ret = devm_pm_runtime_enable(dev); ++ if (ret < 0) ++ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n"); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n"); +@@ -219,7 +221,6 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev) + rproc_free(rproc); + err: + pm_runtime_put_noidle(dev); +- pm_runtime_disable(dev); + return ret; + } + +@@ -230,7 +231,6 @@ static void wkup_m3_rproc_remove(struct platform_device *pdev) + rproc_del(rproc); + rproc_free(rproc); + pm_runtime_put_sync(&pdev->dev); +- pm_runtime_disable(&pdev->dev); + } + + #ifdef CONFIG_PM +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c +index 502571f0c203fa..05a54f4d4d9a6a 100644 +--- a/drivers/rtc/rtc-pcf2127.c ++++ b/drivers/rtc/rtc-pcf2127.c +@@ -41,6 +41,7 @@ + #define PCF2127_BIT_CTRL2_AF BIT(4) + #define PCF2127_BIT_CTRL2_TSF2 BIT(5) + #define PCF2127_BIT_CTRL2_WDTF BIT(6) ++#define PCF2127_BIT_CTRL2_MSF BIT(7) + /* Control register 3 */ + #define PCF2127_REG_CTRL3 0x02 + #define PCF2127_BIT_CTRL3_BLIE BIT(0) +@@ -94,7 +95,8 @@ + #define PCF2127_CTRL2_IRQ_MASK ( \ + PCF2127_BIT_CTRL2_AF | \ + PCF2127_BIT_CTRL2_WDTF | \ +- PCF2127_BIT_CTRL2_TSF2) ++ PCF2127_BIT_CTRL2_TSF2 | \ ++ PCF2127_BIT_CTRL2_MSF) + + #define PCF2127_MAX_TS_SUPPORTED 4 + +@@ -526,6 +528,21 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127) + set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status); + } + ++ /* ++ * When using interrupt pin (INT A) as watchdog output, only allow ++ * watchdog interrupt (PCF2131_BIT_INT_WD_CD) and disable (mask) all ++ * other interrupts. 
++ */ ++ if (pcf2127->cfg->type == PCF2131) { ++ ret = regmap_write(pcf2127->regmap, ++ PCF2131_REG_INT_A_MASK1, ++ PCF2131_BIT_INT_BLIE | ++ PCF2131_BIT_INT_BIE | ++ PCF2131_BIT_INT_AIE | ++ PCF2131_BIT_INT_SI | ++ PCF2131_BIT_INT_MI); ++ } ++ + return devm_watchdog_register_device(dev, &pcf2127->wdd); + } + +diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c +index aabe62c283a150..7e9f7cb90c2887 100644 +--- a/drivers/rtc/rtc-rx8025.c ++++ b/drivers/rtc/rtc-rx8025.c +@@ -316,7 +316,7 @@ static int rx8025_init_client(struct i2c_client *client) + return hour_reg; + rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224); + } else { +- rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224); ++ rx8025->is_24 = (ctrl[0] & RX8025_BIT_CTRL1_1224); + } + out: + return err; +diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h +index 7dcac3b6baa7ee..992250ca8b9fd4 100644 +--- a/drivers/scsi/libfc/fc_encode.h ++++ b/drivers/scsi/libfc/fc_encode.h +@@ -354,7 +354,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport, + put_unaligned_be16(len, &entry->len); + snprintf((char *)&entry->value, + FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN, +- "%s v%s", ++ "%.62s v%.62s", + init_utsname()->sysname, + init_utsname()->release); + +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h +index 8d2e8d05bbc05f..52b14671eaa941 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.h ++++ b/drivers/scsi/lpfc/lpfc_debugfs.h +@@ -44,6 +44,9 @@ + /* hbqinfo output buffer size */ + #define LPFC_HBQINFO_SIZE 8192 + ++/* hdwqinfo output buffer size */ ++#define LPFC_HDWQINFO_SIZE 8192 ++ + /* nvmestat output buffer size */ + #define LPFC_NVMESTAT_SIZE 8192 + #define LPFC_IOKTIME_SIZE 8192 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index ebe84bb7bb3ddf..2e9972a5878103 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -12094,7 +12094,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, + sglq_entry->state = SGL_FREED; + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, + iflag); +- ++ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI | ++ LOG_DISCOVERY | LOG_NODE, ++ "0732 ELS XRI ABORT on Node: ndlp=x%px " ++ "xri=x%x\n", ++ ndlp, xri); + if (ndlp) { + lpfc_set_rrq_active(phba, ndlp, + sglq_entry->sli4_lxritag, +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 7c8e0e1d36da9b..b0eac09de5ad50 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -3047,13 +3047,6 @@ lpfc_cleanup(struct lpfc_vport *vport) + lpfc_vmid_vport_cleanup(vport); + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { +- if (vport->port_type != LPFC_PHYSICAL_PORT && +- ndlp->nlp_DID == Fabric_DID) { +- /* Just free up ndlp with Fabric_DID for vports */ +- lpfc_nlp_put(ndlp); +- continue; +- } +- + if (ndlp->nlp_DID == Fabric_Cntl_DID && + ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + lpfc_nlp_put(ndlp); +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index dcbb2432c978bc..4734694f59a4c6 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -5920,7 +5920,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport) + /** + * lpfc_reset_flush_io_context - + * @vport: The virtual port (scsi_host) for the flush context +- * @tgt_id: If aborting by Target contect - specifies the target id ++ * @tgt_id: If aborting by Target context - specifies the target id + * @lun_id: If aborting by Lun 
context - specifies the lun id + * @context: specifies the context level to flush at. + * +@@ -6094,8 +6094,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + spin_unlock_irqrestore(&pnode->lock, flags); + } +- lpfc_reset_flush_io_context(vport, tgt_id, lun_id, +- LPFC_CTX_TGT); ++ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, ++ LPFC_CTX_TGT); ++ if (status != SUCCESS) { ++ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, ++ "0726 Target Reset flush status x%x\n", ++ status); ++ return status; ++ } + return FAST_IO_FAIL; + } + +@@ -6191,7 +6197,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) + int rc, ret = SUCCESS; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, +- "3172 SCSI layer issued Host Reset Data:\n"); ++ "3172 SCSI layer issued Host Reset\n"); + + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + lpfc_offline(phba); +diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c +index b03e4b8cb67d66..b6ae7ba6de5233 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c +@@ -2117,6 +2117,8 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) + { + int retval = 0; + u16 num_queues = 0, i = 0, msix_count_op_q = 1; ++ u32 ioc_status; ++ enum mpi3mr_iocstate ioc_state; + + num_queues = min_t(int, mrioc->facts.max_op_reply_q, + mrioc->facts.max_op_req_q); +@@ -2172,6 +2174,14 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) + retval = -1; + goto out_failed; + } ++ ioc_status = readl(&mrioc->sysif_regs->ioc_status); ++ ioc_state = mpi3mr_get_iocstate(mrioc); ++ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || ++ ioc_state != MRIOC_STATE_READY) { ++ mpi3mr_print_fault_info(mrioc); ++ retval = -1; ++ goto out_failed; ++ } + mrioc->num_op_reply_q = mrioc->num_op_req_q = i; + ioc_info(mrioc, + "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n", +diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c +index 7d6e4fe31ceed8..02c970575464c9 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c +@@ -166,6 +166,9 @@ _transport_convert_phy_link_rate(u8 link_rate) + case MPI25_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; ++ case MPI26_SAS_NEG_LINK_RATE_22_5: ++ rc = SAS_LINK_RATE_22_5_GBPS; ++ break; + case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; +diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c +index 5c26a13ffbd261..d3ff212d28b2e1 100644 +--- a/drivers/scsi/pm8001/pm8001_ctl.c ++++ b/drivers/scsi/pm8001/pm8001_ctl.c +@@ -534,23 +534,25 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, + char *str = buf; + u32 read_size = + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024; +- static u32 start, end, count; + u32 max_read_times = 32; + u32 max_count = (read_size * 1024) / (max_read_times * 4); + u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr; + +- if ((count % max_count) == 0) { +- start = 0; +- end = max_read_times; +- count = 0; ++ mutex_lock(&pm8001_ha->iop_log_lock); ++ ++ if ((pm8001_ha->iop_log_count % max_count) == 0) { ++ pm8001_ha->iop_log_start = 0; ++ pm8001_ha->iop_log_end = max_read_times; ++ pm8001_ha->iop_log_count = 0; + } else { +- start = end; +- end = end + max_read_times; ++ pm8001_ha->iop_log_start = pm8001_ha->iop_log_end; ++ pm8001_ha->iop_log_end = pm8001_ha->iop_log_end + max_read_times; + } + +- for 
(; start < end; start++) +- str += sprintf(str, "%08x ", *(temp+start)); +- count++; ++ for (; pm8001_ha->iop_log_start < pm8001_ha->iop_log_end; pm8001_ha->iop_log_start++) ++ str += sprintf(str, "%08x ", *(temp+pm8001_ha->iop_log_start)); ++ pm8001_ha->iop_log_count++; ++ mutex_unlock(&pm8001_ha->iop_log_lock); + return str - buf; + } + static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); +@@ -680,7 +682,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer; +- u32 ret; ++ int ret; + u32 length = 1024 * 5 + sizeof(*payload) - 1; + + if (pm8001_ha->fw_image->size > 4096) { +diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c +index c2f6151cbd2d02..00664bd2caab18 100644 +--- a/drivers/scsi/pm8001/pm8001_init.c ++++ b/drivers/scsi/pm8001/pm8001_init.c +@@ -529,6 +529,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, + pm8001_ha->id = pm8001_id++; + pm8001_ha->logging_level = logging_level; + pm8001_ha->non_fatal_count = 0; ++ mutex_init(&pm8001_ha->iop_log_lock); + if (link_rate >= 1 && link_rate <= 15) + pm8001_ha->link_rate = (link_rate << 8); + else { +diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h +index 2fadd353f1c13d..72cd1523235ca3 100644 +--- a/drivers/scsi/pm8001/pm8001_sas.h ++++ b/drivers/scsi/pm8001/pm8001_sas.h +@@ -543,6 +543,10 @@ struct pm8001_hba_info { + u32 ci_offset; + u32 pi_offset; + u32 max_memcnt; ++ u32 iop_log_start; ++ u32 iop_log_end; ++ u32 iop_log_count; ++ struct mutex iop_log_lock; + }; + + struct pm8001_work { +diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c +index 3f759121dc00a4..67e9ac3d08ecc8 100644 +--- a/drivers/soc/aspeed/aspeed-socinfo.c ++++ b/drivers/soc/aspeed/aspeed-socinfo.c +@@ -27,6 +27,10 @@ static struct { + { "AST2620", 0x05010203 }, + { "AST2605", 0x05030103 }, + { "AST2625", 0x05030403 }, ++ /* AST2700 */ ++ { "AST2750", 0x06000003 }, ++ { "AST2700", 0x06000103 }, ++ { "AST2720", 0x06000203 }, + }; + + static const char *siliconid_to_name(u32 siliconid) +diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c +index 2e8568d6cde948..aead7dd482ea34 100644 +--- a/drivers/soc/qcom/smem.c ++++ b/drivers/soc/qcom/smem.c +@@ -870,7 +870,7 @@ static u32 qcom_smem_get_item_count(struct qcom_smem *smem) + if (IS_ERR_OR_NULL(ptable)) + return SMEM_ITEM_COUNT; + +- info = (struct smem_info *)&ptable->entry[ptable->num_entries]; ++ info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)]; + if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) + return SMEM_ITEM_COUNT; + +diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c +index 402cf939c03263..a6b3217c3ccef6 100644 +--- a/drivers/soc/tegra/fuse/fuse-tegra30.c ++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c +@@ -116,6 +116,124 @@ const struct tegra_fuse_soc tegra30_fuse_soc = { + #endif + + #ifdef CONFIG_ARCH_TEGRA_114_SOC ++static const struct nvmem_cell_info tegra114_fuse_cells[] = { ++ { ++ .name = "tsensor-cpu1", ++ .offset = 0x084, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-cpu2", ++ .offset = 0x088, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-common", ++ .offset = 0x08c, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-cpu0", ++ .offset = 0x098, ++ .bytes = 4, ++ 
.bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "xusb-pad-calibration", ++ .offset = 0x0f0, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-cpu3", ++ .offset = 0x12c, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-gpu", ++ .offset = 0x154, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-mem0", ++ .offset = 0x158, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-mem1", ++ .offset = 0x15c, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, { ++ .name = "tsensor-pllx", ++ .offset = 0x160, ++ .bytes = 4, ++ .bit_offset = 0, ++ .nbits = 32, ++ }, ++}; ++ ++static const struct nvmem_cell_lookup tegra114_fuse_lookups[] = { ++ { ++ .nvmem_name = "fuse", ++ .cell_name = "xusb-pad-calibration", ++ .dev_id = "7009f000.padctl", ++ .con_id = "calibration", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-common", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "common", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-cpu0", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "cpu0", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-cpu1", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "cpu1", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-cpu2", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "cpu2", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-cpu3", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "cpu3", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-mem0", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "mem0", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-mem1", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "mem1", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-gpu", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "gpu", ++ }, { ++ .nvmem_name = "fuse", ++ .cell_name = "tsensor-pllx", ++ .dev_id = "700e2000.thermal-sensor", ++ .con_id = "pllx", ++ }, ++}; ++ + static const struct tegra_fuse_info tegra114_fuse_info = { + .read = tegra30_fuse_read, + .size = 0x2a0, +@@ -126,6 +244,10 @@ const struct tegra_fuse_soc tegra114_fuse_soc = { + .init = tegra30_fuse_init, + .speedo_init = tegra114_init_speedo_data, + .info = &tegra114_fuse_info, ++ .lookups = tegra114_fuse_lookups, ++ .num_lookups = ARRAY_SIZE(tegra114_fuse_lookups), ++ .cells = tegra114_fuse_cells, ++ .num_cells = ARRAY_SIZE(tegra114_fuse_cells), + .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = false, + }; +diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c +index 69b6c87c5525e0..41e1fb27a9f348 100644 +--- a/drivers/spi/spi-loopback-test.c ++++ b/drivers/spi/spi-loopback-test.c +@@ -447,7 +447,7 @@ static void spi_test_dump_message(struct spi_device *spi, + int i; + u8 b; + +- dev_info(&spi->dev, " spi_msg@%pK\n", msg); ++ dev_info(&spi->dev, " spi_msg@%p\n", msg); + if (msg->status) + dev_info(&spi->dev, " status: %i\n", + msg->status); +@@ -457,15 +457,15 @@ static void spi_test_dump_message(struct spi_device *spi, + msg->actual_length); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { +- dev_info(&spi->dev, " spi_transfer@%pK\n", xfer); ++ dev_info(&spi->dev, " spi_transfer@%p\n", xfer); + dev_info(&spi->dev, " len: %i\n", xfer->len); +- dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf); ++ dev_info(&spi->dev, " tx_buf: %p\n", xfer->tx_buf); + if (dump_data && xfer->tx_buf) + spi_test_print_hex_dump(" TX: ", + xfer->tx_buf, + 
xfer->len); + +- dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf); ++ dev_info(&spi->dev, " rx_buf: %p\n", xfer->rx_buf); + if (dump_data && xfer->rx_buf) + spi_test_print_hex_dump(" RX: ", + xfer->rx_buf, +@@ -559,7 +559,7 @@ static int spi_check_rx_ranges(struct spi_device *spi, + /* if still not found then something has modified too much */ + /* we could list the "closest" transfer here... */ + dev_err(&spi->dev, +- "loopback strangeness - rx changed outside of allowed range at: %pK\n", ++ "loopback strangeness - rx changed outside of allowed range at: %p\n", + addr); + /* do not return, only set ret, + * so that we list all addresses +@@ -697,7 +697,7 @@ static int spi_test_translate(struct spi_device *spi, + } + + dev_err(&spi->dev, +- "PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n", ++ "PointerRange [%p:%p[ not in range [%p:%p[ or [%p:%p[\n", + *ptr, *ptr + len, + RX(0), RX(SPI_TEST_MAX_SIZE), + TX(0), TX(SPI_TEST_MAX_SIZE)); +diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c +index 7cce2d2ab9ca61..a1696672d12fea 100644 +--- a/drivers/spi/spi-rpc-if.c ++++ b/drivers/spi/spi-rpc-if.c +@@ -193,6 +193,8 @@ static int __maybe_unused rpcif_spi_resume(struct device *dev) + { + struct spi_controller *ctlr = dev_get_drvdata(dev); + ++ rpcif_hw_init(dev, false); ++ + return spi_controller_resume(ctlr); + } + +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index cfb6755c0730f4..5adba36255a1aa 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -2716,6 +2716,16 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, + acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, + sizeof(spi->modalias)); + ++ /* ++ * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case ++ * the GPIO controller does not have a driver yet. This needs to be done ++ * here too, because this call sets the GPIO direction and/or bias. ++ * Setting these needs to be done even if there is no driver, in which ++ * case spi_probe() will never get called. 
++ */ ++ if (spi->irq < 0) ++ spi->irq = acpi_dev_gpio_irq_get(adev, 0); ++ + acpi_device_set_enumerated(adev); + + adev->power.flags.ignore_parent = true; +diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c +index d7ad16f262b2eb..976912f3bb5b4f 100644 +--- a/drivers/tee/tee_core.c ++++ b/drivers/tee/tee_core.c +@@ -889,7 +889,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + + if (!teedesc || !teedesc->name || !teedesc->ops || + !teedesc->ops->get_version || !teedesc->ops->open || +- !teedesc->ops->release || !pool) ++ !teedesc->ops->release) + return ERR_PTR(-EINVAL); + + teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); +diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c +index b92a8a5b2e8c97..bf35fd23b5e160 100644 +--- a/drivers/thunderbolt/tb.c ++++ b/drivers/thunderbolt/tb.c +@@ -3022,7 +3022,7 @@ static bool tb_apple_add_links(struct tb_nhi *nhi) + if (!pci_is_pcie(pdev)) + continue; + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || +- !pdev->is_hotplug_bridge) ++ !pdev->is_pciehp) + continue; + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, +diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c +index f2c4422cab8640..a714dad82cd1fe 100644 +--- a/drivers/ufs/core/ufshcd-crypto.c ++++ b/drivers/ufs/core/ufshcd-crypto.c +@@ -95,8 +95,12 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, + return err; + } + +-static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) ++static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, ++ const struct blk_crypto_key *key, ++ unsigned int slot) + { ++ struct ufs_hba *hba = ++ container_of(profile, struct ufs_hba, crypto_profile); + /* + * Clear the crypto cfg on the device. Clearing CFGE + * might not be sufficient, so just clear the entire cfg. +@@ -106,16 +110,10 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) + return ufshcd_program_key(hba, &cfg, slot); + } + +-static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, +- const struct blk_crypto_key *key, +- unsigned int slot) +-{ +- struct ufs_hba *hba = +- container_of(profile, struct ufs_hba, crypto_profile); +- +- return ufshcd_clear_keyslot(hba, slot); +-} +- ++/* ++ * Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE ++ * should be used in the host controller initialization sequence. ++ */ + bool ufshcd_crypto_enable(struct ufs_hba *hba) + { + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) +@@ -123,6 +121,10 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba) + + /* Reset might clear all keys, so reprogram all the keys. */ + blk_crypto_reprogram_all_keys(&hba->crypto_profile); ++ ++ if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE) ++ return false; ++ + return true; + } + +@@ -159,6 +161,9 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) + int err = 0; + enum blk_crypto_mode_num blk_mode_num; + ++ if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE) ++ return 0; ++ + /* + * Don't use crypto if either the hardware doesn't advertise the + * standard crypto capability bit *or* if the vendor specific driver +@@ -228,9 +233,10 @@ void ufshcd_init_crypto(struct ufs_hba *hba) + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return; + +- /* Clear all keyslots - the number of keyslots is (CFGC + 1) */ +- for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++) +- ufshcd_clear_keyslot(hba, slot); ++ /* Clear all keyslots. 
*/ ++ for (slot = 0; slot < hba->crypto_profile.num_slots; slot++) ++ hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile, ++ NULL, slot); + } + + void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q) +diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h +index be8596f20ba2f2..89bb97c14c15b6 100644 +--- a/drivers/ufs/core/ufshcd-crypto.h ++++ b/drivers/ufs/core/ufshcd-crypto.h +@@ -37,6 +37,33 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, + h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num)); + } + ++static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, ++ struct ufshcd_lrb *lrbp) ++{ ++ struct scsi_cmnd *cmd = lrbp->cmd; ++ const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx; ++ ++ if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt) ++ return hba->vops->fill_crypto_prdt(hba, crypt_ctx, ++ lrbp->ucd_prdt_ptr, ++ scsi_sg_count(cmd)); ++ return 0; ++} ++ ++static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, ++ struct ufshcd_lrb *lrbp) ++{ ++ if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT)) ++ return; ++ ++ if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx)) ++ return; ++ ++ /* Zeroize the PRDT because it can contain cryptographic keys. */ ++ memzero_explicit(lrbp->ucd_prdt_ptr, ++ ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd)); ++} ++ + bool ufshcd_crypto_enable(struct ufs_hba *hba); + + int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); +@@ -54,6 +81,15 @@ static inline void + ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, + struct request_desc_header *h) { } + ++static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, ++ struct ufshcd_lrb *lrbp) ++{ ++ return 0; ++} ++ ++static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, ++ struct ufshcd_lrb *lrbp) { } ++ + static inline bool ufshcd_crypto_enable(struct ufs_hba *hba) + { + return false; +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 7dcdaac31546ba..01a7c1720ce150 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -275,6 +275,9 @@ static const struct ufs_dev_quirk ufs_fixups[] = { + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = "THGLF2G9D8KBADG", + .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, ++ { .wmanufacturerid = UFS_VENDOR_TOSHIBA, ++ .model = "THGJFJT1E45BATP", ++ .quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT }, + {} + }; + +@@ -2365,7 +2368,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) + * 0h: legacy single doorbell support is available + * 1h: indicate that legacy single doorbell support has been removed + */ +- hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); ++ if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP)) ++ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); ++ else ++ hba->lsdb_sup = true; ++ + if (!hba->mcq_sup) + return 0; + +@@ -2586,7 +2593,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) + + ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd)); + +- return 0; ++ return ufshcd_crypto_fill_prdt(hba, lrbp); + } + + /** +@@ -4176,8 +4183,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + get, UIC_GET_ATTR_ID(attr_sel), + UFS_UIC_COMMAND_RETRIES - retries); + +- if (mib_val && !ret) +- *mib_val = uic_cmd.argument3; ++ if (mib_val) ++ *mib_val = ret == 0 ? 
uic_cmd.argument3 : 0; + + if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) + && pwr_mode_change) +@@ -4973,7 +4980,8 @@ static int ufshcd_link_startup(struct ufs_hba *hba) + * If UFS device isn't active then we will have to issue link startup + * 2 times to make sure the device state move to active. + */ +- if (!ufshcd_is_ufs_dev_active(hba)) ++ if (!(hba->quirks & UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE) && ++ !ufshcd_is_ufs_dev_active(hba)) + link_startup_again = true; + + link_startup: +@@ -5509,6 +5517,7 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba, + struct scsi_cmnd *cmd = lrbp->cmd; + + scsi_dma_unmap(cmd); ++ ufshcd_crypto_clear_prdt(hba, lrbp); + ufshcd_release(hba); + ufshcd_clk_scaling_update_busy(hba); + } +@@ -6341,13 +6350,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba) + } + } + +-static void ufshcd_force_error_recovery(struct ufs_hba *hba) ++void ufshcd_force_error_recovery(struct ufs_hba *hba) + { + spin_lock_irq(hba->host->host_lock); + hba->force_reset = true; + ufshcd_schedule_eh_work(hba); + spin_unlock_irq(hba->host->host_lock); + } ++EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery); + + static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) + { +@@ -8730,7 +8740,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) + struct ufs_dev_info *dev_info = &hba->dev_info; + struct utp_upiu_query_v4_0 *upiu_data; + +- if (dev_info->wspecversion < 0x400) ++ if (dev_info->wspecversion < 0x400 || ++ hba->dev_quirks & UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT) + return; + + ufshcd_hold(hba); +diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c +index 2383ecd88f1cba..8b4a3cc8125310 100644 +--- a/drivers/ufs/host/ufs-mediatek.c ++++ b/drivers/ufs/host/ufs-mediatek.c +@@ -847,6 +847,69 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) + } + } + ++static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) ++{ ++ unsigned long flags; ++ u32 ah_ms = 10; ++ u32 ah_scale, ah_timer; ++ u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000}; ++ ++ if (ufshcd_is_clkgating_allowed(hba)) { ++ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) { ++ ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ++ hba->ahit); ++ ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ++ hba->ahit); ++ if (ah_scale <= 5) ++ ah_ms = ah_timer * scale_us[ah_scale] / 1000; ++ } ++ ++ spin_lock_irqsave(hba->host->host_lock, flags); ++ hba->clk_gating.delay_ms = max(ah_ms, 10U); ++ spin_unlock_irqrestore(hba->host->host_lock, flags); ++ } ++} ++ ++/* Convert microseconds to Auto-Hibernate Idle Timer register value */ ++static u32 ufs_mtk_us_to_ahit(unsigned int timer) ++{ ++ unsigned int scale; ++ ++ for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale) ++ timer /= UFSHCI_AHIBERN8_SCALE_FACTOR; ++ ++ return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) | ++ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); ++} ++ ++static void ufs_mtk_fix_ahit(struct ufs_hba *hba) ++{ ++ unsigned int us; ++ ++ if (ufshcd_is_auto_hibern8_supported(hba)) { ++ switch (hba->dev_info.wmanufacturerid) { ++ case UFS_VENDOR_SAMSUNG: ++ /* configure auto-hibern8 timer to 3.5 ms */ ++ us = 3500; ++ break; ++ ++ case UFS_VENDOR_MICRON: ++ /* configure auto-hibern8 timer to 2 ms */ ++ us = 2000; ++ break; ++ ++ default: ++ /* configure auto-hibern8 timer to 1 ms */ ++ us = 1000; ++ break; ++ } ++ ++ hba->ahit = ufs_mtk_us_to_ahit(us); ++ } ++ ++ ufs_mtk_setup_clk_gating(hba); ++} ++ + static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba) + { + struct 
ufs_mtk_host *host = ufshcd_get_variant(hba); +@@ -1028,6 +1091,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), + PA_NO_ADAPT); + ++ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), ++ DL_FC0ProtectionTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), ++ DL_TC0ReplayTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), ++ DL_AFC0ReqTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), ++ DL_FC1ProtectionTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), ++ DL_TC1ReplayTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), ++ DL_AFC1ReqTimeOutVal_Default); ++ ++ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), ++ DL_FC0ProtectionTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), ++ DL_TC0ReplayTimeOutVal_Default); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), ++ DL_AFC0ReqTimeOutVal_Default); ++ } ++ + ret = ufshcd_uic_change_pwr_mode(hba, + FASTAUTO_MODE << 4 | FASTAUTO_MODE); + +@@ -1046,19 +1131,49 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, + return ret; + } + ++static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) ++{ ++ int ret; ++ ++ /* disable auto-hibern8 */ ++ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); ++ ++ /* wait host return to idle state when auto-hibern8 off */ ++ ufs_mtk_wait_idle_state(hba, 5); ++ ++ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); ++ if (ret) { ++ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); ++ ++ ufshcd_force_error_recovery(hba); ++ ++ /* trigger error handler and break suspend */ ++ ret = -EBUSY; ++ } ++ ++ return ret; ++} ++ + static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status stage, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) + { + int ret = 0; ++ static u32 reg; + + switch (stage) { + case PRE_CHANGE: ++ if (ufshcd_is_auto_hibern8_supported(hba)) { ++ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); ++ ufs_mtk_auto_hibern8_disable(hba); ++ } + ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, + dev_req_params); + break; + case POST_CHANGE: ++ if (ufshcd_is_auto_hibern8_supported(hba)) ++ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER); + break; + default: + ret = -EINVAL; +@@ -1119,32 +1234,10 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba) + + return ret; + } +- +-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +-{ +- u32 ah_ms; +- +- if (ufshcd_is_clkgating_allowed(hba)) { +- if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) +- ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, +- hba->ahit); +- else +- ah_ms = 10; +- ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5); +- } +-} +- + static void ufs_mtk_post_link(struct ufs_hba *hba) + { + /* enable unipro clock gating feature */ + ufs_mtk_cfg_unipro_cg(hba, true); +- +- /* will be configured during probe hba */ +- if (ufshcd_is_auto_hibern8_supported(hba)) +- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | +- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); +- +- ufs_mtk_setup_clk_gating(hba); + } + + static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, +@@ -1171,11 +1264,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba) + { + struct arm_smccc_res res; + +- /* disable hba before device reset */ 
+- ufshcd_hba_stop(hba); +- + ufs_mtk_device_reset_ctrl(0, res); + ++ /* disable hba in middle of device reset */ ++ ufshcd_hba_stop(hba); ++ + /* + * The reset signal is active low. UFS devices shall detect + * more than or equal to 1us of positive or negative RST_n +@@ -1253,6 +1346,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm) + { + struct ufs_vreg *vccqx = NULL; + ++ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) ++ return; ++ + if (hba->vreg_info.vccq) + vccqx = hba->vreg_info.vccq; + else +@@ -1297,21 +1393,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) + } + } + +-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) +-{ +- int ret; +- +- /* disable auto-hibern8 */ +- ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); +- +- /* wait host return to idle state when auto-hibern8 off */ +- ufs_mtk_wait_idle_state(hba, 5); +- +- ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); +- if (ret) +- dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); +-} +- + static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) + { +@@ -1320,7 +1401,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + + if (status == PRE_CHANGE) { + if (ufshcd_is_auto_hibern8_supported(hba)) +- ufs_mtk_auto_hibern8_disable(hba); ++ return ufs_mtk_auto_hibern8_disable(hba); + return 0; + } + +@@ -1378,8 +1459,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) + } + + return 0; ++ + fail: +- return ufshcd_link_recovery(hba); ++ /* ++ * Check if the platform (parent) device has resumed, and ensure that ++ * power, clock, and MTCMOS are all turned on. ++ */ ++ err = ufshcd_link_recovery(hba); ++ if (err) { ++ dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n", ++ hba->dev->power.request, ++ hba->dev->power.runtime_status, ++ hba->dev->power.runtime_error); ++ } ++ ++ return 0; /* Cannot return a failure, otherwise, the I/O will hang. 
*/ + } + + static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) +@@ -1444,6 +1538,7 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) + + ufs_mtk_vreg_fix_vcc(hba); + ufs_mtk_vreg_fix_vccqx(hba); ++ ufs_mtk_fix_ahit(hba); + } + + static void ufs_mtk_event_notify(struct ufs_hba *hba, +diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c +index c38ea3395b2c10..4ecaaf52b3e95e 100644 +--- a/drivers/ufs/host/ufshcd-pci.c ++++ b/drivers/ufs/host/ufshcd-pci.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -34,6 +35,7 @@ struct intel_host { + u32 dsm_fns; + u32 active_ltr; + u32 idle_ltr; ++ int saved_spm_lvl; + struct dentry *debugfs_root; + struct gpio_desc *reset_gpio; + }; +@@ -375,6 +377,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba) + host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; ++ host->saved_spm_lvl = -1; + ufshcd_set_variant(hba, host); + intel_dsm_init(host, hba->dev); + if (INTEL_DSM_SUPPORTED(host, RESET)) { +@@ -460,7 +463,8 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba) + static int ufs_intel_adl_init(struct ufs_hba *hba) + { + hba->nop_out_timeout = 200; +- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; ++ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 | ++ UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE; + hba->caps |= UFSHCD_CAP_WB_EN; + return ufs_intel_common_init(hba); + } +@@ -542,6 +546,66 @@ static int ufshcd_pci_restore(struct device *dev) + + return ufshcd_system_resume(dev); + } ++ ++static int ufs_intel_suspend_prepare(struct device *dev) ++{ ++ struct ufs_hba *hba = dev_get_drvdata(dev); ++ struct intel_host *host = ufshcd_get_variant(hba); ++ int err; ++ ++ /* ++ * Only s2idle (S0ix) retains link state. Force power-off ++ * (UFS_PM_LVL_5) for any other case. 
++ */ ++ if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE && hba->spm_lvl < UFS_PM_LVL_5) { ++ host->saved_spm_lvl = hba->spm_lvl; ++ hba->spm_lvl = UFS_PM_LVL_5; ++ } ++ ++ err = ufshcd_suspend_prepare(dev); ++ ++ if (err < 0 && host->saved_spm_lvl != -1) { ++ hba->spm_lvl = host->saved_spm_lvl; ++ host->saved_spm_lvl = -1; ++ } ++ ++ return err; ++} ++ ++static void ufs_intel_resume_complete(struct device *dev) ++{ ++ struct ufs_hba *hba = dev_get_drvdata(dev); ++ struct intel_host *host = ufshcd_get_variant(hba); ++ ++ ufshcd_resume_complete(dev); ++ ++ if (host->saved_spm_lvl != -1) { ++ hba->spm_lvl = host->saved_spm_lvl; ++ host->saved_spm_lvl = -1; ++ } ++} ++ ++static int ufshcd_pci_suspend_prepare(struct device *dev) ++{ ++ struct ufs_hba *hba = dev_get_drvdata(dev); ++ ++ if (!strcmp(hba->vops->name, "intel-pci")) ++ return ufs_intel_suspend_prepare(dev); ++ ++ return ufshcd_suspend_prepare(dev); ++} ++ ++static void ufshcd_pci_resume_complete(struct device *dev) ++{ ++ struct ufs_hba *hba = dev_get_drvdata(dev); ++ ++ if (!strcmp(hba->vops->name, "intel-pci")) { ++ ufs_intel_resume_complete(dev); ++ return; ++ } ++ ++ ufshcd_resume_complete(dev); ++} + #endif + + /** +@@ -624,8 +688,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { + .thaw = ufshcd_system_resume, + .poweroff = ufshcd_system_suspend, + .restore = ufshcd_pci_restore, +- .prepare = ufshcd_suspend_prepare, +- .complete = ufshcd_resume_complete, ++ .prepare = ufshcd_pci_suspend_prepare, ++ .complete = ufshcd_pci_resume_complete, + #endif + }; + +diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c +index 38e693cd3efc05..fb192b120d77fe 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.c ++++ b/drivers/usb/cdns3/cdnsp-gadget.c +@@ -1975,7 +1975,10 @@ static int __cdnsp_gadget_init(struct cdns *cdns) + return 0; + + del_gadget: +- usb_del_gadget_udc(&pdev->gadget); ++ usb_del_gadget(&pdev->gadget); ++ cdnsp_gadget_free_endpoints(pdev); ++ usb_put_gadget(&pdev->gadget); ++ goto halt_pdev; + free_endpoints: + cdnsp_gadget_free_endpoints(pdev); + halt_pdev: +@@ -1997,8 +2000,9 @@ static void cdnsp_gadget_exit(struct cdns *cdns) + devm_free_irq(pdev->dev, cdns->dev_irq, pdev); + pm_runtime_mark_last_busy(cdns->dev); + pm_runtime_put_autosuspend(cdns->dev); +- usb_del_gadget_udc(&pdev->gadget); ++ usb_del_gadget(&pdev->gadget); + cdnsp_gadget_free_endpoints(pdev); ++ usb_put_gadget(&pdev->gadget); + cdnsp_mem_cleanup(pdev); + kfree(pdev); + cdns->gadget_dev = NULL; +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index 4a88546b1b1576..14424e395f2a96 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1941,7 +1941,12 @@ static int ffs_func_eps_enable(struct ffs_function *func) + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; +- while(count--) { ++ if (!epfile) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ while (count--) { + ep->ep->driver_data = ep; + + ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); +@@ -1965,6 +1970,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) + } + + wake_up_interruptible(&ffs->wait); ++done: + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); + + return ret; +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c +index 2f1ec03d17d6db..d8dafebeabea48 100644 +--- a/drivers/usb/gadget/function/f_hid.c ++++ b/drivers/usb/gadget/function/f_hid.c +@@ -494,7 +494,7 @@ static ssize_t f_hidg_write(struct file 
*file, const char __user *buffer, + } + + req->status = 0; +- req->zero = 0; ++ req->zero = 1; + req->length = count; + req->complete = f_hidg_req_complete; + req->context = hidg; +@@ -765,7 +765,7 @@ static int hidg_setup(struct usb_function *f, + return -EOPNOTSUPP; + + respond: +- req->zero = 0; ++ req->zero = 1; + req->length = length; + status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (status < 0) +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index 7aad737901e833..1da4e59338c567 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -1449,6 +1449,8 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + + ncm_opts->bound = true; + ++ ncm_string_defs[1].s = ncm->ethaddr; ++ + us = usb_gstrings_attach(cdev, ncm_strings, + ARRAY_SIZE(ncm_string_defs)); + if (IS_ERR(us)) +@@ -1696,7 +1698,6 @@ static struct usb_function *ncm_alloc(struct usb_function_instance *fi) + mutex_unlock(&opts->lock); + return ERR_PTR(-EINVAL); + } +- ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr; + + spin_lock_init(&ncm->lock); + ncm_reset_values(ncm); +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index b350ee080236e2..64383566cfb771 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -168,6 +168,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s + return ret; + + pm_runtime_set_active(&pdev->dev); ++ pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + +diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c +index 4e30de4db1c0a8..dabe1a5c6ae567 100644 +--- a/drivers/usb/mon/mon_bin.c ++++ b/drivers/usb/mon/mon_bin.c +@@ -68,18 +68,20 @@ + * The magic limit was calculated so that it allows the monitoring + * application to pick data once in two ticks. This way, another application, + * which presumably drives the bus, gets to hog CPU, yet we collect our data. +- * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an +- * enormous overhead built into the bus protocol, so we need about 1000 KB. ++ * ++ * Originally, for a 480 Mbit/s bus this required a buffer of about 1 MB. For ++ * modern 20 Gbps buses, this value increases to over 50 MB. The maximum ++ * buffer size is set to 64 MiB to accommodate this. + * + * This is still too much for most cases, where we just snoop a few + * descriptor fetches for enumeration. So, the default is a "reasonable" +- * amount for systems with HZ=250 and incomplete bus saturation. ++ * amount for typical, low-throughput use cases. + * + * XXX What about multi-megabyte URBs which take minutes to transfer? + */ +-#define BUFF_MAX CHUNK_ALIGN(1200*1024) +-#define BUFF_DFL CHUNK_ALIGN(300*1024) +-#define BUFF_MIN CHUNK_ALIGN(8*1024) ++#define BUFF_MAX CHUNK_ALIGN(64*1024*1024) ++#define BUFF_DFL CHUNK_ALIGN(300*1024) ++#define BUFF_MIN CHUNK_ALIGN(8*1024) + + /* + * The per-event API header (2 per URB). 
+diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c +index 38b51613ecca90..3f48125e2b9f0f 100644 +--- a/drivers/vfio/iova_bitmap.c ++++ b/drivers/vfio/iova_bitmap.c +@@ -127,9 +127,8 @@ struct iova_bitmap { + static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap, + unsigned long iova) + { +- unsigned long pgsize = 1UL << bitmap->mapped.pgshift; +- +- return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize); ++ return (iova >> bitmap->mapped.pgshift) / ++ BITS_PER_TYPE(*bitmap->bitmap); + } + + /* +diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c +index edb631e5e7ec99..6dfb290c339f99 100644 +--- a/drivers/vfio/vfio_main.c ++++ b/drivers/vfio/vfio_main.c +@@ -1195,7 +1195,7 @@ static int vfio_ioctl_device_feature(struct vfio_device *device, + feature.argsz - minsz); + default: + if (unlikely(!device->ops->device_feature)) +- return -EINVAL; ++ return -ENOTTY; + return device->ops->device_feature(device, feature.flags, + arg->data, + feature.argsz - minsz); +diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c +index 7075bfab59c4dc..d191560ce285f9 100644 +--- a/drivers/video/backlight/lp855x_bl.c ++++ b/drivers/video/backlight/lp855x_bl.c +@@ -22,7 +22,7 @@ + #define LP855X_DEVICE_CTRL 0x01 + #define LP855X_EEPROM_START 0xA0 + #define LP855X_EEPROM_END 0xA7 +-#define LP8556_EPROM_START 0xA0 ++#define LP8556_EPROM_START 0x98 + #define LP8556_EPROM_END 0xAF + + /* LP8555/7 Registers */ +diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c +index 3dcf83f5e7b4a1..297ba9517b88a7 100644 +--- a/drivers/video/fbdev/aty/atyfb_base.c ++++ b/drivers/video/fbdev/aty/atyfb_base.c +@@ -2611,8 +2611,12 @@ static int aty_init(struct fb_info *info) + pr_cont("\n"); + } + #endif +- if (par->pll_ops->init_pll) +- par->pll_ops->init_pll(info, &par->pll); ++ if (par->pll_ops->init_pll) { ++ ret = par->pll_ops->init_pll(info, &par->pll); ++ if (ret) ++ return ret; ++ } ++ + if (par->pll_ops->resume_pll) + par->pll_ops->resume_pll(info, &par->pll); + +diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c +index 42e681a78136ab..8563264d11fac6 100644 +--- a/drivers/video/fbdev/core/bitblit.c ++++ b/drivers/video/fbdev/core/bitblit.c +@@ -79,12 +79,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info, + struct fb_image *image, u8 *buf, u8 *dst) + { + u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; ++ unsigned int charcnt = vc->vc_font.charcount; + u32 idx = vc->vc_font.width >> 3; + u8 *src; + + while (cnt--) { +- src = vc->vc_font.data + (scr_readw(s++)& +- charmask)*cellsize; ++ u16 ch = scr_readw(s++) & charmask; ++ ++ if (ch >= charcnt) ++ ch = 0; ++ src = vc->vc_font.data + (unsigned int)ch * cellsize; + + if (attr) { + update_attr(buf, src, attr, vc); +@@ -112,14 +116,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc, + u8 *dst) + { + u16 charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; ++ unsigned int charcnt = vc->vc_font.charcount; + u32 shift_low = 0, mod = vc->vc_font.width % 8; + u32 shift_high = 8; + u32 idx = vc->vc_font.width >> 3; + u8 *src; + + while (cnt--) { +- src = vc->vc_font.data + (scr_readw(s++)& +- charmask)*cellsize; ++ u16 ch = scr_readw(s++) & charmask; ++ ++ if (ch >= charcnt) ++ ch = 0; ++ src = vc->vc_font.data + (unsigned int)ch * cellsize; + + if (attr) { + update_attr(buf, src, attr, vc); +@@ -160,6 +168,11 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, + image.height = vc->vc_font.height; + image.depth = 1; + ++ if (image.dy >= info->var.yres) ++ return; ++ ++ image.height = min(image.height, info->var.yres - image.dy); ++ + if (attribute) { + buf = kmalloc(cellsize, GFP_ATOMIC); + if (!buf) +@@ -173,6 +186,18 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, + cnt = count; + + image.width = vc->vc_font.width * cnt; ++ ++ if (image.dx >= info->var.xres) ++ break; ++ ++ if (image.dx + image.width > info->var.xres) { ++ image.width = info->var.xres - image.dx; ++ cnt = image.width / vc->vc_font.width; ++ if (cnt == 0) ++ break; ++ image.width = cnt * vc->vc_font.width; ++ } ++ + pitch = DIV_ROUND_UP(image.width, 8) + scan_align; + pitch &= ~scan_align; + size = pitch * image.height + buf_align; +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index b49f15a3442eab..78a5b22c8d1507 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -2824,6 +2824,25 @@ int fbcon_mode_deleted(struct fb_info *info, + return found; + } + ++static void fbcon_delete_mode(struct fb_videomode *m) ++{ ++ struct fbcon_display *p; ++ ++ for (int i = first_fb_vc; i <= last_fb_vc; i++) { ++ p = &fb_display[i]; ++ if (p->mode == m) ++ p->mode = NULL; ++ } ++} ++ ++void fbcon_delete_modelist(struct list_head *head) ++{ ++ struct fb_modelist *modelist; ++ ++ list_for_each_entry(modelist, head, list) ++ fbcon_delete_mode(&modelist->mode); ++} ++ + #ifdef CONFIG_VT_HW_CONSOLE_BINDING + static void fbcon_unbind(void) + { +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index 942b942f6bf9af..34b8e93f89a43c 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1030,6 +1030,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info) + fb_info->pixmap.addr = NULL; + } + ++ fbcon_delete_modelist(&fb_info->modelist); + fb_destroy_modelist(&fb_info->modelist); + registered_fb[fb_info->node] = NULL; + num_registered_fb--; +diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c +index 6307364e4a49c5..7ae37f91b42c78 100644 +--- a/drivers/video/fbdev/pvr2fb.c ++++ b/drivers/video/fbdev/pvr2fb.c +@@ -192,7 +192,7 @@ static unsigned long pvr2fb_map; + + #ifdef CONFIG_PVR2_DMA + static unsigned int shdma = PVR2_CASCADE_CHAN; +-static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; ++static unsigned int pvr2dma = CONFIG_NR_ONCHIP_DMA_CHANNELS; + #endif + + static struct fb_videomode pvr2_modedb[] = { +diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c +index 91d070ef69897d..6ff059ee169418 100644 +--- a/drivers/video/fbdev/valkyriefb.c ++++ b/drivers/video/fbdev/valkyriefb.c +@@ -329,11 +329,13 @@ static int __init valkyriefb_init(void) + + if (of_address_to_resource(dp, 0, &r)) { + printk(KERN_ERR "can't find address for valkyrie\n"); ++ of_node_put(dp); + return 0; + } + + frame_buffer_phys = r.start; + cmap_regs_phys = r.start + 0x304000; ++ 
of_node_put(dp); + } + #endif /* ppc (!CONFIG_MAC) */ + +diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c +index 0b4bd883ff28a8..183d77a6c4b31b 100644 +--- a/drivers/watchdog/s3c2410_wdt.c ++++ b/drivers/watchdog/s3c2410_wdt.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #define S3C2410_WTCON 0x00 + #define S3C2410_WTDAT 0x04 +@@ -302,9 +303,14 @@ static inline unsigned long s3c2410wdt_get_freq(struct s3c2410_wdt *wdt) + static inline unsigned int s3c2410wdt_max_timeout(struct s3c2410_wdt *wdt) + { + const unsigned long freq = s3c2410wdt_get_freq(wdt); ++ const u64 n_max = (u64)(S3C2410_WTCON_PRESCALE_MAX + 1) * ++ S3C2410_WTCON_MAXDIV * S3C2410_WTCNT_MAXCNT; ++ u64 t_max = div64_ul(n_max, freq); + +- return S3C2410_WTCNT_MAXCNT / (freq / (S3C2410_WTCON_PRESCALE_MAX + 1) +- / S3C2410_WTCON_MAXDIV); ++ if (t_max > UINT_MAX) ++ t_max = UINT_MAX; ++ ++ return t_max; + } + + static int s3c2410wdt_disable_wdt_reset(struct s3c2410_wdt *wdt, bool mask) +diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c +index d525957594b6b5..be61810cb7798f 100644 +--- a/fs/9p/v9fs.c ++++ b/fs/9p/v9fs.c +@@ -561,7 +561,7 @@ static ssize_t caches_show(struct kobject *kobj, + spin_lock(&v9fs_sessionlist_lock); + list_for_each_entry(v9ses, &v9fs_sessionlist, slist) { + if (v9ses->cachetag) { +- n = snprintf(buf, limit, "%s\n", v9ses->cachetag); ++ n = snprintf(buf + count, limit, "%s\n", v9ses->cachetag); + if (n < 0) { + count = n; + break; +@@ -597,13 +597,16 @@ static const struct attribute_group v9fs_attr_group = { + + static int __init v9fs_sysfs_init(void) + { ++ int ret; ++ + v9fs_kobj = kobject_create_and_add("9p", fs_kobj); + if (!v9fs_kobj) + return -ENOMEM; + +- if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) { ++ ret = sysfs_create_group(v9fs_kobj, &v9fs_attr_group); ++ if (ret) { + kobject_put(v9fs_kobj); +- return -ENOMEM; ++ return ret; + } + + return 0; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 2d6ccc21a8229f..fab8ffb3a2f827 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -1751,6 +1751,14 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, + unlock_page(p); + } + } ++ /* ++ * If the fs is already in error status, do not submit any writeback ++ * but immediately finish it. 
++ */ ++ if (unlikely(BTRFS_FS_ERROR(fs_info))) { ++ btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info))); ++ return; ++ } + btrfs_submit_bio(bbio, 0); + } + +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index e794606e7c780b..9ef543db8aab91 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -2825,12 +2825,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode, + { + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(inode)->root; ++ u64 range_start; ++ u64 range_end; + int ret; + int ret2; + + if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode)) + return 0; + ++ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize); ++ range_end = round_up(end, root->fs_info->sectorsize); ++ ++ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start, ++ range_end - range_start); ++ if (ret) ++ return ret; ++ + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c +index 4a5a5ee360e579..a69dce114c5bae 100644 +--- a/fs/btrfs/scrub.c ++++ b/fs/btrfs/scrub.c +@@ -2000,6 +2000,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, + ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start, + &length, &bioc, NULL, NULL, 1); + if (ret < 0) { ++ bio_put(bio); + btrfs_put_bioc(bioc); + btrfs_bio_counter_dec(fs_info); + goto out; +@@ -2009,6 +2010,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, + btrfs_put_bioc(bioc); + if (!rbio) { + ret = -ENOMEM; ++ bio_put(bio); + btrfs_bio_counter_dec(fs_info); + goto out; + } +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 5512991b24faa8..256116a6e07c6a 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -6765,7 +6765,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, + * a power failure unless the log was synced as part of an fsync + * against any other unrelated inode. 
+ */ +- if (inode_only != LOG_INODE_EXISTS) ++ if (!ctx->logging_new_name && inode_only != LOG_INODE_EXISTS) + inode->last_log_commit = inode->last_sub_trans; + spin_unlock(&inode->lock); + +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c +index 1395b71df5ccc2..529dd07fa459f1 100644 +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@ -1213,8 +1213,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc, + spin_unlock(&fsc->async_unlink_conflict_lock); + + spin_lock(&dentry->d_lock); +- di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK; +- wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT); ++ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags); + spin_unlock(&dentry->d_lock); + + synchronize_rcu(); +diff --git a/fs/ceph/file.c b/fs/ceph/file.c +index e12657b4c3e042..0ec78d87519ba7 100644 +--- a/fs/ceph/file.c ++++ b/fs/ceph/file.c +@@ -539,8 +539,7 @@ static void wake_async_create_waiters(struct inode *inode, + + spin_lock(&ci->i_ceph_lock); + if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) { +- ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE; +- wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT); ++ clear_and_wake_up_bit(CEPH_ASYNC_CREATE_BIT, &ci->i_ceph_flags); + + if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) { + ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS; +@@ -716,8 +715,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode, + } + + spin_lock(&dentry->d_lock); +- di->flags &= ~CEPH_DENTRY_ASYNC_CREATE; +- wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT); ++ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_CREATE_BIT, &di->flags); + spin_unlock(&dentry->d_lock); + + return ret; +diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c +index cb51c7e9c8e22c..02f5fbe83aa46d 100644 +--- a/fs/ceph/locks.c ++++ b/fs/ceph/locks.c +@@ -219,7 +219,10 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc, + if (err && err != -ERESTARTSYS) + return err; + +- wait_for_completion_killable(&req->r_safe_completion); ++ err = wait_for_completion_killable(&req->r_safe_completion); ++ if (err) ++ return err; ++ + return 0; + } + +diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c +index 407880901ee3fb..e5f4ce8c38e1a7 100644 +--- a/fs/exfat/fatent.c ++++ b/fs/exfat/fatent.c +@@ -89,35 +89,36 @@ int exfat_ent_get(struct super_block *sb, unsigned int loc, + int err; + + if (!is_valid_cluster(sbi, loc)) { +- exfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", ++ exfat_fs_error_ratelimit(sb, ++ "invalid access to FAT (entry 0x%08x)", + loc); + return -EIO; + } + + err = __exfat_ent_get(sb, loc, content); + if (err) { +- exfat_fs_error(sb, ++ exfat_fs_error_ratelimit(sb, + "failed to access to FAT (entry 0x%08x, err:%d)", + loc, err); + return err; + } + + if (*content == EXFAT_FREE_CLUSTER) { +- exfat_fs_error(sb, ++ exfat_fs_error_ratelimit(sb, + "invalid access to FAT free cluster (entry 0x%08x)", + loc); + return -EIO; + } + + if (*content == EXFAT_BAD_CLUSTER) { +- exfat_fs_error(sb, ++ exfat_fs_error_ratelimit(sb, + "invalid access to FAT bad cluster (entry 0x%08x)", + loc); + return -EIO; + } + + if (*content != EXFAT_EOF_CLUSTER && !is_valid_cluster(sbi, *content)) { +- exfat_fs_error(sb, ++ exfat_fs_error_ratelimit(sb, + "invalid access to FAT (entry 0x%08x) bogus content (0x%08x)", + loc, *content); + return -EIO; +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c +index b527f4ab47e021..62a6960242c5a6 100644 +--- a/fs/ext4/fast_commit.c ++++ b/fs/ext4/fast_commit.c +@@ -675,7 +675,7 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, 
ext4_lblk_t star + + static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail) + { +- blk_opf_t write_flags = REQ_SYNC; ++ blk_opf_t write_flags = JBD2_JOURNAL_REQ_FLAGS; + struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh; + + /* Add REQ_FUA | REQ_PREFLUSH only its tail */ +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 66933e55efb3bd..307081c9943743 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1552,7 +1552,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value, + WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) && + !(current->flags & PF_MEMALLOC_NOFS)); + +- ea_data = kvmalloc(value_len, GFP_KERNEL); ++ ea_data = kvmalloc(value_len, GFP_NOFS); + if (!ea_data) { + mb_cache_entry_put(ea_inode_cache, ce); + return NULL; +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index e962de4ecaa2f6..c3b2f78ca4e3e2 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -1209,7 +1209,7 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock) + int i; + + for (i = cluster_size - 1; i >= 0; i--) { +- loff_t start = rpages[i]->index << PAGE_SHIFT; ++ loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT; + + if (from <= start) { + zero_user_segment(rpages[i], 0, PAGE_SIZE); +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c +index 6a77581106a9e4..79d07c786f6aec 100644 +--- a/fs/f2fs/extent_cache.c ++++ b/fs/f2fs/extent_cache.c +@@ -562,7 +562,13 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, + p = &(*p)->rb_right; + leftmost = false; + } else { ++ f2fs_err_ratelimited(sbi, "%s: corrupted extent, type: %d, " ++ "extent node in rb tree [%u, %u, %u], age [%llu, %llu], " ++ "extent node to insert [%u, %u, %u], age [%llu, %llu]", ++ __func__, et->type, en->ei.fofs, en->ei.blk, en->ei.len, en->ei.age, ++ en->ei.last_blocks, ei->fofs, ei->blk, ei->len, ei->age, ei->last_blocks); + f2fs_bug_on(sbi, 1); ++ return NULL; + } + } + +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index 735abf426a0640..96050028d91b0e 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -94,14 +94,11 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) + if (!fi) + return NULL; + +- fi->i_time = 0; ++ /* Initialize private data (i.e. 
everything except fi->inode) */ ++ BUILD_BUG_ON(offsetof(struct fuse_inode, inode) != 0); ++ memset((void *) fi + sizeof(fi->inode), 0, sizeof(*fi) - sizeof(fi->inode)); ++ + fi->inval_mask = ~0; +- fi->nodeid = 0; +- fi->nlookup = 0; +- fi->attr_version = 0; +- fi->orig_ino = 0; +- fi->state = 0; +- fi->submount_lookup = NULL; + mutex_init(&fi->mutex); + spin_lock_init(&fi->lock); + fi->forget = fuse_alloc_forget(); +diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c +index f4eb8d6f598944..6cf17fe239c9a3 100644 +--- a/fs/hpfs/namei.c ++++ b/fs/hpfs/namei.c +@@ -52,8 +52,10 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, + dee.fnode = cpu_to_le32(fno); + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); + result = new_inode(dir->i_sb); +- if (!result) ++ if (!result) { ++ err = -ENOMEM; + goto bail2; ++ } + hpfs_init_inode(result); + result->i_ino = fno; + hpfs_i(result)->i_parent_dir = dir->i_ino; +@@ -153,9 +155,10 @@ static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir, + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); + + result = new_inode(dir->i_sb); +- if (!result) ++ if (!result) { ++ err = -ENOMEM; + goto bail1; +- ++ } + hpfs_init_inode(result); + result->i_ino = fno; + result->i_mode |= S_IFREG; +@@ -239,9 +242,10 @@ static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir, + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); + + result = new_inode(dir->i_sb); +- if (!result) ++ if (!result) { ++ err = -ENOMEM; + goto bail1; +- ++ } + hpfs_init_inode(result); + result->i_ino = fno; + hpfs_i(result)->i_parent_dir = dir->i_ino; +@@ -314,8 +318,10 @@ static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir, + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); + + result = new_inode(dir->i_sb); +- if (!result) ++ if (!result) { ++ err = -ENOMEM; + goto bail1; ++ } + result->i_ino = fno; + hpfs_init_inode(result); + hpfs_i(result)->i_parent_dir = dir->i_ino; +diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c +index 66c38ef5e57111..1e6c1d1a15a6a6 100644 +--- a/fs/jfs/inode.c ++++ b/fs/jfs/inode.c +@@ -59,9 +59,15 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino) + */ + inode->i_link[inode->i_size] = '\0'; + } +- } else { ++ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || ++ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { + inode->i_op = &jfs_file_inode_operations; + init_special_inode(inode, inode->i_mode, inode->i_rdev); ++ } else { ++ printk(KERN_DEBUG "JFS: Invalid file type 0%04o for inode %lu.\n", ++ inode->i_mode, inode->i_ino); ++ iget_failed(inode); ++ return ERR_PTR(-EIO); + } + unlock_new_inode(inode); + return inode; +diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c +index dccc8b3f104593..42fb833ef28347 100644 +--- a/fs/jfs/jfs_txnmgr.c ++++ b/fs/jfs/jfs_txnmgr.c +@@ -272,14 +272,15 @@ int txInit(void) + if (TxBlock == NULL) + return -ENOMEM; + +- for (k = 1; k < nTxBlock - 1; k++) { +- TxBlock[k].next = k + 1; ++ for (k = 0; k < nTxBlock; k++) { + init_waitqueue_head(&TxBlock[k].gcwait); + init_waitqueue_head(&TxBlock[k].waitor); + } ++ ++ for (k = 1; k < nTxBlock - 1; k++) { ++ TxBlock[k].next = k + 1; ++ } + TxBlock[k].next = 0; +- init_waitqueue_head(&TxBlock[k].gcwait); +- init_waitqueue_head(&TxBlock[k].waitor); + + TxAnchor.freetid = 1; + init_waitqueue_head(&TxAnchor.freewait); +diff --git 
a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c +index 674c012868b1a2..1aa4c43c9b3b41 100644 +--- a/fs/nfs/nfs3client.c ++++ b/fs/nfs/nfs3client.c +@@ -2,6 +2,7 @@ + #include + #include + #include ++#include + #include "internal.h" + #include "nfs3_fs.h" + #include "netns.h" +@@ -98,7 +99,11 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv, + .net = mds_clp->cl_net, + .timeparms = &ds_timeout, + .cred = mds_srv->cred, +- .xprtsec = mds_clp->cl_xprtsec, ++ .xprtsec = { ++ .policy = RPC_XPRTSEC_NONE, ++ .cert_serial = TLS_NO_CERT, ++ .privkey_serial = TLS_NO_PRIVKEY, ++ }, + .connect_timeout = connect_timeout, + .reconnect_timeout = connect_timeout, + }; +@@ -111,8 +116,14 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv, + cl_init.hostname = buf; + + switch (ds_proto) { +- case XPRT_TRANSPORT_TCP: + case XPRT_TRANSPORT_TCP_TLS: ++ if (mds_clp->cl_xprtsec.policy != RPC_XPRTSEC_NONE) ++ cl_init.xprtsec = mds_clp->cl_xprtsec; ++ else ++ ds_proto = XPRT_TRANSPORT_TCP; ++ fallthrough; ++ case XPRT_TRANSPORT_RDMA: ++ case XPRT_TRANSPORT_TCP: + if (mds_clp->cl_nconnect > 1) + cl_init.nconnect = mds_clp->cl_nconnect; + } +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index f6dc42de48f03d..b14688da814d6c 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include "internal.h" + #include "callback.h" + #include "delegation.h" +@@ -222,6 +223,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init) + clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; + clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion]; + clp->cl_mig_gen = 1; ++ clp->cl_last_renewal = jiffies; + #if IS_ENABLED(CONFIG_NFS_V4_1) + init_waitqueue_head(&clp->cl_lock_waitq); + #endif +@@ -923,6 +925,7 @@ static int nfs4_set_client(struct nfs_server *server, + else + cl_init.max_connect = max_connect; + switch (proto) { ++ case XPRT_TRANSPORT_RDMA: + case XPRT_TRANSPORT_TCP: + case XPRT_TRANSPORT_TCP_TLS: + cl_init.nconnect = nconnect; +@@ -990,7 +993,11 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, + .net = mds_clp->cl_net, + .timeparms = &ds_timeout, + .cred = mds_srv->cred, +- .xprtsec = mds_srv->nfs_client->cl_xprtsec, ++ .xprtsec = { ++ .policy = RPC_XPRTSEC_NONE, ++ .cert_serial = TLS_NO_CERT, ++ .privkey_serial = TLS_NO_PRIVKEY, ++ }, + }; + char buf[INET6_ADDRSTRLEN + 1]; + +@@ -999,8 +1006,14 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, + cl_init.hostname = buf; + + switch (ds_proto) { +- case XPRT_TRANSPORT_TCP: + case XPRT_TRANSPORT_TCP_TLS: ++ if (mds_srv->nfs_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE) ++ cl_init.xprtsec = mds_srv->nfs_client->cl_xprtsec; ++ else ++ ds_proto = XPRT_TRANSPORT_TCP; ++ fallthrough; ++ case XPRT_TRANSPORT_RDMA: ++ case XPRT_TRANSPORT_TCP: + if (mds_clp->cl_nconnect > 1) { + cl_init.nconnect = mds_clp->cl_nconnect; + cl_init.max_connect = NFS_MAX_TRANSPORTS; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 94a1caf3266991..a0a71a163ffed0 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -362,7 +362,9 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent + *p++ = htonl(attrs); /* bitmap */ + *p++ = htonl(12); /* attribute buffer length */ + *p++ = htonl(NF4DIR); ++ spin_lock(&dentry->d_lock); + p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); ++ spin_unlock(&dentry->d_lock); + + readdir->pgbase = (char *)p - (char *)start; + readdir->count -= 
readdir->pgbase; +@@ -4576,16 +4578,19 @@ static int _nfs4_proc_lookupp(struct inode *inode, + }; + unsigned short task_flags = 0; + +- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) ++ if (server->flags & NFS_MOUNT_SOFTREVAL) + task_flags |= RPC_TASK_TIMEOUT; ++ if (server->caps & NFS_CAP_MOVEABLE) ++ task_flags |= RPC_TASK_MOVEABLE; + + args.bitmask = nfs4_bitmask(server, fattr->label); + + nfs_fattr_init(fattr); ++ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); + + dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); +- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, +- &res.seq_res, task_flags); ++ status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args, ++ &res.seq_res, task_flags); + dprintk("NFS reply lookupp: %d\n", status); + return status; + } +@@ -7658,10 +7663,10 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, + return err; + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); +- if (err != -NFS4ERR_DELAY) ++ if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE) + break; + ssleep(1); +- } while (err == -NFS4ERR_DELAY); ++ } while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE); + return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); + } + +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 9fc71dc090c254..78d52c2006dcc9 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -2746,6 +2746,9 @@ static void nfs4_state_manager(struct nfs_client *clp) + case -ENETUNREACH: + nfs_mark_client_ready(clp, -EIO); + break; ++ case -EINVAL: ++ nfs_mark_client_ready(clp, status); ++ break; + default: + ssleep(1); + break; + } +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c +index 1b317c44da126b..5314dabb725d80 100644 +--- a/fs/nfs/pnfs_nfs.c ++++ b/fs/nfs/pnfs_nfs.c +@@ -914,7 +914,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, + u32 minor_version) + { + struct nfs_client *clp = ERR_PTR(-EIO); ++ struct nfs_client *mds_clp = mds_srv->nfs_client; ++ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy; + struct nfs4_pnfs_ds_addr *da; ++ int ds_proto; + int status = 0; + + dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr); +@@ -942,12 +945,8 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, + .data = &xprtdata, + }; + +- if (da->da_transport != clp->cl_proto && +- clp->cl_proto != XPRT_TRANSPORT_TCP_TLS) +- continue; +- if (da->da_transport == XPRT_TRANSPORT_TCP && +- mds_srv->nfs_client->cl_proto == +- XPRT_TRANSPORT_TCP_TLS) { ++ if (xprt_args.ident == XPRT_TRANSPORT_TCP && ++ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS) { + struct sockaddr *addr = + (struct sockaddr *)&da->da_addr; + struct sockaddr_in *sin = +@@ -978,7 +977,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, + xprt_args.ident = XPRT_TRANSPORT_TCP_TLS; + xprt_args.servername = servername; + } +- if (da->da_addr.ss_family != clp->cl_addr.ss_family) ++ if (xprt_args.ident != clp->cl_proto) ++ continue; ++ if (xprt_args.dstaddr->sa_family != ++ clp->cl_addr.ss_family) + continue; + + /** +@@ -992,15 +994,14 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, + if (xprtdata.cred) + put_cred(xprtdata.cred); + } else { +- if (da->da_transport == XPRT_TRANSPORT_TCP && +- mds_srv->nfs_client->cl_proto == +- XPRT_TRANSPORT_TCP_TLS) +- da->da_transport = XPRT_TRANSPORT_TCP_TLS; +- clp = nfs4_set_ds_client(mds_srv, +- &da->da_addr, +- da->da_addrlen, +- da->da_transport, timeo, +- retrans, minor_version); ++ ds_proto =
da->da_transport; ++ if (ds_proto == XPRT_TRANSPORT_TCP && ++ xprtsec_policy != RPC_XPRTSEC_NONE) ++ ds_proto = XPRT_TRANSPORT_TCP_TLS; ++ ++ clp = nfs4_set_ds_client(mds_srv, &da->da_addr, ++ da->da_addrlen, ds_proto, ++ timeo, retrans, minor_version); + if (IS_ERR(clp)) + continue; + +@@ -1011,7 +1012,6 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, + clp = ERR_PTR(-EIO); + continue; + } +- + } + } + +diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c +index 784f7c1d003bfc..53d4cdf28ee008 100644 +--- a/fs/nfs/sysfs.c ++++ b/fs/nfs/sysfs.c +@@ -189,6 +189,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, + return p; + + kobject_put(&p->kobject); ++ kobject_put(&p->nfs_net_kobj); + } + return NULL; + } +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index cb1e9996fcc8ec..ef69b15aa72e5f 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -1638,7 +1638,8 @@ static int nfs_writeback_done(struct rpc_task *task, + /* Deal with the suid/sgid bit corner case */ + if (nfs_should_remove_suid(inode)) { + spin_lock(&inode->i_lock); +- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); ++ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE ++ | NFS_INO_REVAL_FORCED); + spin_unlock(&inode->i_lock); + } + return 0; +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index 836367d839bda1..886c0926754426 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -978,10 +978,11 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + static void + nfsd4_read_release(union nfsd4_op_u *u) + { +- if (u->read.rd_nf) ++ if (u->read.rd_nf) { ++ trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, ++ u->read.rd_offset, u->read.rd_length); + nfsd_file_put(u->read.rd_nf); +- trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, +- u->read.rd_offset, u->read.rd_length); ++ } + } + + static __be32 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 4aeb08040f3e5a..e6d0c3b1169aaa 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -1496,7 +1496,8 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid) + release_all_access(stp); + if (stp->st_stateowner) + nfs4_put_stateowner(stp->st_stateowner); +- WARN_ON(!list_empty(&stid->sc_cp_list)); ++ if (!list_empty(&stid->sc_cp_list)) ++ nfs4_free_cpntf_statelist(stid->sc_client->net, stid); + kmem_cache_free(stateid_slab, stid); + } + +diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c +index 0150a221020988..a967babea8a59d 100644 +--- a/fs/ntfs3/inode.c ++++ b/fs/ntfs3/inode.c +@@ -464,6 +464,7 @@ static struct inode *ntfs_read_mft(struct inode *inode, + fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) { + /* Records in $Extend are not a files or general directories. */ + inode->i_op = &ntfs_file_inode_operations; ++ mode = S_IFREG; + } else { + err = -EINVAL; + goto out; +diff --git a/fs/open.c b/fs/open.c +index f9ac703ec1b2d3..b5ea1dcbfb2242 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -1029,18 +1029,20 @@ EXPORT_SYMBOL(finish_open); + * finish_no_open - finish ->atomic_open() without opening the file + * + * @file: file pointer +- * @dentry: dentry or NULL (as returned from ->lookup()) ++ * @dentry: dentry, ERR_PTR(-E...) or NULL (as returned from ->lookup()) + * +- * This can be used to set the result of a successful lookup in ->atomic_open(). ++ * This can be used to set the result of a lookup in ->atomic_open(). + * + * NB: unlike finish_open() this function does consume the dentry reference and + * the caller need not dput() it. 
+ * +- * Returns "0" which must be the return value of ->atomic_open() after having +- * called this function. ++ * Returns 0 or -E..., which must be the return value of ->atomic_open() after ++ * having called this function. + */ + int finish_no_open(struct file *file, struct dentry *dentry) + { ++ if (IS_ERR(dentry)) ++ return PTR_ERR(dentry); + file->f_path.dentry = dentry; + return 0; + } +diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c +index 68b62689a63e24..15738379f55116 100644 +--- a/fs/orangefs/xattr.c ++++ b/fs/orangefs/xattr.c +@@ -54,7 +54,9 @@ static inline int convert_to_internal_xattr_flags(int setxattr_flags) + static unsigned int xattr_key(const char *key) + { + unsigned int i = 0; +- while (key) ++ if (!key) ++ return 0; ++ while (*key) + i += *key++; + return i % 16; + } +@@ -175,8 +177,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name, + cx->length = -1; + cx->timeout = jiffies + + orangefs_getattr_timeout_msecs*HZ/1000; +- hash_add(orangefs_inode->xattr_cache, &cx->node, +- xattr_key(cx->key)); ++ hlist_add_head( &cx->node, ++ &orangefs_inode->xattr_cache[xattr_key(cx->key)]); + } + } + goto out_release_op; +@@ -229,8 +231,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name, + memcpy(cx->val, buffer, length); + cx->length = length; + cx->timeout = jiffies + HZ; +- hash_add(orangefs_inode->xattr_cache, &cx->node, +- xattr_key(cx->key)); ++ hlist_add_head(&cx->node, ++ &orangefs_inode->xattr_cache[xattr_key(cx->key)]); + } + } + +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index 4cadd2fd23d8f8..9eff57c7aef3a5 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -694,6 +694,12 @@ void pde_put(struct proc_dir_entry *pde) + } + } + ++static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent) ++{ ++ rb_erase(&pde->subdir_node, &parent->subdir); ++ RB_CLEAR_NODE(&pde->subdir_node); ++} ++ + /* + * Remove a /proc entry and free it if it's not currently in use. + */ +@@ -716,7 +722,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) + WARN(1, "removing permanent /proc entry '%s'", de->name); + de = NULL; + } else { +- rb_erase(&de->subdir_node, &parent->subdir); ++ pde_erase(de, parent); + if (S_ISDIR(de->mode)) + parent->nlink--; + } +@@ -760,7 +766,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) + root->parent->name, root->name); + return -EINVAL; + } +- rb_erase(&root->subdir_node, &parent->subdir); ++ pde_erase(root, parent); + + de = root; + while (1) { +@@ -772,7 +778,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) + next->parent->name, next->name); + return -EINVAL; + } +- rb_erase(&next->subdir_node, &de->subdir); ++ pde_erase(next, de); + de = next; + continue; + } +diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c +index d64742ba371aa5..539a9038fb0dd2 100644 +--- a/fs/smb/client/cached_dir.c ++++ b/fs/smb/client/cached_dir.c +@@ -362,11 +362,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + * lease. Release one here, and the second below. 
+ */ + cfid->has_lease = false; +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ close_cached_dir(cfid); + } + spin_unlock(&cfids->cfid_list_lock); + +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ close_cached_dir(cfid); + } else { + *ret_cfid = cfid; + atomic_inc(&tcon->num_remote_opens); +@@ -406,12 +406,14 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon, + + static void + smb2_close_cached_fid(struct kref *ref) ++__releases(&cfid->cfids->cfid_list_lock) + { + struct cached_fid *cfid = container_of(ref, struct cached_fid, + refcount); + int rc; + +- spin_lock(&cfid->cfids->cfid_list_lock); ++ lockdep_assert_held(&cfid->cfids->cfid_list_lock); ++ + if (cfid->on_list) { + list_del(&cfid->entry); + cfid->on_list = false; +@@ -446,7 +448,7 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon, + spin_lock(&cfid->cfids->cfid_list_lock); + if (cfid->has_lease) { + cfid->has_lease = false; +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ close_cached_dir(cfid); + } + spin_unlock(&cfid->cfids->cfid_list_lock); + close_cached_dir(cfid); +@@ -455,7 +457,7 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon, + + void close_cached_dir(struct cached_fid *cfid) + { +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock); + } + + /* +@@ -566,7 +568,7 @@ cached_dir_offload_close(struct work_struct *work) + + WARN_ON(cfid->on_list); + +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ close_cached_dir(cfid); + cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close); + } + +@@ -743,7 +745,7 @@ static void cfids_laundromat_worker(struct work_struct *work) + * Drop the ref-count from above, either the lease-ref (if there + * was one) or the extra one acquired. + */ +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ close_cached_dir(cfid); + } + queue_delayed_work(cfid_put_wq, &cfids->laundromat_work, + dir_cache_timeout * HZ); +diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c +index 1058066913dd60..92e43589fd83f1 100644 +--- a/fs/smb/client/file.c ++++ b/fs/smb/client/file.c +@@ -37,6 +37,81 @@ + #include "cifs_ioctl.h" + #include "cached_dir.h" + ++/* ++ * Allocate a bio_vec array and extract up to sg_max pages from a KVEC-type ++ * iterator and add them to the array. This can deal with vmalloc'd buffers as ++ * well as kmalloc'd or static buffers. The pages are not pinned. 
++ */ ++static ssize_t extract_kvec_to_bvec(struct iov_iter *iter, ssize_t maxsize, ++ unsigned int bc_max, ++ struct bio_vec **_bv, unsigned int *_bc) ++{ ++ const struct kvec *kv = iter->kvec; ++ struct bio_vec *bv; ++ unsigned long start = iter->iov_offset; ++ unsigned int i, bc = 0; ++ ssize_t ret = 0; ++ ++ bc_max = iov_iter_npages(iter, bc_max); ++ if (bc_max == 0) { ++ *_bv = NULL; ++ *_bc = 0; ++ return 0; ++ } ++ ++ bv = kvmalloc(array_size(bc_max, sizeof(*bv)), GFP_NOFS); ++ if (!bv) { ++ *_bv = NULL; ++ *_bc = 0; ++ return -ENOMEM; ++ } ++ *_bv = bv; ++ ++ for (i = 0; i < iter->nr_segs; i++) { ++ struct page *page; ++ unsigned long kaddr; ++ size_t off, len, seg; ++ ++ len = kv[i].iov_len; ++ if (start >= len) { ++ start -= len; ++ continue; ++ } ++ ++ kaddr = (unsigned long)kv[i].iov_base + start; ++ off = kaddr & ~PAGE_MASK; ++ len = min_t(size_t, maxsize, len - start); ++ kaddr &= PAGE_MASK; ++ ++ maxsize -= len; ++ ret += len; ++ do { ++ seg = umin(len, PAGE_SIZE - off); ++ if (is_vmalloc_or_module_addr((void *)kaddr)) ++ page = vmalloc_to_page((void *)kaddr); ++ else ++ page = virt_to_page((void *)kaddr); ++ ++ bvec_set_page(bv, page, len, off); ++ bv++; ++ bc++; ++ ++ len -= seg; ++ kaddr += PAGE_SIZE; ++ off = 0; ++ } while (len > 0 && bc < bc_max); ++ ++ if (maxsize <= 0 || bc >= bc_max) ++ break; ++ start = 0; ++ } ++ ++ if (ret > 0) ++ iov_iter_advance(iter, ret); ++ *_bc = bc; ++ return ret; ++} ++ + /* + * Remove the dirty flags from a span of pages. + */ +@@ -2747,8 +2822,10 @@ static void cifs_extend_writeback(struct address_space *mapping, + loff_t start, + int max_pages, + loff_t max_len, +- size_t *_len) ++ size_t *_len, ++ unsigned long long i_size) + { ++ struct inode *inode = mapping->host; + struct folio_batch batch; + struct folio *folio; + unsigned int nr_pages; +@@ -2779,7 +2856,7 @@ static void cifs_extend_writeback(struct address_space *mapping, + + if (!folio_try_get(folio)) { + xas_reset(xas); +- continue; ++ break; + } + nr_pages = folio_nr_pages(folio); + if (nr_pages > max_pages) { +@@ -2799,6 +2876,15 @@ static void cifs_extend_writeback(struct address_space *mapping, + xas_reset(xas); + break; + } ++ ++ /* if file size is changing, stop extending */ ++ if (i_size_read(inode) != i_size) { ++ folio_unlock(folio); ++ folio_put(folio); ++ xas_reset(xas); ++ break; ++ } ++ + if (!folio_test_dirty(folio) || + folio_test_writeback(folio)) { + folio_unlock(folio); +@@ -2934,7 +3020,8 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping, + + if (max_pages > 0) + cifs_extend_writeback(mapping, xas, &count, start, +- max_pages, max_len, &len); ++ max_pages, max_len, &len, ++ i_size); + } + } + len = min_t(unsigned long long, len, i_size - start); +@@ -4318,11 +4405,27 @@ static ssize_t __cifs_readv( + ctx->bv = (void *)ctx->iter.bvec; + ctx->bv_need_unpin = iov_iter_extract_will_pin(to); + ctx->should_dirty = true; +- } else if ((iov_iter_is_bvec(to) || iov_iter_is_kvec(to)) && +- !is_sync_kiocb(iocb)) { ++ } else if (iov_iter_is_kvec(to)) { ++ /* ++ * Extract a KVEC-type iterator into a BVEC-type iterator. We ++ * assume that the storage will be retained by the caller; in ++ * any case, we may or may not be able to pin the pages, so we ++ * don't try. 
++ */ ++ unsigned int bc; ++ ++ rc = extract_kvec_to_bvec(to, iov_iter_count(to), INT_MAX, ++ &ctx->bv, &bc); ++ if (rc < 0) { ++ kref_put(&ctx->refcount, cifs_aio_ctx_release); ++ return rc; ++ } ++ ++ iov_iter_bvec(&ctx->iter, ITER_DEST, ctx->bv, bc, rc); ++ } else if (iov_iter_is_bvec(to) && !is_sync_kiocb(iocb)) { + /* + * If the op is asynchronous, we need to copy the list attached +- * to a BVEC/KVEC-type iterator, but we assume that the storage ++ * to a BVEC-type iterator, but we assume that the storage + * will be retained by the caller; in any case, we may or may + * not be able to pin the pages, so we don't try. + */ +diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c +index 137d03781d5268..cf233cb9c19436 100644 +--- a/fs/smb/client/fs_context.c ++++ b/fs/smb/client/fs_context.c +@@ -1361,12 +1361,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, + cifs_errorf(fc, "Unknown error parsing devname\n"); + goto cifs_parse_mount_err; + } ++ kfree(ctx->source); + ctx->source = smb3_fs_context_fullpath(ctx, '/'); + if (IS_ERR(ctx->source)) { + ctx->source = NULL; + cifs_errorf(fc, "OOM when copying UNC string\n"); + goto cifs_parse_mount_err; + } ++ kfree(fc->source); + fc->source = kstrdup(ctx->source, GFP_KERNEL); + if (fc->source == NULL) { + cifs_errorf(fc, "OOM when copying UNC string\n"); +diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c +index 232a3c28905568..d6086394d0b844 100644 +--- a/fs/smb/client/smb2inode.c ++++ b/fs/smb/client/smb2inode.c +@@ -1128,6 +1128,8 @@ static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon, + smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb); + if (smb2_to_name == NULL) { + rc = -ENOMEM; ++ if (cfile) ++ cifsFileInfo_put(cfile); + goto smb2_rename_path; + } + in_iov.iov_base = smb2_to_name; +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index b02114b734dcdb..138b3ed08217c9 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -2663,11 +2663,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, + struct cifs_fid fid; + int rc; + __le16 *utf16_path; +- struct cached_fid *cfid = NULL; ++ struct cached_fid *cfid; + int retries = 0, cur_sleep = 1; + + replay_again: + /* reinitialize for possible replay */ ++ cfid = NULL; + flags = CIFS_CP_CREATE_CLOSE_OP; + oplock = SMB2_OPLOCK_LEVEL_NONE; + server = cifs_pick_channel(ses); +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index e58cad5d735a22..a8890ae2171445 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -4068,9 +4068,12 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, + + smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; + +- smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), +- le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov, ++ rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), ++ le32_to_cpu(smb_rsp->OutputBufferLength), ++ &rsp_iov, + sizeof(struct file_notify_information)); ++ if (rc) ++ goto cnotify_exit; + + *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), + le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL); +diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c +index 7b2560612bd6aa..7fee8e2c723a8d 100644 +--- a/fs/smb/client/transport.c ++++ b/fs/smb/client/transport.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include "cifspdu.h" + #include "cifsglob.h" + #include "cifsproto.h" 
+@@ -211,9 +212,16 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, + * send a packet. In most cases if we fail to send + * after the retries we will kill the socket and + * reconnect which may clear the network problem. ++ * ++ * Even if regular signals are masked, EINTR might be ++ * propagated from sk_stream_wait_memory() to here when ++ * TIF_NOTIFY_SIGNAL is used for task work. For example, ++ * certain io_uring completions will use that. Treat ++ * having EINTR with pending task work the same as EAGAIN ++ * to avoid unnecessary reconnects. + */ + rc = sock_sendmsg(ssocket, smb_msg); +- if (rc == -EAGAIN) { ++ if (rc == -EAGAIN || unlikely(rc == -EINTR && task_work_pending(current))) { + retries++; + if (retries >= 14 || + (!server->noblocksnd && (retries > 2))) { +@@ -1038,7 +1046,7 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses) + if (!server || server->terminate) + continue; + +- if (CIFS_CHAN_NEEDS_RECONNECT(ses, i)) ++ if (CIFS_CHAN_NEEDS_RECONNECT(ses, cur)) + continue; + + /* +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 9a58c5a6f9866f..9f64808c7917c3 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1793,6 +1793,7 @@ int smb2_sess_setup(struct ksmbd_work *work) + + if (ksmbd_conn_need_reconnect(conn)) { + rc = -EFAULT; ++ ksmbd_user_session_put(sess); + sess = NULL; + goto out_err; + } +@@ -6772,6 +6773,7 @@ int smb2_read(struct ksmbd_work *work) + + nbytes = ksmbd_vfs_read(work, fp, length, &offset, aux_payload_buf); + if (nbytes < 0) { ++ kvfree(aux_payload_buf); + err = nbytes; + goto out; + } +diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c +index 665d21d40e7a17..08275db6446c2b 100644 +--- a/fs/smb/server/transport_tcp.c ++++ b/fs/smb/server/transport_tcp.c +@@ -286,8 +286,11 @@ static int ksmbd_kthread_fn(void *p) + } + } + up_read(&conn_list_lock); +- if (ret == -EAGAIN) ++ if (ret == -EAGAIN) { ++ /* Per-IP limit hit: release the just-accepted socket. 
*/ ++ sock_release(client_sk); + continue; ++ } + + skip_max_ip_conns_limit: + +@@ -476,12 +479,13 @@ static int create_socket(struct interface *iface) + struct socket *ksmbd_socket; + bool ipv4 = false; + +- ret = sock_create(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket); ++ ret = sock_create_kern(current->nsproxy->net_ns, PF_INET6, SOCK_STREAM, ++ IPPROTO_TCP, &ksmbd_socket); + if (ret) { + if (ret != -EAFNOSUPPORT) + pr_err("Can't create socket for ipv6, fallback to ipv4: %d\n", ret); +- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, +- &ksmbd_socket); ++ ret = sock_create_kern(current->nsproxy->net_ns, PF_INET, ++ SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket); + if (ret) { + pr_err("Can't create socket for ipv4: %d\n", ret); + goto out_clear; +diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h +index b9c0b3281ace16..d328b318e44b1a 100644 +--- a/include/linux/blk_types.h ++++ b/include/linux/blk_types.h +@@ -384,15 +384,15 @@ enum req_op { + /* write the zero filled sector many times */ + REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, + /* Open a zone */ +- REQ_OP_ZONE_OPEN = (__force blk_opf_t)10, ++ REQ_OP_ZONE_OPEN = (__force blk_opf_t)11, + /* Close a zone */ +- REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, ++ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13, + /* Transition a zone to full */ +- REQ_OP_ZONE_FINISH = (__force blk_opf_t)13, ++ REQ_OP_ZONE_FINISH = (__force blk_opf_t)15, + /* reset a zone write pointer */ +- REQ_OP_ZONE_RESET = (__force blk_opf_t)15, ++ REQ_OP_ZONE_RESET = (__force blk_opf_t)17, + /* reset all the zone present on the device */ +- REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17, ++ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19, + + /* Driver private requests */ + REQ_OP_DRV_IN = (__force blk_opf_t)34, +@@ -517,6 +517,7 @@ static inline bool op_is_zone_mgmt(enum req_op op) + { + switch (op & REQ_OP_MASK) { + case REQ_OP_ZONE_RESET: ++ case REQ_OP_ZONE_RESET_ALL: + case REQ_OP_ZONE_OPEN: + case REQ_OP_ZONE_CLOSE: + case REQ_OP_ZONE_FINISH: +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h +index b307013b9c6c9a..fa87b6a15082c6 100644 +--- a/include/linux/cgroup.h ++++ b/include/linux/cgroup.h +@@ -633,6 +633,7 @@ static inline void cgroup_kthread_ready(void) + } + + void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen); ++struct cgroup *__cgroup_get_from_id(u64 id); + struct cgroup *cgroup_get_from_id(u64 id); + #else /* !CONFIG_CGROUPS */ + +diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h +index 02f616dfb15f4d..5c8aca9c345660 100644 +--- a/include/linux/compiler_types.h ++++ b/include/linux/compiler_types.h +@@ -229,10 +229,9 @@ struct ftrace_likely_data { + /* + * GCC does not warn about unused static inline functions for -Wunused-function. + * Suppress the warning in clang as well by using __maybe_unused, but enable it +- * for W=1 build. This will allow clang to find unused functions. Remove the +- * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. ++ * for W=2 build. This will allow clang to find unused functions. 
+ */ +-#ifdef KBUILD_EXTRA_WARN1 ++#ifdef KBUILD_EXTRA_WARN2 + #define __inline_maybe_unused + #else + #define __inline_maybe_unused __maybe_unused +diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h +index 2382dec6d6ab8e..fb0fc2736b8015 100644 +--- a/include/linux/fbcon.h ++++ b/include/linux/fbcon.h +@@ -11,6 +11,7 @@ void fbcon_suspended(struct fb_info *info); + void fbcon_resumed(struct fb_info *info); + int fbcon_mode_deleted(struct fb_info *info, + struct fb_videomode *mode); ++void fbcon_delete_modelist(struct list_head *head); + void fbcon_new_modelist(struct fb_info *info); + void fbcon_get_requirement(struct fb_info *info, + struct fb_blit_caps *caps); +@@ -31,6 +32,7 @@ static inline void fbcon_suspended(struct fb_info *info) {} + static inline void fbcon_resumed(struct fb_info *info) {} + static inline int fbcon_mode_deleted(struct fb_info *info, + struct fb_videomode *mode) { return 0; } ++static inline void fbcon_delete_modelist(struct list_head *head) {} + static inline void fbcon_new_modelist(struct fb_info *info) {} + static inline void fbcon_get_requirement(struct fb_info *info, + struct fb_blit_caps *caps) {} +diff --git a/include/linux/filter.h b/include/linux/filter.h +index adf65eacade062..ad5a3d68b55523 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -685,6 +685,26 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb) + cb->data_end = skb->data + skb_headlen(skb); + } + ++static inline int bpf_prog_run_data_pointers( ++ const struct bpf_prog *prog, ++ struct sk_buff *skb) ++{ ++ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; ++ void *save_data_meta, *save_data_end; ++ int res; ++ ++ save_data_meta = cb->data_meta; ++ save_data_end = cb->data_end; ++ ++ bpf_compute_data_pointers(skb); ++ res = bpf_prog_run(prog, skb); ++ ++ cb->data_meta = save_data_meta; ++ cb->data_end = save_data_end; ++ ++ return res; ++} ++ + /* Similar to bpf_compute_data_pointers(), except that save orginal + * data in cb->data and cb->meta_data for restore. 
+ */ +@@ -1065,7 +1085,7 @@ void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); + static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) + { +- pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen, ++ pr_err("flen=%u proglen=%u pass=%u image=%p from=%s pid=%d\n", flen, + proglen, pass, image, current->comm, task_pid_nr(current)); + + if (image) +diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h +index 62674c83bde4ef..48e2ff95332f37 100644 +--- a/include/linux/map_benchmark.h ++++ b/include/linux/map_benchmark.h +@@ -27,5 +27,6 @@ struct map_benchmark { + __u32 dma_dir; /* DMA data direction */ + __u32 dma_trans_ns; /* time for DMA transmission in ns */ + __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ ++ __u8 expansion[76]; /* For future use */ + }; + #endif /* _KERNEL_DMA_BENCHMARK_H */ +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h +index b1fdb1554f2f9c..8aee8b75aad01c 100644 +--- a/include/linux/memcontrol.h ++++ b/include/linux/memcontrol.h +@@ -1039,8 +1039,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, + return x; + } + +-void mem_cgroup_flush_stats(void); +-void mem_cgroup_flush_stats_ratelimited(void); ++void mem_cgroup_flush_stats(struct mem_cgroup *memcg); ++void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg); + + void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val); +@@ -1515,11 +1515,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, + return node_page_state(lruvec_pgdat(lruvec), idx); + } + +-static inline void mem_cgroup_flush_stats(void) ++static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg) + { + } + +-static inline void mem_cgroup_flush_stats_ratelimited(void) ++static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) + { + } + +diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h +index 437441cdf78fb6..b3cbeb46ddeee8 100644 +--- a/include/linux/memory-tiers.h ++++ b/include/linux/memory-tiers.h +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + /* + * Each tier cover a abstrace distance chunk size of 128 + */ +@@ -17,7 +18,7 @@ + * adistance value (slightly faster) than default DRAM adistance to be part of + * the same memory tier. 
+ */ +-#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1)) ++#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1)) + + struct memory_tier; + struct memory_dev_type { +@@ -30,12 +31,21 @@ struct memory_dev_type { + struct kref kref; + }; + ++struct access_coordinate; ++ + #ifdef CONFIG_NUMA + extern bool numa_demotion_enabled; ++extern struct memory_dev_type *default_dram_type; + struct memory_dev_type *alloc_memory_type(int adistance); + void put_memory_type(struct memory_dev_type *memtype); + void init_node_memory_type(int node, struct memory_dev_type *default_type); + void clear_node_memory_type(int node, struct memory_dev_type *memtype); ++int register_mt_adistance_algorithm(struct notifier_block *nb); ++int unregister_mt_adistance_algorithm(struct notifier_block *nb); ++int mt_calc_adistance(int node, int *adist); ++int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, ++ const char *source); ++int mt_perf_to_adistance(struct access_coordinate *perf, int *adist); + #ifdef CONFIG_MIGRATION + int next_demotion_node(int node); + void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets); +@@ -60,6 +70,7 @@ static inline bool node_is_toptier(int node) + #else + + #define numa_demotion_enabled false ++#define default_dram_type NULL + /* + * CONFIG_NUMA implementation returns non NULL error. + */ +@@ -97,5 +108,31 @@ static inline bool node_is_toptier(int node) + { + return true; + } ++ ++static inline int register_mt_adistance_algorithm(struct notifier_block *nb) ++{ ++ return 0; ++} ++ ++static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb) ++{ ++ return 0; ++} ++ ++static inline int mt_calc_adistance(int node, int *adist) ++{ ++ return NOTIFY_DONE; ++} ++ ++static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, ++ const char *source) ++{ ++ return -EIO; ++} ++ ++static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist) ++{ ++ return -EIO; ++} + #endif /* CONFIG_NUMA */ + #endif /* _LINUX_MEMORY_TIERS_H */ +diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h +index 3ddf205b7e2c38..f0dddb88528050 100644 +--- a/include/linux/netpoll.h ++++ b/include/linux/netpoll.h +@@ -32,6 +32,7 @@ struct netpoll { + bool ipv6; + u16 local_port, remote_port; + u8 remote_mac[ETH_ALEN]; ++ struct sk_buff_head skb_pool; + }; + + struct netpoll_info { +diff --git a/include/linux/node.h b/include/linux/node.h +index 427a5975cf4050..dfc004e4bee747 100644 +--- a/include/linux/node.h ++++ b/include/linux/node.h +@@ -20,20 +20,32 @@ + #include + + /** +- * struct node_hmem_attrs - heterogeneous memory performance attributes ++ * struct access_coordinate - generic performance coordinates container + * + * @read_bandwidth: Read bandwidth in MB/s + * @write_bandwidth: Write bandwidth in MB/s + * @read_latency: Read latency in nanoseconds + * @write_latency: Write latency in nanoseconds + */ +-struct node_hmem_attrs { ++struct access_coordinate { + unsigned int read_bandwidth; + unsigned int write_bandwidth; + unsigned int read_latency; + unsigned int write_latency; + }; + ++/* ++ * ACCESS_COORDINATE_LOCAL correlates to ACCESS CLASS 0 ++ * - access_coordinate between target node and nearest initiator node ++ * ACCESS_COORDINATE_CPU correlates to ACCESS CLASS 1 ++ * - access_coordinate between target node and nearest CPU node ++ */ ++enum access_coordinate_class { ++ ACCESS_COORDINATE_LOCAL, ++ ACCESS_COORDINATE_CPU, ++ 
ACCESS_COORDINATE_MAX ++}; ++ + enum cache_indexing { + NODE_CACHE_DIRECT_MAP, + NODE_CACHE_INDEXED, +@@ -65,8 +77,8 @@ struct node_cache_attrs { + + #ifdef CONFIG_HMEM_REPORTING + void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs); +-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, +- unsigned access); ++void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, ++ enum access_coordinate_class access); + #else + static inline void node_add_cache(unsigned int nid, + struct node_cache_attrs *cache_attrs) +@@ -74,8 +86,8 @@ static inline void node_add_cache(unsigned int nid, + } + + static inline void node_set_perf_attrs(unsigned int nid, +- struct node_hmem_attrs *hmem_attrs, +- unsigned access) ++ struct access_coordinate *coord, ++ enum access_coordinate_class access) + { + } + #endif +@@ -137,7 +149,7 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); + + extern int register_memory_node_under_compute_node(unsigned int mem_nid, + unsigned int cpu_nid, +- unsigned access); ++ enum access_coordinate_class access); + #else + static inline void node_dev_init(void) + { +diff --git a/include/linux/pci.h b/include/linux/pci.h +index e4338237a05454..052d956d3ba1f2 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -2667,7 +2667,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) + return false; + } + +-#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) ++#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390) + void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); + #endif + +diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h +index 6dfd05ef5c2d9f..03ba4dab2ef731 100644 +--- a/include/linux/shdma-base.h ++++ b/include/linux/shdma-base.h +@@ -96,7 +96,7 @@ struct shdma_ops { + int (*desc_setup)(struct shdma_chan *, struct shdma_desc *, + dma_addr_t, dma_addr_t, size_t *); + int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool); +- void (*setup_xfer)(struct shdma_chan *, int); ++ int (*setup_xfer)(struct shdma_chan *, int); + void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); + struct shdma_desc *(*embedded_desc)(void *, int); + bool (*chan_irq)(struct shdma_chan *, int); +diff --git a/include/linux/swap.h b/include/linux/swap.h +index d7a5b7817987d8..ff91337695b434 100644 +--- a/include/linux/swap.h ++++ b/include/linux/swap.h +@@ -343,7 +343,8 @@ static inline swp_entry_t page_swap_entry(struct page *page) + } + + /* linux/mm/workingset.c */ +-bool workingset_test_recent(void *shadow, bool file, bool *workingset); ++bool workingset_test_recent(void *shadow, bool file, bool *workingset, ++ bool flush); + void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); + void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); + void workingset_refault(struct folio *folio, void *shadow); +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h +index 8abfa124004003..b61796a35d2bab 100644 +--- a/include/linux/vm_event_item.h ++++ b/include/linux/vm_event_item.h +@@ -145,6 +145,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, + #ifdef CONFIG_ZSWAP + ZSWPIN, + ZSWPOUT, ++ ZSWPWB, + #endif + #ifdef CONFIG_X86 + DIRECT_MAP_LEVEL2_SPLIT, +diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h +index 4c084a03d6bb75..b25746b91986c2 100644 +--- a/include/net/bluetooth/hci.h ++++ 
b/include/net/bluetooth/hci.h +@@ -392,6 +392,7 @@ enum { + HCI_USER_CHANNEL, + HCI_EXT_CONFIGURED, + HCI_LE_ADV, ++ HCI_LE_ADV_0, + HCI_LE_PER_ADV, + HCI_LE_SCAN, + HCI_SSP_ENABLED, +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index 62135b7782f5be..a2a6fb20f49647 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -95,6 +96,7 @@ struct discovery_state { + unsigned long scan_start; + unsigned long scan_duration; + unsigned long name_resolve_timeout; ++ spinlock_t lock; + }; + + #define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ +@@ -240,6 +242,7 @@ struct adv_info { + bool enabled; + bool pending; + bool periodic; ++ bool periodic_enabled; + __u8 mesh; + __u8 instance; + __u32 flags; +@@ -869,6 +872,7 @@ static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, + + static inline void discovery_init(struct hci_dev *hdev) + { ++ spin_lock_init(&hdev->discovery.lock); + hdev->discovery.state = DISCOVERY_STOPPED; + INIT_LIST_HEAD(&hdev->discovery.all); + INIT_LIST_HEAD(&hdev->discovery.unknown); +@@ -883,8 +887,12 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev) + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; + hdev->discovery.uuid_count = 0; ++ ++ spin_lock(&hdev->discovery.lock); + kfree(hdev->discovery.uuids); + hdev->discovery.uuids = NULL; ++ spin_unlock(&hdev->discovery.lock); ++ + hdev->discovery.scan_start = 0; + hdev->discovery.scan_duration = 0; + } +diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h +index d382679efd2b1c..e083f0fa0113a9 100644 +--- a/include/net/bluetooth/mgmt.h ++++ b/include/net/bluetooth/mgmt.h +@@ -774,7 +774,7 @@ struct mgmt_adv_pattern { + __u8 ad_type; + __u8 offset; + __u8 length; +- __u8 value[31]; ++ __u8 value[HCI_MAX_AD_LENGTH]; + } __packed; + + #define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052 +diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h +index 7e78e7d6f01524..668aeee9b3f662 100644 +--- a/include/net/cls_cgroup.h ++++ b/include/net/cls_cgroup.h +@@ -63,7 +63,7 @@ static inline u32 task_get_classid(const struct sk_buff *skb) + * calls by looking at the number of nested bh disable calls because + * softirqs always disables bh. + */ +- if (in_serving_softirq()) { ++ if (softirq_count()) { + struct sock *sk = skb_to_full_sk(skb); + + /* If there is an sock_cgroup_classid we'll use that. */ +diff --git a/include/net/gro.h b/include/net/gro.h +index 018343254c90a6..9260ed367c9190 100644 +--- a/include/net/gro.h ++++ b/include/net/gro.h +@@ -10,6 +10,9 @@ + #include + #include + ++/* This should be increased if a protocol with a bigger head is added. 
*/ ++#define GRO_MAX_HEAD (MAX_HEADER + 128) ++ + struct napi_gro_cb { + union { + struct { +diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h +index ea8595651c3846..e066bdbc807bed 100644 +--- a/include/net/nfc/nci_core.h ++++ b/include/net/nfc/nci_core.h +@@ -52,7 +52,7 @@ enum nci_state { + #define NCI_RF_DISC_SELECT_TIMEOUT 5000 + #define NCI_RF_DEACTIVATE_TIMEOUT 30000 + #define NCI_CMD_TIMEOUT 5000 +-#define NCI_DATA_TIMEOUT 700 ++#define NCI_DATA_TIMEOUT 3000 + + struct nci_dev; + +diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h +index e8dd77a9674803..a5ce83f3eea4bf 100644 +--- a/include/net/tc_act/tc_connmark.h ++++ b/include/net/tc_act/tc_connmark.h +@@ -7,6 +7,7 @@ + struct tcf_connmark_parms { + struct net *net; + u16 zone; ++ int action; + struct rcu_head rcu; + }; + +diff --git a/include/net/xdp.h b/include/net/xdp.h +index b39ac83618a550..3d8989096b5d29 100644 +--- a/include/net/xdp.h ++++ b/include/net/xdp.h +@@ -113,6 +113,11 @@ static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp) + xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC; + } + ++static __always_inline void xdp_buff_clear_frag_pfmemalloc(struct xdp_buff *xdp) ++{ ++ xdp->flags &= ~XDP_FLAGS_FRAGS_PF_MEMALLOC; ++} ++ + static __always_inline void + xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) + { +diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h +index f52de5ed1b3b6e..83563247c36cb2 100644 +--- a/include/ufs/ufs_quirks.h ++++ b/include/ufs/ufs_quirks.h +@@ -113,4 +113,7 @@ struct ufs_dev_quirk { + */ + #define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12) + ++/* Some UFS 4 devices do not support the qTimestamp attribute */ ++#define UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT (1 << 13) ++ + #endif /* UFS_QUIRKS_H_ */ +diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h +index e9db9682316a2a..78380fc2374ea5 100644 +--- a/include/ufs/ufshcd.h ++++ b/include/ufs/ufshcd.h +@@ -323,6 +323,7 @@ struct ufs_pwr_mode_info { + * @device_reset: called to issue a reset pulse on the UFS device + * @config_scaling_param: called to configure clock scaling parameters + * @program_key: program or evict an inline encryption key ++ * @fill_crypto_prdt: initialize crypto-related fields in the PRDT + * @event_notify: called to notify important events + * @mcq_config_resource: called to configure MCQ platform resources + * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode +@@ -366,6 +367,9 @@ struct ufs_hba_variant_ops { + struct devfreq_simple_ondemand_data *data); + int (*program_key)(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot); ++ int (*fill_crypto_prdt)(struct ufs_hba *hba, ++ const struct bio_crypt_ctx *crypt_ctx, ++ void *prdt, unsigned int num_segments); + void (*event_notify)(struct ufs_hba *hba, + enum ufs_event_type evt, void *data); + int (*mcq_config_resource)(struct ufs_hba *hba); +@@ -642,6 +646,45 @@ enum ufshcd_quirks { + * thus need this quirk to skip related flow. + */ + UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21, ++ ++ /* ++ * This quirk needs to be enabled if the host controller supports inline ++ * encryption but it needs to initialize the crypto capabilities in a ++ * nonstandard way and/or needs to override blk_crypto_ll_ops. If ++ * enabled, the standard code won't initialize the blk_crypto_profile; ++ * ufs_hba_variant_ops::init() must do it instead. 
++ */ ++ UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22, ++ ++ /* ++ * This quirk needs to be enabled if the host controller supports inline ++ * encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e. ++ * host controller initialization fails if that bit is set. ++ */ ++ UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23, ++ ++ /* ++ * This quirk needs to be enabled if the host controller driver copies ++ * cryptographic keys into the PRDT in order to send them to hardware, ++ * and therefore the PRDT should be zeroized after each request (as per ++ * the standard best practice for managing keys). ++ */ ++ UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24, ++ ++ /* ++ * This quirk indicates that the controller reports the value 1 (not ++ * supported) in the Legacy Single DoorBell Support (LSDBS) bit of the ++ * Controller Capabilities register although it supports the legacy ++ * single doorbell mode. ++ */ ++ UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25, ++ ++ /* ++ * This quirk indicates that DME_LINKSTARTUP should not be issued a 2nd ++ * time (refer link_startup_again) after the 1st time was successful, ++ * because it causes link startup to become unreliable. ++ */ ++ UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE = 1 << 26, + }; + + enum ufshcd_caps { +@@ -1425,5 +1468,6 @@ int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask); + int ufshcd_write_ee_control(struct ufs_hba *hba); + int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, + const u16 *other_mask, u16 set, u16 clr); ++void ufshcd_force_error_recovery(struct ufs_hba *hba); + + #endif /* End of Header */ +diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h +index ae93b30d25893e..725437f02b1fa9 100644 +--- a/include/ufs/ufshci.h ++++ b/include/ufs/ufshci.h +@@ -182,6 +182,7 @@ static inline u32 ufshci_version(u32 major, u32 minor) + #define UTP_TASK_REQ_COMPL 0x200 + #define UIC_COMMAND_COMPL 0x400 + #define DEVICE_FATAL_ERROR 0x800 ++#define UTP_ERROR 0x1000 + #define CONTROLLER_FATAL_ERROR 0x10000 + #define SYSTEM_BUS_FATAL_ERROR 0x20000 + #define CRYPTO_ENGINE_FATAL_ERROR 0x40000 +@@ -201,7 +202,8 @@ static inline u32 ufshci_version(u32 major, u32 minor) + CONTROLLER_FATAL_ERROR |\ + SYSTEM_BUS_FATAL_ERROR |\ + CRYPTO_ENGINE_FATAL_ERROR |\ +- UIC_LINK_LOST) ++ UIC_LINK_LOST |\ ++ UTP_ERROR) + + /* HCS - Host Controller Status 30h */ + #define DEVICE_PRESENT 0x1 +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c +index 90c281e1379eeb..2cfbc48b824259 100644 +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -2276,7 +2276,7 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) + { + struct cgroup *cgrp; + +- cgrp = cgroup_get_from_id(cgid); ++ cgrp = __cgroup_get_from_id(cgid); + if (IS_ERR(cgrp)) + return NULL; + return cgrp; +diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c +index 6aff5ee483b60e..c0c5e9b313e433 100644 +--- a/kernel/bpf/ringbuf.c ++++ b/kernel/bpf/ringbuf.c +@@ -215,6 +215,8 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr) + + static void bpf_ringbuf_free(struct bpf_ringbuf *rb) + { ++ irq_work_sync(&rb->work); ++ + /* copy pages pointer and nr_pages to local variable, as we are going + * to unmap rb itself with vunmap() below + */ +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 5e644b1b12aaaa..45b2f06de452ce 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -7839,7 +7839,7 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env, + struct bpf_verifier_state *cur) + { + struct bpf_func_state *fold, *fcur; +- int 
i, fr; ++ int i, fr, num_slots; + + reset_idmap_scratch(env); + for (fr = old->curframe; fr >= 0; fr--) { +@@ -7852,7 +7852,9 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env, + &fcur->regs[i], + &env->idmap_scratch); + +- for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { ++ num_slots = min(fold->allocated_stack / BPF_REG_SIZE, ++ fcur->allocated_stack / BPF_REG_SIZE); ++ for (i = 0; i < num_slots; i++) { + if (!is_spilled_reg(&fold->stack[i]) || + !is_spilled_reg(&fcur->stack[i])) + continue; +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 5135838b5899f8..476be99dbcf1bf 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -6220,15 +6220,15 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) + } + + /* +- * cgroup_get_from_id : get the cgroup associated with cgroup id ++ * __cgroup_get_from_id : get the cgroup associated with cgroup id + * @id: cgroup id + * On success return the cgrp or ERR_PTR on failure +- * Only cgroups within current task's cgroup NS are valid. ++ * There are no cgroup NS restrictions. + */ +-struct cgroup *cgroup_get_from_id(u64 id) ++struct cgroup *__cgroup_get_from_id(u64 id) + { + struct kernfs_node *kn; +- struct cgroup *cgrp, *root_cgrp; ++ struct cgroup *cgrp; + + kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id); + if (!kn) +@@ -6250,6 +6250,22 @@ struct cgroup *cgroup_get_from_id(u64 id) + + if (!cgrp) + return ERR_PTR(-ENOENT); ++ return cgrp; ++} ++ ++/* ++ * cgroup_get_from_id : get the cgroup associated with cgroup id ++ * @id: cgroup id ++ * On success return the cgrp or ERR_PTR on failure ++ * Only cgroups within current task's cgroup NS are valid. ++ */ ++struct cgroup *cgroup_get_from_id(u64 id) ++{ ++ struct cgroup *cgrp, *root_cgrp; ++ ++ cgrp = __cgroup_get_from_id(id); ++ if (IS_ERR(cgrp)) ++ return cgrp; + + root_cgrp = current_cgns_cgroup_dfl(); + if (!cgroup_is_descendant(cgrp, root_cgrp)) { +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index a554f43d3ceb9e..6304238293ae10 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -2255,6 +2255,13 @@ static void handle_swbp(struct pt_regs *regs) + + handler_chain(uprobe, regs); + ++ /* ++ * If user decided to take execution elsewhere, it makes little sense ++ * to execute the original instruction, so let's skip it. 
++ */ ++ if (instruction_pointer(regs) != bp_vaddr) ++ goto out; ++ + if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) + goto out; + +diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c +index a8074079b09e87..48feaa545b3c7f 100644 +--- a/kernel/futex/syscalls.c ++++ b/kernel/futex/syscalls.c +@@ -40,6 +40,56 @@ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, + return 0; + } + ++static inline void __user *futex_task_robust_list(struct task_struct *p, bool compat) ++{ ++#ifdef CONFIG_COMPAT ++ if (compat) ++ return p->compat_robust_list; ++#endif ++ return p->robust_list; ++} ++ ++static void __user *futex_get_robust_list_common(int pid, bool compat) ++{ ++ struct task_struct *p = current; ++ void __user *head; ++ int ret; ++ ++ scoped_guard(rcu) { ++ if (pid) { ++ p = find_task_by_vpid(pid); ++ if (!p) ++ return (void __user *)ERR_PTR(-ESRCH); ++ } ++ get_task_struct(p); ++ } ++ ++ /* ++ * Hold exec_update_lock to serialize with concurrent exec() ++ * so ptrace_may_access() is checked against stable credentials ++ */ ++ ret = down_read_killable(&p->signal->exec_update_lock); ++ if (ret) ++ goto err_put; ++ ++ ret = -EPERM; ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) ++ goto err_unlock; ++ ++ head = futex_task_robust_list(p, compat); ++ ++ up_read(&p->signal->exec_update_lock); ++ put_task_struct(p); ++ ++ return head; ++ ++err_unlock: ++ up_read(&p->signal->exec_update_lock); ++err_put: ++ put_task_struct(p); ++ return (void __user *)ERR_PTR(ret); ++} ++ + /** + * sys_get_robust_list() - Get the robust-futex list head of a task + * @pid: pid of the process [zero for current task] +@@ -50,36 +100,14 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, + struct robust_list_head __user * __user *, head_ptr, + size_t __user *, len_ptr) + { +- struct robust_list_head __user *head; +- unsigned long ret; +- struct task_struct *p; +- +- rcu_read_lock(); +- +- ret = -ESRCH; +- if (!pid) +- p = current; +- else { +- p = find_task_by_vpid(pid); +- if (!p) +- goto err_unlock; +- } +- +- ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) +- goto err_unlock; ++ struct robust_list_head __user *head = futex_get_robust_list_common(pid, false); + +- head = p->robust_list; +- rcu_read_unlock(); ++ if (IS_ERR(head)) ++ return PTR_ERR(head); + + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(head, head_ptr); +- +-err_unlock: +- rcu_read_unlock(); +- +- return ret; + } + + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, +@@ -322,36 +350,14 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, + compat_uptr_t __user *, head_ptr, + compat_size_t __user *, len_ptr) + { +- struct compat_robust_list_head __user *head; +- unsigned long ret; +- struct task_struct *p; +- +- rcu_read_lock(); +- +- ret = -ESRCH; +- if (!pid) +- p = current; +- else { +- p = find_task_by_vpid(pid); +- if (!p) +- goto err_unlock; +- } +- +- ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) +- goto err_unlock; ++ struct compat_robust_list_head __user *head = futex_get_robust_list_common(pid, true); + +- head = p->compat_robust_list; +- rcu_read_unlock(); ++ if (IS_ERR(head)) ++ return PTR_ERR(head); + + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(ptr_to_compat(head), head_ptr); +- +-err_unlock: +- rcu_read_unlock(); +- +- return ret; + } + #endif /* CONFIG_COMPAT */ + +diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c +index fd75b4a484d76a..bbccbae331d72b 100644 +--- 
a/kernel/gcov/gcc_4_7.c ++++ b/kernel/gcov/gcc_4_7.c +@@ -18,7 +18,9 @@ + #include + #include "gcov.h" + +-#if (__GNUC__ >= 14) ++#if (__GNUC__ >= 15) ++#define GCOV_COUNTERS 10 ++#elif (__GNUC__ >= 14) + #define GCOV_COUNTERS 9 + #elif (__GNUC__ >= 10) + #define GCOV_COUNTERS 8 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 58231999d929ea..7f23b866c3d4c8 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4836,6 +4836,11 @@ static inline unsigned long task_util(struct task_struct *p) + return READ_ONCE(p->se.avg.util_avg); + } + ++static inline unsigned long task_runnable(struct task_struct *p) ++{ ++ return READ_ONCE(p->se.avg.runnable_avg); ++} ++ + static inline unsigned long _task_util_est(struct task_struct *p) + { + struct util_est ue = READ_ONCE(p->se.avg.util_est); +@@ -4954,6 +4959,14 @@ static inline void util_est_update(struct cfs_rq *cfs_rq, + if (task_util(p) > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)))) + return; + ++ /* ++ * To avoid underestimate of task utilization, skip updates of EWMA if ++ * we cannot grant that thread got all CPU time it wanted. ++ */ ++ if ((ue.enqueued + UTIL_EST_MARGIN) < task_runnable(p)) ++ goto done; ++ ++ + /* + * Update Task's estimated utilization + * +@@ -9152,7 +9165,7 @@ static int detach_tasks(struct lb_env *env) + case migrate_util: + util = task_util_est(p); + +- if (util > env->imbalance) ++ if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance) + goto next; + + env->imbalance -= util; +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 15785a729a0cdf..398992597685b6 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -6873,6 +6873,8 @@ void ftrace_module_enable(struct module *mod) + if (!within_module(rec->ip, mod)) + break; + ++ cond_resched(); ++ + /* Weak functions should still be ignored */ + if (!test_for_valid_rec(rec)) { + /* Clear all other flags. 
Should not be enabled anyway */ +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index 88985aefb71ff8..e6d2f2a94235fa 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -3258,14 +3258,16 @@ static struct field_var *create_field_var(struct hist_trigger_data *hist_data, + var = create_var(hist_data, file, field_name, val->size, val->type); + if (IS_ERR(var)) { + hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); +- kfree(val); ++ destroy_hist_field(val, 0); + ret = PTR_ERR(var); + goto err; + } + + field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); + if (!field_var) { +- kfree(val); ++ destroy_hist_field(val, 0); ++ kfree_const(var->type); ++ kfree(var->var.name); + kfree(var); + ret = -ENOMEM; + goto err; +diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile +index 88246000c9d8eb..b1b81998e101ed 100644 +--- a/lib/crypto/Makefile ++++ b/lib/crypto/Makefile +@@ -31,7 +31,7 @@ libcurve25519-generic-y := curve25519-fiat32.o + libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o + libcurve25519-generic-y += curve25519-generic.o + # clang versions prior to 18 may blow out the stack with KASAN +-ifeq ($(call clang-min-version, 180000),) ++ifeq ($(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_) + KASAN_SANITIZE_curve25519-hacl64.o := n + endif + +diff --git a/mm/filemap.c b/mm/filemap.c +index ab24dbf5e747ef..1c229b261a62be 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -3608,19 +3608,33 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, + struct vm_area_struct *vma = vmf->vma; + struct file *file = vma->vm_file; + struct address_space *mapping = file->f_mapping; +- pgoff_t last_pgoff = start_pgoff; ++ pgoff_t file_end, last_pgoff = start_pgoff; + unsigned long addr; + XA_STATE(xas, &mapping->i_pages, start_pgoff); + struct folio *folio; + vm_fault_t ret = 0; + unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved; ++ bool can_map_large; + + rcu_read_lock(); + folio = next_uptodate_folio(&xas, mapping, end_pgoff); + if (!folio) + goto out; + +- if (filemap_map_pmd(vmf, folio, start_pgoff)) { ++ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1; ++ end_pgoff = min(end_pgoff, file_end); ++ ++ /* ++ * Do not allow to map with PTEs beyond i_size and with PMD ++ * across i_size to preserve SIGBUS semantics. ++ * ++ * Make an exception for shmem/tmpfs that for long time ++ * intentionally mapped with PMDs across i_size. ++ */ ++ can_map_large = shmem_mapping(mapping) || ++ file_end >= folio_next_index(folio); ++ ++ if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) { + ret = VM_FAULT_NOPAGE; + goto out; + } +@@ -3632,6 +3646,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, + folio_put(folio); + goto out; + } ++ + do { + unsigned long end; + +@@ -4195,6 +4210,9 @@ static void filemap_cachestat(struct address_space *mapping, + XA_STATE(xas, &mapping->i_pages, first_index); + struct folio *folio; + ++ /* Flush stats (and potentially sleep) outside the RCU read section. 
*/ ++ mem_cgroup_flush_stats_ratelimited(NULL); ++ + rcu_read_lock(); + xas_for_each(&xas, folio, last_index) { + int order; +@@ -4258,7 +4276,7 @@ static void filemap_cachestat(struct address_space *mapping, + goto resched; + } + #endif +- if (workingset_test_recent(shadow, true, &workingset)) ++ if (workingset_test_recent(shadow, true, &workingset, false)) + cs->nr_recently_evicted += nr_pages; + + goto resched; +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 2d2cada8a8a4c8..41207e680e7d96 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -570,6 +570,92 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) + return mz; + } + ++/* Subset of vm_event_item to report for memcg event stats */ ++static const unsigned int memcg_vm_event_stat[] = { ++ PGPGIN, ++ PGPGOUT, ++ PGSCAN_KSWAPD, ++ PGSCAN_DIRECT, ++ PGSCAN_KHUGEPAGED, ++ PGSTEAL_KSWAPD, ++ PGSTEAL_DIRECT, ++ PGSTEAL_KHUGEPAGED, ++ PGFAULT, ++ PGMAJFAULT, ++ PGREFILL, ++ PGACTIVATE, ++ PGDEACTIVATE, ++ PGLAZYFREE, ++ PGLAZYFREED, ++#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) ++ ZSWPIN, ++ ZSWPOUT, ++ ZSWPWB, ++#endif ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++ THP_FAULT_ALLOC, ++ THP_COLLAPSE_ALLOC, ++ THP_SWPOUT, ++ THP_SWPOUT_FALLBACK, ++#endif ++}; ++ ++#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat) ++static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly; ++ ++static void init_memcg_events(void) ++{ ++ int i; ++ ++ for (i = 0; i < NR_MEMCG_EVENTS; ++i) ++ mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1; ++} ++ ++static inline int memcg_events_index(enum vm_event_item idx) ++{ ++ return mem_cgroup_events_index[idx] - 1; ++} ++ ++struct memcg_vmstats_percpu { ++ /* Stats updates since the last flush */ ++ unsigned int stats_updates; ++ ++ /* Cached pointers for fast iteration in memcg_rstat_updated() */ ++ struct memcg_vmstats_percpu *parent; ++ struct memcg_vmstats *vmstats; ++ ++ /* The above should fit a single cacheline for memcg_rstat_updated() */ ++ ++ /* Local (CPU and cgroup) page state & events */ ++ long state[MEMCG_NR_STAT]; ++ unsigned long events[NR_MEMCG_EVENTS]; ++ ++ /* Delta calculation for lockless upward propagation */ ++ long state_prev[MEMCG_NR_STAT]; ++ unsigned long events_prev[NR_MEMCG_EVENTS]; ++ ++ /* Cgroup1: threshold notifications & softlimit tree updates */ ++ unsigned long nr_page_events; ++ unsigned long targets[MEM_CGROUP_NTARGETS]; ++} ____cacheline_aligned; ++ ++struct memcg_vmstats { ++ /* Aggregated (CPU and subtree) page state & events */ ++ long state[MEMCG_NR_STAT]; ++ unsigned long events[NR_MEMCG_EVENTS]; ++ ++ /* Non-hierarchical (CPU aggregated) page state & events */ ++ long state_local[MEMCG_NR_STAT]; ++ unsigned long events_local[NR_MEMCG_EVENTS]; ++ ++ /* Pending child counts during tree propagation */ ++ long state_pending[MEMCG_NR_STAT]; ++ unsigned long events_pending[NR_MEMCG_EVENTS]; ++ ++ /* Stats updates since the last flush */ ++ atomic64_t stats_updates; ++}; ++ + /* + * memcg and lruvec stats flushing + * +@@ -587,10 +673,7 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) + */ + static void flush_memcg_stats_dwork(struct work_struct *w); + static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork); +-static DEFINE_PER_CPU(unsigned int, stats_updates); +-static atomic_t stats_flush_ongoing = ATOMIC_INIT(0); +-static atomic_t stats_flush_threshold = ATOMIC_INIT(0); +-static u64 flush_next_time; ++static u64 flush_last_time; + + #define FLUSH_TIME (2UL*HZ) + +@@ 
-616,141 +699,87 @@ static void memcg_stats_unlock(void) + preempt_enable_nested(); + } + ++ ++static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats) ++{ ++ return atomic64_read(&vmstats->stats_updates) > ++ MEMCG_CHARGE_BATCH * num_online_cpus(); ++} ++ + static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) + { +- unsigned int x; ++ struct memcg_vmstats_percpu *statc; ++ int cpu = smp_processor_id(); ++ unsigned int stats_updates; + + if (!val) + return; + +- cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); ++ cgroup_rstat_updated(memcg->css.cgroup, cpu); ++ statc = this_cpu_ptr(memcg->vmstats_percpu); ++ for (; statc; statc = statc->parent) { ++ stats_updates = READ_ONCE(statc->stats_updates) + abs(val); ++ WRITE_ONCE(statc->stats_updates, stats_updates); ++ if (stats_updates < MEMCG_CHARGE_BATCH) ++ continue; + +- x = __this_cpu_add_return(stats_updates, abs(val)); +- if (x > MEMCG_CHARGE_BATCH) { + /* +- * If stats_flush_threshold exceeds the threshold +- * (>num_online_cpus()), cgroup stats update will be triggered +- * in __mem_cgroup_flush_stats(). Increasing this var further +- * is redundant and simply adds overhead in atomic update. ++ * If @memcg is already flush-able, increasing stats_updates is ++ * redundant. Avoid the overhead of the atomic update. + */ +- if (atomic_read(&stats_flush_threshold) <= num_online_cpus()) +- atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold); +- __this_cpu_write(stats_updates, 0); ++ if (!memcg_vmstats_needs_flush(statc->vmstats)) ++ atomic64_add(stats_updates, ++ &statc->vmstats->stats_updates); ++ WRITE_ONCE(statc->stats_updates, 0); + } + } + +-static void do_flush_stats(void) ++static void do_flush_stats(struct mem_cgroup *memcg) + { +- /* +- * We always flush the entire tree, so concurrent flushers can just +- * skip. This avoids a thundering herd problem on the rstat global lock +- * from memcg flushers (e.g. reclaim, refault, etc). +- */ +- if (atomic_read(&stats_flush_ongoing) || +- atomic_xchg(&stats_flush_ongoing, 1)) +- return; +- +- WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME); +- +- cgroup_rstat_flush(root_mem_cgroup->css.cgroup); ++ if (mem_cgroup_is_root(memcg)) ++ WRITE_ONCE(flush_last_time, jiffies_64); + +- atomic_set(&stats_flush_threshold, 0); +- atomic_set(&stats_flush_ongoing, 0); ++ cgroup_rstat_flush(memcg->css.cgroup); + } + +-void mem_cgroup_flush_stats(void) ++/* ++ * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree ++ * @memcg: root of the subtree to flush ++ * ++ * Flushing is serialized by the underlying global rstat lock. There is also a ++ * minimum amount of work to be done even if there are no stat updates to flush. ++ * Hence, we only flush the stats if the updates delta exceeds a threshold. This ++ * avoids unnecessary work and contention on the underlying lock. 
++ */ ++void mem_cgroup_flush_stats(struct mem_cgroup *memcg) + { +- if (atomic_read(&stats_flush_threshold) > num_online_cpus()) +- do_flush_stats(); ++ if (mem_cgroup_disabled()) ++ return; ++ ++ if (!memcg) ++ memcg = root_mem_cgroup; ++ ++ if (memcg_vmstats_needs_flush(memcg->vmstats)) ++ do_flush_stats(memcg); + } + +-void mem_cgroup_flush_stats_ratelimited(void) ++void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) + { +- if (time_after64(jiffies_64, READ_ONCE(flush_next_time))) +- mem_cgroup_flush_stats(); ++ /* Only flush if the periodic flusher is one full cycle late */ ++ if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME)) ++ mem_cgroup_flush_stats(memcg); + } + + static void flush_memcg_stats_dwork(struct work_struct *w) + { + /* +- * Always flush here so that flushing in latency-sensitive paths is +- * as cheap as possible. ++ * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing ++ * in latency-sensitive paths is as cheap as possible. + */ +- do_flush_stats(); ++ do_flush_stats(root_mem_cgroup); + queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); + } + +-/* Subset of vm_event_item to report for memcg event stats */ +-static const unsigned int memcg_vm_event_stat[] = { +- PGPGIN, +- PGPGOUT, +- PGSCAN_KSWAPD, +- PGSCAN_DIRECT, +- PGSCAN_KHUGEPAGED, +- PGSTEAL_KSWAPD, +- PGSTEAL_DIRECT, +- PGSTEAL_KHUGEPAGED, +- PGFAULT, +- PGMAJFAULT, +- PGREFILL, +- PGACTIVATE, +- PGDEACTIVATE, +- PGLAZYFREE, +- PGLAZYFREED, +-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) +- ZSWPIN, +- ZSWPOUT, +-#endif +-#ifdef CONFIG_TRANSPARENT_HUGEPAGE +- THP_FAULT_ALLOC, +- THP_COLLAPSE_ALLOC, +-#endif +-}; +- +-#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat) +-static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly; +- +-static void init_memcg_events(void) +-{ +- int i; +- +- for (i = 0; i < NR_MEMCG_EVENTS; ++i) +- mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1; +-} +- +-static inline int memcg_events_index(enum vm_event_item idx) +-{ +- return mem_cgroup_events_index[idx] - 1; +-} +- +-struct memcg_vmstats_percpu { +- /* Local (CPU and cgroup) page state & events */ +- long state[MEMCG_NR_STAT]; +- unsigned long events[NR_MEMCG_EVENTS]; +- +- /* Delta calculation for lockless upward propagation */ +- long state_prev[MEMCG_NR_STAT]; +- unsigned long events_prev[NR_MEMCG_EVENTS]; +- +- /* Cgroup1: threshold notifications & softlimit tree updates */ +- unsigned long nr_page_events; +- unsigned long targets[MEM_CGROUP_NTARGETS]; +-}; +- +-struct memcg_vmstats { +- /* Aggregated (CPU and subtree) page state & events */ +- long state[MEMCG_NR_STAT]; +- unsigned long events[NR_MEMCG_EVENTS]; +- +- /* Non-hierarchical (CPU aggregated) page state & events */ +- long state_local[MEMCG_NR_STAT]; +- unsigned long events_local[NR_MEMCG_EVENTS]; +- +- /* Pending child counts during tree propagation */ +- long state_pending[MEMCG_NR_STAT]; +- unsigned long events_pending[NR_MEMCG_EVENTS]; +-}; +- + unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) + { + long x = READ_ONCE(memcg->vmstats->state[idx]); +@@ -1579,7 +1608,7 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) + * + * Current memory state: + */ +- mem_cgroup_flush_stats(); ++ mem_cgroup_flush_stats(memcg); + + for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { + u64 size; +@@ -4029,7 +4058,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v) + int nid; + struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); + +- mem_cgroup_flush_stats(); ++ mem_cgroup_flush_stats(memcg); + + for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { + seq_printf(m, "%s=%lu", stat->name, +@@ -4104,7 +4133,7 @@ static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) + + BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); + +- mem_cgroup_flush_stats(); ++ mem_cgroup_flush_stats(memcg); + + for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { + unsigned long nr; +@@ -4606,7 +4635,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); + struct mem_cgroup *parent; + +- mem_cgroup_flush_stats(); ++ mem_cgroup_flush_stats(memcg); + + *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); + *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); +@@ -5306,10 +5335,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg) + __mem_cgroup_free(memcg); + } + +-static struct mem_cgroup *mem_cgroup_alloc(void) ++static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) + { ++ struct memcg_vmstats_percpu *statc, *pstatc; + struct mem_cgroup *memcg; +- int node; ++ int node, cpu; + int __maybe_unused i; + long error = -ENOMEM; + +@@ -5332,6 +5362,14 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + if (!memcg->vmstats_percpu) + goto fail; + ++ for_each_possible_cpu(cpu) { ++ if (parent) ++ pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu); ++ statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); ++ statc->parent = parent ? pstatc : NULL; ++ statc->vmstats = memcg->vmstats; ++ } ++ + for_each_node(node) + if (alloc_mem_cgroup_per_node_info(memcg, node)) + goto fail; +@@ -5377,7 +5415,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + struct mem_cgroup *memcg, *old_memcg; + + old_memcg = set_active_memcg(parent); +- memcg = mem_cgroup_alloc(); ++ memcg = mem_cgroup_alloc(parent); + set_active_memcg(old_memcg); + if (IS_ERR(memcg)) + return ERR_CAST(memcg); +@@ -5654,6 +5692,10 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) + } + } + } ++ WRITE_ONCE(statc->stats_updates, 0); ++ /* We are in a per-cpu loop here, only do the atomic write once */ ++ if (atomic64_read(&memcg->vmstats->stats_updates)) ++ atomic64_set(&memcg->vmstats->stats_updates, 0); + } + + #ifdef CONFIG_MMU +@@ -6682,7 +6724,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v) + int i; + struct mem_cgroup *memcg = mem_cgroup_from_seq(m); + +- mem_cgroup_flush_stats(); ++ mem_cgroup_flush_stats(memcg); + + for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { + int nid; +@@ -7846,7 +7888,11 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) + break; + } + +- cgroup_rstat_flush(memcg->css.cgroup); ++ /* ++ * mem_cgroup_flush_stats() ignores small changes. Use ++ * do_flush_stats() directly to get accurate stats for charging. 
++ */ ++ do_flush_stats(memcg); + pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; + if (pages < max) + continue; +@@ -7911,8 +7957,10 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) + static u64 zswap_current_read(struct cgroup_subsys_state *css, + struct cftype *cft) + { +- cgroup_rstat_flush(css->cgroup); +- return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B); ++ struct mem_cgroup *memcg = mem_cgroup_from_css(css); ++ ++ mem_cgroup_flush_stats(memcg); ++ return memcg_page_state(memcg, MEMCG_ZSWAP_B); + } + + static int zswap_max_show(struct seq_file *m, void *v) +diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c +index 37a4f59d9585b9..93cb8eed4bc233 100644 +--- a/mm/memory-tiers.c ++++ b/mm/memory-tiers.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -36,7 +37,7 @@ struct node_memory_type_map { + static DEFINE_MUTEX(memory_tier_lock); + static LIST_HEAD(memory_tiers); + static struct node_memory_type_map node_memory_types[MAX_NUMNODES]; +-static struct memory_dev_type *default_dram_type; ++struct memory_dev_type *default_dram_type; + + static struct bus_type memory_tier_subsys = { + .name = "memory_tiering", +@@ -105,6 +106,13 @@ static int top_tier_adistance; + static struct demotion_nodes *node_demotion __read_mostly; + #endif /* CONFIG_MIGRATION */ + ++static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms); ++ ++static bool default_dram_perf_error; ++static struct access_coordinate default_dram_perf; ++static int default_dram_perf_ref_nid = NUMA_NO_NODE; ++static const char *default_dram_perf_ref_source; ++ + static inline struct memory_tier *to_memory_tier(struct device *device) + { + return container_of(device, struct memory_tier, dev); +@@ -592,6 +600,158 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype) + } + EXPORT_SYMBOL_GPL(clear_node_memory_type); + ++static void dump_hmem_attrs(struct access_coordinate *coord, const char *prefix) ++{ ++ pr_info( ++"%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n", ++ prefix, coord->read_latency, coord->write_latency, ++ coord->read_bandwidth, coord->write_bandwidth); ++} ++ ++int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, ++ const char *source) ++{ ++ int rc = 0; ++ ++ mutex_lock(&memory_tier_lock); ++ if (default_dram_perf_error) { ++ rc = -EIO; ++ goto out; ++ } ++ ++ if (perf->read_latency + perf->write_latency == 0 || ++ perf->read_bandwidth + perf->write_bandwidth == 0) { ++ rc = -EINVAL; ++ goto out; ++ } ++ ++ if (default_dram_perf_ref_nid == NUMA_NO_NODE) { ++ default_dram_perf = *perf; ++ default_dram_perf_ref_nid = nid; ++ default_dram_perf_ref_source = kstrdup(source, GFP_KERNEL); ++ goto out; ++ } ++ ++ /* ++ * The performance of all default DRAM nodes is expected to be ++ * same (that is, the variation is less than 10%). And it ++ * will be used as base to calculate the abstract distance of ++ * other memory nodes. 
++ */ ++ if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 > ++ default_dram_perf.read_latency || ++ abs(perf->write_latency - default_dram_perf.write_latency) * 10 > ++ default_dram_perf.write_latency || ++ abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 > ++ default_dram_perf.read_bandwidth || ++ abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 > ++ default_dram_perf.write_bandwidth) { ++ pr_info( ++"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n" ++"DRAM node %d.\n", nid, default_dram_perf_ref_nid); ++ pr_info(" performance of reference DRAM node %d from %s:\n", ++ default_dram_perf_ref_nid, default_dram_perf_ref_source); ++ dump_hmem_attrs(&default_dram_perf, " "); ++ pr_info(" performance of DRAM node %d from %s:\n", nid, source); ++ dump_hmem_attrs(perf, " "); ++ pr_info( ++" disable default DRAM node performance based abstract distance algorithm.\n"); ++ default_dram_perf_error = true; ++ rc = -EINVAL; ++ } ++ ++out: ++ mutex_unlock(&memory_tier_lock); ++ return rc; ++} ++ ++int mt_perf_to_adistance(struct access_coordinate *perf, int *adist) ++{ ++ if (default_dram_perf_error) ++ return -EIO; ++ ++ if (default_dram_perf_ref_nid == NUMA_NO_NODE) ++ return -ENOENT; ++ ++ if (perf->read_latency + perf->write_latency == 0 || ++ perf->read_bandwidth + perf->write_bandwidth == 0) ++ return -EINVAL; ++ ++ mutex_lock(&memory_tier_lock); ++ /* ++ * The abstract distance of a memory node is in direct proportion to ++ * its memory latency (read + write) and inversely proportional to its ++ * memory bandwidth (read + write). The abstract distance, memory ++ * latency, and memory bandwidth of the default DRAM nodes are used as ++ * the base. ++ */ ++ *adist = MEMTIER_ADISTANCE_DRAM * ++ (perf->read_latency + perf->write_latency) / ++ (default_dram_perf.read_latency + default_dram_perf.write_latency) * ++ (default_dram_perf.read_bandwidth + default_dram_perf.write_bandwidth) / ++ (perf->read_bandwidth + perf->write_bandwidth); ++ mutex_unlock(&memory_tier_lock); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(mt_perf_to_adistance); ++ ++/** ++ * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm ++ * @nb: The notifier block which describe the algorithm ++ * ++ * Return: 0 on success, errno on error. ++ * ++ * Every memory tiering abstract distance algorithm provider needs to ++ * register the algorithm with register_mt_adistance_algorithm(). To ++ * calculate the abstract distance for a specified memory node, the ++ * notifier function will be called unless some high priority ++ * algorithm has provided result. The prototype of the notifier ++ * function is as follows, ++ * ++ * int (*algorithm_notifier)(struct notifier_block *nb, ++ * unsigned long nid, void *data); ++ * ++ * Where "nid" specifies the memory node, "data" is the pointer to the ++ * returned abstract distance (that is, "int *adist"). If the ++ * algorithm provides the result, NOTIFY_STOP should be returned. ++ * Otherwise, return_value & %NOTIFY_STOP_MASK == 0 to allow the next ++ * algorithm in the chain to provide the result. 
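mt_perf_to_adistance() is plain integer arithmetic: abstract distance grows with total (read + write) latency and shrinks with total bandwidth, normalized so the default DRAM node sits exactly at the base value. A worked example with made-up performance numbers and an illustrative base constant:

#include <stdio.h>

#define MEMTIER_ADISTANCE_DRAM 512  /* illustrative base, not the kernel's */

struct perf { unsigned rl, wl, rb, wb; };  /* latencies, bandwidths */

/* adist scales with total latency and inversely with total bandwidth,
 * using the default DRAM node as the baseline (adist == base there). */
static int perf_to_adistance(const struct perf *dram, const struct perf *p)
{
    return MEMTIER_ADISTANCE_DRAM *
           (p->rl + p->wl) / (dram->rl + dram->wl) *
           (dram->rb + dram->wb) / (p->rb + p->wb);
}

int main(void)
{
    struct perf dram = {  80,  80, 20000, 20000 };  /* baseline DRAM */
    struct perf cxl  = { 240, 240, 10000, 10000 };  /* slower, narrower */

    /* 3x the latency and half the bandwidth: 512 * 3 * 2 = 3072,
     * i.e. the node lands in a tier well below DRAM */
    printf("dram adist=%d cxl adist=%d\n",
           perf_to_adistance(&dram, &dram), perf_to_adistance(&dram, &cxl));
    return 0;
}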
++ */ ++int register_mt_adistance_algorithm(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_register(&mt_adistance_algorithms, nb); ++} ++EXPORT_SYMBOL_GPL(register_mt_adistance_algorithm); ++ ++/** ++ * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm ++ * @nb: the notifier block which describe the algorithm ++ * ++ * Return: 0 on success, errno on error. ++ */ ++int unregister_mt_adistance_algorithm(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_unregister(&mt_adistance_algorithms, nb); ++} ++EXPORT_SYMBOL_GPL(unregister_mt_adistance_algorithm); ++ ++/** ++ * mt_calc_adistance() - Calculate abstract distance with registered algorithms ++ * @node: the node to calculate abstract distance for ++ * @adist: the returned abstract distance ++ * ++ * Return: if return_value & %NOTIFY_STOP_MASK != 0, then some ++ * abstract distance algorithm provides the result, and return it via ++ * @adist. Otherwise, no algorithm can provide the result and @adist ++ * will be kept as it is. ++ */ ++int mt_calc_adistance(int node, int *adist) ++{ ++ return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist); ++} ++EXPORT_SYMBOL_GPL(mt_calc_adistance); ++ + static int __meminit memtier_hotplug_callback(struct notifier_block *self, + unsigned long action, void *_arg) + { +diff --git a/mm/memory.c b/mm/memory.c +index 8b80db7115e5fe..fd917a5bc9617a 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -67,6 +67,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -4435,6 +4436,8 @@ static bool vmf_pte_changed(struct vm_fault *vmf) + vm_fault_t finish_fault(struct vm_fault *vmf) + { + struct vm_area_struct *vma = vmf->vma; ++ bool needs_fallback = false; ++ struct folio *folio; + struct page *page; + vm_fault_t ret; + +@@ -4444,6 +4447,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf) + else + page = vmf->page; + ++ folio = page_folio(page); ++ + /* + * check even for read faults because we might have lost our CoWed + * page +@@ -4454,8 +4459,25 @@ vm_fault_t finish_fault(struct vm_fault *vmf) + return ret; + } + ++ if (!needs_fallback && vma->vm_file) { ++ struct address_space *mapping = vma->vm_file->f_mapping; ++ pgoff_t file_end; ++ ++ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); ++ ++ /* ++ * Do not allow to map with PTEs beyond i_size and with PMD ++ * across i_size to preserve SIGBUS semantics. ++ * ++ * Make an exception for shmem/tmpfs that for long time ++ * intentionally mapped with PMDs across i_size. ++ */ ++ needs_fallback = !shmem_mapping(mapping) && ++ file_end < folio_next_index(folio); ++ } ++ + if (pmd_none(*vmf->pmd)) { +- if (PageTransCompound(page)) { ++ if (!needs_fallback && PageTransCompound(page)) { + ret = do_set_pmd(vmf, page); + if (ret != VM_FAULT_FALLBACK) + return ret; +diff --git a/mm/mm_init.c b/mm/mm_init.c +index 77fd04c83d046d..f5519ca1263e75 100644 +--- a/mm/mm_init.c ++++ b/mm/mm_init.c +@@ -2546,7 +2546,7 @@ void *__init alloc_large_system_hash(const char *tablename, + panic("Failed to allocate %s hash table\n", tablename); + + pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", +- tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, ++ tablename, 1UL << log2qty, get_order(size), size, + virt ? (huge ? 
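The finish_fault() hunk above gates PMD mapping on EOF: a large folio that extends past i_size must fall back to PTE mapping so that accesses beyond EOF still fault and raise SIGBUS (shmem/tmpfs keeps its historical exception). The index arithmetic, modelled in userspace with illustrative names:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* A large folio may only be mapped with a PMD if it ends at or before
 * EOF rounded up to pages; otherwise fall back to PTEs so touching
 * pages past i_size still delivers SIGBUS. */
static bool pmd_map_allowed(unsigned long i_size, unsigned long folio_index,
                            unsigned long folio_nr_pages)
{
    unsigned long file_end = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;

    return file_end >= folio_index + folio_nr_pages;
}

int main(void)
{
    /* 2 MiB folio at index 0 over a 1 MiB file: must fall back to PTEs */
    printf("%d\n", pmd_map_allowed(1UL << 20, 0, 512));
    /* same folio over a 2 MiB file: PMD mapping is fine */
    printf("%d\n", pmd_map_allowed(2UL << 20, 0, 512));
    return 0;
}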
"vmalloc hugepage" : "vmalloc") : "linear"); + + if (_hash_shift) +diff --git a/mm/page_io.c b/mm/page_io.c +index fe4c21af23f269..cb559ae324c672 100644 +--- a/mm/page_io.c ++++ b/mm/page_io.c +@@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) + static inline void count_swpout_vm_event(struct folio *folio) + { + #ifdef CONFIG_TRANSPARENT_HUGEPAGE +- if (unlikely(folio_test_pmd_mappable(folio))) ++ if (unlikely(folio_test_pmd_mappable(folio))) { ++ count_memcg_folio_events(folio, THP_SWPOUT, 1); + count_vm_event(THP_SWPOUT); ++ } + #endif + count_vm_events(PSWPOUT, folio_nr_pages(folio)); + } +@@ -278,9 +280,6 @@ static void sio_write_complete(struct kiocb *iocb, long ret) + set_page_dirty(page); + ClearPageReclaim(page); + } +- } else { +- for (p = 0; p < sio->pages; p++) +- count_swpout_vm_event(page_folio(sio->bvec[p].bv_page)); + } + + for (p = 0; p < sio->pages; p++) +@@ -296,6 +295,7 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc) + struct file *swap_file = sis->swap_file; + loff_t pos = page_file_offset(page); + ++ count_swpout_vm_event(page_folio(page)); + set_page_writeback(page); + unlock_page(page); + if (wbc->swap_plug) +diff --git a/mm/percpu.c b/mm/percpu.c +index 38d5121c2b652a..54c2988a74967c 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -1734,7 +1734,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, + gfp = current_gfp_context(gfp); + /* whitelisted flags that can be passed to the backing allocators */ + pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); +- is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; ++ is_atomic = !gfpflags_allow_blocking(gfp); + do_warn = !(gfp & __GFP_NOWARN); + + /* +@@ -2231,7 +2231,12 @@ static void pcpu_balance_workfn(struct work_struct *work) + * to grow other chunks. This then gives pcpu_reclaim_populated() time + * to move fully free chunks to the active list to be freed if + * appropriate. 
++ * ++ * Enforce GFP_NOIO allocations because we have pcpu_alloc users ++ * constrained to GFP_NOIO/NOFS contexts and they could form lock ++ * dependency through pcpu_alloc_mutex + */ ++ unsigned int flags = memalloc_noio_save(); + mutex_lock(&pcpu_alloc_mutex); + spin_lock_irq(&pcpu_lock); + +@@ -2242,6 +2247,7 @@ static void pcpu_balance_workfn(struct work_struct *work) + + spin_unlock_irq(&pcpu_lock); + mutex_unlock(&pcpu_alloc_mutex); ++ memalloc_noio_restore(flags); + } + + /** +diff --git a/mm/secretmem.c b/mm/secretmem.c +index 4bedf491a8a742..f64ea1cde2f92f 100644 +--- a/mm/secretmem.c ++++ b/mm/secretmem.c +@@ -84,13 +84,13 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf) + __folio_mark_uptodate(folio); + err = filemap_add_folio(mapping, folio, offset, gfp); + if (unlikely(err)) { +- folio_put(folio); + /* + * If a split of large page was required, it + * already happened when we marked the page invalid + * which guarantees that this call won't fail + */ + set_direct_map_default_noflush(page); ++ folio_put(folio); + if (err == -EEXIST) + goto retry; + +diff --git a/mm/truncate.c b/mm/truncate.c +index 70c09213bb9200..6a82e981b63cef 100644 +--- a/mm/truncate.c ++++ b/mm/truncate.c +@@ -196,6 +196,31 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio) + return 0; + } + ++static int try_folio_split_or_unmap(struct folio *folio) ++{ ++ enum ttu_flags ttu_flags = ++ TTU_SYNC | ++ TTU_SPLIT_HUGE_PMD | ++ TTU_IGNORE_MLOCK; ++ int ret; ++ ++ ret = split_folio(folio); ++ ++ /* ++ * If the split fails, unmap the folio, so it will be refaulted ++ * with PTEs to respect SIGBUS semantics. ++ * ++ * Make an exception for shmem/tmpfs that for long time ++ * intentionally mapped with PMDs across i_size. ++ */ ++ if (ret && !shmem_mapping(folio->mapping)) { ++ try_to_unmap(folio, ttu_flags); ++ WARN_ON(folio_mapped(folio)); ++ } ++ ++ return ret; ++} ++ + /* + * Handle partial folios. The folio may be entirely within the + * range if a split has raced with us. If not, we zero the part of the +@@ -239,7 +264,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) + folio_invalidate(folio, offset, length); + if (!folio_test_large(folio)) + return true; +- if (split_folio(folio) == 0) ++ if (try_folio_split_or_unmap(folio) == 0) + return true; + if (folio_test_dirty(folio)) + return false; +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 258f5472f1e900..aba757e5c59784 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -1922,6 +1922,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, + folio_list)) + goto activate_locked; + #ifdef CONFIG_TRANSPARENT_HUGEPAGE ++ count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1); + count_vm_event(THP_SWPOUT_FALLBACK); + #endif + if (!add_to_swap(folio)) +@@ -2910,7 +2911,7 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) + * Flush the memory cgroup stats, so that we read accurate per-memcg + * lruvec stats for heuristics. + */ +- mem_cgroup_flush_stats(); ++ mem_cgroup_flush_stats(sc->target_mem_cgroup); + + /* + * Determine the scan balance between anon and file LRUs. 
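pcpu_balance_workfn() now brackets its work in memalloc_noio_save()/restore(), so every allocation inside the section is implicitly GFP_NOIO no matter what gfp a callee passes; that breaks the lock dependency for pcpu_alloc() users already constrained to NOIO/NOFS context. The scoped-flag pattern, modelled on a plain task-flags word (illustrative bit and names):

#include <stdio.h>

#define PF_MEMALLOC_NOIO 0x1u

static unsigned int current_flags;  /* stands in for current->flags */

/* Force NOIO for everything the section allocates, then restore
 * whatever was set before, nesting-safe. */
static unsigned int noio_save(void)
{
    unsigned int old = current_flags & PF_MEMALLOC_NOIO;

    current_flags |= PF_MEMALLOC_NOIO;
    return old;
}

static void noio_restore(unsigned int old)
{
    current_flags = (current_flags & ~PF_MEMALLOC_NOIO) | old;
}

int main(void)
{
    unsigned int flags = noio_save();

    printf("allocating with NOIO=%u\n", current_flags & PF_MEMALLOC_NOIO);
    /* ... take pcpu_alloc_mutex and allocate here ... */
    noio_restore(flags);
    printf("restored NOIO=%u\n", current_flags & PF_MEMALLOC_NOIO);
    return 0;
}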
+diff --git a/mm/vmstat.c b/mm/vmstat.c +index 57891697846b93..3630c6e2bb41a4 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1397,6 +1397,7 @@ const char * const vmstat_text[] = { + #ifdef CONFIG_ZSWAP + "zswpin", + "zswpout", ++ "zswpwb", + #endif + #ifdef CONFIG_X86 + "direct_map_level2_splits", +diff --git a/mm/workingset.c b/mm/workingset.c +index 9110957bec5b30..1cdbc1fb587312 100644 +--- a/mm/workingset.c ++++ b/mm/workingset.c +@@ -411,10 +411,12 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) + * @file: whether the corresponding folio is from the file lru. + * @workingset: where the workingset value unpacked from shadow should + * be stored. ++ * @flush: whether to flush cgroup rstat. + * + * Return: true if the shadow is for a recently evicted folio; false otherwise. + */ +-bool workingset_test_recent(void *shadow, bool file, bool *workingset) ++bool workingset_test_recent(void *shadow, bool file, bool *workingset, ++ bool flush) + { + struct mem_cgroup *eviction_memcg; + struct lruvec *eviction_lruvec; +@@ -425,8 +427,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) + struct pglist_data *pgdat; + unsigned long eviction; + +- if (lru_gen_enabled()) +- return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset); ++ rcu_read_lock(); ++ ++ if (lru_gen_enabled()) { ++ bool recent = lru_gen_test_recent(shadow, file, ++ &eviction_lruvec, &eviction, workingset); ++ ++ rcu_read_unlock(); ++ return recent; ++ } ++ + + unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); + eviction <<= bucket_order; +@@ -448,8 +458,26 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) + * configurations instead. + */ + eviction_memcg = mem_cgroup_from_id(memcgid); +- if (!mem_cgroup_disabled() && !eviction_memcg) ++ if (!mem_cgroup_disabled() && ++ (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) { ++ rcu_read_unlock(); + return false; ++ } ++ ++ rcu_read_unlock(); ++ ++ /* ++ * Flush stats (and potentially sleep) outside the RCU read section. ++ * ++ * Note that workingset_test_recent() itself might be called in RCU read ++ * section (for e.g, in cachestat) - these callers need to skip flushing ++ * stats (via the flush argument). ++ * ++ * XXX: With per-memcg flushing and thresholding, is ratelimiting ++ * still needed here? ++ */ ++ if (flush) ++ mem_cgroup_flush_stats_ratelimited(eviction_memcg); + + eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); + refault = atomic_long_read(&eviction_lruvec->nonresident_age); +@@ -493,6 +521,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) + } + } + ++ mem_cgroup_put(eviction_memcg); + return refault_distance <= workingset_size; + } + +@@ -519,19 +548,16 @@ void workingset_refault(struct folio *folio, void *shadow) + return; + } + +- /* Flush stats (and potentially sleep) before holding RCU read lock */ +- mem_cgroup_flush_stats_ratelimited(); +- +- rcu_read_lock(); +- + /* + * The activation decision for this folio is made at the level + * where the eviction occurred, as that is where the LRU order + * during folio reclaim is being determined. + * + * However, the cgroup that will own the folio is the one that +- * is actually experiencing the refault event. ++ * is actually experiencing the refault event. Make sure the folio is ++ * locked to guarantee folio_memcg() stability throughout. 
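Because workingset_test_recent() can now sleep in the flush, the patch narrows the RCU read section: look the memcg up under RCU, pin it with mem_cgroup_tryget(), drop the RCU lock, flush, and put the reference at the end. The tryget primitive is an increment-unless-zero; a C11 model of that shape (toy type, not the kernel's refcount API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_long refs; };

/* Take a reference only if the object is still live, so the caller may
 * leave the RCU section and sleep while holding its own reference. */
static bool tryget(struct obj *o)
{
    long v = atomic_load(&o->refs);

    while (v > 0) {
        if (atomic_compare_exchange_weak(&o->refs, &v, v + 1))
            return true;
    }
    return false;  /* already on its way to being freed */
}

static void put(struct obj *o)
{
    atomic_fetch_sub(&o->refs, 1);
}

int main(void)
{
    struct obj live, dead;

    atomic_init(&live.refs, 1);
    atomic_init(&dead.refs, 0);
    printf("live: %d dead: %d\n", tryget(&live), tryget(&dead));
    if (atomic_load(&live.refs) == 2)
        put(&live);
    return 0;
}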
+ */ ++ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + nr = folio_nr_pages(folio); + memcg = folio_memcg(folio); + pgdat = folio_pgdat(folio); +@@ -539,8 +565,8 @@ void workingset_refault(struct folio *folio, void *shadow) + + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); + +- if (!workingset_test_recent(shadow, file, &workingset)) +- goto out; ++ if (!workingset_test_recent(shadow, file, &workingset, true)) ++ return; + + folio_set_active(folio); + workingset_age_nonresident(lruvec, nr); +@@ -556,8 +582,6 @@ void workingset_refault(struct folio *folio, void *shadow) + lru_note_cost_refault(folio); + mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr); + } +-out: +- rcu_read_unlock(); + } + + /** +@@ -664,7 +688,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, + struct lruvec *lruvec; + int i; + +- mem_cgroup_flush_stats_ratelimited(); ++ mem_cgroup_flush_stats_ratelimited(sc->memcg); + lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid)); + for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) + pages += lruvec_page_state_local(lruvec, +diff --git a/mm/zswap.c b/mm/zswap.c +index 69681b9173fdcb..a3459440fc313c 100644 +--- a/mm/zswap.c ++++ b/mm/zswap.c +@@ -674,6 +674,10 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) + goto put_unlock; + } + ++ if (entry->objcg) ++ count_objcg_event(entry->objcg, ZSWPWB); ++ ++ count_vm_event(ZSWPWB); + /* + * Writeback started successfully, the page now belongs to the + * swapcache. Drop the entry from zswap - unless invalidate already +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index 422f726346ea51..7c77482f315948 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -194,6 +194,8 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack) + vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev); + grp->nr_vlan_devs++; + ++ netdev_update_features(dev); ++ + return 0; + + out_unregister_netdev: +diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c +index 13b752c169bed7..e65d4754c94f4c 100644 +--- a/net/bluetooth/6lowpan.c ++++ b/net/bluetooth/6lowpan.c +@@ -52,6 +52,11 @@ static bool enable_6lowpan; + static struct l2cap_chan *listen_chan; + static DEFINE_MUTEX(set_lock); + ++enum { ++ LOWPAN_PEER_CLOSING, ++ LOWPAN_PEER_MAXBITS ++}; ++ + struct lowpan_peer { + struct list_head list; + struct rcu_head rcu; +@@ -60,6 +65,8 @@ struct lowpan_peer { + /* peer addresses in various formats */ + unsigned char lladdr[ETH_ALEN]; + struct in6_addr peer_addr; ++ ++ DECLARE_BITMAP(flags, LOWPAN_PEER_MAXBITS); + }; + + struct lowpan_btle_dev { +@@ -288,6 +295,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, + local_skb->pkt_type = PACKET_HOST; + local_skb->dev = dev; + ++ skb_reset_mac_header(local_skb); + skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); + + if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { +@@ -955,10 +963,11 @@ static struct l2cap_chan *bt_6lowpan_listen(void) + } + + static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, +- struct l2cap_conn **conn) ++ struct l2cap_conn **conn, bool disconnect) + { + struct hci_conn *hcon; + struct hci_dev *hdev; ++ int le_addr_type; + int n; + + n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", +@@ -969,13 +978,32 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, + if (n < 7) + return -EINVAL; + ++ if (disconnect) { ++ /* The "disconnect" debugfs command has used different address ++ * type constants than "connect" since 2015. 
Let's retain that ++ * for now even though it's obviously buggy... ++ */ ++ *addr_type += 1; ++ } ++ ++ switch (*addr_type) { ++ case BDADDR_LE_PUBLIC: ++ le_addr_type = ADDR_LE_DEV_PUBLIC; ++ break; ++ case BDADDR_LE_RANDOM: ++ le_addr_type = ADDR_LE_DEV_RANDOM; ++ break; ++ default: ++ return -EINVAL; ++ } ++ + /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ + hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); + if (!hdev) + return -ENOENT; + + hci_dev_lock(hdev); +- hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type); ++ hcon = hci_conn_hash_lookup_le(hdev, addr, le_addr_type); + hci_dev_unlock(hdev); + hci_dev_put(hdev); + +@@ -992,41 +1020,52 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, + static void disconnect_all_peers(void) + { + struct lowpan_btle_dev *entry; +- struct lowpan_peer *peer, *tmp_peer, *new_peer; +- struct list_head peers; +- +- INIT_LIST_HEAD(&peers); ++ struct lowpan_peer *peer; ++ int nchans; + +- /* We make a separate list of peers as the close_cb() will +- * modify the device peers list so it is better not to mess +- * with the same list at the same time. ++ /* l2cap_chan_close() cannot be called from RCU, and lock ordering ++ * chan->lock > devices_lock prevents taking write side lock, so copy ++ * then close. + */ + + rcu_read_lock(); ++ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) ++ list_for_each_entry_rcu(peer, &entry->peers, list) ++ clear_bit(LOWPAN_PEER_CLOSING, peer->flags); ++ rcu_read_unlock(); + +- list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { +- list_for_each_entry_rcu(peer, &entry->peers, list) { +- new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); +- if (!new_peer) +- break; ++ do { ++ struct l2cap_chan *chans[32]; ++ int i; + +- new_peer->chan = peer->chan; +- INIT_LIST_HEAD(&new_peer->list); ++ nchans = 0; + +- list_add(&new_peer->list, &peers); +- } +- } ++ spin_lock(&devices_lock); + +- rcu_read_unlock(); ++ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { ++ list_for_each_entry_rcu(peer, &entry->peers, list) { ++ if (test_and_set_bit(LOWPAN_PEER_CLOSING, ++ peer->flags)) ++ continue; + +- spin_lock(&devices_lock); +- list_for_each_entry_safe(peer, tmp_peer, &peers, list) { +- l2cap_chan_close(peer->chan, ENOENT); ++ l2cap_chan_hold(peer->chan); ++ chans[nchans++] = peer->chan; + +- list_del_rcu(&peer->list); +- kfree_rcu(peer, rcu); +- } +- spin_unlock(&devices_lock); ++ if (nchans >= ARRAY_SIZE(chans)) ++ goto done; ++ } ++ } ++ ++done: ++ spin_unlock(&devices_lock); ++ ++ for (i = 0; i < nchans; ++i) { ++ l2cap_chan_lock(chans[i]); ++ l2cap_chan_close(chans[i], ENOENT); ++ l2cap_chan_unlock(chans[i]); ++ l2cap_chan_put(chans[i]); ++ } ++ } while (nchans); + } + + struct set_enable { +@@ -1102,7 +1141,7 @@ static ssize_t lowpan_control_write(struct file *fp, + buf[buf_size] = '\0'; + + if (memcmp(buf, "connect ", 8) == 0) { +- ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn); ++ ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn, false); + if (ret == -EINVAL) + return ret; + +@@ -1139,7 +1178,7 @@ static ssize_t lowpan_control_write(struct file *fp, + } + + if (memcmp(buf, "disconnect ", 11) == 0) { +- ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn); ++ ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn, true); + if (ret < 0) + return ret; + +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 7bda00dcb0b2f0..4aa445e7f56bc2 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ 
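The disconnect_all_peers() rework above is a classic mark-and-batch shape: claim each peer with a test_and_set_bit()-style flag under the list lock, collect a bounded batch of channels, then drop the lock for the blocking closes, repeating until a pass finds nothing new. A compact model under those assumptions (toy types; the real code also holds a channel reference across the close):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH 32

struct peer { bool closing; bool closed; struct peer *next; };

static pthread_mutex_t devices_lock = PTHREAD_MUTEX_INITIALIZER;

static void close_all(struct peer *head)
{
    struct peer *batch[BATCH];
    int n;

    do {
        n = 0;
        pthread_mutex_lock(&devices_lock);
        for (struct peer *p = head; p && n < BATCH; p = p->next) {
            if (p->closing)
                continue;  /* already claimed by an earlier pass */
            p->closing = true;
            batch[n++] = p;
        }
        pthread_mutex_unlock(&devices_lock);

        for (int i = 0; i < n; i++)
            batch[i]->closed = true;  /* stands in for l2cap_chan_close() */
    } while (n);
}

int main(void)
{
    struct peer c = { false, false, NULL };
    struct peer b = { false, false, &c };
    struct peer a = { false, false, &b };

    close_all(&a);
    printf("%d %d %d\n", a.closed, b.closed, c.closed);
    return 0;
}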
-1596,8 +1596,10 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, + + hci_dev_set_flag(hdev, HCI_LE_ADV); + +- if (adv && !adv->periodic) ++ if (adv) + adv->enabled = true; ++ else if (!set->handle) ++ hci_dev_set_flag(hdev, HCI_LE_ADV_0); + + conn = hci_lookup_le_connect(hdev); + if (conn) +@@ -1608,6 +1610,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, + if (cp->num_of_sets) { + if (adv) + adv->enabled = false; ++ else if (!set->handle) ++ hci_dev_clear_flag(hdev, HCI_LE_ADV_0); + + /* If just one instance was disabled check if there are + * any other instance enabled before clearing HCI_LE_ADV +@@ -3949,8 +3953,11 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, + hci_dev_set_flag(hdev, HCI_LE_PER_ADV); + + if (adv) +- adv->enabled = true; ++ adv->periodic_enabled = true; + } else { ++ if (adv) ++ adv->periodic_enabled = false; ++ + /* If just one instance was disabled check if there are + * any other instance enabled before clearing HCI_LE_PER_ADV. + * The current periodic adv instance will be marked as +@@ -4201,6 +4208,13 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, + } + + if (i == ARRAY_SIZE(hci_cc_table)) { ++ if (!skb->len) { ++ bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status", ++ *opcode); ++ *status = HCI_ERROR_UNSPECIFIED; ++ return; ++ } ++ + /* Unknown opcode, assume byte 0 contains the status, so + * that e.g. __hci_cmd_sync() properly returns errors + * for vendor specific commands send by HCI drivers. +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index a128e5709fa153..f0eb52d5c05811 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -881,11 +881,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, + { + struct hci_cmd_sync_work_entry *entry; + +- entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); +- if (!entry) ++ mutex_lock(&hdev->cmd_sync_work_lock); ++ ++ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); ++ if (!entry) { ++ mutex_unlock(&hdev->cmd_sync_work_lock); + return false; ++ } + +- hci_cmd_sync_cancel_entry(hdev, entry); ++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); ++ ++ mutex_unlock(&hdev->cmd_sync_work_lock); + + return true; + } +@@ -1625,7 +1631,7 @@ int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) + + /* If periodic advertising already disabled there is nothing to do. */ + adv = hci_find_adv_instance(hdev, instance); +- if (!adv || !adv->periodic || !adv->enabled) ++ if (!adv || !adv->periodic_enabled) + return 0; + + memset(&cp, 0, sizeof(cp)); +@@ -1694,7 +1700,7 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) + + /* If periodic advertising already enabled there is nothing to do. */ + adv = hci_find_adv_instance(hdev, instance); +- if (adv && adv->periodic && adv->enabled) ++ if (adv && adv->periodic_enabled) + return 0; + + memset(&cp, 0, sizeof(cp)); +@@ -2645,9 +2651,8 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) + /* If current advertising instance is set to instance 0x00 + * then we need to re-enable it. 
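hci_cmd_sync_dequeue_once() used to look the entry up, release cmd_sync_work_lock, and only then cancel, leaving a window in which the entry could run or be freed. The fix keeps both steps inside one critical section, the general cure for lookup-then-act races. A sketch of the pattern (illustrative list and names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry { int id; bool cancelled; struct entry *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending;

/* Lookup and cancel under a single lock hold, so nothing can slip in
 * between the two steps. */
static bool dequeue_once(int id)
{
    pthread_mutex_lock(&list_lock);
    for (struct entry *e = pending; e; e = e->next) {
        if (e->id == id) {
            e->cancelled = true;  /* stands in for _cancel_entry() */
            pthread_mutex_unlock(&list_lock);
            return true;
        }
    }
    pthread_mutex_unlock(&list_lock);
    return false;
}

int main(void)
{
    struct entry e = { 7, false, NULL };

    pending = &e;
    printf("%d %d\n", dequeue_once(7), dequeue_once(8));
    return 0;
}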
+ */ +- if (!hdev->cur_adv_instance) +- err = hci_enable_ext_advertising_sync(hdev, +- hdev->cur_adv_instance); ++ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0)) ++ err = hci_enable_ext_advertising_sync(hdev, 0x00); + } else { + /* Schedule for most recent instance to be restarted and begin + * the software rotation loop +diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c +index 69529a3049e741..1469e9b69e631c 100644 +--- a/net/bluetooth/iso.c ++++ b/net/bluetooth/iso.c +@@ -1782,7 +1782,13 @@ static void iso_conn_ready(struct iso_conn *conn) + } + + bacpy(&iso_pi(sk)->dst, &hcon->dst); +- iso_pi(sk)->dst_type = hcon->dst_type; ++ ++ /* Convert from HCI to three-value type */ ++ if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) ++ iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC; ++ else ++ iso_pi(sk)->dst_type = BDADDR_LE_RANDOM; ++ + iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; + memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len); + iso_pi(sk)->base_len = iso_pi(parent)->base_len; +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index dabc07700197c5..ad46112cb596bf 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -497,6 +497,7 @@ void l2cap_chan_hold(struct l2cap_chan *c) + + kref_get(&c->kref); + } ++EXPORT_SYMBOL_GPL(l2cap_chan_hold); + + struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) + { +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 54ddbb2635e2ff..7de0a0d752629f 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -5358,9 +5358,9 @@ static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count, + for (i = 0; i < pattern_count; i++) { + offset = patterns[i].offset; + length = patterns[i].length; +- if (offset >= HCI_MAX_EXT_AD_LENGTH || +- length > HCI_MAX_EXT_AD_LENGTH || +- (offset + length) > HCI_MAX_EXT_AD_LENGTH) ++ if (offset >= HCI_MAX_AD_LENGTH || ++ length > HCI_MAX_AD_LENGTH || ++ (offset + length) > HCI_MAX_AD_LENGTH) + return MGMT_STATUS_INVALID_PARAMS; + + p = kmalloc(sizeof(*p), GFP_KERNEL); +@@ -9440,6 +9440,7 @@ void mgmt_index_removed(struct hci_dev *hdev) + cancel_delayed_work_sync(&hdev->discov_off); + cancel_delayed_work_sync(&hdev->service_cache); + cancel_delayed_work_sync(&hdev->rpa_expired); ++ cancel_delayed_work_sync(&hdev->mesh_send_done); + } + + void mgmt_power_on(struct hci_dev *hdev, int err) +diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c +index 94ec913dfb76e8..e258ca19147050 100644 +--- a/net/bluetooth/rfcomm/tty.c ++++ b/net/bluetooth/rfcomm/tty.c +@@ -651,8 +651,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) + tty_port_tty_hangup(&dev->port, true); + + dev->modem_status = +- ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | +- ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | ++ ((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) | ++ ((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) | + ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | + ((v24_sig & RFCOMM_V24_DV) ? 
TIOCM_CD : 0); + } +@@ -1063,10 +1063,14 @@ static void rfcomm_tty_hangup(struct tty_struct *tty) + static int rfcomm_tty_tiocmget(struct tty_struct *tty) + { + struct rfcomm_dev *dev = tty->driver_data; ++ struct rfcomm_dlc *dlc = dev->dlc; ++ u8 v24_sig; + + BT_DBG("tty %p dev %p", tty, dev); + +- return dev->modem_status; ++ rfcomm_dlc_get_modem_status(dlc, &v24_sig); ++ ++ return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status; + } + + static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) +@@ -1079,23 +1083,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne + + rfcomm_dlc_get_modem_status(dlc, &v24_sig); + +- if (set & TIOCM_DSR || set & TIOCM_DTR) ++ if (set & TIOCM_DTR) + v24_sig |= RFCOMM_V24_RTC; +- if (set & TIOCM_RTS || set & TIOCM_CTS) ++ if (set & TIOCM_RTS) + v24_sig |= RFCOMM_V24_RTR; +- if (set & TIOCM_RI) +- v24_sig |= RFCOMM_V24_IC; +- if (set & TIOCM_CD) +- v24_sig |= RFCOMM_V24_DV; + +- if (clear & TIOCM_DSR || clear & TIOCM_DTR) ++ if (clear & TIOCM_DTR) + v24_sig &= ~RFCOMM_V24_RTC; +- if (clear & TIOCM_RTS || clear & TIOCM_CTS) ++ if (clear & TIOCM_RTS) + v24_sig &= ~RFCOMM_V24_RTR; +- if (clear & TIOCM_RI) +- v24_sig &= ~RFCOMM_V24_IC; +- if (clear & TIOCM_CD) +- v24_sig &= ~RFCOMM_V24_DV; + + rfcomm_dlc_set_modem_status(dlc, v24_sig); + +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c +index 64d4d57c7033a3..6635d155e360be 100644 +--- a/net/bluetooth/sco.c ++++ b/net/bluetooth/sco.c +@@ -433,6 +433,13 @@ static void sco_sock_kill(struct sock *sk) + + BT_DBG("sk %p state %d", sk, sk->sk_state); + ++ /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */ ++ if (sco_pi(sk)->conn) { ++ sco_conn_lock(sco_pi(sk)->conn); ++ sco_pi(sk)->conn->sk = NULL; ++ sco_conn_unlock(sco_pi(sk)->conn); ++ } ++ + /* Kill poor orphan */ + bt_sock_unlink(&sco_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); +diff --git a/net/bridge/br.c b/net/bridge/br.c +index a45db67197226b..d466febcf9abe8 100644 +--- a/net/bridge/br.c ++++ b/net/bridge/br.c +@@ -37,6 +37,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v + int err; + + if (netif_is_bridge_master(dev)) { ++ struct net_bridge *br = netdev_priv(dev); ++ ++ if (event == NETDEV_REGISTER) ++ br_fdb_change_mac_address(br, dev->dev_addr); ++ + err = br_vlan_bridge_event(dev, event, ptr); + if (err) + return notifier_from_errno(err); +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c +index e19b583ff2c6d0..e9f09cdb9848e8 100644 +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -25,7 +25,7 @@ static inline int should_deliver(const struct net_bridge_port *p, + + vg = nbp_vlan_group_rcu(p); + return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && +- (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && ++ (br_mst_is_enabled(p) || p->state == BR_STATE_FORWARDING) && + br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && + !br_skb_isolated(p, skb); + } +@@ -148,7 +148,8 @@ void br_forward(const struct net_bridge_port *to, + goto out; + + /* redirect to backup link if the destination port is down */ +- if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) { ++ if (rcu_access_pointer(to->backup_port) && ++ (!netif_carrier_ok(to->dev) || !netif_running(to->dev))) { + struct net_bridge_port *backup_port; + + backup_port = rcu_dereference(to->backup_port); +diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c +index 
2450690f98cfa5..6ffc81eedf0748 100644 +--- a/net/bridge/br_if.c ++++ b/net/bridge/br_if.c +@@ -386,6 +386,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head) + del_nbp(p); + } + ++ br_mst_uninit(br); + br_recalculate_neigh_suppress_enabled(br); + + br_fdb_delete_by_port(br, NULL, 0, 1); +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c +index e09000e38d071d..951330c1a813b2 100644 +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -93,7 +93,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb + + br = p->br; + +- if (br_mst_is_enabled(br)) { ++ if (br_mst_is_enabled(p)) { + state = BR_STATE_FORWARDING; + } else { + if (p->state == BR_STATE_DISABLED) +@@ -411,7 +411,7 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) + return RX_HANDLER_PASS; + + forward: +- if (br_mst_is_enabled(p->br)) ++ if (br_mst_is_enabled(p)) + goto defer_stp_filtering; + + switch (p->state) { +diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c +index 3f24b4ee49c274..43a300ae6bfafc 100644 +--- a/net/bridge/br_mst.c ++++ b/net/bridge/br_mst.c +@@ -22,6 +22,12 @@ bool br_mst_enabled(const struct net_device *dev) + } + EXPORT_SYMBOL_GPL(br_mst_enabled); + ++void br_mst_uninit(struct net_bridge *br) ++{ ++ if (br_opt_get(br, BROPT_MST_ENABLED)) ++ static_branch_dec(&br_mst_used); ++} ++ + int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids) + { + const struct net_bridge_vlan_group *vg; +@@ -225,9 +231,9 @@ int br_mst_set_enabled(struct net_bridge *br, bool on, + return err; + + if (on) +- static_branch_enable(&br_mst_used); ++ static_branch_inc(&br_mst_used); + else +- static_branch_disable(&br_mst_used); ++ static_branch_dec(&br_mst_used); + + br_opt_toggle(br, BROPT_MST_ENABLED, on); + return 0; +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index ef98ec4c3f51d4..c8a4e3b39b0e2e 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -1880,10 +1880,12 @@ static inline bool br_vlan_state_allowed(u8 state, bool learn_allow) + /* br_mst.c */ + #ifdef CONFIG_BRIDGE_VLAN_FILTERING + DECLARE_STATIC_KEY_FALSE(br_mst_used); +-static inline bool br_mst_is_enabled(struct net_bridge *br) ++static inline bool br_mst_is_enabled(const struct net_bridge_port *p) + { ++ /* check the port's vlan group to avoid racing with port deletion */ + return static_branch_unlikely(&br_mst_used) && +- br_opt_get(br, BROPT_MST_ENABLED); ++ br_opt_get(p->br, BROPT_MST_ENABLED) && ++ rcu_access_pointer(p->vlgrp); + } + + int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, +@@ -1897,8 +1899,9 @@ int br_mst_fill_info(struct sk_buff *skb, + const struct net_bridge_vlan_group *vg); + int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr, + struct netlink_ext_ack *extack); ++void br_mst_uninit(struct net_bridge *br); + #else +-static inline bool br_mst_is_enabled(struct net_bridge *br) ++static inline bool br_mst_is_enabled(const struct net_bridge_port *p) + { + return false; + } +@@ -1932,6 +1935,10 @@ static inline int br_mst_process(struct net_bridge_port *p, + { + return -EOPNOTSUPP; + } ++ ++static inline void br_mst_uninit(struct net_bridge *br) ++{ ++} + #endif + + struct nf_br_ops { +diff --git a/net/core/filter.c b/net/core/filter.c +index c2e888ea54abbf..0564ee6ac87315 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -4203,6 +4203,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) + + if (unlikely(!sinfo->nr_frags)) 
{ + xdp_buff_clear_frags_flag(xdp); ++ xdp_buff_clear_frag_pfmemalloc(xdp); + xdp->data_end -= offset; + } + +diff --git a/net/core/gro.c b/net/core/gro.c +index 397cf598425034..b8cc44406e69bf 100644 +--- a/net/core/gro.c ++++ b/net/core/gro.c +@@ -6,9 +6,6 @@ + + #define MAX_GRO_SKBS 8 + +-/* This should be increased if a protocol with a bigger head is added. */ +-#define GRO_MAX_HEAD (MAX_HEADER + 128) +- + static DEFINE_SPINLOCK(offload_lock); + struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base); + /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index 2bdb1e84c6c8a8..a78340bb25ba6b 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -45,11 +45,6 @@ + + #define MAX_UDP_CHUNK 1460 + #define MAX_SKBS 32 +- +-static struct sk_buff_head skb_pool; +- +-DEFINE_STATIC_SRCU(netpoll_srcu); +- + #define USEC_PER_POLL 50 + + #define MAX_SKB_SIZE \ +@@ -220,41 +215,41 @@ EXPORT_SYMBOL(netpoll_poll_dev); + void netpoll_poll_disable(struct net_device *dev) + { + struct netpoll_info *ni; +- int idx; ++ + might_sleep(); +- idx = srcu_read_lock(&netpoll_srcu); +- ni = srcu_dereference(dev->npinfo, &netpoll_srcu); ++ ni = rtnl_dereference(dev->npinfo); + if (ni) + down(&ni->dev_lock); +- srcu_read_unlock(&netpoll_srcu, idx); + } + EXPORT_SYMBOL(netpoll_poll_disable); + + void netpoll_poll_enable(struct net_device *dev) + { + struct netpoll_info *ni; +- rcu_read_lock(); +- ni = rcu_dereference(dev->npinfo); ++ ++ ni = rtnl_dereference(dev->npinfo); + if (ni) + up(&ni->dev_lock); +- rcu_read_unlock(); + } + EXPORT_SYMBOL(netpoll_poll_enable); + +-static void refill_skbs(void) ++static void refill_skbs(struct netpoll *np) + { ++ struct sk_buff_head *skb_pool; + struct sk_buff *skb; + unsigned long flags; + +- spin_lock_irqsave(&skb_pool.lock, flags); +- while (skb_pool.qlen < MAX_SKBS) { ++ skb_pool = &np->skb_pool; ++ ++ spin_lock_irqsave(&skb_pool->lock, flags); ++ while (skb_pool->qlen < MAX_SKBS) { + skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); + if (!skb) + break; + +- __skb_queue_tail(&skb_pool, skb); ++ __skb_queue_tail(skb_pool, skb); + } +- spin_unlock_irqrestore(&skb_pool.lock, flags); ++ spin_unlock_irqrestore(&skb_pool->lock, flags); + } + + static void zap_completion_queue(void) +@@ -291,12 +286,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) + struct sk_buff *skb; + + zap_completion_queue(); +- refill_skbs(); ++ refill_skbs(np); + repeat: + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) +- skb = skb_dequeue(&skb_pool); ++ skb = skb_dequeue(&np->skb_pool); + + if (!skb) { + if (++count < 10) { +@@ -543,6 +538,14 @@ static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr) + return -1; + } + ++static void skb_pool_flush(struct netpoll *np) ++{ ++ struct sk_buff_head *skb_pool; ++ ++ skb_pool = &np->skb_pool; ++ skb_queue_purge_reason(skb_pool, SKB_CONSUMED); ++} ++ + int netpoll_parse_options(struct netpoll *np, char *opt) + { + char *cur=opt, *delim; +@@ -631,6 +634,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) + const struct net_device_ops *ops; + int err; + ++ skb_queue_head_init(&np->skb_pool); ++ + if (ndev->priv_flags & IFF_DISABLE_NETPOLL) { + np_err(np, "%s doesn't support polling, aborting\n", + ndev->name); +@@ -666,6 +671,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) + strscpy(np->dev_name, ndev->name, IFNAMSIZ); + npinfo->netpoll = np; + ++ /* fill up the skb queue */ ++ refill_skbs(np); 
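The netpoll hunks move the emergency skb reserve from a single global skb_pool into struct netpoll itself, refilled opportunistically at setup and send time and flushed on teardown, so one instance can no longer drain another's reserve. A bare-bones model of the per-instance pool with the alloc-then-fallback send path (names and sizes illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAX_SKBS 32

struct pool { void *bufs[MAX_SKBS]; int len; };

/* Top the reserve up to MAX_SKBS; stop quietly if allocation fails. */
static void refill(struct pool *p, size_t size)
{
    while (p->len < MAX_SKBS) {
        void *b = malloc(size);

        if (!b)
            break;
        p->bufs[p->len++] = b;
    }
}

/* Try a fresh allocation first; fall back to the reserve under pressure. */
static void *get_buf(struct pool *p, size_t size)
{
    void *b = malloc(size);

    if (!b && p->len)
        b = p->bufs[--p->len];
    return b;
}

int main(void)
{
    struct pool np = { .len = 0 };

    refill(&np, 1500);
    printf("pool=%d buf=%p\n", np.len, get_buf(&np, 1500));
    return 0;
}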
++ + /* last thing to do is link it to the net device structure */ + rcu_assign_pointer(ndev->npinfo, npinfo); + +@@ -784,12 +792,9 @@ int netpoll_setup(struct netpoll *np) + } + } + +- /* fill up the skb queue */ +- refill_skbs(); +- + err = __netpoll_setup(np, ndev); + if (err) +- goto put; ++ goto flush; + rtnl_unlock(); + + /* Make sure all NAPI polls which started before dev->npinfo +@@ -800,6 +805,8 @@ int netpoll_setup(struct netpoll *np) + + return 0; + ++flush: ++ skb_pool_flush(np); + put: + DEBUG_NET_WARN_ON_ONCE(np->dev); + if (ip_overwritten) +@@ -811,13 +818,6 @@ int netpoll_setup(struct netpoll *np) + } + EXPORT_SYMBOL(netpoll_setup); + +-static int __init netpoll_init(void) +-{ +- skb_queue_head_init(&skb_pool); +- return 0; +-} +-core_initcall(netpoll_init); +- + static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) + { + struct netpoll_info *npinfo = +@@ -843,8 +843,10 @@ void __netpoll_cleanup(struct netpoll *np) + if (!npinfo) + return; + +- synchronize_srcu(&netpoll_srcu); +- ++ /* At this point, there is a single npinfo instance per netdevice, and ++ * its refcnt tracks how many netpoll structures are linked to it. We ++ * only perform npinfo cleanup when the refcnt decrements to zero. ++ */ + if (refcount_dec_and_test(&npinfo->refcnt)) { + const struct net_device_ops *ops; + +@@ -854,8 +856,9 @@ void __netpoll_cleanup(struct netpoll *np) + + RCU_INIT_POINTER(np->dev->npinfo, NULL); + call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info); +- } else +- RCU_INIT_POINTER(np->dev->npinfo, NULL); ++ } ++ ++ skb_pool_flush(np); + } + EXPORT_SYMBOL_GPL(__netpoll_cleanup); + +diff --git a/net/core/page_pool.c b/net/core/page_pool.c +index 2f2f63c8cf4b07..0188d7f007857e 100644 +--- a/net/core/page_pool.c ++++ b/net/core/page_pool.c +@@ -180,11 +180,7 @@ static int page_pool_init(struct page_pool *pool, + return -EINVAL; + + if (pool->p.pool_size) +- ring_qsize = pool->p.pool_size; +- +- /* Sanity limit mem that can be pinned down */ +- if (ring_qsize > 32768) +- return -E2BIG; ++ ring_qsize = min(pool->p.pool_size, 16384); + + /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL. + * DMA_BIDIRECTIONAL is for allowing page used for DMA sending, +@@ -422,6 +418,12 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, + struct page *page; + int i, nr_pages; + ++ /* Unconditionally set NOWARN if allocating from NAPI. ++ * Drivers forget to set it, and OOM reports on packet Rx are useless. ++ */ ++ if ((gfp & GFP_ATOMIC) == GFP_ATOMIC) ++ gfp |= __GFP_NOWARN; ++ + /* Don't support bulk alloc for high-order pages */ + if (unlikely(pp_order)) + return __page_pool_alloc_page_order(pool, gfp); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 867832f8bbaea9..073e2c52740796 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -67,6 +67,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -96,7 +97,9 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init; + + static struct kmem_cache *skb_small_head_cache __ro_after_init; + +-#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) ++#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN) ++#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \ ++ GRO_MAX_HEAD_PAD)) + + /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two. 
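The page_pool hunk keys the NOWARN behaviour off `(gfp & GFP_ATOMIC) == GFP_ATOMIC`: GFP_ATOMIC is __GFP_HIGH plus the kswapd-reclaim bit, while GFP_KERNEL carries the kswapd bit but not __GFP_HIGH, so the mask-and-compare singles out atomic (NAPI) callers. Demonstrated with illustrative bit values:

#include <stdio.h>

#define __GFP_HIGH           0x20u
#define __GFP_NOWARN         0x200u
#define __GFP_DIRECT_RECLAIM 0x400u
#define __GFP_KSWAPD_RECLAIM 0x800u
#define GFP_ATOMIC (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

/* All of GFP_ATOMIC's bits must be present, not just some of them. */
static unsigned int maybe_nowarn(unsigned int gfp)
{
    if ((gfp & GFP_ATOMIC) == GFP_ATOMIC)
        gfp |= __GFP_NOWARN;
    return gfp;
}

int main(void)
{
    printf("atomic gets NOWARN: %d\n",
           !!(maybe_nowarn(GFP_ATOMIC) & __GFP_NOWARN));
    printf("kernel gets NOWARN: %d\n",
           !!(maybe_nowarn(GFP_KERNEL) & __GFP_NOWARN));
    return 0;
}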
+ * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique +@@ -708,7 +711,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, + /* If requested length is either too small or too big, + * we use kmalloc() for skb->head allocation. + */ +- if (len <= SKB_WITH_OVERHEAD(1024) || ++ if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) || + len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); +@@ -785,7 +788,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + * When the small frag allocator is available, prefer it over kmalloc + * for small fragments + */ +- if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || ++ if ((!NAPI_HAS_SMALL_PAGE_FRAG && ++ len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)) || + len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, +diff --git a/net/core/sock.c b/net/core/sock.c +index 9918a9a337b616..91f101231309d4 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2996,23 +2996,27 @@ void __release_sock(struct sock *sk) + __acquires(&sk->sk_lock.slock) + { + struct sk_buff *skb, *next; ++ int nb = 0; + + while ((skb = sk->sk_backlog.head) != NULL) { + sk->sk_backlog.head = sk->sk_backlog.tail = NULL; + + spin_unlock_bh(&sk->sk_lock.slock); + +- do { ++ while (1) { + next = skb->next; + prefetch(next); + DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb)); + skb_mark_not_on_list(skb); + sk_backlog_rcv(sk, skb); + +- cond_resched(); +- + skb = next; +- } while (skb != NULL); ++ if (!skb) ++ break; ++ ++ if (!(++nb & 15)) ++ cond_resched(); ++ } + + spin_lock_bh(&sk->sk_lock.slock); + } +@@ -3141,8 +3145,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) + } + } + +- if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged)) +- trace_sock_exceed_buf_limit(sk, prot, allocated, kind); ++ trace_sock_exceed_buf_limit(sk, prot, allocated, kind); + + sk_memory_allocated_sub(sk, amt); + +diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c +index 07736edc8b6a5d..c9bf1a9a6c99b9 100644 +--- a/net/dsa/dsa.c ++++ b/net/dsa/dsa.c +@@ -1613,6 +1613,7 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch); + void dsa_switch_shutdown(struct dsa_switch *ds) + { + struct net_device *master, *slave_dev; ++ LIST_HEAD(close_list); + struct dsa_port *dp; + + mutex_lock(&dsa2_mutex); +@@ -1622,10 +1623,16 @@ void dsa_switch_shutdown(struct dsa_switch *ds) + + rtnl_lock(); + ++ dsa_switch_for_each_cpu_port(dp, ds) ++ list_add(&dp->master->close_list, &close_list); ++ ++ dev_close_many(&close_list, true); ++ + dsa_switch_for_each_user_port(dp, ds) { + master = dsa_port_to_master(dp); + slave_dev = dp->slave; + ++ netif_device_detach(slave_dev); + netdev_upper_dev_unlink(master, slave_dev); + } + +diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c +index 146c1dbd15a93f..385581cf3b7ba0 100644 +--- a/net/dsa/tag_brcm.c ++++ b/net/dsa/tag_brcm.c +@@ -255,12 +255,14 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb, + { + int len = BRCM_LEG_TAG_LEN; + int source_port; ++ __be16 *proto; + u8 *brcm_tag; + + if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN))) + return NULL; + + brcm_tag = dsa_etype_header_pos_rx(skb); ++ proto = (__be16 *)(brcm_tag + BRCM_LEG_TAG_LEN); + + source_port = brcm_tag[5] & BRCM_LEG_PORT_ID; + +@@ -268,8 +270,12 @@ static struct sk_buff 
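The __release_sock() change above stops yielding per packet: the `!(++nb & 15)` test calls cond_resched() once every sixteenth backlog skb, cheap mask arithmetic instead of a call per iteration, so short backlogs pay no scheduler cost at all. Modelled as a counting loop:

#include <stdio.h>

static int process_backlog(int nr_skbs)
{
    int nb = 0, yields = 0;

    for (int i = 0; i < nr_skbs; i++) {
        /* ... sk_backlog_rcv(sk, skb) ... */
        if (!(++nb & 15))
            yields++;  /* stands in for cond_resched() */
    }
    return yields;
}

int main(void)
{
    printf("10 skbs -> %d yields, 64 skbs -> %d yields\n",
           process_backlog(10), process_backlog(64));
    return 0;
}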
*brcm_leg_tag_rcv(struct sk_buff *skb, + if (!skb->dev) + return NULL; + +- /* VLAN tag is added by BCM63xx internal switch */ +- if (netdev_uses_dsa(skb->dev)) ++ /* The internal switch in BCM63XX SoCs always tags on egress on the CPU ++ * port. We use VID 0 internally for untagged traffic, so strip the tag ++ * if the TCI field is all 0, and keep it otherwise to also retain ++ * e.g. 802.1p tagged packets. ++ */ ++ if (proto[0] == htons(ETH_P_8021Q) && proto[1] == 0) + len += VLAN_HLEN; + + /* Remove Broadcom tag and update checksum */ +diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c +index 049c3adeb85044..b4a6e26ec2871b 100644 +--- a/net/ethernet/eth.c ++++ b/net/ethernet/eth.c +@@ -615,7 +615,10 @@ EXPORT_SYMBOL(fwnode_get_mac_address); + */ + int device_get_mac_address(struct device *dev, char *addr) + { +- return fwnode_get_mac_address(dev_fwnode(dev), addr); ++ if (!fwnode_get_mac_address(dev_fwnode(dev), addr)) ++ return 0; ++ ++ return nvmem_get_mac_address(dev, addr); + } + EXPORT_SYMBOL(device_get_mac_address); + +diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c +index bbfb4095ddd6b4..06916a80cc1309 100644 +--- a/net/handshake/tlshd.c ++++ b/net/handshake/tlshd.c +@@ -254,6 +254,7 @@ static int tls_handshake_accept(struct handshake_req *req, + + out_cancel: + genlmsg_cancel(msg, hdr); ++ nlmsg_free(msg); + out: + return ret; + } +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c +index 5514b5bedc9298..70e958caa956d7 100644 +--- a/net/hsr/hsr_device.c ++++ b/net/hsr/hsr_device.c +@@ -313,6 +313,9 @@ static void send_hsr_supervision_frame(struct hsr_port *master, + } + + hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag)); ++ skb_set_network_header(skb, ETH_HLEN + HSR_HLEN); ++ skb_reset_mac_len(skb); ++ + set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf)); + set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version); + +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c +index 49fd664f50fc01..2caf6a2a819b21 100644 +--- a/net/ipv4/esp4.c ++++ b/net/ipv4/esp4.c +@@ -152,8 +152,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + + sk = esp_find_tcp_sk(x); + err = PTR_ERR_OR_ZERO(sk); +- if (err) ++ if (err) { ++ kfree_skb(skb); + goto out; ++ } + + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) +diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c +index 2d663fe50f876c..2064b401304129 100644 +--- a/net/ipv4/netfilter/nf_reject_ipv4.c ++++ b/net/ipv4/netfilter/nf_reject_ipv4.c +@@ -71,6 +71,27 @@ struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net, + } + EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset); + ++static bool nf_skb_is_icmp_unreach(const struct sk_buff *skb) ++{ ++ const struct iphdr *iph = ip_hdr(skb); ++ u8 *tp, _type; ++ int thoff; ++ ++ if (iph->protocol != IPPROTO_ICMP) ++ return false; ++ ++ thoff = skb_network_offset(skb) + sizeof(*iph); ++ ++ tp = skb_header_pointer(skb, ++ thoff + offsetof(struct icmphdr, type), ++ sizeof(_type), &_type); ++ ++ if (!tp) ++ return false; ++ ++ return *tp == ICMP_DEST_UNREACH; ++} ++ + struct sk_buff *nf_reject_skb_v4_unreach(struct net *net, + struct sk_buff *oldskb, + const struct net_device *dev, +@@ -91,6 +112,10 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net, + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) + return NULL; + ++ /* don't reply to ICMP_DEST_UNREACH with ICMP_DEST_UNREACH. */ ++ if (nf_skb_is_icmp_unreach(oldskb)) ++ return NULL; ++ + /* RFC says return as much as we can without exceeding 576 bytes. 
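nf_skb_is_icmp_unreach() just peeks one byte of the embedded transport header: if the offending packet is itself an ICMP destination-unreachable, answering it with another unreachable could ping-pong, so the reject path bails out. A flat-buffer model assuming a minimal 20-byte option-less IPv4 header (local defines, normally from the netinet headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IPPROTO_ICMP      1
#define ICMP_DEST_UNREACH 3

/* protocol lives at byte 9 of the IPv4 header, the ICMP type at byte 20 */
static bool is_icmp_unreach(const uint8_t *pkt, size_t len)
{
    if (len < 21)
        return false;
    if (pkt[9] != IPPROTO_ICMP)
        return false;
    return pkt[20] == ICMP_DEST_UNREACH;
}

int main(void)
{
    uint8_t pkt[28] = {0};

    pkt[0] = 0x45;  /* IPv4, IHL = 5 (20-byte header) */
    pkt[9] = IPPROTO_ICMP;
    pkt[20] = ICMP_DEST_UNREACH;
    printf("suppress reject: %d\n", is_icmp_unreach(pkt, sizeof(pkt)));
    return 0;
}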
*/ + len = min_t(unsigned int, 536, oldskb->len); + +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index fab550633ec9f3..99385fe34a1e57 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -1835,6 +1835,12 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, + { + struct nh_grp_entry *nhge, *tmp; + ++ /* If there is nothing to do, let's avoid the costly call to ++ * synchronize_net() ++ */ ++ if (list_empty(&nh->grp_list)) ++ return; ++ + list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) + remove_nh_grp_entry(net, nhge, nlinfo); + +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 20f5c8307443d3..fcabacec89c73e 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -617,6 +617,11 @@ static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash) + oldest_p = fnhe_p; + } + } ++ ++ /* Clear oldest->fnhe_daddr to prevent this fnhe from being ++ * rebound with new dsts in rt_bind_exception(). ++ */ ++ oldest->fnhe_daddr = 0; + fnhe_flush_routes(oldest); + *oldest_p = oldest->fnhe_next; + kfree_rcu(oldest, rcu); +diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c +index 02921974978501..a08b0b6e0727c9 100644 +--- a/net/ipv4/udp_tunnel_nic.c ++++ b/net/ipv4/udp_tunnel_nic.c +@@ -899,7 +899,7 @@ udp_tunnel_nic_netdevice_event(struct notifier_block *unused, + + err = udp_tunnel_nic_register(dev); + if (err) +- netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err); ++ netdev_warn(dev, "failed to register for UDP tunnel offloads: %d", err); + return notifier_from_errno(err); + } + /* All other events will need the udp_tunnel_nic state */ +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 1c3b0ba289fbd4..0e49ee83533b51 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -7102,7 +7102,9 @@ static const struct ctl_table addrconf_sysctl[] = { + .data = &ipv6_devconf.rpl_seg_enabled, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, + }, + { + .procname = "ioam6_enabled", +diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c +index 01005035ad1018..5361e2107458f5 100644 +--- a/net/ipv6/ah6.c ++++ b/net/ipv6/ah6.c +@@ -46,6 +46,34 @@ struct ah_skb_cb { + + #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) + ++/* Helper to save IPv6 addresses and extension headers to temporary storage */ ++static inline void ah6_save_hdrs(struct tmp_ext *iph_ext, ++ struct ipv6hdr *top_iph, int extlen) ++{ ++ if (!extlen) ++ return; ++ ++#if IS_ENABLED(CONFIG_IPV6_MIP6) ++ iph_ext->saddr = top_iph->saddr; ++#endif ++ iph_ext->daddr = top_iph->daddr; ++ memcpy(&iph_ext->hdrs, top_iph + 1, extlen - sizeof(*iph_ext)); ++} ++ ++/* Helper to restore IPv6 addresses and extension headers from temporary storage */ ++static inline void ah6_restore_hdrs(struct ipv6hdr *top_iph, ++ struct tmp_ext *iph_ext, int extlen) ++{ ++ if (!extlen) ++ return; ++ ++#if IS_ENABLED(CONFIG_IPV6_MIP6) ++ top_iph->saddr = iph_ext->saddr; ++#endif ++ top_iph->daddr = iph_ext->daddr; ++ memcpy(top_iph + 1, &iph_ext->hdrs, extlen - sizeof(*iph_ext)); ++} ++ + static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, + unsigned int size) + { +@@ -304,13 +332,7 @@ static void ah6_output_done(void *data, int err) + memcpy(ah->auth_data, icv, ahp->icv_trunc_len); + memcpy(top_iph, iph_base, IPV6HDR_BASELEN); + +- if (extlen) { +-#if IS_ENABLED(CONFIG_IPV6_MIP6) +- memcpy(&top_iph->saddr, iph_ext, extlen); 
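The ah6 change above is pure deduplication: four open-coded #if CONFIG_IPV6_MIP6 memcpy blocks collapse into one save/restore helper pair, so the set of mutable header fields is encoded exactly once and cannot drift out of sync. The shape of that refactor on toy types (the config switch and structures are illustrative):

#include <stdio.h>
#include <string.h>

#define CONFIG_IPV6_MIP6 1  /* mirrors IS_ENABLED() for the sketch */

struct addr { unsigned char b[16]; };
struct hdr  { struct addr saddr, daddr; };
struct tmp  {
#if CONFIG_IPV6_MIP6
    struct addr saddr;  /* mutable only when MIP6 is in play */
#endif
    struct addr daddr;
};

static void save_hdrs(struct tmp *t, const struct hdr *h)
{
#if CONFIG_IPV6_MIP6
    t->saddr = h->saddr;
#endif
    t->daddr = h->daddr;
}

static void restore_hdrs(struct hdr *h, const struct tmp *t)
{
#if CONFIG_IPV6_MIP6
    h->saddr = t->saddr;
#endif
    h->daddr = t->daddr;
}

int main(void)
{
    struct hdr h;
    struct tmp t;

    memset(&h, 0xab, sizeof(h));
    save_hdrs(&t, &h);
    memset(&h, 0, sizeof(h));   /* stands in for ICV computation clobber */
    restore_hdrs(&h, &t);
    printf("daddr restored: %d\n", h.daddr.b[0] == 0xab);
    return 0;
}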
+-#else +- memcpy(&top_iph->daddr, iph_ext, extlen); +-#endif +- } ++ ah6_restore_hdrs(top_iph, iph_ext, extlen); + + kfree(AH_SKB_CB(skb)->tmp); + xfrm_output_resume(skb->sk, skb, err); +@@ -381,12 +403,8 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) + */ + memcpy(iph_base, top_iph, IPV6HDR_BASELEN); + ++ ah6_save_hdrs(iph_ext, top_iph, extlen); + if (extlen) { +-#if IS_ENABLED(CONFIG_IPV6_MIP6) +- memcpy(iph_ext, &top_iph->saddr, extlen); +-#else +- memcpy(iph_ext, &top_iph->daddr, extlen); +-#endif + err = ipv6_clear_mutable_options(top_iph, + extlen - sizeof(*iph_ext) + + sizeof(*top_iph), +@@ -437,13 +455,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) + memcpy(ah->auth_data, icv, ahp->icv_trunc_len); + memcpy(top_iph, iph_base, IPV6HDR_BASELEN); + +- if (extlen) { +-#if IS_ENABLED(CONFIG_IPV6_MIP6) +- memcpy(&top_iph->saddr, iph_ext, extlen); +-#else +- memcpy(&top_iph->daddr, iph_ext, extlen); +-#endif +- } ++ ah6_restore_hdrs(top_iph, iph_ext, extlen); + + out_free: + kfree(iph_base); +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 7e4c8628cf9835..2caaab61b9967c 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -169,8 +169,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + + sk = esp6_find_tcp_sk(x); + err = PTR_ERR_OR_ZERO(sk); +- if (err) ++ if (err) { ++ kfree_skb(skb); + goto out; ++ } + + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) +diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c +index f3579bccf0a516..a19ca1907de360 100644 +--- a/net/ipv6/netfilter/nf_reject_ipv6.c ++++ b/net/ipv6/netfilter/nf_reject_ipv6.c +@@ -91,6 +91,32 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net, + } + EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset); + ++static bool nf_skb_is_icmp6_unreach(const struct sk_buff *skb) ++{ ++ const struct ipv6hdr *ip6h = ipv6_hdr(skb); ++ u8 proto = ip6h->nexthdr; ++ u8 _type, *tp; ++ int thoff; ++ __be16 fo; ++ ++ thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo); ++ ++ if (thoff < 0 || thoff >= skb->len || fo != 0) ++ return false; ++ ++ if (proto != IPPROTO_ICMPV6) ++ return false; ++ ++ tp = skb_header_pointer(skb, ++ thoff + offsetof(struct icmp6hdr, icmp6_type), ++ sizeof(_type), &_type); ++ ++ if (!tp) ++ return false; ++ ++ return *tp == ICMPV6_DEST_UNREACH; ++} ++ + struct sk_buff *nf_reject_skb_v6_unreach(struct net *net, + struct sk_buff *oldskb, + const struct net_device *dev, +@@ -104,6 +130,10 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net, + if (!nf_reject_ip6hdr_validate(oldskb)) + return NULL; + ++ /* Don't reply to ICMPV6_DEST_UNREACH with ICMPV6_DEST_UNREACH */ ++ if (nf_skb_is_icmp6_unreach(oldskb)) ++ return NULL; ++ + /* Include "As much of invoking packet as possible without the ICMPv6 + * packet exceeding the minimum IPv6 MTU" in the ICMP payload. 
+ */ +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 4f526606bc8943..7d72633ea01982 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -438,7 +438,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + if (flags & MSG_ERRQUEUE) + return ipv6_recv_error(sk, msg, len, addr_len); + +- if (np->rxpmtu && np->rxopt.bits.rxpmtu) ++ if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu)) + return ipv6_recv_rxpmtu(sk, msg, len, addr_len); + + skb = skb_recv_datagram(sk, flags, &err); +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 9ff8e723402ba8..6df2459f25618e 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -347,7 +347,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + if (flags & MSG_ERRQUEUE) + return ipv6_recv_error(sk, msg, len, addr_len); + +- if (np->rxpmtu && np->rxopt.bits.rxpmtu) ++ if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu)) + return ipv6_recv_rxpmtu(sk, msg, len, addr_len); + + try_again: +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index eaa4e5c6a5c3ac..a531fb2b14deea 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -216,6 +216,10 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata + + mutex_lock(&local->mtx); + ++ /* if any stations are set known (so they know this vif too), reject */ ++ if (sta_info_get_by_idx(sdata, 0)) ++ return -EBUSY; ++ + /* First check no ROC work is happening on this iface */ + list_for_each_entry(roc, &local->roc_list, list) { + if (roc->sdata != sdata) +@@ -235,12 +239,16 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata + ret = -EBUSY; + } + ++ /* ++ * More interface types could be added here but changing the ++ * address while powered makes the most sense in client modes. ++ */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: +- /* More interface types could be added here but changing the +- * address while powered makes the most sense in client modes. +- */ ++ /* refuse while connecting */ ++ if (sdata->u.mgd.auth_data || sdata->u.mgd.assoc_data) ++ return -EBUSY; + break; + default: + ret = -EOPNOTSUPP; +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 295c2fdbd3c742..aa7cee830b0045 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -4507,7 +4507,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link, + he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, + ies->data, ies->len); + +- if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap)) ++ if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap) + 1) + return chains; + + /* skip one byte ext_tag_id */ +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 164c6e80498265..e6a0a65d4d43ab 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -5341,10 +5341,14 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + if (WARN_ON(!local->started)) + goto drop; + +- if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { ++ if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC) && ++ !(status->flag & RX_FLAG_NO_PSDU && ++ status->zero_length_psdu_type == ++ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED))) { + /* +- * Validate the rate, unless a PLCP error means that +- * we probably can't have a valid rate here anyway. ++ * Validate the rate, unless there was a PLCP error which may ++ * have an invalid rate or the PSDU was not captured and may be ++ * missing rate information.
+ */ + + switch (status->encoding) { +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 643d64bdef2ea7..6b4b0c40570cef 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -1044,7 +1044,7 @@ static void __mptcp_clean_una(struct sock *sk) + if (WARN_ON_ONCE(!msk->recovery)) + break; + +- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); ++ msk->first_pending = mptcp_send_next(sk); + } + + dfrag_clear(sk, dfrag); +@@ -1335,7 +1335,12 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, + if (copy == 0) { + u64 snd_una = READ_ONCE(msk->snd_una); + +- if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { ++ /* No need for zero probe if there are any data pending ++ * either at the msk or ssk level; skb is the current write ++ * queue tail and can be empty at this point. ++ */ ++ if (snd_una != msk->snd_nxt || skb->len || ++ skb != tcp_send_head(ssk)) { + tcp_remove_empty_skb(ssk); + return 0; + } +@@ -1588,7 +1593,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk, + + mptcp_update_post_push(msk, dfrag, ret); + } +- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); ++ msk->first_pending = mptcp_send_next(sk); + + if (msk->snd_burst <= 0 || + !sk_stream_memory_free(ssk) || +@@ -1900,7 +1905,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + get_page(dfrag->page); + list_add_tail(&dfrag->list, &msk->rtx_queue); + if (!msk->first_pending) +- WRITE_ONCE(msk->first_pending, dfrag); ++ msk->first_pending = dfrag; + } + pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, + dfrag->data_seq, dfrag->data_len, dfrag->already_sent, +@@ -1935,19 +1940,35 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); + + static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, + struct msghdr *msg, +- size_t len, int flags, ++ size_t len, int flags, int copied_total, + struct scm_timestamping_internal *tss, + int *cmsg_flags) + { + struct sk_buff *skb, *tmp; ++ int total_data_len = 0; + int copied = 0; + + skb_queue_walk_safe(&msk->receive_queue, skb, tmp) { +- u32 offset = MPTCP_SKB_CB(skb)->offset; ++ u32 delta, offset = MPTCP_SKB_CB(skb)->offset; + u32 data_len = skb->len - offset; +- u32 count = min_t(size_t, len - copied, data_len); ++ u32 count; + int err; + ++ if (flags & MSG_PEEK) { ++ /* skip already peeked skbs */ ++ if (total_data_len + data_len <= copied_total) { ++ total_data_len += data_len; ++ continue; ++ } ++ ++ /* skip the already peeked data in the current skb */ ++ delta = copied_total - total_data_len; ++ offset += delta; ++ data_len -= delta; ++ } ++ ++ count = min_t(size_t, len - copied, data_len); ++ + if (!(flags & MSG_TRUNC)) { + err = skb_copy_datagram_msg(skb, offset, msg, count); + if (unlikely(err < 0)) { +@@ -1964,22 +1985,19 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, + + copied += count; + +- if (count < data_len) { +- if (!(flags & MSG_PEEK)) { ++ if (!(flags & MSG_PEEK)) { ++ msk->bytes_consumed += count; ++ if (count < data_len) { + MPTCP_SKB_CB(skb)->offset += count; + MPTCP_SKB_CB(skb)->map_seq += count; +- msk->bytes_consumed += count; ++ break; + } +- break; +- } + +- if (!(flags & MSG_PEEK)) { + /* we will bulk release the skb memory later */ + skb->destructor = NULL; + WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); + __skb_unlink(skb, &msk->receive_queue); + __kfree_skb(skb); +- msk->bytes_consumed += count; + } + + if (copied >= len) +@@ -2203,7 +2221,8 @@ static int mptcp_recvmsg(struct sock *sk, 
struct msghdr *msg, size_t len, + while (copied < len) { + int err, bytes_read; + +- bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); ++ bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, ++ copied, &tss, &cmsg_flags); + if (unlikely(bytes_read < 0)) { + if (!copied) + copied = bytes_read; +@@ -2899,7 +2918,7 @@ static void __mptcp_clear_xmit(struct sock *sk) + struct mptcp_sock *msk = mptcp_sk(sk); + struct mptcp_data_frag *dtmp, *dfrag; + +- WRITE_ONCE(msk->first_pending, NULL); ++ msk->first_pending = NULL; + list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) + dfrag_clear(sk, dfrag); + } +@@ -3456,9 +3475,6 @@ void __mptcp_data_acked(struct sock *sk) + + void __mptcp_check_push(struct sock *sk, struct sock *ssk) + { +- if (!mptcp_send_head(sk)) +- return; +- + if (!sock_owned_by_user(sk)) + __mptcp_subflow_push_pending(sk, ssk, false); + else +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 1f213706dfaa52..dc98f588c8a829 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -379,7 +379,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk) + { + const struct mptcp_sock *msk = mptcp_sk(sk); + +- return READ_ONCE(msk->first_pending); ++ return msk->first_pending; + } + + static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk) +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 5ca1d775e976d4..80443b4eaeff0e 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -2576,6 +2576,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, + struct nft_chain *chain = ctx->chain; + struct nft_chain_hook hook = {}; + struct nft_stats *stats = NULL; ++ struct nftables_pernet *nft_net; + struct nft_hook *h, *next; + struct nf_hook_ops *ops; + struct nft_trans *trans; +@@ -2616,6 +2617,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, + if (nft_hook_list_find(&basechain->hook_list, h)) { + list_del(&h->list); + kfree(h); ++ continue; ++ } ++ ++ nft_net = nft_pernet(ctx->net); ++ list_for_each_entry(trans, &nft_net->commit_list, list) { ++ if (trans->msg_type != NFT_MSG_NEWCHAIN || ++ trans->ctx.table != ctx->table || ++ !nft_trans_chain_update(trans)) ++ continue; ++ ++ if (nft_hook_list_find(&nft_trans_chain_hooks(trans), h)) { ++ nft_chain_release_hook(&hook); ++ return -EEXIST; ++ } + } + } + } else { +@@ -8493,6 +8508,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, + { + const struct nlattr * const *nla = ctx->nla; + struct nft_flowtable_hook flowtable_hook; ++ struct nftables_pernet *nft_net; + struct nft_hook *hook, *next; + struct nft_trans *trans; + bool unregister = false; +@@ -8508,6 +8524,20 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, + if (nft_hook_list_find(&flowtable->hook_list, hook)) { + list_del(&hook->list); + kfree(hook); ++ continue; ++ } ++ ++ nft_net = nft_pernet(ctx->net); ++ list_for_each_entry(trans, &nft_net->commit_list, list) { ++ if (trans->msg_type != NFT_MSG_NEWFLOWTABLE || ++ trans->ctx.table != ctx->table || ++ !nft_trans_flowtable_update(trans)) ++ continue; ++ ++ if (nft_hook_list_find(&nft_trans_flowtable_hooks(trans), hook)) { ++ err = -EEXIST; ++ goto err_flowtable_update_hook; ++ } + } + } + +diff --git a/net/rds/rds.h b/net/rds/rds.h +index dc360252c51573..5b1c072e2e7ff4 100644 +--- a/net/rds/rds.h ++++ b/net/rds/rds.h +@@ -93,7 +93,7 @@ enum { + 
+ /* Max number of multipaths per RDS connection. Must be a power of 2 */ + #define RDS_MPATH_WORKERS 8 +-#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \ ++#define RDS_MPATH_HASH(rs, n) (jhash_1word(ntohs((rs)->rs_bound_port), \ + (rs)->rs_hash_initval) & ((n) - 1)) + + #define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr)) +diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c +index ac87fcff4795e8..a1c0e8a9fc8c2f 100644 +--- a/net/sched/act_bpf.c ++++ b/net/sched/act_bpf.c +@@ -47,12 +47,10 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb, + filter = rcu_dereference(prog->filter); + if (at_ingress) { + __skb_push(skb, skb->mac_len); +- bpf_compute_data_pointers(skb); +- filter_res = bpf_prog_run(filter, skb); ++ filter_res = bpf_prog_run_data_pointers(filter, skb); + __skb_pull(skb, skb->mac_len); + } else { +- bpf_compute_data_pointers(skb); +- filter_res = bpf_prog_run(filter, skb); ++ filter_res = bpf_prog_run_data_pointers(filter, skb); + } + if (unlikely(!skb->tstamp && skb->tstamp_type)) + skb->tstamp_type = SKB_CLOCK_REALTIME; +diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c +index 0d7aee8933c5ff..d90a2fa5966b7f 100644 +--- a/net/sched/act_connmark.c ++++ b/net/sched/act_connmark.c +@@ -88,7 +88,7 @@ TC_INDIRECT_SCOPE int tcf_connmark_act(struct sk_buff *skb, + /* using overlimits stats to count how many packets marked */ + tcf_action_inc_overlimit_qstats(&ca->common); + out: +- return READ_ONCE(ca->tcf_action); ++ return parms->action; + } + + static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = { +@@ -167,6 +167,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, + if (err < 0) + goto release_idr; + ++ nparms->action = parm->action; ++ + spin_lock_bh(&ci->tcf_lock); + goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); + oparms = rcu_replace_pointer(ci->parms, nparms, lockdep_is_held(&ci->tcf_lock)); +@@ -190,20 +192,22 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, + static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) + { ++ const struct tcf_connmark_info *ci = to_connmark(a); + unsigned char *b = skb_tail_pointer(skb); +- struct tcf_connmark_info *ci = to_connmark(a); +- struct tc_connmark opt = { +- .index = ci->tcf_index, +- .refcnt = refcount_read(&ci->tcf_refcnt) - ref, +- .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind, +- }; +- struct tcf_connmark_parms *parms; ++ const struct tcf_connmark_parms *parms; ++ struct tc_connmark opt; + struct tcf_t t; + +- spin_lock_bh(&ci->tcf_lock); +- parms = rcu_dereference_protected(ci->parms, lockdep_is_held(&ci->tcf_lock)); ++ memset(&opt, 0, sizeof(opt)); + +- opt.action = ci->tcf_action; ++ opt.index = ci->tcf_index; ++ opt.refcnt = refcount_read(&ci->tcf_refcnt) - ref; ++ opt.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind; ++ ++ rcu_read_lock(); ++ parms = rcu_dereference(ci->parms); ++ ++ opt.action = parms->action; + opt.zone = parms->zone; + if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; +@@ -212,12 +216,12 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, + if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t, + TCA_CONNMARK_PAD)) + goto nla_put_failure; +- spin_unlock_bh(&ci->tcf_lock); ++ rcu_read_unlock(); + + return skb->len; + + nla_put_failure: +- spin_unlock_bh(&ci->tcf_lock); ++ rcu_read_unlock(); + nlmsg_trim(skb, b); + return -1; + } +diff --git a/net/sched/act_ife.c 
b/net/sched/act_ife.c +index bc7611b0744c41..431921204f6603 100644 +--- a/net/sched/act_ife.c ++++ b/net/sched/act_ife.c +@@ -644,13 +644,15 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, + unsigned char *b = skb_tail_pointer(skb); + struct tcf_ife_info *ife = to_ife(a); + struct tcf_ife_params *p; +- struct tc_ife opt = { +- .index = ife->tcf_index, +- .refcnt = refcount_read(&ife->tcf_refcnt) - ref, +- .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind, +- }; ++ struct tc_ife opt; + struct tcf_t t; + ++ memset(&opt, 0, sizeof(opt)); ++ ++ opt.index = ife->tcf_index; ++ opt.refcnt = refcount_read(&ife->tcf_refcnt) - ref; ++ opt.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind; ++ + spin_lock_bh(&ife->tcf_lock); + opt.action = ife->tcf_action; + p = rcu_dereference_protected(ife->params, +diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c +index db7151c6b70b79..29dfe6767f108c 100644 +--- a/net/sched/cls_bpf.c ++++ b/net/sched/cls_bpf.c +@@ -97,12 +97,10 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb, + } else if (at_ingress) { + /* It is safe to push/pull even if skb_shared() */ + __skb_push(skb, skb->mac_len); +- bpf_compute_data_pointers(skb); +- filter_res = bpf_prog_run(prog->filter, skb); ++ filter_res = bpf_prog_run_data_pointers(prog->filter, skb); + __skb_pull(skb, skb->mac_len); + } else { +- bpf_compute_data_pointers(skb); +- filter_res = bpf_prog_run(prog->filter, skb); ++ filter_res = bpf_prog_run_data_pointers(prog->filter, skb); + } + if (unlikely(!skb->tstamp && skb->tstamp_type)) + skb->tstamp_type = SKB_CLOCK_REALTIME; +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index b51af871a621ca..1b51b3038b4bd6 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -178,9 +178,10 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) + static void try_bulk_dequeue_skb(struct Qdisc *q, + struct sk_buff *skb, + const struct netdev_queue *txq, +- int *packets) ++ int *packets, int budget) + { + int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; ++ int cnt = 0; + + while (bytelimit > 0) { + struct sk_buff *nskb = q->dequeue(q); + +@@ -191,8 +192,10 @@ static void try_bulk_dequeue_skb(struct Qdisc *q, + bytelimit -= nskb->len; /* covers GSO len */ + skb->next = nskb; + skb = nskb; +- (*packets)++; /* GSO counts as one pkt */ ++ if (++cnt >= budget) ++ break; + } ++ (*packets) += cnt; + skb_mark_not_on_list(skb); + } + +@@ -226,7 +229,7 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q, + * A requeued skb (via q->gso_skb) can also be a SKB list. + */ + static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, +- int *packets) ++ int *packets, int budget) + { + const struct netdev_queue *txq = q->dev_queue; + struct sk_buff *skb = NULL; +@@ -293,7 +296,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, + if (skb) { + bulk: + if (qdisc_may_bulk(q)) +- try_bulk_dequeue_skb(q, skb, txq, packets); ++ try_bulk_dequeue_skb(q, skb, txq, packets, budget); + else + try_bulk_dequeue_skb_slow(q, skb, packets); + } +@@ -385,7 +388,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, + * >0 - queue is not empty.
+ * + */ +-static inline bool qdisc_restart(struct Qdisc *q, int *packets) ++static inline bool qdisc_restart(struct Qdisc *q, int *packets, int budget) + { + spinlock_t *root_lock = NULL; + struct netdev_queue *txq; +@@ -394,7 +397,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets) + bool validate; + + /* Dequeue packet */ +- skb = dequeue_skb(q, &validate, packets); ++ skb = dequeue_skb(q, &validate, packets, budget); + if (unlikely(!skb)) + return false; + +@@ -412,7 +415,7 @@ void __qdisc_run(struct Qdisc *q) + int quota = READ_ONCE(dev_tx_weight); + int packets; + +- while (qdisc_restart(q, &packets)) { ++ while (qdisc_restart(q, &packets, quota)) { + quota -= packets; + if (quota <= 0) { + if (q->flags & TCQ_F_NOLOCK) +diff --git a/net/sctp/diag.c b/net/sctp/diag.c +index c3d6b92dd3862f..ad3e1462a896ed 100644 +--- a/net/sctp/diag.c ++++ b/net/sctp/diag.c +@@ -73,19 +73,26 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, + struct nlattr *attr; + void *info = NULL; + ++ rcu_read_lock(); + list_for_each_entry_rcu(laddr, address_list, list) + addrcnt++; ++ rcu_read_unlock(); + + attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt); + if (!attr) + return -EMSGSIZE; + + info = nla_data(attr); ++ rcu_read_lock(); + list_for_each_entry_rcu(laddr, address_list, list) { + memcpy(info, &laddr->a, sizeof(laddr->a)); + memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a)); + info += addrlen; ++ ++ if (!--addrcnt) ++ break; + } ++ rcu_read_unlock(); + + return 0; + } +@@ -223,14 +230,15 @@ struct sctp_comm_param { + bool net_admin; + }; + +-static size_t inet_assoc_attr_size(struct sctp_association *asoc) ++static size_t inet_assoc_attr_size(struct sock *sk, ++ struct sctp_association *asoc) + { + int addrlen = sizeof(struct sockaddr_storage); + int addrcnt = 0; + struct sctp_sockaddr_entry *laddr; + + list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list, +- list) ++ list, lockdep_sock_is_held(sk)) + addrcnt++; + + return nla_total_size(sizeof(struct sctp_info)) +@@ -256,11 +264,14 @@ static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *t + if (err) + return err; + +- rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL); +- if (!rep) ++ lock_sock(sk); ++ ++ rep = nlmsg_new(inet_assoc_attr_size(sk, assoc), GFP_KERNEL); ++ if (!rep) { ++ release_sock(sk); + return -ENOMEM; ++ } + +- lock_sock(sk); + if (ep != assoc->ep) { + err = -EAGAIN; + goto out; +diff --git a/net/sctp/transport.c b/net/sctp/transport.c +index 31eca29b6cfbfb..abb44c0ac1a0ba 100644 +--- a/net/sctp/transport.c ++++ b/net/sctp/transport.c +@@ -495,6 +495,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) + + if (tp->rttvar || tp->srtt) { + struct net *net = tp->asoc->base.net; ++ unsigned int rto_beta, rto_alpha; + /* 6.3.1 C3) When a new RTT measurement R' is made, set + * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| + * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' +@@ -506,10 +507,14 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) + * For example, assuming the default value of RTO.Alpha of + * 1/8, rto_alpha would be expressed as 3. 
+ */ +- tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) +- + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); +- tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) +- + (rtt >> net->sctp.rto_alpha); ++ rto_beta = READ_ONCE(net->sctp.rto_beta); ++ if (rto_beta < 32) ++ tp->rttvar = tp->rttvar - (tp->rttvar >> rto_beta) ++ + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> rto_beta); ++ rto_alpha = READ_ONCE(net->sctp.rto_alpha); ++ if (rto_alpha < 32) ++ tp->srtt = tp->srtt - (tp->srtt >> rto_alpha) ++ + (rtt >> rto_alpha); + } else { + /* 6.3.1 C2) When the first RTT measurement R is made, set + * SRTT <- R, RTTVAR <- R/2. +diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c +index 4f485b9b31b288..2f748226f14335 100644 +--- a/net/smc/smc_clc.c ++++ b/net/smc/smc_clc.c +@@ -883,6 +883,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) + return SMC_CLC_DECL_CNFERR; + } + pclc_base->hdr.typev1 = SMC_TYPE_N; ++ ini->smc_type_v1 = SMC_TYPE_N; + } else { + pclc_base->iparea_offset = htons(sizeof(*pclc_smcd)); + plen += sizeof(*pclc_prfx) + +diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c +index 95696f42647ec1..b61384b08e7c3a 100644 +--- a/net/strparser/strparser.c ++++ b/net/strparser/strparser.c +@@ -238,7 +238,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, + strp_parser_err(strp, -EMSGSIZE, desc); + break; + } else if (len <= (ssize_t)head->len - +- skb->len - stm->strp.offset) { ++ (ssize_t)skb->len - stm->strp.offset) { + /* Length must be into new skb (and also + * greater than zero) + */ +diff --git a/net/tipc/net.c b/net/tipc/net.c +index 0e95572e56b41e..7e65d0b0c4a8d1 100644 +--- a/net/tipc/net.c ++++ b/net/tipc/net.c +@@ -145,7 +145,9 @@ void tipc_net_finalize_work(struct work_struct *work) + { + struct tipc_net *tn = container_of(work, struct tipc_net, work); + ++ rtnl_lock(); + tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr); ++ rtnl_unlock(); + } + + void tipc_net_stop(struct net *net) +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index 0068e758be4ddb..66fd606c43f45d 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -136,6 +136,7 @@ enum unix_vertex_index { + }; + + static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1; ++static unsigned long unix_vertex_max_scc_index = UNIX_VERTEX_INDEX_START; + + static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge) + { +@@ -144,6 +145,7 @@ static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge) + if (!vertex) { + vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry); + vertex->index = unix_vertex_unvisited_index; ++ vertex->scc_index = ++unix_vertex_max_scc_index; + vertex->out_degree = 0; + INIT_LIST_HEAD(&vertex->edges); + INIT_LIST_HEAD(&vertex->scc_entry); +@@ -480,10 +482,15 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde + scc_dead = unix_vertex_dead(v); + } + +- if (scc_dead) ++ if (scc_dead) { + unix_collect_skb(&scc, hitlist); +- else if (!unix_graph_maybe_cyclic) +- unix_graph_maybe_cyclic = unix_scc_cyclic(&scc); ++ } else { ++ if (unix_vertex_max_scc_index < vertex->scc_index) ++ unix_vertex_max_scc_index = vertex->scc_index; ++ ++ if (!unix_graph_maybe_cyclic) ++ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc); ++ } + + list_del(&scc); + } +@@ -498,6 +505,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist) + unsigned long last_index = UNIX_VERTEX_INDEX_START; + + 
unix_graph_maybe_cyclic = false; ++ unix_vertex_max_scc_index = UNIX_VERTEX_INDEX_START; + + /* Visit every vertex exactly once. + * __unix_walk_scc() moves visited vertices to unix_visited_vertices. +diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c +index d3b3f9e720b3b6..427072285b8c7f 100644 +--- a/net/xfrm/espintcp.c ++++ b/net/xfrm/espintcp.c +@@ -169,8 +169,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) + { + struct espintcp_ctx *ctx = espintcp_getctx(sk); + +- if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog)) ++ if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog)) { ++ kfree_skb(skb); + return -ENOBUFS; ++ } + + __skb_queue_tail(&ctx->out_queue, skb); + +diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c +index 870dde67707b1f..0acf7d6baa37fb 100644 +--- a/security/integrity/ima/ima_appraise.c ++++ b/security/integrity/ima/ima_appraise.c +@@ -671,6 +671,15 @@ static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name, + return 0; + } + ++/* ++ * ima_reset_appraise_flags - reset ima_iint_cache flags ++ * ++ * @digsig: whether to clear/set IMA_DIGSIG flag, tristate values ++ * 0: clear IMA_DIGSIG ++ * 1: set IMA_DIGSIG ++ * -1: don't change IMA_DIGSIG ++ * ++ */ + static void ima_reset_appraise_flags(struct inode *inode, int digsig) + { + struct integrity_iint_cache *iint; +@@ -683,9 +692,9 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig) + return; + iint->measured_pcrs = 0; + set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags); +- if (digsig) ++ if (digsig == 1) + set_bit(IMA_DIGSIG, &iint->atomic_flags); +- else ++ else if (digsig == 0) + clear_bit(IMA_DIGSIG, &iint->atomic_flags); + } + +@@ -770,6 +779,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, + digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG); + } else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) { + digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG); ++ } else { ++ digsig = -1; + } + if (result == 1 || evm_revalidate_status(xattr_name)) { + ima_reset_appraise_flags(d_backing_inode(dentry), digsig); +@@ -783,18 +794,20 @@ int ima_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, + const char *acl_name, struct posix_acl *kacl) + { + if (evm_revalidate_status(acl_name)) +- ima_reset_appraise_flags(d_backing_inode(dentry), 0); ++ ima_reset_appraise_flags(d_backing_inode(dentry), -1); + + return 0; + } + + int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name) + { +- int result; ++ int result, digsig = -1; + + result = ima_protect_xattr(dentry, xattr_name, NULL, 0); + if (result == 1 || evm_revalidate_status(xattr_name)) { +- ima_reset_appraise_flags(d_backing_inode(dentry), 0); ++ if (!strcmp(xattr_name, XATTR_NAME_IMA)) ++ digsig = 0; ++ ima_reset_appraise_flags(d_backing_inode(dentry), digsig); + if (result == 1) + result = 0; + } +diff --git a/sound/drivers/serial-generic.c b/sound/drivers/serial-generic.c +index c8db6c75d133d1..8b8e9e871ed37c 100644 +--- a/sound/drivers/serial-generic.c ++++ b/sound/drivers/serial-generic.c +@@ -37,6 +37,8 @@ MODULE_LICENSE("GPL"); + #define SERIAL_TX_STATE_ACTIVE 1 + #define SERIAL_TX_STATE_WAKEUP 2 + ++#define INTERNAL_BUF_SIZE 256 ++ + struct snd_serial_generic { + struct serdev_device *serdev; + +@@ -51,6 +53,7 @@ struct snd_serial_generic { + struct work_struct tx_work; + unsigned long tx_state; + ++ char tx_buf[INTERNAL_BUF_SIZE]; + }; + + static void 
snd_serial_generic_tx_wakeup(struct snd_serial_generic *drvdata) +@@ -61,11 +64,8 @@ static void snd_serial_generic_tx_wakeup(struct snd_serial_generic *drvdata) + schedule_work(&drvdata->tx_work); + } + +-#define INTERNAL_BUF_SIZE 256 +- + static void snd_serial_generic_tx_work(struct work_struct *work) + { +- static char buf[INTERNAL_BUF_SIZE]; + int num_bytes; + struct snd_serial_generic *drvdata = container_of(work, struct snd_serial_generic, + tx_work); +@@ -78,8 +78,10 @@ static void snd_serial_generic_tx_work(struct work_struct *work) + if (!test_bit(SERIAL_MODE_OUTPUT_OPEN, &drvdata->filemode)) + break; + +- num_bytes = snd_rawmidi_transmit_peek(substream, buf, INTERNAL_BUF_SIZE); +- num_bytes = serdev_device_write_buf(drvdata->serdev, buf, num_bytes); ++ num_bytes = snd_rawmidi_transmit_peek(substream, drvdata->tx_buf, ++ INTERNAL_BUF_SIZE); ++ num_bytes = serdev_device_write_buf(drvdata->serdev, drvdata->tx_buf, ++ num_bytes); + + if (!num_bytes) + break; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 5fe6b71d90f4f4..65c9d47f03af57 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -3644,6 +3644,15 @@ static void alc256_shutup(struct hda_codec *codec) + hp_pin = 0x21; + + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ ++ ++ /* 3k pull low control for Headset jack. */ ++ /* NOTE: call this before clearing the pin, otherwise codec stalls */ ++ /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly ++ * when booting with headset plugged. So skip setting it for the codec alc257 ++ */ ++ if (spec->en_3kpull_low) ++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12); ++ + hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); + + if (hp_pin_sense) { +@@ -3654,14 +3663,6 @@ static void alc256_shutup(struct hda_codec *codec) + + msleep(75); + +- /* 3k pull low control for Headset jack. */ +- /* NOTE: call this before clearing the pin, otherwise codec stalls */ +- /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly +- * when booting with headset plugged. 
So skip setting it for the codec alc257 +- */ +- if (spec->en_3kpull_low) +- alc_update_coef_idx(codec, 0x46, 0, 3 << 12); +- + if (!spec->no_shutup_pins) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); +diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c +index 188b8b43c524f2..b7529e29005725 100644 +--- a/sound/soc/codecs/cs4271.c ++++ b/sound/soc/codecs/cs4271.c +@@ -594,17 +594,17 @@ static int cs4271_component_probe(struct snd_soc_component *component) + + ret = regcache_sync(cs4271->regmap); + if (ret < 0) +- return ret; ++ goto err_disable_regulator; + + ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2, + CS4271_MODE2_PDN | CS4271_MODE2_CPEN, + CS4271_MODE2_PDN | CS4271_MODE2_CPEN); + if (ret < 0) +- return ret; ++ goto err_disable_regulator; + ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2, + CS4271_MODE2_PDN, 0); + if (ret < 0) +- return ret; ++ goto err_disable_regulator; + /* Power-up sequence requires 85 uS */ + udelay(85); + +@@ -614,6 +614,10 @@ static int cs4271_component_probe(struct snd_soc_component *component) + CS4271_MODE2_MUTECAEQUB); + + return 0; ++ ++err_disable_regulator: ++ regulator_bulk_disable(ARRAY_SIZE(cs4271->supplies), cs4271->supplies); ++ return ret; + } + + static void cs4271_component_remove(struct snd_soc_component *component) +diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c +index b71ef03c4aef02..c7d6696b1bfdff 100644 +--- a/sound/soc/codecs/lpass-va-macro.c ++++ b/sound/soc/codecs/lpass-va-macro.c +@@ -1555,7 +1555,7 @@ static int va_macro_probe(struct platform_device *pdev) + if (ret) + goto err_clkout; + +- va->fsgen = clk_hw_get_clk(&va->hw, "fsgen"); ++ va->fsgen = devm_clk_hw_get_clk(dev, &va->hw, "fsgen"); + if (IS_ERR(va->fsgen)) { + ret = PTR_ERR(va->fsgen); + goto err_clkout; +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c +index 2adf744c652634..4023b88e7bc13a 100644 +--- a/sound/soc/codecs/max98090.c ++++ b/sound/soc/codecs/max98090.c +@@ -1234,9 +1234,11 @@ static const struct snd_soc_dapm_widget max98091_dapm_widgets[] = { + SND_SOC_DAPM_INPUT("DMIC4"), + + SND_SOC_DAPM_SUPPLY("DMIC3_ENA", M98090_REG_DIGITAL_MIC_ENABLE, +- M98090_DIGMIC3_SHIFT, 0, NULL, 0), ++ M98090_DIGMIC3_SHIFT, 0, max98090_shdn_event, ++ SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_SUPPLY("DMIC4_ENA", M98090_REG_DIGITAL_MIC_ENABLE, +- M98090_DIGMIC4_SHIFT, 0, NULL, 0), ++ M98090_DIGMIC4_SHIFT, 0, max98090_shdn_event, ++ SND_SOC_DAPM_POST_PMU), + }; + + static const struct snd_soc_dapm_route max98090_dapm_routes[] = { +diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c +index 43775c19444525..836cf06a452667 100644 +--- a/sound/soc/codecs/tas2781-i2c.c ++++ b/sound/soc/codecs/tas2781-i2c.c +@@ -616,7 +616,8 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) + { + struct i2c_client *client = (struct i2c_client *)tas_priv->client; + unsigned int dev_addrs[TASDEVICE_MAX_CHANNELS]; +- int i, ndev = 0; ++ int ndev = 0; ++ int i, rc; + + if (tas_priv->isacpi) { + ndev = device_property_read_u32_array(&client->dev, +@@ -627,8 +628,12 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) + } else { + ndev = (ndev < ARRAY_SIZE(dev_addrs)) + ? 
ndev : ARRAY_SIZE(dev_addrs); +- ndev = device_property_read_u32_array(&client->dev, ++ rc = device_property_read_u32_array(&client->dev, + "ti,audio-slots", dev_addrs, ndev); ++ if (rc != 0) { ++ ndev = 1; ++ dev_addrs[0] = client->addr; ++ } + } + + tas_priv->irq = +diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c +index 56e795a00e22fd..591f6c9f9d3a60 100644 +--- a/sound/soc/codecs/tlv320aic3x.c ++++ b/sound/soc/codecs/tlv320aic3x.c +@@ -121,6 +121,16 @@ static const struct reg_default aic3x_reg[] = { + { 108, 0x00 }, { 109, 0x00 }, + }; + ++static const struct reg_sequence aic3007_class_d[] = { ++ /* Class-D speaker driver init; datasheet p. 46 */ ++ { AIC3X_PAGE_SELECT, 0x0D }, ++ { 0xD, 0x0D }, ++ { 0x8, 0x5C }, ++ { 0x8, 0x5D }, ++ { 0x8, 0x5C }, ++ { AIC3X_PAGE_SELECT, 0x00 }, ++}; ++ + static bool aic3x_volatile_reg(struct device *dev, unsigned int reg) + { + switch (reg) { +@@ -1393,6 +1403,10 @@ static int aic3x_set_power(struct snd_soc_component *component, int power) + gpiod_set_value(aic3x->gpio_reset, 0); + } + ++ if (aic3x->model == AIC3X_MODEL_3007) ++ regmap_multi_reg_write_bypassed(aic3x->regmap, aic3007_class_d, ++ ARRAY_SIZE(aic3007_class_d)); ++ + /* Sync reg_cache with the hardware */ + regcache_cache_only(aic3x->regmap, false); + regcache_sync(aic3x->regmap); +@@ -1723,17 +1737,6 @@ static void aic3x_configure_ocmv(struct device *dev, struct aic3x_priv *aic3x) + } + } + +- +-static const struct reg_sequence aic3007_class_d[] = { +- /* Class-D speaker driver init; datasheet p. 46 */ +- { AIC3X_PAGE_SELECT, 0x0D }, +- { 0xD, 0x0D }, +- { 0x8, 0x5C }, +- { 0x8, 0x5D }, +- { 0x8, 0x5C }, +- { AIC3X_PAGE_SELECT, 0x00 }, +-}; +- + int aic3x_probe(struct device *dev, struct regmap *regmap, kernel_ulong_t driver_data) + { + struct aic3x_priv *aic3x; +@@ -1825,13 +1828,6 @@ int aic3x_probe(struct device *dev, struct regmap *regmap, kernel_ulong_t driver + + aic3x_configure_ocmv(dev, aic3x); + +- if (aic3x->model == AIC3X_MODEL_3007) { +- ret = regmap_register_patch(aic3x->regmap, aic3007_class_d, +- ARRAY_SIZE(aic3007_class_d)); +- if (ret != 0) +- dev_err(dev, "Failed to init class D: %d\n", ret); +- } +- + ret = devm_snd_soc_register_component(dev, &soc_component_dev_aic3x, &aic3x_dai, 1); + if (ret) + return ret; +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c +index a6948a57636ab5..0de878d64a3bd1 100644 +--- a/sound/soc/fsl/fsl_sai.c ++++ b/sound/soc/fsl/fsl_sai.c +@@ -322,7 +322,6 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, + break; + case SND_SOC_DAIFMT_PDM: + val_cr2 |= FSL_SAI_CR2_BCP; +- val_cr4 &= ~FSL_SAI_CR4_MF; + sai->is_pdm_mode = true; + break; + case SND_SOC_DAIFMT_RIGHT_J: +@@ -597,7 +596,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream, + val_cr5 |= FSL_SAI_CR5_WNW(slot_width); + val_cr5 |= FSL_SAI_CR5_W0W(slot_width); + +- if (sai->is_lsb_first || sai->is_pdm_mode) ++ if (sai->is_lsb_first) + val_cr5 |= FSL_SAI_CR5_FBT(0); + else + val_cr5 |= FSL_SAI_CR5_FBT(word_width - 1); +diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c +index 781019685b941f..9251c38cf9d126 100644 +--- a/sound/soc/intel/avs/pcm.c ++++ b/sound/soc/intel/avs/pcm.c +@@ -611,6 +611,8 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so + data = snd_soc_dai_get_dma_data(dai, substream); + host_stream = data->host_stream; + ++ if (runtime->state == SNDRV_PCM_STATE_XRUN) ++ hdac_stream(host_stream)->prepared = false; + if (hdac_stream(host_stream)->prepared) + 
return 0; + +diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c +index a0dd914c8ed136..3b4061508c1804 100644 +--- a/sound/soc/meson/aiu-encoder-i2s.c ++++ b/sound/soc/meson/aiu-encoder-i2s.c +@@ -236,8 +236,12 @@ static int aiu_encoder_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) + inv == SND_SOC_DAIFMT_IB_IF) + val |= AIU_CLK_CTRL_LRCLK_INVERT; + +- if (inv == SND_SOC_DAIFMT_IB_NF || +- inv == SND_SOC_DAIFMT_IB_IF) ++ /* ++ * The SoC changes data on the rising edge of the bitclock ++ * so an inversion of the bitclock is required in normal mode ++ */ ++ if (inv == SND_SOC_DAIFMT_NB_NF || ++ inv == SND_SOC_DAIFMT_NB_IF) + val |= AIU_CLK_CTRL_AOCLK_INVERT; + + /* Signal skew */ +@@ -328,4 +332,3 @@ const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = { + .startup = aiu_encoder_i2s_startup, + .shutdown = aiu_encoder_i2s_shutdown, + }; +- +diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c +index 195780f75d05d2..9f53cffe5184e3 100644 +--- a/sound/soc/qcom/qdsp6/q6asm.c ++++ b/sound/soc/qcom/qdsp6/q6asm.c +@@ -376,9 +376,9 @@ static void q6asm_audio_client_free_buf(struct audio_client *ac, + + spin_lock_irqsave(&ac->lock, flags); + port->num_periods = 0; ++ spin_unlock_irqrestore(&ac->lock, flags); + kfree(port->buf); + port->buf = NULL; +- spin_unlock_irqrestore(&ac->lock, flags); + } + + /** +diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c +index d5cc967992d161..ee195a54d0c3b1 100644 +--- a/sound/soc/qcom/sc8280xp.c ++++ b/sound/soc/qcom/sc8280xp.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -58,8 +59,10 @@ static int sc8280xp_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, + SNDRV_PCM_HW_PARAM_RATE); + struct snd_interval *channels = hw_param_interval(params, + SNDRV_PCM_HW_PARAM_CHANNELS); ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + rate->min = rate->max = 48000; ++ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); + channels->min = 2; + channels->max = 2; + switch (cpu_dai->id) { +diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c +index dcbcd1a59a3aae..351e96163406b0 100644 +--- a/sound/soc/stm/stm32_sai_sub.c ++++ b/sound/soc/stm/stm32_sai_sub.c +@@ -551,6 +551,14 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai, + struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); + int ret; + ++ /* ++ * The mclk rate is determined at runtime from the audio stream rate. ++ * Skip calls to the set_sysclk callback that are not relevant during the ++ * initialization phase. 
++ */ ++ if (!snd_soc_card_is_instantiated(cpu_dai->component->card)) ++ return 0; ++ + if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) { + ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, + SAI_XCR1_NODIV, +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c +index f71f6ff3e2b0f7..b05ad2eb623db6 100644 +--- a/sound/usb/endpoint.c ++++ b/sound/usb/endpoint.c +@@ -1383,6 +1383,11 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, + ep->sample_rem = ep->cur_rate % ep->pps; + ep->packsize[0] = ep->cur_rate / ep->pps; + ep->packsize[1] = (ep->cur_rate + (ep->pps - 1)) / ep->pps; ++ if (ep->packsize[1] > ep->maxpacksize) { ++ usb_audio_dbg(chip, "Too small maxpacksize %u for rate %u / pps %u\n", ++ ep->maxpacksize, ep->cur_rate, ep->pps); ++ return -EINVAL; ++ } + + /* calculate the frequency in 16.16 format */ + ep->freqm = ep->freqn; +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index b8fa0a866153b0..787cdeddbdf443 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -1191,6 +1191,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + cval->res = 1; + } + break; ++ case USB_ID(0x3302, 0x12db): /* MOONDROP Quark2 */ ++ if (!strcmp(kctl->id.name, "PCM Playback Volume")) { ++ usb_audio_info(chip, ++ "set volume quirk for MOONDROP Quark2\n"); ++ cval->min = -14208; /* Mute under it */ ++ } ++ break; + } + } + +@@ -3072,6 +3079,8 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, + int i; + + assoc = usb_ifnum_to_if(dev, ctrlif)->intf_assoc; ++ if (!assoc) ++ return -EINVAL; + + /* Detect BADD capture/playback channels from AS EP descriptors */ + for (i = 0; i < assoc->bInterfaceCount; i++) { +diff --git a/sound/usb/mixer_s1810c.c b/sound/usb/mixer_s1810c.c +index fac4bbc6b27577..2413a6d96971cb 100644 +--- a/sound/usb/mixer_s1810c.c ++++ b/sound/usb/mixer_s1810c.c +@@ -93,6 +93,7 @@ struct s1810c_ctl_packet { + + #define SC1810C_CTL_LINE_SW 0 + #define SC1810C_CTL_MUTE_SW 1 ++#define SC1824C_CTL_MONO_SW 2 + #define SC1810C_CTL_AB_SW 3 + #define SC1810C_CTL_48V_SW 4 + +@@ -123,6 +124,7 @@ struct s1810c_state_packet { + #define SC1810C_STATE_48V_SW 58 + #define SC1810C_STATE_LINE_SW 59 + #define SC1810C_STATE_MUTE_SW 60 ++#define SC1824C_STATE_MONO_SW 61 + #define SC1810C_STATE_AB_SW 62 + + struct s1810_mixer_state { +@@ -181,7 +183,7 @@ snd_sc1810c_get_status_field(struct usb_device *dev, + + pkt_out.fields[SC1810C_STATE_F1_IDX] = SC1810C_SET_STATE_F1; + pkt_out.fields[SC1810C_STATE_F2_IDX] = SC1810C_SET_STATE_F2; +- ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), ++ ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), + SC1810C_SET_STATE_REQ, + SC1810C_SET_STATE_REQTYPE, + (*seqnum), 0, &pkt_out, sizeof(pkt_out)); +@@ -502,6 +504,15 @@ static const struct snd_kcontrol_new snd_s1810c_mute_sw = { + .private_value = (SC1810C_STATE_MUTE_SW | SC1810C_CTL_MUTE_SW << 8) + }; + ++static const struct snd_kcontrol_new snd_s1824c_mono_sw = { ++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, ++ .name = "Mono Main Out Switch", ++ .info = snd_ctl_boolean_mono_info, ++ .get = snd_s1810c_switch_get, ++ .put = snd_s1810c_switch_set, ++ .private_value = (SC1824C_STATE_MONO_SW | SC1824C_CTL_MONO_SW << 8) ++}; ++ + static const struct snd_kcontrol_new snd_s1810c_48v_sw = { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "48V Phantom Power On Mic Inputs Switch", +@@ -588,8 +599,17 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer) + if (ret < 0) + return ret; + +- ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw); +- if (ret < 
0) +- return ret; ++ // The 1824c has a Mono Main switch instead of a ++ // A/B select switch. ++ if (mixer->chip->usb_id == USB_ID(0x194f, 0x010d)) { ++ ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw); ++ if (ret < 0) ++ return ret; ++ } else if (mixer->chip->usb_id == USB_ID(0x194f, 0x010c)) { ++ ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw); ++ if (ret < 0) ++ return ret; ++ } ++ + return ret; + } +diff --git a/sound/usb/validate.c b/sound/usb/validate.c +index a0d55b77c9941d..4bb4893f6e74f7 100644 +--- a/sound/usb/validate.c ++++ b/sound/usb/validate.c +@@ -266,7 +266,11 @@ static const struct usb_desc_validator audio_validators[] = { + FUNC(UAC_VERSION_2, UAC_MIXER_UNIT, validate_mixer_unit), + FUNC(UAC_VERSION_2, UAC_SELECTOR_UNIT, validate_selector_unit), + FUNC(UAC_VERSION_2, UAC_FEATURE_UNIT, validate_uac2_feature_unit), +- /* UAC_VERSION_2, UAC2_EFFECT_UNIT: not implemented yet */ ++ /* just a stop-gap, it should be a proper function for the array ++ * once if the unit is really parsed/used ++ */ ++ FIXED(UAC_VERSION_2, UAC2_EFFECT_UNIT, ++ struct uac2_effect_unit_descriptor), + FUNC(UAC_VERSION_2, UAC2_PROCESSING_UNIT_V2, validate_processing_unit), + FUNC(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2, validate_processing_unit), + FIXED(UAC_VERSION_2, UAC2_CLOCK_SOURCE, +@@ -286,7 +290,8 @@ static const struct usb_desc_validator audio_validators[] = { + FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit), + FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit), + FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit), +- /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */ ++ FIXED(UAC_VERSION_3, UAC3_EFFECT_UNIT, ++ struct uac2_effect_unit_descriptor), /* sharing the same struct */ + FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit), + FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit), + FIXED(UAC_VERSION_3, UAC3_CLOCK_SOURCE, +diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c +index 1b7f6971460419..42c7b70f6996cb 100644 +--- a/tools/bpf/bpftool/btf_dumper.c ++++ b/tools/bpf/bpftool/btf_dumper.c +@@ -38,7 +38,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, + __u32 info_len = sizeof(info); + const char *prog_name = NULL; + struct btf *prog_btf = NULL; +- struct bpf_func_info finfo; ++ struct bpf_func_info finfo = {}; + __u32 finfo_rec_size; + char prog_str[1024]; + int err; +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c +index 174e076e56af2a..90fa7a8d0f4b36 100644 +--- a/tools/bpf/bpftool/prog.c ++++ b/tools/bpf/bpftool/prog.c +@@ -2203,7 +2203,7 @@ static void profile_print_readings(void) + + static char *profile_target_name(int tgt_fd) + { +- struct bpf_func_info func_info; ++ struct bpf_func_info func_info = {}; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + const struct btf_type *t; +diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h +index 210c13b1b8570b..926f3117d8e4fd 100644 +--- a/tools/include/linux/bitmap.h ++++ b/tools/include/linux/bitmap.h +@@ -3,6 +3,7 @@ + #define _TOOLS_LINUX_BITMAP_H + + #include ++#include + #include + #include + #include +diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h +index 1c13f8e88833b4..66b925bd954eb5 100644 +--- a/tools/lib/bpf/bpf_tracing.h ++++ b/tools/lib/bpf/bpf_tracing.h +@@ -311,7 +311,7 @@ struct pt_regs___arm64 { + #define __PT_RET_REG regs[31] + #define __PT_FP_REG __unsupported__ + #define __PT_RC_REG gpr[3] +-#define 
__PT_SP_REG sp ++#define __PT_SP_REG gpr[1] + #define __PT_IP_REG nip + + #elif defined(bpf_target_sparc) +diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile +index 8890fd57b110cc..8d21ea1950a310 100644 +--- a/tools/lib/thermal/Makefile ++++ b/tools/lib/thermal/Makefile +@@ -59,8 +59,12 @@ else + CFLAGS := -g -Wall + endif + ++NL3_CFLAGS = $(shell pkg-config --cflags libnl-3.0 2>/dev/null) ++ifeq ($(NL3_CFLAGS),) ++NL3_CFLAGS = -I/usr/include/libnl3 ++endif ++ + INCLUDES = \ +--I/usr/include/libnl3 \ + -I$(srctree)/tools/lib/thermal/include \ + -I$(srctree)/tools/lib/ \ + -I$(srctree)/tools/include \ +@@ -72,6 +76,7 @@ INCLUDES = \ + override CFLAGS += $(EXTRA_WARNINGS) + override CFLAGS += -Werror -Wall + override CFLAGS += -fPIC ++override CFLAGS += $(NL3_CFLAGS) + override CFLAGS += $(INCLUDES) + override CFLAGS += -fvisibility=hidden + override CFGLAS += -Wl,-L. +@@ -147,7 +152,7 @@ endef + install_lib: libs + $(call QUIET_INSTALL, $(LIBTHERMAL_ALL)) \ + $(call do_install_mkdir,$(libdir_SQ)); \ +- cp -fpR $(LIBTHERMAL_ALL) $(DESTDIR)$(libdir_SQ) ++ cp -fR --preserve=mode,timestamp $(LIBTHERMAL_ALL) $(DESTDIR)$(libdir_SQ) + + install_headers: + $(call QUIET_INSTALL, headers) \ +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index 4f0bbebcb6d62d..ea24f21aafc3e3 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -1366,7 +1366,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map, + goto out_err; + } + } +- map__zput(new_node->map); + free(new_node); + } + +diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c +index 479c5971aa6da2..c15d0de12357fb 100644 +--- a/tools/power/cpupower/lib/cpuidle.c ++++ b/tools/power/cpupower/lib/cpuidle.c +@@ -231,6 +231,7 @@ int cpuidle_state_disable(unsigned int cpu, + { + char value[SYSFS_PATH_MAX]; + int bytes_written; ++ int len; + + if (cpuidle_state_count(cpu) <= idlestate) + return -1; +@@ -239,10 +240,10 @@ int cpuidle_state_disable(unsigned int cpu, + idlestate_value_files[IDLESTATE_DISABLE])) + return -2; + +- snprintf(value, SYSFS_PATH_MAX, "%u", disable); ++ len = snprintf(value, SYSFS_PATH_MAX, "%u", disable); + + bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable", +- value, sizeof(disable)); ++ value, len); + if (bytes_written) + return 0; + return -3; +diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c +index 7a2ef691b20e1d..c2a7af89a67bb0 100644 +--- a/tools/power/cpupower/lib/cpupower.c ++++ b/tools/power/cpupower/lib/cpupower.c +@@ -55,7 +55,7 @@ unsigned int cpupower_write_sysfs(const char *path, char *buf, size_t buflen) + if (numwritten < 1) { + perror(path); + close(fd); +- return -1; ++ return 0; + } + + close(fd); +diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c +index ebda9c366b2ba3..891738116c8b29 100644 +--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c ++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c +@@ -62,6 +62,7 @@ unsigned char turbo_update_value; + unsigned char update_hwp_epp; + unsigned char update_hwp_min; + unsigned char update_hwp_max; ++unsigned char hwp_limits_done_via_sysfs; + unsigned char update_hwp_desired; + unsigned char update_hwp_window; + unsigned char update_hwp_use_pkg; +@@ -630,7 +631,7 @@ void cmdline(int argc, char **argv) + */ + FILE *fopen_or_die(const char *path, const char *mode) + { +- FILE *filep = fopen(path, 
"r"); ++ FILE *filep = fopen(path, mode); + + if (!filep) + err(1, "%s: open failed", path); +@@ -644,7 +645,7 @@ void err_on_hypervisor(void) + char *buffer; + + /* On VMs /proc/cpuinfo contains a "flags" entry for hypervisor */ +- cpuinfo = fopen_or_die("/proc/cpuinfo", "ro"); ++ cpuinfo = fopen_or_die("/proc/cpuinfo", "r"); + + buffer = malloc(4096); + if (!buffer) { +@@ -951,8 +952,10 @@ int ratio_2_sysfs_khz(int ratio) + } + /* + * If HWP is enabled and cpufreq sysfs attribtes are present, +- * then update sysfs, so that it will not become +- * stale when we write to MSRs. ++ * then update via sysfs. The intel_pstate driver may modify (clip) ++ * this request, say, when HWP_CAP is outside of PLATFORM_INFO limits, ++ * and the driver-chosen value takes precidence. ++ * + * (intel_pstate's max_perf_pct and min_perf_pct will follow cpufreq, + * so we don't have to touch that.) + */ +@@ -1007,6 +1010,8 @@ int update_sysfs(int cpu) + if (update_hwp_max) + update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max); + ++ hwp_limits_done_via_sysfs = 1; ++ + return 0; + } + +@@ -1085,10 +1090,10 @@ int update_hwp_request(int cpu) + if (debug) + print_hwp_request(cpu, &req, "old: "); + +- if (update_hwp_min) ++ if (update_hwp_min && !hwp_limits_done_via_sysfs) + req.hwp_min = req_update.hwp_min; + +- if (update_hwp_max) ++ if (update_hwp_max && !hwp_limits_done_via_sysfs) + req.hwp_max = req_update.hwp_max; + + if (update_hwp_desired) +@@ -1166,13 +1171,18 @@ int update_hwp_request_pkg(int pkg) + + int enable_hwp_on_cpu(int cpu) + { +- unsigned long long msr; ++ unsigned long long old_msr, new_msr; ++ ++ get_msr(cpu, MSR_PM_ENABLE, &old_msr); ++ ++ if (old_msr & 1) ++ return 0; /* already enabled */ + +- get_msr(cpu, MSR_PM_ENABLE, &msr); +- put_msr(cpu, MSR_PM_ENABLE, 1); ++ new_msr = old_msr | 1; ++ put_msr(cpu, MSR_PM_ENABLE, new_msr); + + if (verbose) +- printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1); ++ printf("cpu%d: MSR_PM_ENABLE old: %llX new: %llX\n", cpu, old_msr, new_msr); + + return 0; + } +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile +index 5b61b8bb29f846..a670d5de302b96 100644 +--- a/tools/testing/selftests/Makefile ++++ b/tools/testing/selftests/Makefile +@@ -269,7 +269,7 @@ gen_tar: install + @echo "Created ${TAR_PATH}" + + clean: +- @for TARGET in $(TARGETS); do \ ++ @for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \ + BUILD_TARGET=$$BUILD/$$TARGET; \ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ + done; +diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c +index 4694422aa76c36..88e4aeab21b7bc 100644 +--- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c ++++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c +@@ -74,7 +74,7 @@ int main(int argc, char **argv) + + /* Let's try detach it before it was ever attached */ + ret = bpf_prog_detach2(progfd, lircfd, BPF_LIRC_MODE2); +- if (ret != -1 || errno != ENOENT) { ++ if (ret != -ENOENT) { + printf("bpf_prog_detach2 not attached should fail: %m\n"); + return 1; + } +diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh +index 2aa5a3445056ac..f4205823175a93 100755 +--- a/tools/testing/selftests/bpf/test_xsk.sh ++++ b/tools/testing/selftests/bpf/test_xsk.sh +@@ -203,4 +203,6 @@ done + + if [ $failures -eq 0 ]; then + echo "All tests successful!" 
++else ++ exit 1 + fi +diff --git a/tools/testing/selftests/drivers/net/netdevsim/Makefile b/tools/testing/selftests/drivers/net/netdevsim/Makefile +new file mode 100644 +index 00000000000000..50932e13cb5a8b +--- /dev/null ++++ b/tools/testing/selftests/drivers/net/netdevsim/Makefile +@@ -0,0 +1,21 @@ ++# SPDX-License-Identifier: GPL-2.0+ OR MIT ++ ++TEST_PROGS = devlink.sh \ ++ devlink_in_netns.sh \ ++ devlink_trap.sh \ ++ ethtool-coalesce.sh \ ++ ethtool-fec.sh \ ++ ethtool-pause.sh \ ++ ethtool-ring.sh \ ++ fib.sh \ ++ hw_stats_l3.sh \ ++ nexthop.sh \ ++ psample.sh \ ++ tc-mq-visibility.sh \ ++ udp_tunnel_nic.sh \ ++ ++TEST_FILES := \ ++ ethtool-common.sh ++# end of TEST_FILES ++ ++include ../../../lib.mk +diff --git a/tools/testing/selftests/drivers/net/netdevsim/settings b/tools/testing/selftests/drivers/net/netdevsim/settings +new file mode 100644 +index 00000000000000..a62d2fa1275c6b +--- /dev/null ++++ b/tools/testing/selftests/drivers/net/netdevsim/settings +@@ -0,0 +1 @@ ++timeout=600 +diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc +index 118247b8dd84d8..ed791b995a43fc 100644 +--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc ++++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc +@@ -20,6 +20,10 @@ sample_events() { + echo 0 > tracing_on + echo 0 > events/enable + ++# Clear functions caused by page cache; run sample_events twice ++sample_events ++sample_events ++ + echo "Get the most frequently calling function" + echo > trace + sample_events +diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c +index 890a81f4ff6184..67fcc99fbd6d34 100644 +--- a/tools/testing/selftests/iommu/iommufd.c ++++ b/tools/testing/selftests/iommu/iommufd.c +@@ -1728,6 +1728,8 @@ TEST_F(vfio_compat_mock_domain, map) + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); + ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size); ++ /* Unmap of empty is success */ ++ ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); + + /* UNMAP_FLAG_ALL requres 0 iova/size */ + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); +diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh +index d32a14ba069ac5..5d8f50cd38b7ed 100755 +--- a/tools/testing/selftests/net/fcnal-test.sh ++++ b/tools/testing/selftests/net/fcnal-test.sh +@@ -194,7 +194,7 @@ show_hint() + kill_procs() + { + killall nettest ping ping6 >/dev/null 2>&1 +- sleep 1 ++ slowwait 2 sh -c 'test -z "$(pgrep '"'^(nettest|ping|ping6)$'"')"' + } + + do_run_cmd() +@@ -423,6 +423,8 @@ create_ns() + ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1 + ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1 + ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1 ++ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.accept_dad=0 ++ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.accept_dad=0 + } + + # create veth pair to connect namespaces and apply addresses. +@@ -865,7 +867,7 @@ ipv4_tcp_md5_novrf() + # basic use case + log_start + run_cmd nettest -s -M ${MD5_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 
0 "MD5: Single address config" + +@@ -873,7 +875,7 @@ ipv4_tcp_md5_novrf() + log_start + show_hint "Should timeout due to MD5 mismatch" + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: Server no config, client uses password" + +@@ -881,7 +883,7 @@ ipv4_tcp_md5_novrf() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -s -M ${MD5_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: Client uses wrong password" + +@@ -889,7 +891,7 @@ ipv4_tcp_md5_novrf() + log_start + show_hint "Should timeout due to MD5 mismatch" + run_cmd nettest -s -M ${MD5_PW} -m ${NSB_LO_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: Client address does not match address configured with password" + +@@ -900,7 +902,7 @@ ipv4_tcp_md5_novrf() + # client in prefix + log_start + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: Prefix config" + +@@ -908,7 +910,7 @@ ipv4_tcp_md5_novrf() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: Prefix config, client uses wrong password" + +@@ -916,7 +918,7 @@ ipv4_tcp_md5_novrf() + log_start + show_hint "Should timeout due to MD5 mismatch" + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -c ${NSB_LO_IP} -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: Prefix config, client address not in configured prefix" + } +@@ -933,7 +935,7 @@ ipv4_tcp_md5() + # basic use case + log_start + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Single address config" + +@@ -941,7 +943,7 @@ ipv4_tcp_md5() + log_start + show_hint "Should timeout since server does not have MD5 auth" + run_cmd nettest -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Server no config, client uses password" + +@@ -949,7 +951,7 @@ ipv4_tcp_md5() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Client uses wrong password" + +@@ -957,7 +959,7 @@ ipv4_tcp_md5() + log_start + show_hint "Should timeout since server config differs from client" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_LO_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Client address does not match address configured with password" + +@@ -968,7 +970,7 @@ ipv4_tcp_md5() + # client in prefix + log_start + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 
0 "MD5: VRF: Prefix config" + +@@ -976,7 +978,7 @@ ipv4_tcp_md5() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Prefix config, client uses wrong password" + +@@ -984,7 +986,7 @@ ipv4_tcp_md5() + log_start + show_hint "Should timeout since client address is outside of prefix" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -c ${NSB_LO_IP} -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Prefix config, client address not in configured prefix" + +@@ -995,14 +997,14 @@ ipv4_tcp_md5() + log_start + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF" + + log_start + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF" + +@@ -1010,7 +1012,7 @@ ipv4_tcp_md5() + show_hint "Should timeout since client in default VRF uses VRF password" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF with VRF pw" + +@@ -1018,21 +1020,21 @@ ipv4_tcp_md5() + show_hint "Should timeout since client in VRF uses default VRF password" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF with default VRF pw" + + log_start + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF" + + log_start + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF" + +@@ -1040,7 +1042,7 @@ ipv4_tcp_md5() + show_hint "Should timeout since client in default VRF uses VRF password" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 
2 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF with VRF pw" + +@@ -1048,7 +1050,7 @@ ipv4_tcp_md5() + show_hint "Should timeout since client in VRF uses default VRF password" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} & + run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF with default VRF pw" + +@@ -1072,14 +1074,14 @@ test_ipv4_md5_vrf__vrf_server__no_bind_ifindex() + log_start + show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection" + + log_start + show_hint "Binding both the socket and the key is not required but it works" + run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection" + } +@@ -1093,25 +1095,25 @@ test_ipv4_md5_vrf__global_server__bind_ifindex0() + + log_start + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Global server, Key bound to ifindex=0 rejects VRF connection" + + log_start + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection" + log_start + + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection" + + log_start + run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection" + +@@ -1183,7 +1185,7 @@ ipv4_tcp_novrf() + do + log_start + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Global server" + done +@@ -1191,7 +1193,7 @@ ipv4_tcp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Device server" + +@@ -1211,13 +1213,13 @@ ipv4_tcp_novrf() + do + log_start + run_cmd_nsb nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -r ${a} -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client" + + log_start + run_cmd_nsb nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + +@@ -1239,7 +1241,7 @@ ipv4_tcp_novrf() + do + log_start + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -0 ${a} -1 ${a} + log_test_addr ${a} $? 
0 "Global server, local connection" + done +@@ -1247,7 +1249,7 @@ ipv4_tcp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + +@@ -1256,7 +1258,7 @@ ipv4_tcp_novrf() + log_start + show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope" + run_cmd nettest -s -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "Device server, unbound client, local connection" + done +@@ -1264,7 +1266,7 @@ ipv4_tcp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -0 ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + +@@ -1273,7 +1275,7 @@ ipv4_tcp_novrf() + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, device client, local connection" + done +@@ -1281,7 +1283,7 @@ ipv4_tcp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -d ${NSA_DEV} -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local connection" + +@@ -1313,19 +1315,19 @@ ipv4_tcp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 1 "Global server" + + log_start + run_cmd nettest -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + log_start + run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Device server" + +@@ -1342,7 +1344,7 @@ ipv4_tcp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, local connection" + +@@ -1364,14 +1366,14 @@ ipv4_tcp_vrf() + log_start + show_hint "client socket should be bound to VRF" + run_cmd nettest -s -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + show_hint "client socket should be bound to VRF" + run_cmd nettest -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 0 "VRF server" + +@@ -1386,7 +1388,7 @@ ipv4_tcp_vrf() + log_start + show_hint "client socket should be bound to device" + run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 
0 "Device server" + +@@ -1396,7 +1398,7 @@ ipv4_tcp_vrf() + log_start + show_hint "Should fail 'Connection refused' since client is not bound to VRF" + run_cmd nettest -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "Global server, local connection" + done +@@ -1408,13 +1410,13 @@ ipv4_tcp_vrf() + do + log_start + run_cmd_nsb nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -r ${a} -d ${VRF} + log_test_addr ${a} $? 0 "Client, VRF bind" + + log_start + run_cmd_nsb nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + +@@ -1433,7 +1435,7 @@ ipv4_tcp_vrf() + do + log_start + run_cmd nettest -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local connection" + done +@@ -1441,26 +1443,26 @@ ipv4_tcp_vrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local connection" + + log_start + show_hint "Should fail 'No route to host' since client is out of VRF scope" + run_cmd nettest -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} + log_test_addr ${a} $? 1 "VRF server, unbound client, local connection" + + log_start + run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "Device server, VRF client, local connection" + + log_start + run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local connection" + } +@@ -1499,7 +1501,7 @@ ipv4_udp_novrf() + do + log_start + run_cmd nettest -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + +@@ -1512,7 +1514,7 @@ ipv4_udp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Device server" + +@@ -1523,31 +1525,31 @@ ipv4_udp_novrf() + do + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -D -r ${a} -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client" + + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client, device bind" + + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -C -0 ${NSA_IP} + log_test_addr ${a} $? 0 "Client, device send via cmsg" + + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP} + log_test_addr ${a} $? 
0 "Client, device bind via IP_UNICAST_IF" + + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP} -U + log_test_addr ${a} $? 0 "Client, device bind via IP_UNICAST_IF, with connect()" + +@@ -1570,7 +1572,7 @@ ipv4_udp_novrf() + do + log_start + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} -0 ${a} -1 ${a} + log_test_addr ${a} $? 0 "Global server, local connection" + done +@@ -1578,7 +1580,7 @@ ipv4_udp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + +@@ -1587,7 +1589,7 @@ ipv4_udp_novrf() + log_start + show_hint "Should fail 'Connection refused' since address is out of device scope" + run_cmd nettest -s -D -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} + log_test_addr ${a} $? 1 "Device server, unbound client, local connection" + done +@@ -1595,25 +1597,25 @@ ipv4_udp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + + log_start + run_cmd nettest -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -C -r ${a} + log_test_addr ${a} $? 0 "Global server, device send via cmsg, local connection" + + log_start + run_cmd nettest -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -S -r ${a} + log_test_addr ${a} $? 0 "Global server, device client via IP_UNICAST_IF, local connection" + + log_start + run_cmd nettest -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -S -r ${a} -U + log_test_addr ${a} $? 0 "Global server, device client via IP_UNICAST_IF, local connection, with connect()" + +@@ -1626,28 +1628,28 @@ ipv4_udp_novrf() + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 2 "Global server, device client, local connection" + + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -C + log_test_addr ${a} $? 1 "Global server, device send via cmsg, local connection" + + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S + log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection" + + log_start + show_hint "Should fail since addresses on loopback are out of device scope" + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -U + log_test_addr ${a} $? 
1 "Global server, device client via IP_UNICAST_IF, local connection, with connect()" + +@@ -1657,7 +1659,7 @@ ipv4_udp_novrf() + a=${NSA_IP} + log_start + run_cmd nettest -D -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + +@@ -1699,19 +1701,19 @@ ipv4_udp_vrf() + log_start + show_hint "Fails because ingress is in a VRF and global server is disabled" + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 1 "Global server" + + log_start + run_cmd nettest -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + log_start + run_cmd nettest -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + +@@ -1723,7 +1725,7 @@ ipv4_udp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server is out of scope" + run_cmd nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 1 "Global server, VRF client, local connection" + done +@@ -1731,26 +1733,26 @@ ipv4_udp_vrf() + a=${NSA_IP} + log_start + run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, enslaved device client, local connection" + + a=${NSA_IP} + log_start + run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn" + +@@ -1765,19 +1767,19 @@ ipv4_udp_vrf() + do + log_start + run_cmd nettest -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + run_cmd nettest -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + + log_start + run_cmd nettest -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + +@@ -1792,13 +1794,13 @@ ipv4_udp_vrf() + # + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -d ${VRF} -D -r ${NSB_IP} -1 ${NSA_IP} + log_test $? 0 "VRF client" + + log_start + run_cmd_nsb nettest -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -d ${NSA_DEV} -D -r ${NSB_IP} -1 ${NSA_IP} + log_test $? 
0 "Enslaved device client" + +@@ -1819,31 +1821,31 @@ ipv4_udp_vrf() + a=${NSA_IP} + log_start + run_cmd nettest -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local conn" + + log_start + run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn" + + log_start + run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn" + +@@ -1851,7 +1853,7 @@ ipv4_udp_vrf() + do + log_start + run_cmd nettest -D -s -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + done +@@ -1860,7 +1862,7 @@ ipv4_udp_vrf() + do + log_start + run_cmd nettest -s -D -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + done +@@ -2083,7 +2085,7 @@ ipv4_rt() + do + log_start + run_cmd nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2097,7 +2099,7 @@ ipv4_rt() + do + log_start + run_cmd nettest ${varg} -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2110,7 +2112,7 @@ ipv4_rt() + a=${NSA_IP} + log_start + run_cmd nettest ${varg} -s -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2124,7 +2126,7 @@ ipv4_rt() + # + log_start + run_cmd_nsb nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest ${varg} -d ${VRF} -r ${NSB_IP} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2135,7 +2137,7 @@ ipv4_rt() + + log_start + run_cmd_nsb nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${NSB_IP} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2151,7 +2153,7 @@ ipv4_rt() + do + log_start + run_cmd nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2165,7 +2167,7 @@ ipv4_rt() + do + log_start + run_cmd nettest ${varg} -I ${VRF} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2179,7 +2181,7 @@ ipv4_rt() + log_start + + run_cmd nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ 
-2190,7 +2192,7 @@ ipv4_rt() + + log_start + run_cmd nettest ${varg} -I ${VRF} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2201,7 +2203,7 @@ ipv4_rt() + + log_start + run_cmd nettest ${varg} -I ${NSA_DEV} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -2551,7 +2553,7 @@ ipv6_tcp_md5_novrf() + # basic use case + log_start + run_cmd nettest -6 -s -M ${MD5_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 0 "MD5: Single address config" + +@@ -2559,7 +2561,7 @@ ipv6_tcp_md5_novrf() + log_start + show_hint "Should timeout due to MD5 mismatch" + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: Server no config, client uses password" + +@@ -2567,7 +2569,7 @@ ipv6_tcp_md5_novrf() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -6 -s -M ${MD5_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: Client uses wrong password" + +@@ -2575,7 +2577,7 @@ ipv6_tcp_md5_novrf() + log_start + show_hint "Should timeout due to MD5 mismatch" + run_cmd nettest -6 -s -M ${MD5_PW} -m ${NSB_LO_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: Client address does not match address configured with password" + +@@ -2586,7 +2588,7 @@ ipv6_tcp_md5_novrf() + # client in prefix + log_start + run_cmd nettest -6 -s -M ${MD5_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 0 "MD5: Prefix config" + +@@ -2594,7 +2596,7 @@ ipv6_tcp_md5_novrf() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -6 -s -M ${MD5_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: Prefix config, client uses wrong password" + +@@ -2602,7 +2604,7 @@ ipv6_tcp_md5_novrf() + log_start + show_hint "Should timeout due to MD5 mismatch" + run_cmd nettest -6 -s -M ${MD5_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -c ${NSB_LO_IP6} -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: Prefix config, client address not in configured prefix" + } +@@ -2619,7 +2621,7 @@ ipv6_tcp_md5() + # basic use case + log_start + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Single address config" + +@@ -2627,7 +2629,7 @@ ipv6_tcp_md5() + log_start + show_hint "Should timeout since server does not have MD5 auth" + run_cmd nettest -6 -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 
2 "MD5: VRF: Server no config, client uses password" + +@@ -2635,7 +2637,7 @@ ipv6_tcp_md5() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Client uses wrong password" + +@@ -2643,7 +2645,7 @@ ipv6_tcp_md5() + log_start + show_hint "Should timeout since server config differs from client" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_LO_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Client address does not match address configured with password" + +@@ -2654,7 +2656,7 @@ ipv6_tcp_md5() + # client in prefix + log_start + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Prefix config" + +@@ -2662,7 +2664,7 @@ ipv6_tcp_md5() + log_start + show_hint "Should timeout since client uses wrong password" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Prefix config, client uses wrong password" + +@@ -2670,7 +2672,7 @@ ipv6_tcp_md5() + log_start + show_hint "Should timeout since client address is outside of prefix" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -c ${NSB_LO_IP6} -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Prefix config, client address not in configured prefix" + +@@ -2681,14 +2683,14 @@ ipv6_tcp_md5() + log_start + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF" + + log_start + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF" + +@@ -2696,7 +2698,7 @@ ipv6_tcp_md5() + show_hint "Should timeout since client in default VRF uses VRF password" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF with VRF pw" + +@@ -2704,21 +2706,21 @@ ipv6_tcp_md5() + show_hint "Should timeout since client in VRF uses default VRF password" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 
2 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF with default VRF pw" + + log_start + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF" + + log_start + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF" + +@@ -2726,7 +2728,7 @@ ipv6_tcp_md5() + show_hint "Should timeout since client in default VRF uses VRF password" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_PW} + log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF with VRF pw" + +@@ -2734,7 +2736,7 @@ ipv6_tcp_md5() + show_hint "Should timeout since client in VRF uses default VRF password" + run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} & + run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW} + log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF with default VRF pw" + +@@ -2762,7 +2764,7 @@ ipv6_tcp_novrf() + do + log_start + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server" + done +@@ -2783,7 +2785,7 @@ ipv6_tcp_novrf() + do + log_start + run_cmd_nsb nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Client" + done +@@ -2792,7 +2794,7 @@ ipv6_tcp_novrf() + do + log_start + run_cmd_nsb nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + done +@@ -2812,7 +2814,7 @@ ipv6_tcp_novrf() + do + log_start + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server, local connection" + done +@@ -2820,7 +2822,7 @@ ipv6_tcp_novrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + +@@ -2829,7 +2831,7 @@ ipv6_tcp_novrf() + log_start + show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope" + run_cmd nettest -6 -s -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Device server, unbound client, local connection" + done +@@ -2837,7 +2839,7 @@ ipv6_tcp_novrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 
0 "Global server, device client, local connection" + +@@ -2846,7 +2848,7 @@ ipv6_tcp_novrf() + log_start + show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope" + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, device client, local connection" + done +@@ -2855,7 +2857,7 @@ ipv6_tcp_novrf() + do + log_start + run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + done +@@ -2888,7 +2890,7 @@ ipv6_tcp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Global server" + done +@@ -2897,7 +2899,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done +@@ -2906,7 +2908,7 @@ ipv6_tcp_vrf() + a=${NSA_LINKIP6}%${NSB_DEV} + log_start + run_cmd nettest -6 -s -I ${VRF} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + +@@ -2914,7 +2916,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Device server" + done +@@ -2933,7 +2935,7 @@ ipv6_tcp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server with VRF is disabled" + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, local connection" + +@@ -2954,7 +2956,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server" + done +@@ -2963,7 +2965,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done +@@ -2972,13 +2974,13 @@ ipv6_tcp_vrf() + a=${NSA_LINKIP6}%${NSB_DEV} + log_start + run_cmd nettest -6 -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + run_cmd nettest -6 -s -I ${VRF} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "VRF server" + +@@ -2986,7 +2988,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 0 "Device server" + done +@@ -3006,7 +3008,7 @@ ipv6_tcp_vrf() + log_start + show_hint "Fails 'Connection refused' since client is not in VRF" + run_cmd nettest -6 -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 
1 "Global server, local connection" + done +@@ -3019,7 +3021,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd_nsb nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${VRF} + log_test_addr ${a} $? 0 "Client, VRF bind" + done +@@ -3028,7 +3030,7 @@ ipv6_tcp_vrf() + log_start + show_hint "Fails since VRF device does not allow linklocal addresses" + run_cmd_nsb nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${VRF} + log_test_addr ${a} $? 1 "Client, VRF bind" + +@@ -3036,7 +3038,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd_nsb nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 0 "Client, device bind" + done +@@ -3061,7 +3063,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local connection" + done +@@ -3069,7 +3071,7 @@ ipv6_tcp_vrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local connection" + +@@ -3077,13 +3079,13 @@ ipv6_tcp_vrf() + log_start + show_hint "Should fail since unbound client is out of VRF scope" + run_cmd nettest -6 -s -I ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} + log_test_addr ${a} $? 1 "VRF server, unbound client, local connection" + + log_start + run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${VRF} -0 ${a} + log_test_addr ${a} $? 0 "Device server, VRF client, local connection" + +@@ -3091,7 +3093,7 @@ ipv6_tcp_vrf() + do + log_start + run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local connection" + done +@@ -3131,13 +3133,13 @@ ipv6_udp_novrf() + do + log_start + run_cmd nettest -6 -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Device server" + done +@@ -3145,7 +3147,7 @@ ipv6_udp_novrf() + a=${NSA_LO_IP6} + log_start + run_cmd nettest -6 -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + +@@ -3155,7 +3157,7 @@ ipv6_udp_novrf() + #log_start + #show_hint "Should fail since loopback address is out of scope" + #run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- #sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + #run_cmd_nsb nettest -6 -D -r ${a} + #log_test_addr ${a} $? 1 "Device server" + +@@ -3175,25 +3177,25 @@ ipv6_udp_novrf() + do + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -r ${a} -0 ${NSA_IP6} + log_test_addr ${a} $? 
0 "Client" + + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client, device bind" + + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -C -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client, device send via cmsg" + + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP6} + log_test_addr ${a} $? 0 "Client, device bind via IPV6_UNICAST_IF" + +@@ -3215,7 +3217,7 @@ ipv6_udp_novrf() + do + log_start + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} -0 ${a} -1 ${a} + log_test_addr ${a} $? 0 "Global server, local connection" + done +@@ -3223,7 +3225,7 @@ ipv6_udp_novrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -D -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Device server, unbound client, local connection" + +@@ -3232,7 +3234,7 @@ ipv6_udp_novrf() + log_start + show_hint "Should fail 'Connection refused' since address is out of device scope" + run_cmd nettest -6 -s -D -I ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "Device server, local connection" + done +@@ -3240,19 +3242,19 @@ ipv6_udp_novrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Global server, device client, local connection" + + log_start + run_cmd nettest -6 -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -C -r ${a} + log_test_addr ${a} $? 0 "Global server, device send via cmsg, local connection" + + log_start + run_cmd nettest -6 -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -S -r ${a} + log_test_addr ${a} $? 0 "Global server, device client via IPV6_UNICAST_IF, local connection" + +@@ -3261,28 +3263,28 @@ ipv6_udp_novrf() + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} + log_test_addr ${a} $? 1 "Global server, device client, local connection" + + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -C + log_test_addr ${a} $? 1 "Global server, device send via cmsg, local connection" + + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S + log_test_addr ${a} $? 
1 "Global server, device client via IP_UNICAST_IF, local connection" + + log_start + show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S -U + log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection, with connect()" + done +@@ -3290,7 +3292,7 @@ ipv6_udp_novrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -D -s -I ${NSA_DEV} -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} -0 ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + +@@ -3304,7 +3306,7 @@ ipv6_udp_novrf() + run_cmd_nsb ip -6 ro add ${NSA_IP6}/128 dev ${NSB_DEV} + log_start + run_cmd nettest -6 -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${NSA_IP6} + log_test $? 0 "UDP in - LLA to GUA" + +@@ -3328,7 +3330,7 @@ ipv6_udp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server is disabled" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 1 "Global server" + done +@@ -3337,7 +3339,7 @@ ipv6_udp_vrf() + do + log_start + run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done +@@ -3346,7 +3348,7 @@ ipv6_udp_vrf() + do + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + done +@@ -3368,7 +3370,7 @@ ipv6_udp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server is disabled" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 1 "Global server, VRF client, local conn" + done +@@ -3377,7 +3379,7 @@ ipv6_udp_vrf() + do + log_start + run_cmd nettest -6 -D -I ${VRF} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + done +@@ -3386,25 +3388,25 @@ ipv6_udp_vrf() + log_start + show_hint "Should fail 'Connection refused' since global server is disabled" + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 1 "Global server, device client, local conn" + + log_start + run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local conn" + + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn" + + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 
0 "Enslaved device server, device client, local conn" + +@@ -3419,7 +3421,7 @@ ipv6_udp_vrf() + do + log_start + run_cmd nettest -6 -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Global server" + done +@@ -3428,7 +3430,7 @@ ipv6_udp_vrf() + do + log_start + run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "VRF server" + done +@@ -3437,7 +3439,7 @@ ipv6_udp_vrf() + do + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${a} + log_test_addr ${a} $? 0 "Enslaved device server" + done +@@ -3455,7 +3457,7 @@ ipv6_udp_vrf() + # + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${NSB_IP6} + log_test $? 0 "VRF client" + +@@ -3466,7 +3468,7 @@ ipv6_udp_vrf() + + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_IP6} + log_test $? 0 "Enslaved device client" + +@@ -3481,13 +3483,13 @@ ipv6_udp_vrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + + #log_start + run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + +@@ -3495,13 +3497,13 @@ ipv6_udp_vrf() + a=${VRF_IP6} + log_start + run_cmd nettest -6 -D -s -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Global server, VRF client, local conn" + + log_start + run_cmd nettest -6 -D -I ${VRF} -s -3 ${VRF} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "VRF server, VRF client, local conn" + +@@ -3517,25 +3519,25 @@ ipv6_udp_vrf() + a=${NSA_IP6} + log_start + run_cmd nettest -6 -D -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Global server, device client, local conn" + + log_start + run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "VRF server, device client, local conn" + + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${VRF} -r ${a} + log_test_addr ${a} $? 0 "Device server, VRF client, local conn" + + log_start + run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} + log_test_addr ${a} $? 0 "Device server, device client, local conn" + +@@ -3547,7 +3549,7 @@ ipv6_udp_vrf() + # link local addresses + log_start + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -d ${NSB_DEV} -r ${NSA_LINKIP6} + log_test $? 
0 "Global server, linklocal IP" + +@@ -3558,7 +3560,7 @@ ipv6_udp_vrf() + + log_start + run_cmd_nsb nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_LINKIP6} + log_test $? 0 "Enslaved device client, linklocal IP" + +@@ -3569,7 +3571,7 @@ ipv6_udp_vrf() + + log_start + run_cmd nettest -6 -D -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSA_LINKIP6} + log_test $? 0 "Enslaved device client, local conn - linklocal IP" + +@@ -3582,7 +3584,7 @@ ipv6_udp_vrf() + run_cmd_nsb ip -6 ro add ${NSA_IP6}/128 dev ${NSB_DEV} + log_start + run_cmd nettest -6 -s -D & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 udp + run_cmd_nsb nettest -6 -D -r ${NSA_IP6} + log_test $? 0 "UDP in - LLA to GUA" + +@@ -3761,7 +3763,7 @@ ipv6_rt() + do + log_start + run_cmd nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3775,7 +3777,7 @@ ipv6_rt() + do + log_start + run_cmd nettest ${varg} -I ${VRF} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3789,7 +3791,7 @@ ipv6_rt() + do + log_start + run_cmd nettest ${varg} -I ${NSA_DEV} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${varg} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3804,7 +3806,7 @@ ipv6_rt() + # + log_start + run_cmd_nsb nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest ${varg} -d ${VRF} -r ${NSB_IP6} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3815,7 +3817,7 @@ ipv6_rt() + + log_start + run_cmd_nsb nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSB} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${NSB_IP6} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3832,7 +3834,7 @@ ipv6_rt() + do + log_start + run_cmd nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3846,7 +3848,7 @@ ipv6_rt() + do + log_start + run_cmd nettest ${varg} -I ${VRF} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${VRF} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3859,7 +3861,7 @@ ipv6_rt() + a=${NSA_IP6} + log_start + run_cmd nettest ${varg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3870,7 +3872,7 @@ ipv6_rt() + + log_start + run_cmd nettest ${varg} -I ${VRF} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3881,7 +3883,7 @@ ipv6_rt() + + log_start + run_cmd nettest ${varg} -I ${NSA_DEV} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} & + sleep 3 + run_cmd ip link del ${VRF} +@@ -3940,7 +3942,7 @@ netfilter_tcp_reset() + do + log_start + run_cmd nettest -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -r ${a} + log_test_addr ${a} $? 1 "Global server, reject with TCP-reset on Rx" + done +@@ -3958,7 +3960,7 @@ netfilter_icmp() + do + log_start + run_cmd nettest ${arg} -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest ${arg} -r ${a} + log_test_addr ${a} $? 
1 "Global ${stype} server, Rx reject icmp-port-unreach" + done +@@ -3997,7 +3999,7 @@ netfilter_tcp6_reset() + do + log_start + run_cmd nettest -6 -s & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 -r ${a} + log_test_addr ${a} $? 1 "Global server, reject with TCP-reset on Rx" + done +@@ -4015,7 +4017,7 @@ netfilter_icmp6() + do + log_start + run_cmd nettest -6 -s ${arg} & +- sleep 1 ++ wait_local_port_listen ${NSA} 12345 tcp + run_cmd_nsb nettest -6 ${arg} -r ${a} + log_test_addr ${a} $? 1 "Global ${stype} server, Rx reject icmp-port-unreach" + done +@@ -4211,12 +4213,12 @@ use_case_snat_on_vrf() + run_cmd ip6tables -t nat -A POSTROUTING -p tcp -m tcp --dport ${port} -j SNAT --to-source ${NSA_LO_IP6} -o ${VRF} + + run_cmd_nsb nettest -s -l ${NSB_IP} -p ${port} & +- sleep 1 ++ wait_local_port_listen ${NSB} ${port} tcp + run_cmd nettest -d ${VRF} -r ${NSB_IP} -p ${port} + log_test $? 0 "IPv4 TCP connection over VRF with SNAT" + + run_cmd_nsb nettest -6 -s -l ${NSB_IP6} -p ${port} & +- sleep 1 ++ wait_local_port_listen ${NSB} ${port} tcp + run_cmd nettest -6 -d ${VRF} -r ${NSB_IP6} -p ${port} + log_test $? 0 "IPv6 TCP connection over VRF with SNAT" + +diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh +index 9b5a63519b949e..6cde61f10fd0e2 100755 +--- a/tools/testing/selftests/net/forwarding/local_termination.sh ++++ b/tools/testing/selftests/net/forwarding/local_termination.sh +@@ -108,6 +108,8 @@ run_test() + local smac=$(mac_get $h1) + local rcv_dmac=$(mac_get $rcv_if_name) + ++ setup_wait ++ + tcpdump_start $rcv_if_name + + mc_route_prepare $h1 +diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c +index 30024d0ed37391..8dd6857e52cb56 100644 +--- a/tools/testing/selftests/net/gro.c ++++ b/tools/testing/selftests/net/gro.c +@@ -71,6 +71,12 @@ + #define MAX_PAYLOAD (IP_MAXPACKET - sizeof(struct tcphdr) - sizeof(struct ipv6hdr)) + #define NUM_LARGE_PKT (MAX_PAYLOAD / MSS) + #define MAX_HDR_LEN (ETH_HLEN + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) ++#define MIN_EXTHDR_SIZE 8 ++#define EXT_PAYLOAD_1 "\x00\x00\x00\x00\x00\x00" ++#define EXT_PAYLOAD_2 "\x11\x11\x11\x11\x11\x11" ++ ++#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) /* calculate IPv6 extension header len */ ++#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + + static const char *addr6_src = "fdaa::2"; + static const char *addr6_dst = "fdaa::1"; +@@ -104,7 +110,7 @@ static void setup_sock_filter(int fd) + const int dport_off = tcp_offset + offsetof(struct tcphdr, dest); + const int ethproto_off = offsetof(struct ethhdr, h_proto); + int optlen = 0; +- int ipproto_off; ++ int ipproto_off, opt_ipproto_off; + int next_off; + + if (proto == PF_INET) +@@ -116,14 +122,30 @@ static void setup_sock_filter(int fd) + if (strcmp(testname, "ip") == 0) { + if (proto == PF_INET) + optlen = sizeof(struct ip_timestamp); +- else +- optlen = sizeof(struct ip6_frag); ++ else { ++ BUILD_BUG_ON(sizeof(struct ip6_hbh) > MIN_EXTHDR_SIZE); ++ BUILD_BUG_ON(sizeof(struct ip6_dest) > MIN_EXTHDR_SIZE); ++ BUILD_BUG_ON(sizeof(struct ip6_frag) > MIN_EXTHDR_SIZE); ++ ++ /* same size for HBH and Fragment extension header types */ ++ optlen = MIN_EXTHDR_SIZE; ++ opt_ipproto_off = ETH_HLEN + sizeof(struct ipv6hdr) ++ + offsetof(struct ip6_ext, ip6e_nxt); ++ } + } + ++ /* this filter validates the following: ++ * - packet is IPv4/IPv6 according to the running test. ++ * - packet is TCP. 
Also handles the case of one extension header and then TCP. ++ * - checks the packet tcp dport equals to DPORT. Also handles the case of one ++ * extension header and then TCP. ++ */ + struct sock_filter filter[] = { + BPF_STMT(BPF_LD + BPF_H + BPF_ABS, ethproto_off), +- BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ntohs(ethhdr_proto), 0, 7), ++ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ntohs(ethhdr_proto), 0, 9), + BPF_STMT(BPF_LD + BPF_B + BPF_ABS, ipproto_off), ++ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_TCP, 2, 0), ++ BPF_STMT(BPF_LD + BPF_B + BPF_ABS, opt_ipproto_off), + BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_TCP, 0, 5), + BPF_STMT(BPF_LD + BPF_H + BPF_ABS, dport_off), + BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, DPORT, 2, 0), +@@ -576,6 +598,39 @@ static void add_ipv4_ts_option(void *buf, void *optpkt) + iph->check = checksum_fold(iph, sizeof(struct iphdr) + optlen, 0); + } + ++static void add_ipv6_exthdr(void *buf, void *optpkt, __u8 exthdr_type, char *ext_payload) ++{ ++ struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(optpkt + tcp_offset); ++ struct ipv6hdr *iph = (struct ipv6hdr *)(optpkt + ETH_HLEN); ++ char *exthdr_payload_start = (char *)(exthdr + 1); ++ ++ exthdr->hdrlen = 0; ++ exthdr->nexthdr = IPPROTO_TCP; ++ ++ memcpy(exthdr_payload_start, ext_payload, MIN_EXTHDR_SIZE - sizeof(*exthdr)); ++ ++ memcpy(optpkt, buf, tcp_offset); ++ memcpy(optpkt + tcp_offset + MIN_EXTHDR_SIZE, buf + tcp_offset, ++ sizeof(struct tcphdr) + PAYLOAD_LEN); ++ ++ iph->nexthdr = exthdr_type; ++ iph->payload_len = htons(ntohs(iph->payload_len) + MIN_EXTHDR_SIZE); ++} ++ ++static void send_ipv6_exthdr(int fd, struct sockaddr_ll *daddr, char *ext_data1, char *ext_data2) ++{ ++ static char buf[MAX_HDR_LEN + PAYLOAD_LEN]; ++ static char exthdr_pck[sizeof(buf) + MIN_EXTHDR_SIZE]; ++ ++ create_packet(buf, 0, 0, PAYLOAD_LEN, 0); ++ add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data1); ++ write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr); ++ ++ create_packet(buf, PAYLOAD_LEN * 1, 0, PAYLOAD_LEN, 0); ++ add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data2); ++ write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr); ++} ++ + /* IPv4 options shouldn't coalesce */ + static void send_ip_options(int fd, struct sockaddr_ll *daddr) + { +@@ -697,7 +752,7 @@ static void send_fragment6(int fd, struct sockaddr_ll *daddr) + create_packet(buf, PAYLOAD_LEN * i, 0, PAYLOAD_LEN, 0); + write_packet(fd, buf, bufpkt_len, daddr); + } +- ++ sleep(1); + create_packet(buf, PAYLOAD_LEN * 2, 0, PAYLOAD_LEN, 0); + memset(extpkt, 0, extpkt_len); + +@@ -760,6 +815,7 @@ static void check_recv_pkts(int fd, int *correct_payload, + vlog("}, Total %d packets\nReceived {", correct_num_pkts); + + while (1) { ++ ip_ext_len = 0; + pkt_size = recv(fd, buffer, IP_MAXPACKET + ETH_HLEN + 1, 0); + if (pkt_size < 0) + error(1, errno, "could not receive"); +@@ -767,7 +823,7 @@ static void check_recv_pkts(int fd, int *correct_payload, + if (iph->version == 4) + ip_ext_len = (iph->ihl - 5) * 4; + else if (ip6h->version == 6 && ip6h->nexthdr != IPPROTO_TCP) +- ip_ext_len = sizeof(struct ip6_frag); ++ ip_ext_len = MIN_EXTHDR_SIZE; + + tcph = (struct tcphdr *)(buffer + tcp_offset + ip_ext_len); + +@@ -802,6 +858,7 @@ static void check_recv_pkts(int fd, int *correct_payload, + + static void gro_sender(void) + { ++ const int fin_delay_us = 100 * 1000; + static char fin_pkt[MAX_HDR_LEN]; + struct sockaddr_ll daddr = {}; + int txfd = -1; +@@ -845,15 +902,22 @@ static void gro_sender(void) + 
write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + } else if (strcmp(testname, "tcp") == 0) { + send_changed_checksum(txfd, &daddr); ++ /* Adding sleep before sending FIN so that it is not ++ * received prior to other packets. ++ */ ++ usleep(fin_delay_us); + write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + + send_changed_seq(txfd, &daddr); ++ usleep(fin_delay_us); + write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + + send_changed_ts(txfd, &daddr); ++ usleep(fin_delay_us); + write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + + send_diff_opt(txfd, &daddr); ++ usleep(fin_delay_us); + write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + } else if (strcmp(testname, "ip") == 0) { + send_changed_ECN(txfd, &daddr); +@@ -880,7 +944,21 @@ static void gro_sender(void) + sleep(1); + write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + } else if (proto == PF_INET6) { ++ sleep(1); + send_fragment6(txfd, &daddr); ++ sleep(1); ++ write_packet(txfd, fin_pkt, total_hdr_len, &daddr); ++ ++ sleep(1); ++ /* send IPv6 packets with ext header with same payload */ ++ send_ipv6_exthdr(txfd, &daddr, EXT_PAYLOAD_1, EXT_PAYLOAD_1); ++ sleep(1); ++ write_packet(txfd, fin_pkt, total_hdr_len, &daddr); ++ ++ sleep(1); ++ /* send IPv6 packets with ext header with different payload */ ++ send_ipv6_exthdr(txfd, &daddr, EXT_PAYLOAD_1, EXT_PAYLOAD_2); ++ sleep(1); + write_packet(txfd, fin_pkt, total_hdr_len, &daddr); + } + } else if (strcmp(testname, "large") == 0) { +@@ -997,6 +1075,17 @@ static void gro_receiver(void) + */ + printf("fragmented ip6 doesn't coalesce: "); + correct_payload[0] = PAYLOAD_LEN * 2; ++ correct_payload[1] = PAYLOAD_LEN; ++ correct_payload[2] = PAYLOAD_LEN; ++ check_recv_pkts(rxfd, correct_payload, 3); ++ ++ printf("ipv6 with ext header does coalesce: "); ++ correct_payload[0] = PAYLOAD_LEN * 2; ++ check_recv_pkts(rxfd, correct_payload, 1); ++ ++ printf("ipv6 with ext header with different payloads doesn't coalesce: "); ++ correct_payload[0] = PAYLOAD_LEN; ++ correct_payload[1] = PAYLOAD_LEN; + check_recv_pkts(rxfd, correct_payload, 2); + } + } else if (strcmp(testname, "large") == 0) { +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c +index fc9eff0e89e226..20c29324b814f0 100644 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c +@@ -696,8 +696,14 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd, + + bw = do_rnd_write(peerfd, winfo->buf + winfo->off, winfo->len); + if (bw < 0) { +- if (cfg_rcv_trunc) +- return 0; ++ /* expected reset, continue to read */ ++ if (cfg_rcv_trunc && ++ (errno == ECONNRESET || ++ errno == EPIPE)) { ++ fds.events &= ~POLLOUT; ++ continue; ++ } ++ + perror("write"); + return 111; + } +@@ -723,8 +729,10 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd, + } + + if (fds.revents & (POLLERR | POLLNVAL)) { +- if (cfg_rcv_trunc) +- return 0; ++ if (cfg_rcv_trunc) { ++ fds.events &= ~(POLLERR | POLLNVAL); ++ continue; ++ } + fprintf(stderr, "Unexpected revents: " + "POLLERR/POLLNVAL(%x)\n", fds.revents); + return 5; +@@ -1419,7 +1427,7 @@ static void parse_opts(int argc, char **argv) + */ + if (cfg_truncate < 0) { + cfg_rcv_trunc = true; +- signal(SIGPIPE, handle_signal); ++ signal(SIGPIPE, SIG_IGN); + } + break; + case 'j': +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +index 3763ffa214d53f..f475b27185dec9 100755 +--- 
a/tools/testing/selftests/net/mptcp/mptcp_connect.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +@@ -516,7 +516,7 @@ do_transfer() + "${stat_synrx_now_l}" "${expect_synrx}" 1>&2 + retc=1 + fi +- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ] && [ ${stat_ooo_now} -eq 0 ]; then ++ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then + if [ ${stat_ooo_now} -eq 0 ]; then + printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \ + "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index b9cc3d51dc2857..250577b11a91b7 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -2361,7 +2361,7 @@ remove_tests() + if reset "remove single subflow"; then + pm_nl_set_limits $ns1 0 1 + pm_nl_set_limits $ns2 0 1 +- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup + addr_nr_ns2=-1 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 1 1 1 +@@ -2374,8 +2374,8 @@ remove_tests() + if reset "remove multiple subflows"; then + pm_nl_set_limits $ns1 0 2 + pm_nl_set_limits $ns2 0 2 +- pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow +- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup + addr_nr_ns2=-2 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 2 2 2 +@@ -2386,7 +2386,7 @@ remove_tests() + # single address, remove + if reset "remove single address"; then + pm_nl_set_limits $ns1 0 1 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup + pm_nl_set_limits $ns2 1 1 + addr_nr_ns1=-1 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 +@@ -2399,9 +2399,9 @@ remove_tests() + # subflow and signal, remove + if reset "remove subflow and signal"; then + pm_nl_set_limits $ns1 0 2 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup + pm_nl_set_limits $ns2 1 2 +- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup + addr_nr_ns1=-1 addr_nr_ns2=-1 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 2 2 2 +@@ -2413,10 +2413,10 @@ remove_tests() + # subflows and signal, remove + if reset "remove subflows and signal"; then + pm_nl_set_limits $ns1 0 3 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup + pm_nl_set_limits $ns2 1 3 +- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow +- pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup ++ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow,backup + addr_nr_ns1=-1 addr_nr_ns2=-2 speed=10 \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 3 3 3 +@@ -2428,9 +2428,9 @@ remove_tests() + # addresses remove + if reset "remove addresses"; then + pm_nl_set_limits $ns1 3 3 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250 +- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal +- pm_nl_add_endpoint $ns1 10.0.4.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup id 250 ++ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup ++ pm_nl_add_endpoint $ns1 10.0.4.1 flags signal,backup + pm_nl_set_limits $ns2 3 3 + addr_nr_ns1=-3 speed=10 \ + run_tests $ns1 $ns2 10.0.1.1 +@@ -2443,10 +2443,10 @@ remove_tests() + # invalid addresses remove + if reset "remove invalid 
addresses"; then + pm_nl_set_limits $ns1 3 3 +- pm_nl_add_endpoint $ns1 10.0.12.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.12.1 flags signal,backup + # broadcast IP: no packet for this address will be received on ns1 +- pm_nl_add_endpoint $ns1 224.0.0.1 flags signal +- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal ++ pm_nl_add_endpoint $ns1 224.0.0.1 flags signal,backup ++ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup + pm_nl_set_limits $ns2 2 2 + addr_nr_ns1=-3 speed=10 \ + run_tests $ns1 $ns2 10.0.1.1 +@@ -2459,10 +2459,10 @@ remove_tests() + # subflows and signal, flush + if reset "flush subflows and signal"; then + pm_nl_set_limits $ns1 0 3 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup + pm_nl_set_limits $ns2 1 3 +- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow +- pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup ++ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow,backup + addr_nr_ns1=-8 addr_nr_ns2=-8 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 3 3 3 +@@ -2475,9 +2475,9 @@ remove_tests() + if reset "flush subflows"; then + pm_nl_set_limits $ns1 3 3 + pm_nl_set_limits $ns2 3 3 +- pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow id 150 +- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow +- pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow ++ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup id 150 ++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup ++ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow,backup + addr_nr_ns1=-8 addr_nr_ns2=-8 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 3 3 3 +@@ -2494,9 +2494,9 @@ remove_tests() + # addresses flush + if reset "flush addresses"; then + pm_nl_set_limits $ns1 3 3 +- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250 +- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal +- pm_nl_add_endpoint $ns1 10.0.4.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup id 250 ++ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup ++ pm_nl_add_endpoint $ns1 10.0.4.1 flags signal,backup + pm_nl_set_limits $ns2 3 3 + addr_nr_ns1=-8 addr_nr_ns2=-8 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 +@@ -2509,9 +2509,9 @@ remove_tests() + # invalid addresses flush + if reset "flush invalid addresses"; then + pm_nl_set_limits $ns1 3 3 +- pm_nl_add_endpoint $ns1 10.0.12.1 flags signal +- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal +- pm_nl_add_endpoint $ns1 10.0.14.1 flags signal ++ pm_nl_add_endpoint $ns1 10.0.12.1 flags signal,backup ++ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup ++ pm_nl_add_endpoint $ns1 10.0.14.1 flags signal,backup + pm_nl_set_limits $ns2 3 3 + addr_nr_ns1=-8 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 +diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c +index 404a2ce759ab61..ca0d9a5a9e08c1 100644 +--- a/tools/testing/selftests/net/psock_tpacket.c ++++ b/tools/testing/selftests/net/psock_tpacket.c +@@ -22,6 +22,7 @@ + * - TPACKET_V3: RX_RING + */ + ++#undef NDEBUG + #include + #include + #include +@@ -33,7 +34,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -785,7 +785,7 @@ static int test_kernel_bit_width(void) + + static int test_user_bit_width(void) + { +- return __WORDSIZE; ++ return sizeof(long) * 8; + } + + static const char *tpacket_str[] = { +diff --git a/tools/testing/selftests/net/traceroute.sh b/tools/testing/selftests/net/traceroute.sh +index de9ca97abc3062..9cb5e96e64333e 
100755 +--- a/tools/testing/selftests/net/traceroute.sh ++++ b/tools/testing/selftests/net/traceroute.sh +@@ -209,11 +209,6 @@ setup_traceroute6() + + run_traceroute6() + { +- if [ ! -x "$(command -v traceroute6)" ]; then +- echo "SKIP: Could not run IPV6 test without traceroute6" +- return +- fi +- + setup_traceroute6 + + # traceroute6 host-2 from host-1 (expects 2000:102::2) +@@ -278,11 +273,6 @@ setup_traceroute() + + run_traceroute() + { +- if [ ! -x "$(command -v traceroute)" ]; then +- echo "SKIP: Could not run IPV4 test without traceroute" +- return +- fi +- + setup_traceroute + + # traceroute host-2 from host-1 (expects 1.0.1.1). Takes a while. +@@ -316,6 +306,9 @@ do + esac + done + ++require_command traceroute6 ++require_command traceroute ++ + run_tests + + printf "\nTests passed: %3d\n" ${nsuccess} +diff --git a/tools/testing/selftests/user_events/perf_test.c b/tools/testing/selftests/user_events/perf_test.c +index 5288e768b207a9..68625362add283 100644 +--- a/tools/testing/selftests/user_events/perf_test.c ++++ b/tools/testing/selftests/user_events/perf_test.c +@@ -236,7 +236,7 @@ TEST_F(user, perf_empty_events) { + ASSERT_EQ(1 << reg.enable_bit, self->check); + + /* Ensure write shows up at correct offset */ +- ASSERT_NE(-1, write(self->data_fd, ®.write_index, ++ ASSERT_NE(-1, write(self->data_fd, (void *)®.write_index, + sizeof(reg.write_index))); + val = (void *)(((char *)perf_page) + perf_page->data_offset); + ASSERT_EQ(PERF_RECORD_SAMPLE, *val); +diff --git a/usr/include/headers_check.pl b/usr/include/headers_check.pl +index b6aec5e4365f9b..682980781eb37a 100755 +--- a/usr/include/headers_check.pl ++++ b/usr/include/headers_check.pl +@@ -160,6 +160,8 @@ sub check_sizetypes + if (my $included = ($line =~ /^\s*#\s*include\s+[<"](\S+)[>"]/)[0]) { + check_include_typesh($included); + } ++ # strip single-line comments, as types may be referenced within them ++ $line =~ s@/\*.*?\*/@@; + if ($line =~ m/__[us](8|16|32|64)\b/) { + printf STDERR "$filename:$lineno: " . + "found __[us]{8,16,32,64} type " . diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.117-118.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.117-118.patch new file mode 100644 index 0000000000..fb1f620749 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.117-118.patch @@ -0,0 +1,2950 @@ +diff --git a/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml +index 19d47fd414bc06..ce04d2eadec9d5 100644 +--- a/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml ++++ b/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml +@@ -50,18 +50,20 @@ patternProperties: + groups: + description: + Name of the pin group to use for the functions. 
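The headers_check.pl hunk above strips single-line /* ... */ comments from each line before scanning it for __[us]{8,16,32,64} tokens, so a type that is only mentioned inside a comment no longer trips the exported-header check. A minimal C sketch of the same stripping step — the helper name and the user-space setting are illustrative assumptions, not kernel or kbuild code:

    #include <stdio.h>
    #include <string.h>

    /*
     * Blank out every comment that opens and closes on the same line, so a
     * later token scan does not see identifiers mentioned only in comments.
     * A comment that opens but does not close here is left alone, matching
     * the non-greedy s@/\*.*?\*/@@ substitution in the perl hunk.
     */
    static void strip_single_line_comments(char *line)
    {
        char *start, *end;

        while ((start = strstr(line, "/*")) != NULL) {
            end = strstr(start + 2, "*/");
            if (!end)
                break;  /* continues on a later line */
            memmove(start, end + 2, strlen(end + 2) + 1);
        }
    }

    int main(void)
    {
        char line[] = "int x; /* stores a __u32 count */";

        strip_single_line_comments(line);
        printf("%s\n", line);   /* prints: "int x; " */
        return 0;
    }
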
+- $ref: /schemas/types.yaml#/definitions/string +- enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp, +- i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp, +- spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp, +- spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp, +- uart0_grp, uart1_grp, uart2_grp, uart3_grp, +- pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp, +- pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp, +- pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp, +- pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp, +- pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp, +- pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp] ++ items: ++ enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp, ++ i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp, ++ spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp, ++ spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp, ++ uart0_grp, uart1_grp, uart2_grp, uart3_grp, ++ pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp, ++ pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp, ++ pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp, ++ pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp, ++ pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp, ++ pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp] ++ minItems: 1 ++ maxItems: 8 + + drive-strength: + enum: [2, 4, 6, 8, 16, 24, 32] +diff --git a/Makefile b/Makefile +index a849399cebf8fd..b13776e2b581c4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 117 ++SUBLEVEL = 118 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c +index 8d21ab904f1a98..eacf4ba1d88e9a 100644 +--- a/arch/arm64/kvm/hyp/nvhe/ffa.c ++++ b/arch/arm64/kvm/hyp/nvhe/ffa.c +@@ -425,7 +425,7 @@ static void __do_ffa_mem_xfer(const u64 func_id, + DECLARE_REG(u32, npages_mbz, ctxt, 4); + struct ffa_composite_mem_region *reg; + struct ffa_mem_region *buf; +- u32 offset, nr_ranges; ++ u32 offset, nr_ranges, checked_offset; + int ret = 0; + + if (addr_mbz || npages_mbz || fraglen > len || +@@ -460,7 +460,12 @@ static void __do_ffa_mem_xfer(const u64 func_id, + goto out_unlock; + } + +- if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) { ++ if (check_add_overflow(offset, sizeof(struct ffa_composite_mem_region), &checked_offset)) { ++ ret = FFA_RET_INVALID_PARAMETERS; ++ goto out_unlock; ++ } ++ ++ if (fraglen < checked_offset) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } +diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h +index aafb3cd9e943e5..215e0f9e8aa32a 100644 +--- a/arch/loongarch/include/uapi/asm/ptrace.h ++++ b/arch/loongarch/include/uapi/asm/ptrace.h +@@ -10,10 +10,6 @@ + + #include + +-#ifndef __KERNEL__ +-#include +-#endif +- + /* + * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs, + * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR. +@@ -41,44 +37,44 @@ struct user_pt_regs { + } __attribute__((aligned(8))); + + struct user_fp_state { +- uint64_t fpr[32]; +- uint64_t fcc; +- uint32_t fcsr; ++ __u64 fpr[32]; ++ __u64 fcc; ++ __u32 fcsr; + }; + + struct user_lsx_state { + /* 32 registers, 128 bits width per register. */ +- uint64_t vregs[32*2]; ++ __u64 vregs[32*2]; + }; + + struct user_lasx_state { + /* 32 registers, 256 bits width per register. 
*/ +- uint64_t vregs[32*4]; ++ __u64 vregs[32*4]; + }; + + struct user_lbt_state { +- uint64_t scr[4]; +- uint32_t eflags; +- uint32_t ftop; ++ __u64 scr[4]; ++ __u32 eflags; ++ __u32 ftop; + }; + + struct user_watch_state { +- uint64_t dbg_info; ++ __u64 dbg_info; + struct { +- uint64_t addr; +- uint64_t mask; +- uint32_t ctrl; +- uint32_t pad; ++ __u64 addr; ++ __u64 mask; ++ __u32 ctrl; ++ __u32 pad; + } dbg_regs[8]; + }; + + struct user_watch_state_v2 { +- uint64_t dbg_info; ++ __u64 dbg_info; + struct { +- uint64_t addr; +- uint64_t mask; +- uint32_t ctrl; +- uint32_t pad; ++ __u64 addr; ++ __u64 mask; ++ __u32 ctrl; ++ __u32 pad; + } dbg_regs[14]; + }; + +diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c +index 2726639150bc7a..927dd31f82b93e 100644 +--- a/arch/loongarch/pci/pci.c ++++ b/arch/loongarch/pci/pci.c +@@ -51,11 +51,11 @@ static int __init pcibios_init(void) + */ + lsize = cpu_last_level_cache_line_size(); + +- BUG_ON(!lsize); ++ if (lsize) { ++ pci_dfl_cache_line_size = lsize >> 2; + +- pci_dfl_cache_line_size = lsize >> 2; +- +- pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); ++ pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); ++ } + + return 0; + } +diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c +index 000d6d50520a89..82b0fd8576a241 100644 +--- a/arch/mips/mti-malta/malta-init.c ++++ b/arch/mips/mti-malta/malta-init.c +@@ -241,16 +241,22 @@ mips_pci_controller: + #endif + + /* +- * Setup the Malta max (2GB) memory for PCI DMA in host bridge +- * in transparent addressing mode. ++ * Set up memory mapping in host bridge for PCI DMA masters, ++ * in transparent addressing mode. For EVA use the Malta ++ * maximum of 2 GiB memory in the alias space at 0x80000000 ++ * as per PHYS_OFFSET. Otherwise use 256 MiB of memory in ++ * the regular space, avoiding mapping the PCI MMIO window ++ * for DMA as it seems to confuse the system controller's ++ * logic, causing PCI MMIO to stop working. + */ +- mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH; +- MSC_WRITE(MSC01_PCI_BAR0, mask); +- MSC_WRITE(MSC01_PCI_HEAD4, mask); ++ mask = PHYS_OFFSET ? PHYS_OFFSET : 0xf0000000; ++ MSC_WRITE(MSC01_PCI_BAR0, ++ mask | PCI_BASE_ADDRESS_MEM_PREFETCH); ++ MSC_WRITE(MSC01_PCI_HEAD4, ++ PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH); + +- mask &= MSC01_PCI_BAR0_SIZE_MSK; + MSC_WRITE(MSC01_PCI_P2SCMSKL, mask); +- MSC_WRITE(MSC01_PCI_P2SCMAPL, mask); ++ MSC_WRITE(MSC01_PCI_P2SCMAPL, PHYS_OFFSET); + + /* Don't handle target retries indefinitely. 
*/ + if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) == +diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h +index da2e91b5b19250..2cc9d7bb1b2ac0 100644 +--- a/arch/s390/include/asm/pgtable.h ++++ b/arch/s390/include/asm/pgtable.h +@@ -1065,17 +1065,15 @@ static inline pte_t pte_mkhuge(pte_t pte) + #define IPTE_NODAT 0x400 + #define IPTE_GUEST_ASCE 0x800 + +-static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, +- unsigned long opt, unsigned long asce, +- int local) ++static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local) + { + unsigned long pto; + + pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1); +- asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]" ++ asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]" + : "+m" (*ptep) +- : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt), +- [asce] "a" (asce), [m4] "i" (local)); ++ : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK), ++ [m4] "i" (local)); + } + + static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, +@@ -1259,7 +1257,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, + * A local RDP can be used to do the flush. + */ + if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT)) +- __ptep_rdp(address, ptep, 0, 0, 1); ++ __ptep_rdp(address, ptep, 1); + } + #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault + +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c +index 5e349869590a83..1fb435b3913cdd 100644 +--- a/arch/s390/mm/pgtable.c ++++ b/arch/s390/mm/pgtable.c +@@ -312,9 +312,9 @@ void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + preempt_disable(); + atomic_inc(&mm->context.flush_count); + if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) +- __ptep_rdp(addr, ptep, 0, 0, 1); ++ __ptep_rdp(addr, ptep, 1); + else +- __ptep_rdp(addr, ptep, 0, 0, 0); ++ __ptep_rdp(addr, ptep, 0); + /* + * PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. 
That + * means it is still valid and active, and must not be changed according +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c +index f9b6e2043e6b2b..9952c774eaa697 100644 +--- a/arch/x86/kernel/cpu/microcode/amd.c ++++ b/arch/x86/kernel/cpu/microcode/amd.c +@@ -226,6 +226,24 @@ static bool need_sha_check(u32 cur_rev) + return true; + } + ++static bool cpu_has_entrysign(void) ++{ ++ unsigned int fam = x86_family(bsp_cpuid_1_eax); ++ unsigned int model = x86_model(bsp_cpuid_1_eax); ++ ++ if (fam == 0x17 || fam == 0x19) ++ return true; ++ ++ if (fam == 0x1a) { ++ if (model <= 0x2f || ++ (0x40 <= model && model <= 0x4f) || ++ (0x60 <= model && model <= 0x6f)) ++ return true; ++ } ++ ++ return false; ++} ++ + static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len) + { + struct patch_digest *pd = NULL; +@@ -233,7 +251,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi + struct sha256_state s; + int i; + +- if (x86_family(bsp_cpuid_1_eax) < 0x17) ++ if (!cpu_has_entrysign()) + return true; + + if (!need_sha_check(cur_rev)) +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 0b2f1e269ca496..8f6a7acf770e4a 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -992,6 +992,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) + return; + } + ++ if (ata_id_is_locked(dev->id)) { ++ /* Security locked */ ++ /* LOGICAL UNIT ACCESS NOT AUTHORIZED */ ++ ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71); ++ return; ++ } ++ + if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { + ata_dev_dbg(dev, + "Missing result TF: reporting aborted command\n"); +@@ -4831,8 +4838,10 @@ void ata_scsi_dev_rescan(struct work_struct *work) + spin_unlock_irqrestore(ap->lock, flags); + if (do_resume) { + ret = scsi_resume_device(sdev); +- if (ret == -EWOULDBLOCK) ++ if (ret == -EWOULDBLOCK) { ++ scsi_device_put(sdev); + goto unlock_scan; ++ } + dev->flags &= ~ATA_DFLAG_RESUMING; + } + ret = scsi_rescan_device(sdev); +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c +index 7061d3ee836a15..c69c05256b59f1 100644 +--- a/drivers/bcma/main.c ++++ b/drivers/bcma/main.c +@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus) + int err; + + list_for_each_entry(core, &bus->cores, list) { ++ struct device_node *np; ++ + /* We support that core ourselves */ + switch (core->id.id) { + case BCMA_CORE_4706_CHIPCOMMON: +@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus) + if (bcma_is_core_needed_early(core->id.id)) + continue; + ++ np = core->dev.of_node; ++ if (np && !of_device_is_available(np)) ++ continue; ++ + /* Only first GMAC core on BCM4706 is connected and working */ + if (core->id.id == BCMA_CORE_4706_MAC_GBIT && + core->core_unit > 0) +diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c +index 0e05a79de82d8a..82d923a9d75d75 100644 +--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c ++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c +@@ -54,7 +54,7 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain) + + static int scmi_pm_domain_probe(struct scmi_device *sdev) + { +- int num_domains, i; ++ int num_domains, i, ret; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + struct scmi_pm_domain *scmi_pd; +@@ -112,9 +112,18 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) + scmi_pd_data->domains = domains; + scmi_pd_data->num_domains 
= num_domains; + ++ ret = of_genpd_add_provider_onecell(np, scmi_pd_data); ++ if (ret) ++ goto err_rm_genpds; ++ + dev_set_drvdata(dev, scmi_pd_data); + +- return of_genpd_add_provider_onecell(np, scmi_pd_data); ++ return 0; ++err_rm_genpds: ++ for (i = num_domains - 1; i >= 0; i--) ++ pm_genpd_remove(domains[i]); ++ ++ return ret; + } + + static void scmi_pm_domain_remove(struct scmi_device *sdev) +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +index 480d718d09cb6b..7591a2803ae126 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +@@ -5296,9 +5296,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, + if (flags & AMDGPU_IB_PREEMPTED) + control |= INDIRECT_BUFFER_PRE_RESUME(1); + +- if (vmid) ++ if (vmid && !ring->adev->gfx.rs64_enable) + gfx_v11_0_ring_emit_de_meta(ring, +- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); ++ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED)); + } + + if (ring->is_mes_queue) +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +index adf0ef8b70e4b1..56999657f4d864 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +@@ -1552,7 +1552,7 @@ static bool retrieve_link_cap(struct dc_link *link) + union edp_configuration_cap edp_config_cap; + union dp_downstream_port_present ds_port = { 0 }; + enum dc_status status = DC_ERROR_UNEXPECTED; +- uint32_t read_dpcd_retry_cnt = 3; ++ uint32_t read_dpcd_retry_cnt = 20; + int i; + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + const uint32_t post_oui_delay = 30; // 30ms +@@ -1598,12 +1598,13 @@ static bool retrieve_link_cap(struct dc_link *link) + status = dpcd_get_tunneling_device_data(link); + + dpcd_set_source_specific_data(link); +- /* Sink may need to configure internals based on vendor, so allow some +- * time before proceeding with possibly vendor specific transactions +- */ +- msleep(post_oui_delay); + + for (i = 0; i < read_dpcd_retry_cnt; i++) { ++ /* ++ * Sink may need to configure internals based on vendor, so allow some ++ * time before proceeding with possibly vendor specific transactions ++ */ ++ msleep(post_oui_delay); + status = core_link_read_dpcd( + link, + DP_DPCD_REV, +diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c +index cac6d64ab67d1d..4e8b3f1c7e25d2 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c ++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c +@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw) + nvkm_memory_unref(&fw->inst); + nvkm_falcon_fw_dtor_sigs(fw); + nvkm_firmware_dtor(&fw->fw); ++ kfree(fw->boot); ++ fw->boot = NULL; + } + + static const struct nvkm_firmware_func +diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c +index 980d85bc7f3745..caa1fe6cc386eb 100644 +--- a/drivers/gpu/drm/tegra/dc.c ++++ b/drivers/gpu/drm/tegra/dc.c +@@ -3140,6 +3140,7 @@ static int tegra_dc_couple(struct tegra_dc *dc) + dc->client.parent = &parent->client; + + dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion)); ++ put_device(companion); + } + + return 0; +diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c +index 839dbad9bc483d..49fc4690c63af7 100644 +--- a/drivers/gpu/drm/tegra/dsi.c ++++ b/drivers/gpu/drm/tegra/dsi.c +@@ -912,15 +912,6 @@ 
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) + u32 value; + int err; + +- /* If the bootloader enabled DSI it needs to be disabled +- * in order for the panel initialization commands to be +- * properly sent. +- */ +- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); +- +- if (value & DSI_POWER_CONTROL_ENABLE) +- tegra_dsi_disable(dsi); +- + err = tegra_dsi_prepare(dsi); + if (err < 0) { + dev_err(dsi->dev, "failed to prepare: %d\n", err); +diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c +index 5adab6b229164e..d0b6a1fa6efad9 100644 +--- a/drivers/gpu/drm/tegra/uapi.c ++++ b/drivers/gpu/drm/tegra/uapi.c +@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_ + if (err) + goto put_channel; + +- if (supported) ++ if (supported) { ++ struct pid *pid = get_task_pid(current, PIDTYPE_TGID); + context->memory_context = host1x_memory_context_alloc( +- host, client->base.dev, get_task_pid(current, PIDTYPE_TGID)); ++ host, client->base.dev, pid); ++ put_pid(pid); ++ } + + if (IS_ERR(context->memory_context)) { + if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) { +diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c +index 862ca8d0723262..e2047ef864ebfa 100644 +--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c ++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c +@@ -163,6 +163,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) + if (rc) + goto cleanup; + ++ mp2_ops->stop(privdata, cl_data->sensor_idx[i]); ++ amd_sfh_wait_for_response(privdata, cl_data->sensor_idx[i], DISABLE_SENSOR); + writel(0, privdata->mmio + AMD_P2C_MSG(0)); + mp2_ops->start(privdata, info); + status = amd_sfh_wait_for_response +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index fbbab353f040a8..6a538dca21a9a2 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -1511,7 +1511,7 @@ + #define USB_VENDOR_ID_SIGNOTEC 0x2133 + #define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018 + +-#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a +-#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155 ++#define USB_VENDOR_ID_JIELI_SDK_DEFAULT 0x4c4a ++#define USB_DEVICE_ID_JIELI_SDK_4155 0x4155 + + #endif +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 75480ec3c15a2d..fa946666969b8d 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -900,7 +900,6 @@ static const struct hid_device_id hid_ignore_list[] = { + #endif + { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) }, +- { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) }, + { } + }; + +@@ -1057,6 +1056,18 @@ bool hid_ignore(struct hid_device *hdev) + strlen(elan_acpi_id[i].id))) + return true; + break; ++ case USB_VENDOR_ID_JIELI_SDK_DEFAULT: ++ /* ++ * Multiple USB devices with identical IDs (mic & touchscreen). ++ * The touch screen requires hid core processing, but the ++ * microphone does not. They can be distinguished by manufacturer ++ * and serial number. 
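The Tegra uapi hunk above fixes a struct pid reference leak: get_task_pid() returns a counted reference, host1x_memory_context_alloc() takes its own, so the caller must drop the one it obtained with put_pid() instead of passing the get_task_pid() result inline. A toy user-space sketch of that get/pass/put discipline — struct obj, obj_get() and obj_put() are made-up stand-ins, assuming the callee pins the object itself:

    #include <stdio.h>

    /* Toy stand-in for a kernel refcounted object such as struct pid. */
    struct obj {
        int refs;
    };

    static struct obj *obj_get(struct obj *o)
    {
        o->refs++;
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            printf("last reference dropped\n");
    }

    /* Callee keeps its own long-lived reference, as the allocator does with the pid. */
    static void consumer(struct obj *o)
    {
        obj_get(o);
    }

    int main(void)
    {
        struct obj o = { .refs = 1 };
        struct obj *ref;

        /* Fixed pattern from the hunk: take a reference, hand it over, drop ours. */
        ref = obj_get(&o);
        consumer(ref);
        obj_put(ref);

        /* Calling consumer(obj_get(&o)) with no matching put would leak a count. */
        printf("refs now: %d (original + consumer's)\n", o.refs);
        return 0;
    }
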
++ */ ++ if (hdev->product == USB_DEVICE_ID_JIELI_SDK_4155 && ++ strncmp(hdev->name, "SmartlinkTechnology", 19) == 0 && ++ strncmp(hdev->uniq, "20201111000001", 14) == 0) ++ return true; ++ break; + } + + if (hdev->type == HID_TYPE_USBMOUSE && +diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c +index e7ecfca838df40..76fcf90ea7c763 100644 +--- a/drivers/input/keyboard/cros_ec_keyb.c ++++ b/drivers/input/keyboard/cros_ec_keyb.c +@@ -263,6 +263,12 @@ static int cros_ec_keyb_work(struct notifier_block *nb, + case EC_MKBP_EVENT_KEY_MATRIX: + pm_wakeup_event(ckdev->dev, 0); + ++ if (!ckdev->idev) { ++ dev_warn_once(ckdev->dev, ++ "Unexpected key matrix event\n"); ++ return NOTIFY_OK; ++ } ++ + if (ckdev->ec->event_size != ckdev->cols) { + dev_err(ckdev->dev, + "Discarded incomplete key matrix event.\n"); +diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c +index d18839f1f4f60d..b620cd310cdb78 100644 +--- a/drivers/input/keyboard/imx_sc_key.c ++++ b/drivers/input/keyboard/imx_sc_key.c +@@ -158,7 +158,7 @@ static int imx_sc_key_probe(struct platform_device *pdev) + return error; + } + +- error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv); ++ error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, priv); + if (error) + return error; + +diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c +index a68da2988f9cd8..26ab9924a7ae54 100644 +--- a/drivers/input/tablet/pegasus_notetaker.c ++++ b/drivers/input/tablet/pegasus_notetaker.c +@@ -63,6 +63,9 @@ + #define BUTTON_PRESSED 0xb5 + #define COMMAND_VERSION 0xa9 + ++/* 1 Status + 1 Color + 2 X + 2 Y = 6 bytes */ ++#define NOTETAKER_PACKET_SIZE 6 ++ + /* in xy data packet */ + #define BATTERY_NO_REPORT 0x40 + #define BATTERY_LOW 0x41 +@@ -303,6 +306,12 @@ static int pegasus_probe(struct usb_interface *intf, + } + + pegasus->data_len = usb_maxpacket(dev, pipe); ++ if (pegasus->data_len < NOTETAKER_PACKET_SIZE) { ++ dev_err(&intf->dev, "packet size is too small (%d)\n", ++ pegasus->data_len); ++ error = -EINVAL; ++ goto err_free_mem; ++ } + + pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL, + &pegasus->data_dma); +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c +index b068ff8afbc9ad..294bd942ee3794 100644 +--- a/drivers/input/touchscreen/goodix.c ++++ b/drivers/input/touchscreen/goodix.c +@@ -1519,6 +1519,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id); + static const struct acpi_device_id goodix_acpi_match[] = { + { "GDIX1001", 0 }, + { "GDIX1002", 0 }, ++ { "GDIX1003", 0 }, + { "GDX9110", 0 }, + { } + }; +diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c +index 8dc4f5c493fcba..335c702633ffe0 100644 +--- a/drivers/mtd/mtdchar.c ++++ b/drivers/mtd/mtdchar.c +@@ -599,6 +599,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) + uint8_t *datbuf = NULL, *oobbuf = NULL; + size_t datbuf_len, oobbuf_len; + int ret = 0; ++ u64 end; + + if (copy_from_user(&req, argp, sizeof(req))) + return -EFAULT; +@@ -618,7 +619,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) + req.len &= 0xffffffff; + req.ooblen &= 0xffffffff; + +- if (req.start + req.len > mtd->size) ++ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) + return -EINVAL; + + datbuf_len = min_t(size_t, req.len, mtd->erasesize); +@@ -698,6 +699,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp) + 
size_t datbuf_len, oobbuf_len; + size_t orig_len, orig_ooblen; + int ret = 0; ++ u64 end; + + if (copy_from_user(&req, argp, sizeof(req))) + return -EFAULT; +@@ -724,7 +726,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp) + req.len &= 0xffffffff; + req.ooblen &= 0xffffffff; + +- if (req.start + req.len > mtd->size) { ++ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) { + ret = -EINVAL; + goto out; + } +diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c +index 4f37ca894d18a1..202b4fc064fa39 100644 +--- a/drivers/mtd/nand/raw/cadence-nand-controller.c ++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c +@@ -2876,7 +2876,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) + static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) + { + dma_cap_mask_t mask; +- struct dma_device *dma_dev = cdns_ctrl->dmac->device; ++ struct dma_device *dma_dev; + int ret; + + cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, +@@ -2920,6 +2920,7 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) + } + } + ++ dma_dev = cdns_ctrl->dmac->device; + cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma, + cdns_ctrl->io.size, + DMA_BIDIRECTIONAL, 0); +diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c +index 5249a1c2a80b8c..1bb994f7859636 100644 +--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c ++++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c +@@ -371,8 +371,18 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek) + hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1); + + /* Register both leds */ +- led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good); +- led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm); ++ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good); ++ if (ret) { ++ dev_err(hellcreek->dev, "Failed to register sync_good LED\n"); ++ goto out; ++ } ++ ++ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm); ++ if (ret) { ++ dev_err(hellcreek->dev, "Failed to register is_gm LED\n"); ++ led_classdev_unregister(&hellcreek->led_sync_good); ++ goto out; ++ } + + ret = 0; + +diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c +index dde37e61faa359..d2e98b5f0aca4c 100644 +--- a/drivers/net/dsa/microchip/lan937x_main.c ++++ b/drivers/net/dsa/microchip/lan937x_main.c +@@ -336,6 +336,7 @@ static void lan937x_set_tune_adj(struct ksz_device *dev, int port, + ksz_pread16(dev, port, reg, &data16); + + /* Update tune Adjust */ ++ data16 &= ~PORT_TUNE_ADJ; + data16 |= FIELD_PREP(PORT_TUNE_ADJ, val); + ksz_pwrite16(dev, port, reg, data16); + +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index 0fda17bc8e2303..011c8cc8429e3a 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) + (adapter->bmc_filt_mask & BMC_FILT_MULTICAST) + + static bool be_send_pkt_to_bmc(struct be_adapter *adapter, +- struct sk_buff **skb) ++ struct sk_buff **skb, ++ struct be_wrb_params *wrb_params) + { + struct ethhdr *eh = (struct ethhdr *)(*skb)->data; + bool os2bmc = false; +@@ -1360,7 +1361,7 @@ done: + * to BMC, asic expects the vlan to be inline in the packet. 
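The mtdchar hunks above replace the raw `req.start + req.len > mtd->size` test with check_add_overflow(), so a request whose offset plus length wraps around 64 bits can no longer slip past the bounds check. A user-space sketch of the same shape, assuming the GCC/Clang __builtin_add_overflow() that the kernel macro wraps:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Validate [start, start + len) against a device of `size` bytes. */
    static bool req_in_bounds(uint64_t start, uint64_t len, uint64_t size)
    {
        uint64_t end;

        if (__builtin_add_overflow(start, len, &end))
            return false;   /* start + len wrapped past UINT64_MAX */
        return end <= size;
    }

    int main(void)
    {
        uint64_t size = 1 << 20;    /* 1 MiB device */

        printf("%d\n", req_in_bounds(0, 512, size));         /* 1: fits */
        printf("%d\n", req_in_bounds(size - 4, 8, size));    /* 0: runs past the end */
        /* The naive test would compute UINT64_MAX + 2 == 1 and wrongly accept this. */
        printf("%d\n", req_in_bounds(UINT64_MAX, 2, size));  /* 0: addition wraps */
        return 0;
    }
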
+ */ + if (os2bmc) +- *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL); ++ *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params); + + return os2bmc; + } +@@ -1387,7 +1388,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) + /* if os2bmc is enabled and if the pkt is destined to bmc, + * enqueue the pkt a 2nd time with mgmt bit set. + */ +- if (be_send_pkt_to_bmc(adapter, &skb)) { ++ if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) { + BE_WRB_F_SET(wrb_params.features, OS2BMC, 1); + wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); + if (unlikely(!wrb_cnt)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +index a8d6fd18c0f557..487d8b413a41d7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +@@ -323,10 +323,8 @@ err_xa: + free_irq(irq->map.virq, &irq->nh); + err_req_irq: + #ifdef CONFIG_RFS_ACCEL +- if (i && rmap && *rmap) { +- free_irq_cpu_rmap(*rmap); +- *rmap = NULL; +- } ++ if (i && rmap && *rmap) ++ irq_cpu_rmap_remove(*rmap, irq->map.virq); + err_irq_rmap: + #endif + if (i && pci_msix_can_alloc_dyn(dev->pdev)) +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +index b032d5a4b3b84c..10f5bc4892fc75 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +@@ -601,6 +601,8 @@ int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard, + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_PSID, + info->psid); ++ if (err) ++ goto unlock; + + sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor, + info->fw_sub_minor); +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +index 9fd1ca07925849..fba545938fd7d5 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +@@ -816,8 +816,10 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, + return -EINVAL; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); +- if (!rule) +- return -EINVAL; ++ if (!rule) { ++ err = -EINVAL; ++ goto err_rule_get_stats; ++ } + + err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes, + &drops, &lastuse, &used_hw_stats); +diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c +index cb1746bc0e0c5d..273dae622c4113 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c +@@ -4,6 +4,7 @@ + * Copyright (c) 2019-2020 Marvell International Ltd. 
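The qede hunks just below bound the cqe->len_list walks by ARRAY_SIZE() in addition to the zero terminator, so a malformed completion without a terminator cannot drive the index past the array. A generic sketch of that bounded-iteration pattern with invented data — note the index test is written before the element read, so no probe is ever made at i == ARRAY_SIZE():

    #include <stdio.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        /* Zero-terminated length list in a fixed-size array, like cqe->len_list. */
        unsigned short len_list[4] = { 100, 200, 0, 0 };
        size_t i;

        /* Stop at the terminator or at the end of the array, whichever comes first. */
        for (i = 0; i < ARRAY_SIZE(len_list) && len_list[i]; i++)
            printf("frag %zu: %d bytes\n", i, len_list[i]);

        return 0;
    }
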
+ */ + ++#include + #include + #include + #include +@@ -960,7 +961,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev, + { + int i; + +- for (i = 0; cqe->len_list[i]; i++) ++ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + +@@ -985,7 +986,7 @@ static int qede_tpa_end(struct qede_dev *edev, + dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, + PAGE_SIZE, rxq->data_direction); + +- for (i = 0; cqe->len_list[i]; i++) ++ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + if (unlikely(i > 1)) +diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c +index d829113c16eee3..1c33a9c9ddb5a1 100644 +--- a/drivers/net/ethernet/ti/netcp_core.c ++++ b/drivers/net/ethernet/ti/netcp_core.c +@@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) + + tx_pipe->dma_channel = knav_dma_open_channel(dev, + tx_pipe->dma_chan_name, &config); +- if (IS_ERR(tx_pipe->dma_channel)) { ++ if (!tx_pipe->dma_channel) { + dev_err(dev, "failed opening tx chan(%s)\n", + tx_pipe->dma_chan_name); +- ret = PTR_ERR(tx_pipe->dma_channel); ++ ret = -EINVAL; + goto err; + } + +@@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) + return 0; + + err: +- if (!IS_ERR_OR_NULL(tx_pipe->dma_channel)) ++ if (tx_pipe->dma_channel) + knav_dma_close_channel(tx_pipe->dma_channel); + tx_pipe->dma_channel = NULL; + return ret; +@@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) + + netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, + netcp->dma_chan_name, &config); +- if (IS_ERR(netcp->rx_channel)) { ++ if (!netcp->rx_channel) { + dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", + netcp->dma_chan_name); +- ret = PTR_ERR(netcp->rx_channel); ++ ret = -EINVAL; + goto fail; + } + +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c +index bf9ab07257642c..37fede155b920e 100644 +--- a/drivers/nvme/host/fc.c ++++ b/drivers/nvme/host/fc.c +@@ -2349,17 +2349,11 @@ nvme_fc_ctrl_free(struct kref *ref) + container_of(ref, struct nvme_fc_ctrl, ref); + unsigned long flags; + +- if (ctrl->ctrl.tagset) +- nvme_remove_io_tag_set(&ctrl->ctrl); +- + /* remove from rport list */ + spin_lock_irqsave(&ctrl->rport->lock, flags); + list_del(&ctrl->ctrl_list); + spin_unlock_irqrestore(&ctrl->rport->lock, flags); + +- nvme_unquiesce_admin_queue(&ctrl->ctrl); +- nvme_remove_admin_tag_set(&ctrl->ctrl); +- + kfree(ctrl->queues); + + put_device(ctrl->dev); +@@ -3248,13 +3242,20 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) + { + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); + +- cancel_work_sync(&ctrl->ioerr_work); + cancel_delayed_work_sync(&ctrl->connect_work); ++ + /* + * kill the association on the link side. 
this will block + * waiting for io to terminate + */ + nvme_fc_delete_association(ctrl); ++ cancel_work_sync(&ctrl->ioerr_work); ++ ++ if (ctrl->ctrl.tagset) ++ nvme_remove_io_tag_set(&ctrl->ctrl); ++ ++ nvme_unquiesce_admin_queue(&ctrl->ctrl); ++ nvme_remove_admin_tag_set(&ctrl->ctrl); + } + + static void +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 57416bbf9344f0..578f4f29eacfe1 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -686,7 +686,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) + return; + } + nvme_add_ns_head_cdev(head); +- kblockd_schedule_work(&head->partition_scan_work); ++ queue_work(nvme_wq, &head->partition_scan_work); + } + + mutex_lock(&head->lock); +diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c +index c0964631841955..e1ac89be7c847d 100644 +--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c ++++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c +@@ -532,6 +532,11 @@ static int cs42l43_gpio_add_pin_ranges(struct gpio_chip *chip) + return ret; + } + ++static void cs42l43_fwnode_put(void *data) ++{ ++ fwnode_handle_put(data); ++} ++ + static int cs42l43_pin_probe(struct platform_device *pdev) + { + struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent); +@@ -563,10 +568,20 @@ static int cs42l43_pin_probe(struct platform_device *pdev) + priv->gpio_chip.ngpio = CS42L43_NUM_GPIOS; + + if (is_of_node(fwnode)) { +- fwnode = fwnode_get_named_child_node(fwnode, "pinctrl"); +- +- if (fwnode && !fwnode->dev) +- fwnode->dev = priv->dev; ++ struct fwnode_handle *child; ++ ++ child = fwnode_get_named_child_node(fwnode, "pinctrl"); ++ if (child) { ++ ret = devm_add_action_or_reset(&pdev->dev, ++ cs42l43_fwnode_put, child); ++ if (ret) { ++ fwnode_handle_put(child); ++ return ret; ++ } ++ if (!child->dev) ++ child->dev = priv->dev; ++ fwnode = child; ++ } + } + + priv->gpio_chip.fwnode = fwnode; +diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c +index 08d80fb935b3ad..9c435e44abb4fc 100644 +--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c ++++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c +@@ -392,6 +392,7 @@ static int s32_pmx_gpio_request_enable(struct pinctrl_dev *pctldev, + + gpio_pin->pin_id = offset; + gpio_pin->config = config; ++ INIT_LIST_HEAD(&gpio_pin->list); + + spin_lock_irqsave(&ipctl->gpio_configs_lock, flags); + list_add(&gpio_pin->list, &ipctl->gpio_configs); +@@ -943,7 +944,7 @@ int s32_pinctrl_probe(struct platform_device *pdev, + spin_lock_init(&ipctl->gpio_configs_lock); + + s32_pinctrl_desc = +- devm_kmalloc(&pdev->dev, sizeof(*s32_pinctrl_desc), GFP_KERNEL); ++ devm_kzalloc(&pdev->dev, sizeof(*s32_pinctrl_desc), GFP_KERNEL); + if (!s32_pinctrl_desc) + return -ENOMEM; + +diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c +index ff49025ec0856a..bb38e5f021a808 100644 +--- a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c ++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c +@@ -106,11 +106,11 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + + ret = pci_read_config_dword(pdev, 0xD0, &mmio_base); + if (ret) +- return ret; ++ return pcibios_err_to_errno(ret); + + ret = pci_read_config_dword(pdev, 0xFC, &pcu_base); + if (ret) +- return ret; ++ return pcibios_err_to_errno(ret); + + pcu_base &= GENMASK(10, 0); + base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12; +diff --git 
a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c +index 419ed15cc10c42..d7c0301a7121b4 100644 +--- a/drivers/pmdomain/imx/gpc.c ++++ b/drivers/pmdomain/imx/gpc.c +@@ -512,7 +512,7 @@ static int imx_gpc_probe(struct platform_device *pdev) + return 0; + } + +-static int imx_gpc_remove(struct platform_device *pdev) ++static void imx_gpc_remove(struct platform_device *pdev) + { + struct device_node *pgc_node; + int ret; +@@ -522,7 +522,7 @@ static int imx_gpc_remove(struct platform_device *pdev) + /* bail out if DT too old and doesn't provide the necessary info */ + if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && + !pgc_node) +- return 0; ++ return; + + /* + * If the old DT binding is used the toplevel driver needs to +@@ -532,16 +532,22 @@ static int imx_gpc_remove(struct platform_device *pdev) + of_genpd_del_provider(pdev->dev.of_node); + + ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); +- if (ret) +- return ret; ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to remove PU power domain (%pe)\n", ++ ERR_PTR(ret)); ++ return; ++ } + imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]); + + ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base); +- if (ret) +- return ret; ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to remove ARM power domain (%pe)\n", ++ ERR_PTR(ret)); ++ return; ++ } + } + +- return 0; ++ of_node_put(pgc_node); + } + + static struct platform_driver imx_gpc_driver = { +@@ -550,6 +556,6 @@ static struct platform_driver imx_gpc_driver = { + .of_match_table = imx_gpc_dt_ids, + }, + .probe = imx_gpc_probe, +- .remove = imx_gpc_remove, ++ .remove_new = imx_gpc_remove, + }; + builtin_platform_driver(imx_gpc_driver) +diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c +index 7a2f34a5e0e09f..f8b3e3f7b8ce04 100644 +--- a/drivers/s390/net/ctcm_mpc.c ++++ b/drivers/s390/net/ctcm_mpc.c +@@ -700,7 +700,6 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) + + grp->sweep_req_pend_num--; + ctcmpc_send_sweep_resp(ch); +- kfree(mpcginfo); + return; + } + +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index 445f4a220df3eb..f274812aee1e88 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -602,8 +602,9 @@ int scsi_host_busy(struct Scsi_Host *shost) + { + int cnt = 0; + +- blk_mq_tagset_busy_iter(&shost->tag_set, +- scsi_host_check_in_flight, &cnt); ++ if (shost->tag_set.ops) ++ blk_mq_tagset_busy_iter(&shost->tag_set, ++ scsi_host_check_in_flight, &cnt); + return cnt; + } + EXPORT_SYMBOL(scsi_host_busy); +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 62574886a9111e..9258a1a8c23c16 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -2212,9 +2212,17 @@ sg_remove_sfp_usercontext(struct work_struct *work) + write_lock_irqsave(&sfp->rq_list_lock, iflags); + while (!list_empty(&sfp->rq_list)) { + srp = list_first_entry(&sfp->rq_list, Sg_request, entry); +- sg_finish_rem_req(srp); + list_del(&srp->entry); ++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags); ++ ++ sg_finish_rem_req(srp); ++ /* ++ * sg_rq_end_io() uses srp->parentfp. Hence, only clear ++ * srp->parentfp after blk_mq_free_request() has been called. 
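The sg hunk above stops calling sg_finish_rem_req() while rq_list_lock is held: each request is detached from the list under the lock, the lock is dropped around the teardown (which ends up in blk_mq_free_request()), and then retaken before the list is examined again. A pthread-based sketch of that detach/unlock/teardown/relock drain, with a mutex standing in for the spinlock and invented request types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A node whose teardown must not run under the list lock. */
    struct req {
        struct req *next;
        int id;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct req *head;

    static void drain_requests(void)
    {
        struct req *r;

        pthread_mutex_lock(&lock);
        while (head) {
            r = head;
            head = r->next;                 /* detach under the lock */
            pthread_mutex_unlock(&lock);    /* drop it around the heavy teardown */

            printf("finishing req %d\n", r->id);
            free(r);

            pthread_mutex_lock(&lock);      /* retake before touching the list again */
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct req *r = malloc(sizeof(*r));

            if (!r)
                break;
            r->id = i;
            r->next = head;
            head = r;
        }
        drain_requests();
        return 0;
    }
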
++ */ + srp->parentfp = NULL; ++ ++ write_lock_irqsave(&sfp->rq_list_lock, iflags); + } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + +diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c +index 0fbc37cd512310..f599eaab3c562c 100644 +--- a/drivers/soc/ti/knav_dma.c ++++ b/drivers/soc/ti/knav_dma.c +@@ -402,7 +402,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name, + * @name: slave channel name + * @config: dma configuration parameters + * +- * Returns pointer to appropriate DMA channel on success or error. ++ * Return: Pointer to appropriate DMA channel on success or NULL on error. + */ + void *knav_dma_open_channel(struct device *dev, const char *name, + struct knav_dma_cfg *config) +@@ -414,13 +414,13 @@ void *knav_dma_open_channel(struct device *dev, const char *name, + + if (!kdev) { + pr_err("keystone-navigator-dma driver not registered\n"); +- return (void *)-EINVAL; ++ return NULL; + } + + chan_num = of_channel_match_helper(dev->of_node, name, &instance); + if (chan_num < 0) { + dev_err(kdev->dev, "No DMA instance with name %s\n", name); +- return (void *)-EINVAL; ++ return NULL; + } + + dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n", +@@ -431,7 +431,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name, + if (config->direction != DMA_MEM_TO_DEV && + config->direction != DMA_DEV_TO_MEM) { + dev_err(kdev->dev, "bad direction\n"); +- return (void *)-EINVAL; ++ return NULL; + } + + /* Look for correct dma instance */ +@@ -443,7 +443,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name, + } + if (!dma) { + dev_err(kdev->dev, "No DMA instance with name %s\n", instance); +- return (void *)-EINVAL; ++ return NULL; + } + + /* Look for correct dma channel from dma instance */ +@@ -463,14 +463,14 @@ void *knav_dma_open_channel(struct device *dev, const char *name, + if (!chan) { + dev_err(kdev->dev, "channel %d is not in DMA %s\n", + chan_num, instance); +- return (void *)-EINVAL; ++ return NULL; + } + + if (atomic_read(&chan->ref_count) >= 1) { + if (!check_config(chan, config)) { + dev_err(kdev->dev, "channel %d config miss-match\n", + chan_num); +- return (void *)-EINVAL; ++ return NULL; + } + } + +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c +index 4ec99a55ac305a..db13c6a379d875 100644 +--- a/drivers/target/loopback/tcm_loop.c ++++ b/drivers/target/loopback/tcm_loop.c +@@ -893,6 +893,9 @@ static ssize_t tcm_loop_tpg_address_show(struct config_item *item, + struct tcm_loop_tpg, tl_se_tpg); + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; + ++ if (!tl_hba->sh) ++ return -ENODEV; ++ + return snprintf(page, PAGE_SIZE, "%d:0:%d\n", + tl_hba->sh->host_no, tl_tpg->tl_tpgt); + } +diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c +index 2724656bf63486..69e5016ebd463c 100644 +--- a/drivers/uio/uio_hv_generic.c ++++ b/drivers/uio/uio_hv_generic.c +@@ -80,9 +80,15 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state) + { + struct hv_uio_private_data *pdata = info->priv; + struct hv_device *dev = pdata->device; ++ struct vmbus_channel *primary, *sc; + +- dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state; +- virt_mb(); ++ primary = dev->channel; ++ primary->inbound.ring_buffer->interrupt_mask = !irq_state; ++ ++ mutex_lock(&vmbus_connection.channel_mutex); ++ list_for_each_entry(sc, &primary->sc_list, sc_list) ++ sc->inbound.ring_buffer->interrupt_mask = !irq_state; ++ mutex_unlock(&vmbus_connection.channel_mutex); + + 
return 0; + } +@@ -93,11 +99,18 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state) + static void hv_uio_channel_cb(void *context) + { + struct vmbus_channel *chan = context; +- struct hv_device *hv_dev = chan->device_obj; +- struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); ++ struct hv_device *hv_dev; ++ struct hv_uio_private_data *pdata; + + virt_mb(); + ++ /* ++ * The callback may come from a subchannel, in which case look ++ * for the hv device in the primary channel ++ */ ++ hv_dev = chan->primary_channel ? ++ chan->primary_channel->device_obj : chan->device_obj; ++ pdata = hv_get_drvdata(hv_dev); + uio_event_notify(&pdata->info); + } + +diff --git a/fs/exfat/super.c b/fs/exfat/super.c +index 5affc11d14615a..957135f20cb6f2 100644 +--- a/fs/exfat/super.c ++++ b/fs/exfat/super.c +@@ -411,7 +411,10 @@ static int exfat_read_boot_sector(struct super_block *sb) + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + /* set block size to read super block */ +- sb_min_blocksize(sb, 512); ++ if (!sb_min_blocksize(sb, 512)) { ++ exfat_err(sb, "unable to set blocksize"); ++ return -EINVAL; ++ } + + /* read boot sector */ + sbi->boot_bh = sb_bread(sb, 0); +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index c3b2f78ca4e3e2..df7404214f34eb 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -23,20 +23,18 @@ + static struct kmem_cache *cic_entry_slab; + static struct kmem_cache *dic_entry_slab; + +-static void *page_array_alloc(struct inode *inode, int nr) ++static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr) + { +- struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + unsigned int size = sizeof(struct page *) * nr; + + if (likely(size <= sbi->page_array_slab_size)) + return f2fs_kmem_cache_alloc(sbi->page_array_slab, +- GFP_F2FS_ZERO, false, F2FS_I_SB(inode)); ++ GFP_F2FS_ZERO, false, sbi); + return f2fs_kzalloc(sbi, size, GFP_NOFS); + } + +-static void page_array_free(struct inode *inode, void *pages, int nr) ++static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr) + { +- struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + unsigned int size = sizeof(struct page *) * nr; + + if (!pages) +@@ -145,13 +143,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc) + if (cc->rpages) + return 0; + +- cc->rpages = page_array_alloc(cc->inode, cc->cluster_size); ++ cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size); + return cc->rpages ? 
0 : -ENOMEM; + } + + void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse) + { +- page_array_free(cc->inode, cc->rpages, cc->cluster_size); ++ page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size); + cc->rpages = NULL; + cc->nr_rpages = 0; + cc->nr_cpages = 0; +@@ -211,13 +209,13 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic) + ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen, + dic->rbuf, &dic->rlen); + if (ret != LZO_E_OK) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "lzo decompress failed, ret:%d", ret); + return -EIO; + } + + if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "lzo invalid rlen:%zu, expected:%lu", + dic->rlen, PAGE_SIZE << dic->log_cluster_size); + return -EIO; +@@ -291,13 +289,13 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic) + ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf, + dic->clen, dic->rlen); + if (ret < 0) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "lz4 decompress failed, ret:%d", ret); + return -EIO; + } + + if (ret != PAGE_SIZE << dic->log_cluster_size) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "lz4 invalid ret:%d, expected:%lu", + ret, PAGE_SIZE << dic->log_cluster_size); + return -EIO; +@@ -425,7 +423,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) + + stream = zstd_init_dstream(max_window_size, workspace, workspace_size); + if (!stream) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "%s zstd_init_dstream failed", __func__); + vfree(workspace); + return -EIO; +@@ -461,14 +459,14 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic) + + ret = zstd_decompress_stream(stream, &outbuf, &inbuf); + if (zstd_is_error(ret)) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "%s zstd_decompress_stream failed, ret: %d", + __func__, zstd_get_error_code(ret)); + return -EIO; + } + + if (dic->rlen != outbuf.pos) { +- f2fs_err_ratelimited(F2FS_I_SB(dic->inode), ++ f2fs_err_ratelimited(dic->sbi, + "%s ZSTD invalid rlen:%zu, expected:%lu", + __func__, dic->rlen, + PAGE_SIZE << dic->log_cluster_size); +@@ -614,6 +612,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count) + + static int f2fs_compress_pages(struct compress_ctx *cc) + { ++ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); + struct f2fs_inode_info *fi = F2FS_I(cc->inode); + const struct f2fs_compress_ops *cops = + f2fs_cops[fi->i_compress_algorithm]; +@@ -634,7 +633,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) + cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE); + cc->valid_nr_cpages = cc->nr_cpages; + +- cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages); ++ cc->cpages = page_array_alloc(sbi, cc->nr_cpages); + if (!cc->cpages) { + ret = -ENOMEM; + goto destroy_compress_ctx; +@@ -709,7 +708,7 @@ out_free_cpages: + if (cc->cpages[i]) + f2fs_compress_free_page(cc->cpages[i]); + } +- page_array_free(cc->inode, cc->cpages, cc->nr_cpages); ++ page_array_free(sbi, cc->cpages, cc->nr_cpages); + cc->cpages = NULL; + destroy_compress_ctx: + if (cops->destroy_compress_ctx) +@@ -727,7 +726,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, + + void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) + { +- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); ++ 
struct f2fs_sb_info *sbi = dic->sbi; + struct f2fs_inode_info *fi = F2FS_I(dic->inode); + const struct f2fs_compress_ops *cops = + f2fs_cops[fi->i_compress_algorithm]; +@@ -800,7 +799,7 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed, + { + struct decompress_io_ctx *dic = + (struct decompress_io_ctx *)page_private(page); +- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); ++ struct f2fs_sb_info *sbi = dic->sbi; + + dec_page_count(sbi, F2FS_RD_DATA); + +@@ -1302,7 +1301,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, + cic->magic = F2FS_COMPRESSED_PAGE_MAGIC; + cic->inode = inode; + atomic_set(&cic->pending_pages, cc->valid_nr_cpages); +- cic->rpages = page_array_alloc(cc->inode, cc->cluster_size); ++ cic->rpages = page_array_alloc(sbi, cc->cluster_size); + if (!cic->rpages) + goto out_put_cic; + +@@ -1395,13 +1394,13 @@ unlock_continue: + spin_unlock(&fi->i_size_lock); + + f2fs_put_rpages(cc); +- page_array_free(cc->inode, cc->cpages, cc->nr_cpages); ++ page_array_free(sbi, cc->cpages, cc->nr_cpages); + cc->cpages = NULL; + f2fs_destroy_compress_ctx(cc, false); + return 0; + + out_destroy_crypt: +- page_array_free(cc->inode, cic->rpages, cc->cluster_size); ++ page_array_free(sbi, cic->rpages, cc->cluster_size); + + for (--i; i >= 0; i--) + fscrypt_finalize_bounce_page(&cc->cpages[i]); +@@ -1419,7 +1418,7 @@ out_free: + f2fs_compress_free_page(cc->cpages[i]); + cc->cpages[i] = NULL; + } +- page_array_free(cc->inode, cc->cpages, cc->nr_cpages); ++ page_array_free(sbi, cc->cpages, cc->nr_cpages); + cc->cpages = NULL; + return -EAGAIN; + } +@@ -1449,7 +1448,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page) + end_page_writeback(cic->rpages[i]); + } + +- page_array_free(cic->inode, cic->rpages, cic->nr_rpages); ++ page_array_free(sbi, cic->rpages, cic->nr_rpages); + kmem_cache_free(cic_entry_slab, cic); + } + +@@ -1580,14 +1579,13 @@ static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi, + static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, + bool pre_alloc) + { +- const struct f2fs_compress_ops *cops = +- f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; ++ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm]; + int i; + +- if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) ++ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc)) + return 0; + +- dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); ++ dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size); + if (!dic->tpages) + return -ENOMEM; + +@@ -1617,10 +1615,9 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, + static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, + bool bypass_destroy_callback, bool pre_alloc) + { +- const struct f2fs_compress_ops *cops = +- f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; ++ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm]; + +- if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) ++ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc)) + return; + + if (!bypass_destroy_callback && cops->destroy_decompress_ctx) +@@ -1647,7 +1644,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) + if (!dic) + return ERR_PTR(-ENOMEM); + +- dic->rpages = page_array_alloc(cc->inode, cc->cluster_size); ++ dic->rpages = page_array_alloc(sbi, cc->cluster_size); + if (!dic->rpages) { + kmem_cache_free(dic_entry_slab, dic); + return ERR_PTR(-ENOMEM); +@@ -1655,6 +1652,8 @@ struct 
decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) + + dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; + dic->inode = cc->inode; ++ dic->sbi = sbi; ++ dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm; + atomic_set(&dic->remaining_pages, cc->nr_cpages); + dic->cluster_idx = cc->cluster_idx; + dic->cluster_size = cc->cluster_size; +@@ -1668,7 +1667,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) + dic->rpages[i] = cc->rpages[i]; + dic->nr_rpages = cc->cluster_size; + +- dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages); ++ dic->cpages = page_array_alloc(sbi, dic->nr_cpages); + if (!dic->cpages) { + ret = -ENOMEM; + goto out_free; +@@ -1698,6 +1697,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic, + bool bypass_destroy_callback) + { + int i; ++ /* use sbi in dic to avoid UFA of dic->inode*/ ++ struct f2fs_sb_info *sbi = dic->sbi; + + f2fs_release_decomp_mem(dic, bypass_destroy_callback, true); + +@@ -1709,7 +1710,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic, + continue; + f2fs_compress_free_page(dic->tpages[i]); + } +- page_array_free(dic->inode, dic->tpages, dic->cluster_size); ++ page_array_free(sbi, dic->tpages, dic->cluster_size); + } + + if (dic->cpages) { +@@ -1718,10 +1719,10 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic, + continue; + f2fs_compress_free_page(dic->cpages[i]); + } +- page_array_free(dic->inode, dic->cpages, dic->nr_cpages); ++ page_array_free(sbi, dic->cpages, dic->nr_cpages); + } + +- page_array_free(dic->inode, dic->rpages, dic->nr_rpages); ++ page_array_free(sbi, dic->rpages, dic->nr_rpages); + kmem_cache_free(dic_entry_slab, dic); + } + +@@ -1740,8 +1741,7 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task) + f2fs_free_dic(dic, false); + } else { + INIT_WORK(&dic->free_work, f2fs_late_free_dic); +- queue_work(F2FS_I_SB(dic->inode)->post_read_wq, +- &dic->free_work); ++ queue_work(dic->sbi->post_read_wq, &dic->free_work); + } + } + } +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index ab2ddd09d8131c..406243395b943c 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -1493,6 +1493,7 @@ struct compress_io_ctx { + struct decompress_io_ctx { + u32 magic; /* magic number to indicate page is compressed */ + struct inode *inode; /* inode the context belong to */ ++ struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ + pgoff_t cluster_idx; /* cluster index number */ + unsigned int cluster_size; /* page count in cluster */ + unsigned int log_cluster_size; /* log of cluster size */ +@@ -1533,6 +1534,7 @@ struct decompress_io_ctx { + + bool failed; /* IO error occurred before decompression? */ + bool need_verity; /* need fs-verity verification after decompression? 
*/ ++ unsigned char compress_algorithm; /* backup algorithm type */ + void *private; /* payload buffer for specified decompression algorithm */ + void *private2; /* extra payload buffer */ + struct work_struct verity_work; /* work to verify the decompressed pages */ +diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c +index 539a9038fb0dd2..e92a61e934e447 100644 +--- a/fs/smb/client/cached_dir.c ++++ b/fs/smb/client/cached_dir.c +@@ -16,6 +16,7 @@ static struct cached_fid *init_cached_dir(const char *path); + static void free_cached_dir(struct cached_fid *cfid); + static void smb2_close_cached_fid(struct kref *ref); + static void cfids_laundromat_worker(struct work_struct *work); ++static void close_cached_dir_locked(struct cached_fid *cfid); + + struct cached_dir_dentry { + struct list_head entry; +@@ -362,7 +363,7 @@ out: + * lease. Release one here, and the second below. + */ + cfid->has_lease = false; +- close_cached_dir(cfid); ++ close_cached_dir_locked(cfid); + } + spin_unlock(&cfids->cfid_list_lock); + +@@ -448,18 +449,52 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon, + spin_lock(&cfid->cfids->cfid_list_lock); + if (cfid->has_lease) { + cfid->has_lease = false; +- close_cached_dir(cfid); ++ close_cached_dir_locked(cfid); + } + spin_unlock(&cfid->cfids->cfid_list_lock); + close_cached_dir(cfid); + } + +- ++/** ++ * close_cached_dir - drop a reference of a cached dir ++ * ++ * The release function will be called with cfid_list_lock held to remove the ++ * cached dirs from the list before any other thread can take another @cfid ++ * ref. Must not be called with cfid_list_lock held; use ++ * close_cached_dir_locked() called instead. ++ * ++ * @cfid: cached dir ++ */ + void close_cached_dir(struct cached_fid *cfid) + { ++ lockdep_assert_not_held(&cfid->cfids->cfid_list_lock); + kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock); + } + ++/** ++ * close_cached_dir_locked - put a reference of a cached dir with ++ * cfid_list_lock held ++ * ++ * Calling close_cached_dir() with cfid_list_lock held has the potential effect ++ * of causing a deadlock if the invariant of refcount >= 2 is false. ++ * ++ * This function is used in paths that hold cfid_list_lock and expect at least ++ * two references. If that invariant is violated, WARNs and returns without ++ * dropping a reference; the final put must still go through ++ * close_cached_dir(). ++ * ++ * @cfid: cached dir ++ */ ++static void close_cached_dir_locked(struct cached_fid *cfid) ++{ ++ lockdep_assert_held(&cfid->cfids->cfid_list_lock); ++ ++ if (WARN_ON(kref_read(&cfid->refcount) < 2)) ++ return; ++ ++ kref_put(&cfid->refcount, smb2_close_cached_fid); ++} ++ + /* + * Called from cifs_kill_sb when we unmount a share + */ +@@ -692,7 +727,7 @@ static void cfids_invalidation_worker(struct work_struct *work) + list_for_each_entry_safe(cfid, q, &entry, entry) { + list_del(&cfid->entry); + /* Drop the ref-count acquired in invalidate_all_cached_dirs */ +- kref_put(&cfid->refcount, smb2_close_cached_fid); ++ close_cached_dir(cfid); + } + } + +diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c +index 2744d5580d195f..0461f89c4852ed 100644 +--- a/fs/smb/client/cifsfs.c ++++ b/fs/smb/client/cifsfs.c +@@ -133,7 +133,7 @@ module_param(enable_oplocks, bool, 0644); + MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. 
Default: y/Y/1"); + + module_param(enable_gcm_256, bool, 0644); +-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0"); ++MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1"); + + module_param(require_gcm_256, bool, 0644); + MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0"); +diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c +index cf233cb9c19436..a64c0b0dbec781 100644 +--- a/fs/smb/client/fs_context.c ++++ b/fs/smb/client/fs_context.c +@@ -1726,6 +1726,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, + ctx->password = NULL; + kfree_sensitive(ctx->password2); + ctx->password2 = NULL; ++ kfree(ctx->source); ++ ctx->source = NULL; ++ kfree(fc->source); ++ fc->source = NULL; + return -EINVAL; + } + +diff --git a/include/linux/array_size.h b/include/linux/array_size.h +new file mode 100644 +index 00000000000000..06d7d83196ca30 +--- /dev/null ++++ b/include/linux/array_size.h +@@ -0,0 +1,13 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _LINUX_ARRAY_SIZE_H ++#define _LINUX_ARRAY_SIZE_H ++ ++#include ++ ++/** ++ * ARRAY_SIZE - get the number of elements in array @arr ++ * @arr: array to be sized ++ */ ++#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) ++ ++#endif /* _LINUX_ARRAY_SIZE_H */ +diff --git a/include/linux/ata.h b/include/linux/ata.h +index 792e10a09787f8..c9013e472aa3d5 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -566,6 +566,7 @@ struct ata_bmdma_prd { + #define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8)) + #define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) + #define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) ++#define ata_id_is_locked(id) (((id)[ATA_ID_DLF] & 0x7) == 0x7) + #define ata_id_has_atapi_AN(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ +diff --git a/include/linux/kernel.h b/include/linux/kernel.h +index cee8fe87e9f4f0..d9ad21058eed9d 100644 +--- a/include/linux/kernel.h ++++ b/include/linux/kernel.h +@@ -13,6 +13,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -50,12 +51,6 @@ + #define READ 0 + #define WRITE 1 + +-/** +- * ARRAY_SIZE - get the number of elements in array @arr +- * @arr: array to be sized +- */ +-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +- + #define PTR_IF(cond, ptr) ((cond) ? 
(ptr) : NULL)
+ 
+ #define u64_to_user_ptr(x) ( \
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 5077776e995e01..ce137830a0b99c 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -2,6 +2,7 @@
+ #ifndef _LINUX_STRING_H_
+ #define _LINUX_STRING_H_
+ 
++#include <linux/array_size.h>
+ #include <linux/compiler.h> /* for inline */
+ #include <linux/types.h> /* for size_t */
+ #include <linux/stddef.h> /* for NULL */
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 6c642ea1805041..66527376ca973a 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -474,6 +474,12 @@ tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+ ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+ }
+ 
++static inline void
++tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
++{
++ atomic64_set(&resync_async->req, 0);
++}
++
+ static inline void
+ tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+ {
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index fd550c0b563450..84a1c8c861d292 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -462,7 +462,8 @@ static inline int xfrm_af2proto(unsigned int family)
+ 
+ static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
+ {
+- if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
++ if ((x->sel.family != AF_UNSPEC) ||
++ (ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
+ (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
+ return &x->inner_mode;
+ else
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index e97aeda3a86b55..b6024fc9034906 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -460,10 +460,6 @@ again:
+ * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
+ * trampoline again, and retry register.
+ */
+- /* reset fops->func and fops->trampoline for re-register */
+- tr->fops->func = NULL;
+- tr->fops->trampoline = 0;
+-
+ /* reset im->image memory attr for arch_prepare_bpf_trampoline */
+ set_memory_nx((long)im->image, 1);
+ set_memory_rw((long)im->image, 1);
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index b7246b7171b73b..88024cb22a9dbe 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -1132,7 +1132,7 @@ static int __crash_shrink_memory(struct resource *old_res,
+ old_res->start = 0;
+ old_res->end = 0;
+ } else {
+- crashk_res.end = ram_res->start - 1;
++ old_res->end = ram_res->start - 1;
+ }
+ 
+ crash_free_reserved_phys_range(ram_res->start, ram_res->end);
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 63a8ce7177dd49..4a2aeaa85af314 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1413,10 +1413,11 @@ static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
+ 
+ base = lock_timer_base(timer, &flags);
+ 
+- if (base->running_timer != timer)
++ if (base->running_timer != timer) {
+ ret = detach_if_pending(timer, base, true);
+- if (shutdown)
+- timer->function = NULL;
++ if (shutdown)
++ timer->function = NULL;
++ }
+ 
+ raw_spin_unlock_irqrestore(&base->lock, flags);
+ 
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 398992597685b6..44e31eebb7aaab 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5370,6 +5370,17 @@ static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long
+ }
+ }
+ 
++static void reset_direct(struct ftrace_ops *ops, unsigned long addr)
++{
++ struct ftrace_hash *hash = ops->func_hash->filter_hash;
++
++ remove_direct_functions_hash(hash, addr);
++
++ /* cleanup for possible another register call */
++ ops->func = NULL;
++ ops->trampoline = 0;
++}
++
+ /**
+ * register_ftrace_direct - Call a custom trampoline directly
+ * for multiple functions registered in @ops
+@@ -5465,6 +5476,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+ ops->direct_call = addr;
+ 
+ err = register_ftrace_function_nolock(ops);
++ if (err)
++ reset_direct(ops, addr);
+ 
+ out_unlock:
+ mutex_unlock(&direct_mutex);
+@@ -5497,7 +5510,6 @@ EXPORT_SYMBOL_GPL(register_ftrace_direct);
+ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters)
+ {
+- struct ftrace_hash *hash = ops->func_hash->filter_hash;
+ int err;
+ 
+ if (check_direct_multi(ops))
+@@ -5507,13 +5519,9 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ 
+ mutex_lock(&direct_mutex);
+ err = unregister_ftrace_function(ops);
+- remove_direct_functions_hash(hash, addr);
++ reset_direct(ops, addr);
+ mutex_unlock(&direct_mutex);
+ 
+- /* cleanup for possible another register call */
+- ops->func = NULL;
+- ops->trampoline = 0;
+-
+ if (free_filters)
+ ftrace_free_filter(ops);
+ return err;
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 6f7a2c9cf922a2..e440ffd0e99962 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -62,6 +62,8 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/maple_tree.h>
+ 
++#define TP_FCT tracepoint_string(__func__)
++
+ #define MA_ROOT_PARENT 1
+ 
+ /*
+@@ -2990,7 +2992,7 @@ static inline int mas_rebalance(struct ma_state *mas,
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ 
+- trace_ma_op(__func__, mas);
++ trace_ma_op(TP_FCT, mas);
+ 
+ /*
+ * Rebalancing occurs if a node is insufficient.
Data is rebalanced +@@ -3365,7 +3367,7 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) + MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); + MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); + +- trace_ma_op(__func__, mas); ++ trace_ma_op(TP_FCT, mas); + mas->depth = mas_mt_height(mas); + /* Allocation failures will happen early. */ + mas_node_count(mas, 1 + mas->depth * 2); +@@ -3598,7 +3600,7 @@ static bool mas_is_span_wr(struct ma_wr_state *wr_mas) + return false; + } + +- trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); ++ trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry); + return true; + } + +@@ -3845,7 +3847,7 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) + * of data may happen. + */ + mas = wr_mas->mas; +- trace_ma_op(__func__, mas); ++ trace_ma_op(TP_FCT, mas); + + if (unlikely(!mas->index && mas->last == ULONG_MAX)) + return mas_new_root(mas, wr_mas->entry); +@@ -3996,7 +3998,7 @@ done: + } else { + memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); + } +- trace_ma_write(__func__, mas, 0, wr_mas->entry); ++ trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); + mas_update_gap(mas); + return true; + } +@@ -4042,7 +4044,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) + return false; + } + +- trace_ma_write(__func__, mas, 0, wr_mas->entry); ++ trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); + /* + * Only update gap when the new entry is empty or there is an empty + * entry in the original two ranges. +@@ -4178,7 +4180,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas, + if (!wr_mas->content || !wr_mas->entry) + mas_update_gap(mas); + +- trace_ma_write(__func__, mas, new_end, wr_mas->entry); ++ trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry); + return true; + } + +@@ -4192,7 +4194,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas) + { + struct maple_big_node b_node; + +- trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); ++ trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry); + memset(&b_node, 0, sizeof(struct maple_big_node)); + mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); + mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end); +@@ -5395,7 +5397,7 @@ void *mas_store(struct ma_state *mas, void *entry) + { + MA_WR_STATE(wr_mas, mas, entry); + +- trace_ma_write(__func__, mas, 0, entry); ++ trace_ma_write(TP_FCT, mas, 0, entry); + #ifdef CONFIG_DEBUG_MAPLE_TREE + if (MAS_WARN_ON(mas, mas->index > mas->last)) + pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); +@@ -5433,7 +5435,7 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) + MA_WR_STATE(wr_mas, mas, entry); + + mas_wr_store_setup(&wr_mas); +- trace_ma_write(__func__, mas, 0, entry); ++ trace_ma_write(TP_FCT, mas, 0, entry); + retry: + mas_wr_store_entry(&wr_mas); + if (unlikely(mas_nomem(mas, gfp))) +@@ -5457,7 +5459,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry) + MA_WR_STATE(wr_mas, mas, entry); + + mas_wr_store_setup(&wr_mas); +- trace_ma_write(__func__, mas, 0, entry); ++ trace_ma_write(TP_FCT, mas, 0, entry); + mas_wr_store_entry(&wr_mas); + MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); + mas_destroy(mas); +@@ -6245,7 +6247,7 @@ void *mtree_load(struct maple_tree *mt, unsigned long index) + MA_STATE(mas, mt, index, index); + void *entry; + +- trace_ma_read(__func__, &mas); ++ trace_ma_read(TP_FCT, &mas); + rcu_read_lock(); + retry: + entry = mas_start(&mas); +@@ -6288,7 +6290,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned 
long index, + MA_STATE(mas, mt, index, last); + MA_WR_STATE(wr_mas, &mas, entry); + +- trace_ma_write(__func__, &mas, 0, entry); ++ trace_ma_write(TP_FCT, &mas, 0, entry); + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + +@@ -6470,7 +6472,7 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index) + void *entry = NULL; + + MA_STATE(mas, mt, index, index); +- trace_ma_op(__func__, &mas); ++ trace_ma_op(TP_FCT, &mas); + + mtree_lock(mt); + entry = mas_erase(&mas); +@@ -6536,7 +6538,7 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) + unsigned long copy = *index; + #endif + +- trace_ma_read(__func__, &mas); ++ trace_ma_read(TP_FCT, &mas); + + if ((*index) > max) + return NULL; +diff --git a/mm/mempool.c b/mm/mempool.c +index 734bcf5afbb783..82e4ab399ed1c6 100644 +--- a/mm/mempool.c ++++ b/mm/mempool.c +@@ -64,10 +64,20 @@ static void check_element(mempool_t *pool, void *element) + } else if (pool->free == mempool_free_pages) { + /* Mempools backed by page allocator */ + int order = (int)(long)pool->pool_data; +- void *addr = kmap_atomic((struct page *)element); + +- __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); +- kunmap_atomic(addr); ++#ifdef CONFIG_HIGHMEM ++ for (int i = 0; i < (1 << order); i++) { ++ struct page *page = (struct page *)element; ++ void *addr = kmap_local_page(page + i); ++ ++ __check_element(pool, addr, PAGE_SIZE); ++ kunmap_local(addr); ++ } ++#else ++ void *addr = page_address((struct page *)element); ++ ++ __check_element(pool, addr, PAGE_SIZE << order); ++#endif + } + } + +@@ -89,10 +99,20 @@ static void poison_element(mempool_t *pool, void *element) + } else if (pool->alloc == mempool_alloc_pages) { + /* Mempools backed by page allocator */ + int order = (int)(long)pool->pool_data; +- void *addr = kmap_atomic((struct page *)element); + +- __poison_element(addr, 1UL << (PAGE_SHIFT + order)); +- kunmap_atomic(addr); ++#ifdef CONFIG_HIGHMEM ++ for (int i = 0; i < (1 << order); i++) { ++ struct page *page = (struct page *)element; ++ void *addr = kmap_local_page(page + i); ++ ++ __poison_element(addr, PAGE_SIZE); ++ kunmap_local(addr); ++ } ++#else ++ void *addr = page_address((struct page *)element); ++ ++ __poison_element(addr, PAGE_SIZE << order); ++#endif + } + } + #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ +diff --git a/mm/shmem.c b/mm/shmem.c +index 2260def68090c7..d24398f56660ce 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -126,8 +126,7 @@ struct shmem_options { + #define SHMEM_SEEN_INODES 2 + #define SHMEM_SEEN_HUGE 4 + #define SHMEM_SEEN_INUMS 8 +-#define SHMEM_SEEN_NOSWAP 16 +-#define SHMEM_SEEN_QUOTA 32 ++#define SHMEM_SEEN_QUOTA 16 + }; + + #ifdef CONFIG_TMPFS +@@ -4004,7 +4003,6 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) + "Turning off swap in unprivileged tmpfs mounts unsupported"); + } + ctx->noswap = true; +- ctx->seen |= SHMEM_SEEN_NOSWAP; + break; + case Opt_quota: + if (fc->user_ns != &init_user_ns) +@@ -4154,14 +4152,15 @@ static int shmem_reconfigure(struct fs_context *fc) + err = "Current inum too high to switch to 32-bit inums"; + goto out; + } +- if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { ++ ++ /* ++ * "noswap" doesn't use fsparam_flag_no, i.e. there's no "swap" ++ * counterpart for (re-)enabling swap. 
++ */ ++ if (ctx->noswap && !sbinfo->noswap) { + err = "Cannot disable swap on remount"; + goto out; + } +- if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { +- err = "Cannot enable swap on remount if it was disabled on first mount"; +- goto out; +- } + + if (ctx->seen & SHMEM_SEEN_QUOTA && + !sb_any_quota_loaded(fc->root->d_sb)) { +diff --git a/net/devlink/rate.c b/net/devlink/rate.c +index dff1593b8406ac..e7e80af4abaf65 100644 +--- a/net/devlink/rate.c ++++ b/net/devlink/rate.c +@@ -702,13 +702,15 @@ void devl_rate_nodes_destroy(struct devlink *devlink) + if (!devlink_rate->parent) + continue; + +- refcount_dec(&devlink_rate->parent->refcnt); + if (devlink_rate_is_leaf(devlink_rate)) + ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv, + NULL, NULL); + else if (devlink_rate_is_node(devlink_rate)) + ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv, + NULL, NULL); ++ ++ refcount_dec(&devlink_rate->parent->refcnt); ++ devlink_rate->parent = NULL; + } + list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) { + if (devlink_rate_is_node(devlink_rate)) { +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c +index 10e96ed6c9e395..11e33a43126749 100644 +--- a/net/ipv4/esp4_offload.c ++++ b/net/ipv4/esp4_offload.c +@@ -111,8 +111,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x, + struct sk_buff *skb, + netdev_features_t features) + { +- __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6) +- : htons(ETH_P_IP); ++ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x, ++ XFRM_MODE_SKB_CB(skb)->protocol); ++ __be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6) ++ : htons(ETH_P_IP); + + return skb_eth_gso_segment(skb, features, type); + } +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c +index a189e08370a5ea..438f9cbdca299a 100644 +--- a/net/ipv6/esp6_offload.c ++++ b/net/ipv6/esp6_offload.c +@@ -145,8 +145,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x, + struct sk_buff *skb, + netdev_features_t features) + { +- __be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP) +- : htons(ETH_P_IPV6); ++ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x, ++ XFRM_MODE_SKB_CB(skb)->protocol); ++ __be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP) ++ : htons(ETH_P_IPV6); + + return skb_eth_gso_segment(skb, features, type); + } +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index b245abd08c8241..f1edae5f90790b 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -839,8 +839,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, + + opts->suboptions = 0; + ++ /* Force later mptcp_write_options(), but do not use any actual ++ * option space. 
++ */ + if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb))) +- return false; ++ return true; + + if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) { + if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) || +@@ -1041,6 +1044,31 @@ static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una) + msk->snd_una = new_snd_una; + } + ++static void rwin_update(struct mptcp_sock *msk, struct sock *ssk, ++ struct sk_buff *skb) ++{ ++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); ++ struct tcp_sock *tp = tcp_sk(ssk); ++ u64 mptcp_rcv_wnd; ++ ++ /* Avoid touching extra cachelines if TCP is going to accept this ++ * skb without filling the TCP-level window even with a possibly ++ * outdated mptcp-level rwin. ++ */ ++ if (!skb->len || skb->len < tcp_receive_window(tp)) ++ return; ++ ++ mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent); ++ if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent)) ++ return; ++ ++ /* Some other subflow grew the mptcp-level rwin since rcv_wup, ++ * resync. ++ */ ++ tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent; ++ subflow->rcv_wnd_sent = mptcp_rcv_wnd; ++} ++ + static void ack_update_msk(struct mptcp_sock *msk, + struct sock *ssk, + struct mptcp_options_received *mp_opt) +@@ -1207,6 +1235,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) + */ + if (mp_opt.use_ack) + ack_update_msk(msk, sk, &mp_opt); ++ rwin_update(msk, sk, skb); + + /* Zero-data-length packets are dropped by the caller and not + * propagated to the MPTCP layer, so the skb extension does not +@@ -1293,6 +1322,10 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th) + + if (rcv_wnd_new != rcv_wnd_old) { + raise_win: ++ /* The msk-level rcv wnd is after the tcp level one, ++ * sync the latter. 
++ */ ++ rcv_wnd_new = rcv_wnd_old; + win = rcv_wnd_old - ack_seq; + tp->rcv_wnd = min_t(u64, win, U32_MAX); + new_win = tp->rcv_wnd; +@@ -1316,6 +1349,21 @@ raise_win: + + update_wspace: + WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); ++ subflow->rcv_wnd_sent = rcv_wnd_new; ++} ++ ++static void mptcp_track_rwin(struct tcp_sock *tp) ++{ ++ const struct sock *ssk = (const struct sock *)tp; ++ struct mptcp_subflow_context *subflow; ++ struct mptcp_sock *msk; ++ ++ if (!ssk) ++ return; ++ ++ subflow = mptcp_subflow_ctx(ssk); ++ msk = mptcp_sk(subflow->conn); ++ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); + } + + __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum) +@@ -1610,6 +1658,10 @@ mp_rst: + opts->reset_transient, + opts->reset_reason); + return; ++ } else if (unlikely(!opts->suboptions)) { ++ /* Fallback to TCP */ ++ mptcp_track_rwin(tp); ++ return; + } + + if (OPTION_MPTCP_PRIO & opts->suboptions) { +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 7fd6714f41fe79..8dbe826555d29f 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -29,6 +29,7 @@ struct mptcp_pm_add_entry { + u8 retrans_times; + struct timer_list add_timer; + struct mptcp_sock *sock; ++ struct rcu_head rcu; + }; + + struct pm_nl_pernet { +@@ -344,22 +345,27 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk, + { + struct mptcp_pm_add_entry *entry; + struct sock *sk = (struct sock *)msk; +- struct timer_list *add_timer = NULL; ++ bool stop_timer = false; ++ ++ rcu_read_lock(); + + spin_lock_bh(&msk->pm.lock); + entry = mptcp_lookup_anno_list_by_saddr(msk, addr); + if (entry && (!check_id || entry->addr.id == addr->id)) { + entry->retrans_times = ADD_ADDR_RETRANS_MAX; +- add_timer = &entry->add_timer; ++ stop_timer = true; + } + if (!check_id && entry) + list_del(&entry->list); + spin_unlock_bh(&msk->pm.lock); + +- /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */ +- if (add_timer) +- sk_stop_timer_sync(sk, add_timer); ++ /* Note: entry might have been removed by another thread. ++ * We hold rcu_read_lock() to ensure it is not freed under us. 
++ */ ++ if (stop_timer) ++ sk_stop_timer_sync(sk, &entry->add_timer); + ++ rcu_read_unlock(); + return entry; + } + +@@ -415,7 +421,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk) + + list_for_each_entry_safe(entry, tmp, &free_list, list) { + sk_stop_timer_sync(sk, &entry->add_timer); +- kfree(entry); ++ kfree_rcu(entry, rcu); + } + } + +@@ -1573,7 +1579,7 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk, + + entry = mptcp_pm_del_add_timer(msk, addr, false); + if (entry) { +- kfree(entry); ++ kfree_rcu(entry, rcu); + return true; + } + +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 6b4b0c40570cef..73b3a44b183fc9 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -57,11 +57,13 @@ static u64 mptcp_wnd_end(const struct mptcp_sock *msk) + + static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk) + { ++ unsigned short family = READ_ONCE(sk->sk_family); ++ + #if IS_ENABLED(CONFIG_MPTCP_IPV6) +- if (sk->sk_prot == &tcpv6_prot) ++ if (family == AF_INET6) + return &inet6_stream_ops; + #endif +- WARN_ON_ONCE(sk->sk_prot != &tcp_prot); ++ WARN_ON_ONCE(family != AF_INET); + return &inet_stream_ops; + } + +@@ -902,6 +904,13 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) + if (sk->sk_state != TCP_ESTABLISHED) + return false; + ++ /* The caller possibly is not holding the msk socket lock, but ++ * in the fallback case only the current subflow is touching ++ * the OoO queue. ++ */ ++ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) ++ return false; ++ + spin_lock_bh(&msk->fallback_lock); + if (!msk->allow_subflows) { + spin_unlock_bh(&msk->fallback_lock); +@@ -959,14 +968,19 @@ static void mptcp_reset_rtx_timer(struct sock *sk) + + bool mptcp_schedule_work(struct sock *sk) + { +- if (inet_sk_state_load(sk) != TCP_CLOSE && +- schedule_work(&mptcp_sk(sk)->work)) { +- /* each subflow already holds a reference to the sk, and the +- * workqueue is invoked by a subflow, so sk can't go away here. +- */ +- sock_hold(sk); ++ if (inet_sk_state_load(sk) == TCP_CLOSE) ++ return false; ++ ++ /* Get a reference on this socket, mptcp_worker() will release it. ++ * As mptcp_worker() might complete before us, we can not avoid ++ * a sock_hold()/sock_put() if schedule_work() returns false. 
++ */ ++ sock_hold(sk); ++ ++ if (schedule_work(&mptcp_sk(sk)->work)) + return true; +- } ++ ++ sock_put(sk); + return false; + } + +@@ -2578,7 +2592,8 @@ static void __mptcp_close_subflow(struct sock *sk) + + if (ssk_state != TCP_CLOSE && + (ssk_state != TCP_CLOSE_WAIT || +- inet_sk_state_load(sk) != TCP_ESTABLISHED)) ++ inet_sk_state_load(sk) != TCP_ESTABLISHED || ++ __mptcp_check_fallback(msk))) + continue; + + /* 'subflow_data_ready' will re-sched once rx queue is empty */ +@@ -2814,7 +2829,11 @@ static void mptcp_worker(struct work_struct *work) + __mptcp_close_subflow(sk); + + if (mptcp_close_tout_expired(sk)) { ++ struct mptcp_subflow_context *subflow, *tmp; ++ + mptcp_do_fastclose(sk); ++ mptcp_for_each_subflow_safe(msk, subflow, tmp) ++ __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0); + mptcp_close_wake_up(sk); + } + +@@ -3242,7 +3261,8 @@ static int mptcp_disconnect(struct sock *sk, int flags) + /* msk->subflow is still intact, the following will not free the first + * subflow + */ +- mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); ++ mptcp_do_fastclose(sk); ++ mptcp_destroy_common(msk); + + /* The first subflow is already in TCP_CLOSE status, the following + * can't overlap with a fallback anymore +@@ -3424,7 +3444,7 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) + msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; + } + +-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) ++void mptcp_destroy_common(struct mptcp_sock *msk) + { + struct mptcp_subflow_context *subflow, *tmp; + struct sock *sk = (struct sock *)msk; +@@ -3433,7 +3453,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) + + /* join list will be eventually flushed (with rst) at sock lock release time */ + mptcp_for_each_subflow_safe(msk, subflow, tmp) +- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); ++ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0); + + /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ + mptcp_data_lock(sk); +@@ -3458,7 +3478,7 @@ static void mptcp_destroy(struct sock *sk) + + /* allow the following to close even the initial subflow */ + msk->free_first = 1; +- mptcp_destroy_common(msk, 0); ++ mptcp_destroy_common(msk); + sk_sockets_allocated_dec(sk); + } + +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index dc98f588c8a829..5d218c27c07767 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -478,6 +478,7 @@ struct mptcp_subflow_context { + u64 remote_key; + u64 idsn; + u64 map_seq; ++ u64 rcv_wnd_sent; + u32 snd_isn; + u32 token; + u32 rel_write_seq; +@@ -870,7 +871,7 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk) + local_bh_enable(); + } + +-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags); ++void mptcp_destroy_common(struct mptcp_sock *msk); + + #define MPTCP_TOKEN_MAX_RETRIES 4 + +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index dfee1890c841bb..b71675f46d1078 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -2095,6 +2095,10 @@ void __init mptcp_subflow_init(void) + tcp_prot_override = tcp_prot; + tcp_prot_override.release_cb = tcp_release_cb_override; + tcp_prot_override.diag_destroy = tcp_abort_override; ++#ifdef CONFIG_BPF_SYSCALL ++ /* Disable sockmap processing for subflows */ ++ tcp_prot_override.psock_update_sk_prot = NULL; ++#endif + + #if IS_ENABLED(CONFIG_MPTCP_IPV6) + /* In struct mptcp_subflow_request_sock, we assume the TCP request 
sock +@@ -2132,6 +2136,10 @@ void __init mptcp_subflow_init(void) + tcpv6_prot_override = tcpv6_prot; + tcpv6_prot_override.release_cb = tcp_release_cb_override; + tcpv6_prot_override.diag_destroy = tcp_abort_override; ++#ifdef CONFIG_BPF_SYSCALL ++ /* Disable sockmap processing for subflows */ ++ tcpv6_prot_override.psock_update_sk_prot = NULL; ++#endif + #endif + + mptcp_diag_subflow_init(&subflow_ulp_ops); +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 10c646b32b9d08..0ea4fc2a755bfd 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -597,69 +597,6 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, + return 0; + } + +-static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key, +- const struct nlattr *a) +-{ +- struct nshhdr *nh; +- size_t length; +- int err; +- u8 flags; +- u8 ttl; +- int i; +- +- struct ovs_key_nsh key; +- struct ovs_key_nsh mask; +- +- err = nsh_key_from_nlattr(a, &key, &mask); +- if (err) +- return err; +- +- /* Make sure the NSH base header is there */ +- if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN)) +- return -ENOMEM; +- +- nh = nsh_hdr(skb); +- length = nsh_hdr_len(nh); +- +- /* Make sure the whole NSH header is there */ +- err = skb_ensure_writable(skb, skb_network_offset(skb) + +- length); +- if (unlikely(err)) +- return err; +- +- nh = nsh_hdr(skb); +- skb_postpull_rcsum(skb, nh, length); +- flags = nsh_get_flags(nh); +- flags = OVS_MASKED(flags, key.base.flags, mask.base.flags); +- flow_key->nsh.base.flags = flags; +- ttl = nsh_get_ttl(nh); +- ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl); +- flow_key->nsh.base.ttl = ttl; +- nsh_set_flags_and_ttl(nh, flags, ttl); +- nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr, +- mask.base.path_hdr); +- flow_key->nsh.base.path_hdr = nh->path_hdr; +- switch (nh->mdtype) { +- case NSH_M_TYPE1: +- for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) { +- nh->md1.context[i] = +- OVS_MASKED(nh->md1.context[i], key.context[i], +- mask.context[i]); +- } +- memcpy(flow_key->nsh.context, nh->md1.context, +- sizeof(nh->md1.context)); +- break; +- case NSH_M_TYPE2: +- memset(flow_key->nsh.context, 0, +- sizeof(flow_key->nsh.context)); +- break; +- default: +- return -EINVAL; +- } +- skb_postpush_rcsum(skb, nh, length); +- return 0; +-} +- + /* Must follow skb_ensure_writable() since that can move the skb data. */ + static void set_tp_port(struct sk_buff *skb, __be16 *port, + __be16 new_port, __sum16 *check) +@@ -1143,10 +1080,6 @@ static int execute_masked_set_action(struct sk_buff *skb, + get_mask(a, struct ovs_key_ethernet *)); + break; + +- case OVS_KEY_ATTR_NSH: +- err = set_nsh(skb, flow_key, a); +- break; +- + case OVS_KEY_ATTR_IPV4: + err = set_ipv4(skb, flow_key, nla_data(a), + get_mask(a, struct ovs_key_ipv4 *)); +@@ -1183,6 +1116,7 @@ static int execute_masked_set_action(struct sk_buff *skb, + case OVS_KEY_ATTR_CT_LABELS: + case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: + case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: ++ case OVS_KEY_ATTR_NSH: + err = -EINVAL; + break; + } +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c +index 089ab1826e1d5e..836e8e705d40e7 100644 +--- a/net/openvswitch/flow_netlink.c ++++ b/net/openvswitch/flow_netlink.c +@@ -1292,6 +1292,11 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, + return 0; + } + ++/* ++ * Constructs NSH header 'nh' from attributes of OVS_ACTION_ATTR_PUSH_NSH, ++ * where 'nh' points to a memory block of 'size' bytes. 
It's assumed that ++ * attributes were previously validated with validate_push_nsh(). ++ */ + int nsh_hdr_from_nlattr(const struct nlattr *attr, + struct nshhdr *nh, size_t size) + { +@@ -1301,8 +1306,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr, + u8 ttl = 0; + int mdlen = 0; + +- /* validate_nsh has check this, so we needn't do duplicate check here +- */ + if (size < NSH_BASE_HDR_LEN) + return -ENOBUFS; + +@@ -1346,46 +1349,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr, + return 0; + } + +-int nsh_key_from_nlattr(const struct nlattr *attr, +- struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask) +-{ +- struct nlattr *a; +- int rem; +- +- /* validate_nsh has check this, so we needn't do duplicate check here +- */ +- nla_for_each_nested(a, attr, rem) { +- int type = nla_type(a); +- +- switch (type) { +- case OVS_NSH_KEY_ATTR_BASE: { +- const struct ovs_nsh_key_base *base = nla_data(a); +- const struct ovs_nsh_key_base *base_mask = base + 1; +- +- nsh->base = *base; +- nsh_mask->base = *base_mask; +- break; +- } +- case OVS_NSH_KEY_ATTR_MD1: { +- const struct ovs_nsh_key_md1 *md1 = nla_data(a); +- const struct ovs_nsh_key_md1 *md1_mask = md1 + 1; +- +- memcpy(nsh->context, md1->context, sizeof(*md1)); +- memcpy(nsh_mask->context, md1_mask->context, +- sizeof(*md1_mask)); +- break; +- } +- case OVS_NSH_KEY_ATTR_MD2: +- /* Not supported yet */ +- return -ENOTSUPP; +- default: +- return -EINVAL; +- } +- } +- +- return 0; +-} +- + static int nsh_key_put_from_nlattr(const struct nlattr *attr, + struct sw_flow_match *match, bool is_mask, + bool is_push_nsh, bool log) +@@ -2825,17 +2788,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, + return err; + } + +-static bool validate_nsh(const struct nlattr *attr, bool is_mask, +- bool is_push_nsh, bool log) ++static bool validate_push_nsh(const struct nlattr *attr, bool log) + { + struct sw_flow_match match; + struct sw_flow_key key; +- int ret = 0; + + ovs_match_init(&match, &key, true, NULL); +- ret = nsh_key_put_from_nlattr(attr, &match, is_mask, +- is_push_nsh, log); +- return !ret; ++ return !nsh_key_put_from_nlattr(attr, &match, false, true, log); + } + + /* Return false if there are any non-masked bits set. 
+@@ -2983,13 +2942,6 @@ static int validate_set(const struct nlattr *a, + + break; + +- case OVS_KEY_ATTR_NSH: +- if (eth_type != htons(ETH_P_NSH)) +- return -EINVAL; +- if (!validate_nsh(nla_data(a), masked, false, log)) +- return -EINVAL; +- break; +- + default: + return -EINVAL; + } +@@ -3399,7 +3351,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + return -EINVAL; + } + mac_proto = MAC_PROTO_NONE; +- if (!validate_nsh(nla_data(a), false, true, true)) ++ if (!validate_push_nsh(nla_data(a), log)) + return -EINVAL; + break; + +diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h +index fe7f77fc5f1890..ff8cdecbe34654 100644 +--- a/net/openvswitch/flow_netlink.h ++++ b/net/openvswitch/flow_netlink.h +@@ -65,8 +65,6 @@ int ovs_nla_put_actions(const struct nlattr *attr, + void ovs_nla_free_flow_actions(struct sw_flow_actions *); + void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *); + +-int nsh_key_from_nlattr(const struct nlattr *attr, struct ovs_key_nsh *nsh, +- struct ovs_key_nsh *nsh_mask); + int nsh_hdr_from_nlattr(const struct nlattr *attr, struct nshhdr *nh, + size_t size); + +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c +index 8c94c926606ad5..4f72fd26ab4058 100644 +--- a/net/tls/tls_device.c ++++ b/net/tls/tls_device.c +@@ -727,8 +727,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, + /* shouldn't get to wraparound: + * too long in async stage, something bad happened + */ +- if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) ++ if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) { ++ tls_offload_rx_resync_async_request_cancel(resync_async); + return false; ++ } + + /* asynchronous stage: log all headers seq such that + * req_seq <= seq <= end_seq, and wait for real resync request +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index 64790062cfa2eb..ca1289e64bcc8a 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -1550,18 +1550,40 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr, + timeout = schedule_timeout(timeout); + lock_sock(sk); + +- if (signal_pending(current)) { +- err = sock_intr_errno(timeout); +- sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE; +- sock->state = SS_UNCONNECTED; +- vsock_transport_cancel_pkt(vsk); +- vsock_remove_connected(vsk); +- goto out_wait; +- } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) { +- err = -ETIMEDOUT; ++ /* Connection established. Whatever happens to socket once we ++ * release it, that's not connect()'s concern. No need to go ++ * into signal and timeout handling. Call it a day. ++ * ++ * Note that allowing to "reset" an already established socket ++ * here is racy and insecure. ++ */ ++ if (sk->sk_state == TCP_ESTABLISHED) ++ break; ++ ++ /* If connection was _not_ established and a signal/timeout came ++ * to be, we want the socket's state reset. User space may want ++ * to retry. ++ * ++ * sk_state != TCP_ESTABLISHED implies that socket is not on ++ * vsock_connected_table. We keep the binding and the transport ++ * assigned. ++ */ ++ if (signal_pending(current) || timeout == 0) { ++ err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout); ++ ++ /* Listener might have already responded with ++ * VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our ++ * sk_state == TCP_SYN_SENT, which hereby we break. ++ * In such case VIRTIO_VSOCK_OP_RST will follow. 
++ */
+ sk->sk_state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
++
++ /* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
++ * transport->connect().
++ */
+ vsock_transport_cancel_pkt(vsk);
++
+ goto out_wait;
+ }
+ 
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 9e0366ea86f4a5..4c695f29dfed70 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -4208,6 +4208,9 @@ EXPORT_SYMBOL(regulatory_pre_cac_allowed);
+ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
+ {
+ struct wireless_dev *wdev;
++
++ wiphy_lock(&rdev->wiphy);
++
+ /* If we finished CAC or received radar, we should end any
+ * CAC running on the same channels.
+ * the check !cfg80211_chandef_dfs_usable contain 2 options:
+@@ -4231,6 +4234,8 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
+ if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
+ rdev_end_cac(rdev, wdev->netdev);
+ }
++
++ wiphy_unlock(&rdev->wiphy);
+ }
+ 
+ void regulatory_propagate_dfs_state(struct wiphy *wiphy,
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index a30538a980cc7f..9277dd4ed541ab 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -766,8 +766,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
+ /* Exclusive direct xmit for tunnel mode, as
+ * some filtering or matching rules may apply
+ * in transport mode.
++ * Locally generated packets also require
++ * the normal XFRM path for L2 header setup,
++ * as the hardware needs the L2 header to match
++ * for encryption, so skip direct output as well.
+ */
+- if (x->props.mode == XFRM_MODE_TUNNEL)
++ if (x->props.mode == XFRM_MODE_TUNNEL && !skb->sk)
+ return xfrm_dev_direct_output(sk, x, skb);
+ 
+ return xfrm_output_resume(sk, skb, 0);
+diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
+index 3795c36a9181aa..2364140596c5d7 100644
+--- a/scripts/kconfig/mconf.c
++++ b/scripts/kconfig/mconf.c
+@@ -12,6 +12,7 @@
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <limits.h>
++#include <locale.h>
+ #include <stdarg.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -1015,6 +1016,8 @@ int main(int ac, char **av)
+ 
+ signal(SIGINT, sig_handler);
+ 
++ setlocale(LC_ALL, "");
++
+ if (ac > 1 && strcmp(av[1], "-s") == 0) {
+ silent = 1;
+ /* Silence conf_read() until the real callback is set up */
+diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
+index 7a17c94a159418..a7cfa6813c63f2 100644
+--- a/scripts/kconfig/nconf.c
++++ b/scripts/kconfig/nconf.c
+@@ -7,6 +7,7 @@
+ #ifndef _GNU_SOURCE
+ #define _GNU_SOURCE
+ #endif
++#include <locale.h>
+ #include <string.h>
+ #include <strings.h>
+ #include <stdlib.h>
+@@ -1569,6 +1570,8 @@ int main(int ac, char **av)
+ int lines, columns;
+ char *mode;
+ 
++ setlocale(LC_ALL, "");
++
+ if (ac > 1 && strcmp(av[1], "-s") == 0) {
+ /* Silence conf_read() until the real callback is set up */
+ conf_set_message_callback(NULL);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 787cdeddbdf443..e19d962fab870b 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -930,7 +930,7 @@ static int parse_term_uac2_clock_source(struct mixer_build *state,
+ {
+ struct uac_clock_source_descriptor *d = p1;
+ 
+- term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
++ term->type = UAC2_CLOCK_SOURCE << 16; /* virtual type */
+ term->id = id;
+ term->name = d->iClockSource;
+ return 0;
+diff --git a/tools/testing/selftests/net/bareudp.sh b/tools/testing/selftests/net/bareudp.sh
+index f366cadbc5e862..ff4308b48e65d1 100755
+--- a/tools/testing/selftests/net/bareudp.sh
++++ b/tools/testing/selftests/net/bareudp.sh
+@@ -1,4 +1,4
@@ +-#!/bin/sh ++#!/bin/bash + # SPDX-License-Identifier: GPL-2.0 + + # Test various bareudp tunnel configurations. +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 250577b11a91b7..ef16edce4759ee 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -3741,7 +3741,7 @@ endpoint_tests() + pm_nl_set_limits $ns1 2 2 + pm_nl_set_limits $ns2 2 2 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal +- speed=slow \ ++ test_linkfail=128 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 & + local tests_pid=$! + +@@ -3768,7 +3768,7 @@ endpoint_tests() + pm_nl_set_limits $ns2 0 3 + pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow + pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow +- test_linkfail=4 speed=5 \ ++ test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & + local tests_pid=$! + +@@ -3845,7 +3845,7 @@ endpoint_tests() + # broadcast IP: no packet for this address will be received on ns1 + pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal + pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal +- test_linkfail=4 speed=5 \ ++ test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & + local tests_pid=$! + +@@ -3917,7 +3917,7 @@ endpoint_tests() + # broadcast IP: no packet for this address will be received on ns1 + pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal + pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow +- test_linkfail=4 speed=20 \ ++ test_linkfail=128 speed=20 \ + run_tests $ns1 $ns2 10.0.1.1 & + local tests_pid=$! + +diff --git a/tools/tracing/latency/latency-collector.c b/tools/tracing/latency/latency-collector.c +index cf263fe9deaf4b..ef97916e3873a1 100644 +--- a/tools/tracing/latency/latency-collector.c ++++ b/tools/tracing/latency/latency-collector.c +@@ -1725,7 +1725,7 @@ static void show_usage(void) + "-n, --notrace\t\tIf latency is detected, do not print out the content of\n" + "\t\t\tthe trace file to standard output\n\n" + +-"-t, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n" ++"-e, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n" + + "-r, --random\t\tArbitrarily sleep a certain amount of time, default\n" + "\t\t\t%ld ms, before reading the trace file. The\n"