From 26fed3cbda738bbca4fb1b3e98bc066973fc530a Mon Sep 17 00:00:00 2001 From: Julian Sikorski Date: Fri, 27 Oct 2023 13:11:10 +0000 Subject: [PATCH] Update odroidxu4-current to 6.1.60 --- .../odroidxu4-6.1/patch-6.1.56-57.patch | 11235 ++++++++++++++++ .../odroidxu4-6.1/patch-6.1.57-58.patch | 389 + .../odroidxu4-6.1/patch-6.1.58-59.patch | 4902 +++++++ .../odroidxu4-6.1/patch-6.1.59-60.patch | 7686 +++++++++++ 4 files changed, 24212 insertions(+) create mode 100644 patch/kernel/archive/odroidxu4-6.1/patch-6.1.56-57.patch create mode 100644 patch/kernel/archive/odroidxu4-6.1/patch-6.1.57-58.patch create mode 100644 patch/kernel/archive/odroidxu4-6.1/patch-6.1.58-59.patch create mode 100644 patch/kernel/archive/odroidxu4-6.1/patch-6.1.59-60.patch diff --git a/patch/kernel/archive/odroidxu4-6.1/patch-6.1.56-57.patch b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.56-57.patch new file mode 100644 index 0000000000..455f1d7194 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.56-57.patch @@ -0,0 +1,11235 @@ +diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst +index 9000640f7f7a0..d9fce65b2f047 100644 +--- a/Documentation/arm64/silicon-errata.rst ++++ b/Documentation/arm64/silicon-errata.rst +@@ -63,6 +63,8 @@ stable kernels. + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A510 | #1902691 | ARM64_ERRATUM_1902691 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A520 | #2966298 | ARM64_ERRATUM_2966298 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 | +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index 3301288a7c692..f5f7a464605f9 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -2148,6 +2148,14 @@ accept_ra_min_hop_limit - INTEGER + + Default: 1 + ++accept_ra_min_lft - INTEGER ++ Minimum acceptable lifetime value in Router Advertisement. ++ ++ RA sections with a lifetime less than this value shall be ++ ignored. Zero lifetimes stay unaffected. ++ ++ Default: 0 ++ + accept_ra_pinfo - BOOLEAN + Learn Prefix Information in Router Advertisement. + +diff --git a/Makefile b/Makefile +index 9ceda3dad5eb7..b435b56594f0f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 1 +-SUBLEVEL = 56 ++SUBLEVEL = 57 + EXTRAVERSION = + NAME = Curry Ramen + +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index d5eb2fbab473e..9ee9e17eb2ca0 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -983,6 +983,19 @@ config ARM64_ERRATUM_2457168 + + If unsure, say Y. + ++config ARM64_ERRATUM_2966298 ++ bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load" ++ default y ++ help ++ This option adds the workaround for ARM Cortex-A520 erratum 2966298. ++ ++ On an affected Cortex-A520 core, a speculatively executed unprivileged ++ load might leak data from a privileged level via a cache side channel. ++ ++ Work around this problem by executing a TLBI before returning to EL0. ++ ++ If unsure, say Y. 
++ + config CAVIUM_ERRATUM_22375 + bool "Cavium erratum 22375, 24313" + default y +diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h +index f73f11b550425..a0badda3a8d1c 100644 +--- a/arch/arm64/include/asm/cpufeature.h ++++ b/arch/arm64/include/asm/cpufeature.h +@@ -670,7 +670,7 @@ static inline bool supports_clearbhb(int scope) + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); + + return cpuid_feature_extract_unsigned_field(isar2, +- ID_AA64ISAR2_EL1_BC_SHIFT); ++ ID_AA64ISAR2_EL1_CLRBHB_SHIFT); + } + + const struct cpumask *system_32bit_el0_cpumask(void); +@@ -863,7 +863,11 @@ static inline bool cpu_has_hw_af(void) + if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM)) + return false; + +- mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); ++ /* ++ * Use cached version to avoid emulated msr operation on KVM ++ * guests. ++ */ ++ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_EL1_HAFDBS_SHIFT); + } +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 65e53ef5a3960..357932938b5ab 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -79,6 +79,7 @@ + #define ARM_CPU_PART_CORTEX_A78AE 0xD42 + #define ARM_CPU_PART_CORTEX_X1 0xD44 + #define ARM_CPU_PART_CORTEX_A510 0xD46 ++#define ARM_CPU_PART_CORTEX_A520 0xD80 + #define ARM_CPU_PART_CORTEX_A710 0xD47 + #define ARM_CPU_PART_CORTEX_X2 0xD48 + #define ARM_CPU_PART_NEOVERSE_N2 0xD49 +@@ -141,6 +142,7 @@ + #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) + #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) + #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) ++#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520) + #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) + #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) + #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 8dbf3c21ea22a..3f917124684c5 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -723,6 +723,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { + .cpu_enable = cpu_clear_bf16_from_user_emulation, + }, + #endif ++#ifdef CONFIG_ARM64_ERRATUM_2966298 ++ { ++ .desc = "ARM erratum 2966298", ++ .capability = ARM64_WORKAROUND_2966298, ++ /* Cortex-A520 r0p0 - r0p1 */ ++ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1), ++ }, ++#endif + #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 + { + .desc = "AmpereOne erratum AC03_CPU_38", +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c +index b3eb53847c96b..770a31c6ed81b 100644 +--- a/arch/arm64/kernel/cpufeature.c ++++ b/arch/arm64/kernel/cpufeature.c +@@ -212,7 +212,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { + }; + + static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { +- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0), ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0), ++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0), + 
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index beb4db21c89c1..de16fa917e1b8 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -419,6 +419,10 @@ alternative_else_nop_endif + ldp x28, x29, [sp, #16 * 14] + + .if \el == 0 ++alternative_if ARM64_WORKAROUND_2966298 ++ tlbi vale1, xzr ++ dsb nsh ++alternative_else_nop_endif + alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 + ldr lr, [sp, #S_LR] + add sp, sp, #PT_REGS_SIZE // restore sp +diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps +index 14d31d1b2ff02..e73830d9f1367 100644 +--- a/arch/arm64/tools/cpucaps ++++ b/arch/arm64/tools/cpucaps +@@ -71,6 +71,7 @@ WORKAROUND_2064142 + WORKAROUND_2077057 + WORKAROUND_2457168 + WORKAROUND_2658417 ++WORKAROUND_2966298 + WORKAROUND_AMPERE_AC03_CPU_38 + WORKAROUND_TRBE_OVERWRITE_FILL_MODE + WORKAROUND_TSB_FLUSH_FAILURE +diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg +index 384757a7eda9e..11c3f7a7cec7b 100644 +--- a/arch/arm64/tools/sysreg ++++ b/arch/arm64/tools/sysreg +@@ -484,7 +484,11 @@ EndEnum + EndSysreg + + Sysreg ID_AA64ISAR2_EL1 3 0 0 6 2 +-Res0 63:28 ++Res0 63:32 ++Enum 31:28 CLRBHB ++ 0b0000 NI ++ 0b0001 IMP ++EndEnum + Enum 27:24 PAC_frac + 0b0000 NI + 0b0001 IMP +diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h +index 6d28b5514699a..10a061d6899cd 100644 +--- a/arch/parisc/include/asm/ldcw.h ++++ b/arch/parisc/include/asm/ldcw.h +@@ -2,14 +2,28 @@ + #ifndef __PARISC_LDCW_H + #define __PARISC_LDCW_H + +-#ifndef CONFIG_PA20 + /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, + and GCC only guarantees 8-byte alignment for stack locals, we can't + be assured of 16-byte alignment for atomic lock data even if we + specify "__attribute ((aligned(16)))" in the type declaration. So, + we use a struct containing an array of four ints for the atomic lock + type and dynamically select the 16-byte aligned int from the array +- for the semaphore. */ ++ for the semaphore. */ ++ ++/* From: "Jim Hull" ++ I've attached a summary of the change, but basically, for PA 2.0, as ++ long as the ",CO" (coherent operation) completer is implemented, then the ++ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead ++ they only require "natural" alignment (4-byte for ldcw, 8-byte for ++ ldcd). ++ ++ Although the cache control hint is accepted by all PA 2.0 processors, ++ it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still ++ require 16-byte alignment. If the address is unaligned, the operation ++ of the instruction is undefined. The ldcw instruction does not generate ++ unaligned data reference traps so misaligned accesses are not detected. ++ This hid the problem for years. So, restore the 16-byte alignment dropped ++ by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */ + + #define __PA_LDCW_ALIGNMENT 16 + #define __PA_LDCW_ALIGN_ORDER 4 +@@ -19,22 +33,12 @@ + & ~(__PA_LDCW_ALIGNMENT - 1); \ + (volatile unsigned int *) __ret; \ + }) +-#define __LDCW "ldcw" + +-#else /*CONFIG_PA20*/ +-/* From: "Jim Hull" +- I've attached a summary of the change, but basically, for PA 2.0, as +- long as the ",CO" (coherent operation) completer is specified, then the +- 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead +- they only require "natural" alignment (4-byte for ldcw, 8-byte for +- ldcd). 
*/ +- +-#define __PA_LDCW_ALIGNMENT 4 +-#define __PA_LDCW_ALIGN_ORDER 2 +-#define __ldcw_align(a) (&(a)->slock) ++#ifdef CONFIG_PA20 + #define __LDCW "ldcw,co" +- +-#endif /*!CONFIG_PA20*/ ++#else ++#define __LDCW "ldcw" ++#endif + + /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. + We don't explicitly expose that "*a" may be written as reload +diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h +index ca39ee350c3f4..35c5086b74d70 100644 +--- a/arch/parisc/include/asm/spinlock_types.h ++++ b/arch/parisc/include/asm/spinlock_types.h +@@ -3,13 +3,8 @@ + #define __ASM_SPINLOCK_TYPES_H + + typedef struct { +-#ifdef CONFIG_PA20 +- volatile unsigned int slock; +-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } +-#else + volatile unsigned int lock[4]; + # define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } +-#endif + } arch_spinlock_t; + + +diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c +index 7dbd92cafae38..e37ec05487308 100644 +--- a/arch/parisc/kernel/smp.c ++++ b/arch/parisc/kernel/smp.c +@@ -443,7 +443,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) + if (cpu_online(cpu)) + return 0; + +- if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle)) ++ if (num_online_cpus() < nr_cpu_ids && ++ num_online_cpus() < setup_max_cpus && ++ smp_boot_one_cpu(cpu, tidle)) + return -EIO; + + return cpu_online(cpu) ? 0 : -EIO; +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c +index 6672a3f05fc68..04f4b96dec6df 100644 +--- a/arch/x86/events/amd/core.c ++++ b/arch/x86/events/amd/core.c +@@ -534,8 +534,12 @@ static void amd_pmu_cpu_reset(int cpu) + /* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */ + wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0); + +- /* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */ +- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask); ++ /* ++ * Clear freeze and overflow bits i.e. 
PerfCntrGLobalStatus.LbrFreeze ++ * and PerfCntrGLobalStatus.PerfCntrOvfl ++ */ ++ wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, ++ GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask); + } + + static int amd_pmu_cpu_prepare(int cpu) +@@ -570,6 +574,7 @@ static void amd_pmu_cpu_starting(int cpu) + int i, nb_id; + + cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; ++ amd_pmu_cpu_reset(cpu); + + if (!x86_pmu.amd_nb_constraints) + return; +@@ -591,8 +596,6 @@ static void amd_pmu_cpu_starting(int cpu) + + cpuc->amd_nb->nb_id = nb_id; + cpuc->amd_nb->refcnt++; +- +- amd_pmu_cpu_reset(cpu); + } + + static void amd_pmu_cpu_dead(int cpu) +@@ -601,6 +604,7 @@ static void amd_pmu_cpu_dead(int cpu) + + kfree(cpuhw->lbr_sel); + cpuhw->lbr_sel = NULL; ++ amd_pmu_cpu_reset(cpu); + + if (!x86_pmu.amd_nb_constraints) + return; +@@ -613,8 +617,6 @@ static void amd_pmu_cpu_dead(int cpu) + + cpuhw->amd_nb = NULL; + } +- +- amd_pmu_cpu_reset(cpu); + } + + static inline void amd_pmu_set_global_ctl(u64 ctl) +@@ -884,7 +886,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) + struct hw_perf_event *hwc; + struct perf_event *event; + int handled = 0, idx; +- u64 status, mask; ++ u64 reserved, status, mask; + bool pmu_enabled; + + /* +@@ -909,6 +911,14 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) + status &= ~GLOBAL_STATUS_LBRS_FROZEN; + } + ++ reserved = status & ~amd_pmu_global_cntr_mask; ++ if (reserved) ++ pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n", ++ reserved); ++ ++ /* Clear any reserved bits set by buggy microcode */ ++ status &= amd_pmu_global_cntr_mask; ++ + for (idx = 0; idx < x86_pmu.num_counters; idx++) { + if (!test_bit(idx, cpuc->active_mask)) + continue; +diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c +index 3a5b0c9c4fccc..7dce812ce2538 100644 +--- a/arch/x86/kernel/sev-shared.c ++++ b/arch/x86/kernel/sev-shared.c +@@ -253,7 +253,7 @@ static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg) + return 0; + } + +-static int sev_cpuid_hv(struct cpuid_leaf *leaf) ++static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf) + { + int ret; + +@@ -276,6 +276,45 @@ static int sev_cpuid_hv(struct cpuid_leaf *leaf) + return ret; + } + ++static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) ++{ ++ u32 cr4 = native_read_cr4(); ++ int ret; ++ ++ ghcb_set_rax(ghcb, leaf->fn); ++ ghcb_set_rcx(ghcb, leaf->subfn); ++ ++ if (cr4 & X86_CR4_OSXSAVE) ++ /* Safe to read xcr0 */ ++ ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); ++ else ++ /* xgetbv will cause #UD - use reset value for xcr0 */ ++ ghcb_set_xcr0(ghcb, 1); ++ ++ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); ++ if (ret != ES_OK) ++ return ret; ++ ++ if (!(ghcb_rax_is_valid(ghcb) && ++ ghcb_rbx_is_valid(ghcb) && ++ ghcb_rcx_is_valid(ghcb) && ++ ghcb_rdx_is_valid(ghcb))) ++ return ES_VMM_ERROR; ++ ++ leaf->eax = ghcb->save.rax; ++ leaf->ebx = ghcb->save.rbx; ++ leaf->ecx = ghcb->save.rcx; ++ leaf->edx = ghcb->save.rdx; ++ ++ return ES_OK; ++} ++ ++static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) ++{ ++ return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf) ++ : __sev_cpuid_hv_msr(leaf); ++} ++ + /* + * This may be called early while still running on the initial identity + * mapping. 
Use RIP-relative addressing to obtain the correct address +@@ -385,19 +424,20 @@ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf) + return false; + } + +-static void snp_cpuid_hv(struct cpuid_leaf *leaf) ++static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) + { +- if (sev_cpuid_hv(leaf)) ++ if (sev_cpuid_hv(ghcb, ctxt, leaf)) + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV); + } + +-static int snp_cpuid_postprocess(struct cpuid_leaf *leaf) ++static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt, ++ struct cpuid_leaf *leaf) + { + struct cpuid_leaf leaf_hv = *leaf; + + switch (leaf->fn) { + case 0x1: +- snp_cpuid_hv(&leaf_hv); ++ snp_cpuid_hv(ghcb, ctxt, &leaf_hv); + + /* initial APIC ID */ + leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0)); +@@ -416,7 +456,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf) + break; + case 0xB: + leaf_hv.subfn = 0; +- snp_cpuid_hv(&leaf_hv); ++ snp_cpuid_hv(ghcb, ctxt, &leaf_hv); + + /* extended APIC ID */ + leaf->edx = leaf_hv.edx; +@@ -464,7 +504,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf) + } + break; + case 0x8000001E: +- snp_cpuid_hv(&leaf_hv); ++ snp_cpuid_hv(ghcb, ctxt, &leaf_hv); + + /* extended APIC ID */ + leaf->eax = leaf_hv.eax; +@@ -485,7 +525,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf) + * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value + * should be treated as fatal by caller. + */ +-static int snp_cpuid(struct cpuid_leaf *leaf) ++static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) + { + const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); + +@@ -519,7 +559,7 @@ static int snp_cpuid(struct cpuid_leaf *leaf) + return 0; + } + +- return snp_cpuid_postprocess(leaf); ++ return snp_cpuid_postprocess(ghcb, ctxt, leaf); + } + + /* +@@ -541,14 +581,14 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) + leaf.fn = fn; + leaf.subfn = subfn; + +- ret = snp_cpuid(&leaf); ++ ret = snp_cpuid(NULL, NULL, &leaf); + if (!ret) + goto cpuid_done; + + if (ret != -EOPNOTSUPP) + goto fail; + +- if (sev_cpuid_hv(&leaf)) ++ if (__sev_cpuid_hv_msr(&leaf)) + goto fail; + + cpuid_done: +@@ -845,14 +885,15 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) + return ret; + } + +-static int vc_handle_cpuid_snp(struct pt_regs *regs) ++static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt) + { ++ struct pt_regs *regs = ctxt->regs; + struct cpuid_leaf leaf; + int ret; + + leaf.fn = regs->ax; + leaf.subfn = regs->cx; +- ret = snp_cpuid(&leaf); ++ ret = snp_cpuid(ghcb, ctxt, &leaf); + if (!ret) { + regs->ax = leaf.eax; + regs->bx = leaf.ebx; +@@ -871,7 +912,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb, + enum es_result ret; + int snp_cpuid_ret; + +- snp_cpuid_ret = vc_handle_cpuid_snp(regs); ++ snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt); + if (!snp_cpuid_ret) + return ES_OK; + if (snp_cpuid_ret != -EOPNOTSUPP) +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c +index a582ea0da74f5..a82bdec923b21 100644 +--- a/block/blk-sysfs.c ++++ b/block/blk-sysfs.c +@@ -737,6 +737,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) + struct request_queue *q = container_of(rcu_head, struct request_queue, + rcu_head); + ++ percpu_ref_exit(&q->q_usage_counter); + kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q); + } + +@@ -762,8 +763,6 
@@ static void blk_release_queue(struct kobject *kobj) + + might_sleep(); + +- percpu_ref_exit(&q->q_usage_counter); +- + if (q->poll_stat) + blk_stat_remove_callback(q, q->poll_cb); + blk_stat_free_callback(q->poll_cb); +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 25b9bdf2fc380..6a053cd0cf410 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -5022,11 +5022,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET + + static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) + { ++ /* ++ * We are about to suspend the port, so we do not care about ++ * scsi_rescan_device() calls scheduled by previous resume operations. ++ * The next resume will schedule the rescan again. So cancel any rescan ++ * that is not done yet. ++ */ ++ cancel_delayed_work_sync(&ap->scsi_rescan_task); ++ + ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); + } + + static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) + { ++ /* ++ * We are about to suspend the port, so we do not care about ++ * scsi_rescan_device() calls scheduled by previous resume operations. ++ * The next resume will schedule the rescan again. So cancel any rescan ++ * that is not done yet. ++ */ ++ cancel_delayed_work_sync(&ap->scsi_rescan_task); ++ + ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index d28628b964e29..7b9c9264b9a72 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -1081,7 +1081,15 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) + } + } else { + sdev->sector_size = ata_id_logical_sector_size(dev->id); +- sdev->manage_start_stop = 1; ++ /* ++ * Stop the drive on suspend but do not issue START STOP UNIT ++ * on resume as this is not necessary and may fail: the device ++ * will be woken up by ata_port_pm_resume() with a port reset ++ * and device revalidation. ++ */ ++ sdev->manage_system_start_stop = true; ++ sdev->manage_runtime_start_stop = true; ++ sdev->no_start_on_resume = 1; + } + + /* +@@ -4640,7 +4648,7 @@ void ata_scsi_dev_rescan(struct work_struct *work) + struct ata_link *link; + struct ata_device *dev; + unsigned long flags; +- bool delay_rescan = false; ++ int ret = 0; + + mutex_lock(&ap->scsi_scan_mutex); + spin_lock_irqsave(ap->lock, flags); +@@ -4649,37 +4657,34 @@ void ata_scsi_dev_rescan(struct work_struct *work) + ata_for_each_dev(dev, link, ENABLED) { + struct scsi_device *sdev = dev->sdev; + ++ /* ++ * If the port was suspended before this was scheduled, ++ * bail out. ++ */ ++ if (ap->pflags & ATA_PFLAG_SUSPENDED) ++ goto unlock; ++ + if (!sdev) + continue; + if (scsi_device_get(sdev)) + continue; + +- /* +- * If the rescan work was scheduled because of a resume +- * event, the port is already fully resumed, but the +- * SCSI device may not yet be fully resumed. In such +- * case, executing scsi_rescan_device() may cause a +- * deadlock with the PM code on device_lock(). Prevent +- * this by giving up and retrying rescan after a short +- * delay. 
+- */ +- delay_rescan = sdev->sdev_gendev.power.is_suspended; +- if (delay_rescan) { +- scsi_device_put(sdev); +- break; +- } +- + spin_unlock_irqrestore(ap->lock, flags); +- scsi_rescan_device(&(sdev->sdev_gendev)); ++ ret = scsi_rescan_device(sdev); + scsi_device_put(sdev); + spin_lock_irqsave(ap->lock, flags); ++ ++ if (ret) ++ goto unlock; + } + } + ++unlock: + spin_unlock_irqrestore(ap->lock, flags); + mutex_unlock(&ap->scsi_scan_mutex); + +- if (delay_rescan) ++ /* Reschedule with a delay if scsi_rescan_device() returned an error */ ++ if (ret) + schedule_delayed_work(&ap->scsi_rescan_task, + msecs_to_jiffies(5)); + } +diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c +index ae6b8788d5f3f..d65715b9e129e 100644 +--- a/drivers/base/regmap/regcache-rbtree.c ++++ b/drivers/base/regmap/regcache-rbtree.c +@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, + if (!rbnode) + return -ENOMEM; + regcache_rbtree_set_register(map, rbnode, +- reg - rbnode->base_reg, value); ++ (reg - rbnode->base_reg) / map->reg_stride, ++ value); + regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); + rbtree_ctx->cached_rbnode = rbnode; + } +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 74ef3da545361..afc92869cba42 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) + static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); + + static int rbd_dev_refresh(struct rbd_device *rbd_dev); +-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); +-static int rbd_dev_header_info(struct rbd_device *rbd_dev); +-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev); ++static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev, ++ struct rbd_image_header *header); + static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, + u64 snap_id); + static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, +@@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev) + RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); + } + ++static void rbd_image_header_cleanup(struct rbd_image_header *header) ++{ ++ kfree(header->object_prefix); ++ ceph_put_snap_context(header->snapc); ++ kfree(header->snap_sizes); ++ kfree(header->snap_names); ++ ++ memset(header, 0, sizeof(*header)); ++} ++ + /* + * Fill an rbd image header with information from the given format 1 + * on-disk header. + */ +-static int rbd_header_from_disk(struct rbd_device *rbd_dev, +- struct rbd_image_header_ondisk *ondisk) ++static int rbd_header_from_disk(struct rbd_image_header *header, ++ struct rbd_image_header_ondisk *ondisk, ++ bool first_time) + { +- struct rbd_image_header *header = &rbd_dev->header; +- bool first_time = header->object_prefix == NULL; + struct ceph_snap_context *snapc; + char *object_prefix = NULL; + char *snap_names = NULL; +@@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, + if (first_time) { + header->object_prefix = object_prefix; + header->obj_order = ondisk->options.order; +- rbd_init_layout(rbd_dev); +- } else { +- ceph_put_snap_context(header->snapc); +- kfree(header->snap_names); +- kfree(header->snap_sizes); + } + + /* The remaining fields always get updated (when we refresh) */ +@@ -4860,7 +4863,9 @@ out_req: + * return, the rbd_dev->header field will contain up-to-date + * information about the image. 
+ */ +-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) ++static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev, ++ struct rbd_image_header *header, ++ bool first_time) + { + struct rbd_image_header_ondisk *ondisk = NULL; + u32 snap_count = 0; +@@ -4908,7 +4913,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) + snap_count = le32_to_cpu(ondisk->snap_count); + } while (snap_count != want_count); + +- ret = rbd_header_from_disk(rbd_dev, ondisk); ++ ret = rbd_header_from_disk(header, ondisk, first_time); + out: + kfree(ondisk); + +@@ -4932,39 +4937,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev) + } + } + +-static int rbd_dev_refresh(struct rbd_device *rbd_dev) +-{ +- u64 mapping_size; +- int ret; +- +- down_write(&rbd_dev->header_rwsem); +- mapping_size = rbd_dev->mapping.size; +- +- ret = rbd_dev_header_info(rbd_dev); +- if (ret) +- goto out; +- +- /* +- * If there is a parent, see if it has disappeared due to the +- * mapped image getting flattened. +- */ +- if (rbd_dev->parent) { +- ret = rbd_dev_v2_parent_info(rbd_dev); +- if (ret) +- goto out; +- } +- +- rbd_assert(!rbd_is_snap(rbd_dev)); +- rbd_dev->mapping.size = rbd_dev->header.image_size; +- +-out: +- up_write(&rbd_dev->header_rwsem); +- if (!ret && mapping_size != rbd_dev->mapping.size) +- rbd_dev_update_size(rbd_dev); +- +- return ret; +-} +- + static const struct blk_mq_ops rbd_mq_ops = { + .queue_rq = rbd_queue_rq, + }; +@@ -5504,17 +5476,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, + return 0; + } + +-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) +-{ +- return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, +- &rbd_dev->header.obj_order, +- &rbd_dev->header.image_size); +-} +- +-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) ++static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev, ++ char **pobject_prefix) + { + size_t size; + void *reply_buf; ++ char *object_prefix; + int ret; + void *p; + +@@ -5532,16 +5499,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) + goto out; + + p = reply_buf; +- rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, +- p + ret, NULL, GFP_NOIO); ++ object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL, ++ GFP_NOIO); ++ if (IS_ERR(object_prefix)) { ++ ret = PTR_ERR(object_prefix); ++ goto out; ++ } + ret = 0; + +- if (IS_ERR(rbd_dev->header.object_prefix)) { +- ret = PTR_ERR(rbd_dev->header.object_prefix); +- rbd_dev->header.object_prefix = NULL; +- } else { +- dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); +- } ++ *pobject_prefix = object_prefix; ++ dout(" object_prefix = %s\n", object_prefix); + out: + kfree(reply_buf); + +@@ -5592,13 +5559,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, + return 0; + } + +-static int rbd_dev_v2_features(struct rbd_device *rbd_dev) +-{ +- return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, +- rbd_is_ro(rbd_dev), +- &rbd_dev->header.features); +-} +- + /* + * These are generic image flags, but since they are used only for + * object map, store them in rbd_dev->object_map_flags. +@@ -5635,6 +5595,14 @@ struct parent_image_info { + u64 overlap; + }; + ++static void rbd_parent_info_cleanup(struct parent_image_info *pii) ++{ ++ kfree(pii->pool_ns); ++ kfree(pii->image_id); ++ ++ memset(pii, 0, sizeof(*pii)); ++} ++ + /* + * The caller is responsible for @pii. 
+ */ +@@ -5704,6 +5672,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev, + if (pii->has_overlap) + ceph_decode_64_safe(&p, end, pii->overlap, e_inval); + ++ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", ++ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id, ++ pii->has_overlap, pii->overlap); + return 0; + + e_inval: +@@ -5742,14 +5713,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev, + pii->has_overlap = true; + ceph_decode_64_safe(&p, end, pii->overlap, e_inval); + ++ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", ++ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id, ++ pii->has_overlap, pii->overlap); + return 0; + + e_inval: + return -EINVAL; + } + +-static int get_parent_info(struct rbd_device *rbd_dev, +- struct parent_image_info *pii) ++static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev, ++ struct parent_image_info *pii) + { + struct page *req_page, *reply_page; + void *p; +@@ -5777,7 +5751,7 @@ static int get_parent_info(struct rbd_device *rbd_dev, + return ret; + } + +-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) ++static int rbd_dev_setup_parent(struct rbd_device *rbd_dev) + { + struct rbd_spec *parent_spec; + struct parent_image_info pii = { 0 }; +@@ -5787,37 +5761,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) + if (!parent_spec) + return -ENOMEM; + +- ret = get_parent_info(rbd_dev, &pii); ++ ret = rbd_dev_v2_parent_info(rbd_dev, &pii); + if (ret) + goto out_err; + +- dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", +- __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, +- pii.has_overlap, pii.overlap); +- +- if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { +- /* +- * Either the parent never existed, or we have +- * record of it but the image got flattened so it no +- * longer has a parent. When the parent of a +- * layered image disappears we immediately set the +- * overlap to 0. The effect of this is that all new +- * requests will be treated as if the image had no +- * parent. +- * +- * If !pii.has_overlap, the parent image spec is not +- * applicable. It's there to avoid duplication in each +- * snapshot record. +- */ +- if (rbd_dev->parent_overlap) { +- rbd_dev->parent_overlap = 0; +- rbd_dev_parent_put(rbd_dev); +- pr_info("%s: clone image has been flattened\n", +- rbd_dev->disk->disk_name); +- } +- ++ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) + goto out; /* No parent? No problem. */ +- } + + /* The ceph file layout needs to fit pool id in 32 bits */ + +@@ -5829,58 +5778,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) + } + + /* +- * The parent won't change (except when the clone is +- * flattened, already handled that). So we only need to +- * record the parent spec we have not already done so. ++ * The parent won't change except when the clone is flattened, ++ * so we only need to record the parent image spec once. 
+ */ +- if (!rbd_dev->parent_spec) { +- parent_spec->pool_id = pii.pool_id; +- if (pii.pool_ns && *pii.pool_ns) { +- parent_spec->pool_ns = pii.pool_ns; +- pii.pool_ns = NULL; +- } +- parent_spec->image_id = pii.image_id; +- pii.image_id = NULL; +- parent_spec->snap_id = pii.snap_id; +- +- rbd_dev->parent_spec = parent_spec; +- parent_spec = NULL; /* rbd_dev now owns this */ ++ parent_spec->pool_id = pii.pool_id; ++ if (pii.pool_ns && *pii.pool_ns) { ++ parent_spec->pool_ns = pii.pool_ns; ++ pii.pool_ns = NULL; + } ++ parent_spec->image_id = pii.image_id; ++ pii.image_id = NULL; ++ parent_spec->snap_id = pii.snap_id; ++ ++ rbd_assert(!rbd_dev->parent_spec); ++ rbd_dev->parent_spec = parent_spec; ++ parent_spec = NULL; /* rbd_dev now owns this */ + + /* +- * We always update the parent overlap. If it's zero we issue +- * a warning, as we will proceed as if there was no parent. ++ * Record the parent overlap. If it's zero, issue a warning as ++ * we will proceed as if there is no parent. + */ +- if (!pii.overlap) { +- if (parent_spec) { +- /* refresh, careful to warn just once */ +- if (rbd_dev->parent_overlap) +- rbd_warn(rbd_dev, +- "clone now standalone (overlap became 0)"); +- } else { +- /* initial probe */ +- rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); +- } +- } ++ if (!pii.overlap) ++ rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); + rbd_dev->parent_overlap = pii.overlap; + + out: + ret = 0; + out_err: +- kfree(pii.pool_ns); +- kfree(pii.image_id); ++ rbd_parent_info_cleanup(&pii); + rbd_spec_put(parent_spec); + return ret; + } + +-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) ++static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev, ++ u64 *stripe_unit, u64 *stripe_count) + { + struct { + __le64 stripe_unit; + __le64 stripe_count; + } __attribute__ ((packed)) striping_info_buf = { 0 }; + size_t size = sizeof (striping_info_buf); +- void *p; + int ret; + + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, +@@ -5892,27 +5829,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) + if (ret < size) + return -ERANGE; + +- p = &striping_info_buf; +- rbd_dev->header.stripe_unit = ceph_decode_64(&p); +- rbd_dev->header.stripe_count = ceph_decode_64(&p); ++ *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit); ++ *stripe_count = le64_to_cpu(striping_info_buf.stripe_count); ++ dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit, ++ *stripe_count); ++ + return 0; + } + +-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) ++static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id) + { +- __le64 data_pool_id; ++ __le64 data_pool_buf; + int ret; + + ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, + &rbd_dev->header_oloc, "get_data_pool", +- NULL, 0, &data_pool_id, sizeof(data_pool_id)); ++ NULL, 0, &data_pool_buf, ++ sizeof(data_pool_buf)); ++ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); + if (ret < 0) + return ret; +- if (ret < sizeof(data_pool_id)) ++ if (ret < sizeof(data_pool_buf)) + return -EBADMSG; + +- rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); +- WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); ++ *data_pool_id = le64_to_cpu(data_pool_buf); ++ dout(" data_pool_id = %lld\n", *data_pool_id); ++ WARN_ON(*data_pool_id == CEPH_NOPOOL); ++ + return 0; + } + +@@ -6104,7 +6047,8 @@ out_err: + return ret; + } + +-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) ++static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, ++ 
struct ceph_snap_context **psnapc) + { + size_t size; + int ret; +@@ -6165,9 +6109,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) + for (i = 0; i < snap_count; i++) + snapc->snaps[i] = ceph_decode_64(&p); + +- ceph_put_snap_context(rbd_dev->header.snapc); +- rbd_dev->header.snapc = snapc; +- ++ *psnapc = snapc; + dout(" snap context seq = %llu, snap_count = %u\n", + (unsigned long long)seq, (unsigned int)snap_count); + out: +@@ -6216,38 +6158,42 @@ out: + return snap_name; + } + +-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) ++static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev, ++ struct rbd_image_header *header, ++ bool first_time) + { +- bool first_time = rbd_dev->header.object_prefix == NULL; + int ret; + +- ret = rbd_dev_v2_image_size(rbd_dev); ++ ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, ++ first_time ? &header->obj_order : NULL, ++ &header->image_size); + if (ret) + return ret; + + if (first_time) { +- ret = rbd_dev_v2_header_onetime(rbd_dev); ++ ret = rbd_dev_v2_header_onetime(rbd_dev, header); + if (ret) + return ret; + } + +- ret = rbd_dev_v2_snap_context(rbd_dev); +- if (ret && first_time) { +- kfree(rbd_dev->header.object_prefix); +- rbd_dev->header.object_prefix = NULL; +- } ++ ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc); ++ if (ret) ++ return ret; + +- return ret; ++ return 0; + } + +-static int rbd_dev_header_info(struct rbd_device *rbd_dev) ++static int rbd_dev_header_info(struct rbd_device *rbd_dev, ++ struct rbd_image_header *header, ++ bool first_time) + { + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); ++ rbd_assert(!header->object_prefix && !header->snapc); + + if (rbd_dev->image_format == 1) +- return rbd_dev_v1_header_info(rbd_dev); ++ return rbd_dev_v1_header_info(rbd_dev, header, first_time); + +- return rbd_dev_v2_header_info(rbd_dev); ++ return rbd_dev_v2_header_info(rbd_dev, header, first_time); + } + + /* +@@ -6735,60 +6681,49 @@ out: + */ + static void rbd_dev_unprobe(struct rbd_device *rbd_dev) + { +- struct rbd_image_header *header; +- + rbd_dev_parent_put(rbd_dev); + rbd_object_map_free(rbd_dev); + rbd_dev_mapping_clear(rbd_dev); + + /* Free dynamic fields from the header, then zero it out */ + +- header = &rbd_dev->header; +- ceph_put_snap_context(header->snapc); +- kfree(header->snap_sizes); +- kfree(header->snap_names); +- kfree(header->object_prefix); +- memset(header, 0, sizeof (*header)); ++ rbd_image_header_cleanup(&rbd_dev->header); + } + +-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) ++static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev, ++ struct rbd_image_header *header) + { + int ret; + +- ret = rbd_dev_v2_object_prefix(rbd_dev); ++ ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix); + if (ret) +- goto out_err; ++ return ret; + + /* + * Get the and check features for the image. Currently the + * features are assumed to never change. 
+ */ +- ret = rbd_dev_v2_features(rbd_dev); ++ ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, ++ rbd_is_ro(rbd_dev), &header->features); + if (ret) +- goto out_err; ++ return ret; + + /* If the image supports fancy striping, get its parameters */ + +- if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { +- ret = rbd_dev_v2_striping_info(rbd_dev); +- if (ret < 0) +- goto out_err; ++ if (header->features & RBD_FEATURE_STRIPINGV2) { ++ ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit, ++ &header->stripe_count); ++ if (ret) ++ return ret; + } + +- if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) { +- ret = rbd_dev_v2_data_pool(rbd_dev); ++ if (header->features & RBD_FEATURE_DATA_POOL) { ++ ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id); + if (ret) +- goto out_err; ++ return ret; + } + +- rbd_init_layout(rbd_dev); + return 0; +- +-out_err: +- rbd_dev->header.features = 0; +- kfree(rbd_dev->header.object_prefix); +- rbd_dev->header.object_prefix = NULL; +- return ret; + } + + /* +@@ -6983,13 +6918,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + if (!depth) + down_write(&rbd_dev->header_rwsem); + +- ret = rbd_dev_header_info(rbd_dev); ++ ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true); + if (ret) { + if (ret == -ENOENT && !need_watch) + rbd_print_dne(rbd_dev, false); + goto err_out_probe; + } + ++ rbd_init_layout(rbd_dev); ++ + /* + * If this image is the one being mapped, we have pool name and + * id, image name and id, and snap name - need to fill snap id. +@@ -7018,7 +6955,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + } + + if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { +- ret = rbd_dev_v2_parent_info(rbd_dev); ++ ret = rbd_dev_setup_parent(rbd_dev); + if (ret) + goto err_out_probe; + } +@@ -7044,6 +6981,107 @@ err_out_format: + return ret; + } + ++static void rbd_dev_update_header(struct rbd_device *rbd_dev, ++ struct rbd_image_header *header) ++{ ++ rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); ++ rbd_assert(rbd_dev->header.object_prefix); /* !first_time */ ++ ++ if (rbd_dev->header.image_size != header->image_size) { ++ rbd_dev->header.image_size = header->image_size; ++ ++ if (!rbd_is_snap(rbd_dev)) { ++ rbd_dev->mapping.size = header->image_size; ++ rbd_dev_update_size(rbd_dev); ++ } ++ } ++ ++ ceph_put_snap_context(rbd_dev->header.snapc); ++ rbd_dev->header.snapc = header->snapc; ++ header->snapc = NULL; ++ ++ if (rbd_dev->image_format == 1) { ++ kfree(rbd_dev->header.snap_names); ++ rbd_dev->header.snap_names = header->snap_names; ++ header->snap_names = NULL; ++ ++ kfree(rbd_dev->header.snap_sizes); ++ rbd_dev->header.snap_sizes = header->snap_sizes; ++ header->snap_sizes = NULL; ++ } ++} ++ ++static void rbd_dev_update_parent(struct rbd_device *rbd_dev, ++ struct parent_image_info *pii) ++{ ++ if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) { ++ /* ++ * Either the parent never existed, or we have ++ * record of it but the image got flattened so it no ++ * longer has a parent. When the parent of a ++ * layered image disappears we immediately set the ++ * overlap to 0. The effect of this is that all new ++ * requests will be treated as if the image had no ++ * parent. ++ * ++ * If !pii.has_overlap, the parent image spec is not ++ * applicable. It's there to avoid duplication in each ++ * snapshot record. 
++ */ ++ if (rbd_dev->parent_overlap) { ++ rbd_dev->parent_overlap = 0; ++ rbd_dev_parent_put(rbd_dev); ++ pr_info("%s: clone has been flattened\n", ++ rbd_dev->disk->disk_name); ++ } ++ } else { ++ rbd_assert(rbd_dev->parent_spec); ++ ++ /* ++ * Update the parent overlap. If it became zero, issue ++ * a warning as we will proceed as if there is no parent. ++ */ ++ if (!pii->overlap && rbd_dev->parent_overlap) ++ rbd_warn(rbd_dev, ++ "clone has become standalone (overlap 0)"); ++ rbd_dev->parent_overlap = pii->overlap; ++ } ++} ++ ++static int rbd_dev_refresh(struct rbd_device *rbd_dev) ++{ ++ struct rbd_image_header header = { 0 }; ++ struct parent_image_info pii = { 0 }; ++ int ret; ++ ++ dout("%s rbd_dev %p\n", __func__, rbd_dev); ++ ++ ret = rbd_dev_header_info(rbd_dev, &header, false); ++ if (ret) ++ goto out; ++ ++ /* ++ * If there is a parent, see if it has disappeared due to the ++ * mapped image getting flattened. ++ */ ++ if (rbd_dev->parent) { ++ ret = rbd_dev_v2_parent_info(rbd_dev, &pii); ++ if (ret) ++ goto out; ++ } ++ ++ down_write(&rbd_dev->header_rwsem); ++ rbd_dev_update_header(rbd_dev, &header); ++ if (rbd_dev->parent) ++ rbd_dev_update_parent(rbd_dev, &pii); ++ up_write(&rbd_dev->header_rwsem); ++ ++out: ++ rbd_parent_info_cleanup(&pii); ++ rbd_image_header_cleanup(&header); ++ return ret; ++} ++ + static ssize_t do_rbd_add(struct bus_type *bus, + const char *buf, + size_t count) +diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c +index 60051c0cabeaa..e322a326546b5 100644 +--- a/drivers/firewire/sbp2.c ++++ b/drivers/firewire/sbp2.c +@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " + * + * - power condition + * Set the power condition field in the START STOP UNIT commands sent by +- * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on). ++ * sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or ++ * manage_runtime_start_stop is on). + * Some disks need this to spin down or to resume properly. 
+ * + * - override internal blacklist +@@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) + + sdev->use_10_for_rw = 1; + +- if (sbp2_param_exclusive_login) +- sdev->manage_start_stop = 1; ++ if (sbp2_param_exclusive_login) { ++ sdev->manage_system_start_stop = true; ++ sdev->manage_runtime_start_stop = true; ++ } + + if (sdev->type == TYPE_ROM) + sdev->use_10_for_ms = 1; +diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c +index 318a7d95a1a8b..42d3e1cf73528 100644 +--- a/drivers/gpio/gpio-aspeed.c ++++ b/drivers/gpio/gpio-aspeed.c +@@ -963,7 +963,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset, + else if (param == PIN_CONFIG_BIAS_DISABLE || + param == PIN_CONFIG_BIAS_PULL_DOWN || + param == PIN_CONFIG_DRIVE_STRENGTH) +- return pinctrl_gpio_set_config(offset, config); ++ return pinctrl_gpio_set_config(chip->base + offset, config); + else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN || + param == PIN_CONFIG_DRIVE_OPEN_SOURCE) + /* Return -ENOTSUPP to trigger emulation, as per datasheet */ +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c +index 1198ab0305d03..b90357774dc04 100644 +--- a/drivers/gpio/gpio-pxa.c ++++ b/drivers/gpio/gpio-pxa.c +@@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void) + switch (gpio_type) { + case PXA3XX_GPIO: + case MMP2_GPIO: ++ case MMP_GPIO: + return false; + + default: +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 5f5999cea7d2c..92fa2faf63e41 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -2179,7 +2179,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) + adev->flags |= AMD_IS_PX; + + if (!(adev->flags & AMD_IS_APU)) { +- parent = pci_upstream_bridge(adev->pdev); ++ parent = pcie_find_root_port(adev->pdev); + adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; + } + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 18274ff5082ad..339f1f5a08339 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2344,14 +2344,62 @@ static int dm_late_init(void *handle) + return detect_mst_link_for_all_connectors(adev_to_drm(adev)); + } + ++static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) ++{ ++ int ret; ++ u8 guid[16]; ++ u64 tmp64; ++ ++ mutex_lock(&mgr->lock); ++ if (!mgr->mst_primary) ++ goto out_fail; ++ ++ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { ++ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); ++ goto out_fail; ++ } ++ ++ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, ++ DP_MST_EN | ++ DP_UP_REQ_EN | ++ DP_UPSTREAM_IS_SRC); ++ if (ret < 0) { ++ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); ++ goto out_fail; ++ } ++ ++ /* Some hubs forget their guids after they resume */ ++ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); ++ if (ret != 16) { ++ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); ++ goto out_fail; ++ } ++ ++ if (memchr_inv(guid, 0, 16) == NULL) { ++ tmp64 = get_jiffies_64(); ++ memcpy(&guid[0], &tmp64, sizeof(u64)); ++ memcpy(&guid[8], &tmp64, sizeof(u64)); ++ ++ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); ++ ++ if (ret != 16) { ++ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); ++ goto out_fail; ++ } ++ } ++ ++ memcpy(mgr->mst_primary->guid, guid, 16); ++ ++out_fail: ++ mutex_unlock(&mgr->lock); ++} ++ + static void s3_handle_mst(struct drm_device *dev, bool suspend) + { + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct drm_dp_mst_topology_mgr *mgr; +- int ret; +- bool need_hotplug = false; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { +@@ -2373,18 +2421,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) + if (!dp_is_lttpr_present(aconnector->dc_link)) + dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + +- ret = drm_dp_mst_topology_mgr_resume(mgr, true); +- if (ret < 0) { +- dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, +- aconnector->dc_link); +- need_hotplug = true; +- } ++ /* TODO: move resume_mst_branch_status() into drm mst resume again ++ * once topology probing work is pulled out from mst resume into mst ++ * resume 2nd step. mst resume 2nd step should be called after old ++ * state getting restored (i.e. drm_atomic_helper_resume()). 
++ */ ++ resume_mst_branch_status(mgr); + } + } + drm_connector_list_iter_end(&iter); +- +- if (need_hotplug) +- drm_kms_helper_hotplug_event(dev); + } + + static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) +@@ -2773,7 +2818,8 @@ static int dm_resume(void *handle) + struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); + enum dc_connection_type new_connection_type = dc_connection_none; + struct dc_state *dc_state; +- int i, r, j; ++ int i, r, j, ret; ++ bool need_hotplug = false; + + if (amdgpu_in_reset(adev)) { + dc_state = dm->cached_dc_state; +@@ -2871,7 +2917,7 @@ static int dm_resume(void *handle) + continue; + + /* +- * this is the case when traversing through already created ++ * this is the case when traversing through already created end sink + * MST connectors, should be skipped + */ + if (aconnector && aconnector->mst_port) +@@ -2931,6 +2977,27 @@ static int dm_resume(void *handle) + + dm->cached_state = NULL; + ++ /* Do mst topology probing after resuming cached state*/ ++ drm_connector_list_iter_begin(ddev, &iter); ++ drm_for_each_connector_iter(connector, &iter) { ++ aconnector = to_amdgpu_dm_connector(connector); ++ if (aconnector->dc_link->type != dc_connection_mst_branch || ++ aconnector->mst_port) ++ continue; ++ ++ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); ++ ++ if (ret < 0) { ++ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, ++ aconnector->dc_link); ++ need_hotplug = true; ++ } ++ } ++ drm_connector_list_iter_end(&iter); ++ ++ if (need_hotplug) ++ drm_kms_helper_hotplug_event(ddev); ++ + amdgpu_dm_irq_resume_late(adev); + + amdgpu_dm_smu_write_watermarks_table(adev); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +index 839a812e0da32..fbc4d706748b7 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +@@ -2081,36 +2081,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context + return ret; + } + ++#define MAX(a, b) ((a) > (b) ? (a) : (b)) ++ + static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, + uint32_t pcie_gen_cap, + uint32_t pcie_width_cap) + { + struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; +- u32 smu_pcie_arg; ++ uint8_t *table_member1, *table_member2; ++ uint32_t min_gen_speed, max_gen_speed; ++ uint32_t min_lane_width, max_lane_width; ++ uint32_t smu_pcie_arg; + int ret, i; + +- /* PCIE gen speed and lane width override */ +- if (!amdgpu_device_pcie_dynamic_switching_supported()) { +- if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap) +- pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1]; ++ GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); ++ GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); + +- if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap) +- pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1]; ++ min_gen_speed = MAX(0, table_member1[0]); ++ max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); ++ min_gen_speed = min_gen_speed > max_gen_speed ? ++ max_gen_speed : min_gen_speed; ++ min_lane_width = MAX(1, table_member2[0]); ++ max_lane_width = MIN(pcie_width_cap, table_member2[1]); ++ min_lane_width = min_lane_width > max_lane_width ? 
++ max_lane_width : min_lane_width; + +- /* Force all levels to use the same settings */ +- for (i = 0; i < NUM_LINK_LEVELS; i++) { +- pcie_table->pcie_gen[i] = pcie_gen_cap; +- pcie_table->pcie_lane[i] = pcie_width_cap; +- } ++ if (!amdgpu_device_pcie_dynamic_switching_supported()) { ++ pcie_table->pcie_gen[0] = max_gen_speed; ++ pcie_table->pcie_lane[0] = max_lane_width; + } else { +- for (i = 0; i < NUM_LINK_LEVELS; i++) { +- if (pcie_table->pcie_gen[i] > pcie_gen_cap) +- pcie_table->pcie_gen[i] = pcie_gen_cap; +- if (pcie_table->pcie_lane[i] > pcie_width_cap) +- pcie_table->pcie_lane[i] = pcie_width_cap; +- } ++ pcie_table->pcie_gen[0] = min_gen_speed; ++ pcie_table->pcie_lane[0] = min_lane_width; + } ++ pcie_table->pcie_gen[1] = max_gen_speed; ++ pcie_table->pcie_lane[1] = max_lane_width; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16 | +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index 03691cdcfb8e1..f7f7252d839ee 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -3074,6 +3074,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) + return ret; + + err: ++ usb_free_urb(sc->ghl_urb); ++ + hid_hw_stop(hdev); + return ret; + } +diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c +index 55cb25038e632..710fda5f19e1c 100644 +--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c ++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c +@@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev) + } + wakeup = &adev->wakeup; + ++ /* ++ * Call acpi_disable_gpe(), so that reference count ++ * gpe_event_info->runtime_count doesn't overflow. ++ * When gpe_event_info->runtime_count = 0, the call ++ * to acpi_disable_gpe() simply return. ++ */ ++ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); ++ + acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); + if (ACPI_FAILURE(acpi_sts)) { + dev_err(dev, "enable ose_gpe failed\n"); +diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c +index 533f38b0b4e9b..a8e72d8fd0605 100644 +--- a/drivers/hwmon/nzxt-smart2.c ++++ b/drivers/hwmon/nzxt-smart2.c +@@ -791,6 +791,8 @@ static const struct hid_device_id nzxt_smart2_hid_id_table[] = { + { HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */ + { HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */ + { HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */ ++ { HID_USB_DEVICE(0x1e71, 0x2011) }, /* NZXT RGB & Fan Controller (6 RGB) */ ++ { HID_USB_DEVICE(0x1e71, 0x2019) }, /* NZXT RGB & Fan Controller (6 RGB) */ + {}, + }; + +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index cfeb24d40d378..bb3d10099ba44 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -1430,6 +1430,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), ++ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), + X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), +@@ -1862,6 +1863,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) + skx_idle_state_table_update(); + break; + case INTEL_FAM6_SAPPHIRERAPIDS_X: ++ case INTEL_FAM6_EMERALDRAPIDS_X: + 
spr_idle_state_table_update(); + break; + case INTEL_FAM6_ALDERLAKE: +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 4632b1833381a..0773ca7ace247 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -4936,7 +4936,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, + int err = 0; + struct sockaddr *addr = (struct sockaddr *)&mc->addr; + struct net_device *ndev = NULL; +- struct ib_sa_multicast ib; ++ struct ib_sa_multicast ib = {}; + enum ib_gid_type gid_type; + bool send_only; + +diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c +index 7b68b3ea979f7..f2fb2d8a65970 100644 +--- a/drivers/infiniband/core/cma_configfs.c ++++ b/drivers/infiniband/core/cma_configfs.c +@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group, + return -ENOMEM; + + for (i = 0; i < ports_num; i++) { +- char port_str[10]; ++ char port_str[11]; + + ports[i].port_num = i + 1; + snprintf(port_str, sizeof(port_str), "%u", i + 1); +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c +index 222733a83ddb7..1adf20198afd1 100644 +--- a/drivers/infiniband/core/nldev.c ++++ b/drivers/infiniband/core/nldev.c +@@ -2501,6 +2501,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { + }, + [RDMA_NLDEV_CMD_SYS_SET] = { + .doit = nldev_set_sys_set_doit, ++ .flags = RDMA_NL_ADMIN_PERM, + }, + [RDMA_NLDEV_CMD_STAT_SET] = { + .doit = nldev_stat_set_doit, +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index fa937cd268219..6fe825800494c 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, + if (hdr->in_words * 4 != count) + return -EINVAL; + +- if (count < method_elm->req_size + sizeof(hdr)) { ++ if (count < method_elm->req_size + sizeof(*hdr)) { + /* + * rdma-core v18 and v19 have a bug where they send DESTROY_CQ + * with a 16 byte write instead of 24. 
Old kernels didn't +diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c +index 24ee79aa2122e..88f534cf690e9 100644 +--- a/drivers/infiniband/hw/mlx4/sysfs.c ++++ b/drivers/infiniband/hw/mlx4/sysfs.c +@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, + static int add_port_entries(struct mlx4_ib_dev *device, int port_num) + { + int i; +- char buff[11]; ++ char buff[12]; + struct mlx4_ib_iov_port *port = NULL; + int ret = 0 ; + struct ib_port_attr attr; +diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c +index 5a13d902b0641..1022cebd0a46e 100644 +--- a/drivers/infiniband/hw/mlx5/fs.c ++++ b/drivers/infiniband/hw/mlx5/fs.c +@@ -2471,8 +2471,8 @@ destroy_res: + mlx5_steering_anchor_destroy_res(ft_prio); + put_flow_table: + put_flow_table(dev, ft_prio, true); +- mutex_unlock(&dev->flow_db->lock); + free_obj: ++ mutex_unlock(&dev->flow_db->lock); + kfree(obj); + + return err; +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index 3178df55c4d85..0baf3b5518b46 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -2074,7 +2074,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) + case MLX5_IB_MMAP_DEVICE_MEM: + return "Device Memory"; + default: +- return NULL; ++ return "Unknown"; + } + } + +diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c +index 552d8271e423b..dc679c34ceefa 100644 +--- a/drivers/infiniband/sw/siw/siw_cm.c ++++ b/drivers/infiniband/sw/siw/siw_cm.c +@@ -973,6 +973,7 @@ static void siw_accept_newconn(struct siw_cep *cep) + siw_cep_put(cep); + new_cep->listen_cep = NULL; + if (rv) { ++ siw_cancel_mpatimer(new_cep); + siw_cep_set_free(new_cep); + goto error; + } +@@ -1097,9 +1098,12 @@ static void siw_cm_work_handler(struct work_struct *w) + /* + * Socket close before MPA request received. 
+ */ +- siw_dbg_cep(cep, "no mpareq: drop listener\n"); +- siw_cep_put(cep->listen_cep); +- cep->listen_cep = NULL; ++ if (cep->listen_cep) { ++ siw_dbg_cep(cep, ++ "no mpareq: drop listener\n"); ++ siw_cep_put(cep->listen_cep); ++ cep->listen_cep = NULL; ++ } + } + } + release_cep = 1; +@@ -1222,7 +1226,11 @@ static void siw_cm_llp_data_ready(struct sock *sk) + if (!cep) + goto out; + +- siw_dbg_cep(cep, "state: %d\n", cep->state); ++ siw_dbg_cep(cep, "cep state: %d, socket state %d\n", ++ cep->state, sk->sk_state); ++ ++ if (sk->sk_state != TCP_ESTABLISHED) ++ goto out; + + switch (cep->state) { + case SIW_EPSTATE_RDMA_MODE: +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index a7580c4855fec..c4dcef76e9646 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -2789,7 +2789,6 @@ static int srp_abort(struct scsi_cmnd *scmnd) + u32 tag; + u16 ch_idx; + struct srp_rdma_ch *ch; +- int ret; + + shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); + +@@ -2803,19 +2802,14 @@ static int srp_abort(struct scsi_cmnd *scmnd) + shost_printk(KERN_ERR, target->scsi_host, + "Sending SRP abort for tag %#x\n", tag); + if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, +- SRP_TSK_ABORT_TASK, NULL) == 0) +- ret = SUCCESS; +- else if (target->rport->state == SRP_RPORT_LOST) +- ret = FAST_IO_FAIL; +- else +- ret = FAILED; +- if (ret == SUCCESS) { ++ SRP_TSK_ABORT_TASK, NULL) == 0) { + srp_free_req(ch, req, scmnd, 0); +- scmnd->result = DID_ABORT << 16; +- scsi_done(scmnd); ++ return SUCCESS; + } ++ if (target->rport->state == SRP_RPORT_LOST) ++ return FAST_IO_FAIL; + +- return ret; ++ return FAILED; + } + + static int srp_reset_device(struct scsi_cmnd *scmnd) +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +index db33dc87f69ed..8966f7d5aab61 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +@@ -1886,13 +1886,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, + /* Get the leaf page size */ + tg = __ffs(smmu_domain->domain.pgsize_bitmap); + ++ num_pages = size >> tg; ++ + /* Convert page size of 12,14,16 (log2) to 1,2,3 */ + cmd->tlbi.tg = (tg - 10) / 2; + +- /* Determine what level the granule is at */ +- cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); +- +- num_pages = size >> tg; ++ /* ++ * Determine what level the granule is at. For non-leaf, both ++ * io-pgtable and SVA pass a nominal last-level granule because ++ * they don't know what level(s) actually apply, so ignore that ++ * and leave TTL=0. However for various errata reasons we still ++ * want to use a range command, so avoid the SVA corner case ++ * where both scale and num could be 0 as well. 
++ */ ++ if (cmd->tlbi.leaf) ++ cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); ++ else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1) ++ num_pages++; + } + + cmds.num = 0; +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index d4b5d20bd6dda..5c4f5aa8e87e4 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -3163,13 +3163,6 @@ static int iommu_suspend(void) + struct intel_iommu *iommu = NULL; + unsigned long flag; + +- for_each_active_iommu(iommu, drhd) { +- iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), +- GFP_KERNEL); +- if (!iommu->iommu_state) +- goto nomem; +- } +- + iommu_flush_all(); + + for_each_active_iommu(iommu, drhd) { +@@ -3189,12 +3182,6 @@ static int iommu_suspend(void) + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); + } + return 0; +- +-nomem: +- for_each_active_iommu(iommu, drhd) +- kfree(iommu->iommu_state); +- +- return -ENOMEM; + } + + static void iommu_resume(void) +@@ -3226,9 +3213,6 @@ static void iommu_resume(void) + + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); + } +- +- for_each_active_iommu(iommu, drhd) +- kfree(iommu->iommu_state); + } + + static struct syscore_ops iommu_syscore_ops = { +diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h +index db9df7c3790cd..c99cb715bd9a2 100644 +--- a/drivers/iommu/intel/iommu.h ++++ b/drivers/iommu/intel/iommu.h +@@ -595,7 +595,7 @@ struct intel_iommu { + struct iopf_queue *iopf_queue; + unsigned char iopfq_name[16]; + struct q_inval *qi; /* Queued invalidation info */ +- u32 *iommu_state; /* Store iommu states between suspend and resume.*/ ++ u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/ + + #ifdef CONFIG_IRQ_REMAP + struct ir_table *ir_table; /* Interrupt remapping info */ +diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c +index 9673cd60c84fc..0ba2a63a9538a 100644 +--- a/drivers/iommu/mtk_iommu.c ++++ b/drivers/iommu/mtk_iommu.c +@@ -223,7 +223,7 @@ struct mtk_iommu_data { + struct device *smicomm_dev; + + struct mtk_iommu_bank_data *bank; +- struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */ ++ struct mtk_iommu_domain *share_dom; + + struct regmap *pericfg; + struct mutex mutex; /* Protect m4u_group/m4u_dom above */ +@@ -579,8 +579,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, + struct mtk_iommu_domain *share_dom = data->share_dom; + const struct mtk_iommu_iova_region *region; + +- /* Always use share domain in sharing pgtable case */ +- if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) { ++ /* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */ ++ if (share_dom) { + dom->iop = share_dom->iop; + dom->cfg = share_dom->cfg; + dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap; +@@ -613,8 +613,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, + /* Update our support page sizes bitmap */ + dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; + +- if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) +- data->share_dom = dom; ++ data->share_dom = dom; + + update_iova_region: + /* Update the iova region for this domain */ +diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c +index aad8bc44459fe..d94d60b526461 100644 +--- a/drivers/leds/led-core.c ++++ b/drivers/leds/led-core.c +@@ -424,10 +424,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data, + + led_parse_fwnode_props(dev, fwnode, &props); + 
+- /* We want to label LEDs that can produce full range of colors +- * as RGB, not multicolor */ +- BUG_ON(props.color == LED_COLOR_ID_MULTI); +- + if (props.label) { + /* + * If init_data.devicename is NULL, then it indicates that +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c +index 95b132b52f332..4abe1e2f8ad81 100644 +--- a/drivers/md/dm-zoned-target.c ++++ b/drivers/md/dm-zoned-target.c +@@ -748,17 +748,16 @@ err: + /* + * Cleanup zoned device information. + */ +-static void dmz_put_zoned_device(struct dm_target *ti) ++static void dmz_put_zoned_devices(struct dm_target *ti) + { + struct dmz_target *dmz = ti->private; + int i; + +- for (i = 0; i < dmz->nr_ddevs; i++) { +- if (dmz->ddev[i]) { ++ for (i = 0; i < dmz->nr_ddevs; i++) ++ if (dmz->ddev[i]) + dm_put_device(ti, dmz->ddev[i]); +- dmz->ddev[i] = NULL; +- } +- } ++ ++ kfree(dmz->ddev); + } + + static int dmz_fixup_devices(struct dm_target *ti) +@@ -948,7 +947,7 @@ err_bio: + err_meta: + dmz_dtr_metadata(dmz->metadata); + err_dev: +- dmz_put_zoned_device(ti); ++ dmz_put_zoned_devices(ti); + err: + kfree(dmz->dev); + kfree(dmz); +@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti) + + bioset_exit(&dmz->bio_set); + +- dmz_put_zoned_device(ti); ++ dmz_put_zoned_devices(ti); + + mutex_destroy(&dmz->chunk_lock); + +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index fbef3c9badb65..98d4e93efa31c 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf, + + set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state); + r5l_wake_reclaim(conf->log, 0); ++ ++ /* release batch_last before wait to avoid risk of deadlock */ ++ if (ctx && ctx->batch_last) { ++ raid5_release_stripe(ctx->batch_last); ++ ctx->batch_last = NULL; ++ } ++ + wait_event_lock_irq(conf->wait_for_stripe, + is_inactive_blocked(conf, hash), + *(conf->hash_locks + hash)); +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c +index 1662c12e24ada..6fbd77dc1d18f 100644 +--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -893,6 +893,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + return -EINVAL; + } + ++ /* UBI cannot work on flashes with zero erasesize. */ ++ if (!mtd->erasesize) { ++ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n", ++ mtd->index); ++ return -EINVAL; ++ } ++ + if (ubi_num == UBI_DEV_NUM_AUTO) { + /* Search for an empty slot in the @ubi_devices array */ + for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index a73008b9e0b3c..ba906dfab055c 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -3012,14 +3012,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) + * from the wrong location resulting in the switch booting + * to wrong mode and inoperable. 
+ */ +- mv88e6xxx_g1_wait_eeprom_done(chip); ++ if (chip->info->ops->get_eeprom) ++ mv88e6xxx_g2_eeprom_wait(chip); + + gpiod_set_value_cansleep(gpiod, 1); + usleep_range(10000, 20000); + gpiod_set_value_cansleep(gpiod, 0); + usleep_range(10000, 20000); + +- mv88e6xxx_g1_wait_eeprom_done(chip); ++ if (chip->info->ops->get_eeprom) ++ mv88e6xxx_g2_eeprom_wait(chip); + } + } + +diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c +index 5848112036b08..964928285782c 100644 +--- a/drivers/net/dsa/mv88e6xxx/global1.c ++++ b/drivers/net/dsa/mv88e6xxx/global1.c +@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); + } + +-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) +-{ +- const unsigned long timeout = jiffies + 1 * HZ; +- u16 val; +- int err; +- +- /* Wait up to 1 second for the switch to finish reading the +- * EEPROM. +- */ +- while (time_before(jiffies, timeout)) { +- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); +- if (err) { +- dev_err(chip->dev, "Error reading status"); +- return; +- } +- +- /* If the switch is still resetting, it may not +- * respond on the bus, and so MDIO read returns +- * 0xffff. Differentiate between that, and waiting for +- * the EEPROM to be done by bit 0 being set. +- */ +- if (val != 0xffff && +- val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)) +- return; +- +- usleep_range(1000, 2000); +- } +- +- dev_err(chip->dev, "Timeout waiting for EEPROM done"); +-} +- + /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 + * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 + * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h +index 65958b2a0d3a3..04b57a21f7868 100644 +--- a/drivers/net/dsa/mv88e6xxx/global1.h ++++ b/drivers/net/dsa/mv88e6xxx/global1.h +@@ -281,7 +281,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); + int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); + int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); + int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); +-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); + + int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); + int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); +diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c +index ec49939968fac..ac302a935ce69 100644 +--- a/drivers/net/dsa/mv88e6xxx/global2.c ++++ b/drivers/net/dsa/mv88e6xxx/global2.c +@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) + * Offset 0x15: EEPROM Addr (for 8-bit data access) + */ + +-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) ++int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) + { + int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY); + int err; +diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h +index c05fad5c9f19d..751a6c988de42 100644 +--- a/drivers/net/dsa/mv88e6xxx/global2.h ++++ b/drivers/net/dsa/mv88e6xxx/global2.h +@@ -359,6 +359,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); + + int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, + int port); ++int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip); + + extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; + extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops; +diff --git 
a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index 0b4ec6e41eb41..1d21a281222d9 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1308,24 +1308,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb, + * the user space for finding a flow. During this process, OVS computes + * checksum on the first packet when CHECKSUM_PARTIAL flag is set. + * +- * So, re-compute TCP pseudo header checksum when configured for +- * trunk mode. ++ * So, re-compute TCP pseudo header checksum. + */ ++ + if (iph_proto == IPPROTO_TCP) { + struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); ++ + if (tcph->check == 0x0000) { + /* Recompute TCP pseudo header checksum */ +- if (adapter->is_active_trunk) { +- tcphdrlen = skb->len - iphlen; +- if (skb_proto == ETH_P_IP) +- tcph->check = +- ~csum_tcpudp_magic(iph->saddr, +- iph->daddr, tcphdrlen, iph_proto, 0); +- else if (skb_proto == ETH_P_IPV6) +- tcph->check = +- ~csum_ipv6_magic(&iph6->saddr, +- &iph6->daddr, tcphdrlen, iph_proto, 0); +- } ++ tcphdrlen = skb->len - iphlen; ++ if (skb_proto == ETH_P_IP) ++ tcph->check = ++ ~csum_tcpudp_magic(iph->saddr, ++ iph->daddr, tcphdrlen, iph_proto, 0); ++ else if (skb_proto == ETH_P_IPV6) ++ tcph->check = ++ ~csum_ipv6_magic(&iph6->saddr, ++ &iph6->daddr, tcphdrlen, iph_proto, 0); + /* Setup SKB fields for checksum offload */ + skb_partial_csum_set(skb, iphlen, + offsetof(struct tcphdr, check)); +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c +index ffea0c9c82f1e..97a9efe7b713e 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c +@@ -361,9 +361,9 @@ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) + 1000000ULL << 16); + + if (neg_adj) +- adj = I40E_PTP_40GB_INCVAL - diff; ++ adj = freq - diff; + else +- adj = I40E_PTP_40GB_INCVAL + diff; ++ adj = freq + diff; + + wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index 0ac5ae16308f6..17e6ac4445afc 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2862,8 +2862,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) + + eth->rx_events++; + if (likely(napi_schedule_prep(ð->rx_napi))) { +- __napi_schedule(ð->rx_napi); + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); ++ __napi_schedule(ð->rx_napi); + } + + return IRQ_HANDLED; +@@ -2875,8 +2875,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) + + eth->tx_events++; + if (likely(napi_schedule_prep(ð->tx_napi))) { +- __napi_schedule(ð->tx_napi); + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); ++ __napi_schedule(ð->tx_napi); + } + + return IRQ_HANDLED; +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h +index 0bfc375161ed6..a174c6fc626ac 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h +@@ -110,9 +110,9 @@ struct qed_ll2_info { + enum core_tx_dest tx_dest; + u8 tx_stats_en; + bool main_func_queue; ++ struct qed_ll2_cbs cbs; + struct qed_ll2_rx_queue rx_queue; + struct qed_ll2_tx_queue tx_queue; +- struct qed_ll2_cbs cbs; + }; + + extern const struct qed_ll2_ops qed_ll2_ops_pass; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +index 2b38a499a4045..533f5245ad945 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +@@ -105,6 +105,7 @@ struct stm32_ops { + int (*parse_data)(struct stm32_dwmac *dwmac, + struct device *dev); + u32 syscfg_eth_mask; ++ bool clk_rx_enable_in_suspend; + }; + + static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) +@@ -122,7 +123,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) + if (ret) + return ret; + +- if (!dwmac->dev->power.is_suspended) { ++ if (!dwmac->ops->clk_rx_enable_in_suspend || ++ !dwmac->dev->power.is_suspended) { + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) { + clk_disable_unprepare(dwmac->clk_tx); +@@ -515,7 +517,8 @@ static struct stm32_ops stm32mp1_dwmac_data = { + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, +- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK ++ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, ++ .clk_rx_enable_in_suspend = true + }; + + static const struct of_device_id stm32_dwmac_match[] = { +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c +index 25466cbdc16bd..9f2553799895d 100644 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c +@@ -1614,6 +1614,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) + if (tx_chn->irq <= 0) { + dev_err(dev, "Failed to get tx dma irq %d\n", + tx_chn->irq); ++ ret = tx_chn->irq ?: -ENXIO; + goto err; + } + +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c +index 5d6454fedb3f1..78ad2da3ee29b 100644 +--- a/drivers/net/usb/smsc75xx.c ++++ b/drivers/net/usb/smsc75xx.c +@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, + ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN + | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, &buf, 4); +- if (unlikely(ret < 0)) { ++ if (unlikely(ret < 4)) { ++ ret = ret < 0 ? 
ret : -ENODATA; ++ + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", + index, ret); + return ret; +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c +index f6dcec66f0a4b..208df4d419395 100644 +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -664,7 +664,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, + skb->protocol = htons(ETH_P_IPV6); + skb->dev = dev; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); + neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); + if (unlikely(!neigh)) +@@ -672,10 +672,10 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, + if (!IS_ERR(neigh)) { + sock_confirm_neigh(skb, neigh); + ret = neigh_output(neigh, skb, false); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + return ret; + } +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + IP6_INC_STATS(dev_net(dst->dev), + ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); +@@ -889,7 +889,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s + } + } + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + + neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); + if (!IS_ERR(neigh)) { +@@ -898,11 +898,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s + sock_confirm_neigh(skb, neigh); + /* if crossing protocols, can not use the cached header */ + ret = neigh_output(neigh, skb, is_v6gw); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + return ret; + } + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + vrf_tx_error(skb->dev, skb); + return -EINVAL; + } +diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c +index 0c3eb850fcb79..619dd71c9d75e 100644 +--- a/drivers/net/vxlan/vxlan_core.c ++++ b/drivers/net/vxlan/vxlan_core.c +@@ -1910,7 +1910,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) + struct vxlan_fdb *f; + struct sk_buff *reply; + +- if (!(n->nud_state & NUD_CONNECTED)) { ++ if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) { + neigh_release(n); + goto out; + } +@@ -2074,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) + struct vxlan_fdb *f; + struct sk_buff *reply; + +- if (!(n->nud_state & NUD_CONNECTED)) { ++ if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) { + neigh_release(n); + goto out; + } +diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c +index 1c53b55469270..5fec8abe8e1d3 100644 +--- a/drivers/net/wan/fsl_ucc_hdlc.c ++++ b/drivers/net/wan/fsl_ucc_hdlc.c +@@ -34,6 +34,8 @@ + #define TDM_PPPOHT_SLIC_MAXIN + #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S) + ++static int uhdlc_close(struct net_device *dev); ++ + static struct ucc_tdm_info utdm_primary_info = { + .uf_info = { + .tsa = 0, +@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev) + hdlc_device *hdlc = dev_to_hdlc(dev); + struct ucc_hdlc_private *priv = hdlc->priv; + struct ucc_tdm *utdm = priv->utdm; ++ int rc = 0; + + if (priv->hdlc_busy != 1) { + if (request_irq(priv->ut_info->uf_info.irq, +@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev) + napi_enable(&priv->napi); + netdev_reset_queue(dev); + netif_start_queue(dev); +- hdlc_open(dev); ++ ++ rc = hdlc_open(dev); ++ if (rc) ++ uhdlc_close(dev); + } + +- return 0; ++ return rc; + } + + static void uhdlc_memclean(struct ucc_hdlc_private *priv) +@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev) + netdev_reset_queue(dev); + 
priv->hdlc_busy = 0; + ++ hdlc_close(dev); ++ + return 0; + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +index c62576e442bdf..2d481849a9c23 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h ++++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +@@ -295,9 +295,9 @@ struct iwl_fw_ini_fifo_hdr { + struct iwl_fw_ini_error_dump_range { + __le32 range_data_size; + union { +- __le32 internal_base_addr; +- __le64 dram_base_addr; +- __le32 page_num; ++ __le32 internal_base_addr __packed; ++ __le64 dram_base_addr __packed; ++ __le32 page_num __packed; + struct iwl_fw_ini_fifo_hdr fifo_hdr; + struct iwl_cmd_header fw_pkt_hdr; + }; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +index 887d0789c96c3..2e3c98eaa400c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +@@ -796,7 +796,7 @@ out: + mvm->nvm_data->bands[0].n_channels = 1; + mvm->nvm_data->bands[0].n_bitrates = 1; + mvm->nvm_data->bands[0].bitrates = +- (void *)((u8 *)mvm->nvm_data->channels + 1); ++ (void *)(mvm->nvm_data->channels + 1); + mvm->nvm_data->bands[0].bitrates->hw_value = 10; + } + +diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +index a04b66284af4a..7351acac6932d 100644 +--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c ++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +@@ -965,8 +965,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, + } + } + +- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len); +- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba); ++ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len); ++ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len; + tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp; + } + } +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c +index 65420ad674167..257737137cd70 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c ++++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c +@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, + rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length); + rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off; + +- if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) { ++ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) + ++ rx_pkt_off > skb->len) { + mwifiex_dbg(priv->adapter, ERROR, + "wrong rx packet offset: len=%d, rx_pkt_off=%d\n", + skb->len, rx_pkt_off); +@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, + return -1; + } + +- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, +- sizeof(bridge_tunnel_header))) || +- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, +- sizeof(rfc1042_header)) && +- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && +- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { ++ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len && ++ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, ++ sizeof(bridge_tunnel_header))) || ++ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, ++ sizeof(rfc1042_header)) && ++ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && ++ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) { + /* + * Replace the 803 header and rfc1042 header (llc/snap) with an + * EthernetII header, keep the src/dst and snap_type +diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +index 0acabba2d1a50..5d402cf2951cb 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, + s8 *lna_2g, s8 *lna_5g, + struct ieee80211_channel *chan) + { +- u16 val; + u8 lna; + +- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); +- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) +- *lna_2g = 0; +- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) +- memset(lna_5g, 0, sizeof(s8) * 3); +- + if (chan->band == NL80211_BAND_2GHZ) + lna = *lna_2g; + else if (chan->hw_value <= 64) +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +index c57e05a5c65e4..91807bf662dde 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) + struct ieee80211_channel *chan = dev->mphy.chandef.chan; + int channel = chan->hw_value; + s8 lna_5g[3], lna_2g; +- u8 lna; ++ bool use_lna; ++ u8 lna = 0; + u16 val; + + if (chan->band == NL80211_BAND_2GHZ) +@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) + dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; + dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; + +- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); ++ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); ++ if (chan->band == NL80211_BAND_2GHZ) ++ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G); ++ else ++ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G); ++ ++ if (use_lna) ++ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); ++ + dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); + } + EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c +index 95501b77ef314..0fbf331a748fd 100644 +--- a/drivers/of/dynamic.c ++++ b/drivers/of/dynamic.c +@@ -902,13 +902,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, + { + struct of_changeset_entry *ce; + ++ if (WARN_ON(action >= ARRAY_SIZE(action_names))) ++ return -EINVAL; ++ + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) + return -ENOMEM; + +- if (WARN_ON(action >= ARRAY_SIZE(action_names))) +- return -EINVAL; +- + /* get a reference to the node */ + ce->action = action; + ce->np = of_node_get(np); +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c +index d24712a76ba7c..0ccd92faf078a 100644 +--- a/drivers/pci/controller/dwc/pcie-qcom.c ++++ b/drivers/pci/controller/dwc/pcie-qcom.c +@@ -40,7 +40,6 @@ + #define PARF_PHY_REFCLK 0x4c + #define PARF_CONFIG_BITS 0x50 + #define PARF_DBI_BASE_ADDR 0x168 +-#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3 0x16c /* Register offset specific to IP ver 2.3.3 */ + #define PARF_MHI_CLOCK_RESET_CTRL 0x174 + #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178 + #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 +@@ -1148,8 +1147,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) + u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + u32 val; + +- writel(SLV_ADDR_SPACE_SZ, +- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3); ++ writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); + + val = readl(pcie->parf + PARF_PHY_CTRL); + val &= ~BIT(0); +diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c +index a48d9b7d29217..8fee9b330b613 100644 +--- 
a/drivers/ptp/ptp_ocp.c ++++ b/drivers/ptp/ptp_ocp.c +@@ -3532,7 +3532,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) + return 0; + + out: +- ptp_ocp_dev_release(&bp->dev); + put_device(&bp->dev); + return err; + } +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 351f0fd225b14..f6a95f72af18d 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -5543,6 +5543,8 @@ regulator_register(struct device *dev, + goto rinse; + } + device_initialize(&rdev->dev); ++ dev_set_drvdata(&rdev->dev, rdev); ++ rdev->dev.class = ®ulator_class; + spin_lock_init(&rdev->err_lock); + + /* +@@ -5604,11 +5606,9 @@ regulator_register(struct device *dev, + rdev->supply_name = regulator_desc->supply_name; + + /* register with sysfs */ +- rdev->dev.class = ®ulator_class; + rdev->dev.parent = config->dev; + dev_set_name(&rdev->dev, "regulator.%lu", + (unsigned long) atomic_inc_return(®ulator_no)); +- dev_set_drvdata(&rdev->dev, rdev); + + /* set regulator constraints */ + if (init_data) +diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c +index 8a5ce990f1bf9..a0441b8086712 100644 +--- a/drivers/regulator/mt6358-regulator.c ++++ b/drivers/regulator/mt6358-regulator.c +@@ -35,19 +35,19 @@ struct mt6358_regulator_info { + }; + + #define MT6358_BUCK(match, vreg, min, max, step, \ +- volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask, \ ++ vosel_mask, _da_vsel_reg, _da_vsel_mask, \ + _modeset_reg, _modeset_shift) \ + [MT6358_ID_##vreg] = { \ + .desc = { \ + .name = #vreg, \ + .of_match = of_match_ptr(match), \ +- .ops = &mt6358_volt_range_ops, \ ++ .ops = &mt6358_buck_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MT6358_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ +- .linear_ranges = volt_ranges, \ +- .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ ++ .min_uV = (min), \ ++ .uV_step = (step), \ + .vsel_reg = MT6358_BUCK_##vreg##_ELR0, \ + .vsel_mask = vosel_mask, \ + .enable_reg = MT6358_BUCK_##vreg##_CON0, \ +@@ -87,7 +87,7 @@ struct mt6358_regulator_info { + } + + #define MT6358_LDO1(match, vreg, min, max, step, \ +- volt_ranges, _da_vsel_reg, _da_vsel_mask, \ ++ _da_vsel_reg, _da_vsel_mask, \ + vosel, vosel_mask) \ + [MT6358_ID_##vreg] = { \ + .desc = { \ +@@ -98,8 +98,8 @@ struct mt6358_regulator_info { + .id = MT6358_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ +- .linear_ranges = volt_ranges, \ +- .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ ++ .min_uV = (min), \ ++ .uV_step = (step), \ + .vsel_reg = vosel, \ + .vsel_mask = vosel_mask, \ + .enable_reg = MT6358_LDO_##vreg##_CON0, \ +@@ -131,19 +131,19 @@ struct mt6358_regulator_info { + } + + #define MT6366_BUCK(match, vreg, min, max, step, \ +- volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask, \ ++ vosel_mask, _da_vsel_reg, _da_vsel_mask, \ + _modeset_reg, _modeset_shift) \ + [MT6366_ID_##vreg] = { \ + .desc = { \ + .name = #vreg, \ + .of_match = of_match_ptr(match), \ +- .ops = &mt6358_volt_range_ops, \ ++ .ops = &mt6358_buck_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MT6366_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ +- .linear_ranges = volt_ranges, \ +- .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ ++ .min_uV = (min), \ ++ .uV_step = (step), \ + .vsel_reg = MT6358_BUCK_##vreg##_ELR0, \ + .vsel_mask = vosel_mask, \ + .enable_reg = MT6358_BUCK_##vreg##_CON0, \ +@@ -183,7 +183,7 @@ struct mt6358_regulator_info { + } + + #define 
MT6366_LDO1(match, vreg, min, max, step, \ +- volt_ranges, _da_vsel_reg, _da_vsel_mask, \ ++ _da_vsel_reg, _da_vsel_mask, \ + vosel, vosel_mask) \ + [MT6366_ID_##vreg] = { \ + .desc = { \ +@@ -194,8 +194,8 @@ struct mt6358_regulator_info { + .id = MT6366_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ +- .linear_ranges = volt_ranges, \ +- .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ ++ .min_uV = (min), \ ++ .uV_step = (step), \ + .vsel_reg = vosel, \ + .vsel_mask = vosel_mask, \ + .enable_reg = MT6358_LDO_##vreg##_CON0, \ +@@ -226,21 +226,6 @@ struct mt6358_regulator_info { + .qi = BIT(15), \ + } + +-static const struct linear_range buck_volt_range1[] = { +- REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250), +-}; +- +-static const struct linear_range buck_volt_range2[] = { +- REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500), +-}; +- +-static const struct linear_range buck_volt_range3[] = { +- REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000), +-}; +- +-static const struct linear_range buck_volt_range4[] = { +- REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500), +-}; + + static const unsigned int vdram2_voltages[] = { + 600000, 1800000, +@@ -463,9 +448,9 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev) + } + } + +-static const struct regulator_ops mt6358_volt_range_ops = { +- .list_voltage = regulator_list_voltage_linear_range, +- .map_voltage = regulator_map_voltage_linear_range, ++static const struct regulator_ops mt6358_buck_ops = { ++ .list_voltage = regulator_list_voltage_linear, ++ .map_voltage = regulator_map_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = mt6358_get_buck_voltage_sel, + .set_voltage_time_sel = regulator_set_voltage_time_sel, +@@ -477,6 +462,18 @@ static const struct regulator_ops mt6358_volt_range_ops = { + .get_mode = mt6358_regulator_get_mode, + }; + ++static const struct regulator_ops mt6358_volt_range_ops = { ++ .list_voltage = regulator_list_voltage_linear, ++ .map_voltage = regulator_map_voltage_linear, ++ .set_voltage_sel = regulator_set_voltage_sel_regmap, ++ .get_voltage_sel = mt6358_get_buck_voltage_sel, ++ .set_voltage_time_sel = regulator_set_voltage_time_sel, ++ .enable = regulator_enable_regmap, ++ .disable = regulator_disable_regmap, ++ .is_enabled = regulator_is_enabled_regmap, ++ .get_status = mt6358_get_status, ++}; ++ + static const struct regulator_ops mt6358_volt_table_ops = { + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_iterate, +@@ -500,35 +497,23 @@ static const struct regulator_ops mt6358_volt_fixed_ops = { + /* The array is indexed by id(MT6358_ID_XXX) */ + static struct mt6358_regulator_info mt6358_regulators[] = { + MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500, +- buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, +- MT6358_VDRAM1_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, MT6358_VDRAM1_ANA_CON0, 8), + MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, +- MT6358_VCORE_VGPU_ANA_CON0, 1), +- MT6358_BUCK("buck_vcore_sshub", VCORE_SSHUB, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_SSHUB_ELR0, 0x7f, +- MT6358_VCORE_VGPU_ANA_CON0, 1), ++ 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 1), + MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000, +- buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, +- MT6358_VPA_ANA_CON0, 3), ++ 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 
MT6358_VPA_ANA_CON0, 3), + MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, +- MT6358_VPROC_ANA_CON0, 1), ++ 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 1), + MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, +- MT6358_VPROC_ANA_CON0, 2), ++ 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 2), + MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, +- MT6358_VCORE_VGPU_ANA_CON0, 2), ++ 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 2), + MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500, +- buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, +- MT6358_VS2_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, MT6358_VS2_ANA_CON0, 8), + MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, +- MT6358_VMODEM_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, MT6358_VMODEM_ANA_CON0, 8), + MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500, +- buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, +- MT6358_VS1_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, MT6358_VS1_ANA_CON0, 8), + MT6358_REG_FIXED("ldo_vrf12", VRF12, + MT6358_LDO_VRF12_CON0, 0, 1200000), + MT6358_REG_FIXED("ldo_vio18", VIO18, +@@ -582,55 +567,35 @@ static struct mt6358_regulator_info mt6358_regulators[] = { + MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx, + MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00), + MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON0, 0x7f), ++ MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON0, 0x7f), + MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON2, 0x7f), +- MT6358_LDO1("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB, 500000, +- 1293750, 6250, buck_volt_range1, +- MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f, +- MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f), ++ MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON2, 0x7f), + MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON3, 0x7f), ++ MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON3, 0x7f), + MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON1, 0x7f), ++ MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON1, 0x7f), + }; + + /* The array is indexed by id(MT6366_ID_XXX) */ + static struct mt6358_regulator_info mt6366_regulators[] = { + MT6366_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500, +- buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, +- MT6358_VDRAM1_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, MT6358_VDRAM1_ANA_CON0, 8), + MT6366_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, +- MT6358_VCORE_VGPU_ANA_CON0, 1), +- MT6366_BUCK("buck_vcore_sshub", VCORE_SSHUB, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_SSHUB_ELR0, 0x7f, +- MT6358_VCORE_VGPU_ANA_CON0, 1), ++ 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 1), + MT6366_BUCK("buck_vpa", VPA, 500000, 3650000, 50000, +- buck_volt_range3, 0x3f, 
MT6358_BUCK_VPA_DBG0, 0x3f, +- MT6358_VPA_ANA_CON0, 3), ++ 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, MT6358_VPA_ANA_CON0, 3), + MT6366_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, +- MT6358_VPROC_ANA_CON0, 1), ++ 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 1), + MT6366_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, +- MT6358_VPROC_ANA_CON0, 2), ++ 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 2), + MT6366_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, +- MT6358_VCORE_VGPU_ANA_CON0, 2), ++ 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 2), + MT6366_BUCK("buck_vs2", VS2, 500000, 2087500, 12500, +- buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, +- MT6358_VS2_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, MT6358_VS2_ANA_CON0, 8), + MT6366_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250, +- buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, +- MT6358_VMODEM_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, MT6358_VMODEM_ANA_CON0, 8), + MT6366_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500, +- buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, +- MT6358_VS1_ANA_CON0, 8), ++ 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, MT6358_VS1_ANA_CON0, 8), + MT6366_REG_FIXED("ldo_vrf12", VRF12, + MT6358_LDO_VRF12_CON0, 0, 1200000), + MT6366_REG_FIXED("ldo_vio18", VIO18, +@@ -673,21 +638,13 @@ static struct mt6358_regulator_info mt6366_regulators[] = { + MT6366_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx, + MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00), + MT6366_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON0, 0x7f), ++ MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON0, 0x7f), + MT6366_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON2, 0x7f), +- MT6366_LDO1("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB, 500000, +- 1293750, 6250, buck_volt_range1, +- MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f, +- MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f), ++ MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON2, 0x7f), + MT6366_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON3, 0x7f), ++ MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON3, 0x7f), + MT6366_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250, +- buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, +- MT6358_LDO_VSRAM_CON1, 0x7f), ++ MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON1, 0x7f), + }; + + static int mt6358_regulator_probe(struct platform_device *pdev) +diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c +index df782646e856f..ab2f35bc294da 100644 +--- a/drivers/s390/scsi/zfcp_aux.c ++++ b/drivers/s390/scsi/zfcp_aux.c +@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, + if (port) { + put_device(&port->dev); + retval = -EEXIST; +- goto err_out; ++ goto err_put; + } + + port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); + if (!port) +- goto err_out; ++ goto err_put; + + rwlock_init(&port->unit_list_lock); + INIT_LIST_HEAD(&port->unit_list); +@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, 
u64 wwpn, + + if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { + kfree(port); +- goto err_out; ++ goto err_put; + } + retval = -EINVAL; + +@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, + + return port; + +-err_out: ++err_put: + zfcp_ccw_adapter_put(adapter); ++err_out: + return ERR_PTR(retval); + } +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c +index 3f062e4013ab6..013a9a334972e 100644 +--- a/drivers/scsi/aacraid/commsup.c ++++ b/drivers/scsi/aacraid/commsup.c +@@ -1451,7 +1451,7 @@ retry_next: + #endif + break; + } +- scsi_rescan_device(&device->sdev_gendev); ++ scsi_rescan_device(device); + break; + + default: +diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c +index 05d3ce9b72dba..c4acf65379d20 100644 +--- a/drivers/scsi/mvumi.c ++++ b/drivers/scsi/mvumi.c +@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) + + sdev = scsi_device_lookup(mhba->shost, 0, id, 0); + if (sdev) { +- scsi_rescan_device(&sdev->sdev_gendev); ++ scsi_rescan_device(sdev); + scsi_device_put(sdev); + } + } +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index fb6e9a7a7f58b..d25e1c2472538 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -2445,7 +2445,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) + envp[idx++] = "SDEV_MEDIA_CHANGE=1"; + break; + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: +- scsi_rescan_device(&sdev->sdev_gendev); ++ scsi_rescan_device(sdev); + envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: +diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h +index c52de9a973e46..b14545acb40f5 100644 +--- a/drivers/scsi/scsi_priv.h ++++ b/drivers/scsi/scsi_priv.h +@@ -132,7 +132,6 @@ extern int scsi_complete_async_scans(void); + extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, + unsigned int, u64, enum scsi_scan_mode); + extern void scsi_forget_host(struct Scsi_Host *); +-extern void scsi_rescan_device(struct device *); + + /* scsi_sysctl.c */ + #ifdef CONFIG_SYSCTL +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c +index d12f2dcb4040a..ed26c52ed8474 100644 +--- a/drivers/scsi/scsi_scan.c ++++ b/drivers/scsi/scsi_scan.c +@@ -1611,12 +1611,24 @@ int scsi_add_device(struct Scsi_Host *host, uint channel, + } + EXPORT_SYMBOL(scsi_add_device); + +-void scsi_rescan_device(struct device *dev) ++int scsi_rescan_device(struct scsi_device *sdev) + { +- struct scsi_device *sdev = to_scsi_device(dev); ++ struct device *dev = &sdev->sdev_gendev; ++ int ret = 0; + + device_lock(dev); + ++ /* ++ * Bail out if the device is not running. Otherwise, the rescan may ++ * block waiting for commands to be executed, with us holding the ++ * device lock. This can result in a potential deadlock in the power ++ * management core code when system resume is on-going. 
++ */ ++ if (sdev->sdev_state != SDEV_RUNNING) { ++ ret = -EWOULDBLOCK; ++ goto unlock; ++ } ++ + scsi_attach_vpd(sdev); + + if (sdev->handler && sdev->handler->rescan) +@@ -1629,7 +1641,11 @@ void scsi_rescan_device(struct device *dev) + drv->rescan(dev); + module_put(dev->driver->owner); + } ++ ++unlock: + device_unlock(dev); ++ ++ return ret; + } + EXPORT_SYMBOL(scsi_rescan_device); + +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index cac7c902cf70a..1f531063d6331 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -762,7 +762,7 @@ static ssize_t + store_rescan_field (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +- scsi_rescan_device(dev); ++ scsi_rescan_device(to_scsi_device(dev)); + return count; + } + static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); +@@ -855,7 +855,7 @@ store_state_field(struct device *dev, struct device_attribute *attr, + * waiting for pending I/O to finish. + */ + blk_mq_run_hw_queues(sdev->request_queue, true); +- scsi_rescan_device(dev); ++ scsi_rescan_device(sdev); + } + + return ret == 0 ? count : -EINVAL; +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index e934779bf05c8..30184f7b762c1 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -213,18 +213,32 @@ cache_type_store(struct device *dev, struct device_attribute *attr, + } + + static ssize_t +-manage_start_stop_show(struct device *dev, struct device_attribute *attr, +- char *buf) ++manage_start_stop_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + +- return sprintf(buf, "%u\n", sdp->manage_start_stop); ++ return sysfs_emit(buf, "%u\n", ++ sdp->manage_system_start_stop && ++ sdp->manage_runtime_start_stop); + } ++static DEVICE_ATTR_RO(manage_start_stop); + + static ssize_t +-manage_start_stop_store(struct device *dev, struct device_attribute *attr, +- const char *buf, size_t count) ++manage_system_start_stop_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct scsi_disk *sdkp = to_scsi_disk(dev); ++ struct scsi_device *sdp = sdkp->device; ++ ++ return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop); ++} ++ ++static ssize_t ++manage_system_start_stop_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) + { + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; +@@ -236,11 +250,42 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr, + if (kstrtobool(buf, &v)) + return -EINVAL; + +- sdp->manage_start_stop = v; ++ sdp->manage_system_start_stop = v; + + return count; + } +-static DEVICE_ATTR_RW(manage_start_stop); ++static DEVICE_ATTR_RW(manage_system_start_stop); ++ ++static ssize_t ++manage_runtime_start_stop_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct scsi_disk *sdkp = to_scsi_disk(dev); ++ struct scsi_device *sdp = sdkp->device; ++ ++ return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop); ++} ++ ++static ssize_t ++manage_runtime_start_stop_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct scsi_disk *sdkp = to_scsi_disk(dev); ++ struct scsi_device *sdp = sdkp->device; ++ bool v; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++ ++ if (kstrtobool(buf, &v)) ++ return -EINVAL; ++ ++ sdp->manage_runtime_start_stop = v; ++ ++ return 
count; ++} ++static DEVICE_ATTR_RW(manage_runtime_start_stop); + + static ssize_t + allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) +@@ -572,6 +617,8 @@ static struct attribute *sd_disk_attrs[] = { + &dev_attr_FUA.attr, + &dev_attr_allow_restart.attr, + &dev_attr_manage_start_stop.attr, ++ &dev_attr_manage_system_start_stop.attr, ++ &dev_attr_manage_runtime_start_stop.attr, + &dev_attr_protection_type.attr, + &dev_attr_protection_mode.attr, + &dev_attr_app_tag_own.attr, +@@ -3579,7 +3626,8 @@ static int sd_remove(struct device *dev) + + device_del(&sdkp->disk_dev); + del_gendisk(sdkp->disk); +- sd_shutdown(dev); ++ if (!sdkp->suspended) ++ sd_shutdown(dev); + + put_disk(sdkp->disk); + return 0; +@@ -3652,13 +3700,20 @@ static void sd_shutdown(struct device *dev) + sd_sync_cache(sdkp, NULL); + } + +- if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { ++ if (system_state != SYSTEM_RESTART && ++ sdkp->device->manage_system_start_stop) { + sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + sd_start_stop_device(sdkp, 0); + } + } + +-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) ++static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime) ++{ ++ return (sdev->manage_system_start_stop && !runtime) || ++ (sdev->manage_runtime_start_stop && runtime); ++} ++ ++static int sd_suspend_common(struct device *dev, bool runtime) + { + struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_sense_hdr sshdr; +@@ -3690,15 +3745,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) + } + } + +- if (sdkp->device->manage_start_stop) { ++ if (sd_do_start_stop(sdkp->device, runtime)) { + if (!sdkp->device->silence_suspend) + sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + /* an error is not worth aborting a system sleep */ + ret = sd_start_stop_device(sdkp, 0); +- if (ignore_stop_errors) ++ if (!runtime) + ret = 0; + } + ++ if (!ret) ++ sdkp->suspended = true; ++ + return ret; + } + +@@ -3707,29 +3765,37 @@ static int sd_suspend_system(struct device *dev) + if (pm_runtime_suspended(dev)) + return 0; + +- return sd_suspend_common(dev, true); ++ return sd_suspend_common(dev, false); + } + + static int sd_suspend_runtime(struct device *dev) + { +- return sd_suspend_common(dev, false); ++ return sd_suspend_common(dev, true); + } + +-static int sd_resume(struct device *dev) ++static int sd_resume(struct device *dev, bool runtime) + { + struct scsi_disk *sdkp = dev_get_drvdata(dev); +- int ret; ++ int ret = 0; + + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ + return 0; + +- if (!sdkp->device->manage_start_stop) ++ if (!sd_do_start_stop(sdkp->device, runtime)) { ++ sdkp->suspended = false; + return 0; ++ } + +- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); +- ret = sd_start_stop_device(sdkp, 1); +- if (!ret) ++ if (!sdkp->device->no_start_on_resume) { ++ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ++ ret = sd_start_stop_device(sdkp, 1); ++ } ++ ++ if (!ret) { + opal_unlock_from_suspend(sdkp->opal_dev); ++ sdkp->suspended = false; ++ } ++ + return ret; + } + +@@ -3738,7 +3804,7 @@ static int sd_resume_system(struct device *dev) + if (pm_runtime_suspended(dev)) + return 0; + +- return sd_resume(dev); ++ return sd_resume(dev, false); + } + + static int sd_resume_runtime(struct device *dev) +@@ -3762,7 +3828,7 @@ static int sd_resume_runtime(struct device *dev) + "Failed to clear sense data\n"); + } + +- return sd_resume(dev); ++ return sd_resume(dev, true); + } + 
+ /** +diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h +index 5eea762f84d18..409dda5350d10 100644 +--- a/drivers/scsi/sd.h ++++ b/drivers/scsi/sd.h +@@ -131,6 +131,7 @@ struct scsi_disk { + u8 provisioning_mode; + u8 zeroing_mode; + u8 nr_actuators; /* Number of actuators */ ++ bool suspended; /* Disk is suspended (stopped) */ + unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ + unsigned WCE : 1; /* state of disk WCE bit */ +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c +index 9f0f69c1ed665..47d487729635c 100644 +--- a/drivers/scsi/smartpqi/smartpqi_init.c ++++ b/drivers/scsi/smartpqi/smartpqi_init.c +@@ -2278,7 +2278,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, + device->advertised_queue_depth = device->queue_depth; + scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); + if (device->rescan) { +- scsi_rescan_device(&device->sdev->sdev_gendev); ++ scsi_rescan_device(device->sdev); + device->rescan = false; + } + } +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index 7a1dc5c7c49ee..c2d981d5a2dd5 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -471,7 +471,7 @@ static void storvsc_device_scan(struct work_struct *work) + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); + if (!sdev) + goto done; +- scsi_rescan_device(&sdev->sdev_gendev); ++ scsi_rescan_device(sdev); + scsi_device_put(sdev); + + done: +diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c +index 2a79ab16134b1..3f8c553f3d91e 100644 +--- a/drivers/scsi/virtio_scsi.c ++++ b/drivers/scsi/virtio_scsi.c +@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, + /* Handle "Parameters changed", "Mode parameters changed", and + "Capacity data has changed". 
*/ + if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) +- scsi_rescan_device(&sdev->sdev_gendev); ++ scsi_rescan_device(sdev); + + scsi_device_put(sdev); + } +diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c +index c760aac070e54..3b56d5e7080e1 100644 +--- a/drivers/spi/spi-zynqmp-gqspi.c ++++ b/drivers/spi/spi-zynqmp-gqspi.c +@@ -1218,9 +1218,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) + return 0; + + clk_dis_all: +- pm_runtime_put_sync(&pdev->dev); +- pm_runtime_set_suspended(&pdev->dev); + pm_runtime_disable(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); ++ pm_runtime_set_suspended(&pdev->dev); + clk_disable_unprepare(xqspi->refclk); + clk_dis_pclk: + clk_disable_unprepare(xqspi->pclk); +@@ -1244,11 +1244,15 @@ static int zynqmp_qspi_remove(struct platform_device *pdev) + { + struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev); + ++ pm_runtime_get_sync(&pdev->dev); ++ + zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); ++ ++ pm_runtime_disable(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); ++ pm_runtime_set_suspended(&pdev->dev); + clk_disable_unprepare(xqspi->refclk); + clk_disable_unprepare(xqspi->pclk); +- pm_runtime_set_suspended(&pdev->dev); +- pm_runtime_disable(&pdev->dev); + + return 0; + } +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index d21f88de197c7..301fe376a1206 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -883,7 +883,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) + EXPORT_SYMBOL(target_to_linux_sector); + + struct devices_idr_iter { +- struct config_item *prev_item; + int (*fn)(struct se_device *dev, void *data); + void *data; + }; +@@ -893,11 +892,9 @@ static int target_devices_idr_iter(int id, void *p, void *data) + { + struct devices_idr_iter *iter = data; + struct se_device *dev = p; ++ struct config_item *item; + int ret; + +- config_item_put(iter->prev_item); +- iter->prev_item = NULL; +- + /* + * We add the device early to the idr, so it can be used + * by backend modules during configuration. We do not want +@@ -907,12 +904,13 @@ static int target_devices_idr_iter(int id, void *p, void *data) + if (!target_dev_configured(dev)) + return 0; + +- iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); +- if (!iter->prev_item) ++ item = config_item_get_unless_zero(&dev->dev_group.cg_item); ++ if (!item) + return 0; + mutex_unlock(&device_mutex); + + ret = iter->fn(dev, iter->data); ++ config_item_put(item); + + mutex_lock(&device_mutex); + return ret; +@@ -935,7 +933,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data), + mutex_lock(&device_mutex); + ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); + mutex_unlock(&device_mutex); +- config_item_put(iter.prev_item); + return ret; + } + +diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c +index 33eb941fcf154..10bfc5f1c50d5 100644 +--- a/drivers/vhost/vringh.c ++++ b/drivers/vhost/vringh.c +@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, + done += partlen; + len -= partlen; + ptr += partlen; ++ iov->consumed += partlen; ++ iov->iov[iov->i].iov_len -= partlen; ++ iov->iov[iov->i].iov_base += partlen; + +- vringh_kiov_advance(iov, partlen); ++ if (!iov->iov[iov->i].iov_len) { ++ /* Fix up old iov element then increment. 
*/ ++ iov->iov[iov->i].iov_len = iov->consumed; ++ iov->iov[iov->i].iov_base -= iov->consumed; ++ ++ iov->consumed = 0; ++ iov->i++; ++ } + } + return done; + } +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c +index c443f04aaad77..80b46de14f413 100644 +--- a/drivers/xen/events/events_base.c ++++ b/drivers/xen/events/events_base.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -96,6 +97,7 @@ enum xen_irq_type { + struct irq_info { + struct list_head list; + struct list_head eoi_list; ++ struct rcu_work rwork; + short refcnt; + u8 spurious_cnt; + u8 is_accounted; +@@ -145,23 +147,13 @@ const struct evtchn_ops *evtchn_ops; + */ + static DEFINE_MUTEX(irq_mapping_update_lock); + +-/* +- * Lock protecting event handling loop against removing event channels. +- * Adding of event channels is no issue as the associated IRQ becomes active +- * only after everything is setup (before request_[threaded_]irq() the handler +- * can't be entered for an event, as the event channel will be unmasked only +- * then). +- */ +-static DEFINE_RWLOCK(evtchn_rwlock); +- + /* + * Lock hierarchy: + * + * irq_mapping_update_lock +- * evtchn_rwlock +- * IRQ-desc lock +- * percpu eoi_list_lock +- * irq_info->lock ++ * IRQ-desc lock ++ * percpu eoi_list_lock ++ * irq_info->lock + */ + + static LIST_HEAD(xen_irq_list_head); +@@ -305,6 +297,22 @@ static void channels_on_cpu_inc(struct irq_info *info) + info->is_accounted = 1; + } + ++static void delayed_free_irq(struct work_struct *work) ++{ ++ struct irq_info *info = container_of(to_rcu_work(work), struct irq_info, ++ rwork); ++ unsigned int irq = info->irq; ++ ++ /* Remove the info pointer only now, with no potential users left. */ ++ set_info_for_irq(irq, NULL); ++ ++ kfree(info); ++ ++ /* Legacy IRQ descriptors are managed by the arch. */ ++ if (irq >= nr_legacy_irqs()) ++ irq_free_desc(irq); ++} ++ + /* Constructors for packed IRQ information. 
*/ + static int xen_irq_info_common_setup(struct irq_info *info, + unsigned irq, +@@ -667,33 +675,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work) + + eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); + +- read_lock_irqsave(&evtchn_rwlock, flags); ++ rcu_read_lock(); + + while (true) { +- spin_lock(&eoi->eoi_list_lock); ++ spin_lock_irqsave(&eoi->eoi_list_lock, flags); + + info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, + eoi_list); + +- if (info == NULL || now < info->eoi_time) { +- spin_unlock(&eoi->eoi_list_lock); ++ if (info == NULL) ++ break; ++ ++ if (now < info->eoi_time) { ++ mod_delayed_work_on(info->eoi_cpu, system_wq, ++ &eoi->delayed, ++ info->eoi_time - now); + break; + } + + list_del_init(&info->eoi_list); + +- spin_unlock(&eoi->eoi_list_lock); ++ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); + + info->eoi_time = 0; + + xen_irq_lateeoi_locked(info, false); + } + +- if (info) +- mod_delayed_work_on(info->eoi_cpu, system_wq, +- &eoi->delayed, info->eoi_time - now); ++ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); + +- read_unlock_irqrestore(&evtchn_rwlock, flags); ++ rcu_read_unlock(); + } + + static void xen_cpu_init_eoi(unsigned int cpu) +@@ -708,16 +719,15 @@ static void xen_cpu_init_eoi(unsigned int cpu) + void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) + { + struct irq_info *info; +- unsigned long flags; + +- read_lock_irqsave(&evtchn_rwlock, flags); ++ rcu_read_lock(); + + info = info_for_irq(irq); + + if (info) + xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS); + +- read_unlock_irqrestore(&evtchn_rwlock, flags); ++ rcu_read_unlock(); + } + EXPORT_SYMBOL_GPL(xen_irq_lateeoi); + +@@ -731,6 +741,7 @@ static void xen_irq_init(unsigned irq) + + info->type = IRQT_UNBOUND; + info->refcnt = -1; ++ INIT_RCU_WORK(&info->rwork, delayed_free_irq); + + set_info_for_irq(irq, info); + /* +@@ -788,31 +799,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) + static void xen_free_irq(unsigned irq) + { + struct irq_info *info = info_for_irq(irq); +- unsigned long flags; + + if (WARN_ON(!info)) + return; + +- write_lock_irqsave(&evtchn_rwlock, flags); +- + if (!list_empty(&info->eoi_list)) + lateeoi_list_del(info); + + list_del(&info->list); + +- set_info_for_irq(irq, NULL); +- + WARN_ON(info->refcnt > 0); + +- write_unlock_irqrestore(&evtchn_rwlock, flags); +- +- kfree(info); +- +- /* Legacy IRQ descriptors are managed by the arch. */ +- if (irq < nr_legacy_irqs()) +- return; +- +- irq_free_desc(irq); ++ queue_rcu_work(system_wq, &info->rwork); + } + + static void xen_evtchn_close(evtchn_port_t port) +@@ -1716,7 +1714,14 @@ static void __xen_evtchn_do_upcall(void) + int cpu = smp_processor_id(); + struct evtchn_loop_ctrl ctrl = { 0 }; + +- read_lock(&evtchn_rwlock); ++ /* ++ * When closing an event channel the associated IRQ must not be freed ++ * until all cpus have left the event handling loop. This is ensured ++ * by taking the rcu_read_lock() while handling events, as freeing of ++ * the IRQ is handled via queue_rcu_work() _after_ closing the event ++ * channel. 
++ */ ++ rcu_read_lock(); + + do { + vcpu_info->evtchn_upcall_pending = 0; +@@ -1729,7 +1734,7 @@ static void __xen_evtchn_do_upcall(void) + + } while (vcpu_info->evtchn_upcall_pending); + +- read_unlock(&evtchn_rwlock); ++ rcu_read_unlock(); + + /* + * Increment irq_epoch only now to defer EOIs only for +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 3bcef0c4d6fc4..27d06bb5e5c05 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include "extent-io-tree.h" + #include "extent_io.h" + #include "extent_map.h" +@@ -3238,11 +3239,11 @@ static inline void btrfs_clear_sb_rdonly(struct super_block *sb) + + /* root-item.c */ + int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id, +- u64 ref_id, u64 dirid, u64 sequence, const char *name, +- int name_len); ++ u64 ref_id, u64 dirid, u64 sequence, ++ const struct fscrypt_str *name); + int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, +- u64 ref_id, u64 dirid, u64 *sequence, const char *name, +- int name_len); ++ u64 ref_id, u64 dirid, u64 *sequence, ++ const struct fscrypt_str *name); + int btrfs_del_root(struct btrfs_trans_handle *trans, + const struct btrfs_key *key); + int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, +@@ -3271,25 +3272,23 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info); + + /* dir-item.c */ + int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, +- const char *name, int name_len); +-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name, +- int name_len, struct btrfs_inode *dir, ++ const struct fscrypt_str *name); ++int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, ++ const struct fscrypt_str *name, struct btrfs_inode *dir, + struct btrfs_key *location, u8 type, u64 index); + struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, u64 dir, +- const char *name, int name_len, +- int mod); ++ const struct fscrypt_str *name, int mod); + struct btrfs_dir_item * + btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, u64 dir, +- u64 index, const char *name, int name_len, +- int mod); ++ u64 index, const struct fscrypt_str *name, int mod); + struct btrfs_dir_item * + btrfs_search_dir_index_item(struct btrfs_root *root, + struct btrfs_path *path, u64 dirid, +- const char *name, int name_len); ++ const struct fscrypt_str *name); + int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, +@@ -3370,10 +3369,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); + int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); + int btrfs_unlink_inode(struct btrfs_trans_handle *trans, + struct btrfs_inode *dir, struct btrfs_inode *inode, +- const char *name, int name_len); ++ const struct fscrypt_str *name); + int btrfs_add_link(struct btrfs_trans_handle *trans, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, +- const char *name, int name_len, int add_backref, u64 index); ++ const struct fscrypt_str *name, int add_backref, u64 index); + int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry); + int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, + int front); +@@ -3398,6 +3397,7 @@ struct btrfs_new_inode_args { + */ + struct posix_acl *default_acl; + struct 
posix_acl *acl; ++ struct fscrypt_name fname; + }; + int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, + unsigned int *trans_num_items); +diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c +index 72fb2c518a2b4..fdab48c1abb8a 100644 +--- a/fs/btrfs/dir-item.c ++++ b/fs/btrfs/dir-item.c +@@ -103,8 +103,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, + * to use for the second index (if one is created). + * Will return 0 or -ENOMEM + */ +-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name, +- int name_len, struct btrfs_inode *dir, ++int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, ++ const struct fscrypt_str *name, struct btrfs_inode *dir, + struct btrfs_key *location, u8 type, u64 index) + { + int ret = 0; +@@ -120,7 +120,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name, + + key.objectid = btrfs_ino(dir); + key.type = BTRFS_DIR_ITEM_KEY; +- key.offset = btrfs_name_hash(name, name_len); ++ key.offset = btrfs_name_hash(name->name, name->len); + + path = btrfs_alloc_path(); + if (!path) +@@ -128,9 +128,9 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name, + + btrfs_cpu_key_to_disk(&disk_key, location); + +- data_size = sizeof(*dir_item) + name_len; ++ data_size = sizeof(*dir_item) + name->len; + dir_item = insert_with_overflow(trans, root, path, &key, data_size, +- name, name_len); ++ name->name, name->len); + if (IS_ERR(dir_item)) { + ret = PTR_ERR(dir_item); + if (ret == -EEXIST) +@@ -142,11 +142,11 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name, + btrfs_set_dir_item_key(leaf, dir_item, &disk_key); + btrfs_set_dir_type(leaf, dir_item, type); + btrfs_set_dir_data_len(leaf, dir_item, 0); +- btrfs_set_dir_name_len(leaf, dir_item, name_len); ++ btrfs_set_dir_name_len(leaf, dir_item, name->len); + btrfs_set_dir_transid(leaf, dir_item, trans->transid); + name_ptr = (unsigned long)(dir_item + 1); + +- write_extent_buffer(leaf, name, name_ptr, name_len); ++ write_extent_buffer(leaf, name->name, name_ptr, name->len); + btrfs_mark_buffer_dirty(leaf); + + second_insert: +@@ -157,7 +157,7 @@ second_insert: + } + btrfs_release_path(path); + +- ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir, ++ ret2 = btrfs_insert_delayed_dir_index(trans, name->name, name->len, dir, + &disk_key, type, index); + out_free: + btrfs_free_path(path); +@@ -206,7 +206,7 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir( + struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, u64 dir, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + int mod) + { + struct btrfs_key key; +@@ -214,9 +214,10 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, + + key.objectid = dir; + key.type = BTRFS_DIR_ITEM_KEY; +- key.offset = btrfs_name_hash(name, name_len); ++ key.offset = btrfs_name_hash(name->name, name->len); + +- di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod); ++ di = btrfs_lookup_match_dir(trans, root, path, &key, name->name, ++ name->len, mod); + if (IS_ERR(di) && PTR_ERR(di) == -ENOENT) + return NULL; + +@@ -224,7 +225,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, + } + + int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, +- const char *name, int name_len) ++ const struct fscrypt_str *name) + { + int ret; + struct btrfs_key key; +@@ -240,9 
+241,10 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, + + key.objectid = dir; + key.type = BTRFS_DIR_ITEM_KEY; +- key.offset = btrfs_name_hash(name, name_len); ++ key.offset = btrfs_name_hash(name->name, name->len); + +- di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0); ++ di = btrfs_lookup_match_dir(NULL, root, path, &key, name->name, ++ name->len, 0); + if (IS_ERR(di)) { + ret = PTR_ERR(di); + /* Nothing found, we're safe */ +@@ -262,11 +264,8 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, + goto out; + } + +- /* +- * see if there is room in the item to insert this +- * name +- */ +- data_size = sizeof(*di) + name_len; ++ /* See if there is room in the item to insert this name. */ ++ data_size = sizeof(*di) + name->len; + leaf = path->nodes[0]; + slot = path->slots[0]; + if (data_size + btrfs_item_size(leaf, slot) + +@@ -303,8 +302,7 @@ struct btrfs_dir_item * + btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, u64 dir, +- u64 index, const char *name, int name_len, +- int mod) ++ u64 index, const struct fscrypt_str *name, int mod) + { + struct btrfs_dir_item *di; + struct btrfs_key key; +@@ -313,7 +311,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, + key.type = BTRFS_DIR_INDEX_KEY; + key.offset = index; + +- di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod); ++ di = btrfs_lookup_match_dir(trans, root, path, &key, name->name, ++ name->len, mod); + if (di == ERR_PTR(-ENOENT)) + return NULL; + +@@ -321,9 +320,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, + } + + struct btrfs_dir_item * +-btrfs_search_dir_index_item(struct btrfs_root *root, +- struct btrfs_path *path, u64 dirid, +- const char *name, int name_len) ++btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path, ++ u64 dirid, const struct fscrypt_str *name) + { + struct btrfs_dir_item *di; + struct btrfs_key key; +@@ -338,7 +336,7 @@ btrfs_search_dir_index_item(struct btrfs_root *root, + break; + + di = btrfs_match_dir_item_name(root->fs_info, path, +- name, name_len); ++ name->name, name->len); + if (di) + return di; + } +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 77202addead83..0a46fff3dd067 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1458,8 +1458,13 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + if (iocb->ki_flags & IOCB_NOWAIT) + ilock_flags |= BTRFS_ILOCK_TRY; + +- /* If the write DIO is within EOF, use a shared lock */ +- if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode)) ++ /* ++ * If the write DIO is within EOF, use a shared lock and also only if ++ * security bits will likely not be dropped by file_remove_privs() called ++ * from btrfs_write_check(). Either will need to be rechecked after the ++ * lock was acquired. ++ */ ++ if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode)) + ilock_flags |= BTRFS_ILOCK_SHARED; + + relock: +@@ -1467,6 +1472,13 @@ relock: + if (err < 0) + return err; + ++ /* Shared lock cannot be used with security bits set. 
*/ ++ if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) { ++ btrfs_inode_unlock(inode, ilock_flags); ++ ilock_flags &= ~BTRFS_ILOCK_SHARED; ++ goto relock; ++ } ++ + err = generic_write_checks(iocb, from); + if (err <= 0) { + btrfs_inode_unlock(inode, ilock_flags); +diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c +index 0eeb5ea878948..5add022d3534f 100644 +--- a/fs/btrfs/inode-item.c ++++ b/fs/btrfs/inode-item.c +@@ -10,8 +10,8 @@ + #include "print-tree.h" + + struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, +- int slot, const char *name, +- int name_len) ++ int slot, ++ const struct fscrypt_str *name) + { + struct btrfs_inode_ref *ref; + unsigned long ptr; +@@ -27,9 +27,10 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, + len = btrfs_inode_ref_name_len(leaf, ref); + name_ptr = (unsigned long)(ref + 1); + cur_offset += len + sizeof(*ref); +- if (len != name_len) ++ if (len != name->len) + continue; +- if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) ++ if (memcmp_extent_buffer(leaf, name->name, name_ptr, ++ name->len) == 0) + return ref; + } + return NULL; +@@ -37,7 +38,7 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, + + struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( + struct extent_buffer *leaf, int slot, u64 ref_objectid, +- const char *name, int name_len) ++ const struct fscrypt_str *name) + { + struct btrfs_inode_extref *extref; + unsigned long ptr; +@@ -60,9 +61,10 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( + name_ptr = (unsigned long)(&extref->name); + ref_name_len = btrfs_inode_extref_name_len(leaf, extref); + +- if (ref_name_len == name_len && ++ if (ref_name_len == name->len && + btrfs_inode_extref_parent(leaf, extref) == ref_objectid && +- (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) ++ (memcmp_extent_buffer(leaf, name->name, name_ptr, ++ name->len) == 0)) + return extref; + + cur_offset += ref_name_len + sizeof(*extref); +@@ -75,7 +77,7 @@ struct btrfs_inode_extref * + btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid, int ins_len, + int cow) + { +@@ -84,7 +86,7 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, + + key.objectid = inode_objectid; + key.type = BTRFS_INODE_EXTREF_KEY; +- key.offset = btrfs_extref_hash(ref_objectid, name, name_len); ++ key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len); + + ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); + if (ret < 0) +@@ -92,13 +94,13 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, + if (ret > 0) + return NULL; + return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], +- ref_objectid, name, name_len); ++ ref_objectid, name); + + } + + static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid, + u64 *index) + { +@@ -107,14 +109,14 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, + struct btrfs_inode_extref *extref; + struct extent_buffer *leaf; + int ret; +- int del_len = name_len + sizeof(*extref); ++ int del_len = name->len + sizeof(*extref); + unsigned long ptr; + unsigned long item_start; + u32 item_size; + + key.objectid = inode_objectid; + 
key.type = BTRFS_INODE_EXTREF_KEY; +- key.offset = btrfs_extref_hash(ref_objectid, name, name_len); ++ key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len); + + path = btrfs_alloc_path(); + if (!path) +@@ -132,7 +134,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, + * readonly. + */ + extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], +- ref_objectid, name, name_len); ++ ref_objectid, name); + if (!extref) { + btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL); + ret = -EROFS; +@@ -168,8 +170,7 @@ out: + } + + int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, +- const char *name, int name_len, ++ struct btrfs_root *root, const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid, u64 *index) + { + struct btrfs_path *path; +@@ -182,7 +183,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, + u32 sub_item_len; + int ret; + int search_ext_refs = 0; +- int del_len = name_len + sizeof(*ref); ++ int del_len = name->len + sizeof(*ref); + + key.objectid = inode_objectid; + key.offset = ref_objectid; +@@ -201,8 +202,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, + goto out; + } + +- ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name, +- name_len); ++ ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name); + if (!ref) { + ret = -ENOENT; + search_ext_refs = 1; +@@ -219,7 +219,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, + goto out; + } + ptr = (unsigned long)ref; +- sub_item_len = name_len + sizeof(*ref); ++ sub_item_len = name->len + sizeof(*ref); + item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); + memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, + item_size - (ptr + sub_item_len - item_start)); +@@ -233,7 +233,7 @@ out: + * name in our ref array. Find and remove the extended + * inode ref then. 
+ */ +- return btrfs_del_inode_extref(trans, root, name, name_len, ++ return btrfs_del_inode_extref(trans, root, name, + inode_objectid, ref_objectid, index); + } + +@@ -247,12 +247,13 @@ out: + */ + static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, +- const char *name, int name_len, +- u64 inode_objectid, u64 ref_objectid, u64 index) ++ const struct fscrypt_str *name, ++ u64 inode_objectid, u64 ref_objectid, ++ u64 index) + { + struct btrfs_inode_extref *extref; + int ret; +- int ins_len = name_len + sizeof(*extref); ++ int ins_len = name->len + sizeof(*extref); + unsigned long ptr; + struct btrfs_path *path; + struct btrfs_key key; +@@ -260,7 +261,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, + + key.objectid = inode_objectid; + key.type = BTRFS_INODE_EXTREF_KEY; +- key.offset = btrfs_extref_hash(ref_objectid, name, name_len); ++ key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len); + + path = btrfs_alloc_path(); + if (!path) +@@ -272,7 +273,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, + if (btrfs_find_name_in_ext_backref(path->nodes[0], + path->slots[0], + ref_objectid, +- name, name_len)) ++ name)) + goto out; + + btrfs_extend_item(path, ins_len); +@@ -286,12 +287,12 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, + ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len; + extref = (struct btrfs_inode_extref *)ptr; + +- btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len); ++ btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len); + btrfs_set_inode_extref_index(path->nodes[0], extref, index); + btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid); + + ptr = (unsigned long)&extref->name; +- write_extent_buffer(path->nodes[0], name, ptr, name_len); ++ write_extent_buffer(path->nodes[0], name->name, ptr, name->len); + btrfs_mark_buffer_dirty(path->nodes[0]); + + out: +@@ -301,8 +302,7 @@ out: + + /* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */ + int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, +- const char *name, int name_len, ++ struct btrfs_root *root, const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid, u64 index) + { + struct btrfs_fs_info *fs_info = root->fs_info; +@@ -311,7 +311,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, + struct btrfs_inode_ref *ref; + unsigned long ptr; + int ret; +- int ins_len = name_len + sizeof(*ref); ++ int ins_len = name->len + sizeof(*ref); + + key.objectid = inode_objectid; + key.offset = ref_objectid; +@@ -327,7 +327,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, + if (ret == -EEXIST) { + u32 old_size; + ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], +- name, name_len); ++ name); + if (ref) + goto out; + +@@ -336,7 +336,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, + ref = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_ref); + ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); +- btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); ++ btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len); + btrfs_set_inode_ref_index(path->nodes[0], ref, index); + ptr = (unsigned long)(ref + 1); + ret = 0; +@@ -344,7 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, + if (ret == -EOVERFLOW) { + if (btrfs_find_name_in_backref(path->nodes[0], + 
path->slots[0], +- name, name_len)) ++ name)) + ret = -EEXIST; + else + ret = -EMLINK; +@@ -353,11 +353,11 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, + } else { + ref = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_ref); +- btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); ++ btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len); + btrfs_set_inode_ref_index(path->nodes[0], ref, index); + ptr = (unsigned long)(ref + 1); + } +- write_extent_buffer(path->nodes[0], name, ptr, name_len); ++ write_extent_buffer(path->nodes[0], name->name, ptr, name->len); + btrfs_mark_buffer_dirty(path->nodes[0]); + + out: +@@ -370,7 +370,6 @@ out: + if (btrfs_super_incompat_flags(disk_super) + & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) + ret = btrfs_insert_inode_extref(trans, root, name, +- name_len, + inode_objectid, + ref_objectid, index); + } +diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h +index a8fc16d0147f6..b80aeb7157010 100644 +--- a/fs/btrfs/inode-item.h ++++ b/fs/btrfs/inode-item.h +@@ -64,33 +64,31 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_truncate_control *control); + int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, +- const char *name, int name_len, ++ struct btrfs_root *root, const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid, u64 index); + int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, +- const char *name, int name_len, +- u64 inode_objectid, u64 ref_objectid, u64 *index); ++ struct btrfs_root *root, const struct fscrypt_str *name, ++ u64 inode_objectid, u64 ref_objectid, u64 *index); + int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, u64 objectid); +-int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root +- *root, struct btrfs_path *path, ++int btrfs_lookup_inode(struct btrfs_trans_handle *trans, ++ struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *location, int mod); + + struct btrfs_inode_extref *btrfs_lookup_inode_extref( + struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + u64 inode_objectid, u64 ref_objectid, int ins_len, + int cow); + + struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, +- int slot, const char *name, +- int name_len); ++ int slot, ++ const struct fscrypt_str *name); + struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( + struct extent_buffer *leaf, int slot, u64 ref_objectid, +- const char *name, int name_len); ++ const struct fscrypt_str *name); + + #endif +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 222068bf80031..4063447217f92 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3627,7 +3627,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) + spin_unlock(&fs_info->delayed_iput_lock); + } + +-/** ++/* + * Wait for flushing all delayed iputs + * + * @fs_info: the filesystem +@@ -4272,7 +4272,7 @@ int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, + static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, + struct btrfs_inode *dir, + struct btrfs_inode *inode, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + struct btrfs_rename_ctx *rename_ctx) + { + struct btrfs_root *root = dir->root; +@@ -4290,8 +4290,7 @@ 
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, + goto out; + } + +- di = btrfs_lookup_dir_item(trans, root, path, dir_ino, +- name, name_len, -1); ++ di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); + if (IS_ERR_OR_NULL(di)) { + ret = di ? PTR_ERR(di) : -ENOENT; + goto err; +@@ -4319,12 +4318,11 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, + } + } + +- ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, +- dir_ino, &index); ++ ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); + if (ret) { + btrfs_info(fs_info, + "failed to delete reference to %.*s, inode %llu parent %llu", +- name_len, name, ino, dir_ino); ++ name->len, name->name, ino, dir_ino); + btrfs_abort_transaction(trans, ret); + goto err; + } +@@ -4345,10 +4343,8 @@ skip_backref: + * operations on the log tree, increasing latency for applications. + */ + if (!rename_ctx) { +- btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, +- dir_ino); +- btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, +- index); ++ btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); ++ btrfs_del_dir_entries_in_log(trans, root, name, dir, index); + } + + /* +@@ -4366,7 +4362,7 @@ err: + if (ret) + goto out; + +- btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); ++ btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); + inode_inc_iversion(&inode->vfs_inode); + inode_inc_iversion(&dir->vfs_inode); + inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode); +@@ -4379,10 +4375,11 @@ out: + + int btrfs_unlink_inode(struct btrfs_trans_handle *trans, + struct btrfs_inode *dir, struct btrfs_inode *inode, +- const char *name, int name_len) ++ const struct fscrypt_str *name) + { + int ret; +- ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL); ++ ++ ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); + if (!ret) { + drop_nlink(&inode->vfs_inode); + ret = btrfs_update_inode(trans, inode->root, inode); +@@ -4418,29 +4415,39 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) + struct btrfs_trans_handle *trans; + struct inode *inode = d_inode(dentry); + int ret; ++ struct fscrypt_name fname; ++ ++ ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); ++ if (ret) ++ return ret; ++ ++ /* This needs to handle no-key deletions later on */ + + trans = __unlink_start_trans(dir); +- if (IS_ERR(trans)) +- return PTR_ERR(trans); ++ if (IS_ERR(trans)) { ++ ret = PTR_ERR(trans); ++ goto fscrypt_free; ++ } + + btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), + 0); + +- ret = btrfs_unlink_inode(trans, BTRFS_I(dir), +- BTRFS_I(d_inode(dentry)), dentry->d_name.name, +- dentry->d_name.len); ++ ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), ++ &fname.disk_name); + if (ret) +- goto out; ++ goto end_trans; + + if (inode->i_nlink == 0) { + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); + if (ret) +- goto out; ++ goto end_trans; + } + +-out: ++end_trans: + btrfs_end_transaction(trans); + btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); ++fscrypt_free: ++ fscrypt_free_filename(&fname); + return ret; + } + +@@ -4453,12 +4460,17 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + struct extent_buffer *leaf; + struct btrfs_dir_item *di; + struct btrfs_key key; +- const char *name = dentry->d_name.name; +- int name_len = dentry->d_name.len; + u64 index; + int ret; + u64 objectid; + u64 dir_ino = btrfs_ino(BTRFS_I(dir)); ++ struct 
fscrypt_name fname; ++ ++ ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); ++ if (ret) ++ return ret; ++ ++ /* This needs to handle no-key deletions later on */ + + if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { + objectid = inode->root->root_key.objectid; +@@ -4466,15 +4478,18 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + objectid = inode->location.objectid; + } else { + WARN_ON(1); ++ fscrypt_free_filename(&fname); + return -EINVAL; + } + + path = btrfs_alloc_path(); +- if (!path) +- return -ENOMEM; ++ if (!path) { ++ ret = -ENOMEM; ++ goto out; ++ } + + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, +- name, name_len, -1); ++ &fname.disk_name, -1); + if (IS_ERR_OR_NULL(di)) { + ret = di ? PTR_ERR(di) : -ENOENT; + goto out; +@@ -4500,8 +4515,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + * call btrfs_del_root_ref, and it _shouldn't_ fail. + */ + if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { +- di = btrfs_search_dir_index_item(root, path, dir_ino, +- name, name_len); ++ di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); + if (IS_ERR_OR_NULL(di)) { + if (!di) + ret = -ENOENT; +@@ -4518,7 +4532,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + } else { + ret = btrfs_del_root_ref(trans, objectid, + root->root_key.objectid, dir_ino, +- &index, name, name_len); ++ &index, &fname.disk_name); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; +@@ -4531,7 +4545,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + goto out; + } + +- btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); ++ btrfs_i_size_write(BTRFS_I(dir), dir->i_size - fname.disk_name.len * 2); + inode_inc_iversion(dir); + dir->i_mtime = current_time(dir); + dir->i_ctime = dir->i_mtime; +@@ -4540,6 +4554,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + btrfs_abort_transaction(trans, ret); + out: + btrfs_free_path(path); ++ fscrypt_free_filename(&fname); + return ret; + } + +@@ -4553,6 +4568,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root) + struct btrfs_path *path; + struct btrfs_dir_item *di; + struct btrfs_key key; ++ struct fscrypt_str name = FSTR_INIT("default", 7); + u64 dir_id; + int ret; + +@@ -4563,7 +4579,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root) + /* Make sure this root isn't set as the default subvol */ + dir_id = btrfs_super_root_dir(fs_info->super_copy); + di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, +- dir_id, "default", 7, 0); ++ dir_id, &name, 0); + if (di && !IS_ERR(di)) { + btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); + if (key.objectid == root->root_key.objectid) { +@@ -4802,6 +4818,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) + int err = 0; + struct btrfs_trans_handle *trans; + u64 last_unlink_trans; ++ struct fscrypt_name fname; + + if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) + return -ENOTEMPTY; +@@ -4814,9 +4831,17 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) + return btrfs_delete_subvolume(dir, dentry); + } + ++ err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); ++ if (err) ++ return err; ++ ++ /* This needs to handle no-key deletions later on */ ++ + trans = __unlink_start_trans(dir); +- if (IS_ERR(trans)) +- return PTR_ERR(trans); ++ if (IS_ERR(trans)) { ++ err = PTR_ERR(trans); ++ goto out_notrans; ++ } + + if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + 
err = btrfs_unlink_subvol(trans, dir, dentry); +@@ -4830,9 +4855,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) + last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; + + /* now the directory is empty */ +- err = btrfs_unlink_inode(trans, BTRFS_I(dir), +- BTRFS_I(d_inode(dentry)), dentry->d_name.name, +- dentry->d_name.len); ++ err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), ++ &fname.disk_name); + if (!err) { + btrfs_i_size_write(BTRFS_I(inode), 0); + /* +@@ -4851,7 +4875,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) + } + out: + btrfs_end_transaction(trans); ++out_notrans: + btrfs_btree_balance_dirty(fs_info); ++ fscrypt_free_filename(&fname); + + return err; + } +@@ -5532,19 +5558,24 @@ no_delete: + static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, + struct btrfs_key *location, u8 *type) + { +- const char *name = dentry->d_name.name; +- int namelen = dentry->d_name.len; + struct btrfs_dir_item *di; + struct btrfs_path *path; + struct btrfs_root *root = BTRFS_I(dir)->root; + int ret = 0; ++ struct fscrypt_name fname; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + ++ ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); ++ if (ret) ++ goto out; ++ ++ /* This needs to handle no-key deletions later on */ ++ + di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)), +- name, namelen, 0); ++ &fname.disk_name, 0); + if (IS_ERR_OR_NULL(di)) { + ret = di ? PTR_ERR(di) : -ENOENT; + goto out; +@@ -5556,12 +5587,13 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, + ret = -EUCLEAN; + btrfs_warn(root->fs_info, + "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", +- __func__, name, btrfs_ino(BTRFS_I(dir)), ++ __func__, fname.disk_name.name, btrfs_ino(BTRFS_I(dir)), + location->objectid, location->type, location->offset); + } + if (!ret) + *type = btrfs_dir_type(path->nodes[0], di); + out: ++ fscrypt_free_filename(&fname); + btrfs_free_path(path); + return ret; + } +@@ -5584,6 +5616,11 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, + struct btrfs_key key; + int ret; + int err = 0; ++ struct fscrypt_name fname; ++ ++ ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); ++ if (ret) ++ return ret; + + path = btrfs_alloc_path(); + if (!path) { +@@ -5606,12 +5643,11 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, + leaf = path->nodes[0]; + ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); + if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) || +- btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) ++ btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) + goto out; + +- ret = memcmp_extent_buffer(leaf, dentry->d_name.name, +- (unsigned long)(ref + 1), +- dentry->d_name.len); ++ ret = memcmp_extent_buffer(leaf, fname.disk_name.name, ++ (unsigned long)(ref + 1), fname.disk_name.len); + if (ret) + goto out; + +@@ -5630,6 +5666,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, + err = 0; + out: + btrfs_free_path(path); ++ fscrypt_free_filename(&fname); + return err; + } + +@@ -6238,9 +6275,18 @@ int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, + struct inode *inode = args->inode; + int ret; + ++ if (!args->orphan) { ++ ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, ++ &args->fname); ++ if (ret) ++ return ret; ++ } ++ + ret = posix_acl_create(dir, &inode->i_mode, 
&args->default_acl, &args->acl); +- if (ret) ++ if (ret) { ++ fscrypt_free_filename(&args->fname); + return ret; ++ } + + /* 1 to add inode item */ + *trans_num_items = 1; +@@ -6280,6 +6326,7 @@ void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) + { + posix_acl_release(args->acl); + posix_acl_release(args->default_acl); ++ fscrypt_free_filename(&args->fname); + } + + /* +@@ -6315,8 +6362,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + { + struct inode *dir = args->dir; + struct inode *inode = args->inode; +- const char *name = args->orphan ? NULL : args->dentry->d_name.name; +- int name_len = args->orphan ? 0 : args->dentry->d_name.len; ++ const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; + struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); + struct btrfs_root *root; + struct btrfs_inode_item *inode_item; +@@ -6417,7 +6463,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + sizes[1] = 2 + sizeof(*ref); + } else { + key[1].offset = btrfs_ino(BTRFS_I(dir)); +- sizes[1] = name_len + sizeof(*ref); ++ sizes[1] = name->len + sizeof(*ref); + } + } + +@@ -6456,10 +6502,12 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + btrfs_set_inode_ref_index(path->nodes[0], ref, 0); + write_extent_buffer(path->nodes[0], "..", ptr, 2); + } else { +- btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); ++ btrfs_set_inode_ref_name_len(path->nodes[0], ref, ++ name->len); + btrfs_set_inode_ref_index(path->nodes[0], ref, + BTRFS_I(inode)->dir_index); +- write_extent_buffer(path->nodes[0], name, ptr, name_len); ++ write_extent_buffer(path->nodes[0], name->name, ptr, ++ name->len); + } + } + +@@ -6520,7 +6568,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); + } else { + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, +- name_len, 0, BTRFS_I(inode)->dir_index); ++ 0, BTRFS_I(inode)->dir_index); + } + if (ret) { + btrfs_abort_transaction(trans, ret); +@@ -6549,7 +6597,7 @@ out: + */ + int btrfs_add_link(struct btrfs_trans_handle *trans, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, +- const char *name, int name_len, int add_backref, u64 index) ++ const struct fscrypt_str *name, int add_backref, u64 index) + { + int ret = 0; + struct btrfs_key key; +@@ -6568,17 +6616,17 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { + ret = btrfs_add_root_ref(trans, key.objectid, + root->root_key.objectid, parent_ino, +- index, name, name_len); ++ index, name); + } else if (add_backref) { +- ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, +- parent_ino, index); ++ ret = btrfs_insert_inode_ref(trans, root, name, ++ ino, parent_ino, index); + } + + /* Nothing to clean up yet */ + if (ret) + return ret; + +- ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key, ++ ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, + btrfs_inode_type(&inode->vfs_inode), index); + if (ret == -EEXIST || ret == -EOVERFLOW) + goto fail_dir_item; +@@ -6588,7 +6636,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, + } + + btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + +- name_len * 2); ++ name->len * 2); + inode_inc_iversion(&parent_inode->vfs_inode); + /* + * If we are replaying a log tree, we do not want to update the mtime +@@ -6613,15 +6661,15 @@ fail_dir_item: + int err; + err = btrfs_del_root_ref(trans, key.objectid, + 
root->root_key.objectid, parent_ino, +- &local_index, name, name_len); ++ &local_index, name); + if (err) + btrfs_abort_transaction(trans, err); + } else if (add_backref) { + u64 local_index; + int err; + +- err = btrfs_del_inode_ref(trans, root, name, name_len, +- ino, parent_ino, &local_index); ++ err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, ++ &local_index); + if (err) + btrfs_abort_transaction(trans, err); + } +@@ -6704,6 +6752,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, + struct btrfs_root *root = BTRFS_I(dir)->root; + struct inode *inode = d_inode(old_dentry); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ++ struct fscrypt_name fname; + u64 index; + int err; + int drop_inode = 0; +@@ -6715,6 +6764,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, + if (inode->i_nlink >= BTRFS_LINK_MAX) + return -EMLINK; + ++ err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); ++ if (err) ++ goto fail; ++ + err = btrfs_set_inode_index(BTRFS_I(dir), &index); + if (err) + goto fail; +@@ -6741,7 +6794,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, + set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); + + err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), +- dentry->d_name.name, dentry->d_name.len, 1, index); ++ &fname.disk_name, 1, index); + + if (err) { + drop_inode = 1; +@@ -6765,6 +6818,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, + } + + fail: ++ fscrypt_free_filename(&fname); + if (trans) + btrfs_end_transaction(trans); + if (drop_inode) { +@@ -9037,6 +9091,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, + int ret; + int ret2; + bool need_abort = false; ++ struct fscrypt_name old_fname, new_fname; ++ struct fscrypt_str *old_name, *new_name; + + /* + * For non-subvolumes allow exchange only within one subvolume, in the +@@ -9048,6 +9104,19 @@ static int btrfs_rename_exchange(struct inode *old_dir, + new_ino != BTRFS_FIRST_FREE_OBJECTID)) + return -EXDEV; + ++ ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); ++ if (ret) ++ return ret; ++ ++ ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); ++ if (ret) { ++ fscrypt_free_filename(&old_fname); ++ return ret; ++ } ++ ++ old_name = &old_fname.disk_name; ++ new_name = &new_fname.disk_name; ++ + /* close the race window with snapshot create/destroy ioctl */ + if (old_ino == BTRFS_FIRST_FREE_OBJECTID || + new_ino == BTRFS_FIRST_FREE_OBJECTID) +@@ -9115,10 +9184,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + /* force full log commit if subvolume involved. */ + btrfs_set_log_full_commit(trans); + } else { +- ret = btrfs_insert_inode_ref(trans, dest, +- new_dentry->d_name.name, +- new_dentry->d_name.len, +- old_ino, ++ ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, + btrfs_ino(BTRFS_I(new_dir)), + old_idx); + if (ret) +@@ -9131,10 +9197,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + /* force full log commit if subvolume involved. 
*/ + btrfs_set_log_full_commit(trans); + } else { +- ret = btrfs_insert_inode_ref(trans, root, +- old_dentry->d_name.name, +- old_dentry->d_name.len, +- new_ino, ++ ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, + btrfs_ino(BTRFS_I(old_dir)), + new_idx); + if (ret) { +@@ -9169,9 +9232,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + } else { /* src is an inode */ + ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), + BTRFS_I(old_dentry->d_inode), +- old_dentry->d_name.name, +- old_dentry->d_name.len, +- &old_rename_ctx); ++ old_name, &old_rename_ctx); + if (!ret) + ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); + } +@@ -9186,9 +9247,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + } else { /* dest is an inode */ + ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), + BTRFS_I(new_dentry->d_inode), +- new_dentry->d_name.name, +- new_dentry->d_name.len, +- &new_rename_ctx); ++ new_name, &new_rename_ctx); + if (!ret) + ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode)); + } +@@ -9198,16 +9257,14 @@ static int btrfs_rename_exchange(struct inode *old_dir, + } + + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), +- new_dentry->d_name.name, +- new_dentry->d_name.len, 0, old_idx); ++ new_name, 0, old_idx); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out_fail; + } + + ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), +- old_dentry->d_name.name, +- old_dentry->d_name.len, 0, new_idx); ++ old_name, 0, new_idx); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out_fail; +@@ -9250,6 +9307,8 @@ out_notrans: + old_ino == BTRFS_FIRST_FREE_OBJECTID) + up_read(&fs_info->subvol_sem); + ++ fscrypt_free_filename(&new_fname); ++ fscrypt_free_filename(&old_fname); + return ret; + } + +@@ -9289,6 +9348,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns, + int ret; + int ret2; + u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); ++ struct fscrypt_name old_fname, new_fname; + + if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + return -EPERM; +@@ -9305,22 +9365,28 @@ static int btrfs_rename(struct user_namespace *mnt_userns, + new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) + return -ENOTEMPTY; + ++ ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); ++ if (ret) ++ return ret; + +- /* check for collisions, even if the name isn't there */ +- ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, +- new_dentry->d_name.name, +- new_dentry->d_name.len); ++ ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); ++ if (ret) { ++ fscrypt_free_filename(&old_fname); ++ return ret; ++ } + ++ /* check for collisions, even if the name isn't there */ ++ ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); + if (ret) { + if (ret == -EEXIST) { + /* we shouldn't get + * eexist without a new_inode */ + if (WARN_ON(!new_inode)) { +- return ret; ++ goto out_fscrypt_names; + } + } else { + /* maybe -EOVERFLOW */ +- return ret; ++ goto out_fscrypt_names; + } + } + ret = 0; +@@ -9334,8 +9400,10 @@ static int btrfs_rename(struct user_namespace *mnt_userns, + + if (flags & RENAME_WHITEOUT) { + whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir); +- if (!whiteout_args.inode) +- return -ENOMEM; ++ if (!whiteout_args.inode) { ++ ret = -ENOMEM; ++ goto out_fscrypt_names; ++ } + ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); + if (ret) + goto out_whiteout_inode; +@@ -9403,11 +9471,9 @@ static int 
btrfs_rename(struct user_namespace *mnt_userns, + /* force full log commit if subvolume involved. */ + btrfs_set_log_full_commit(trans); + } else { +- ret = btrfs_insert_inode_ref(trans, dest, +- new_dentry->d_name.name, +- new_dentry->d_name.len, +- old_ino, +- btrfs_ino(BTRFS_I(new_dir)), index); ++ ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, ++ old_ino, btrfs_ino(BTRFS_I(new_dir)), ++ index); + if (ret) + goto out_fail; + } +@@ -9429,10 +9495,8 @@ static int btrfs_rename(struct user_namespace *mnt_userns, + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); + } else { + ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), +- BTRFS_I(d_inode(old_dentry)), +- old_dentry->d_name.name, +- old_dentry->d_name.len, +- &rename_ctx); ++ BTRFS_I(d_inode(old_dentry)), ++ &old_fname.disk_name, &rename_ctx); + if (!ret) + ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); + } +@@ -9451,8 +9515,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns, + } else { + ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), + BTRFS_I(d_inode(new_dentry)), +- new_dentry->d_name.name, +- new_dentry->d_name.len); ++ &new_fname.disk_name); + } + if (!ret && new_inode->i_nlink == 0) + ret = btrfs_orphan_add(trans, +@@ -9464,8 +9527,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns, + } + + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), +- new_dentry->d_name.name, +- new_dentry->d_name.len, 0, index); ++ &new_fname.disk_name, 0, index); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out_fail; +@@ -9500,6 +9562,9 @@ out_notrans: + out_whiteout_inode: + if (flags & RENAME_WHITEOUT) + iput(whiteout_args.inode); ++out_fscrypt_names: ++ fscrypt_free_filename(&old_fname); ++ fscrypt_free_filename(&new_fname); + return ret; + } + +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 2e29fafe0e7d9..9e323420c96d3 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -951,6 +951,7 @@ static noinline int btrfs_mksubvol(const struct path *parent, + struct inode *dir = d_inode(parent->dentry); + struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); + struct dentry *dentry; ++ struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen); + int error; + + error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); +@@ -971,8 +972,7 @@ static noinline int btrfs_mksubvol(const struct path *parent, + * check for them now when we can safely fail + */ + error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, +- dir->i_ino, name, +- namelen); ++ dir->i_ino, &name_str); + if (error) + goto out_dput; + +@@ -3782,6 +3782,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) + struct btrfs_trans_handle *trans; + struct btrfs_path *path = NULL; + struct btrfs_disk_key disk_key; ++ struct fscrypt_str name = FSTR_INIT("default", 7); + u64 objectid = 0; + u64 dir_id; + int ret; +@@ -3825,7 +3826,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) + + dir_id = btrfs_super_root_dir(fs_info->super_copy); + di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path, +- dir_id, "default", 7, 1); ++ dir_id, &name, 1); + if (IS_ERR_OR_NULL(di)) { + btrfs_release_path(path); + btrfs_end_transaction(trans); +diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c +index e1f599d7a9164..7d783f0943068 100644 +--- a/fs/btrfs/root-tree.c ++++ b/fs/btrfs/root-tree.c +@@ -327,9 +327,8 @@ out: + } + + int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, +- u64 ref_id, u64 dirid, u64 
*sequence, const char *name, +- int name_len) +- ++ u64 ref_id, u64 dirid, u64 *sequence, ++ const struct fscrypt_str *name) + { + struct btrfs_root *tree_root = trans->fs_info->tree_root; + struct btrfs_path *path; +@@ -356,8 +355,8 @@ again: + struct btrfs_root_ref); + ptr = (unsigned long)(ref + 1); + if ((btrfs_root_ref_dirid(leaf, ref) != dirid) || +- (btrfs_root_ref_name_len(leaf, ref) != name_len) || +- memcmp_extent_buffer(leaf, name, ptr, name_len)) { ++ (btrfs_root_ref_name_len(leaf, ref) != name->len) || ++ memcmp_extent_buffer(leaf, name->name, ptr, name->len)) { + ret = -ENOENT; + goto out; + } +@@ -400,8 +399,8 @@ out: + * Will return 0, -ENOMEM, or anything from the CoW path + */ + int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id, +- u64 ref_id, u64 dirid, u64 sequence, const char *name, +- int name_len) ++ u64 ref_id, u64 dirid, u64 sequence, ++ const struct fscrypt_str *name) + { + struct btrfs_root *tree_root = trans->fs_info->tree_root; + struct btrfs_key key; +@@ -420,7 +419,7 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id, + key.offset = ref_id; + again: + ret = btrfs_insert_empty_item(trans, tree_root, path, &key, +- sizeof(*ref) + name_len); ++ sizeof(*ref) + name->len); + if (ret) { + btrfs_abort_transaction(trans, ret); + btrfs_free_path(path); +@@ -431,9 +430,9 @@ again: + ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); + btrfs_set_root_ref_dirid(leaf, ref, dirid); + btrfs_set_root_ref_sequence(leaf, ref, sequence); +- btrfs_set_root_ref_name_len(leaf, ref, name_len); ++ btrfs_set_root_ref_name_len(leaf, ref, name->len); + ptr = (unsigned long)(ref + 1); +- write_extent_buffer(leaf, name, ptr, name_len); ++ write_extent_buffer(leaf, name->name, ptr, name->len); + btrfs_mark_buffer_dirty(leaf); + + if (key.type == BTRFS_ROOT_BACKREF_KEY) { +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 35e889fe2a95d..547b5c2292186 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -1596,13 +1596,17 @@ static int gen_unique_name(struct send_ctx *sctx, + return -ENOMEM; + + while (1) { ++ struct fscrypt_str tmp_name; ++ + len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", + ino, gen, idx); + ASSERT(len < sizeof(tmp)); ++ tmp_name.name = tmp; ++ tmp_name.len = strlen(tmp); + + di = btrfs_lookup_dir_item(NULL, sctx->send_root, + path, BTRFS_FIRST_FREE_OBJECTID, +- tmp, strlen(tmp), 0); ++ &tmp_name, 0); + btrfs_release_path(path); + if (IS_ERR(di)) { + ret = PTR_ERR(di); +@@ -1622,7 +1626,7 @@ static int gen_unique_name(struct send_ctx *sctx, + + di = btrfs_lookup_dir_item(NULL, sctx->parent_root, + path, BTRFS_FIRST_FREE_OBJECTID, +- tmp, strlen(tmp), 0); ++ &tmp_name, 0); + btrfs_release_path(path); + if (IS_ERR(di)) { + ret = PTR_ERR(di); +@@ -1752,13 +1756,13 @@ static int lookup_dir_item_inode(struct btrfs_root *root, + struct btrfs_dir_item *di; + struct btrfs_key key; + struct btrfs_path *path; ++ struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len); + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + +- di = btrfs_lookup_dir_item(NULL, root, path, +- dir, name, name_len, 0); ++ di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0); + if (IS_ERR_OR_NULL(di)) { + ret = di ? 
PTR_ERR(di) : -ENOENT; + goto out; +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 582b71b7fa779..2c562febd801e 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -1398,6 +1398,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec + struct btrfs_dir_item *di; + struct btrfs_path *path; + struct btrfs_key location; ++ struct fscrypt_str name = FSTR_INIT("default", 7); + u64 dir_id; + + path = btrfs_alloc_path(); +@@ -1410,7 +1411,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec + * to mount. + */ + dir_id = btrfs_super_root_dir(fs_info->super_copy); +- di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); ++ di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0); + if (IS_ERR(di)) { + btrfs_free_path(path); + return PTR_ERR(di); +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index a555567594418..1193214ba8c10 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1627,10 +1628,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_root *root = pending->root; + struct btrfs_root *parent_root; + struct btrfs_block_rsv *rsv; +- struct inode *parent_inode; ++ struct inode *parent_inode = pending->dir; + struct btrfs_path *path; + struct btrfs_dir_item *dir_item; +- struct dentry *dentry; + struct extent_buffer *tmp; + struct extent_buffer *old; + struct timespec64 cur_time; +@@ -1639,6 +1639,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + u64 index = 0; + u64 objectid; + u64 root_flags; ++ unsigned int nofs_flags; ++ struct fscrypt_name fname; + + ASSERT(pending->path); + path = pending->path; +@@ -1646,9 +1648,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + ASSERT(pending->root_item); + new_root_item = pending->root_item; + ++ /* ++ * We're inside a transaction and must make sure that any potential ++ * allocations with GFP_KERNEL in fscrypt won't recurse back to ++ * filesystem. ++ */ ++ nofs_flags = memalloc_nofs_save(); ++ pending->error = fscrypt_setup_filename(parent_inode, ++ &pending->dentry->d_name, 0, ++ &fname); ++ memalloc_nofs_restore(nofs_flags); ++ if (pending->error) ++ goto free_pending; ++ + pending->error = btrfs_get_free_objectid(tree_root, &objectid); + if (pending->error) +- goto no_free_objectid; ++ goto free_fname; + + /* + * Make qgroup to skip current new snapshot's qgroupid, as it is +@@ -1677,8 +1692,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + trace_btrfs_space_reservation(fs_info, "transaction", + trans->transid, + trans->bytes_reserved, 1); +- dentry = pending->dentry; +- parent_inode = pending->dir; + parent_root = BTRFS_I(parent_inode)->root; + ret = record_root_in_trans(trans, parent_root, 0); + if (ret) +@@ -1694,8 +1707,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + /* check if there is a file/dir which has the same name. 
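[Editor's sketch] In the create_pending_snapshot() hunk above, fscrypt_setup_filename() is bracketed by memalloc_nofs_save()/memalloc_nofs_restore() so that any GFP_KERNEL allocation inside fscrypt is implicitly demoted to GFP_NOFS while the transaction is open, preventing reclaim from re-entering the filesystem. A minimal sketch of the scoping idiom; demo_alloc_heavy() is a hypothetical stand-in for the fscrypt call:

#include <linux/sched/mm.h>

static int demo_alloc_heavy(void);	/* hypothetical allocation-prone callee */

/* Every allocation made (directly or indirectly) between save and
 * restore behaves as if GFP_NOFS had been requested. */
static int demo_nofs_scope(void)
{
	unsigned int nofs_flags;
	int ret;

	nofs_flags = memalloc_nofs_save();
	ret = demo_alloc_heavy();
	memalloc_nofs_restore(nofs_flags);
	return ret;
}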
*/ + dir_item = btrfs_lookup_dir_item(NULL, parent_root, path, + btrfs_ino(BTRFS_I(parent_inode)), +- dentry->d_name.name, +- dentry->d_name.len, 0); ++ &fname.disk_name, 0); + if (dir_item != NULL && !IS_ERR(dir_item)) { + pending->error = -EEXIST; + goto dir_item_existed; +@@ -1790,7 +1802,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + ret = btrfs_add_root_ref(trans, objectid, + parent_root->root_key.objectid, + btrfs_ino(BTRFS_I(parent_inode)), index, +- dentry->d_name.name, dentry->d_name.len); ++ &fname.disk_name); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto fail; +@@ -1822,9 +1834,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + if (ret < 0) + goto fail; + +- ret = btrfs_insert_dir_item(trans, dentry->d_name.name, +- dentry->d_name.len, BTRFS_I(parent_inode), +- &key, BTRFS_FT_DIR, index); ++ ret = btrfs_insert_dir_item(trans, &fname.disk_name, ++ BTRFS_I(parent_inode), &key, BTRFS_FT_DIR, ++ index); + /* We have check then name at the beginning, so it is impossible. */ + BUG_ON(ret == -EEXIST || ret == -EOVERFLOW); + if (ret) { +@@ -1833,7 +1845,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + } + + btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + +- dentry->d_name.len * 2); ++ fname.disk_name.len * 2); + parent_inode->i_mtime = current_time(parent_inode); + parent_inode->i_ctime = parent_inode->i_mtime; + ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode)); +@@ -1865,7 +1877,9 @@ dir_item_existed: + trans->bytes_reserved = 0; + clear_skip_qgroup: + btrfs_clear_skip_qgroup(trans); +-no_free_objectid: ++free_fname: ++ fscrypt_free_filename(&fname); ++free_pending: + kfree(new_root_item); + pending->root_item = NULL; + btrfs_free_path(path); +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 00be69ce7b90f..c03ff6a5a7f6b 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -595,6 +595,21 @@ static int overwrite_item(struct btrfs_trans_handle *trans, + return do_overwrite_item(trans, root, path, eb, slot, key); + } + ++static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len, ++ struct fscrypt_str *name) ++{ ++ char *buf; ++ ++ buf = kmalloc(len, GFP_NOFS); ++ if (!buf) ++ return -ENOMEM; ++ ++ read_extent_buffer(eb, buf, (unsigned long)start, len); ++ name->name = buf; ++ name->len = len; ++ return 0; ++} ++ + /* + * simple helper to read an inode off the disk from a given root + * This can only be called for subvolume roots and not for the log +@@ -901,12 +916,11 @@ out: + static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans, + struct btrfs_inode *dir, + struct btrfs_inode *inode, +- const char *name, +- int name_len) ++ const struct fscrypt_str *name) + { + int ret; + +- ret = btrfs_unlink_inode(trans, dir, inode, name, name_len); ++ ret = btrfs_unlink_inode(trans, dir, inode, name); + if (ret) + return ret; + /* +@@ -933,8 +947,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, + { + struct btrfs_root *root = dir->root; + struct inode *inode; +- char *name; +- int name_len; ++ struct fscrypt_str name; + struct extent_buffer *leaf; + struct btrfs_key location; + int ret; +@@ -942,12 +955,10 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, + leaf = path->nodes[0]; + + btrfs_dir_item_key_to_cpu(leaf, di, &location); +- name_len = btrfs_dir_name_len(leaf, di); +- name = kmalloc(name_len, GFP_NOFS); +- if 
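[Editor's sketch] read_alloc_one_name(), introduced in the tree-log.c hunk above, is the workhorse of this conversion: it kmalloc()s a GFP_NOFS buffer, copies the name out of the extent buffer, and hands ownership to the caller, which must kfree(name.name) on every path after a successful call (nothing is allocated on failure). A condensed sketch of that call/free discipline, assuming the usual tree-log.c context (leaf and di in scope):

	struct fscrypt_str name;
	int ret;

	ret = read_alloc_one_name(leaf, di + 1,
				  btrfs_dir_name_len(leaf, di), &name);
	if (ret)
		return ret;		/* no buffer to free on failure */

	/* ... use name.name / name.len ... */

	kfree(name.name);		/* caller owns the buffer on success */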
(!name) ++ ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name); ++ if (ret) + return -ENOMEM; + +- read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); + btrfs_release_path(path); + + inode = read_one_inode(root, location.objectid); +@@ -960,10 +971,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, + if (ret) + goto out; + +- ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), name, +- name_len); ++ ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name); + out: +- kfree(name); ++ kfree(name.name); + iput(inode); + return ret; + } +@@ -978,14 +988,14 @@ out: + static noinline int inode_in_dir(struct btrfs_root *root, + struct btrfs_path *path, + u64 dirid, u64 objectid, u64 index, +- const char *name, int name_len) ++ struct fscrypt_str *name) + { + struct btrfs_dir_item *di; + struct btrfs_key location; + int ret = 0; + + di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, +- index, name, name_len, 0); ++ index, name, 0); + if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; +@@ -998,7 +1008,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, + } + + btrfs_release_path(path); +- di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); ++ di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, 0); + if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; +@@ -1025,7 +1035,7 @@ out: + static noinline int backref_in_log(struct btrfs_root *log, + struct btrfs_key *key, + u64 ref_objectid, +- const char *name, int namelen) ++ const struct fscrypt_str *name) + { + struct btrfs_path *path; + int ret; +@@ -1045,12 +1055,10 @@ static noinline int backref_in_log(struct btrfs_root *log, + if (key->type == BTRFS_INODE_EXTREF_KEY) + ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], + path->slots[0], +- ref_objectid, +- name, namelen); ++ ref_objectid, name); + else + ret = !!btrfs_find_name_in_backref(path->nodes[0], +- path->slots[0], +- name, namelen); ++ path->slots[0], name); + out: + btrfs_free_path(path); + return ret; +@@ -1063,11 +1071,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, + struct btrfs_inode *dir, + struct btrfs_inode *inode, + u64 inode_objectid, u64 parent_objectid, +- u64 ref_index, char *name, int namelen) ++ u64 ref_index, struct fscrypt_str *name) + { + int ret; +- char *victim_name; +- int victim_name_len; + struct extent_buffer *leaf; + struct btrfs_dir_item *di; + struct btrfs_key search_key; +@@ -1099,43 +1105,40 @@ again: + ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); + ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]); + while (ptr < ptr_end) { +- victim_ref = (struct btrfs_inode_ref *)ptr; +- victim_name_len = btrfs_inode_ref_name_len(leaf, +- victim_ref); +- victim_name = kmalloc(victim_name_len, GFP_NOFS); +- if (!victim_name) +- return -ENOMEM; ++ struct fscrypt_str victim_name; + +- read_extent_buffer(leaf, victim_name, +- (unsigned long)(victim_ref + 1), +- victim_name_len); ++ victim_ref = (struct btrfs_inode_ref *)ptr; ++ ret = read_alloc_one_name(leaf, (victim_ref + 1), ++ btrfs_inode_ref_name_len(leaf, victim_ref), ++ &victim_name); ++ if (ret) ++ return ret; + + ret = backref_in_log(log_root, &search_key, +- parent_objectid, victim_name, +- victim_name_len); ++ parent_objectid, &victim_name); + if (ret < 0) { +- kfree(victim_name); ++ kfree(victim_name.name); + return ret; + } else if (!ret) { + inc_nlink(&inode->vfs_inode); + btrfs_release_path(path); + + ret = unlink_inode_for_log_replay(trans, dir, 
inode, +- victim_name, victim_name_len); +- kfree(victim_name); ++ &victim_name); ++ kfree(victim_name.name); + if (ret) + return ret; + goto again; + } +- kfree(victim_name); ++ kfree(victim_name.name); + +- ptr = (unsigned long)(victim_ref + 1) + victim_name_len; ++ ptr = (unsigned long)(victim_ref + 1) + victim_name.len; + } + } + btrfs_release_path(path); + + /* Same search but for extended refs */ +- extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, ++ extref = btrfs_lookup_inode_extref(NULL, root, path, name, + inode_objectid, parent_objectid, 0, + 0); + if (IS_ERR(extref)) { +@@ -1152,29 +1155,28 @@ again: + base = btrfs_item_ptr_offset(leaf, path->slots[0]); + + while (cur_offset < item_size) { +- extref = (struct btrfs_inode_extref *)(base + cur_offset); ++ struct fscrypt_str victim_name; + +- victim_name_len = btrfs_inode_extref_name_len(leaf, extref); ++ extref = (struct btrfs_inode_extref *)(base + cur_offset); + + if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) + goto next; + +- victim_name = kmalloc(victim_name_len, GFP_NOFS); +- if (!victim_name) +- return -ENOMEM; +- read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, +- victim_name_len); ++ ret = read_alloc_one_name(leaf, &extref->name, ++ btrfs_inode_extref_name_len(leaf, extref), ++ &victim_name); ++ if (ret) ++ return ret; + + search_key.objectid = inode_objectid; + search_key.type = BTRFS_INODE_EXTREF_KEY; + search_key.offset = btrfs_extref_hash(parent_objectid, +- victim_name, +- victim_name_len); ++ victim_name.name, ++ victim_name.len); + ret = backref_in_log(log_root, &search_key, +- parent_objectid, victim_name, +- victim_name_len); ++ parent_objectid, &victim_name); + if (ret < 0) { +- kfree(victim_name); ++ kfree(victim_name.name); + return ret; + } else if (!ret) { + ret = -ENOENT; +@@ -1186,26 +1188,24 @@ again: + + ret = unlink_inode_for_log_replay(trans, + BTRFS_I(victim_parent), +- inode, +- victim_name, +- victim_name_len); ++ inode, &victim_name); + } + iput(victim_parent); +- kfree(victim_name); ++ kfree(victim_name.name); + if (ret) + return ret; + goto again; + } +- kfree(victim_name); ++ kfree(victim_name.name); + next: +- cur_offset += victim_name_len + sizeof(*extref); ++ cur_offset += victim_name.len + sizeof(*extref); + } + } + btrfs_release_path(path); + + /* look for a conflicting sequence number */ + di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), +- ref_index, name, namelen, 0); ++ ref_index, name, 0); + if (IS_ERR(di)) { + return PTR_ERR(di); + } else if (di) { +@@ -1216,8 +1216,7 @@ next: + btrfs_release_path(path); + + /* look for a conflicting name */ +- di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), +- name, namelen, 0); ++ di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, 0); + if (IS_ERR(di)) { + return PTR_ERR(di); + } else if (di) { +@@ -1231,20 +1230,18 @@ next: + } + + static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, +- u32 *namelen, char **name, u64 *index, ++ struct fscrypt_str *name, u64 *index, + u64 *parent_objectid) + { + struct btrfs_inode_extref *extref; ++ int ret; + + extref = (struct btrfs_inode_extref *)ref_ptr; + +- *namelen = btrfs_inode_extref_name_len(eb, extref); +- *name = kmalloc(*namelen, GFP_NOFS); +- if (*name == NULL) +- return -ENOMEM; +- +- read_extent_buffer(eb, *name, (unsigned long)&extref->name, +- *namelen); ++ ret = read_alloc_one_name(eb, &extref->name, ++ btrfs_inode_extref_name_len(eb, extref), name); ++ if (ret) ++ 
return ret; + + if (index) + *index = btrfs_inode_extref_index(eb, extref); +@@ -1255,18 +1252,17 @@ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, + } + + static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, +- u32 *namelen, char **name, u64 *index) ++ struct fscrypt_str *name, u64 *index) + { + struct btrfs_inode_ref *ref; ++ int ret; + + ref = (struct btrfs_inode_ref *)ref_ptr; + +- *namelen = btrfs_inode_ref_name_len(eb, ref); +- *name = kmalloc(*namelen, GFP_NOFS); +- if (*name == NULL) +- return -ENOMEM; +- +- read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); ++ ret = read_alloc_one_name(eb, ref + 1, btrfs_inode_ref_name_len(eb, ref), ++ name); ++ if (ret) ++ return ret; + + if (index) + *index = btrfs_inode_ref_index(eb, ref); +@@ -1308,28 +1304,24 @@ again: + ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); + ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]); + while (ref_ptr < ref_end) { +- char *name = NULL; +- int namelen; ++ struct fscrypt_str name; + u64 parent_id; + + if (key->type == BTRFS_INODE_EXTREF_KEY) { +- ret = extref_get_fields(eb, ref_ptr, &namelen, &name, ++ ret = extref_get_fields(eb, ref_ptr, &name, + NULL, &parent_id); + } else { + parent_id = key->offset; +- ret = ref_get_fields(eb, ref_ptr, &namelen, &name, +- NULL); ++ ret = ref_get_fields(eb, ref_ptr, &name, NULL); + } + if (ret) + goto out; + + if (key->type == BTRFS_INODE_EXTREF_KEY) + ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot, +- parent_id, name, +- namelen); ++ parent_id, &name); + else +- ret = !!btrfs_find_name_in_backref(log_eb, log_slot, +- name, namelen); ++ ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name); + + if (!ret) { + struct inode *dir; +@@ -1338,20 +1330,20 @@ again: + dir = read_one_inode(root, parent_id); + if (!dir) { + ret = -ENOENT; +- kfree(name); ++ kfree(name.name); + goto out; + } + ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), +- inode, name, namelen); +- kfree(name); ++ inode, &name); ++ kfree(name.name); + iput(dir); + if (ret) + goto out; + goto again; + } + +- kfree(name); +- ref_ptr += namelen; ++ kfree(name.name); ++ ref_ptr += name.len; + if (key->type == BTRFS_INODE_EXTREF_KEY) + ref_ptr += sizeof(struct btrfs_inode_extref); + else +@@ -1380,8 +1372,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + struct inode *inode = NULL; + unsigned long ref_ptr; + unsigned long ref_end; +- char *name = NULL; +- int namelen; ++ struct fscrypt_str name; + int ret; + int log_ref_ver = 0; + u64 parent_objectid; +@@ -1425,7 +1416,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + + while (ref_ptr < ref_end) { + if (log_ref_ver) { +- ret = extref_get_fields(eb, ref_ptr, &namelen, &name, ++ ret = extref_get_fields(eb, ref_ptr, &name, + &ref_index, &parent_objectid); + /* + * parent object can change from one array +@@ -1438,15 +1429,13 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + goto out; + } + } else { +- ret = ref_get_fields(eb, ref_ptr, &namelen, &name, +- &ref_index); ++ ret = ref_get_fields(eb, ref_ptr, &name, &ref_index); + } + if (ret) + goto out; + + ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), +- btrfs_ino(BTRFS_I(inode)), ref_index, +- name, namelen); ++ btrfs_ino(BTRFS_I(inode)), ref_index, &name); + if (ret < 0) { + goto out; + } else if (ret == 0) { +@@ -1460,7 +1449,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + ret = __add_inode_ref(trans, root, path, 
log, + BTRFS_I(dir), BTRFS_I(inode), + inode_objectid, parent_objectid, +- ref_index, name, namelen); ++ ref_index, &name); + if (ret) { + if (ret == 1) + ret = 0; +@@ -1469,7 +1458,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + + /* insert our name */ + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), +- name, namelen, 0, ref_index); ++ &name, 0, ref_index); + if (ret) + goto out; + +@@ -1479,9 +1468,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + } + /* Else, ret == 1, we already have a perfect match, we're done. */ + +- ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; +- kfree(name); +- name = NULL; ++ ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len; ++ kfree(name.name); ++ name.name = NULL; + if (log_ref_ver) { + iput(dir); + dir = NULL; +@@ -1505,7 +1494,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + ret = overwrite_item(trans, root, path, eb, slot, key); + out: + btrfs_release_path(path); +- kfree(name); ++ kfree(name.name); + iput(dir); + iput(inode); + return ret; +@@ -1777,7 +1766,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, + static noinline int insert_one_name(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 dirid, u64 index, +- char *name, int name_len, ++ const struct fscrypt_str *name, + struct btrfs_key *location) + { + struct inode *inode; +@@ -1795,7 +1784,7 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, + } + + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, +- name_len, 1, index); ++ 1, index); + + /* FIXME, put inode into FIXUP list */ + +@@ -1855,8 +1844,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + struct btrfs_dir_item *di, + struct btrfs_key *key) + { +- char *name; +- int name_len; ++ struct fscrypt_str name; + struct btrfs_dir_item *dir_dst_di; + struct btrfs_dir_item *index_dst_di; + bool dir_dst_matches = false; +@@ -1874,17 +1862,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + if (!dir) + return -EIO; + +- name_len = btrfs_dir_name_len(eb, di); +- name = kmalloc(name_len, GFP_NOFS); +- if (!name) { +- ret = -ENOMEM; ++ ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); ++ if (ret) + goto out; +- } + + log_type = btrfs_dir_type(eb, di); +- read_extent_buffer(eb, name, (unsigned long)(di + 1), +- name_len); +- + btrfs_dir_item_key_to_cpu(eb, di, &log_key); + ret = btrfs_lookup_inode(trans, root, path, &log_key, 0); + btrfs_release_path(path); +@@ -1894,7 +1876,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + ret = 0; + + dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, +- name, name_len, 1); ++ &name, 1); + if (IS_ERR(dir_dst_di)) { + ret = PTR_ERR(dir_dst_di); + goto out; +@@ -1911,7 +1893,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + + index_dst_di = btrfs_lookup_dir_index_item(trans, root, path, + key->objectid, key->offset, +- name, name_len, 1); ++ &name, 1); + if (IS_ERR(index_dst_di)) { + ret = PTR_ERR(index_dst_di); + goto out; +@@ -1939,7 +1921,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + search_key.objectid = log_key.objectid; + search_key.type = BTRFS_INODE_REF_KEY; + search_key.offset = key->objectid; +- ret = backref_in_log(root->log_root, &search_key, 0, name, name_len); ++ ret = backref_in_log(root->log_root, &search_key, 0, &name); + if (ret < 
0) { + goto out; + } else if (ret) { +@@ -1952,8 +1934,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + search_key.objectid = log_key.objectid; + search_key.type = BTRFS_INODE_EXTREF_KEY; + search_key.offset = key->objectid; +- ret = backref_in_log(root->log_root, &search_key, key->objectid, name, +- name_len); ++ ret = backref_in_log(root->log_root, &search_key, key->objectid, &name); + if (ret < 0) { + goto out; + } else if (ret) { +@@ -1964,7 +1945,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + } + btrfs_release_path(path); + ret = insert_one_name(trans, root, key->objectid, key->offset, +- name, name_len, &log_key); ++ &name, &log_key); + if (ret && ret != -ENOENT && ret != -EEXIST) + goto out; + if (!ret) +@@ -1974,10 +1955,10 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + + out: + if (!ret && update_size) { +- btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); ++ btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2); + ret = btrfs_update_inode(trans, root, BTRFS_I(dir)); + } +- kfree(name); ++ kfree(name.name); + iput(dir); + if (!ret && name_added) + ret = 1; +@@ -2143,8 +2124,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, + struct extent_buffer *eb; + int slot; + struct btrfs_dir_item *di; +- int name_len; +- char *name; ++ struct fscrypt_str name; + struct inode *inode = NULL; + struct btrfs_key location; + +@@ -2159,22 +2139,16 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, + eb = path->nodes[0]; + slot = path->slots[0]; + di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); +- name_len = btrfs_dir_name_len(eb, di); +- name = kmalloc(name_len, GFP_NOFS); +- if (!name) { +- ret = -ENOMEM; ++ ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); ++ if (ret) + goto out; +- } +- +- read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len); + + if (log) { + struct btrfs_dir_item *log_di; + + log_di = btrfs_lookup_dir_index_item(trans, log, log_path, + dir_key->objectid, +- dir_key->offset, +- name, name_len, 0); ++ dir_key->offset, &name, 0); + if (IS_ERR(log_di)) { + ret = PTR_ERR(log_di); + goto out; +@@ -2200,7 +2174,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, + + inc_nlink(inode); + ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode), +- name, name_len); ++ &name); + /* + * Unlike dir item keys, dir index keys can only have one name (entry) in + * them, as there are no key collisions since each key has a unique offset +@@ -2209,7 +2183,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, + out: + btrfs_release_path(path); + btrfs_release_path(log_path); +- kfree(name); ++ kfree(name.name); + iput(inode); + return ret; + } +@@ -3443,7 +3417,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans, + struct btrfs_root *log, + struct btrfs_path *path, + u64 dir_ino, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + u64 index) + { + struct btrfs_dir_item *di; +@@ -3453,7 +3427,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans, + * for dir item keys. 
+ */ + di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, +- index, name, name_len, -1); ++ index, name, -1); + if (IS_ERR(di)) + return PTR_ERR(di); + else if (!di) +@@ -3490,7 +3464,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans, + */ + void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, + struct btrfs_root *root, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + struct btrfs_inode *dir, u64 index) + { + struct btrfs_path *path; +@@ -3517,7 +3491,7 @@ void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, + } + + ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir), +- name, name_len, index); ++ name, index); + btrfs_free_path(path); + out_unlock: + mutex_unlock(&dir->log_mutex); +@@ -3529,7 +3503,7 @@ out_unlock: + /* see comments for btrfs_del_dir_entries_in_log */ + void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, + struct btrfs_root *root, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + struct btrfs_inode *inode, u64 dirid) + { + struct btrfs_root *log; +@@ -3550,7 +3524,7 @@ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, + log = root->log_root; + mutex_lock(&inode->log_mutex); + +- ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), ++ ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode), + dirid, &index); + mutex_unlock(&inode->log_mutex); + if (ret < 0 && ret != -ENOENT) +@@ -5293,6 +5267,7 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb, + u32 this_len; + unsigned long name_ptr; + struct btrfs_dir_item *di; ++ struct fscrypt_str name_str; + + if (key->type == BTRFS_INODE_REF_KEY) { + struct btrfs_inode_ref *iref; +@@ -5326,8 +5301,11 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb, + } + + read_extent_buffer(eb, name, name_ptr, this_name_len); ++ ++ name_str.name = name; ++ name_str.len = this_name_len; + di = btrfs_lookup_dir_item(NULL, inode->root, search_path, +- parent, name, this_name_len, 0); ++ parent, &name_str, 0); + if (di && !IS_ERR(di)) { + struct btrfs_key di_key; + +@@ -7493,9 +7471,14 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, + if (old_dir && old_dir->logged_trans == trans->transid) { + struct btrfs_root *log = old_dir->root->log_root; + struct btrfs_path *path; ++ struct fscrypt_name fname; + + ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX); + ++ ret = fscrypt_setup_filename(&old_dir->vfs_inode, ++ &old_dentry->d_name, 0, &fname); ++ if (ret) ++ goto out; + /* + * We have two inodes to update in the log, the old directory and + * the inode that got renamed, so we must pin the log to prevent +@@ -7508,13 +7491,17 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, + * not fail, but if it does, it's not serious, just bail out and + * mark the log for a full commit. 
+ */ +- if (WARN_ON_ONCE(ret < 0)) ++ if (WARN_ON_ONCE(ret < 0)) { ++ fscrypt_free_filename(&fname); + goto out; ++ } ++ + log_pinned = true; + + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; ++ fscrypt_free_filename(&fname); + goto out; + } + +@@ -7530,8 +7517,7 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, + */ + mutex_lock(&old_dir->log_mutex); + ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir), +- old_dentry->d_name.name, +- old_dentry->d_name.len, old_dir_index); ++ &fname.disk_name, old_dir_index); + if (ret > 0) { + /* + * The dentry does not exist in the log, so record its +@@ -7545,6 +7531,7 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, + mutex_unlock(&old_dir->log_mutex); + + btrfs_free_path(path); ++ fscrypt_free_filename(&fname); + if (ret < 0) + goto out; + } +diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h +index bcca74128c3bb..8adebf4c9adaf 100644 +--- a/fs/btrfs/tree-log.h ++++ b/fs/btrfs/tree-log.h +@@ -84,11 +84,11 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, + struct btrfs_log_ctx *ctx); + void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, + struct btrfs_root *root, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + struct btrfs_inode *dir, u64 index); + void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, + struct btrfs_root *root, +- const char *name, int name_len, ++ const struct fscrypt_str *name, + struct btrfs_inode *inode, u64 dirid); + void btrfs_end_log_trans(struct btrfs_root *root); + void btrfs_pin_log_trans(struct btrfs_root *root); +diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c +index 5cd612a8f8584..49addc345aebe 100644 +--- a/fs/erofs/decompressor_lzma.c ++++ b/fs/erofs/decompressor_lzma.c +@@ -217,9 +217,12 @@ again: + strm->buf.out_size = min_t(u32, outlen, + PAGE_SIZE - pageofs); + outlen -= strm->buf.out_size; +- if (!rq->out[no] && rq->fillgaps) /* deduped */ ++ if (!rq->out[no] && rq->fillgaps) { /* deduped */ + rq->out[no] = erofs_allocpage(pagepool, + GFP_KERNEL | __GFP_NOFAIL); ++ set_page_private(rq->out[no], ++ Z_EROFS_SHORTLIVED_PAGE); ++ } + if (rq->out[no]) + strm->buf.out = kmap(rq->out[no]) + pageofs; + pageofs = 0; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index be570c65ae154..e1297c6bcfbe2 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -7157,7 +7157,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) + { + struct nfs4_lockdata *data = calldata; + struct nfs4_lock_state *lsp = data->lsp; +- struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry)); + + if (!nfs4_sequence_done(task, &data->res.seq_res)) + return; +@@ -7165,7 +7164,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) + data->rpc_status = task->tk_status; + switch (task->tk_status) { + case 0: +- renew_lease(server, data->timestamp); ++ renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), ++ data->timestamp); + if (data->arg.new_lock && !data->cancelled) { + data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); + if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) +@@ -7193,8 +7193,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) + if (!nfs4_stateid_match(&data->arg.open_stateid, + &lsp->ls_state->open_stateid)) + goto out_restart; +- else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN) +- goto out_restart; + } else if (!nfs4_stateid_match(&data->arg.lock_stateid, + &lsp->ls_stateid)) + goto 
out_restart; +@@ -10629,7 +10627,9 @@ static void nfs4_disable_swap(struct inode *inode) + */ + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + +- nfs4_schedule_state_manager(clp); ++ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); ++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); ++ wake_up_var(&clp->cl_state); + } + + static const struct inode_operations nfs4_dir_inode_operations = { +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 5b49e5365bb30..457b2b2f804ab 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1209,17 +1209,23 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) + { + struct task_struct *task; + char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; +- struct rpc_clnt *cl = clp->cl_rpcclient; +- +- while (cl != cl->cl_parent) +- cl = cl->cl_parent; ++ struct rpc_clnt *clnt = clp->cl_rpcclient; ++ bool swapon = false; + + set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); +- if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) { +- wake_up_var(&clp->cl_state); +- return; ++ ++ if (atomic_read(&clnt->cl_swapper)) { ++ swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, ++ &clp->cl_state); ++ if (!swapon) { ++ wake_up_var(&clp->cl_state); ++ return; ++ } + } +- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); ++ ++ if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) ++ return; ++ + __module_get(THIS_MODULE); + refcount_inc(&clp->cl_count); + +@@ -1236,8 +1242,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) + __func__, PTR_ERR(task)); + if (!nfs_client_init_is_complete(clp)) + nfs_mark_client_ready(clp, PTR_ERR(task)); ++ if (swapon) ++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + nfs4_clear_state_manager_bit(clp); +- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + nfs_put_client(clp); + module_put(THIS_MODULE); + } +@@ -2703,6 +2710,13 @@ static void nfs4_state_manager(struct nfs_client *clp) + nfs4_end_drain_session(clp); + nfs4_clear_state_manager_bit(clp); + ++ if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) && ++ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, ++ &clp->cl_state)) { ++ memflags = memalloc_nofs_save(); ++ continue; ++ } ++ + if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) { + if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { + nfs_client_return_marked_delegations(clp); +@@ -2741,22 +2755,25 @@ static int nfs4_run_state_manager(void *ptr) + + allow_signal(SIGKILL); + again: +- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); + nfs4_state_manager(clp); +- if (atomic_read(&cl->cl_swapper)) { ++ ++ if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) && ++ !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) { + wait_var_event_interruptible(&clp->cl_state, + test_bit(NFS4CLNT_RUN_MANAGER, + &clp->cl_state)); +- if (atomic_read(&cl->cl_swapper) && +- test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) ++ if (!atomic_read(&cl->cl_swapper)) ++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); ++ if (refcount_read(&clp->cl_count) > 1 && !signalled() && ++ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) + goto again; + /* Either no longer a swapper, or were signalled */ ++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + } +- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + + if (refcount_read(&clp->cl_count) > 1 && !signalled() && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) && +- !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state)) ++ 
!test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) + goto again; + + nfs_put_client(clp); +diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c +index a6f7403669631..edb535a0ff973 100644 +--- a/fs/nfs/sysfs.c ++++ b/fs/nfs/sysfs.c +@@ -18,7 +18,7 @@ + #include "sysfs.h" + + struct kobject *nfs_client_kobj; +-static struct kset *nfs_client_kset; ++static struct kset *nfs_kset; + + static void nfs_netns_object_release(struct kobject *kobj) + { +@@ -55,13 +55,13 @@ static struct kobject *nfs_netns_object_alloc(const char *name, + + int nfs_sysfs_init(void) + { +- nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj); +- if (!nfs_client_kset) ++ nfs_kset = kset_create_and_add("nfs", NULL, fs_kobj); ++ if (!nfs_kset) + return -ENOMEM; +- nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL); ++ nfs_client_kobj = nfs_netns_object_alloc("net", nfs_kset, NULL); + if (!nfs_client_kobj) { +- kset_unregister(nfs_client_kset); +- nfs_client_kset = NULL; ++ kset_unregister(nfs_kset); ++ nfs_kset = NULL; + return -ENOMEM; + } + return 0; +@@ -70,7 +70,7 @@ int nfs_sysfs_init(void) + void nfs_sysfs_exit(void) + { + kobject_put(nfs_client_kobj); +- kset_unregister(nfs_client_kset); ++ kset_unregister(nfs_kset); + } + + static ssize_t nfs_netns_identifier_show(struct kobject *kobj, +@@ -159,7 +159,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (p) { + p->net = net; +- p->kobject.kset = nfs_client_kset; ++ p->kobject.kset = nfs_kset; + if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type, + parent, "nfs_client") == 0) + return p; +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 57da4f23c1e43..acb8951eb7576 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -2901,9 +2901,9 @@ bind_socket(struct TCP_Server_Info *server) + if (server->srcaddr.ss_family != AF_UNSPEC) { + /* Bind to the specified local IP address */ + struct socket *socket = server->ssocket; +- rc = socket->ops->bind(socket, +- (struct sockaddr *) &server->srcaddr, +- sizeof(server->srcaddr)); ++ rc = kernel_bind(socket, ++ (struct sockaddr *) &server->srcaddr, ++ sizeof(server->srcaddr)); + if (rc < 0) { + struct sockaddr_in *saddr4; + struct sockaddr_in6 *saddr6; +@@ -3050,8 +3050,8 @@ generic_ip_connect(struct TCP_Server_Info *server) + socket->sk->sk_sndbuf, + socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); + +- rc = socket->ops->connect(socket, saddr, slen, +- server->noblockcnt ? O_NONBLOCK : 0); ++ rc = kernel_connect(socket, saddr, slen, ++ server->noblockcnt ? O_NONBLOCK : 0); + /* + * When mounting SMB root file systems, we do not want to block in + * connect. 
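[Editor's sketch] The fs/smb/client/connect.c hunks above stop calling socket->ops->bind()/->connect() directly and go through the kernel_bind()/kernel_connect() wrappers from <linux/net.h>, keeping in-kernel SMB traffic on the same entry points as other kernel socket users. A minimal sketch with a hypothetical IPv4 peer:

#include <linux/net.h>
#include <linux/in.h>
#include <linux/fcntl.h>

static int demo_connect(struct socket *sock, struct sockaddr_in *peer,
			bool nonblock)
{
	/* Same arguments as sock->ops->connect(), but via the wrapper,
	 * as the patch does for both bind and connect. */
	return kernel_connect(sock, (struct sockaddr *)peer, sizeof(*peer),
			      nonblock ? O_NONBLOCK : 0);
}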
Otherwise bail out and then let cifs_reconnect() perform +diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c +index e1d2be19cddfa..ff97cad8d5b45 100644 +--- a/fs/smb/server/connection.c ++++ b/fs/smb/server/connection.c +@@ -84,6 +84,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void) + spin_lock_init(&conn->llist_lock); + INIT_LIST_HEAD(&conn->lock_list); + ++ init_rwsem(&conn->session_lock); ++ + down_write(&conn_list_lock); + list_add(&conn->conns_list, &conn_list); + up_write(&conn_list_lock); +diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h +index ad8dfaa48ffb3..335fdd714d595 100644 +--- a/fs/smb/server/connection.h ++++ b/fs/smb/server/connection.h +@@ -50,6 +50,7 @@ struct ksmbd_conn { + struct nls_table *local_nls; + struct unicode_map *um; + struct list_head conns_list; ++ struct rw_semaphore session_lock; + /* smb session 1 per user */ + struct xarray sessions; + unsigned long last_active; +diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c +index ea4b56d570fbb..cf6621e21ba36 100644 +--- a/fs/smb/server/mgmt/user_session.c ++++ b/fs/smb/server/mgmt/user_session.c +@@ -183,7 +183,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) + unsigned long id; + struct ksmbd_session *sess; + +- down_write(&sessions_table_lock); ++ down_write(&conn->session_lock); + xa_for_each(&conn->sessions, id, sess) { + if (sess->state != SMB2_SESSION_VALID || + time_after(jiffies, +@@ -194,7 +194,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) + continue; + } + } +- up_write(&sessions_table_lock); ++ up_write(&conn->session_lock); + } + + int ksmbd_session_register(struct ksmbd_conn *conn, +@@ -236,7 +236,9 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) + } + } + } ++ up_write(&sessions_table_lock); + ++ down_write(&conn->session_lock); + xa_for_each(&conn->sessions, id, sess) { + unsigned long chann_id; + struct channel *chann; +@@ -253,7 +255,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) + ksmbd_session_destroy(sess); + } + } +- up_write(&sessions_table_lock); ++ up_write(&conn->session_lock); + } + + struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, +@@ -261,9 +263,11 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, + { + struct ksmbd_session *sess; + ++ down_read(&conn->session_lock); + sess = xa_load(&conn->sessions, id); + if (sess) + sess->last_active = jiffies; ++ up_read(&conn->session_lock); + return sess; + } + +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index f6fd5cf976a50..683152007566c 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -8128,10 +8128,10 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) + goto err_out; + } + +- opinfo_put(opinfo); +- ksmbd_fd_put(work, fp); + opinfo->op_state = OPLOCK_STATE_NONE; + wake_up_interruptible_all(&opinfo->oplock_q); ++ opinfo_put(opinfo); ++ ksmbd_fd_put(work, fp); + + rsp->StructureSize = cpu_to_le16(24); + rsp->OplockLevel = rsp_oplevel; +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index 1ed2ec035e779..1fba826f0acef 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -1065,7 +1065,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, + struct bpf_attach_target_info *tgt_info) + { +- return ERR_PTR(-EOPNOTSUPP); ++ return NULL; + } + static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} + #define 
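[Editor's sketch] The ksmbd hunks above add a per-connection rw_semaphore (conn->session_lock) so that session expiry and teardown (writers) cannot race ksmbd_session_lookup() (a reader) on the sessions xarray. A generic sketch of the locking shape, with demo_* names standing in for the ksmbd types:

#include <linux/rwsem.h>
#include <linux/xarray.h>

struct demo_conn {
	struct rw_semaphore session_lock;
	struct xarray sessions;
};

static void *demo_session_lookup(struct demo_conn *conn, unsigned long id)
{
	void *sess;

	down_read(&conn->session_lock);		/* lookups may run concurrently */
	sess = xa_load(&conn->sessions, id);
	up_read(&conn->session_lock);
	return sess;
}

static void demo_session_evict(struct demo_conn *conn, unsigned long id)
{
	down_write(&conn->session_lock);	/* excludes all lookups */
	xa_erase(&conn->sessions, id);
	up_write(&conn->session_lock);
}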
DEFINE_BPF_DISPATCHER(name) +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h +index 15d7529ac9534..9a44de45cc1f2 100644 +--- a/include/linux/ipv6.h ++++ b/include/linux/ipv6.h +@@ -33,6 +33,7 @@ struct ipv6_devconf { + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; ++ __s32 accept_ra_min_lft; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + #ifdef CONFIG_IPV6_ROUTER_PREF +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 104ec00823da8..eefb0948110ae 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1906,6 +1906,8 @@ static inline bool can_do_mlock(void) { return false; } + extern int user_shm_lock(size_t, struct ucounts *); + extern void user_shm_unlock(size_t, struct ucounts *); + ++struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, ++ pte_t pte); + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); + struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, +diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h +index 625f491b95de8..fb31312825ae5 100644 +--- a/include/linux/netfilter/nf_conntrack_sctp.h ++++ b/include/linux/netfilter/nf_conntrack_sctp.h +@@ -9,6 +9,7 @@ struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; ++ u8 init[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; + }; +diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h +index bdcf83cd719ef..be9f61e3e8e6d 100644 +--- a/include/linux/regulator/mt6358-regulator.h ++++ b/include/linux/regulator/mt6358-regulator.h +@@ -48,8 +48,6 @@ enum { + MT6358_ID_VLDO28, + MT6358_ID_VAUD28, + MT6358_ID_VSIM2, +- MT6358_ID_VCORE_SSHUB, +- MT6358_ID_VSRAM_OTHERS_SSHUB, + MT6358_ID_RG_MAX, + }; + +@@ -90,8 +88,6 @@ enum { + MT6366_ID_VMC, + MT6366_ID_VAUD28, + MT6366_ID_VSIM2, +- MT6366_ID_VCORE_SSHUB, +- MT6366_ID_VSRAM_OTHERS_SSHUB, + MT6366_ID_RG_MAX, + }; + +diff --git a/include/net/arp.h b/include/net/arp.h +index d7ef4ec71dfeb..e8747e0713c79 100644 +--- a/include/net/arp.h ++++ b/include/net/arp.h +@@ -38,11 +38,11 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 + { + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __ipv4_neigh_lookup_noref(dev, key); + if (n && !refcount_inc_not_zero(&n->refcnt)) + n = NULL; +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + return n; + } +@@ -51,10 +51,10 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key) + { + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __ipv4_neigh_lookup_noref(dev, key); + neigh_confirm(n); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + } + + void arp_init(void); +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h +index 5976545aa26b9..7a6c3059d50b5 100644 +--- a/include/net/cfg80211.h ++++ b/include/net/cfg80211.h +@@ -5621,12 +5621,17 @@ struct cfg80211_cqm_config; + * wiphy_lock - lock the wiphy + * @wiphy: the wiphy to lock + * +- * This is mostly exposed so it can be done around registering and +- * unregistering netdevs that aren't created through cfg80211 calls, +- * since that requires locking in cfg80211 when the notifiers is +- * called, but that cannot differentiate which way it's called. 
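[Editor's sketch] The include/net/arp.h hunk above (and the ndisc.h/neighbour.h hunks further down) drop the rcu_read_lock_bh() variants in favour of plain rcu_read_lock(), keeping the usual RCU lookup pattern: traverse under the read lock, then take a stable reference with refcount_inc_not_zero() before leaving the critical section. A generic sketch with a hypothetical singly linked table; demo_* names are illustrative:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/types.h>

struct demo_node {
	struct demo_node __rcu *next;
	refcount_t refcnt;
	u32 key;
};

/* Returns a referenced node, or NULL if absent or already being freed. */
static struct demo_node *demo_lookup(struct demo_node __rcu **head, u32 key)
{
	struct demo_node *n;

	rcu_read_lock();			/* plain RCU, not the _bh variant */
	for (n = rcu_dereference(*head); n; n = rcu_dereference(n->next))
		if (n->key == key)
			break;
	if (n && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;			/* lost the race with the freer */
	rcu_read_unlock();
	return n;
}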
++ * This is needed around registering and unregistering netdevs that ++ * aren't created through cfg80211 calls, since that requires locking ++ * in cfg80211 when the notifiers is called, but that cannot ++ * differentiate which way it's called. ++ * ++ * It can also be used by drivers for their own purposes. + * + * When cfg80211 ops are called, the wiphy is already locked. ++ * ++ * Note that this makes sure that no workers that have been queued ++ * with wiphy_queue_work() are running. + */ + static inline void wiphy_lock(struct wiphy *wiphy) + __acquires(&wiphy->mtx) +@@ -5646,6 +5651,88 @@ static inline void wiphy_unlock(struct wiphy *wiphy) + mutex_unlock(&wiphy->mtx); + } + ++struct wiphy_work; ++typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); ++ ++struct wiphy_work { ++ struct list_head entry; ++ wiphy_work_func_t func; ++}; ++ ++static inline void wiphy_work_init(struct wiphy_work *work, ++ wiphy_work_func_t func) ++{ ++ INIT_LIST_HEAD(&work->entry); ++ work->func = func; ++} ++ ++/** ++ * wiphy_work_queue - queue work for the wiphy ++ * @wiphy: the wiphy to queue for ++ * @work: the work item ++ * ++ * This is useful for work that must be done asynchronously, and work ++ * queued here has the special property that the wiphy mutex will be ++ * held as if wiphy_lock() was called, and that it cannot be running ++ * after wiphy_lock() was called. Therefore, wiphy_cancel_work() can ++ * use just cancel_work() instead of cancel_work_sync(), it requires ++ * being in a section protected by wiphy_lock(). ++ */ ++void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work); ++ ++/** ++ * wiphy_work_cancel - cancel previously queued work ++ * @wiphy: the wiphy, for debug purposes ++ * @work: the work to cancel ++ * ++ * Cancel the work *without* waiting for it, this assumes being ++ * called under the wiphy mutex acquired by wiphy_lock(). ++ */ ++void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work); ++ ++struct wiphy_delayed_work { ++ struct wiphy_work work; ++ struct wiphy *wiphy; ++ struct timer_list timer; ++}; ++ ++void wiphy_delayed_work_timer(struct timer_list *t); ++ ++static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork, ++ wiphy_work_func_t func) ++{ ++ timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0); ++ wiphy_work_init(&dwork->work, func); ++} ++ ++/** ++ * wiphy_delayed_work_queue - queue delayed work for the wiphy ++ * @wiphy: the wiphy to queue for ++ * @dwork: the delayable worker ++ * @delay: number of jiffies to wait before queueing ++ * ++ * This is useful for work that must be done asynchronously, and work ++ * queued here has the special property that the wiphy mutex will be ++ * held as if wiphy_lock() was called, and that it cannot be running ++ * after wiphy_lock() was called. Therefore, wiphy_cancel_work() can ++ * use just cancel_work() instead of cancel_work_sync(), it requires ++ * being in a section protected by wiphy_lock(). ++ */ ++void wiphy_delayed_work_queue(struct wiphy *wiphy, ++ struct wiphy_delayed_work *dwork, ++ unsigned long delay); ++ ++/** ++ * wiphy_delayed_work_cancel - cancel previously queued delayed work ++ * @wiphy: the wiphy, for debug purposes ++ * @dwork: the delayed work to cancel ++ * ++ * Cancel the work *without* waiting for it, this assumes being ++ * called under the wiphy mutex acquired by wiphy_lock(). 
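[Editor's sketch] The cfg80211 additions above give drivers a work abstraction whose handlers run with the wiphy mutex held, as if under wiphy_lock(). A minimal usage sketch built only on the API declared in this hunk; demo_priv is a hypothetical driver-private structure:

#include <net/cfg80211.h>

struct demo_priv {
	struct wiphy *wiphy;
	struct wiphy_work maintenance_wk;
};

static void demo_maintenance(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct demo_priv *priv =
		container_of(work, struct demo_priv, maintenance_wk);

	/* Runs with the wiphy mutex held; no extra locking is needed for
	 * state otherwise protected by wiphy_lock(). */
	(void)priv;
}

static void demo_setup(struct demo_priv *priv)
{
	wiphy_work_init(&priv->maintenance_wk, demo_maintenance);
}

static void demo_trigger(struct demo_priv *priv)
{
	wiphy_work_queue(priv->wiphy, &priv->maintenance_wk);
}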
++ */ ++void wiphy_delayed_work_cancel(struct wiphy *wiphy, ++ struct wiphy_delayed_work *dwork); ++ + /** + * struct wireless_dev - wireless device state + * +@@ -5718,6 +5805,7 @@ static inline void wiphy_unlock(struct wiphy *wiphy) + * @event_lock: (private) lock for event list + * @owner_nlportid: (private) owner socket port ID + * @nl_owner_dead: (private) owner socket went away ++ * @cqm_rssi_work: (private) CQM RSSI reporting work + * @cqm_config: (private) nl80211 RSSI monitor state + * @pmsr_list: (private) peer measurement requests + * @pmsr_lock: (private) peer measurements requests/results lock +@@ -5790,7 +5878,8 @@ struct wireless_dev { + } wext; + #endif + +- struct cfg80211_cqm_config *cqm_config; ++ struct wiphy_work cqm_rssi_work; ++ struct cfg80211_cqm_config __rcu *cqm_config; + + struct list_head pmsr_list; + spinlock_t pmsr_lock; +diff --git a/include/net/ndisc.h b/include/net/ndisc.h +index da7eec8669ec4..325a6fb65c896 100644 +--- a/include/net/ndisc.h ++++ b/include/net/ndisc.h +@@ -395,11 +395,11 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons + { + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __ipv6_neigh_lookup_noref(dev, pkey); + if (n && !refcount_inc_not_zero(&n->refcnt)) + n = NULL; +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + return n; + } +@@ -409,10 +409,10 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev, + { + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __ipv6_neigh_lookup_noref(dev, pkey); + neigh_confirm(n); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + } + + static inline void __ipv6_confirm_neigh_stub(struct net_device *dev, +@@ -420,10 +420,10 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev, + { + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __ipv6_neigh_lookup_noref_stub(dev, pkey); + neigh_confirm(n); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + } + + /* uses ipv6_stub and is meant for use outside of IPv6 core */ +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 794e45981891a..ccc4a0f8b4ad8 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -299,14 +299,14 @@ static inline struct neighbour *___neigh_lookup_noref( + const void *pkey, + struct net_device *dev) + { +- struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht); ++ struct neigh_hash_table *nht = rcu_dereference(tbl->nht); + struct neighbour *n; + u32 hash_val; + + hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); +- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); ++ for (n = rcu_dereference(nht->hash_buckets[hash_val]); + n != NULL; +- n = rcu_dereference_bh(n->next)) { ++ n = rcu_dereference(n->next)) { + if (n->dev == dev && key_eq(n, pkey)) + return n; + } +@@ -464,7 +464,7 @@ static __always_inline int neigh_event_send_probe(struct neighbour *neigh, + + if (READ_ONCE(neigh->used) != now) + WRITE_ONCE(neigh->used, now); +- if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))) ++ if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))) + return __neigh_event_send(neigh, skb, immediate_ok); + return 0; + } +@@ -541,7 +541,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb, + READ_ONCE(hh->hh_len)) + return neigh_hh_output(hh, skb); + +- return n->output(n, skb); ++ return READ_ONCE(n->output)(n, skb); + } + + static inline struct neighbour * +diff --git a/include/net/netlink.h 
b/include/net/netlink.h +index 6bfa972f2fbf2..a686c9041ddc0 100644 +--- a/include/net/netlink.h ++++ b/include/net/netlink.h +@@ -937,6 +937,27 @@ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 se + return __nlmsg_put(skb, portid, seq, type, payload, flags); + } + ++/** ++ * nlmsg_append - Add more data to a nlmsg in a skb ++ * @skb: socket buffer to store message in ++ * @size: length of message payload ++ * ++ * Append data to an existing nlmsg, used when constructing a message ++ * with multiple fixed-format headers (which is rare). ++ * Returns NULL if the tailroom of the skb is insufficient to store ++ * the extra payload. ++ */ ++static inline void *nlmsg_append(struct sk_buff *skb, u32 size) ++{ ++ if (unlikely(skb_tailroom(skb) < NLMSG_ALIGN(size))) ++ return NULL; ++ ++ if (NLMSG_ALIGN(size) - size) ++ memset(skb_tail_pointer(skb) + size, 0, ++ NLMSG_ALIGN(size) - size); ++ return __skb_put(skb, NLMSG_ALIGN(size)); ++} ++ + /** + * nlmsg_put_answer - Add a new callback based netlink message to an skb + * @skb: socket buffer to store message in +diff --git a/include/net/nexthop.h b/include/net/nexthop.h +index 28085b995ddcf..2b12725de9c09 100644 +--- a/include/net/nexthop.h ++++ b/include/net/nexthop.h +@@ -497,29 +497,6 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) + return NULL; + } + +-/* Variant of nexthop_fib6_nh(). +- * Caller should either hold rcu_read_lock_bh(), or RTNL. +- */ +-static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh) +-{ +- struct nh_info *nhi; +- +- if (nh->is_group) { +- struct nh_group *nh_grp; +- +- nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp); +- nh = nexthop_mpath_select(nh_grp, 0); +- if (!nh) +- return NULL; +- } +- +- nhi = rcu_dereference_bh_rtnl(nh->nh_info); +- if (nhi->family == AF_INET6) +- return &nhi->fib6_nh; +- +- return NULL; +-} +- + static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i) + { + struct fib6_nh *fib6_nh; +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 5fd69f2342a44..9ebb54122bb71 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -355,12 +355,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, + bool force_schedule); + +-static inline void tcp_dec_quickack_mode(struct sock *sk, +- const unsigned int pkts) ++static inline void tcp_dec_quickack_mode(struct sock *sk) + { + struct inet_connection_sock *icsk = inet_csk(sk); + + if (icsk->icsk_ack.quick) { ++ /* How many ACKs S/ACKing new data have we sent? */ ++ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0; ++ + if (pkts >= icsk->icsk_ack.quick) { + icsk->icsk_ack.quick = 0; + /* Leaving quickack mode we deflate ATO. */ +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index 006858ed04e8c..dc2cff18b68bd 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -161,6 +161,10 @@ struct scsi_device { + * pass settings from slave_alloc to scsi + * core. 
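[Editor's sketch] nlmsg_append(), added to include/net/netlink.h above, reserves (and zero-pads to NLMSG_ALIGN) extra tailroom after an existing message, for the rare case of a message carrying a second fixed-format header. A minimal fill-function sketch; demo_second_hdr is a hypothetical header, not a real netlink family:

#include <net/netlink.h>

struct demo_second_hdr {		/* hypothetical extra header */
	__u32 flags;
};

static int demo_fill(struct sk_buff *skb, u32 portid, u32 seq, int type)
{
	struct nlmsghdr *nlh;
	struct demo_second_hdr *hdr2;

	nlh = nlmsg_put(skb, portid, seq, type, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	hdr2 = nlmsg_append(skb, sizeof(*hdr2));
	if (!hdr2)
		return -EMSGSIZE;	/* insufficient tailroom */
	hdr2->flags = 0;

	nlmsg_end(skb, nlh);
	return 0;
}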
*/ + unsigned int eh_timeout; /* Error handling timeout */ ++ ++ bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */ ++ bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */ ++ + unsigned removable:1; + unsigned changed:1; /* Data invalid due to media change */ + unsigned busy:1; /* Used to prevent races */ +@@ -192,7 +196,7 @@ struct scsi_device { + unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ + unsigned no_start_on_add:1; /* do not issue start on add */ + unsigned allow_restart:1; /* issue START_UNIT in error handler */ +- unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ ++ unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */ + unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ + unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ + unsigned select_no_atn:1; +diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h +index d27d9fb7174c8..71def41b1ad78 100644 +--- a/include/scsi/scsi_host.h ++++ b/include/scsi/scsi_host.h +@@ -752,7 +752,7 @@ extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, + struct device *, + struct device *); + extern void scsi_scan_host(struct Scsi_Host *); +-extern void scsi_rescan_device(struct device *); ++extern int scsi_rescan_device(struct scsi_device *sdev); + extern void scsi_remove_host(struct Scsi_Host *); + extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); + extern int scsi_host_busy(struct Scsi_Host *shost); +diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h +index 53bc487947197..92dbe89dafbf5 100644 +--- a/include/uapi/linux/bpf.h ++++ b/include/uapi/linux/bpf.h +@@ -3112,6 +3112,11 @@ union bpf_attr { + * **BPF_FIB_LOOKUP_OUTPUT** + * Perform lookup from an egress perspective (default is + * ingress). ++ * **BPF_FIB_LOOKUP_SKIP_NEIGH** ++ * Skip the neighbour table lookup. *params*->dmac ++ * and *params*->smac will not be set as output. A common ++ * use case is to call **bpf_redirect_neigh**\ () after ++ * doing **bpf_fib_lookup**\ (). + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** tc cls_act programs. +@@ -6678,6 +6683,7 @@ struct bpf_raw_tracepoint_args { + enum { + BPF_FIB_LOOKUP_DIRECT = (1U << 0), + BPF_FIB_LOOKUP_OUTPUT = (1U << 1), ++ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), + }; + + enum { +diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h +index 53326dfc59ecb..4fa8511b1e355 100644 +--- a/include/uapi/linux/ipv6.h ++++ b/include/uapi/linux/ipv6.h +@@ -198,6 +198,7 @@ enum { + DEVCONF_IOAM6_ID_WIDE, + DEVCONF_NDISC_EVICT_NOCARRIER, + DEVCONF_ACCEPT_UNTRACKED_NA, ++ DEVCONF_ACCEPT_RA_MIN_LFT, + DEVCONF_MAX + }; + +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 2f562cf961e0a..b7383358c4ea1 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -354,10 +354,11 @@ static void rb_init_page(struct buffer_data_page *bpage) + local_set(&bpage->commit, 0); + } + +-/* +- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing +- * this issue out. 
+- */ ++static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage) ++{ ++ return local_read(&bpage->page->commit); ++} ++ + static void free_buffer_page(struct buffer_page *bpage) + { + free_page((unsigned long)bpage->page); +@@ -2024,7 +2025,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) + * Increment overrun to account for the lost events. + */ + local_add(page_entries, &cpu_buffer->overrun); +- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); ++ local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); + local_inc(&cpu_buffer->pages_lost); + } + +@@ -2368,11 +2369,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) + cpu_buffer->reader_page->read); + } + +-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) +-{ +- return local_read(&bpage->page->commit); +-} +- + static struct ring_buffer_event * + rb_iter_head_event(struct ring_buffer_iter *iter) + { +@@ -2518,7 +2514,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, + * the counters. + */ + local_add(entries, &cpu_buffer->overrun); +- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); ++ local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); + local_inc(&cpu_buffer->pages_lost); + + /* +@@ -2661,9 +2657,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + + event = __rb_page_index(tail_page, tail); + +- /* account for padding bytes */ +- local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); +- + /* + * Save the original length to the meta data. + * This will be used by the reader to add lost event +@@ -2677,7 +2670,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + * write counter enough to allow another writer to slip + * in on this page. + * We put in a discarded commit instead, to make sure +- * that this space is not used again. ++ * that this space is not used again, and this space will ++ * not be accounted into 'entries_bytes'. + * + * If we are less than the minimum size, we don't need to + * worry about it. +@@ -2702,6 +2696,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + /* time delta must be non zero */ + event->time_delta = 1; + ++ /* account for padding bytes */ ++ local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); ++ + /* Make sure the padding is visible before the tail_page->write update */ + smp_wmb(); + +@@ -4219,7 +4216,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) + EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); + + /** +- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer ++ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer + * @buffer: The ring buffer + * @cpu: The per CPU buffer to read from. 
+ */ +@@ -4729,6 +4726,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) + + length = rb_event_length(event); + cpu_buffer->reader_page->read += length; ++ cpu_buffer->read_bytes += length; + } + + static void rb_advance_iter(struct ring_buffer_iter *iter) +@@ -5824,7 +5822,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer, + } else { + /* update the entry counter */ + cpu_buffer->read += rb_page_entries(reader); +- cpu_buffer->read_bytes += BUF_PAGE_SIZE; ++ cpu_buffer->read_bytes += rb_page_commit(reader); + + /* swap the pages */ + rb_init_page(bpage); +diff --git a/mm/memory.c b/mm/memory.c +index 2083078cd0615..0d1b3ee8fcd7a 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -672,6 +672,16 @@ out: + return pfn_to_page(pfn); + } + ++struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, ++ pte_t pte) ++{ ++ struct page *page = vm_normal_page(vma, addr, pte); ++ ++ if (page) ++ return page_folio(page); ++ return NULL; ++} ++ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t pmd) +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index 7d36dd95d1fff..bfe2d1d50fbee 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -414,7 +414,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { + }, + }; + +-static int migrate_page_add(struct page *page, struct list_head *pagelist, ++static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, + unsigned long flags); + + struct queue_pages { +@@ -424,6 +424,7 @@ struct queue_pages { + unsigned long start; + unsigned long end; + struct vm_area_struct *first; ++ bool has_unmovable; + }; + + /* +@@ -442,21 +443,20 @@ static inline bool queue_pages_required(struct page *page, + } + + /* +- * queue_pages_pmd() has three possible return values: +- * 0 - pages are placed on the right node or queued successfully, or +- * special page is met, i.e. huge zero page. +- * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were +- * specified. ++ * queue_folios_pmd() has three possible return values: ++ * 0 - folios are placed on the right node or queued successfully, or ++ * special page is met, i.e. zero page, or unmovable page is found ++ * but continue walking (indicated by queue_pages.has_unmovable). + * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an +- * existing page was already on a node that does not follow the ++ * existing folio was already on a node that does not follow the + * policy. 
+ */ +-static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, ++static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, + unsigned long end, struct mm_walk *walk) + __releases(ptl) + { + int ret = 0; +- struct page *page; ++ struct folio *folio; + struct queue_pages *qp = walk->private; + unsigned long flags; + +@@ -464,20 +464,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, + ret = -EIO; + goto unlock; + } +- page = pmd_page(*pmd); +- if (is_huge_zero_page(page)) { ++ folio = pfn_folio(pmd_pfn(*pmd)); ++ if (is_huge_zero_page(&folio->page)) { + walk->action = ACTION_CONTINUE; + goto unlock; + } +- if (!queue_pages_required(page, qp)) ++ if (!queue_pages_required(&folio->page, qp)) + goto unlock; + + flags = qp->flags; +- /* go to thp migration */ ++ /* go to folio migration */ + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { + if (!vma_migratable(walk->vma) || +- migrate_page_add(page, qp->pagelist, flags)) { +- ret = 1; ++ migrate_folio_add(folio, qp->pagelist, flags)) { ++ qp->has_unmovable = true; + goto unlock; + } + } else +@@ -491,28 +491,26 @@ unlock: + * Scan through pages checking if pages follow certain conditions, + * and move them to the pagelist if they do. + * +- * queue_pages_pte_range() has three possible return values: +- * 0 - pages are placed on the right node or queued successfully, or +- * special page is met, i.e. zero page. +- * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were +- * specified. +- * -EIO - only MPOL_MF_STRICT was specified and an existing page was already ++ * queue_folios_pte_range() has three possible return values: ++ * 0 - folios are placed on the right node or queued successfully, or ++ * special page is met, i.e. zero page, or unmovable page is found ++ * but continue walking (indicated by queue_pages.has_unmovable). ++ * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already + * on a node that does not follow the policy. + */ +-static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, ++static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) + { + struct vm_area_struct *vma = walk->vma; +- struct page *page; ++ struct folio *folio; + struct queue_pages *qp = walk->private; + unsigned long flags = qp->flags; +- bool has_unmovable = false; + pte_t *pte, *mapped_pte; + spinlock_t *ptl; + + ptl = pmd_trans_huge_lock(pmd, vma); + if (ptl) +- return queue_pages_pmd(pmd, ptl, addr, end, walk); ++ return queue_folios_pmd(pmd, ptl, addr, end, walk); + + if (pmd_trans_unstable(pmd)) + return 0; +@@ -521,40 +519,38 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, + for (; addr != end; pte++, addr += PAGE_SIZE) { + if (!pte_present(*pte)) + continue; +- page = vm_normal_page(vma, addr, *pte); +- if (!page || is_zone_device_page(page)) ++ folio = vm_normal_folio(vma, addr, *pte); ++ if (!folio || folio_is_zone_device(folio)) + continue; + /* +- * vm_normal_page() filters out zero pages, but there might +- * still be PageReserved pages to skip, perhaps in a VDSO. ++ * vm_normal_folio() filters out zero pages, but there might ++ * still be reserved folios to skip, perhaps in a VDSO. 
+ */ +- if (PageReserved(page)) ++ if (folio_test_reserved(folio)) + continue; +- if (!queue_pages_required(page, qp)) ++ if (!queue_pages_required(&folio->page, qp)) + continue; + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { +- /* MPOL_MF_STRICT must be specified if we get here */ +- if (!vma_migratable(vma)) { +- has_unmovable = true; +- break; +- } ++ /* ++ * MPOL_MF_STRICT must be specified if we get here. ++ * Continue walking vmas due to MPOL_MF_MOVE* flags. ++ */ ++ if (!vma_migratable(vma)) ++ qp->has_unmovable = true; + + /* + * Do not abort immediately since there may be + * temporary off LRU pages in the range. Still + * need migrate other LRU pages. + */ +- if (migrate_page_add(page, qp->pagelist, flags)) +- has_unmovable = true; ++ if (migrate_folio_add(folio, qp->pagelist, flags)) ++ qp->has_unmovable = true; + } else + break; + } + pte_unmap_unlock(mapped_pte, ptl); + cond_resched(); + +- if (has_unmovable) +- return 1; +- + return addr != end ? -EIO : 0; + } + +@@ -594,7 +590,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, + * Detecting misplaced page but allow migrating pages which + * have been queued. + */ +- ret = 1; ++ qp->has_unmovable = true; + goto unlock; + } + +@@ -608,7 +604,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, + * Failed to isolate page but allow migrating pages + * which have been queued. + */ +- ret = 1; ++ qp->has_unmovable = true; + } + unlock: + spin_unlock(ptl); +@@ -705,7 +701,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, + + static const struct mm_walk_ops queue_pages_walk_ops = { + .hugetlb_entry = queue_pages_hugetlb, +- .pmd_entry = queue_pages_pte_range, ++ .pmd_entry = queue_folios_pte_range, + .test_walk = queue_pages_test_walk, + }; + +@@ -737,10 +733,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, + .start = start, + .end = end, + .first = NULL, ++ .has_unmovable = false, + }; + + err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); + ++ if (qp.has_unmovable) ++ err = 1; + if (!qp.first) + /* whole range in hole */ + err = -EFAULT; +@@ -1012,27 +1011,28 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, + } + + #ifdef CONFIG_MIGRATION +-/* +- * page migration, thp tail pages can be passed. +- */ +-static int migrate_page_add(struct page *page, struct list_head *pagelist, ++static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, + unsigned long flags) + { +- struct page *head = compound_head(page); + /* +- * Avoid migrating a page that is shared with others. ++ * We try to migrate only unshared folios. If it is shared it ++ * is likely not worth migrating. ++ * ++ * To check if the folio is shared, ideally we want to make sure ++ * every page is mapped to the same process. Doing that is very ++ * expensive, so check the estimated mapcount of the folio instead. + */ +- if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { +- if (!isolate_lru_page(head)) { +- list_add_tail(&head->lru, pagelist); +- mod_node_page_state(page_pgdat(head), +- NR_ISOLATED_ANON + page_is_file_lru(head), +- thp_nr_pages(head)); ++ if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { ++ if (!folio_isolate_lru(folio)) { ++ list_add_tail(&folio->lru, foliolist); ++ node_stat_mod_folio(folio, ++ NR_ISOLATED_ANON + folio_is_file_lru(folio), ++ folio_nr_pages(folio)); + } else if (flags & MPOL_MF_STRICT) { + /* +- * Non-movable page may reach here. 
And, there may be +- * temporary off LRU pages or non-LRU movable pages. +- * Treat them as unmovable pages since they can't be ++ * Non-movable folio may reach here. And, there may be ++ * temporary off LRU folios or non-LRU movable folios. ++ * Treat them as unmovable folios since they can't be + * isolated, so they can't be moved at the moment. It + * should return -EIO for this case too. + */ +@@ -1224,7 +1224,7 @@ static struct page *new_page(struct page *page, unsigned long start) + } + #else + +-static int migrate_page_add(struct page *page, struct list_head *pagelist, ++static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, + unsigned long flags) + { + return -EIO; +@@ -1337,7 +1337,7 @@ static long do_mbind(unsigned long start, unsigned long len, + putback_movable_pages(&pagelist); + } + +- if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) ++ if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT)) + err = -EIO; + } else { + up_out: +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 69668817fed37..ca017c6008b7c 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -170,21 +170,12 @@ static DEFINE_MUTEX(pcp_batch_high_lock); + _ret; \ + }) + +-#define pcpu_spin_lock_irqsave(type, member, ptr, flags) \ ++#define pcpu_spin_trylock(type, member, ptr) \ + ({ \ + type *_ret; \ + pcpu_task_pin(); \ + _ret = this_cpu_ptr(ptr); \ +- spin_lock_irqsave(&_ret->member, flags); \ +- _ret; \ +-}) +- +-#define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \ +-({ \ +- type *_ret; \ +- pcpu_task_pin(); \ +- _ret = this_cpu_ptr(ptr); \ +- if (!spin_trylock_irqsave(&_ret->member, flags)) { \ ++ if (!spin_trylock(&_ret->member)) { \ + pcpu_task_unpin(); \ + _ret = NULL; \ + } \ +@@ -197,27 +188,16 @@ static DEFINE_MUTEX(pcp_batch_high_lock); + pcpu_task_unpin(); \ + }) + +-#define pcpu_spin_unlock_irqrestore(member, ptr, flags) \ +-({ \ +- spin_unlock_irqrestore(&ptr->member, flags); \ +- pcpu_task_unpin(); \ +-}) +- + /* struct per_cpu_pages specific helpers. */ + #define pcp_spin_lock(ptr) \ + pcpu_spin_lock(struct per_cpu_pages, lock, ptr) + +-#define pcp_spin_lock_irqsave(ptr, flags) \ +- pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags) +- +-#define pcp_spin_trylock_irqsave(ptr, flags) \ +- pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags) ++#define pcp_spin_trylock(ptr) \ ++ pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) + + #define pcp_spin_unlock(ptr) \ + pcpu_spin_unlock(lock, ptr) + +-#define pcp_spin_unlock_irqrestore(ptr, flags) \ +- pcpu_spin_unlock_irqrestore(lock, ptr, flags) + #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID + DEFINE_PER_CPU(int, numa_node); + EXPORT_PER_CPU_SYMBOL(numa_node); +@@ -1548,6 +1528,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, + struct per_cpu_pages *pcp, + int pindex) + { ++ unsigned long flags; + int min_pindex = 0; + int max_pindex = NR_PCP_LISTS - 1; + unsigned int order; +@@ -1563,8 +1544,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, + /* Ensure requested pindex is drained first. */ + pindex = pindex - 1; + +- /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. 
*/ +- spin_lock(&zone->lock); ++ spin_lock_irqsave(&zone->lock, flags); + isolated_pageblocks = has_isolate_pageblock(zone); + + while (count > 0) { +@@ -1612,7 +1592,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, + } while (count > 0 && !list_empty(list)); + } + +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + } + + static void free_one_page(struct zone *zone, +@@ -3126,10 +3106,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, + unsigned long count, struct list_head *list, + int migratetype, unsigned int alloc_flags) + { ++ unsigned long flags; + int i, allocated = 0; + +- /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */ +- spin_lock(&zone->lock); ++ spin_lock_irqsave(&zone->lock, flags); + for (i = 0; i < count; ++i) { + struct page *page = __rmqueue(zone, order, migratetype, + alloc_flags); +@@ -3163,7 +3143,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, + * pages added to the pcp list. + */ + __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + return allocated; + } + +@@ -3180,16 +3160,9 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + batch = READ_ONCE(pcp->batch); + to_drain = min(pcp->count, batch); + if (to_drain > 0) { +- unsigned long flags; +- +- /* +- * free_pcppages_bulk expects IRQs disabled for zone->lock +- * so even though pcp->lock is not intended to be IRQ-safe, +- * it's needed in this context. +- */ +- spin_lock_irqsave(&pcp->lock, flags); ++ spin_lock(&pcp->lock); + free_pcppages_bulk(zone, to_drain, pcp, 0); +- spin_unlock_irqrestore(&pcp->lock, flags); ++ spin_unlock(&pcp->lock); + } + } + #endif +@@ -3203,12 +3176,9 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) + + pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); + if (pcp->count) { +- unsigned long flags; +- +- /* See drain_zone_pages on why this is disabling IRQs */ +- spin_lock_irqsave(&pcp->lock, flags); ++ spin_lock(&pcp->lock); + free_pcppages_bulk(zone, pcp->count, pcp, 0); +- spin_unlock_irqrestore(&pcp->lock, flags); ++ spin_unlock(&pcp->lock); + } + } + +@@ -3474,12 +3444,11 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, + */ + void free_unref_page(struct page *page, unsigned int order) + { +- unsigned long flags; + unsigned long __maybe_unused UP_flags; + struct per_cpu_pages *pcp; + struct zone *zone; + unsigned long pfn = page_to_pfn(page); +- int migratetype; ++ int migratetype, pcpmigratetype; + + if (!free_unref_page_prepare(page, pfn, order)) + return; +@@ -3487,25 +3456,25 @@ void free_unref_page(struct page *page, unsigned int order) + /* + * We only track unmovable, reclaimable and movable on pcp lists. + * Place ISOLATE pages on the isolated list because they are being +- * offlined but treat HIGHATOMIC as movable pages so we can get those +- * areas back if necessary. Otherwise, we may have to free ++ * offlined but treat HIGHATOMIC and CMA as movable pages so we can ++ * get those areas back if necessary. 
Otherwise, we may have to free + * excessively into the page allocator + */ +- migratetype = get_pcppage_migratetype(page); ++ migratetype = pcpmigratetype = get_pcppage_migratetype(page); + if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { + if (unlikely(is_migrate_isolate(migratetype))) { + free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); + return; + } +- migratetype = MIGRATE_MOVABLE; ++ pcpmigratetype = MIGRATE_MOVABLE; + } + + zone = page_zone(page); + pcp_trylock_prepare(UP_flags); +- pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); ++ pcp = pcp_spin_trylock(zone->per_cpu_pageset); + if (pcp) { +- free_unref_page_commit(zone, pcp, page, migratetype, order); +- pcp_spin_unlock_irqrestore(pcp, flags); ++ free_unref_page_commit(zone, pcp, page, pcpmigratetype, order); ++ pcp_spin_unlock(pcp); + } else { + free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); + } +@@ -3517,10 +3486,10 @@ void free_unref_page(struct page *page, unsigned int order) + */ + void free_unref_page_list(struct list_head *list) + { ++ unsigned long __maybe_unused UP_flags; + struct page *page, *next; + struct per_cpu_pages *pcp = NULL; + struct zone *locked_zone = NULL; +- unsigned long flags; + int batch_count = 0; + int migratetype; + +@@ -3547,20 +3516,37 @@ void free_unref_page_list(struct list_head *list) + list_for_each_entry_safe(page, next, list, lru) { + struct zone *zone = page_zone(page); + ++ list_del(&page->lru); ++ migratetype = get_pcppage_migratetype(page); ++ + /* Different zone, different pcp lock. */ + if (zone != locked_zone) { +- if (pcp) +- pcp_spin_unlock_irqrestore(pcp, flags); ++ if (pcp) { ++ pcp_spin_unlock(pcp); ++ pcp_trylock_finish(UP_flags); ++ } + ++ /* ++ * trylock is necessary as pages may be getting freed ++ * from IRQ or SoftIRQ context after an IO completion. ++ */ ++ pcp_trylock_prepare(UP_flags); ++ pcp = pcp_spin_trylock(zone->per_cpu_pageset); ++ if (unlikely(!pcp)) { ++ pcp_trylock_finish(UP_flags); ++ free_one_page(zone, page, page_to_pfn(page), ++ 0, migratetype, FPI_NONE); ++ locked_zone = NULL; ++ continue; ++ } + locked_zone = zone; +- pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags); ++ batch_count = 0; + } + + /* + * Non-isolated types over MIGRATE_PCPTYPES get added + * to the MIGRATE_MOVABLE pcp list. + */ +- migratetype = get_pcppage_migratetype(page); + if (unlikely(migratetype >= MIGRATE_PCPTYPES)) + migratetype = MIGRATE_MOVABLE; + +@@ -3568,18 +3554,23 @@ void free_unref_page_list(struct list_head *list) + free_unref_page_commit(zone, pcp, page, migratetype, 0); + + /* +- * Guard against excessive IRQ disabled times when we get +- * a large list of pages to free. ++ * Guard against excessive lock hold times when freeing ++ * a large list of pages. Lock will be reacquired if ++ * necessary on the next iteration. 
+ */ + if (++batch_count == SWAP_CLUSTER_MAX) { +- pcp_spin_unlock_irqrestore(pcp, flags); ++ pcp_spin_unlock(pcp); ++ pcp_trylock_finish(UP_flags); + batch_count = 0; +- pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags); ++ pcp = NULL; ++ locked_zone = NULL; + } + } + +- if (pcp) +- pcp_spin_unlock_irqrestore(pcp, flags); ++ if (pcp) { ++ pcp_spin_unlock(pcp); ++ pcp_trylock_finish(UP_flags); ++ } + } + + /* +@@ -3780,15 +3771,11 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, + struct per_cpu_pages *pcp; + struct list_head *list; + struct page *page; +- unsigned long flags; + unsigned long __maybe_unused UP_flags; + +- /* +- * spin_trylock may fail due to a parallel drain. In the future, the +- * trylock will also protect against IRQ reentrancy. +- */ ++ /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ + pcp_trylock_prepare(UP_flags); +- pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); ++ pcp = pcp_spin_trylock(zone->per_cpu_pageset); + if (!pcp) { + pcp_trylock_finish(UP_flags); + return NULL; +@@ -3802,7 +3789,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, + pcp->free_factor >>= 1; + list = &pcp->lists[order_to_pindex(migratetype, order)]; + page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); +- pcp_spin_unlock_irqrestore(pcp, flags); ++ pcp_spin_unlock(pcp); + pcp_trylock_finish(UP_flags); + if (page) { + __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); +@@ -5373,7 +5360,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + struct page **page_array) + { + struct page *page; +- unsigned long flags; + unsigned long __maybe_unused UP_flags; + struct zone *zone; + struct zoneref *z; +@@ -5455,9 +5441,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + if (unlikely(!zone)) + goto failed; + +- /* Is a parallel drain in progress? */ ++ /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. 
*/ + pcp_trylock_prepare(UP_flags); +- pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); ++ pcp = pcp_spin_trylock(zone->per_cpu_pageset); + if (!pcp) + goto failed_irq; + +@@ -5476,7 +5462,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + if (unlikely(!page)) { + /* Try and allocate at least one page */ + if (!nr_account) { +- pcp_spin_unlock_irqrestore(pcp, flags); ++ pcp_spin_unlock(pcp); + goto failed_irq; + } + break; +@@ -5491,7 +5477,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + nr_populated++; + } + +- pcp_spin_unlock_irqrestore(pcp, flags); ++ pcp_spin_unlock(pcp); + pcp_trylock_finish(UP_flags); + + __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index fa4dd5fab0d44..d13b498f148cc 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -2783,6 +2783,7 @@ void hci_release_dev(struct hci_dev *hdev) + hci_conn_params_clear_all(hdev); + hci_discovery_filter_clear(hdev); + hci_blocked_keys_clear(hdev); ++ hci_codec_list_clear(&hdev->local_codecs); + hci_dev_unlock(hdev); + + ida_simple_remove(&hci_index_ida, hdev->id); +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 83eaf25ece465..e4d8857716eb7 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -32,6 +32,7 @@ + + #include "hci_request.h" + #include "hci_debugfs.h" ++#include "hci_codec.h" + #include "a2mp.h" + #include "amp.h" + #include "smp.h" +diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h +index b9c5a98238374..0be75cf0efed8 100644 +--- a/net/bluetooth/hci_request.h ++++ b/net/bluetooth/hci_request.h +@@ -71,7 +71,5 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, + void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn); + void hci_req_add_le_passive_scan(struct hci_request *req); + +-void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); +- + void hci_request_setup(struct hci_dev *hdev); + void hci_request_cancel_all(struct hci_dev *hdev); +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index 2ae038dfc39f7..5218c4dfe0a89 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -412,11 +412,6 @@ static int hci_le_scan_restart_sync(struct hci_dev *hdev) + LE_SCAN_FILTER_DUP_ENABLE); + } + +-static int le_scan_restart_sync(struct hci_dev *hdev, void *data) +-{ +- return hci_le_scan_restart_sync(hdev); +-} +- + static void le_scan_restart(struct work_struct *work) + { + struct hci_dev *hdev = container_of(work, struct hci_dev, +@@ -426,15 +421,15 @@ static void le_scan_restart(struct work_struct *work) + + bt_dev_dbg(hdev, ""); + +- hci_dev_lock(hdev); +- +- status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL); ++ status = hci_le_scan_restart_sync(hdev); + if (status) { + bt_dev_err(hdev, "failed to restart LE scan: status %d", + status); +- goto unlock; ++ return; + } + ++ hci_dev_lock(hdev); ++ + if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || + !hdev->discovery.scan_start) + goto unlock; +@@ -5033,6 +5028,7 @@ int hci_dev_close_sync(struct hci_dev *hdev) + memset(hdev->eir, 0, sizeof(hdev->eir)); + memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); + bacpy(&hdev->random_addr, BDADDR_ANY); ++ hci_codec_list_clear(&hdev->local_codecs); + + hci_dev_put(hdev); + return err; +diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c +index 5cd2e775915be..91e990accbf20 
100644 +--- a/net/bluetooth/iso.c ++++ b/net/bluetooth/iso.c +@@ -458,7 +458,7 @@ drop: + } + + /* -------- Socket interface ---------- */ +-static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba) ++static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst) + { + struct sock *sk; + +@@ -466,7 +466,10 @@ static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba) + if (sk->sk_state != BT_LISTEN) + continue; + +- if (!bacmp(&iso_pi(sk)->src, ba)) ++ if (bacmp(&iso_pi(sk)->dst, dst)) ++ continue; ++ ++ if (!bacmp(&iso_pi(sk)->src, src)) + return sk; + } + +@@ -910,7 +913,7 @@ static int iso_listen_cis(struct sock *sk) + + write_lock(&iso_sk_list.lock); + +- if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src)) ++ if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); +diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c +index e5e48c6e35d78..b45c00c01dea1 100644 +--- a/net/bridge/br_arp_nd_proxy.c ++++ b/net/bridge/br_arp_nd_proxy.c +@@ -192,7 +192,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, + if (n) { + struct net_bridge_fdb_entry *f; + +- if (!(n->nud_state & NUD_VALID)) { ++ if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { + neigh_release(n); + return; + } +@@ -452,7 +452,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, + if (n) { + struct net_bridge_fdb_entry *f; + +- if (!(n->nud_state & NUD_VALID)) { ++ if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { + neigh_release(n); + return; + } +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 812bd7e1750b6..01d690d9fe5f8 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -277,7 +277,8 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_ + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + int ret; + +- if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) { ++ if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) && ++ READ_ONCE(neigh->hh.hh_len)) { + neigh_hh_bridge(&neigh->hh, skb); + skb->dev = nf_bridge->physindev; + ret = br_handle_frame_finish(net, sk, skb); +@@ -293,7 +294,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_ + /* tell br_dev_xmit to continue with forwarding */ + nf_bridge->bridged_dnat = 1; + /* FIXME Need to refragment */ +- ret = neigh->output(neigh, skb); ++ ret = READ_ONCE(neigh->output)(neigh, skb); + } + neigh_release(neigh); + return ret; +diff --git a/net/core/filter.c b/net/core/filter.c +index 9fd7c88b5db4e..adc327f4af1e9 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -2197,7 +2197,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, + return -ENOMEM; + } + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + if (!nh) { + dst = skb_dst(skb); + nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst), +@@ -2210,10 +2210,12 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, + int ret; + + sock_confirm_neigh(skb, neigh); ++ local_bh_disable(); + dev_xmit_recursion_inc(); + ret = neigh_output(neigh, skb, false); + dev_xmit_recursion_dec(); +- rcu_read_unlock_bh(); ++ local_bh_enable(); ++ rcu_read_unlock(); + return ret; + } + rcu_read_unlock_bh(); +@@ -2295,7 +2297,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, + return -ENOMEM; + } + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + if (!nh) { + struct dst_entry *dst = 
skb_dst(skb); + struct rtable *rt = container_of(dst, struct rtable, dst); +@@ -2307,7 +2309,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, + } else if (nh->nh_family == AF_INET) { + neigh = ip_neigh_gw4(dev, nh->ipv4_nh); + } else { +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + goto out_drop; + } + +@@ -2315,13 +2317,15 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, + int ret; + + sock_confirm_neigh(skb, neigh); ++ local_bh_disable(); + dev_xmit_recursion_inc(); + ret = neigh_output(neigh, skb, is_v6gw); + dev_xmit_recursion_dec(); +- rcu_read_unlock_bh(); ++ local_bh_enable(); ++ rcu_read_unlock(); + return ret; + } +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + out_drop: + kfree_skb(skb); + return -ENETDOWN; +@@ -5674,12 +5678,8 @@ static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { + #endif + + #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) +-static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, +- const struct neighbour *neigh, +- const struct net_device *dev, u32 mtu) ++static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu) + { +- memcpy(params->dmac, neigh->ha, ETH_ALEN); +- memcpy(params->smac, dev->dev_addr, ETH_ALEN); + params->h_vlan_TCI = 0; + params->h_vlan_proto = 0; + if (mtu) +@@ -5790,21 +5790,29 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, + if (likely(nhc->nhc_gw_family != AF_INET6)) { + if (nhc->nhc_gw_family) + params->ipv4_dst = nhc->nhc_gw.ipv4; +- +- neigh = __ipv4_neigh_lookup_noref(dev, +- (__force u32)params->ipv4_dst); + } else { + struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; + + params->family = AF_INET6; + *dst = nhc->nhc_gw.ipv6; +- neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); + } + +- if (!neigh || !(neigh->nud_state & NUD_VALID)) ++ if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH) ++ goto set_fwd_params; ++ ++ if (likely(nhc->nhc_gw_family != AF_INET6)) ++ neigh = __ipv4_neigh_lookup_noref(dev, ++ (__force u32)params->ipv4_dst); ++ else ++ neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst); ++ ++ if (!neigh || !(READ_ONCE(neigh->nud_state) & NUD_VALID)) + return BPF_FIB_LKUP_RET_NO_NEIGH; ++ memcpy(params->dmac, neigh->ha, ETH_ALEN); ++ memcpy(params->smac, dev->dev_addr, ETH_ALEN); + +- return bpf_fib_set_fwd_params(params, neigh, dev, mtu); ++set_fwd_params: ++ return bpf_fib_set_fwd_params(params, mtu); + } + #endif + +@@ -5912,24 +5920,33 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, + params->rt_metric = res.f6i->fib6_metric; + params->ifindex = dev->ifindex; + ++ if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH) ++ goto set_fwd_params; ++ + /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is + * not needed here. 
+ */ + neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); +- if (!neigh || !(neigh->nud_state & NUD_VALID)) ++ if (!neigh || !(READ_ONCE(neigh->nud_state) & NUD_VALID)) + return BPF_FIB_LKUP_RET_NO_NEIGH; ++ memcpy(params->dmac, neigh->ha, ETH_ALEN); ++ memcpy(params->smac, dev->dev_addr, ETH_ALEN); + +- return bpf_fib_set_fwd_params(params, neigh, dev, mtu); ++set_fwd_params: ++ return bpf_fib_set_fwd_params(params, mtu); + } + #endif + ++#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \ ++ BPF_FIB_LOOKUP_SKIP_NEIGH) ++ + BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, + struct bpf_fib_lookup *, params, int, plen, u32, flags) + { + if (plen < sizeof(*params)) + return -EINVAL; + +- if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) ++ if (flags & ~BPF_FIB_LOOKUP_MASK) + return -EINVAL; + + switch (params->family) { +@@ -5967,7 +5984,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, + if (plen < sizeof(*params)) + return -EINVAL; + +- if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) ++ if (flags & ~BPF_FIB_LOOKUP_MASK) + return -EINVAL; + + if (params->tot_len) +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 6c0f2149f2c72..b20c9768d9f3f 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -410,7 +410,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev, + */ + __skb_queue_purge(&n->arp_queue); + n->arp_queue_len_bytes = 0; +- n->output = neigh_blackhole; ++ WRITE_ONCE(n->output, neigh_blackhole); + if (n->nud_state & NUD_VALID) + n->nud_state = NUD_NOARP; + else +@@ -614,7 +614,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, + + NEIGH_CACHE_STAT_INC(tbl, lookups); + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __neigh_lookup_noref(tbl, pkey, dev); + if (n) { + if (!refcount_inc_not_zero(&n->refcnt)) +@@ -622,7 +622,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, + NEIGH_CACHE_STAT_INC(tbl, hits); + } + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + return n; + } + EXPORT_SYMBOL(neigh_lookup); +@@ -920,7 +920,7 @@ static void neigh_suspect(struct neighbour *neigh) + { + neigh_dbg(2, "neigh %p is suspected\n", neigh); + +- neigh->output = neigh->ops->output; ++ WRITE_ONCE(neigh->output, neigh->ops->output); + } + + /* Neighbour state is OK; +@@ -932,7 +932,7 @@ static void neigh_connect(struct neighbour *neigh) + { + neigh_dbg(2, "neigh %p is connected\n", neigh); + +- neigh->output = neigh->ops->connected_output; ++ WRITE_ONCE(neigh->output, neigh->ops->connected_output); + } + + static void neigh_periodic_work(struct work_struct *work) +@@ -988,7 +988,9 @@ static void neigh_periodic_work(struct work_struct *work) + (state == NUD_FAILED || + !time_in_range_open(jiffies, n->used, + n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) { +- *np = n->next; ++ rcu_assign_pointer(*np, ++ rcu_dereference_protected(n->next, ++ lockdep_is_held(&tbl->lock))); + neigh_mark_dead(n); + write_unlock(&n->lock); + neigh_cleanup_and_release(n); +@@ -1093,13 +1095,13 @@ static void neigh_timer_handler(struct timer_list *t) + neigh->used + + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) { + neigh_dbg(2, "neigh %p is delayed\n", neigh); +- neigh->nud_state = NUD_DELAY; ++ WRITE_ONCE(neigh->nud_state, NUD_DELAY); + neigh->updated = jiffies; + neigh_suspect(neigh); + next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME); + } else { + neigh_dbg(2, "neigh %p is suspected\n", neigh); +- neigh->nud_state = NUD_STALE; ++ 
WRITE_ONCE(neigh->nud_state, NUD_STALE); + neigh->updated = jiffies; + neigh_suspect(neigh); + notify = 1; +@@ -1109,14 +1111,14 @@ static void neigh_timer_handler(struct timer_list *t) + neigh->confirmed + + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) { + neigh_dbg(2, "neigh %p is now reachable\n", neigh); +- neigh->nud_state = NUD_REACHABLE; ++ WRITE_ONCE(neigh->nud_state, NUD_REACHABLE); + neigh->updated = jiffies; + neigh_connect(neigh); + notify = 1; + next = neigh->confirmed + neigh->parms->reachable_time; + } else { + neigh_dbg(2, "neigh %p is probed\n", neigh); +- neigh->nud_state = NUD_PROBE; ++ WRITE_ONCE(neigh->nud_state, NUD_PROBE); + neigh->updated = jiffies; + atomic_set(&neigh->probes, 0); + notify = 1; +@@ -1130,7 +1132,7 @@ static void neigh_timer_handler(struct timer_list *t) + + if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && + atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { +- neigh->nud_state = NUD_FAILED; ++ WRITE_ONCE(neigh->nud_state, NUD_FAILED); + notify = 1; + neigh_invalidate(neigh); + goto out; +@@ -1179,7 +1181,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, + atomic_set(&neigh->probes, + NEIGH_VAR(neigh->parms, UCAST_PROBES)); + neigh_del_timer(neigh); +- neigh->nud_state = NUD_INCOMPLETE; ++ WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE); + neigh->updated = now; + if (!immediate_ok) { + next = now + 1; +@@ -1191,7 +1193,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, + } + neigh_add_timer(neigh, next); + } else { +- neigh->nud_state = NUD_FAILED; ++ WRITE_ONCE(neigh->nud_state, NUD_FAILED); + neigh->updated = jiffies; + write_unlock_bh(&neigh->lock); + +@@ -1201,7 +1203,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, + } else if (neigh->nud_state & NUD_STALE) { + neigh_dbg(2, "neigh %p is delayed\n", neigh); + neigh_del_timer(neigh); +- neigh->nud_state = NUD_DELAY; ++ WRITE_ONCE(neigh->nud_state, NUD_DELAY); + neigh->updated = jiffies; + neigh_add_timer(neigh, jiffies + + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME)); +@@ -1313,7 +1315,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + neigh_update_flags(neigh, flags, ¬ify, &gc_update, &managed_update); + if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) { + new = old & ~NUD_PERMANENT; +- neigh->nud_state = new; ++ WRITE_ONCE(neigh->nud_state, new); + err = 0; + goto out; + } +@@ -1322,7 +1324,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + neigh_del_timer(neigh); + if (old & NUD_CONNECTED) + neigh_suspect(neigh); +- neigh->nud_state = new; ++ WRITE_ONCE(neigh->nud_state, new); + err = 0; + notify = old & NUD_VALID; + if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && +@@ -1401,7 +1403,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + ((new & NUD_REACHABLE) ? 
+ neigh->parms->reachable_time : + 0))); +- neigh->nud_state = new; ++ WRITE_ONCE(neigh->nud_state, new); + notify = 1; + } + +@@ -1447,7 +1449,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + if (n2) + n1 = n2; + } +- n1->output(n1, skb); ++ READ_ONCE(n1->output)(n1, skb); + if (n2) + neigh_release(n2); + rcu_read_unlock(); +@@ -1488,7 +1490,7 @@ void __neigh_set_probe_once(struct neighbour *neigh) + neigh->updated = jiffies; + if (!(neigh->nud_state & NUD_FAILED)) + return; +- neigh->nud_state = NUD_INCOMPLETE; ++ WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE); + atomic_set(&neigh->probes, neigh_max_probes(neigh)); + neigh_add_timer(neigh, + jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), +@@ -2174,11 +2176,11 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, + .ndtc_proxy_qlen = tbl->proxy_queue.qlen, + }; + +- rcu_read_lock_bh(); +- nht = rcu_dereference_bh(tbl->nht); ++ rcu_read_lock(); ++ nht = rcu_dereference(tbl->nht); + ndc.ndtc_hash_rnd = nht->hash_rnd[0]; + ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) + goto nla_put_failure; +@@ -2693,15 +2695,15 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, + if (filter->dev_idx || filter->master_idx) + flags |= NLM_F_DUMP_FILTERED; + +- rcu_read_lock_bh(); +- nht = rcu_dereference_bh(tbl->nht); ++ rcu_read_lock(); ++ nht = rcu_dereference(tbl->nht); + + for (h = s_h; h < (1 << nht->hash_shift); h++) { + if (h > s_h) + s_idx = 0; +- for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; ++ for (n = rcu_dereference(nht->hash_buckets[h]), idx = 0; + n != NULL; +- n = rcu_dereference_bh(n->next)) { ++ n = rcu_dereference(n->next)) { + if (idx < s_idx || !net_eq(dev_net(n->dev), net)) + goto next; + if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || +@@ -2720,7 +2722,7 @@ next: + } + rc = skb->len; + out: +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + cb->args[1] = h; + cb->args[2] = idx; + return rc; +@@ -3065,20 +3067,20 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void + int chain; + struct neigh_hash_table *nht; + +- rcu_read_lock_bh(); +- nht = rcu_dereference_bh(tbl->nht); ++ rcu_read_lock(); ++ nht = rcu_dereference(tbl->nht); + +- read_lock(&tbl->lock); /* avoid resizes */ ++ read_lock_bh(&tbl->lock); /* avoid resizes */ + for (chain = 0; chain < (1 << nht->hash_shift); chain++) { + struct neighbour *n; + +- for (n = rcu_dereference_bh(nht->hash_buckets[chain]); ++ for (n = rcu_dereference(nht->hash_buckets[chain]); + n != NULL; +- n = rcu_dereference_bh(n->next)) ++ n = rcu_dereference(n->next)) + cb(n, cookie); + } +- read_unlock(&tbl->lock); +- rcu_read_unlock_bh(); ++ read_unlock_bh(&tbl->lock); ++ rcu_read_unlock(); + } + EXPORT_SYMBOL(neigh_for_each); + +@@ -3128,7 +3130,7 @@ int neigh_xmit(int index, struct net_device *dev, + tbl = neigh_tables[index]; + if (!tbl) + goto out; +- rcu_read_lock_bh(); ++ rcu_read_lock(); + if (index == NEIGH_ARP_TABLE) { + u32 key = *((u32 *)addr); + +@@ -3140,11 +3142,11 @@ int neigh_xmit(int index, struct net_device *dev, + neigh = __neigh_create(tbl, addr, dev, false); + err = PTR_ERR(neigh); + if (IS_ERR(neigh)) { +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + goto out_kfree_skb; + } +- err = neigh->output(neigh, skb); +- rcu_read_unlock_bh(); ++ err = READ_ONCE(neigh->output)(neigh, skb); ++ rcu_read_unlock(); + } + else if (index == NEIGH_LINK_TABLE) { + err 
= dev_hard_header(skb, dev, ntohs(skb->protocol), +@@ -3173,7 +3175,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq) + + state->flags &= ~NEIGH_SEQ_IS_PNEIGH; + for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { +- n = rcu_dereference_bh(nht->hash_buckets[bucket]); ++ n = rcu_dereference(nht->hash_buckets[bucket]); + + while (n) { + if (!net_eq(dev_net(n->dev), net)) +@@ -3188,10 +3190,10 @@ static struct neighbour *neigh_get_first(struct seq_file *seq) + } + if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) + break; +- if (n->nud_state & ~NUD_NOARP) ++ if (READ_ONCE(n->nud_state) & ~NUD_NOARP) + break; + next: +- n = rcu_dereference_bh(n->next); ++ n = rcu_dereference(n->next); + } + + if (n) +@@ -3215,7 +3217,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq, + if (v) + return n; + } +- n = rcu_dereference_bh(n->next); ++ n = rcu_dereference(n->next); + + while (1) { + while (n) { +@@ -3230,10 +3232,10 @@ static struct neighbour *neigh_get_next(struct seq_file *seq, + if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) + break; + +- if (n->nud_state & ~NUD_NOARP) ++ if (READ_ONCE(n->nud_state) & ~NUD_NOARP) + break; + next: +- n = rcu_dereference_bh(n->next); ++ n = rcu_dereference(n->next); + } + + if (n) +@@ -3242,7 +3244,7 @@ next: + if (++state->bucket >= (1 << nht->hash_shift)) + break; + +- n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); ++ n = rcu_dereference(nht->hash_buckets[state->bucket]); + } + + if (n && pos) +@@ -3344,7 +3346,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) + + void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) + __acquires(tbl->lock) +- __acquires(rcu_bh) ++ __acquires(rcu) + { + struct neigh_seq_state *state = seq->private; + +@@ -3352,9 +3354,9 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl + state->bucket = 0; + state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); + +- rcu_read_lock_bh(); +- state->nht = rcu_dereference_bh(tbl->nht); +- read_lock(&tbl->lock); ++ rcu_read_lock(); ++ state->nht = rcu_dereference(tbl->nht); ++ read_lock_bh(&tbl->lock); + + return *pos ? 
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; + } +@@ -3389,13 +3391,13 @@ EXPORT_SYMBOL(neigh_seq_next); + + void neigh_seq_stop(struct seq_file *seq, void *v) + __releases(tbl->lock) +- __releases(rcu_bh) ++ __releases(rcu) + { + struct neigh_seq_state *state = seq->private; + struct neigh_table *tbl = state->tbl; + +- read_unlock(&tbl->lock); +- rcu_read_unlock_bh(); ++ read_unlock_bh(&tbl->lock); ++ rcu_read_unlock(); + } + EXPORT_SYMBOL(neigh_seq_stop); + +diff --git a/net/core/sock_map.c b/net/core/sock_map.c +index 96db7409baa12..38e01f82f2ef3 100644 +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -670,6 +670,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg, + sk = __sock_map_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) + return SK_DROP; ++ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk)) ++ return SK_DROP; + + msg->flags = flags; + msg->sk_redir = sk; +@@ -1262,6 +1264,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg, + sk = __sock_hash_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) + return SK_DROP; ++ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk)) ++ return SK_DROP; + + msg->flags = flags; + msg->sk_redir = sk; +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c +index 4f7237661afb9..9456f5bb35e5d 100644 +--- a/net/ipv4/arp.c ++++ b/net/ipv4/arp.c +@@ -375,7 +375,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) + + probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES); + if (probes < 0) { +- if (!(neigh->nud_state & NUD_VALID)) ++ if (!(READ_ONCE(neigh->nud_state) & NUD_VALID)) + pr_debug("trying to ucast probe in NUD_INVALID\n"); + neigh_ha_snapshot(dst_ha, neigh, dev); + dst_hw = dst_ha; +@@ -1123,7 +1123,7 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) + + neigh = neigh_lookup(&arp_tbl, &ip, dev); + if (neigh) { +- if (!(neigh->nud_state & NUD_NOARP)) { ++ if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) { + read_lock_bh(&neigh->lock); + memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); + r->arp_flags = arp_state_to_flags(neigh); +@@ -1144,12 +1144,12 @@ int arp_invalidate(struct net_device *dev, __be32 ip, bool force) + struct neigh_table *tbl = &arp_tbl; + + if (neigh) { +- if ((neigh->nud_state & NUD_VALID) && !force) { ++ if ((READ_ONCE(neigh->nud_state) & NUD_VALID) && !force) { + neigh_release(neigh); + return 0; + } + +- if (neigh->nud_state & ~NUD_NOARP) ++ if (READ_ONCE(neigh->nud_state) & ~NUD_NOARP) + err = neigh_update(neigh, NULL, NUD_FAILED, + NEIGH_UPDATE_F_OVERRIDE| + NEIGH_UPDATE_F_ADMIN, 0); +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 3b6e6bc80dc1c..eafa4a0335157 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -564,7 +564,7 @@ static int fib_detect_death(struct fib_info *fi, int order, + n = NULL; + + if (n) { +- state = n->nud_state; ++ state = READ_ONCE(n->nud_state); + neigh_release(n); + } else { + return 0; +@@ -2194,7 +2194,7 @@ static bool fib_good_nh(const struct fib_nh *nh) + if (nh->fib_nh_scope == RT_SCOPE_LINK) { + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + + if (likely(nh->fib_nh_gw_family == AF_INET)) + n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, +@@ -2205,9 +2205,9 @@ static bool fib_good_nh(const struct fib_nh *nh) + else + n = NULL; + if (n) +- state = n->nud_state; ++ state = READ_ONCE(n->nud_state); + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + } + + return !!(state & NUD_VALID); +diff --git a/net/ipv4/ip_output.c 
b/net/ipv4/ip_output.c +index 66908ce2dd116..493c679ea54f3 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -218,7 +218,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s + return res; + } + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); + if (!IS_ERR(neigh)) { + int res; +@@ -226,10 +226,10 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s + sock_confirm_neigh(skb, neigh); + /* if crossing protocols, can not use the cached header */ + res = neigh_output(neigh, skb, is_v6gw); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + return res; + } +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + net_dbg_ratelimited("%s: No header cache and no neighbour!\n", + __func__); +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index 9cc2879024541..be5498f5dd319 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -1124,13 +1124,13 @@ static bool ipv6_good_nh(const struct fib6_nh *nh) + int state = NUD_REACHABLE; + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + + n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); + if (n) +- state = n->nud_state; ++ state = READ_ONCE(n->nud_state); + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + return !!(state & NUD_VALID); + } +@@ -1140,14 +1140,14 @@ static bool ipv4_good_nh(const struct fib_nh *nh) + int state = NUD_REACHABLE; + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + + n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, + (__force u32)nh->fib_nh_gw4); + if (n) +- state = n->nud_state; ++ state = READ_ONCE(n->nud_state); + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + return !!(state & NUD_VALID); + } +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 84a0a71a6f4e7..9cbaae4f5ee71 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -408,7 +408,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, + struct net_device *dev = dst->dev; + struct neighbour *n; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + + if (likely(rt->rt_gw_family == AF_INET)) { + n = ip_neigh_gw4(dev, rt->rt_gw4); +@@ -424,7 +424,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, + if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt)) + n = NULL; + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + return n; + } +@@ -784,7 +784,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow + if (!n) + n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); + if (!IS_ERR(n)) { +- if (!(n->nud_state & NUD_VALID)) { ++ if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { + neigh_event_send(n, NULL); + } else { + if (fib_lookup(net, fl4, &res, 0) == 0) { +@@ -3421,6 +3421,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, + fa->fa_type == fri.type) { + fri.offload = READ_ONCE(fa->offload); + fri.trap = READ_ONCE(fa->trap); ++ fri.offload_failed = ++ READ_ONCE(fa->offload_failed); + break; + } + } +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index fab25d4f3a6f1..96fdde6e42b1b 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1755,16 +1755,13 @@ EXPORT_SYMBOL(tcp_read_sock); + + int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) + { +- struct tcp_sock *tp = tcp_sk(sk); +- u32 seq = tp->copied_seq; + struct sk_buff *skb; + int copied = 0; +- u32 offset; + + if (sk->sk_state == TCP_LISTEN) + return -ENOTCONN; + +- while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { ++ while 
((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { + u8 tcp_flags; + int used; + +@@ -1777,13 +1774,10 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) + copied = used; + break; + } +- seq += used; + copied += used; + +- if (tcp_flags & TCPHDR_FIN) { +- ++seq; ++ if (tcp_flags & TCPHDR_FIN) + break; +- } + } + return copied; + } +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index 5f93918c063c7..f53380fd89bcf 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -217,6 +217,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, + int *addr_len) + { + struct tcp_sock *tcp = tcp_sk(sk); ++ int peek = flags & MSG_PEEK; + u32 seq = tcp->copied_seq; + struct sk_psock *psock; + int copied = 0; +@@ -306,7 +307,8 @@ msg_bytes_ready: + copied = -EAGAIN; + } + out: +- WRITE_ONCE(tcp->copied_seq, seq); ++ if (!peek) ++ WRITE_ONCE(tcp->copied_seq, seq); + tcp_rcv_space_adjust(sk); + if (copied > 0) + __tcp_cleanup_rbuf(sk, copied); +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index c697836f2b5b4..068221e742425 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -243,6 +243,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) + if (unlikely(len > icsk->icsk_ack.rcv_mss + + MAX_TCP_OPTION_SPACE)) + tcp_gro_dev_warn(sk, skb, len); ++ /* If the skb has a len of exactly 1*MSS and has the PSH bit ++ * set then it is likely the end of an application write. So ++ * more data may not be arriving soon, and yet the data sender ++ * may be waiting for an ACK if cwnd-bound or using TX zero ++ * copy. So we set ICSK_ACK_PUSHED here so that ++ * tcp_cleanup_rbuf() will send an ACK immediately if the app ++ * reads all of the data and is not ping-pong. If len > MSS ++ * then this logic does not matter (and does not hurt) because ++ * tcp_cleanup_rbuf() will always ACK immediately if the app ++ * reads data and there is more than an MSS of unACKed data. ++ */ ++ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH) ++ icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; + } else { + /* Otherwise, we make more careful check taking into account, + * that SACKs block is variable. +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index dc3166e56169f..5921b0f6f9f41 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -177,8 +177,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp, + } + + /* Account for an ACK we sent. 
*/ +-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, +- u32 rcv_nxt) ++static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt) + { + struct tcp_sock *tp = tcp_sk(sk); + +@@ -192,7 +191,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, + + if (unlikely(rcv_nxt != tp->rcv_nxt)) + return; /* Special ACK sent by DCTCP to reflect ECN */ +- tcp_dec_quickack_mode(sk, pkts); ++ tcp_dec_quickack_mode(sk); + inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); + } + +@@ -1373,7 +1372,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, + sk, skb); + + if (likely(tcb->tcp_flags & TCPHDR_ACK)) +- tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); ++ tcp_event_ack_sent(sk, rcv_nxt); + + if (skb->len != tcp_header_size) { + tcp_event_data_sent(tp, sk); +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 83be842198244..c63ccd39fc552 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -202,6 +202,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { + .ra_defrtr_metric = IP6_RT_PRIO_USER, + .accept_ra_from_local = 0, + .accept_ra_min_hop_limit= 1, ++ .accept_ra_min_lft = 0, + .accept_ra_pinfo = 1, + #ifdef CONFIG_IPV6_ROUTER_PREF + .accept_ra_rtr_pref = 1, +@@ -262,6 +263,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { + .ra_defrtr_metric = IP6_RT_PRIO_USER, + .accept_ra_from_local = 0, + .accept_ra_min_hop_limit= 1, ++ .accept_ra_min_lft = 0, + .accept_ra_pinfo = 1, + #ifdef CONFIG_IPV6_ROUTER_PREF + .accept_ra_rtr_pref = 1, +@@ -1033,7 +1035,7 @@ static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa) + unsigned int hash = inet6_addr_hash(net, &ifa->addr); + int err = 0; + +- spin_lock(&net->ipv6.addrconf_hash_lock); ++ spin_lock_bh(&net->ipv6.addrconf_hash_lock); + + /* Ignore adding duplicate addresses on an interface */ + if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) { +@@ -1043,7 +1045,7 @@ static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa) + hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]); + } + +- spin_unlock(&net->ipv6.addrconf_hash_lock); ++ spin_unlock_bh(&net->ipv6.addrconf_hash_lock); + + return err; + } +@@ -1138,15 +1140,15 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, + /* For caller */ + refcount_set(&ifa->refcnt, 1); + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + + err = ipv6_add_addr_hash(idev->dev, ifa); + if (err < 0) { +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + goto out; + } + +- write_lock(&idev->lock); ++ write_lock_bh(&idev->lock); + + /* Add to inet6_dev unicast addr list. 
*/ + ipv6_link_dev_addr(idev, ifa); +@@ -1157,9 +1159,9 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, + } + + in6_ifa_hold(ifa); +- write_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + inet6addr_notifier_call_chain(NETDEV_UP, ifa); + out: +@@ -2731,6 +2733,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) + return; + } + ++ if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft) ++ goto put; ++ + /* + * Two things going on here: + * 1) Add routes for on-link prefixes +@@ -5601,6 +5606,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, + array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide; + array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier; + array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na; ++ array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft; + } + + static inline size_t inet6_ifla6_size(void) +@@ -6794,6 +6800,13 @@ static const struct ctl_table addrconf_sysctl[] = { + .mode = 0644, + .proc_handler = proc_dointvec, + }, ++ { ++ .procname = "accept_ra_min_lft", ++ .data = &ipv6_devconf.accept_ra_min_lft, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec, ++ }, + { + .procname = "accept_ra_pinfo", + .data = &ipv6_devconf.accept_ra_pinfo, +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 413f66781e50d..eb6640f9a7921 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -2492,7 +2492,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v) + const struct net_device *dev; + + if (rt->nh) +- fib6_nh = nexthop_fib6_nh_bh(rt->nh); ++ fib6_nh = nexthop_fib6_nh(rt->nh); + + seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); + +@@ -2557,14 +2557,14 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, + + if (tbl) { + h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1; +- node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist)); ++ node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist)); + } else { + h = 0; + node = NULL; + } + + while (!node && h < FIB6_TABLE_HASHSZ) { +- node = rcu_dereference_bh( ++ node = rcu_dereference( + hlist_first_rcu(&net->ipv6.fib_table_hash[h++])); + } + return hlist_entry_safe(node, struct fib6_table, tb6_hlist); +@@ -2594,7 +2594,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) + if (!v) + goto iter_table; + +- n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); ++ n = rcu_dereference(((struct fib6_info *)v)->fib6_next); + if (n) + return n; + +@@ -2620,12 +2620,12 @@ iter_table: + } + + static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) +- __acquires(RCU_BH) ++ __acquires(RCU) + { + struct net *net = seq_file_net(seq); + struct ipv6_route_iter *iter = seq->private; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + iter->tbl = ipv6_route_seq_next_table(NULL, net); + iter->skip = *pos; + +@@ -2646,7 +2646,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter) + } + + static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v) +- __releases(RCU_BH) ++ __releases(RCU) + { + struct net *net = seq_file_net(seq); + struct ipv6_route_iter *iter = seq->private; +@@ -2654,7 +2654,7 @@ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v) + if (ipv6_route_iter_active(iter)) + fib6_walker_unlink(net, &iter->w); + +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + } + + #if IS_BUILTIN(CONFIG_IPV6) && 
defined(CONFIG_BPF_SYSCALL) +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 34192f7a166fb..ce2c5e728745f 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -116,7 +116,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * + return res; + } + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + nexthop = rt6_nexthop((struct rt6_info *)dst, daddr); + neigh = __ipv6_neigh_lookup_noref(dev, nexthop); + +@@ -124,7 +124,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * + if (unlikely(!neigh)) + neigh = __neigh_create(&nd_tbl, nexthop, dev, false); + if (IS_ERR(neigh)) { +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL); + return -EINVAL; +@@ -132,7 +132,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * + } + sock_confirm_neigh(skb, neigh); + ret = neigh_output(neigh, skb, false); +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + return ret; + } + +@@ -1150,11 +1150,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, + * dst entry of the nexthop router + */ + rt = (struct rt6_info *) *dst; +- rcu_read_lock_bh(); ++ rcu_read_lock(); + n = __ipv6_neigh_lookup_noref(rt->dst.dev, + rt6_nexthop(rt, &fl6->daddr)); +- err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; +- rcu_read_unlock_bh(); ++ err = n && !(READ_ONCE(n->nud_state) & NUD_VALID) ? -EINVAL : 0; ++ rcu_read_unlock(); + + if (err) { + struct inet6_ifaddr *ifp; +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index a4d43eb45a9de..8c5a99fe68030 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -746,7 +746,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) + saddr = &ipv6_hdr(skb)->saddr; + probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES); + if (probes < 0) { +- if (!(neigh->nud_state & NUD_VALID)) { ++ if (!(READ_ONCE(neigh->nud_state) & NUD_VALID)) { + ND_PRINTK(1, dbg, + "%s: trying to ucast probe in NUD_INVALID: %pI6\n", + __func__, target); +@@ -1092,7 +1092,7 @@ static void ndisc_recv_na(struct sk_buff *skb) + u8 old_flags = neigh->flags; + struct net *net = dev_net(dev); + +- if (neigh->nud_state & NUD_FAILED) ++ if (READ_ONCE(neigh->nud_state) & NUD_FAILED) + goto out; + + /* +@@ -1331,6 +1331,14 @@ static void ndisc_router_discovery(struct sk_buff *skb) + goto skip_defrtr; + } + ++ lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime); ++ if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) { ++ ND_PRINTK(2, info, ++ "RA: router lifetime (%ds) is too short: %s\n", ++ lifetime, skb->dev->name); ++ goto skip_defrtr; ++ } ++ + /* Do not accept RA with source-addr found on local machine unless + * accept_ra_from_local is set to true. 
+ */ +@@ -1343,8 +1351,6 @@ static void ndisc_router_discovery(struct sk_buff *skb) + goto skip_defrtr; + } + +- lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime); +- + #ifdef CONFIG_IPV6_ROUTER_PREF + pref = ra_msg->icmph.icmp6_router_pref; + /* 10b is handled as if it were 00b (medium) */ +@@ -1519,6 +1525,9 @@ skip_linkparms: + if (ri->prefix_len == 0 && + !in6_dev->cnf.accept_ra_defrtr) + continue; ++ if (ri->lifetime != 0 && ++ ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft) ++ continue; + if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen) + continue; + if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 93957b20fccce..0bcdb675ba2c1 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -636,15 +636,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh) + + nh_gw = &fib6_nh->fib_nh_gw6; + dev = fib6_nh->fib_nh_dev; +- rcu_read_lock_bh(); ++ rcu_read_lock(); + last_probe = READ_ONCE(fib6_nh->last_probe); + idev = __in6_dev_get(dev); + neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); + if (neigh) { +- if (neigh->nud_state & NUD_VALID) ++ if (READ_ONCE(neigh->nud_state) & NUD_VALID) + goto out; + +- write_lock(&neigh->lock); ++ write_lock_bh(&neigh->lock); + if (!(neigh->nud_state & NUD_VALID) && + time_after(jiffies, + neigh->updated + idev->cnf.rtr_probe_interval)) { +@@ -652,7 +652,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) + if (work) + __neigh_set_probe_once(neigh); + } +- write_unlock(&neigh->lock); ++ write_unlock_bh(&neigh->lock); + } else if (time_after(jiffies, last_probe + + idev->cnf.rtr_probe_interval)) { + work = kmalloc(sizeof(*work), GFP_ATOMIC); +@@ -670,7 +670,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) + } + + out: +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + } + #else + static inline void rt6_probe(struct fib6_nh *fib6_nh) +@@ -686,25 +686,25 @@ static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh) + enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; + struct neighbour *neigh; + +- rcu_read_lock_bh(); ++ rcu_read_lock(); + neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev, + &fib6_nh->fib_nh_gw6); + if (neigh) { +- read_lock(&neigh->lock); +- if (neigh->nud_state & NUD_VALID) ++ u8 nud_state = READ_ONCE(neigh->nud_state); ++ ++ if (nud_state & NUD_VALID) + ret = RT6_NUD_SUCCEED; + #ifdef CONFIG_IPV6_ROUTER_PREF +- else if (!(neigh->nud_state & NUD_FAILED)) ++ else if (!(nud_state & NUD_FAILED)) + ret = RT6_NUD_SUCCEED; + else + ret = RT6_NUD_FAIL_PROBE; + #endif +- read_unlock(&neigh->lock); + } else { + ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ? 
+ RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR; + } +- rcu_read_unlock_bh(); ++ rcu_read_unlock(); + + return ret; + } +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 4bdd356bb5c46..7be89dcfd5fc5 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1644,9 +1644,12 @@ process: + struct sock *nsk; + + sk = req->rsk_listener; +- drop_reason = tcp_inbound_md5_hash(sk, skb, +- &hdr->saddr, &hdr->daddr, +- AF_INET6, dif, sdif); ++ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) ++ drop_reason = SKB_DROP_REASON_XFRM_POLICY; ++ else ++ drop_reason = tcp_inbound_md5_hash(sk, skb, ++ &hdr->saddr, &hdr->daddr, ++ AF_INET6, dif, sdif); + if (drop_reason) { + sk_drops_add(sk, skb); + reqsk_put(req); +@@ -1693,6 +1696,7 @@ process: + } + goto discard_and_relse; + } ++ nf_reset_ct(skb); + if (nsk == sk) { + reqsk_put(req); + tcp_v6_restore_cb(skb); +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c +index bce4132b0a5c8..314ec3a51e8de 100644 +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -510,7 +510,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + */ + if (len > INT_MAX - transhdrlen) + return -EMSGSIZE; +- ulen = len + transhdrlen; + + /* Mirror BSD error message compatibility */ + if (msg->msg_flags & MSG_OOB) +@@ -631,6 +630,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + + back_from_confirm: + lock_sock(sk); ++ ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0; + err = ip6_append_data(sk, ip_generic_getfrag, msg, + ulen, transhdrlen, &ipc6, + &fl6, (struct rt6_info *)dst, +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index cf3453b532d67..0167413d56972 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -566,6 +566,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, + } + + err = ieee80211_key_link(key, link, sta); ++ /* KRACK protection, shouldn't happen but just silently accept key */ ++ if (err == -EALREADY) ++ err = 0; + + out_unlock: + mutex_unlock(&local->sta_mtx); +diff --git a/net/mac80211/key.c b/net/mac80211/key.c +index e8f6c1e5eabfc..23bb24243c6e9 100644 +--- a/net/mac80211/key.c ++++ b/net/mac80211/key.c +@@ -901,7 +901,7 @@ int ieee80211_key_link(struct ieee80211_key *key, + */ + if (ieee80211_key_identical(sdata, old_key, key)) { + ieee80211_key_free_unused(key); +- ret = 0; ++ ret = -EALREADY; + goto out; + } + +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 9127a7fd5269c..5d845fcf3d09e 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -2047,7 +2047,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb, + nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if)) + return -EMSGSIZE; + +- sk_err = ssk->sk_err; ++ sk_err = READ_ONCE(ssk->sk_err); + if (sk_err && sk->sk_state == TCP_ESTABLISHED && + nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err)) + return -EMSGSIZE; +diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c +index 8a2aa63caa51f..38cbdc66d8bff 100644 +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -309,12 +309,6 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info) + goto create_err; + } + +- if (addr_l.id == 0) { +- NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id"); +- err = -EINVAL; +- goto create_err; +- } +- + err = mptcp_pm_parse_addr(raddr, info, &addr_r); + if (err < 0) { + NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr"); +diff --git a/net/mptcp/protocol.c 
b/net/mptcp/protocol.c +index 6dd880d6b0518..b6e0579e72644 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -401,7 +401,7 @@ drop: + return false; + } + +-static void mptcp_stop_timer(struct sock *sk) ++static void mptcp_stop_rtx_timer(struct sock *sk) + { + struct inet_connection_sock *icsk = inet_csk(sk); + +@@ -765,6 +765,46 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) + return moved; + } + ++static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) ++{ ++ int err = sock_error(ssk); ++ int ssk_state; ++ ++ if (!err) ++ return false; ++ ++ /* only propagate errors on fallen-back sockets or ++ * on MPC connect ++ */ ++ if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) ++ return false; ++ ++ /* We need to propagate only transition to CLOSE state. ++ * Orphaned socket will see such state change via ++ * subflow_sched_work_if_closed() and that path will properly ++ * destroy the msk as needed. ++ */ ++ ssk_state = inet_sk_state_load(ssk); ++ if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) ++ inet_sk_state_store(sk, ssk_state); ++ WRITE_ONCE(sk->sk_err, -err); ++ ++ /* This barrier is coupled with smp_rmb() in mptcp_poll() */ ++ smp_wmb(); ++ sk_error_report(sk); ++ return true; ++} ++ ++void __mptcp_error_report(struct sock *sk) ++{ ++ struct mptcp_subflow_context *subflow; ++ struct mptcp_sock *msk = mptcp_sk(sk); ++ ++ mptcp_for_each_subflow(msk, subflow) ++ if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) ++ break; ++} ++ + /* In most cases we will be able to lock the mptcp socket. If its already + * owned, we need to defer to the work queue to avoid ABBA deadlock. + */ +@@ -846,6 +886,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) + + mptcp_sockopt_sync_locked(msk, ssk); + mptcp_subflow_joined(msk, ssk); ++ mptcp_stop_tout_timer(sk); + return true; + } + +@@ -865,12 +906,12 @@ static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list + } + } + +-static bool mptcp_timer_pending(struct sock *sk) ++static bool mptcp_rtx_timer_pending(struct sock *sk) + { + return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); + } + +-static void mptcp_reset_timer(struct sock *sk) ++static void mptcp_reset_rtx_timer(struct sock *sk) + { + struct inet_connection_sock *icsk = inet_csk(sk); + unsigned long tout; +@@ -1054,10 +1095,10 @@ static void __mptcp_clean_una(struct sock *sk) + out: + if (snd_una == READ_ONCE(msk->snd_nxt) && + snd_una == READ_ONCE(msk->write_seq)) { +- if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) +- mptcp_stop_timer(sk); ++ if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) ++ mptcp_stop_rtx_timer(sk); + } else { +- mptcp_reset_timer(sk); ++ mptcp_reset_rtx_timer(sk); + } + } + +@@ -1606,8 +1647,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) + + out: + /* ensure the rtx timer is running */ +- if (!mptcp_timer_pending(sk)) +- mptcp_reset_timer(sk); ++ if (!mptcp_rtx_timer_pending(sk)) ++ mptcp_reset_rtx_timer(sk); + if (do_check_data_fin) + mptcp_check_send_data_fin(sk); + } +@@ -1665,8 +1706,8 @@ out: + if (copied) { + tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, + info.size_goal); +- if (!mptcp_timer_pending(sk)) +- mptcp_reset_timer(sk); ++ if (!mptcp_rtx_timer_pending(sk)) ++ mptcp_reset_rtx_timer(sk); + + if (msk->snd_data_fin_enable && + msk->snd_nxt + 1 == msk->write_seq) +@@ -2227,7 +2268,7 @@ static void mptcp_retransmit_timer(struct timer_list *t) + 
sock_put(sk); + } + +-static void mptcp_timeout_timer(struct timer_list *t) ++static void mptcp_tout_timer(struct timer_list *t) + { + struct sock *sk = from_timer(sk, t, sk_timer); + +@@ -2349,18 +2390,14 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + bool dispose_it, need_push = false; + + /* If the first subflow moved to a close state before accept, e.g. due +- * to an incoming reset, mptcp either: +- * - if either the subflow or the msk are dead, destroy the context +- * (the subflow socket is deleted by inet_child_forget) and the msk +- * - otherwise do nothing at the moment and take action at accept and/or +- * listener shutdown - user-space must be able to accept() the closed +- * socket. ++ * to an incoming reset or listener shutdown, the subflow socket is ++ * already deleted by inet_child_forget() and the mptcp socket can't ++ * survive too. + */ +- if (msk->in_accept_queue && msk->first == ssk) { +- if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD)) +- return; +- ++ if (msk->in_accept_queue && msk->first == ssk && ++ (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) { + /* ensure later check in mptcp_worker() will dispose the msk */ ++ mptcp_set_close_tout(sk, tcp_jiffies32 - (TCP_TIMEWAIT_LEN + 1)); + sock_set_flag(sk, SOCK_DEAD); + lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); + mptcp_subflow_drop_ctx(ssk); +@@ -2413,6 +2450,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + } + + out_release: ++ __mptcp_subflow_error_report(sk, ssk); + release_sock(ssk); + + sock_put(ssk); +@@ -2426,6 +2464,22 @@ out: + + if (need_push) + __mptcp_push_pending(sk, 0); ++ ++ /* Catch every 'all subflows closed' scenario, including peers silently ++ * closing them, e.g. due to timeout. ++ * For established sockets, allow an additional timeout before closing, ++ * as the protocol can still create more subflows. 
++ */ ++ if (list_is_singular(&msk->conn_list) && msk->first && ++ inet_sk_state_load(msk->first) == TCP_CLOSE) { ++ if (sk->sk_state != TCP_ESTABLISHED || ++ msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { ++ inet_sk_state_store(sk, TCP_CLOSE); ++ mptcp_close_wake_up(sk); ++ } else { ++ mptcp_start_tout_timer(sk); ++ } ++ } + } + + void mptcp_close_ssk(struct sock *sk, struct sock *ssk, +@@ -2469,23 +2523,14 @@ static void __mptcp_close_subflow(struct sock *sk) + + } + +-static bool mptcp_should_close(const struct sock *sk) ++static bool mptcp_close_tout_expired(const struct sock *sk) + { +- s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp; +- struct mptcp_subflow_context *subflow; +- +- if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue) +- return true; ++ if (!inet_csk(sk)->icsk_mtup.probe_timestamp || ++ sk->sk_state == TCP_CLOSE) ++ return false; + +- /* if all subflows are in closed status don't bother with additional +- * timeout +- */ +- mptcp_for_each_subflow(mptcp_sk(sk), subflow) { +- if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) != +- TCP_CLOSE) +- return false; +- } +- return true; ++ return time_after32(tcp_jiffies32, ++ inet_csk(sk)->icsk_mtup.probe_timestamp + TCP_TIMEWAIT_LEN); + } + + static void mptcp_check_fastclose(struct mptcp_sock *msk) +@@ -2513,15 +2558,15 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk) + /* Mirror the tcp_reset() error propagation */ + switch (sk->sk_state) { + case TCP_SYN_SENT: +- sk->sk_err = ECONNREFUSED; ++ WRITE_ONCE(sk->sk_err, ECONNREFUSED); + break; + case TCP_CLOSE_WAIT: +- sk->sk_err = EPIPE; ++ WRITE_ONCE(sk->sk_err, EPIPE); + break; + case TCP_CLOSE: + return; + default: +- sk->sk_err = ECONNRESET; ++ WRITE_ONCE(sk->sk_err, ECONNRESET); + } + + inet_sk_state_store(sk, TCP_CLOSE); +@@ -2597,27 +2642,28 @@ static void __mptcp_retrans(struct sock *sk) + reset_timer: + mptcp_check_and_set_pending(sk); + +- if (!mptcp_timer_pending(sk)) +- mptcp_reset_timer(sk); ++ if (!mptcp_rtx_timer_pending(sk)) ++ mptcp_reset_rtx_timer(sk); + } + + /* schedule the timeout timer for the relevant event: either close timeout + * or mp_fail timeout. The close timeout takes precedence on the mp_fail one + */ +-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout) ++void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) + { + struct sock *sk = (struct sock *)msk; + unsigned long timeout, close_timeout; + +- if (!fail_tout && !sock_flag(sk, SOCK_DEAD)) ++ if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) + return; + +- close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN; ++ close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + ++ TCP_TIMEWAIT_LEN; + + /* the close timeout takes precedence on the fail one, and here at least one of + * them is active + */ +- timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout; ++ timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? 
close_timeout : fail_tout; + + sk_reset_timer(sk, &sk->sk_timer, timeout); + } +@@ -2636,8 +2682,6 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) + mptcp_subflow_reset(ssk); + WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); + unlock_sock_fast(ssk, slow); +- +- mptcp_reset_timeout(msk, 0); + } + + static void mptcp_do_fastclose(struct sock *sk) +@@ -2676,19 +2720,15 @@ static void mptcp_worker(struct work_struct *work) + if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) + __mptcp_close_subflow(sk); + +- /* There is no point in keeping around an orphaned sk timedout or +- * closed, but we need the msk around to reply to incoming DATA_FIN, +- * even if it is orphaned and in FIN_WAIT2 state +- */ +- if (sock_flag(sk, SOCK_DEAD)) { +- if (mptcp_should_close(sk)) { +- inet_sk_state_store(sk, TCP_CLOSE); +- mptcp_do_fastclose(sk); +- } +- if (sk->sk_state == TCP_CLOSE) { +- __mptcp_destroy_sock(sk); +- goto unlock; +- } ++ if (mptcp_close_tout_expired(sk)) { ++ inet_sk_state_store(sk, TCP_CLOSE); ++ mptcp_do_fastclose(sk); ++ mptcp_close_wake_up(sk); ++ } ++ ++ if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) { ++ __mptcp_destroy_sock(sk); ++ goto unlock; + } + + if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) +@@ -2728,7 +2768,7 @@ static int __mptcp_init_sock(struct sock *sk) + + /* re-use the csk retrans timer for MPTCP-level retrans */ + timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); +- timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0); ++ timer_setup(&sk->sk_timer, mptcp_tout_timer, 0); + + return 0; + } +@@ -2820,8 +2860,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) + } else { + pr_debug("Sending DATA_FIN on subflow %p", ssk); + tcp_send_ack(ssk); +- if (!mptcp_timer_pending(sk)) +- mptcp_reset_timer(sk); ++ if (!mptcp_rtx_timer_pending(sk)) ++ mptcp_reset_rtx_timer(sk); + } + break; + } +@@ -2904,7 +2944,7 @@ static void __mptcp_destroy_sock(struct sock *sk) + + might_sleep(); + +- mptcp_stop_timer(sk); ++ mptcp_stop_rtx_timer(sk); + sk_stop_timer(sk, &sk->sk_timer); + msk->pm.status = 0; + +@@ -2984,7 +3024,6 @@ bool __mptcp_close(struct sock *sk, long timeout) + + cleanup: + /* orphan all the subflows */ +- inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow = lock_sock_fast_nested(ssk); +@@ -3021,7 +3060,7 @@ cleanup: + __mptcp_destroy_sock(sk); + do_cancel_work = true; + } else { +- mptcp_reset_timeout(msk, 0); ++ mptcp_start_tout_timer(sk); + } + + return do_cancel_work; +@@ -3084,8 +3123,8 @@ static int mptcp_disconnect(struct sock *sk, int flags) + mptcp_check_listen_stop(sk); + inet_sk_state_store(sk, TCP_CLOSE); + +- mptcp_stop_timer(sk); +- sk_stop_timer(sk, &sk->sk_timer); ++ mptcp_stop_rtx_timer(sk); ++ mptcp_stop_tout_timer(sk); + + if (mptcp_sk(sk)->token) + mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL); +@@ -3895,7 +3934,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, + + /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */ + smp_rmb(); +- if (sk->sk_err) ++ if (READ_ONCE(sk->sk_err)) + mask |= EPOLLERR; + + return mask; +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index d77b25636125b..91d89a0aeb586 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -681,7 +681,29 @@ void mptcp_get_options(const struct sk_buff *skb, + + void mptcp_finish_connect(struct sock *sk); + void 
__mptcp_set_connected(struct sock *sk); +-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout); ++void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout); ++ ++static inline void mptcp_stop_tout_timer(struct sock *sk) ++{ ++ if (!inet_csk(sk)->icsk_mtup.probe_timestamp) ++ return; ++ ++ sk_stop_timer(sk, &sk->sk_timer); ++ inet_csk(sk)->icsk_mtup.probe_timestamp = 0; ++} ++ ++static inline void mptcp_set_close_tout(struct sock *sk, unsigned long tout) ++{ ++ /* avoid 0 timestamp, as that means no close timeout */ ++ inet_csk(sk)->icsk_mtup.probe_timestamp = tout ? : 1; ++} ++ ++static inline void mptcp_start_tout_timer(struct sock *sk) ++{ ++ mptcp_set_close_tout(sk, tcp_jiffies32); ++ mptcp_reset_tout_timer(mptcp_sk(sk), 0); ++} ++ + static inline bool mptcp_is_fully_established(struct sock *sk) + { + return inet_sk_state_load(sk) == TCP_ESTABLISHED && +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 52a747a80e88e..b93b08a75017b 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -1161,7 +1161,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) + WRITE_ONCE(subflow->fail_tout, fail_tout); + tcp_send_ack(ssk); + +- mptcp_reset_timeout(msk, subflow->fail_tout); ++ mptcp_reset_tout_timer(msk, subflow->fail_tout); + } + + static bool subflow_check_data_avail(struct sock *ssk) +@@ -1248,7 +1248,7 @@ fallback: + subflow->reset_reason = MPTCP_RST_EMPTCP; + + reset: +- ssk->sk_err = EBADMSG; ++ WRITE_ONCE(ssk->sk_err, EBADMSG); + tcp_set_state(ssk, TCP_CLOSE); + while ((skb = skb_peek(&ssk->sk_receive_queue))) + sk_eat_skb(ssk, skb); +@@ -1305,42 +1305,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space) + *full_space = tcp_full_space(sk); + } + +-void __mptcp_error_report(struct sock *sk) +-{ +- struct mptcp_subflow_context *subflow; +- struct mptcp_sock *msk = mptcp_sk(sk); +- +- mptcp_for_each_subflow(msk, subflow) { +- struct sock *ssk = mptcp_subflow_tcp_sock(subflow); +- int err = sock_error(ssk); +- int ssk_state; +- +- if (!err) +- continue; +- +- /* only propagate errors on fallen-back sockets or +- * on MPC connect +- */ +- if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk)) +- continue; +- +- /* We need to propagate only transition to CLOSE state. +- * Orphaned socket will see such state change via +- * subflow_sched_work_if_closed() and that path will properly +- * destroy the msk as needed. 
+- */ +- ssk_state = inet_sk_state_load(ssk); +- if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) +- inet_sk_state_store(sk, ssk_state); +- sk->sk_err = -err; +- +- /* This barrier is coupled with smp_rmb() in mptcp_poll() */ +- smp_wmb(); +- sk_error_report(sk); +- break; +- } +-} +- + static void subflow_error_report(struct sock *ssk) + { + struct sock *sk = mptcp_subflow_ctx(ssk)->conn; +@@ -1527,6 +1491,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, + mptcp_sock_graft(ssk, sk->sk_socket); + iput(SOCK_INODE(sf)); + WRITE_ONCE(msk->allow_infinite_fallback, false); ++ mptcp_stop_tout_timer(sk); + return 0; + + failed_unlink: +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index d4fe7bb4f853a..6574f4e651b1a 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -1507,8 +1507,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id, + } + + get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id); +- result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, +- salen, 0); ++ result = kernel_connect(sock, (struct sockaddr *)&mcast_addr, ++ salen, 0); + if (result < 0) { + pr_err("Error connecting to the multicast addr\n"); + goto error; +diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c +index 7247af51bdfc4..c94a9971d790c 100644 +--- a/net/netfilter/nf_conntrack_proto_sctp.c ++++ b/net/netfilter/nf_conntrack_proto_sctp.c +@@ -112,7 +112,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { + /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA}, + /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/ + /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */ +-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */ ++/* cookie_ack */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */ + /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL}, + /* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, + /* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +@@ -126,7 +126,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { + /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV}, + /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV}, + /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV}, +-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */ ++/* cookie_echo */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */ + /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV}, + /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV}, + /* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +@@ -426,6 +426,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, + /* (D) vtag must be same as init_vtag as found in INIT_ACK */ + if (sh->vtag != ct->proto.sctp.vtag[dir]) + goto out_unlock; ++ } else if (sch->type == SCTP_CID_COOKIE_ACK) { ++ ct->proto.sctp.init[dir] = 0; ++ ct->proto.sctp.init[!dir] = 0; + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); +@@ -474,16 +477,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, + } + + /* If it is an INIT or an INIT ACK note down the vtag */ +- if (sch->type == SCTP_CID_INIT || +- 
sch->type == SCTP_CID_INIT_ACK) { +- struct sctp_inithdr _inithdr, *ih; ++ if (sch->type == SCTP_CID_INIT) { ++ struct sctp_inithdr _ih, *ih; + +- ih = skb_header_pointer(skb, offset + sizeof(_sch), +- sizeof(_inithdr), &_inithdr); +- if (ih == NULL) ++ ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih); ++ if (!ih) + goto out_unlock; +- pr_debug("Setting vtag %x for dir %d\n", +- ih->init_tag, !dir); ++ ++ if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir]) ++ ct->proto.sctp.init[!dir] = 0; ++ ct->proto.sctp.init[dir] = 1; ++ ++ pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir); + ct->proto.sctp.vtag[!dir] = ih->init_tag; + + /* don't renew timeout on init retransmit so +@@ -494,6 +499,24 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, + old_state == SCTP_CONNTRACK_CLOSED && + nf_ct_is_confirmed(ct)) + ignore = true; ++ } else if (sch->type == SCTP_CID_INIT_ACK) { ++ struct sctp_inithdr _ih, *ih; ++ __be32 vtag; ++ ++ ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih); ++ if (!ih) ++ goto out_unlock; ++ ++ vtag = ct->proto.sctp.vtag[!dir]; ++ if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag) ++ goto out_unlock; ++ /* collision */ ++ if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] && ++ vtag != ih->init_tag) ++ goto out_unlock; ++ ++ pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir); ++ ct->proto.sctp.vtag[!dir] = ih->init_tag; + } + + ct->proto.sctp.state = new_state; +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 52b81dc1fcf5b..5e3dbe2652dbd 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -7576,24 +7576,14 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info, + return nft_delobj(&ctx, obj); + } + +-void nft_obj_notify(struct net *net, const struct nft_table *table, +- struct nft_object *obj, u32 portid, u32 seq, int event, +- u16 flags, int family, int report, gfp_t gfp) ++static void ++__nft_obj_notify(struct net *net, const struct nft_table *table, ++ struct nft_object *obj, u32 portid, u32 seq, int event, ++ u16 flags, int family, int report, gfp_t gfp) + { + struct nftables_pernet *nft_net = nft_pernet(net); + struct sk_buff *skb; + int err; +- char *buf = kasprintf(gfp, "%s:%u", +- table->name, nft_net->base_seq); +- +- audit_log_nfcfg(buf, +- family, +- obj->handle, +- event == NFT_MSG_NEWOBJ ? +- AUDIT_NFT_OP_OBJ_REGISTER : +- AUDIT_NFT_OP_OBJ_UNREGISTER, +- gfp); +- kfree(buf); + + if (!report && + !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) +@@ -7616,13 +7606,35 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, + err: + nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS); + } ++ ++void nft_obj_notify(struct net *net, const struct nft_table *table, ++ struct nft_object *obj, u32 portid, u32 seq, int event, ++ u16 flags, int family, int report, gfp_t gfp) ++{ ++ struct nftables_pernet *nft_net = nft_pernet(net); ++ char *buf = kasprintf(gfp, "%s:%u", ++ table->name, nft_net->base_seq); ++ ++ audit_log_nfcfg(buf, ++ family, ++ obj->handle, ++ event == NFT_MSG_NEWOBJ ? 
++ AUDIT_NFT_OP_OBJ_REGISTER : ++ AUDIT_NFT_OP_OBJ_UNREGISTER, ++ gfp); ++ kfree(buf); ++ ++ __nft_obj_notify(net, table, obj, portid, seq, event, ++ flags, family, report, gfp); ++} + EXPORT_SYMBOL_GPL(nft_obj_notify); + + static void nf_tables_obj_notify(const struct nft_ctx *ctx, + struct nft_object *obj, int event) + { +- nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event, +- ctx->flags, ctx->family, ctx->report, GFP_KERNEL); ++ __nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ++ ctx->seq, event, ctx->flags, ctx->family, ++ ctx->report, GFP_KERNEL); + } + + /* +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c +index 487572dcd6144..2660ceab3759d 100644 +--- a/net/netfilter/nft_set_rbtree.c ++++ b/net/netfilter/nft_set_rbtree.c +@@ -233,10 +233,9 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set, + rb_erase(&rbe->node, &priv->root); + } + +-static int nft_rbtree_gc_elem(const struct nft_set *__set, +- struct nft_rbtree *priv, +- struct nft_rbtree_elem *rbe, +- u8 genmask) ++static const struct nft_rbtree_elem * ++nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv, ++ struct nft_rbtree_elem *rbe, u8 genmask) + { + struct nft_set *set = (struct nft_set *)__set; + struct rb_node *prev = rb_prev(&rbe->node); +@@ -246,7 +245,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, + + gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC); + if (!gc) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + + /* search for end interval coming before this element. + * end intervals don't carry a timeout extension, they +@@ -261,6 +260,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, + prev = rb_prev(prev); + } + ++ rbe_prev = NULL; + if (prev) { + rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); + nft_rbtree_gc_remove(net, set, priv, rbe_prev); +@@ -272,7 +272,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, + */ + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (WARN_ON_ONCE(!gc)) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + + nft_trans_gc_elem_add(gc, rbe_prev); + } +@@ -280,13 +280,13 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, + nft_rbtree_gc_remove(net, set, priv, rbe); + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (WARN_ON_ONCE(!gc)) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + + nft_trans_gc_elem_add(gc, rbe); + + nft_trans_gc_queue_sync_done(gc); + +- return 0; ++ return rbe_prev; + } + + static bool nft_rbtree_update_first(const struct nft_set *set, +@@ -314,7 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + struct nft_rbtree *priv = nft_set_priv(set); + u8 cur_genmask = nft_genmask_cur(net); + u8 genmask = nft_genmask_next(net); +- int d, err; ++ int d; + + /* Descend the tree to search for an existing element greater than the + * key value to insert that is greater than the new element. 
This is the +@@ -363,9 +363,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + */ + if (nft_set_elem_expired(&rbe->ext) && + nft_set_elem_active(&rbe->ext, cur_genmask)) { +- err = nft_rbtree_gc_elem(set, priv, rbe, genmask); +- if (err < 0) +- return err; ++ const struct nft_rbtree_elem *removed_end; ++ ++ removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask); ++ if (IS_ERR(removed_end)) ++ return PTR_ERR(removed_end); ++ ++ if (removed_end == rbe_le || removed_end == rbe_ge) ++ return -EAGAIN; + + continue; + } +@@ -486,11 +491,18 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set, + struct nft_rbtree_elem *rbe = elem->priv; + int err; + +- write_lock_bh(&priv->lock); +- write_seqcount_begin(&priv->count); +- err = __nft_rbtree_insert(net, set, rbe, ext); +- write_seqcount_end(&priv->count); +- write_unlock_bh(&priv->lock); ++ do { ++ if (fatal_signal_pending(current)) ++ return -EINTR; ++ ++ cond_resched(); ++ ++ write_lock_bh(&priv->lock); ++ write_seqcount_begin(&priv->count); ++ err = __nft_rbtree_insert(net, set, rbe, ext); ++ write_seqcount_end(&priv->count); ++ write_unlock_bh(&priv->lock); ++ } while (err == -EAGAIN); + + return err; + } +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 387e430a35ccc..cb833302270a6 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -352,7 +352,7 @@ static void netlink_overrun(struct sock *sk) + if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) { + if (!test_and_set_bit(NETLINK_S_CONGESTED, + &nlk_sk(sk)->state)) { +- sk->sk_err = ENOBUFS; ++ WRITE_ONCE(sk->sk_err, ENOBUFS); + sk_error_report(sk); + } + } +@@ -1566,7 +1566,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) + goto out; + } + +- sk->sk_err = p->code; ++ WRITE_ONCE(sk->sk_err, p->code); + sk_error_report(sk); + out: + return ret; +@@ -1955,7 +1955,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { + ret = netlink_dump(sk); + if (ret) { +- sk->sk_err = -ret; ++ WRITE_ONCE(sk->sk_err, -ret); + sk_error_report(sk); + } + } +@@ -2443,19 +2443,24 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + flags |= NLM_F_ACK_TLVS; + + skb = nlmsg_new(payload + tlvlen, GFP_KERNEL); +- if (!skb) { +- NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; +- sk_error_report(NETLINK_CB(in_skb).sk); +- return; +- } ++ if (!skb) ++ goto err_skb; + + rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, +- NLMSG_ERROR, payload, flags); ++ NLMSG_ERROR, sizeof(*errmsg), flags); ++ if (!rep) ++ goto err_bad_put; + errmsg = nlmsg_data(rep); + errmsg->error = err; +- unsafe_memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) +- ? nlh->nlmsg_len : sizeof(*nlh), +- /* Bounds checked by the skb layer. 
*/); ++ errmsg->msg = *nlh; ++ ++ if (!(flags & NLM_F_CAPPED)) { ++ if (!nlmsg_append(skb, nlmsg_len(nlh))) ++ goto err_bad_put; ++ ++ memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh), ++ nlmsg_len(nlh)); ++ } + + if (tlvlen) + netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack); +@@ -2463,6 +2468,14 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + nlmsg_end(skb, rep); + + nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid); ++ ++ return; ++ ++err_bad_put: ++ nlmsg_free(skb); ++err_skb: ++ WRITE_ONCE(NETLINK_CB(in_skb).sk->sk_err, ENOBUFS); ++ sk_error_report(NETLINK_CB(in_skb).sk); + } + EXPORT_SYMBOL(netlink_ack); + +diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c +index f60e424e06076..6705bb895e239 100644 +--- a/net/nfc/llcp_core.c ++++ b/net/nfc/llcp_core.c +@@ -1636,7 +1636,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) + timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0); + INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work); + ++ spin_lock(&llcp_devices_lock); + list_add(&local->list, &llcp_devices); ++ spin_unlock(&llcp_devices_lock); + + return 0; + } +diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c +index f0c477c5d1db4..d788c6d28986f 100644 +--- a/net/rds/tcp_connect.c ++++ b/net/rds/tcp_connect.c +@@ -173,7 +173,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) + * own the socket + */ + rds_tcp_set_callbacks(sock, cp); +- ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK); ++ ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK); + + rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret); + if (ret == -EINPROGRESS) +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index 3460abceba443..2965a12fe8aa2 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -1161,8 +1161,7 @@ int sctp_assoc_update(struct sctp_association *asoc, + /* Add any peer addresses from the new association. 
*/ + list_for_each_entry(trans, &new->peer.transport_addr_list, + transports) +- if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) && +- !sctp_assoc_add_peer(asoc, &trans->ipaddr, ++ if (!sctp_assoc_add_peer(asoc, &trans->ipaddr, + GFP_ATOMIC, trans->state)) + return -ENOMEM; + +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 32e3669adf146..e25dc17091311 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -2449,6 +2449,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, + if (trans) { + trans->hbinterval = + msecs_to_jiffies(params->spp_hbinterval); ++ sctp_transport_reset_hb_timer(trans); + } else if (asoc) { + asoc->hbinterval = + msecs_to_jiffies(params->spp_hbinterval); +diff --git a/net/socket.c b/net/socket.c +index d281a7ef4b1d3..b0169168e3f4e 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -720,6 +720,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) + return ret; + } + ++static int __sock_sendmsg(struct socket *sock, struct msghdr *msg) ++{ ++ int err = security_socket_sendmsg(sock, msg, ++ msg_data_left(msg)); ++ ++ return err ?: sock_sendmsg_nosec(sock, msg); ++} ++ + /** + * sock_sendmsg - send a message through @sock + * @sock: socket +@@ -730,10 +738,19 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) + */ + int sock_sendmsg(struct socket *sock, struct msghdr *msg) + { +- int err = security_socket_sendmsg(sock, msg, +- msg_data_left(msg)); ++ struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name; ++ struct sockaddr_storage address; ++ int ret; + +- return err ?: sock_sendmsg_nosec(sock, msg); ++ if (msg->msg_name) { ++ memcpy(&address, msg->msg_name, msg->msg_namelen); ++ msg->msg_name = &address; ++ } ++ ++ ret = __sock_sendmsg(sock, msg); ++ msg->msg_name = save_addr; ++ ++ return ret; + } + EXPORT_SYMBOL(sock_sendmsg); + +@@ -1110,7 +1127,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) + if (sock->type == SOCK_SEQPACKET) + msg.msg_flags |= MSG_EOR; + +- res = sock_sendmsg(sock, &msg); ++ res = __sock_sendmsg(sock, &msg); + *from = msg.msg_iter; + return res; + } +@@ -2114,7 +2131,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, + if (sock->file->f_flags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + msg.msg_flags = flags; +- err = sock_sendmsg(sock, &msg); ++ err = __sock_sendmsg(sock, &msg); + + out_put: + fput_light(sock->file, fput_needed); +@@ -2479,7 +2496,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys, + err = sock_sendmsg_nosec(sock, msg_sys); + goto out_freectl; + } +- err = sock_sendmsg(sock, msg_sys); ++ err = __sock_sendmsg(sock, msg_sys); + /* + * If this is sendmmsg() and sending to current destination address was + * successful, remember it. 
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c +index 2b236d95a6469..65f59739a041a 100644 +--- a/net/tipc/crypto.c ++++ b/net/tipc/crypto.c +@@ -1441,14 +1441,14 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key) + struct tipc_crypto *tx = tipc_net(net)->crypto_tx; + struct tipc_key key; + +- spin_lock(&tx->lock); ++ spin_lock_bh(&tx->lock); + key = tx->key; + WARN_ON(!key.active || tx_key != key.active); + + /* Free the active key */ + tipc_crypto_key_set_state(tx, key.passive, 0, key.pending); + tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); +- spin_unlock(&tx->lock); ++ spin_unlock_bh(&tx->lock); + + pr_warn("%s: key is revoked\n", tx->name); + return -EKEYREVOKED; +diff --git a/net/wireless/core.c b/net/wireless/core.c +index 609b79fe4a748..2c79604672062 100644 +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -408,6 +408,34 @@ static void cfg80211_propagate_cac_done_wk(struct work_struct *work) + rtnl_unlock(); + } + ++static void cfg80211_wiphy_work(struct work_struct *work) ++{ ++ struct cfg80211_registered_device *rdev; ++ struct wiphy_work *wk; ++ ++ rdev = container_of(work, struct cfg80211_registered_device, wiphy_work); ++ ++ wiphy_lock(&rdev->wiphy); ++ if (rdev->suspended) ++ goto out; ++ ++ spin_lock_irq(&rdev->wiphy_work_lock); ++ wk = list_first_entry_or_null(&rdev->wiphy_work_list, ++ struct wiphy_work, entry); ++ if (wk) { ++ list_del_init(&wk->entry); ++ if (!list_empty(&rdev->wiphy_work_list)) ++ schedule_work(work); ++ spin_unlock_irq(&rdev->wiphy_work_lock); ++ ++ wk->func(&rdev->wiphy, wk); ++ } else { ++ spin_unlock_irq(&rdev->wiphy_work_lock); ++ } ++out: ++ wiphy_unlock(&rdev->wiphy); ++} ++ + /* exported functions */ + + struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, +@@ -533,6 +561,9 @@ use_default_name: + return NULL; + } + ++ INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); ++ INIT_LIST_HEAD(&rdev->wiphy_work_list); ++ spin_lock_init(&rdev->wiphy_work_lock); + INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); + INIT_WORK(&rdev->conn_work, cfg80211_conn_work); + INIT_WORK(&rdev->event_work, cfg80211_event_work); +@@ -1011,6 +1042,31 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy) + } + EXPORT_SYMBOL(wiphy_rfkill_start_polling); + ++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev) ++{ ++ unsigned int runaway_limit = 100; ++ unsigned long flags; ++ ++ lockdep_assert_held(&rdev->wiphy.mtx); ++ ++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags); ++ while (!list_empty(&rdev->wiphy_work_list)) { ++ struct wiphy_work *wk; ++ ++ wk = list_first_entry(&rdev->wiphy_work_list, ++ struct wiphy_work, entry); ++ list_del_init(&wk->entry); ++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); ++ ++ wk->func(&rdev->wiphy, wk); ++ ++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags); ++ if (WARN_ON(--runaway_limit == 0)) ++ INIT_LIST_HEAD(&rdev->wiphy_work_list); ++ } ++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); ++} ++ + void wiphy_unregister(struct wiphy *wiphy) + { + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); +@@ -1049,9 +1105,19 @@ void wiphy_unregister(struct wiphy *wiphy) + cfg80211_rdev_list_generation++; + device_del(&rdev->wiphy.dev); + ++#ifdef CONFIG_PM ++ if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) ++ rdev_set_wakeup(rdev, false); ++#endif ++ ++ /* surely nothing is reachable now, clean up work */ ++ cfg80211_process_wiphy_works(rdev); + wiphy_unlock(&rdev->wiphy); + rtnl_unlock(); + ++ /* this has nothing 
to do now but make sure it's gone */ ++ cancel_work_sync(&rdev->wiphy_work); ++ + flush_work(&rdev->scan_done_wk); + cancel_work_sync(&rdev->conn_work); + flush_work(&rdev->event_work); +@@ -1064,10 +1130,6 @@ void wiphy_unregister(struct wiphy *wiphy) + flush_work(&rdev->mgmt_registrations_update_wk); + flush_work(&rdev->background_cac_abort_wk); + +-#ifdef CONFIG_PM +- if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) +- rdev_set_wakeup(rdev, false); +-#endif + cfg80211_rdev_free_wowlan(rdev); + cfg80211_rdev_free_coalesce(rdev); + } +@@ -1114,16 +1176,11 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked, + } + EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason); + +-void cfg80211_cqm_config_free(struct wireless_dev *wdev) +-{ +- kfree(wdev->cqm_config); +- wdev->cqm_config = NULL; +-} +- + static void _cfg80211_unregister_wdev(struct wireless_dev *wdev, + bool unregister_netdev) + { + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); ++ struct cfg80211_cqm_config *cqm_config; + unsigned int link_id; + + ASSERT_RTNL(); +@@ -1162,11 +1219,10 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev, + kfree_sensitive(wdev->wext.keys); + wdev->wext.keys = NULL; + #endif +- /* only initialized if we have a netdev */ +- if (wdev->netdev) +- flush_work(&wdev->disconnect_wk); +- +- cfg80211_cqm_config_free(wdev); ++ wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work); ++ /* deleted from the list, so can't be found from nl80211 any more */ ++ cqm_config = rcu_access_pointer(wdev->cqm_config); ++ kfree_rcu(cqm_config, rcu_head); + + /* + * Ensure that all events have been processed and +@@ -1318,6 +1374,8 @@ void cfg80211_init_wdev(struct wireless_dev *wdev) + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; + #endif + ++ wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work); ++ + if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) + wdev->ps = true; + else +@@ -1439,6 +1497,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, + cfg80211_leave(rdev, wdev); + cfg80211_remove_links(wdev); + wiphy_unlock(&rdev->wiphy); ++ /* since we just did cfg80211_leave() nothing to do there */ ++ cancel_work_sync(&wdev->disconnect_wk); + break; + case NETDEV_DOWN: + wiphy_lock(&rdev->wiphy); +@@ -1548,6 +1608,66 @@ static struct pernet_operations cfg80211_pernet_ops = { + .exit = cfg80211_pernet_exit, + }; + ++void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work) ++{ ++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags); ++ if (list_empty(&work->entry)) ++ list_add_tail(&work->entry, &rdev->wiphy_work_list); ++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); ++ ++ schedule_work(&rdev->wiphy_work); ++} ++EXPORT_SYMBOL_GPL(wiphy_work_queue); ++ ++void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work) ++{ ++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ++ unsigned long flags; ++ ++ lockdep_assert_held(&wiphy->mtx); ++ ++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags); ++ if (!list_empty(&work->entry)) ++ list_del_init(&work->entry); ++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); ++} ++EXPORT_SYMBOL_GPL(wiphy_work_cancel); ++ ++void wiphy_delayed_work_timer(struct timer_list *t) ++{ ++ struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer); ++ ++ wiphy_work_queue(dwork->wiphy, &dwork->work); ++} ++EXPORT_SYMBOL(wiphy_delayed_work_timer); ++ ++void 
wiphy_delayed_work_queue(struct wiphy *wiphy, ++ struct wiphy_delayed_work *dwork, ++ unsigned long delay) ++{ ++ if (!delay) { ++ wiphy_work_queue(wiphy, &dwork->work); ++ return; ++ } ++ ++ dwork->wiphy = wiphy; ++ mod_timer(&dwork->timer, jiffies + delay); ++} ++EXPORT_SYMBOL_GPL(wiphy_delayed_work_queue); ++ ++void wiphy_delayed_work_cancel(struct wiphy *wiphy, ++ struct wiphy_delayed_work *dwork) ++{ ++ lockdep_assert_held(&wiphy->mtx); ++ ++ del_timer_sync(&dwork->timer); ++ wiphy_work_cancel(wiphy, &dwork->work); ++} ++EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel); ++ + static int __init cfg80211_init(void) + { + int err; +diff --git a/net/wireless/core.h b/net/wireless/core.h +index 775e16cb99eda..86fd79912254d 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -108,6 +108,12 @@ struct cfg80211_registered_device { + /* lock for all wdev lists */ + spinlock_t mgmt_registrations_lock; + ++ struct work_struct wiphy_work; ++ struct list_head wiphy_work_list; ++ /* protects the list above */ ++ spinlock_t wiphy_work_lock; ++ bool suspended; ++ + /* must be last because of the way we do wiphy_priv(), + * and it should at least be aligned to NETDEV_ALIGN */ + struct wiphy wiphy __aligned(NETDEV_ALIGN); +@@ -287,12 +293,17 @@ struct cfg80211_beacon_registration { + }; + + struct cfg80211_cqm_config { ++ struct rcu_head rcu_head; + u32 rssi_hyst; + s32 last_rssi_event_value; ++ enum nl80211_cqm_rssi_threshold_event last_rssi_event_type; + int n_rssi_thresholds; + s32 rssi_thresholds[]; + }; + ++void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, ++ struct wiphy_work *work); ++ + void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev); + + /* free object */ +@@ -450,6 +461,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, + struct net_device *dev, enum nl80211_iftype ntype, + struct vif_params *params); + void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); ++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev); + void cfg80211_process_wdev_events(struct wireless_dev *wdev); + + bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, +@@ -556,8 +568,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, + #define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; }) + #endif + +-void cfg80211_cqm_config_free(struct wireless_dev *wdev); +- + void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid); + void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev); + void cfg80211_pmsr_free_wk(struct work_struct *work); +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 12c7c89d5be1d..1d993a490ac4b 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -12565,7 +12565,8 @@ static int nl80211_set_cqm_txe(struct genl_info *info, + } + + static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, +- struct net_device *dev) ++ struct net_device *dev, ++ struct cfg80211_cqm_config *cqm_config) + { + struct wireless_dev *wdev = dev->ieee80211_ptr; + s32 last, low, high; +@@ -12574,7 +12575,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, + int err; + + /* RSSI reporting disabled? */ +- if (!wdev->cqm_config) ++ if (!cqm_config) + return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0); + + /* +@@ -12583,7 +12584,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, + * connection is established and enough beacons received to calculate + * the average. 
+ */ +- if (!wdev->cqm_config->last_rssi_event_value && ++ if (!cqm_config->last_rssi_event_value && + wdev->links[0].client.current_bss && + rdev->ops->get_station) { + struct station_info sinfo = {}; +@@ -12597,30 +12598,30 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, + + cfg80211_sinfo_release_content(&sinfo); + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) +- wdev->cqm_config->last_rssi_event_value = ++ cqm_config->last_rssi_event_value = + (s8) sinfo.rx_beacon_signal_avg; + } + +- last = wdev->cqm_config->last_rssi_event_value; +- hyst = wdev->cqm_config->rssi_hyst; +- n = wdev->cqm_config->n_rssi_thresholds; ++ last = cqm_config->last_rssi_event_value; ++ hyst = cqm_config->rssi_hyst; ++ n = cqm_config->n_rssi_thresholds; + + for (i = 0; i < n; i++) { + i = array_index_nospec(i, n); +- if (last < wdev->cqm_config->rssi_thresholds[i]) ++ if (last < cqm_config->rssi_thresholds[i]) + break; + } + + low_index = i - 1; + if (low_index >= 0) { + low_index = array_index_nospec(low_index, n); +- low = wdev->cqm_config->rssi_thresholds[low_index] - hyst; ++ low = cqm_config->rssi_thresholds[low_index] - hyst; + } else { + low = S32_MIN; + } + if (i < n) { + i = array_index_nospec(i, n); +- high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1; ++ high = cqm_config->rssi_thresholds[i] + hyst - 1; + } else { + high = S32_MAX; + } +@@ -12633,6 +12634,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, + u32 hysteresis) + { + struct cfg80211_registered_device *rdev = info->user_ptr[0]; ++ struct cfg80211_cqm_config *cqm_config = NULL, *old; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + int i, err; +@@ -12650,10 +12652,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, + wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) + return -EOPNOTSUPP; + +- wdev_lock(wdev); +- cfg80211_cqm_config_free(wdev); +- wdev_unlock(wdev); +- + if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) { + if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */ + return rdev_set_cqm_rssi_config(rdev, dev, 0, 0); +@@ -12670,9 +12668,10 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, + n_thresholds = 0; + + wdev_lock(wdev); +- if (n_thresholds) { +- struct cfg80211_cqm_config *cqm_config; ++ old = rcu_dereference_protected(wdev->cqm_config, ++ lockdep_is_held(&wdev->mtx)); + ++ if (n_thresholds) { + cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds, + n_thresholds), + GFP_KERNEL); +@@ -12687,11 +12686,18 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, + flex_array_size(cqm_config, rssi_thresholds, + n_thresholds)); + +- wdev->cqm_config = cqm_config; ++ rcu_assign_pointer(wdev->cqm_config, cqm_config); ++ } else { ++ RCU_INIT_POINTER(wdev->cqm_config, NULL); + } + +- err = cfg80211_cqm_rssi_update(rdev, dev); +- ++ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config); ++ if (err) { ++ rcu_assign_pointer(wdev->cqm_config, old); ++ kfree_rcu(cqm_config, rcu_head); ++ } else { ++ kfree_rcu(old, rcu_head); ++ } + unlock: + wdev_unlock(wdev); + +@@ -18719,9 +18725,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, + enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level, gfp_t gfp) + { +- struct sk_buff *msg; + struct wireless_dev *wdev = dev->ieee80211_ptr; +- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); ++ struct cfg80211_cqm_config *cqm_config; + + trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level); + +@@ -18729,18 
+18734,41 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, + rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH)) + return; + +- if (wdev->cqm_config) { +- wdev->cqm_config->last_rssi_event_value = rssi_level; ++ rcu_read_lock(); ++ cqm_config = rcu_dereference(wdev->cqm_config); ++ if (cqm_config) { ++ cqm_config->last_rssi_event_value = rssi_level; ++ cqm_config->last_rssi_event_type = rssi_event; ++ wiphy_work_queue(wdev->wiphy, &wdev->cqm_rssi_work); ++ } ++ rcu_read_unlock(); ++} ++EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); ++ ++void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work) ++{ ++ struct wireless_dev *wdev = container_of(work, struct wireless_dev, ++ cqm_rssi_work); ++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ++ enum nl80211_cqm_rssi_threshold_event rssi_event; ++ struct cfg80211_cqm_config *cqm_config; ++ struct sk_buff *msg; ++ s32 rssi_level; + +- cfg80211_cqm_rssi_update(rdev, dev); ++ wdev_lock(wdev); ++ cqm_config = rcu_dereference_protected(wdev->cqm_config, ++ lockdep_is_held(&wdev->mtx)); ++ if (!wdev->cqm_config) ++ goto unlock; + +- if (rssi_level == 0) +- rssi_level = wdev->cqm_config->last_rssi_event_value; +- } ++ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config); + +- msg = cfg80211_prepare_cqm(dev, NULL, gfp); ++ rssi_level = cqm_config->last_rssi_event_value; ++ rssi_event = cqm_config->last_rssi_event_type; ++ ++ msg = cfg80211_prepare_cqm(wdev->netdev, NULL, GFP_KERNEL); + if (!msg) +- return; ++ goto unlock; + + if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, + rssi_event)) +@@ -18750,14 +18778,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, + rssi_level)) + goto nla_put_failure; + +- cfg80211_send_cqm(msg, gfp); ++ cfg80211_send_cqm(msg, GFP_KERNEL); + +- return; ++ goto unlock; + + nla_put_failure: + nlmsg_free(msg); ++ unlock: ++ wdev_unlock(wdev); + } +-EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); + + void cfg80211_cqm_txe_notify(struct net_device *dev, + const u8 *peer, u32 num_packets, +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index 6e87d2cd83456..b97834284baef 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -5,7 +5,7 @@ + * (for nl80211's connect() and wext) + * + * Copyright 2009 Johannes Berg +- * Copyright (C) 2009, 2020, 2022 Intel Corporation. All rights reserved. ++ * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved. 
+ * Copyright 2017 Intel Deutschland GmbH + */ + +@@ -1555,6 +1555,7 @@ void cfg80211_autodisconnect_wk(struct work_struct *work) + container_of(work, struct wireless_dev, disconnect_wk); + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + ++ wiphy_lock(wdev->wiphy); + wdev_lock(wdev); + + if (wdev->conn_owner_nlportid) { +@@ -1593,4 +1594,5 @@ void cfg80211_autodisconnect_wk(struct work_struct *work) + } + + wdev_unlock(wdev); ++ wiphy_unlock(wdev->wiphy); + } +diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c +index 0c3f05c9be27a..4d3b658030105 100644 +--- a/net/wireless/sysfs.c ++++ b/net/wireless/sysfs.c +@@ -5,7 +5,7 @@ + * + * Copyright 2005-2006 Jiri Benc + * Copyright 2006 Johannes Berg +- * Copyright (C) 2020-2021 Intel Corporation ++ * Copyright (C) 2020-2021, 2023 Intel Corporation + */ + + #include +@@ -105,14 +105,18 @@ static int wiphy_suspend(struct device *dev) + cfg80211_leave_all(rdev); + cfg80211_process_rdev_events(rdev); + } ++ cfg80211_process_wiphy_works(rdev); + if (rdev->ops->suspend) + ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); + if (ret == 1) { + /* Driver refuse to configure wowlan */ + cfg80211_leave_all(rdev); + cfg80211_process_rdev_events(rdev); ++ cfg80211_process_wiphy_works(rdev); + ret = rdev_suspend(rdev, NULL); + } ++ if (ret == 0) ++ rdev->suspended = true; + } + wiphy_unlock(&rdev->wiphy); + rtnl_unlock(); +@@ -132,6 +136,8 @@ static int wiphy_resume(struct device *dev) + wiphy_lock(&rdev->wiphy); + if (rdev->wiphy.registered && rdev->ops->resume) + ret = rdev_resume(rdev); ++ rdev->suspended = false; ++ schedule_work(&rdev->wiphy_work); + wiphy_unlock(&rdev->wiphy); + + if (ret) +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c +index 80d973144fded..111d5464c12df 100644 +--- a/scripts/mod/file2alias.c ++++ b/scripts/mod/file2alias.c +@@ -1577,7 +1577,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, + /* First handle the "special" cases */ + if (sym_is(name, namelen, "usb")) + do_usb_table(symval, sym->st_size, mod); +- if (sym_is(name, namelen, "of")) ++ else if (sym_is(name, namelen, "of")) + do_of_table(symval, sym->st_size, mod); + else if (sym_is(name, namelen, "pnp")) + do_pnp_device_entry(symval, sym->st_size, mod); +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig +index c17660bf5f347..6ef7bde551263 100644 +--- a/security/integrity/ima/Kconfig ++++ b/security/integrity/ima/Kconfig +@@ -29,9 +29,11 @@ config IMA + to learn more about IMA. + If unsure, say N. + ++if IMA ++ + config IMA_KEXEC + bool "Enable carrying the IMA measurement list across a soft boot" +- depends on IMA && TCG_TPM && HAVE_IMA_KEXEC ++ depends on TCG_TPM && HAVE_IMA_KEXEC + default n + help + TPM PCRs are only reset on a hard reboot. In order to validate +@@ -43,7 +45,6 @@ config IMA_KEXEC + + config IMA_MEASURE_PCR_IDX + int +- depends on IMA + range 8 14 + default 10 + help +@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX + + config IMA_LSM_RULES + bool +- depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR) ++ depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR) + default y + help + Disabling this option will disregard LSM based policy rules. +@@ -61,7 +62,6 @@ config IMA_LSM_RULES + choice + prompt "Default template" + default IMA_NG_TEMPLATE +- depends on IMA + help + Select the default IMA measurement template. 
+ +@@ -80,14 +80,12 @@ endchoice + + config IMA_DEFAULT_TEMPLATE + string +- depends on IMA + default "ima-ng" if IMA_NG_TEMPLATE + default "ima-sig" if IMA_SIG_TEMPLATE + + choice + prompt "Default integrity hash algorithm" + default IMA_DEFAULT_HASH_SHA1 +- depends on IMA + help + Select the default hash algorithm used for the measurement + list, integrity appraisal and audit log. The compiled default +@@ -117,7 +115,6 @@ endchoice + + config IMA_DEFAULT_HASH + string +- depends on IMA + default "sha1" if IMA_DEFAULT_HASH_SHA1 + default "sha256" if IMA_DEFAULT_HASH_SHA256 + default "sha512" if IMA_DEFAULT_HASH_SHA512 +@@ -126,7 +123,6 @@ config IMA_DEFAULT_HASH + + config IMA_WRITE_POLICY + bool "Enable multiple writes to the IMA policy" +- depends on IMA + default n + help + IMA policy can now be updated multiple times. The new rules get +@@ -137,7 +133,6 @@ config IMA_WRITE_POLICY + + config IMA_READ_POLICY + bool "Enable reading back the current IMA policy" +- depends on IMA + default y if IMA_WRITE_POLICY + default n if !IMA_WRITE_POLICY + help +@@ -147,7 +142,6 @@ config IMA_READ_POLICY + + config IMA_APPRAISE + bool "Appraise integrity measurements" +- depends on IMA + default n + help + This option enables local measurement integrity appraisal. +@@ -268,7 +262,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY + config IMA_BLACKLIST_KEYRING + bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)" + depends on SYSTEM_TRUSTED_KEYRING +- depends on IMA_TRUSTED_KEYRING ++ depends on INTEGRITY_TRUSTED_KEYRING + default n + help + This option creates an IMA blacklist keyring, which contains all +@@ -278,7 +272,7 @@ config IMA_BLACKLIST_KEYRING + + config IMA_LOAD_X509 + bool "Load X509 certificate onto the '.ima' trusted keyring" +- depends on IMA_TRUSTED_KEYRING ++ depends on INTEGRITY_TRUSTED_KEYRING + default n + help + File signature verification is based on the public keys +@@ -303,7 +297,6 @@ config IMA_APPRAISE_SIGNED_INIT + + config IMA_MEASURE_ASYMMETRIC_KEYS + bool +- depends on IMA + depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y + default y + +@@ -322,7 +315,8 @@ config IMA_SECURE_AND_OR_TRUSTED_BOOT + + config IMA_DISABLE_HTABLE + bool "Disable htable to allow measurement of duplicate records" +- depends on IMA + default n + help + This option disables htable to allow measurement of duplicate records. 
++ ++endif +diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c +index a4dba0b751e76..1bbd1d077dfd9 100644 +--- a/sound/soc/soc-utils.c ++++ b/sound/soc/soc-utils.c +@@ -217,6 +217,7 @@ int snd_soc_dai_is_dummy(struct snd_soc_dai *dai) + return 1; + return 0; + } ++EXPORT_SYMBOL_GPL(snd_soc_dai_is_dummy); + + int snd_soc_component_is_dummy(struct snd_soc_component *component) + { +diff --git a/sound/soc/tegra/tegra_audio_graph_card.c b/sound/soc/tegra/tegra_audio_graph_card.c +index 1f2c5018bf5ac..4737e776d3837 100644 +--- a/sound/soc/tegra/tegra_audio_graph_card.c ++++ b/sound/soc/tegra/tegra_audio_graph_card.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #define MAX_PLLA_OUT0_DIV 128 + +@@ -44,6 +45,21 @@ struct tegra_audio_cdata { + unsigned int plla_out0_rates[NUM_RATE_TYPE]; + }; + ++static bool need_clk_update(struct snd_soc_dai *dai) ++{ ++ if (snd_soc_dai_is_dummy(dai) || ++ !dai->driver->ops || ++ !dai->driver->name) ++ return false; ++ ++ if (strstr(dai->driver->name, "I2S") || ++ strstr(dai->driver->name, "DMIC") || ++ strstr(dai->driver->name, "DSPK")) ++ return true; ++ ++ return false; ++} ++ + /* Setup PLL clock as per the given sample rate */ + static int tegra_audio_graph_update_pll(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +@@ -140,19 +156,7 @@ static int tegra_audio_graph_hw_params(struct snd_pcm_substream *substream, + struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); + int err; + +- /* +- * This gets called for each DAI link (FE or BE) when DPCM is used. +- * We may not want to update PLLA rate for each call. So PLLA update +- * must be restricted to external I/O links (I2S, DMIC or DSPK) since +- * they actually depend on it. I/O modules update their clocks in +- * hw_param() of their respective component driver and PLLA rate +- * update here helps them to derive appropriate rates. +- * +- * TODO: When more HW accelerators get added (like sample rate +- * converter, volume gain controller etc., which don't really +- * depend on PLLA) we need a better way to filter here. +- */ +- if (cpu_dai->driver->ops && rtd->dai_link->no_pcm) { ++ if (need_clk_update(cpu_dai)) { + err = tegra_audio_graph_update_pll(substream, params); + if (err) + return err; +diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h +index 53bc487947197..92dbe89dafbf5 100644 +--- a/tools/include/uapi/linux/bpf.h ++++ b/tools/include/uapi/linux/bpf.h +@@ -3112,6 +3112,11 @@ union bpf_attr { + * **BPF_FIB_LOOKUP_OUTPUT** + * Perform lookup from an egress perspective (default is + * ingress). ++ * **BPF_FIB_LOOKUP_SKIP_NEIGH** ++ * Skip the neighbour table lookup. *params*->dmac ++ * and *params*->smac will not be set as output. A common ++ * use case is to call **bpf_redirect_neigh**\ () after ++ * doing **bpf_fib_lookup**\ (). + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** tc cls_act programs. 
+@@ -6678,6 +6683,7 @@ struct bpf_raw_tracepoint_args { + enum { + BPF_FIB_LOOKUP_DIRECT = (1U << 0), + BPF_FIB_LOOKUP_OUTPUT = (1U << 1), ++ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), + }; + + enum { +diff --git a/tools/testing/selftests/netfilter/.gitignore b/tools/testing/selftests/netfilter/.gitignore +index 4cb887b574138..4b2928e1c19d8 100644 +--- a/tools/testing/selftests/netfilter/.gitignore ++++ b/tools/testing/selftests/netfilter/.gitignore +@@ -1,3 +1,4 @@ + # SPDX-License-Identifier: GPL-2.0-only + nf-queue + connect_close ++audit_logread +diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile +index 3686bfa6c58d7..321db8850da00 100644 +--- a/tools/testing/selftests/netfilter/Makefile ++++ b/tools/testing/selftests/netfilter/Makefile +@@ -6,13 +6,13 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \ + nft_concat_range.sh nft_conntrack_helper.sh \ + nft_queue.sh nft_meta.sh nf_nat_edemux.sh \ + ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \ +- conntrack_vrf.sh nft_synproxy.sh rpath.sh ++ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh + + HOSTPKG_CONFIG := pkg-config + + CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null) + LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl) + +-TEST_GEN_FILES = nf-queue connect_close ++TEST_GEN_FILES = nf-queue connect_close audit_logread + + include ../lib.mk +diff --git a/tools/testing/selftests/netfilter/audit_logread.c b/tools/testing/selftests/netfilter/audit_logread.c +new file mode 100644 +index 0000000000000..a0a880fc2d9de +--- /dev/null ++++ b/tools/testing/selftests/netfilter/audit_logread.c +@@ -0,0 +1,165 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#define _GNU_SOURCE ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static int fd; ++ ++#define MAX_AUDIT_MESSAGE_LENGTH 8970 ++struct audit_message { ++ struct nlmsghdr nlh; ++ union { ++ struct audit_status s; ++ char data[MAX_AUDIT_MESSAGE_LENGTH]; ++ } u; ++}; ++ ++int audit_recv(int fd, struct audit_message *rep) ++{ ++ struct sockaddr_nl addr; ++ socklen_t addrlen = sizeof(addr); ++ int ret; ++ ++ do { ++ ret = recvfrom(fd, rep, sizeof(*rep), 0, ++ (struct sockaddr *)&addr, &addrlen); ++ } while (ret < 0 && errno == EINTR); ++ ++ if (ret < 0 || ++ addrlen != sizeof(addr) || ++ addr.nl_pid != 0 || ++ rep->nlh.nlmsg_type == NLMSG_ERROR) /* short-cut for now */ ++ return -1; ++ ++ return ret; ++} ++ ++int audit_send(int fd, uint16_t type, uint32_t key, uint32_t val) ++{ ++ static int seq = 0; ++ struct audit_message msg = { ++ .nlh = { ++ .nlmsg_len = NLMSG_SPACE(sizeof(msg.u.s)), ++ .nlmsg_type = type, ++ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK, ++ .nlmsg_seq = ++seq, ++ }, ++ .u.s = { ++ .mask = key, ++ .enabled = key == AUDIT_STATUS_ENABLED ? val : 0, ++ .pid = key == AUDIT_STATUS_PID ? 
val : 0, ++ } ++ }; ++ struct sockaddr_nl addr = { ++ .nl_family = AF_NETLINK, ++ }; ++ int ret; ++ ++ do { ++ ret = sendto(fd, &msg, msg.nlh.nlmsg_len, 0, ++ (struct sockaddr *)&addr, sizeof(addr)); ++ } while (ret < 0 && errno == EINTR); ++ ++ if (ret != (int)msg.nlh.nlmsg_len) ++ return -1; ++ return 0; ++} ++ ++int audit_set(int fd, uint32_t key, uint32_t val) ++{ ++ struct audit_message rep = { 0 }; ++ int ret; ++ ++ ret = audit_send(fd, AUDIT_SET, key, val); ++ if (ret) ++ return ret; ++ ++ ret = audit_recv(fd, &rep); ++ if (ret < 0) ++ return ret; ++ return 0; ++} ++ ++int readlog(int fd) ++{ ++ struct audit_message rep = { 0 }; ++ int ret = audit_recv(fd, &rep); ++ const char *sep = ""; ++ char *k, *v; ++ ++ if (ret < 0) ++ return ret; ++ ++ if (rep.nlh.nlmsg_type != AUDIT_NETFILTER_CFG) ++ return 0; ++ ++ /* skip the initial "audit(...): " part */ ++ strtok(rep.u.data, " "); ++ ++ while ((k = strtok(NULL, "="))) { ++ v = strtok(NULL, " "); ++ ++ /* these vary and/or are uninteresting, ignore */ ++ if (!strcmp(k, "pid") || ++ !strcmp(k, "comm") || ++ !strcmp(k, "subj")) ++ continue; ++ ++ /* strip the varying sequence number */ ++ if (!strcmp(k, "table")) ++ *strchrnul(v, ':') = '\0'; ++ ++ printf("%s%s=%s", sep, k, v); ++ sep = " "; ++ } ++ if (*sep) { ++ printf("\n"); ++ fflush(stdout); ++ } ++ return 0; ++} ++ ++void cleanup(int sig) ++{ ++ audit_set(fd, AUDIT_STATUS_ENABLED, 0); ++ close(fd); ++ if (sig) ++ exit(0); ++} ++ ++int main(int argc, char **argv) ++{ ++ struct sigaction act = { ++ .sa_handler = cleanup, ++ }; ++ ++ fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_AUDIT); ++ if (fd < 0) { ++ perror("Can't open netlink socket"); ++ return -1; ++ } ++ ++ if (sigaction(SIGTERM, &act, NULL) < 0 || ++ sigaction(SIGINT, &act, NULL) < 0) { ++ perror("Can't set signal handler"); ++ close(fd); ++ return -1; ++ } ++ ++ audit_set(fd, AUDIT_STATUS_ENABLED, 1); ++ audit_set(fd, AUDIT_STATUS_PID, getpid()); ++ ++ while (1) ++ readlog(fd); ++} +diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config +index 4faf2ce021d90..7c42b1b2c69b4 100644 +--- a/tools/testing/selftests/netfilter/config ++++ b/tools/testing/selftests/netfilter/config +@@ -6,3 +6,4 @@ CONFIG_NFT_REDIR=m + CONFIG_NFT_MASQ=m + CONFIG_NFT_FLOW_OFFLOAD=m + CONFIG_NF_CT_NETLINK=m ++CONFIG_AUDIT=y +diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh +new file mode 100755 +index 0000000000000..bb34329e02a7f +--- /dev/null ++++ b/tools/testing/selftests/netfilter/nft_audit.sh +@@ -0,0 +1,193 @@ ++#!/bin/bash ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Check that audit logs generated for nft commands are as expected. ++ ++SKIP_RC=4 ++RC=0 ++ ++nft --version >/dev/null 2>&1 || { ++ echo "SKIP: missing nft tool" ++ exit $SKIP_RC ++} ++ ++logfile=$(mktemp) ++rulefile=$(mktemp) ++echo "logging into $logfile" ++./audit_logread >"$logfile" & ++logread_pid=$! ++trap 'kill $logread_pid; rm -f $logfile $rulefile' EXIT ++exec 3<"$logfile" ++ ++do_test() { # (cmd, log) ++ echo -n "testing for cmd: $1 ... " ++ cat <&3 >/dev/null ++ $1 >/dev/null || exit 1 ++ sleep 0.1 ++ res=$(diff -a -u <(echo "$2") - <&3) ++ [ $? 
-eq 0 ] && { echo "OK"; return; } ++ echo "FAIL" ++ grep -v '^\(---\|+++\|@@\)' <<< "$res" ++ ((RC--)) ++} ++ ++nft flush ruleset ++ ++# adding tables, chains and rules ++ ++for table in t1 t2; do ++ do_test "nft add table $table" \ ++ "table=$table family=2 entries=1 op=nft_register_table" ++ ++ do_test "nft add chain $table c1" \ ++ "table=$table family=2 entries=1 op=nft_register_chain" ++ ++ do_test "nft add chain $table c2; add chain $table c3" \ ++ "table=$table family=2 entries=2 op=nft_register_chain" ++ ++ cmd="add rule $table c1 counter" ++ ++ do_test "nft $cmd" \ ++ "table=$table family=2 entries=1 op=nft_register_rule" ++ ++ do_test "nft $cmd; $cmd" \ ++ "table=$table family=2 entries=2 op=nft_register_rule" ++ ++ cmd="" ++ sep="" ++ for chain in c2 c3; do ++ for i in {1..3}; do ++ cmd+="$sep add rule $table $chain counter" ++ sep=";" ++ done ++ done ++ do_test "nft $cmd" \ ++ "table=$table family=2 entries=6 op=nft_register_rule" ++done ++ ++for ((i = 0; i < 500; i++)); do ++ echo "add rule t2 c3 counter accept comment \"rule $i\"" ++done >$rulefile ++do_test "nft -f $rulefile" \ ++'table=t2 family=2 entries=500 op=nft_register_rule' ++ ++# adding sets and elements ++ ++settype='type inet_service; counter' ++setelem='{ 22, 80, 443 }' ++setblock="{ $settype; elements = $setelem; }" ++do_test "nft add set t1 s $setblock" \ ++"table=t1 family=2 entries=4 op=nft_register_set" ++ ++do_test "nft add set t1 s2 $setblock; add set t1 s3 { $settype; }" \ ++"table=t1 family=2 entries=5 op=nft_register_set" ++ ++do_test "nft add element t1 s3 $setelem" \ ++"table=t1 family=2 entries=3 op=nft_register_setelem" ++ ++# adding counters ++ ++do_test 'nft add counter t1 c1' \ ++'table=t1 family=2 entries=1 op=nft_register_obj' ++ ++do_test 'nft add counter t2 c1; add counter t2 c2' \ ++'table=t2 family=2 entries=2 op=nft_register_obj' ++ ++# adding/updating quotas ++ ++do_test 'nft add quota t1 q1 { 10 bytes }' \ ++'table=t1 family=2 entries=1 op=nft_register_obj' ++ ++do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \ ++'table=t2 family=2 entries=2 op=nft_register_obj' ++ ++# changing the quota value triggers obj update path ++do_test 'nft add quota t1 q1 { 20 bytes }' \ ++'table=t1 family=2 entries=1 op=nft_register_obj' ++ ++# resetting rules ++ ++do_test 'nft reset rules t1 c2' \ ++'table=t1 family=2 entries=3 op=nft_reset_rule' ++ ++do_test 'nft reset rules table t1' \ ++'table=t1 family=2 entries=3 op=nft_reset_rule ++table=t1 family=2 entries=3 op=nft_reset_rule ++table=t1 family=2 entries=3 op=nft_reset_rule' ++ ++do_test 'nft reset rules t2 c3' \ ++'table=t2 family=2 entries=189 op=nft_reset_rule ++table=t2 family=2 entries=188 op=nft_reset_rule ++table=t2 family=2 entries=126 op=nft_reset_rule' ++ ++do_test 'nft reset rules t2' \ ++'table=t2 family=2 entries=3 op=nft_reset_rule ++table=t2 family=2 entries=3 op=nft_reset_rule ++table=t2 family=2 entries=186 op=nft_reset_rule ++table=t2 family=2 entries=188 op=nft_reset_rule ++table=t2 family=2 entries=129 op=nft_reset_rule' ++ ++do_test 'nft reset rules' \ ++'table=t1 family=2 entries=3 op=nft_reset_rule ++table=t1 family=2 entries=3 op=nft_reset_rule ++table=t1 family=2 entries=3 op=nft_reset_rule ++table=t2 family=2 entries=3 op=nft_reset_rule ++table=t2 family=2 entries=3 op=nft_reset_rule ++table=t2 family=2 entries=180 op=nft_reset_rule ++table=t2 family=2 entries=188 op=nft_reset_rule ++table=t2 family=2 entries=135 op=nft_reset_rule' ++ ++# resetting sets and elements ++ ++elem=(22 ,80 ,443) ++relem="" 
++for i in {1..3}; do ++ relem+="${elem[((i - 1))]}" ++ do_test "nft reset element t1 s { $relem }" \ ++ "table=t1 family=2 entries=$i op=nft_reset_setelem" ++done ++ ++do_test 'nft reset set t1 s' \ ++'table=t1 family=2 entries=3 op=nft_reset_setelem' ++ ++# deleting rules ++ ++readarray -t handles < <(nft -a list chain t1 c1 | \ ++ sed -n 's/.*counter.* handle \(.*\)$/\1/p') ++ ++do_test "nft delete rule t1 c1 handle ${handles[0]}" \ ++'table=t1 family=2 entries=1 op=nft_unregister_rule' ++ ++cmd='delete rule t1 c1 handle' ++do_test "nft $cmd ${handles[1]}; $cmd ${handles[2]}" \ ++'table=t1 family=2 entries=2 op=nft_unregister_rule' ++ ++do_test 'nft flush chain t1 c2' \ ++'table=t1 family=2 entries=3 op=nft_unregister_rule' ++ ++do_test 'nft flush table t2' \ ++'table=t2 family=2 entries=509 op=nft_unregister_rule' ++ ++# deleting chains ++ ++do_test 'nft delete chain t2 c2' \ ++'table=t2 family=2 entries=1 op=nft_unregister_chain' ++ ++# deleting sets and elements ++ ++do_test 'nft delete element t1 s { 22 }' \ ++'table=t1 family=2 entries=1 op=nft_unregister_setelem' ++ ++do_test 'nft delete element t1 s { 80, 443 }' \ ++'table=t1 family=2 entries=2 op=nft_unregister_setelem' ++ ++do_test 'nft flush set t1 s2' \ ++'table=t1 family=2 entries=3 op=nft_unregister_setelem' ++ ++do_test 'nft delete set t1 s2' \ ++'table=t1 family=2 entries=1 op=nft_unregister_set' ++ ++do_test 'nft delete set t1 s3' \ ++'table=t1 family=2 entries=1 op=nft_unregister_set' ++ ++exit $RC diff --git a/patch/kernel/archive/odroidxu4-6.1/patch-6.1.57-58.patch b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.57-58.patch new file mode 100644 index 0000000000..b890875ff1 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.57-58.patch @@ -0,0 +1,389 @@ +diff --git a/Makefile b/Makefile +index b435b56594f0f..ce1eec0b5010d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 1 +-SUBLEVEL = 57 ++SUBLEVEL = 58 + EXTRAVERSION = + NAME = Curry Ramen + +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index 5a976fa343df1..3bb530d4bb5ce 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -93,10 +93,12 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq, + dreq->max_count = dreq_len; + if (dreq->count > dreq_len) + dreq->count = dreq_len; +- } + +- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error) +- dreq->error = hdr->error; ++ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) ++ dreq->error = hdr->error; ++ else /* Clear outstanding error if this is EOF */ ++ dreq->error = 0; ++ } + } + + static void +@@ -118,18 +120,6 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq, + dreq->count = dreq_len; + } + +-static void nfs_direct_truncate_request(struct nfs_direct_req *dreq, +- struct nfs_page *req) +-{ +- loff_t offs = req_offset(req); +- size_t req_start = (size_t)(offs - dreq->io_start); +- +- if (req_start < dreq->max_count) +- dreq->max_count = req_start; +- if (req_start < dreq->count) +- dreq->count = req_start; +-} +- + /** + * nfs_swap_rw - NFS address space operation for swap I/O + * @iocb: target I/O control block +@@ -500,9 +490,7 @@ static void nfs_direct_add_page_head(struct list_head *list, + kref_get(&head->wb_kref); + } + +-static void nfs_direct_join_group(struct list_head *list, +- struct nfs_commit_info *cinfo, +- struct inode *inode) ++static void nfs_direct_join_group(struct list_head *list, struct inode *inode) + { + struct nfs_page *req, *subreq; + +@@ -524,7 +512,7 @@ static void 
nfs_direct_join_group(struct list_head *list, + nfs_release_request(subreq); + } + } while ((subreq = subreq->wb_this_page) != req); +- nfs_join_page_group(req, cinfo, inode); ++ nfs_join_page_group(req, inode); + } + } + +@@ -542,15 +530,20 @@ nfs_direct_write_scan_commit_list(struct inode *inode, + static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) + { + struct nfs_pageio_descriptor desc; +- struct nfs_page *req; ++ struct nfs_page *req, *tmp; + LIST_HEAD(reqs); + struct nfs_commit_info cinfo; ++ LIST_HEAD(failed); + + nfs_init_cinfo_from_dreq(&cinfo, dreq); + nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); + +- nfs_direct_join_group(&reqs, &cinfo, dreq->inode); ++ nfs_direct_join_group(&reqs, dreq->inode); + ++ dreq->count = 0; ++ dreq->max_count = 0; ++ list_for_each_entry(req, &reqs, wb_list) ++ dreq->max_count += req->wb_bytes; + nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo); + get_dreq(dreq); + +@@ -558,40 +551,27 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) + &nfs_direct_write_completion_ops); + desc.pg_dreq = dreq; + +- while (!list_empty(&reqs)) { +- req = nfs_list_entry(reqs.next); ++ list_for_each_entry_safe(req, tmp, &reqs, wb_list) { + /* Bump the transmission count */ + req->wb_nio++; + if (!nfs_pageio_add_request(&desc, req)) { +- spin_lock(&dreq->lock); +- if (dreq->error < 0) { +- desc.pg_error = dreq->error; +- } else if (desc.pg_error != -EAGAIN) { +- dreq->flags = 0; +- if (!desc.pg_error) +- desc.pg_error = -EIO; ++ nfs_list_move_request(req, &failed); ++ spin_lock(&cinfo.inode->i_lock); ++ dreq->flags = 0; ++ if (desc.pg_error < 0) + dreq->error = desc.pg_error; +- } else +- dreq->flags = NFS_ODIRECT_RESCHED_WRITES; +- spin_unlock(&dreq->lock); +- break; ++ else ++ dreq->error = -EIO; ++ spin_unlock(&cinfo.inode->i_lock); + } + nfs_release_request(req); + } + nfs_pageio_complete(&desc); + +- while (!list_empty(&reqs)) { +- req = nfs_list_entry(reqs.next); ++ while (!list_empty(&failed)) { ++ req = nfs_list_entry(failed.next); + nfs_list_remove_request(req); + nfs_unlock_and_release_request(req); +- if (desc.pg_error == -EAGAIN) { +- nfs_mark_request_commit(req, NULL, &cinfo, 0); +- } else { +- spin_lock(&dreq->lock); +- nfs_direct_truncate_request(dreq, req); +- spin_unlock(&dreq->lock); +- nfs_release_request(req); +- } + } + + if (put_dreq(dreq)) +@@ -611,6 +591,8 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) + if (status < 0) { + /* Errors in commit are fatal */ + dreq->error = status; ++ dreq->max_count = 0; ++ dreq->count = 0; + dreq->flags = NFS_ODIRECT_DONE; + } else { + status = dreq->error; +@@ -621,12 +603,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) + while (!list_empty(&data->pages)) { + req = nfs_list_entry(data->pages.next); + nfs_list_remove_request(req); +- if (status < 0) { +- spin_lock(&dreq->lock); +- nfs_direct_truncate_request(dreq, req); +- spin_unlock(&dreq->lock); +- nfs_release_request(req); +- } else if (!nfs_write_match_verf(verf, req)) { ++ if (status >= 0 && !nfs_write_match_verf(verf, req)) { + dreq->flags = NFS_ODIRECT_RESCHED_WRITES; + /* + * Despite the reboot, the write was successful, +@@ -634,7 +611,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) + */ + req->wb_nio = 0; + nfs_mark_request_commit(req, NULL, &cinfo, 0); +- } else ++ } else /* Error or match */ + nfs_release_request(req); + nfs_unlock_and_release_request(req); + } +@@ -687,7 +664,6 @@ static void 
nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq) + while (!list_empty(&reqs)) { + req = nfs_list_entry(reqs.next); + nfs_list_remove_request(req); +- nfs_direct_truncate_request(dreq, req); + nfs_release_request(req); + nfs_unlock_and_release_request(req); + } +@@ -737,8 +713,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) + } + + nfs_direct_count_bytes(dreq, hdr); +- if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) && +- !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { ++ if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) { + if (!dreq->flags) + dreq->flags = NFS_ODIRECT_DO_COMMIT; + flags = dreq->flags; +@@ -782,23 +757,18 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error) + static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr) + { + struct nfs_direct_req *dreq = hdr->dreq; +- struct nfs_page *req; +- struct nfs_commit_info cinfo; + + trace_nfs_direct_write_reschedule_io(dreq); + +- nfs_init_cinfo_from_dreq(&cinfo, dreq); + spin_lock(&dreq->lock); +- if (dreq->error == 0) ++ if (dreq->error == 0) { + dreq->flags = NFS_ODIRECT_RESCHED_WRITES; +- set_bit(NFS_IOHDR_REDO, &hdr->flags); +- spin_unlock(&dreq->lock); +- while (!list_empty(&hdr->pages)) { +- req = nfs_list_entry(hdr->pages.next); +- nfs_list_remove_request(req); +- nfs_unlock_request(req); +- nfs_mark_request_commit(req, NULL, &cinfo, 0); ++ /* fake unstable write to let common nfs resend pages */ ++ hdr->verf.committed = NFS_UNSTABLE; ++ hdr->good_bytes = hdr->args.offset + hdr->args.count - ++ hdr->io_start; + } ++ spin_unlock(&dreq->lock); + } + + static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = { +@@ -826,11 +796,9 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, + { + struct nfs_pageio_descriptor desc; + struct inode *inode = dreq->inode; +- struct nfs_commit_info cinfo; + ssize_t result = 0; + size_t requested_bytes = 0; + size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE); +- bool defer = false; + + trace_nfs_direct_write_schedule_iovec(dreq); + +@@ -871,39 +839,19 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, + break; + } + +- pgbase = 0; +- bytes -= req_len; +- requested_bytes += req_len; +- pos += req_len; +- dreq->bytes_left -= req_len; +- +- if (defer) { +- nfs_mark_request_commit(req, NULL, &cinfo, 0); +- continue; +- } +- + nfs_lock_request(req); + req->wb_index = pos >> PAGE_SHIFT; + req->wb_offset = pos & ~PAGE_MASK; +- if (nfs_pageio_add_request(&desc, req)) +- continue; +- +- /* Exit on hard errors */ +- if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) { ++ if (!nfs_pageio_add_request(&desc, req)) { + result = desc.pg_error; + nfs_unlock_and_release_request(req); + break; + } +- +- /* If the error is soft, defer remaining requests */ +- nfs_init_cinfo_from_dreq(&cinfo, dreq); +- spin_lock(&dreq->lock); +- dreq->flags = NFS_ODIRECT_RESCHED_WRITES; +- spin_unlock(&dreq->lock); +- nfs_unlock_request(req); +- nfs_mark_request_commit(req, NULL, &cinfo, 0); +- desc.pg_error = 0; +- defer = true; ++ pgbase = 0; ++ bytes -= req_len; ++ requested_bytes += req_len; ++ pos += req_len; ++ dreq->bytes_left -= req_len; + } + nfs_direct_release_pages(pagevec, npages); + kvfree(pagevec); +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 0a8aed0ac9945..f41d24b54fd1f 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -58,8 +58,7 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; + static const struct 
nfs_commit_completion_ops nfs_commit_completion_ops; + static const struct nfs_rw_ops nfs_rw_write_ops; + static void nfs_inode_remove_request(struct nfs_page *req); +-static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, +- struct nfs_page *req); ++static void nfs_clear_request_commit(struct nfs_page *req); + static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, + struct inode *inode); + static struct nfs_page * +@@ -503,8 +502,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, + * the (former) group. All subrequests are removed from any write or commit + * lists, unlinked from the group and destroyed. + */ +-void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, +- struct inode *inode) ++void ++nfs_join_page_group(struct nfs_page *head, struct inode *inode) + { + struct nfs_page *subreq; + struct nfs_page *destroy_list = NULL; +@@ -534,7 +533,7 @@ void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, + * Commit list removal accounting is done after locks are dropped */ + subreq = head; + do { +- nfs_clear_request_commit(cinfo, subreq); ++ nfs_clear_request_commit(subreq); + subreq = subreq->wb_this_page; + } while (subreq != head); + +@@ -568,10 +567,8 @@ nfs_lock_and_join_requests(struct page *page) + { + struct inode *inode = page_file_mapping(page)->host; + struct nfs_page *head; +- struct nfs_commit_info cinfo; + int ret; + +- nfs_init_cinfo_from_inode(&cinfo, inode); + /* + * A reference is taken only on the head request which acts as a + * reference to the whole page group - the group will not be destroyed +@@ -588,7 +585,7 @@ nfs_lock_and_join_requests(struct page *page) + return ERR_PTR(ret); + } + +- nfs_join_page_group(head, &cinfo, inode); ++ nfs_join_page_group(head, inode); + + return head; + } +@@ -959,16 +956,18 @@ nfs_clear_page_commit(struct page *page) + } + + /* Called holding the request lock on @req */ +-static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, +- struct nfs_page *req) ++static void ++nfs_clear_request_commit(struct nfs_page *req) + { + if (test_bit(PG_CLEAN, &req->wb_flags)) { + struct nfs_open_context *ctx = nfs_req_openctx(req); + struct inode *inode = d_inode(ctx->dentry); ++ struct nfs_commit_info cinfo; + ++ nfs_init_cinfo_from_inode(&cinfo, inode); + mutex_lock(&NFS_I(inode)->commit_mutex); +- if (!pnfs_clear_request_commit(req, cinfo)) { +- nfs_request_remove_commit_list(req, cinfo); ++ if (!pnfs_clear_request_commit(req, &cinfo)) { ++ nfs_request_remove_commit_list(req, &cinfo); + } + mutex_unlock(&NFS_I(inode)->commit_mutex); + nfs_clear_page_commit(req->wb_page); +diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h +index e39a8cf8b1797..ba7e2e4b09264 100644 +--- a/include/linux/nfs_page.h ++++ b/include/linux/nfs_page.h +@@ -145,9 +145,7 @@ extern void nfs_unlock_request(struct nfs_page *req); + extern void nfs_unlock_and_release_request(struct nfs_page *); + extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); + extern int nfs_page_group_lock_subrequests(struct nfs_page *head); +-extern void nfs_join_page_group(struct nfs_page *head, +- struct nfs_commit_info *cinfo, +- struct inode *inode); ++extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode); + extern int nfs_page_group_lock(struct nfs_page *); + extern void nfs_page_group_unlock(struct nfs_page *); + extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +diff --git a/lib/test_meminit.c b/lib/test_meminit.c 
+index 0ae35223d7733..85d8dd8e01dc4 100644 +--- a/lib/test_meminit.c ++++ b/lib/test_meminit.c +@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures) + int failures = 0, num_tests = 0; + int i; + +- for (i = 0; i <= MAX_ORDER; i++) ++ for (i = 0; i < MAX_ORDER; i++) + num_tests += do_alloc_pages_order(i, &failures); + + REPORT_FAILURES_IN_FN(); diff --git a/patch/kernel/archive/odroidxu4-6.1/patch-6.1.58-59.patch b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.58-59.patch new file mode 100644 index 0000000000..5d22b4125d --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.58-59.patch @@ -0,0 +1,4902 @@ +diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml +index 33b90e975e33c..ea7db3618b23e 100644 +--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml ++++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml +@@ -31,8 +31,9 @@ properties: + - const: renesas,rzg2l-irqc + + '#interrupt-cells': +- description: The first cell should contain external interrupt number (IRQ0-7) and the +- second cell is used to specify the flag. ++ description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the ++ include/dt-bindings/interrupt-controller/irqc-rzg2l.h and the second ++ cell is used to specify the flag. + const: 2 + + '#address-cells': +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index f5f7a464605f9..b47b3d0ce5596 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -967,6 +967,21 @@ tcp_tw_reuse - INTEGER + tcp_window_scaling - BOOLEAN + Enable window scaling as defined in RFC1323. + ++tcp_shrink_window - BOOLEAN ++ This changes how the TCP receive window is calculated. ++ ++ RFC 7323, section 2.4, says there are instances when a retracted ++ window can be offered, and that TCP implementations MUST ensure ++ that they handle a shrinking window, as specified in RFC 1122. ++ ++ - 0 - Disabled. The window is never shrunk. ++ - 1 - Enabled. The window is shrunk when necessary to remain within ++ the memory limit set by autotuning (sk_rcvbuf). ++ This only occurs if a non-zero receive window ++ scaling factor is also in effect. ++ ++ Default: 0 ++ + tcp_wmem - vector of 3 INTEGERs: min, default, max + min: Amount of memory reserved for send buffers for TCP sockets. + Each TCP socket has rights to use it due to fact of its birth. 
+diff --git a/Makefile b/Makefile +index ce1eec0b5010d..4ad29c852e5f8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 1 +-SUBLEVEL = 58 ++SUBLEVEL = 59 + EXTRAVERSION = + NAME = Curry Ramen + +diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +index dec85d2548384..5117b2e7985af 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts ++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +@@ -48,7 +48,7 @@ + + memory@40000000 { + device_type = "memory"; +- reg = <0 0x40000000 0 0x80000000>; ++ reg = <0 0x40000000 0x2 0x00000000>; + }; + + reserved-memory { +@@ -56,13 +56,8 @@ + #size-cells = <2>; + ranges; + +- /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ +- bl31_secmon_reserved: secmon@54600000 { +- no-map; +- reg = <0 0x54600000 0x0 0x200000>; +- }; +- +- /* 12 MiB reserved for OP-TEE (BL32) ++ /* ++ * 12 MiB reserved for OP-TEE (BL32) + * +-----------------------+ 0x43e0_0000 + * | SHMEM 2MiB | + * +-----------------------+ 0x43c0_0000 +@@ -75,6 +70,34 @@ + no-map; + reg = <0 0x43200000 0 0x00c00000>; + }; ++ ++ scp_mem: memory@50000000 { ++ compatible = "shared-dma-pool"; ++ reg = <0 0x50000000 0 0x2900000>; ++ no-map; ++ }; ++ ++ vpu_mem: memory@53000000 { ++ compatible = "shared-dma-pool"; ++ reg = <0 0x53000000 0 0x1400000>; /* 20 MB */ ++ }; ++ ++ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ ++ bl31_secmon_mem: memory@54600000 { ++ no-map; ++ reg = <0 0x54600000 0x0 0x200000>; ++ }; ++ ++ snd_dma_mem: memory@60000000 { ++ compatible = "shared-dma-pool"; ++ reg = <0 0x60000000 0 0x1100000>; ++ no-map; ++ }; ++ ++ apu_mem: memory@62000000 { ++ compatible = "shared-dma-pool"; ++ reg = <0 0x62000000 0 0x1400000>; /* 20 MB */ ++ }; + }; + }; + +diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi +index 2c2b946b614bf..ef2764a595eda 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi +@@ -229,6 +229,7 @@ + interrupts = ; + cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>, + <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>; ++ status = "fail"; + }; + + dmic_codec: dmic-codec { +diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi +index f049fb42e3ca8..de794a5078dfc 100644 +--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi +@@ -3701,7 +3701,7 @@ + + pdc: interrupt-controller@b220000 { + compatible = "qcom,sm8150-pdc", "qcom,pdc"; +- reg = <0 0x0b220000 0 0x400>; ++ reg = <0 0x0b220000 0 0x30000>; + qcom,pdc-ranges = <0 480 94>, <94 609 31>, + <125 63 1>; + #interrupt-cells = <2>; +diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h +index 1a89ebdc3acc9..0238e6bd0d6c1 100644 +--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h ++++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h +@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte) + + #define pte_wrprotect pte_wrprotect + ++static inline int pte_read(pte_t pte) ++{ ++ return (pte_val(pte) & _PAGE_RO) != _PAGE_NA; ++} ++ ++#define pte_read pte_read ++ + static inline int pte_write(pte_t pte) + { + return !(pte_val(pte) & _PAGE_RO); +diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h +index 879e9a6e5a870..00a003d367523 100644 +--- a/arch/powerpc/include/asm/nohash/64/pgtable.h ++++ b/arch/powerpc/include/asm/nohash/64/pgtable.h +@@ -197,7 
+197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + { + unsigned long old; + +- if (pte_young(*ptep)) ++ if (!pte_young(*ptep)) + return 0; + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); + return (old & _PAGE_ACCESSED) != 0; +diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h +index d9067dfc531cc..3d7dce90863c2 100644 +--- a/arch/powerpc/include/asm/nohash/pgtable.h ++++ b/arch/powerpc/include/asm/nohash/pgtable.h +@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte) + return pte_val(pte) & _PAGE_RW; + } + #endif ++#ifndef pte_read + static inline int pte_read(pte_t pte) { return 1; } ++#endif + static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } + static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } + static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S +index 3fc7c9886bb70..d4fc546762db4 100644 +--- a/arch/powerpc/kernel/entry_32.S ++++ b/arch/powerpc/kernel/entry_32.S +@@ -135,8 +135,9 @@ ret_from_syscall: + lis r4,icache_44x_need_flush@ha + lwz r5,icache_44x_need_flush@l(r4) + cmplwi cr0,r5,0 +- bne- 2f ++ bne- .L44x_icache_flush + #endif /* CONFIG_PPC_47x */ ++.L44x_icache_flush_return: + kuep_unlock + lwz r4,_LINK(r1) + lwz r5,_CCR(r1) +@@ -170,10 +171,11 @@ syscall_exit_finish: + b 1b + + #ifdef CONFIG_44x +-2: li r7,0 ++.L44x_icache_flush: ++ li r7,0 + iccci r0,r0 + stw r7,icache_44x_need_flush@l(r4) +- b 1b ++ b .L44x_icache_flush_return + #endif /* CONFIG_44x */ + + .globl ret_from_fork +diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c +index f2417ac54edd6..8f5d3c57d58ad 100644 +--- a/arch/riscv/net/bpf_jit_comp64.c ++++ b/arch/riscv/net/bpf_jit_comp64.c +@@ -236,7 +236,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx) + emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx); + /* Set return value. */ + if (!is_tail_call) +- emit_mv(RV_REG_A0, RV_REG_A5, ctx); ++ emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx); + emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA, + is_tail_call ? 
4 : 0, /* skip TCC init */ + ctx); +@@ -428,12 +428,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) + *rd = RV_REG_T2; + } + +-static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr, ++static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr, + struct rv_jit_context *ctx) + { + s64 upper, lower; + +- if (rvoff && is_21b_int(rvoff) && !force_jalr) { ++ if (rvoff && fixed_addr && is_21b_int(rvoff)) { + emit(rv_jal(rd, rvoff >> 1), ctx); + return 0; + } else if (in_auipc_jalr_range(rvoff)) { +@@ -454,24 +454,17 @@ static bool is_signed_bpf_cond(u8 cond) + cond == BPF_JSGE || cond == BPF_JSLE; + } + +-static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx) ++static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx) + { + s64 off = 0; + u64 ip; +- u8 rd; +- int ret; + + if (addr && ctx->insns) { + ip = (u64)(long)(ctx->insns + ctx->ninsns); + off = addr - ip; + } + +- ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx); +- if (ret) +- return ret; +- rd = bpf_to_rv_reg(BPF_REG_0, ctx); +- emit_mv(rd, RV_REG_A0, ctx); +- return 0; ++ return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx); + } + + static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, +@@ -913,7 +906,7 @@ out_be: + /* JUMP off */ + case BPF_JMP | BPF_JA: + rvoff = rv_offset(i, off, ctx); +- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); ++ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); + if (ret) + return ret; + break; +@@ -1032,17 +1025,21 @@ out_be: + /* function call */ + case BPF_JMP | BPF_CALL: + { +- bool fixed; ++ bool fixed_addr; + u64 addr; + + mark_call(ctx); +- ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr, +- &fixed); ++ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, ++ &addr, &fixed_addr); + if (ret < 0) + return ret; +- ret = emit_call(fixed, addr, ctx); ++ ++ ret = emit_call(addr, fixed_addr, ctx); + if (ret) + return ret; ++ ++ if (insn->src_reg != BPF_PSEUDO_CALL) ++ emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx); + break; + } + /* tail call */ +@@ -1057,7 +1054,7 @@ out_be: + break; + + rvoff = epilogue_offset(ctx); +- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx); ++ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); + if (ret) + return ret; + break; +diff --git a/arch/x86/events/utils.c b/arch/x86/events/utils.c +index 76b1f8bb0fd5f..dab4ed199227f 100644 +--- a/arch/x86/events/utils.c ++++ b/arch/x86/events/utils.c +@@ -1,5 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + #include ++#include + + #include "perf_event.h" + +@@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort, + * The LBR logs any address in the IP, even if the IP just + * faulted. This means userspace can control the from address. + * Ensure we don't blindly read any address by validating it is +- * a known text address. ++ * a known text address and not a vsyscall address. 
+ */ +- if (kernel_text_address(from)) { ++ if (kernel_text_address(from) && !in_gate_area_no_mm(from)) { + addr = (void *)from; + /* + * Assume we can get the maximum possible size +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 52d8c67d93081..016fb500b3a6f 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -635,12 +635,17 @@ + /* AMD Last Branch Record MSRs */ + #define MSR_AMD64_LBR_SELECT 0xc000010e + +-/* Fam 17h MSRs */ +-#define MSR_F17H_IRPERF 0xc00000e9 ++/* Zen4 */ ++#define MSR_ZEN4_BP_CFG 0xc001102e ++#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5 + ++/* Zen 2 */ + #define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3 + #define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1) + ++/* Fam 17h MSRs */ ++#define MSR_F17H_IRPERF 0xc00000e9 ++ + /* Fam 16h MSRs */ + #define MSR_F16H_L2I_PERF_CTL 0xc0010230 + #define MSR_F16H_L2I_PERF_CTR 0xc0010231 +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index d1d92897ed6be..46b7ee0ab01a4 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -270,6 +270,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, + u8 insn_buff[MAX_PATCH_LEN]; + + DPRINTK("alt table %px, -> %px", start, end); ++ ++ /* ++ * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using ++ * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here. ++ * During the process, KASAN becomes confused seeing partial LA57 ++ * conversion and triggers a false-positive out-of-bound report. ++ * ++ * Disable KASAN until the patching is complete. ++ */ ++ kasan_disable_current(); ++ + /* + * The scan order should be from start to end. A later scanned + * alternative code can overwrite previously scanned alternative code. +@@ -337,6 +348,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, + next: + optimize_nops(instr, a->instrlen); + } ++ ++ kasan_enable_current(); + } + + static inline bool is_jcc32(struct insn *insn) +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index f240c978d85e4..b66960358381b 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -80,6 +80,10 @@ static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); + ++static const int amd_erratum_1485[] = ++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf), ++ AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf)); ++ + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) + { + int osvw_id = *erratum++; +@@ -1125,6 +1129,10 @@ static void init_amd(struct cpuinfo_x86 *c) + pr_notice_once("AMD Zen1 DIV0 bug detected. 
Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } ++ ++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && ++ cpu_has_amd_erratum(c, amd_erratum_1485)) ++ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); + } + + #ifdef CONFIG_X86_32 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index ee4c812c8f6cc..8bb233d2d1e48 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1886,6 +1886,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"), + }, + }, ++ { ++ /* ++ * HP Pavilion Gaming Laptop 15-dk1xxx ++ * https://github.com/systemd/systemd/issues/28942 ++ */ ++ .callback = ec_honor_dsdt_gpe, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "HP"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"), ++ }, ++ }, + { + /* + * Samsung hardware +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index a7f12bdbc5e25..af6fa801d1ed8 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = { + DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), + }, + }, ++ { ++ .ident = "Asus ExpertBook B1402CBA", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"), ++ }, ++ }, + { + .ident = "Asus ExpertBook B2402CBA", + .matches = { +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 6a053cd0cf410..fbc231a3f7951 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -1943,6 +1943,96 @@ retry: + return rc; + } + ++/** ++ * ata_dev_power_set_standby - Set a device power mode to standby ++ * @dev: target device ++ * ++ * Issue a STANDBY IMMEDIATE command to set a device power mode to standby. ++ * For an HDD device, this spins down the disks. ++ * ++ * LOCKING: ++ * Kernel thread context (may sleep). ++ */ ++void ata_dev_power_set_standby(struct ata_device *dev) ++{ ++ unsigned long ap_flags = dev->link->ap->flags; ++ struct ata_taskfile tf; ++ unsigned int err_mask; ++ ++ /* Issue STANDBY IMMEDIATE command only if supported by the device */ ++ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ++ return; ++ ++ /* ++ * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5) ++ * causing some drives to spin up and down again. For these, do nothing ++ * if we are being called on shutdown. ++ */ ++ if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && ++ system_state == SYSTEM_POWER_OFF) ++ return; ++ ++ if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && ++ system_entering_hibernation()) ++ return; ++ ++ ata_tf_init(dev, &tf); ++ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; ++ tf.protocol = ATA_PROT_NODATA; ++ tf.command = ATA_CMD_STANDBYNOW1; ++ ++ ata_dev_notice(dev, "Entering standby power mode\n"); ++ ++ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); ++ if (err_mask) ++ ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n", ++ err_mask); ++} ++ ++/** ++ * ata_dev_power_set_active - Set a device power mode to active ++ * @dev: target device ++ * ++ * Issue a VERIFY command to enter to ensure that the device is in the ++ * active power mode. For a spun-down HDD (standby or idle power mode), ++ * the VERIFY command will complete after the disk spins up. ++ * ++ * LOCKING: ++ * Kernel thread context (may sleep). 
++ */ ++void ata_dev_power_set_active(struct ata_device *dev) ++{ ++ struct ata_taskfile tf; ++ unsigned int err_mask; ++ ++ /* ++ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only ++ * if supported by the device. ++ */ ++ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ++ return; ++ ++ ata_tf_init(dev, &tf); ++ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; ++ tf.protocol = ATA_PROT_NODATA; ++ tf.command = ATA_CMD_VERIFY; ++ tf.nsect = 1; ++ if (dev->flags & ATA_DFLAG_LBA) { ++ tf.flags |= ATA_TFLAG_LBA; ++ tf.device |= ATA_LBA; ++ } else { ++ /* CHS */ ++ tf.lbal = 0x1; /* sect */ ++ } ++ ++ ata_dev_notice(dev, "Entering active power mode\n"); ++ ++ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); ++ if (err_mask) ++ ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n", ++ err_mask); ++} ++ + /** + * ata_read_log_page - read a specific log page + * @dev: target device +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c +index 6d4c80b6daaef..2a04dd36a4948 100644 +--- a/drivers/ata/libata-eh.c ++++ b/drivers/ata/libata-eh.c +@@ -106,6 +106,14 @@ static const unsigned int ata_eh_flush_timeouts[] = { + UINT_MAX, + }; + ++static const unsigned int ata_eh_pm_timeouts[] = { ++ 10000, /* most drives spin up by 10sec */ ++ 10000, /* > 99% working drives spin up before 20sec */ ++ 35000, /* give > 30 secs of idleness for outlier devices */ ++ 5000, /* and sweet one last chance */ ++ UINT_MAX, /* > 1 min has elapsed, give up */ ++}; ++ + static const unsigned int ata_eh_other_timeouts[] = { + 5000, /* same rationale as identify timeout */ + 10000, /* ditto */ +@@ -147,6 +155,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), + .timeouts = ata_eh_flush_timeouts }, ++ { .commands = CMDS(ATA_CMD_VERIFY), ++ .timeouts = ata_eh_pm_timeouts }, + }; + #undef CMDS + +@@ -498,7 +508,19 @@ static void ata_eh_unload(struct ata_port *ap) + struct ata_device *dev; + unsigned long flags; + +- /* Restore SControl IPM and SPD for the next driver and ++ /* ++ * Unless we are restarting, transition all enabled devices to ++ * standby power mode. ++ */ ++ if (system_state != SYSTEM_RESTART) { ++ ata_for_each_link(link, ap, PMP_FIRST) { ++ ata_for_each_dev(dev, link, ENABLED) ++ ata_dev_power_set_standby(dev); ++ } ++ } ++ ++ /* ++ * Restore SControl IPM and SPD for the next driver and + * disable attached devices. + */ + ata_for_each_link(link, ap, PMP_FIRST) { +@@ -687,6 +709,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) + ehc->saved_xfer_mode[devno] = dev->xfer_mode; + if (ata_ncq_enabled(dev)) + ehc->saved_ncq_enabled |= 1 << devno; ++ ++ /* If we are resuming, wake up the device */ ++ if (ap->pflags & ATA_PFLAG_RESUMING) ++ ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; + } + } + +@@ -750,6 +776,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) + /* clean up */ + spin_lock_irqsave(ap->lock, flags); + ++ ap->pflags &= ~ATA_PFLAG_RESUMING; ++ + if (ap->pflags & ATA_PFLAG_LOADING) + ap->pflags &= ~ATA_PFLAG_LOADING; + else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && +@@ -1241,6 +1269,13 @@ void ata_eh_detach_dev(struct ata_device *dev) + struct ata_eh_context *ehc = &link->eh_context; + unsigned long flags; + ++ /* ++ * If the device is still enabled, transition it to standby power mode ++ * (i.e. spin down HDDs). 
++ */ ++ if (ata_dev_enabled(dev)) ++ ata_dev_power_set_standby(dev); ++ + ata_dev_disable(dev); + + spin_lock_irqsave(ap->lock, flags); +@@ -2927,6 +2962,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, + if (ehc->i.flags & ATA_EHI_DID_RESET) + readid_flags |= ATA_READID_POSTRESET; + ++ /* ++ * When resuming, before executing any command, make sure to ++ * transition the device to the active power mode. ++ */ ++ if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) { ++ ata_dev_power_set_active(dev); ++ ata_eh_done(link, dev, ATA_EH_SET_ACTIVE); ++ } ++ + if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { + WARN_ON(dev->class == ATA_DEV_PMP); + +@@ -3886,6 +3930,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) + unsigned long flags; + int rc = 0; + struct ata_device *dev; ++ struct ata_link *link; + + /* are we suspending? */ + spin_lock_irqsave(ap->lock, flags); +@@ -3898,6 +3943,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) + + WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); + ++ /* Set all devices attached to the port in standby mode */ ++ ata_for_each_link(link, ap, HOST_FIRST) { ++ ata_for_each_dev(dev, link, ENABLED) ++ ata_dev_power_set_standby(dev); ++ } ++ + /* + * If we have a ZPODD attached, check its zero + * power ready status before the port is frozen. +@@ -3980,6 +4031,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) + /* update the flags */ + spin_lock_irqsave(ap->lock, flags); + ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); ++ ap->pflags |= ATA_PFLAG_RESUMING; + spin_unlock_irqrestore(ap->lock, flags); + } + #endif /* CONFIG_PM */ +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 7b9c9264b9a72..2b9676416b8e8 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -1081,15 +1081,13 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) + } + } else { + sdev->sector_size = ata_id_logical_sector_size(dev->id); ++ + /* +- * Stop the drive on suspend but do not issue START STOP UNIT +- * on resume as this is not necessary and may fail: the device +- * will be woken up by ata_port_pm_resume() with a port reset +- * and device revalidation. ++ * Ask the sd driver to issue START STOP UNIT on runtime suspend ++ * and resume only. For system level suspend/resume, devices ++ * power state is handled directly by libata EH. + */ +- sdev->manage_system_start_stop = true; + sdev->manage_runtime_start_stop = true; +- sdev->no_start_on_resume = 1; + } + + /* +@@ -1265,7 +1263,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) + } + + if (cdb[4] & 0x1) { +- tf->nsect = 1; /* 1 sector, lba=0 */ ++ tf->nsect = 1; /* 1 sector, lba=0 */ + + if (qc->dev->flags & ATA_DFLAG_LBA) { + tf->flags |= ATA_TFLAG_LBA; +@@ -1281,7 +1279,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) + tf->lbah = 0x0; /* cyl high */ + } + +- tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ ++ tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ + } else { + /* Some odd clown BIOSen issue spindown on power off (ACPI S4 + * or S5) causing some drives to spin up and down again. 
+@@ -1291,7 +1289,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) + goto skip; + + if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && +- system_entering_hibernation()) ++ system_entering_hibernation()) + goto skip; + + /* Issue ATA STANDBY IMMEDIATE command */ +diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h +index e5ec197aed303..a5e0e676ed9a8 100644 +--- a/drivers/ata/libata.h ++++ b/drivers/ata/libata.h +@@ -62,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); + extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, + unsigned int readid_flags); + extern int ata_dev_configure(struct ata_device *dev); ++extern void ata_dev_power_set_standby(struct ata_device *dev); ++extern void ata_dev_power_set_active(struct ata_device *dev); + extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); + extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); + extern unsigned int ata_dev_set_feature(struct ata_device *dev, +diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c +index 80acdf62794a3..afc94d0062b17 100644 +--- a/drivers/counter/counter-chrdev.c ++++ b/drivers/counter/counter-chrdev.c +@@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext, + if (*id == component_id) + return 0; + +- if (ext->type == COUNTER_COMP_ARRAY) { +- element = ext->priv; ++ if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) { ++ element = ext[*ext_idx].priv; + + if (component_id - *id < element->length) + return 0; +diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c +index e2d1dc6ca6682..c7af13aca36cf 100644 +--- a/drivers/counter/microchip-tcb-capture.c ++++ b/drivers/counter/microchip-tcb-capture.c +@@ -98,7 +98,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter, + priv->qdec_mode = 0; + /* Set highest rate based on whether soc has gclk or not */ + bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN); +- if (priv->tc_cfg->has_gclk) ++ if (!priv->tc_cfg->has_gclk) + cmr |= ATMEL_TC_TIMER_CLOCK2; + else + cmr |= ATMEL_TC_TIMER_CLOCK1; +diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c +index c625bb2b5d563..628af51c81af3 100644 +--- a/drivers/dma-buf/dma-fence-unwrap.c ++++ b/drivers/dma-buf/dma-fence-unwrap.c +@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, + dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { + if (!dma_fence_is_signaled(tmp)) { + ++count; +- } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, +- &tmp->flags)) { +- if (ktime_after(tmp->timestamp, timestamp)) +- timestamp = tmp->timestamp; + } else { +- /* +- * Use the current time if the fence is +- * currently signaling. +- */ +- timestamp = ktime_get(); ++ ktime_t t = dma_fence_timestamp(tmp); ++ ++ if (ktime_after(t, timestamp)) ++ timestamp = t; + } + } + } +diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c +index af57799c86cee..2e9a316c596a3 100644 +--- a/drivers/dma-buf/sync_file.c ++++ b/drivers/dma-buf/sync_file.c +@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence, + sizeof(info->driver_name)); + + info->status = dma_fence_get_status(fence); +- while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && +- !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) +- cpu_relax(); + info->timestamp_ns = +- test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? 
+- ktime_to_ns(fence->timestamp) : +- ktime_set(0, 0); ++ dma_fence_is_signaled(fence) ? ++ ktime_to_ns(dma_fence_timestamp(fence)) : ++ ktime_set(0, 0); + + return info->status; + } +diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c +index 3b4ad7739f9ee..188f6b8625f78 100644 +--- a/drivers/dma/idxd/device.c ++++ b/drivers/dma/idxd/device.c +@@ -495,6 +495,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, + union idxd_command_reg cmd; + DECLARE_COMPLETION_ONSTACK(done); + u32 stat; ++ unsigned long flags; + + if (idxd_device_is_halted(idxd)) { + dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); +@@ -508,7 +509,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, + cmd.operand = operand; + cmd.int_req = 1; + +- spin_lock(&idxd->cmd_lock); ++ spin_lock_irqsave(&idxd->cmd_lock, flags); + wait_event_lock_irq(idxd->cmd_waitq, + !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags), + idxd->cmd_lock); +@@ -525,7 +526,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, + * After command submitted, release lock and go to sleep until + * the command completes via interrupt. + */ +- spin_unlock(&idxd->cmd_lock); ++ spin_unlock_irqrestore(&idxd->cmd_lock, flags); + wait_for_completion(&done); + stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + spin_lock(&idxd->cmd_lock); +diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c +index a1517ef1f4a01..0acf6a92a4ad3 100644 +--- a/drivers/dma/mediatek/mtk-uart-apdma.c ++++ b/drivers/dma/mediatek/mtk-uart-apdma.c +@@ -451,9 +451,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan) + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + +- synchronize_irq(c->irq); +- + spin_unlock_irqrestore(&c->vc.lock, flags); ++ synchronize_irq(c->irq); + + return 0; + } +diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c +index 37674029cb427..592d48ecf241f 100644 +--- a/drivers/dma/stm32-dma.c ++++ b/drivers/dma/stm32-dma.c +@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg( + chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; + + /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */ +- if (chan->trig_mdma && sg_len > 1) ++ if (chan->trig_mdma && sg_len > 1) { + chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; ++ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT; ++ } + + for_each_sg(sgl, sg, sg_len, i) { + ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, +@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, + + residue = stm32_dma_get_remaining_bytes(chan); + +- if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) { ++ if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) { + n_sg++; + if (n_sg == chan->desc->num_sgs) + n_sg = 0; +- residue = sg_req->len; ++ if (!chan->trig_mdma) ++ residue = sg_req->len; + } + + /* +@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, + * residue = remaining bytes from NDTR + remaining + * periods/sg to be transferred + */ +- if (!chan->desc->cyclic || n_sg != 0) ++ if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0) + for (i = n_sg; i < desc->num_sgs; i++) + residue += desc->sg_req[i].len; + +diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c +index b9d4c843635fc..4e9bab61f4663 100644 +--- a/drivers/dma/stm32-mdma.c ++++ 
b/drivers/dma/stm32-mdma.c +@@ -778,8 +778,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan, + /* Enable interrupts */ + ccr &= ~STM32_MDMA_CCR_IRQ_MASK; + ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE; +- if (sg_len > 1) +- ccr |= STM32_MDMA_CCR_BTIE; + desc->ccr = ccr; + + return 0; +@@ -1237,6 +1235,10 @@ static int stm32_mdma_resume(struct dma_chan *c) + unsigned long flags; + u32 status, reg; + ++ /* Transfer can be terminated */ ++ if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN)) ++ return -EPERM; ++ + hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc; + + spin_lock_irqsave(&chan->vchan.lock, flags); +@@ -1317,21 +1319,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c, + + static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan, + struct stm32_mdma_desc *desc, +- u32 curr_hwdesc) ++ u32 curr_hwdesc, ++ struct dma_tx_state *state) + { + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct stm32_mdma_hwdesc *hwdesc; +- u32 cbndtr, residue, modulo, burst_size; ++ u32 cisr, clar, cbndtr, residue, modulo, burst_size; + int i; + ++ cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); ++ + residue = 0; +- for (i = curr_hwdesc + 1; i < desc->count; i++) { ++ /* Get the next hw descriptor to process from current transfer */ ++ clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)); ++ for (i = desc->count - 1; i >= 0; i--) { + hwdesc = desc->node[i].hwdesc; ++ ++ if (hwdesc->clar == clar) ++ break;/* Current transfer found, stop cumulating */ ++ ++ /* Cumulate residue of unprocessed hw descriptors */ + residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr); + } + cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); + residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK; + ++ state->in_flight_bytes = 0; ++ if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA)) ++ state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK; ++ + if (!chan->mem_burst) + return residue; + +@@ -1361,11 +1377,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, + + vdesc = vchan_find_desc(&chan->vchan, cookie); + if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) +- residue = stm32_mdma_desc_residue(chan, chan->desc, +- chan->curr_hwdesc); ++ residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state); + else if (vdesc) +- residue = stm32_mdma_desc_residue(chan, +- to_stm32_mdma_desc(vdesc), 0); ++ residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state); ++ + dma_set_residue(state, residue); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +index 93207badf83f3..6dcd7bab42fbb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +@@ -220,7 +220,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_res_cursor cursor; + +- if (bo->tbo.resource->mem_type != TTM_PL_VRAM) ++ if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) + return false; + + amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 16c05a24ac7aa..15d3caf3d6d72 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -1183,6 +1183,9 @@ 
static void disable_vbios_mode_if_required( + if (stream == NULL) + continue; + ++ if (stream->apply_seamless_boot_optimization) ++ continue; ++ + // only looking for first odm pipe + if (pipe->prev_odm_pipe) + continue; +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c +index 202a9990f4517..b097bff1cd18e 100644 +--- a/drivers/gpu/drm/drm_atomic_helper.c ++++ b/drivers/gpu/drm/drm_atomic_helper.c +@@ -290,7 +290,8 @@ static int + update_connector_routing(struct drm_atomic_state *state, + struct drm_connector *connector, + struct drm_connector_state *old_connector_state, +- struct drm_connector_state *new_connector_state) ++ struct drm_connector_state *new_connector_state, ++ bool added_by_user) + { + const struct drm_connector_helper_funcs *funcs; + struct drm_encoder *new_encoder; +@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state, + * there's a chance the connector may have been destroyed during the + * process, but it's better to ignore that then cause + * drm_atomic_helper_resume() to fail. ++ * ++ * Last, we want to ignore connector registration when the connector ++ * was not pulled in the atomic state by user-space (ie, was pulled ++ * in by the driver, e.g. when updating a DP-MST stream). + */ + if (!state->duplicated && drm_connector_is_unregistered(connector) && +- crtc_state->active) { ++ added_by_user && crtc_state->active) { + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); +@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, + struct drm_connector *connector; + struct drm_connector_state *old_connector_state, *new_connector_state; + int i, ret; +- unsigned int connectors_mask = 0; ++ unsigned int connectors_mask = 0, user_connectors_mask = 0; ++ ++ for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) ++ user_connectors_mask |= BIT(i); + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + bool has_connectors = +@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, + */ + ret = update_connector_routing(state, connector, + old_connector_state, +- new_connector_state); ++ new_connector_state, ++ BIT(i) & user_connectors_mask); + if (ret) + return ret; + if (old_connector_state->crtc) { +diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +index cc84685368715..efc22f9b17f07 100644 +--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c ++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +@@ -235,8 +235,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) + u32 flags = 0; + u32 *cs; + ++ /* ++ * L3 fabric flush is needed for AUX CCS invalidation ++ * which happens as part of pipe-control so we can ++ * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3 ++ * deals with Protected Memory which is not needed for ++ * AUX CCS invalidation and lead to unwanted side effects. 
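++	 * Hence the flag is set below only for EMIT_FLUSH requests.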
++ */ ++ if (mode & EMIT_FLUSH) ++ flags |= PIPE_CONTROL_FLUSH_L3; ++ + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; +- flags |= PIPE_CONTROL_FLUSH_L3; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* Wa_1409600907:tgl,adl-p */ +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +index 3fbda2a1f77fc..62d48c0f905e4 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +@@ -142,6 +142,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, + const struct dpu_format *fmt = NULL; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + int src_width, src_height, dst_height, fps; ++ u64 plane_pixel_rate, plane_bit_rate; + u64 plane_prefill_bw; + u64 plane_bw; + u32 hw_latency_lines; +@@ -164,13 +165,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, + scale_factor = src_height > dst_height ? + mult_frac(src_height, 1, dst_height) : 1; + +- plane_bw = +- src_width * mode->vtotal * fps * fmt->bpp * +- scale_factor; ++ plane_pixel_rate = src_width * mode->vtotal * fps; ++ plane_bit_rate = plane_pixel_rate * fmt->bpp; + +- plane_prefill_bw = +- src_width * hw_latency_lines * fps * fmt->bpp * +- scale_factor * mode->vtotal; ++ plane_bw = plane_bit_rate * scale_factor; ++ ++ plane_prefill_bw = plane_bw * hw_latency_lines; + + if ((vbp+vpw) > hw_latency_lines) + do_div(plane_prefill_bw, (vbp+vpw)); +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c +index dd26ca651a054..103eef9f059a0 100644 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c +@@ -1711,13 +1711,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) + return rc; + + while (--link_train_max_retries) { +- rc = dp_ctrl_reinitialize_mainlink(ctrl); +- if (rc) { +- DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", +- rc); +- break; +- } +- + training_step = DP_TRAINING_NONE; + rc = dp_ctrl_setup_main_link(ctrl, &training_step); + if (rc == 0) { +@@ -1769,6 +1762,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) + /* stop link training before start re training */ + dp_ctrl_clear_training_pattern(ctrl); + } ++ ++ rc = dp_ctrl_reinitialize_mainlink(ctrl); ++ if (rc) { ++ DRM_ERROR("Failed to reinitialize mainlink. 
rc=%d\n", rc); ++ break; ++ } + } + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) +diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c +index 36bb6191d2f03..cb66d1126ea96 100644 +--- a/drivers/gpu/drm/msm/dp/dp_link.c ++++ b/drivers/gpu/drm/msm/dp/dp_link.c +@@ -1068,7 +1068,7 @@ int dp_link_process_request(struct dp_link *dp_link) + } + } + +- drm_dbg_dp(link->drm_dev, "sink request=%#x", ++ drm_dbg_dp(link->drm_dev, "sink request=%#x\n", + dp_link->sink_request); + return ret; + } +diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c +index b433ccfe4d7da..e20cd3dd2c6cc 100644 +--- a/drivers/gpu/drm/msm/dsi/dsi_host.c ++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c +@@ -1098,9 +1098,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host) + + static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) + { ++ u32 data; ++ + if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) + return; + ++ data = dsi_read(msm_host, REG_DSI_STATUS0); ++ ++ /* if video mode engine is not busy, its because ++ * either timing engine was not turned on or the ++ * DSI controller has finished transmitting the video ++ * data already, so no need to wait in those cases ++ */ ++ if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY)) ++ return; ++ + if (msm_host->power_on && msm_host->enabled) { + dsi_wait4video_done(msm_host); + /* delay 4 ms to skip BLLP */ +@@ -1960,10 +1972,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) + } + + msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); +- if (msm_host->irq < 0) { +- ret = msm_host->irq; +- dev_err(&pdev->dev, "failed to get irq: %d\n", ret); +- return ret; ++ if (!msm_host->irq) { ++ dev_err(&pdev->dev, "failed to get irq\n"); ++ return -EINVAL; + } + + /* do not autoenable, will be enabled later */ +diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c +index e5a4ecde0063d..f138b3be1646f 100644 +--- a/drivers/gpu/drm/scheduler/sched_main.c ++++ b/drivers/gpu/drm/scheduler/sched_main.c +@@ -841,7 +841,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) + + if (next) { + next->s_fence->scheduled.timestamp = +- job->s_fence->finished.timestamp; ++ dma_fence_timestamp(&job->s_fence->finished); + /* start TO timer for next job */ + drm_sched_start_timeout(sched); + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index 58ca9adf09871..7e59469e1cb9f 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -1614,7 +1614,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, + { + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState); + SVGA3dTextureState *last_state = (SVGA3dTextureState *) +- ((unsigned long) header + header->size + sizeof(header)); ++ ((unsigned long) header + header->size + sizeof(*header)); + SVGA3dTextureState *cur_state = (SVGA3dTextureState *) + ((unsigned long) header + sizeof(*cmd)); + struct vmw_resource *ctx; +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c +index 97eefb77f6014..fb427391c3b86 100644 +--- a/drivers/hid/hid-logitech-hidpp.c ++++ b/drivers/hid/hid-logitech-hidpp.c +@@ -4275,7 +4275,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) + goto hid_hw_init_fail; + } + +- hidpp_connect_event(hidpp); ++ schedule_work(&hidpp->work); ++ flush_work(&hidpp->work); + + if (will_restart) { + /* Reset the HID node state */ +diff --git 
a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c +index f5a0fc9e64c54..fff6e5a2d9569 100644 +--- a/drivers/iio/adc/imx8qxp-adc.c ++++ b/drivers/iio/adc/imx8qxp-adc.c +@@ -38,8 +38,8 @@ + #define IMX8QXP_ADR_ADC_FCTRL 0x30 + #define IMX8QXP_ADR_ADC_SWTRIG 0x34 + #define IMX8QXP_ADR_ADC_TCTRL(tid) (0xc0 + (tid) * 4) +-#define IMX8QXP_ADR_ADC_CMDH(cid) (0x100 + (cid) * 8) +-#define IMX8QXP_ADR_ADC_CMDL(cid) (0x104 + (cid) * 8) ++#define IMX8QXP_ADR_ADC_CMDL(cid) (0x100 + (cid) * 8) ++#define IMX8QXP_ADR_ADC_CMDH(cid) (0x104 + (cid) * 8) + #define IMX8QXP_ADR_ADC_RESFIFO 0x300 + #define IMX8QXP_ADR_ADC_TST 0xffc + +diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig +index fcf6d2269bfc2..3507cd6ab4e54 100644 +--- a/drivers/iio/addac/Kconfig ++++ b/drivers/iio/addac/Kconfig +@@ -10,6 +10,8 @@ config AD74413R + depends on GPIOLIB && SPI + select REGMAP_SPI + select CRC8 ++ select IIO_BUFFER ++ select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for Analog Devices AD74412R/AD74413R + quad-channel software configurable input/output solution. +diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c +index d5ea1a1be1226..a492e8f2fc0fb 100644 +--- a/drivers/iio/dac/ad3552r.c ++++ b/drivers/iio/dac/ad3552r.c +@@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select { + }; + + enum ad3542r_id { +- AD3542R_ID = 0x4008, +- AD3552R_ID = 0x4009, ++ AD3542R_ID = 0x4009, ++ AD3552R_ID = 0x4008, + }; + + enum ad3552r_ch_output_range { +diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c +index e6311213f3e89..d15b85377159b 100644 +--- a/drivers/iio/frequency/admv1013.c ++++ b/drivers/iio/frequency/admv1013.c +@@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st) + if (vcm < 0) + return vcm; + +- if (vcm < 1800000) ++ if (vcm <= 1800000) + mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100; +- else if (vcm > 1800000 && vcm < 2600000) ++ else if (vcm > 1800000 && vcm <= 2600000) + mixer_vgate = (2375 * vcm / 1000000 + 125) / 100; + else + return -EINVAL; +diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig +index fa79b1ac4f85b..83e53acfbe880 100644 +--- a/drivers/iio/imu/bno055/Kconfig ++++ b/drivers/iio/imu/bno055/Kconfig +@@ -2,6 +2,8 @@ + + config BOSCH_BNO055 + tristate ++ select IIO_BUFFER ++ select IIO_TRIGGERED_BUFFER + + config BOSCH_BNO055_SERIAL + tristate "Bosch BNO055 attached via UART" +diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c +index c0aff78489b46..4c867157aa968 100644 +--- a/drivers/iio/pressure/bmp280-core.c ++++ b/drivers/iio/pressure/bmp280-core.c +@@ -1786,7 +1786,7 @@ int bmp280_common_probe(struct device *dev, + * however as it happens, the BMP085 shares the chip ID of BMP180 + * so we look for an IRQ if we have that. 
+ */ +- if (irq > 0 || (chip_id == BMP180_CHIP_ID)) { ++ if (irq > 0 && (chip_id == BMP180_CHIP_ID)) { + ret = bmp085_fetch_eoc_irq(dev, name, irq, data); + if (ret) + return ret; +diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c +index 984a3f511a1ae..db1b1e48225aa 100644 +--- a/drivers/iio/pressure/dps310.c ++++ b/drivers/iio/pressure/dps310.c +@@ -57,8 +57,8 @@ + #define DPS310_RESET_MAGIC 0x09 + #define DPS310_COEF_BASE 0x10 + +-/* Make sure sleep time is <= 20ms for usleep_range */ +-#define DPS310_POLL_SLEEP_US(t) min(20000, (t) / 8) ++/* Make sure sleep time is <= 30ms for usleep_range */ ++#define DPS310_POLL_SLEEP_US(t) min(30000, (t) / 8) + /* Silently handle error in rate value here */ + #define DPS310_POLL_TIMEOUT_US(rc) ((rc) <= 0 ? 1000000 : 1000000 / (rc)) + +@@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data) + if (rc) + return rc; + +- /* Wait for device chip access: 2.5ms in specification */ +- usleep_range(2500, 12000); ++ /* Wait for device chip access: 15ms in specification */ ++ usleep_range(15000, 55000); + return 0; + } + +diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c +index c564a1d6cafe8..44cfdbedcfaab 100644 +--- a/drivers/iio/pressure/ms5611_core.c ++++ b/drivers/iio/pressure/ms5611_core.c +@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len) + + crc = (crc >> 12) & 0x000F; + +- return crc_orig != 0x0000 && crc == crc_orig; ++ return crc == crc_orig; + } + + static int ms5611_read_prom(struct iio_dev *indio_dev) +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c +index ced615b5ea096..040ba2224f9ff 100644 +--- a/drivers/infiniband/hw/cxgb4/cm.c ++++ b/drivers/infiniband/hw/cxgb4/cm.c +@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) + int win; + + skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); ++ if (!skb) ++ return -ENOMEM; ++ + req = __skb_put_zero(skb, sizeof(*req)); + req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); + req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 76cbcca13c9e9..c19a4d2023805 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -272,6 +272,7 @@ static const struct xpad_device { + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, ++ { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, + { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, +@@ -474,6 +475,7 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ ++ XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */ + XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ +diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c +index c1c733a9cb890..db2ba89adaefa 100644 +--- a/drivers/input/misc/powermate.c ++++ b/drivers/input/misc/powermate.c +@@ -425,6 +425,7 @@ static void 
powermate_disconnect(struct usb_interface *intf) + pm->requires_update = 0; + usb_kill_urb(pm->irq); + input_unregister_device(pm->input); ++ usb_kill_urb(pm->config); + usb_free_urb(pm->irq); + usb_free_urb(pm->config); + powermate_free_buffers(interface_to_usbdev(intf), pm); +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 2118b2075f437..4e38229404b4b 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse, + psmouse->protocol_handler = elantech_process_byte; + psmouse->disconnect = elantech_disconnect; + psmouse->reconnect = elantech_reconnect; ++ psmouse->fast_reconnect = NULL; + psmouse->pktsize = info->hw_version > 1 ? 6 : 4; + + return 0; +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index fa021af8506e4..d2c9f4cbd00c6 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse, + psmouse->set_rate = synaptics_set_rate; + psmouse->disconnect = synaptics_disconnect; + psmouse->reconnect = synaptics_reconnect; ++ psmouse->fast_reconnect = NULL; + psmouse->cleanup = synaptics_reset; + /* Synaptics can usually stay in sync without extra help */ + psmouse->resync_time = 0; +diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h +index 1724d6cb8649d..9c39553d30fa2 100644 +--- a/drivers/input/serio/i8042-acpipnpio.h ++++ b/drivers/input/serio/i8042-acpipnpio.h +@@ -618,6 +618,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) + }, ++ { ++ /* Fujitsu Lifebook E5411 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOAUX) ++ }, + { + /* Gigabyte M912 */ + .matches = { +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c +index 25e575183dd18..3f0732db7bf5b 100644 +--- a/drivers/input/touchscreen/goodix.c ++++ b/drivers/input/touchscreen/goodix.c +@@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) + dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n"); + ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; + gpio_mapping = acpi_goodix_int_last_gpios; ++ } else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) { ++ /* ++ * On newer devices there is only 1 GpioInt resource and _PS0 ++ * does the whole reset sequence for us. ++ */ ++ acpi_device_fix_up_power(ACPI_COMPANION(dev)); ++ ++ /* ++ * Before the _PS0 call the int GPIO may have been in output ++ * mode and the call should have put the int GPIO in input mode, ++ * but the GPIO subsys cached state may still think it is ++ * in output mode, causing gpiochip_lock_as_irq() failure. ++ * ++ * Add a mapping for the int GPIO to make the ++ * gpiod_int = gpiod_get(..., GPIOD_IN) call succeed, ++ * which will explicitly set the direction to input. 
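++		 * The int-first mapping is reused since the only GPIO here is the interrupt.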
++ */ ++ ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE; ++ gpio_mapping = acpi_goodix_int_first_gpios; + } else { + dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n", + ts->gpio_count, ts->gpio_int_idx); +diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c +index 25fd8ee66565b..10c3e85c90c23 100644 +--- a/drivers/irqchip/irq-renesas-rzg2l.c ++++ b/drivers/irqchip/irq-renesas-rzg2l.c +@@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d) + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); +- reg &= ~(TSSEL_MASK << tssr_offset); ++ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset)); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } +diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c +index b8ad4f16b4acd..e7b6989d8b4a8 100644 +--- a/drivers/mcb/mcb-core.c ++++ b/drivers/mcb/mcb-core.c +@@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB); + + static int __mcb_bus_add_devices(struct device *dev, void *data) + { +- struct mcb_device *mdev = to_mcb_device(dev); + int retval; + +- if (mdev->is_added) +- return 0; +- + retval = device_attach(dev); +- if (retval < 0) ++ if (retval < 0) { + dev_err(dev, "Error adding device (%d)\n", retval); +- +- mdev->is_added = true; ++ return retval; ++ } + + return 0; + } +diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c +index aa6938da0db85..c41cbacc75a2c 100644 +--- a/drivers/mcb/mcb-parse.c ++++ b/drivers/mcb/mcb-parse.c +@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, + mdev->mem.end = mdev->mem.start + size - 1; + mdev->mem.flags = IORESOURCE_MEM; + +- mdev->is_added = false; +- + ret = mcb_device_register(bus, mdev); + if (ret < 0) + goto err; +diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig +index 8236aabebb394..e45b95a13157b 100644 +--- a/drivers/net/can/Kconfig ++++ b/drivers/net/can/Kconfig +@@ -174,7 +174,7 @@ config CAN_SLCAN + + config CAN_SUN4I + tristate "Allwinner A10 CAN controller" +- depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST ++ depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST + help + Say Y here if you want to use CAN controller found on Allwinner + A10/A20/D1 SoCs. +diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c +index b3f7988668996..1e94ba1031ece 100644 +--- a/drivers/net/dsa/qca/qca8k-8xxx.c ++++ b/drivers/net/dsa/qca/qca8k-8xxx.c +@@ -544,6 +544,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, + goto err_read_skb; + } + ++ /* It seems that accessing the switch's internal PHYs via management ++ * packets still uses the MDIO bus within the switch internally, and ++ * these accesses can conflict with external MDIO accesses to other ++ * devices on the MDIO bus. ++ * We therefore need to lock the MDIO bus onto which the switch is ++ * connected. ++ */ ++ mutex_lock(&priv->bus->mdio_lock); ++ + /* Actually start the request: + * 1. Send mdio master packet + * 2. 
Busy Wait for mdio master command +@@ -556,6 +565,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, + mgmt_master = priv->mgmt_master; + if (!mgmt_master) { + mutex_unlock(&mgmt_eth_data->mutex); ++ mutex_unlock(&priv->bus->mdio_lock); + ret = -EINVAL; + goto err_mgmt_master; + } +@@ -643,6 +653,7 @@ exit: + QCA8K_ETHERNET_TIMEOUT); + + mutex_unlock(&mgmt_eth_data->mutex); ++ mutex_unlock(&priv->bus->mdio_lock); + + return ret; + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +index 29cc609880712..ea88ac04ab9ad 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, + struct vf_macvlans *mv_list; + int num_vf_macvlans, i; + ++ /* Initialize list of VF macvlans */ ++ INIT_LIST_HEAD(&adapter->vf_mvs.l); ++ + num_vf_macvlans = hw->mac.num_rar_entries - + (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); + if (!num_vf_macvlans) +@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, + mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { +- /* Initialize list of VF macvlans */ +- INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list[i].vf = -1; + mv_list[i].free = true; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +index 0f8f3ce35537d..a7832a0180ee6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +@@ -611,7 +611,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) + goto out; + } + +- if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) { ++ if (ctx->sa.update_pn) { + netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n", + assoc_num); + err = -EINVAL; +@@ -1016,7 +1016,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) + goto out; + } + +- if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) { ++ if (ctx->sa.update_pn) { + netdev_err(ctx->netdev, + "MACsec offload update RX sa %d PN isn't supported\n", + assoc_num); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 4e7daa382bc05..42e6f2fcf5f59 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -3862,13 +3862,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable) + struct mlx5e_channels *chs = &priv->channels; + struct mlx5e_params new_params; + int err; ++ bool rx_ts_over_crc = !enable; + + mutex_lock(&priv->state_lock); + + new_params = chs->params; + new_params.scatter_fcs_en = enable; + err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap, +- &new_params.scatter_fcs_en, true); ++ &rx_ts_over_crc, true); + mutex_unlock(&priv->state_lock); + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +index d309b77a01944..cdd8818b49d0a 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +@@ -308,8 +308,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { + .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, + }; + 
+-static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, +- bool learning_en) ++static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, ++ bool learning_en) + { + char tnpc_pl[MLXSW_REG_TNPC_LEN]; + +diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c +index 4f4204432aaa3..b751b03eddfb1 100644 +--- a/drivers/net/ethernet/microsoft/mana/mana_en.c ++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c +@@ -1003,17 +1003,21 @@ static void mana_poll_tx_cq(struct mana_cq *cq) + case CQE_TX_VPORT_IDX_OUT_OF_RANGE: + case CQE_TX_VPORT_DISABLED: + case CQE_TX_VLAN_TAGGING_VIOLATION: +- WARN_ONCE(1, "TX: CQE error %d: ignored.\n", +- cqe_oob->cqe_hdr.cqe_type); ++ if (net_ratelimit()) ++ netdev_err(ndev, "TX: CQE error %d\n", ++ cqe_oob->cqe_hdr.cqe_type); ++ + break; + + default: +- /* If the CQE type is unexpected, log an error, assert, +- * and go through the error path. ++ /* If the CQE type is unknown, log an error, ++ * and still free the SKB, update tail, etc. + */ +- WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", +- cqe_oob->cqe_hdr.cqe_type); +- return; ++ if (net_ratelimit()) ++ netdev_err(ndev, "TX: unknown CQE type %d\n", ++ cqe_oob->cqe_hdr.cqe_type); ++ ++ break; + } + + if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) +diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +index f21cf1f40f987..153533cd8f086 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) + unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); + struct nfp_flower_cmsg_merge_hint *msg; + struct nfp_fl_payload *sub_flows[2]; ++ struct nfp_flower_priv *priv; + int err, i, flow_cnt; + + msg = nfp_flower_cmsg_get_data(skb); +@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) + return; + } + +- rtnl_lock(); ++ priv = app->priv; ++ mutex_lock(&priv->nfp_fl_lock); + for (i = 0; i < flow_cnt; i++) { + u32 ctx = be32_to_cpu(msg->flow[i].host_ctx); + + sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx); + if (!sub_flows[i]) { + nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n"); +- goto err_rtnl_unlock; ++ goto err_mutex_unlock; + } + } + +@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) + if (err == -ENOMEM) + nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n"); + +-err_rtnl_unlock: +- rtnl_unlock(); ++err_mutex_unlock: ++ mutex_unlock(&priv->nfp_fl_lock); + } + + static void +diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +index f693119541d55..f7492be452aed 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +@@ -1971,8 +1971,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl + struct nfp_fl_ct_flow_entry *ct_entry; + struct netlink_ext_ack *extack = NULL; + +- ASSERT_RTNL(); +- + extack = flow->common.extack; + switch (flow->command) { + case FLOW_CLS_REPLACE: +@@ -2015,9 +2013,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb + + switch (type) { + case TC_SETUP_CLSFLOWER: +- rtnl_lock(); ++ while (!mutex_trylock(&zt->priv->nfp_fl_lock)) { ++ if (!zt->nft) /* avoid deadlock */ 
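++			/* nfp_fl_ct_del_flow() clears zt->nft before removing this callback */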
++ return err; ++ msleep(20); ++ } + err = nfp_fl_ct_offload_nft_flow(zt, flow); +- rtnl_unlock(); ++ mutex_unlock(&zt->priv->nfp_fl_lock); + break; + default: + return -EOPNOTSUPP; +@@ -2045,6 +2047,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) + struct nfp_fl_ct_flow_entry *ct_entry; + struct nfp_fl_ct_zone_entry *zt; + struct rhashtable *m_table; ++ struct nf_flowtable *nft; + + if (!ct_map_ent) + return -ENOENT; +@@ -2061,8 +2064,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) + nfp_fl_ct_clean_flow_entry(ct_entry); + kfree(ct_map_ent); + +- if (!zt->pre_ct_count) { +- zt->nft = NULL; ++ if (!zt->pre_ct_count && zt->nft) { ++ nft = zt->nft; ++ zt->nft = NULL; /* avoid deadlock */ ++ nf_flow_table_offload_del_cb(nft, ++ nfp_fl_ct_handle_nft_flow, ++ zt); + nfp_fl_ct_clean_nft_entries(zt); + } + break; +diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h +index cb799d18682d9..d0ab71ce3d84f 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/main.h ++++ b/drivers/net/ethernet/netronome/nfp/flower/main.h +@@ -281,6 +281,7 @@ struct nfp_fl_internal_ports { + * @predt_list: List to keep track of decap pretun flows + * @neigh_table: Table to keep track of neighbor entries + * @predt_lock: Lock to serialise predt/neigh table updates ++ * @nfp_fl_lock: Lock to protect the flow offload operation + */ + struct nfp_flower_priv { + struct nfp_app *app; +@@ -323,6 +324,7 @@ struct nfp_flower_priv { + struct list_head predt_list; + struct rhashtable neigh_table; + spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ ++ struct mutex nfp_fl_lock; /* Protect the flow operation */ + }; + + /** +diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c +index 0f06ef6e24bf4..80e4675582bfb 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c +@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, + if (err) + goto err_free_stats_ctx_table; + ++ mutex_init(&priv->nfp_fl_lock); ++ + err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params); + if (err) + goto err_free_merge_table; +diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c +index 8593cafa63683..99165694f1367 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c +@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, + u64 parent_ctx = 0; + int err; + +- ASSERT_RTNL(); +- + if (sub_flow1 == sub_flow2 || + nfp_flower_is_merge_flow(sub_flow1) || + nfp_flower_is_merge_flow(sub_flow2)) +@@ -1727,19 +1725,30 @@ static int + nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, + struct flow_cls_offload *flower) + { ++ struct nfp_flower_priv *priv = app->priv; ++ int ret; ++ + if (!eth_proto_is_802_3(flower->common.protocol)) + return -EOPNOTSUPP; + ++ mutex_lock(&priv->nfp_fl_lock); + switch (flower->command) { + case FLOW_CLS_REPLACE: +- return nfp_flower_add_offload(app, netdev, flower); ++ ret = nfp_flower_add_offload(app, netdev, flower); ++ break; + case FLOW_CLS_DESTROY: +- return nfp_flower_del_offload(app, netdev, flower); ++ ret = nfp_flower_del_offload(app, netdev, flower); ++ break; + case FLOW_CLS_STATS: +- return nfp_flower_get_stats(app, netdev, flower); ++ ret 
= nfp_flower_get_stats(app, netdev, flower); ++ break; + default: +- return -EOPNOTSUPP; ++ ret = -EOPNOTSUPP; ++ break; + } ++ mutex_unlock(&priv->nfp_fl_lock); ++ ++ return ret; + } + + static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, +@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, + repr_priv = repr->app_priv; + repr_priv->block_shared = f->block_shared; + f->driver_block_list = &nfp_block_cb_list; ++ f->unlocked_driver_cb = true; + + switch (f->command) { + case FLOW_BLOCK_BIND: +@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str + nfp_flower_internal_port_can_offload(app, netdev))) + return -EOPNOTSUPP; + ++ f->unlocked_driver_cb = true; ++ + switch (f->command) { + case FLOW_BLOCK_BIND: + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); +diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +index 99052a925d9ec..e7180b4793c7d 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, + { + struct netlink_ext_ack *extack = flow->common.extack; + struct nfp_flower_priv *fl_priv = app->priv; ++ int ret; + + if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload"); + return -EOPNOTSUPP; + } + ++ mutex_lock(&fl_priv->nfp_fl_lock); + switch (flow->command) { + case TC_CLSMATCHALL_REPLACE: +- return nfp_flower_install_rate_limiter(app, netdev, flow, +- extack); ++ ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack); ++ break; + case TC_CLSMATCHALL_DESTROY: +- return nfp_flower_remove_rate_limiter(app, netdev, flow, +- extack); ++ ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack); ++ break; + case TC_CLSMATCHALL_STATS: +- return nfp_flower_stats_rate_limiter(app, netdev, flow, +- extack); ++ ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack); ++ break; + default: +- return -EOPNOTSUPP; ++ ret = -EOPNOTSUPP; ++ break; + } ++ mutex_unlock(&fl_priv->nfp_fl_lock); ++ ++ return ret; + } + + /* Offload tc action, currently only for tc police */ +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index 894e2690c6437..9a52283d77544 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -2183,6 +2183,8 @@ static int ravb_close(struct net_device *ndev) + of_phy_deregister_fixed_link(np); + } + ++ cancel_work_sync(&priv->work); ++ + if (info->multi_irqs) { + free_irq(priv->tx_irqs[RAVB_NC], ndev); + free_irq(priv->rx_irqs[RAVB_NC], ndev); +@@ -2907,8 +2909,6 @@ static int ravb_remove(struct platform_device *pdev) + clk_disable_unprepare(priv->gptp_clk); + clk_disable_unprepare(priv->refclk); + +- dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, +- priv->desc_bat_dma); + /* Set reset mode */ + ravb_write(ndev, CCC_OPC_RESET, CCC); + unregister_netdev(ndev); +@@ -2916,6 +2916,8 @@ static int ravb_remove(struct platform_device *pdev) + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); ++ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, ++ priv->desc_bat_dma); + pm_runtime_put_sync(&pdev->dev); + 
pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c +index d0b5129439ed6..c2201e0adc46c 100644 +--- a/drivers/net/ieee802154/ca8210.c ++++ b/drivers/net/ieee802154/ca8210.c +@@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi) + struct device_node *np = spi->dev.of_node; + struct ca8210_priv *priv = spi_get_drvdata(spi); + struct ca8210_platform_data *pdata = spi->dev.platform_data; +- int ret = 0; + + if (!np) + return -EFAULT; +@@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi) + dev_crit(&spi->dev, "Failed to register external clk\n"); + return PTR_ERR(priv->clk); + } +- ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); +- if (ret) { +- clk_unregister(priv->clk); +- dev_crit( +- &spi->dev, +- "Failed to register external clock as clock provider\n" +- ); +- } else { +- dev_info(&spi->dev, "External clock set as clock provider\n"); +- } + +- return ret; ++ return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); + } + + /** +@@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi) + { + struct ca8210_priv *priv = spi_get_drvdata(spi); + +- if (!priv->clk) +- return ++ if (IS_ERR_OR_NULL(priv->clk)) ++ return; + + of_clk_del_provider(spi->dev.of_node); + clk_unregister(priv->clk); +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 578f470e9fad9..81453e84b6413 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -2384,6 +2384,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) + + ctx.sa.assoc_num = assoc_num; + ctx.sa.tx_sa = tx_sa; ++ ctx.sa.update_pn = !!prev_pn.full64; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_upd_txsa, &ctx); +@@ -2477,6 +2478,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) + + ctx.sa.assoc_num = assoc_num; + ctx.sa.rx_sa = rx_sa; ++ ctx.sa.update_pn = !!prev_pn.full64; + ctx.secy = secy; + + ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); +diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c +index f81b077618f40..81fd9bfef5271 100644 +--- a/drivers/net/phy/mscc/mscc_macsec.c ++++ b/drivers/net/phy/mscc/mscc_macsec.c +@@ -844,6 +844,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx) + struct macsec_flow *flow; + int ret; + ++ if (ctx->sa.update_pn) ++ return -EINVAL; ++ + flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR); + if (IS_ERR(flow)) + return PTR_ERR(flow); +@@ -897,6 +900,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx) + struct macsec_flow *flow; + int ret; + ++ if (ctx->sa.update_pn) ++ return -EINVAL; ++ + flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR); + if (IS_ERR(flow)) + return PTR_ERR(flow); +diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c +index 48d7d278631e9..99ec1d4a972db 100644 +--- a/drivers/net/usb/dm9601.c ++++ b/drivers/net/usb/dm9601.c +@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc) + struct usbnet *dev = netdev_priv(netdev); + + __le16 res; ++ int err; + + if (phy_id) { + netdev_dbg(dev->net, "Only internal phy supported\n"); + return 0; + } + +- dm_read_shared_word(dev, 1, loc, &res); ++ err = dm_read_shared_word(dev, 1, loc, &res); ++ if (err < 0) { ++ netdev_err(dev->net, "MDIO read error: %d\n", err); ++ return err; ++ } + + netdev_dbg(dev->net, + "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, 
returns=0x%04x\n", +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c +index f3f2c07423a6a..fc3bb63b9ac3e 100644 +--- a/drivers/net/xen-netback/interface.c ++++ b/drivers/net/xen-netback/interface.c +@@ -41,8 +41,6 @@ + #include + #include + +-#define XENVIF_QUEUE_LENGTH 32 +- + /* Number of bytes allowed on the internal guest Rx queue. */ + #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) + +@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, + dev->features = dev->hw_features | NETIF_F_RXCSUM; + dev->ethtool_ops = &xenvif_ethtool_ops; + +- dev->tx_queue_len = XENVIF_QUEUE_LENGTH; +- + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 90008e24d1cc7..cfb36adf4eb80 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -1822,7 +1822,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) + u64 delta; + int i; + +- for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) { ++ for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) { + if (status & (1U << i)) { + ret = IRQ_HANDLED; + if (WARN_ON(!dtc->counters[i])) +diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c +index 569f12af2aafa..0a8b40edc3f31 100644 +--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c ++++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c +@@ -126,6 +126,10 @@ struct lynx_28g_lane { + struct lynx_28g_priv { + void __iomem *base; + struct device *dev; ++ /* Serialize concurrent access to registers shared between lanes, ++ * like PCCn ++ */ ++ spinlock_t pcc_lock; + struct lynx_28g_pll pll[LYNX_28G_NUM_PLL]; + struct lynx_28g_lane lane[LYNX_28G_NUM_LANE]; + +@@ -396,6 +400,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode) + if (powered_up) + lynx_28g_power_off(phy); + ++ spin_lock(&priv->pcc_lock); ++ + switch (submode) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: +@@ -412,6 +418,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode) + lane->interface = submode; + + out: ++ spin_unlock(&priv->pcc_lock); ++ + /* Power up the lane if necessary */ + if (powered_up) + lynx_28g_power_on(phy); +@@ -507,11 +515,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work) + for (i = 0; i < LYNX_28G_NUM_LANE; i++) { + lane = &priv->lane[i]; + +- if (!lane->init) +- continue; ++ mutex_lock(&lane->phy->mutex); + +- if (!lane->powered_up) ++ if (!lane->init || !lane->powered_up) { ++ mutex_unlock(&lane->phy->mutex); + continue; ++ } + + rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL); + if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) { +@@ -520,6 +529,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work) + rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL); + } while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE)); + } ++ ++ mutex_unlock(&lane->phy->mutex); + } + queue_delayed_work(system_power_efficient_wq, &priv->cdr_check, + msecs_to_jiffies(1000)); +@@ -592,6 +603,7 @@ static int lynx_28g_probe(struct platform_device *pdev) + + dev_set_drvdata(dev, priv); + ++ spin_lock_init(&priv->pcc_lock); + INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check); + + queue_delayed_work(system_power_efficient_wq, &priv->cdr_check, +@@ -603,6 +615,14 @@ static int lynx_28g_probe(struct platform_device *pdev) + return PTR_ERR_OR_ZERO(provider); + } + ++static void lynx_28g_remove(struct platform_device *pdev) ++{ ++ struct 
device *dev = &pdev->dev; ++ struct lynx_28g_priv *priv = dev_get_drvdata(dev); ++ ++ cancel_delayed_work_sync(&priv->cdr_check); ++} ++ + static const struct of_device_id lynx_28g_of_match_table[] = { + { .compatible = "fsl,lynx-28g" }, + { }, +@@ -611,6 +631,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table); + + static struct platform_driver lynx_28g_driver = { + .probe = lynx_28g_probe, ++ .remove_new = lynx_28g_remove, + .driver = { + .name = "lynx-28g", + .of_match_table = lynx_28g_of_match_table, +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c +index 9e57f4c62e609..27e41873c04ff 100644 +--- a/drivers/pinctrl/core.c ++++ b/drivers/pinctrl/core.c +@@ -1007,17 +1007,20 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev, + + static struct pinctrl *find_pinctrl(struct device *dev) + { +- struct pinctrl *p; ++ struct pinctrl *entry, *p = NULL; + + mutex_lock(&pinctrl_list_mutex); +- list_for_each_entry(p, &pinctrl_list, node) +- if (p->dev == dev) { +- mutex_unlock(&pinctrl_list_mutex); +- return p; ++ ++ list_for_each_entry(entry, &pinctrl_list, node) { ++ if (entry->dev == dev) { ++ p = entry; ++ kref_get(&p->users); ++ break; + } ++ } + + mutex_unlock(&pinctrl_list_mutex); +- return NULL; ++ return p; + } + + static void pinctrl_free(struct pinctrl *p, bool inlist); +@@ -1126,7 +1129,6 @@ struct pinctrl *pinctrl_get(struct device *dev) + p = find_pinctrl(dev); + if (p) { + dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n"); +- kref_get(&p->users); + return p; + } + +diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c +index 8193b92da4031..274e01d5212d5 100644 +--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c ++++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c +@@ -1041,13 +1041,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev, + if (ret < 0) + return ret; + +- gpio = &pctrl->gpio_bank[reg]; +- gpio->pctrl = pctrl; +- + if (reg >= WPCM450_NUM_BANKS) + return dev_err_probe(dev, -EINVAL, + "GPIO index %d out of range!\n", reg); + ++ gpio = &pctrl->gpio_bank[reg]; ++ gpio->pctrl = pctrl; ++ + bank = &wpcm450_banks[reg]; + gpio->bank = bank; + +diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig +index 0903a0a418319..1ef8759802618 100644 +--- a/drivers/pinctrl/renesas/Kconfig ++++ b/drivers/pinctrl/renesas/Kconfig +@@ -240,6 +240,7 @@ config PINCTRL_RZN1 + depends on OF + depends on ARCH_RZN1 || COMPILE_TEST + select GENERIC_PINCONF ++ select PINMUX + help + This selects pinctrl driver for Renesas RZ/N1 devices. + +diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c +index 3bacee2b8d521..51f23ff1f2b05 100644 +--- a/drivers/platform/x86/hp/hp-wmi.c ++++ b/drivers/platform/x86/hp/hp-wmi.c +@@ -1399,7 +1399,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = { + .restore = hp_wmi_resume_handler, + }; + +-static struct platform_driver hp_wmi_driver = { ++/* ++ * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via ++ * module_platform_driver_probe() this is ok because they cannot get unbound at ++ * runtime. So mark the driver struct with __refdata to prevent modpost ++ * triggering a section mismatch warning. 
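++ * (hp-wmi is registered via module_platform_driver_probe() at the end of this file.)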
++ */ ++static struct platform_driver hp_wmi_driver __refdata = { + .driver = { + .name = "hp-wmi", + .pm = &hp_wmi_pm_ops, +diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c +index f6290221d139d..6641f934f15bf 100644 +--- a/drivers/platform/x86/think-lmi.c ++++ b/drivers/platform/x86/think-lmi.c +@@ -1245,6 +1245,24 @@ static void tlmi_release_attr(void) + kset_unregister(tlmi_priv.authentication_kset); + } + ++static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name) ++{ ++ struct kobject *duplicate; ++ ++ if (!strcmp(name, "Reserved")) ++ return -EINVAL; ++ ++ duplicate = kset_find_obj(attribute_kset, name); ++ if (duplicate) { ++ pr_debug("Duplicate attribute name found - %s\n", name); ++ /* kset_find_obj() returns a reference */ ++ kobject_put(duplicate); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ + static int tlmi_sysfs_init(void) + { + int i, ret; +@@ -1273,10 +1291,8 @@ static int tlmi_sysfs_init(void) + continue; + + /* check for duplicate or reserved values */ +- if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) || +- !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) { +- pr_debug("duplicate or reserved attribute name found - %s\n", +- tlmi_priv.setting[i]->display_name); ++ if (tlmi_validate_setting_name(tlmi_priv.attribute_kset, ++ tlmi_priv.setting[i]->display_name) < 0) { + kfree(tlmi_priv.setting[i]->possible_values); + kfree(tlmi_priv.setting[i]); + tlmi_priv.setting[i] = NULL; +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c +index ed26c52ed8474..bab00b65bc9d1 100644 +--- a/drivers/scsi/scsi_scan.c ++++ b/drivers/scsi/scsi_scan.c +@@ -1619,12 +1619,13 @@ int scsi_rescan_device(struct scsi_device *sdev) + device_lock(dev); + + /* +- * Bail out if the device is not running. Otherwise, the rescan may +- * block waiting for commands to be executed, with us holding the +- * device lock. This can result in a potential deadlock in the power +- * management core code when system resume is on-going. ++ * Bail out if the device or its queue are not running. Otherwise, ++ * the rescan may block waiting for commands to be executed, with us ++ * holding the device lock. This can result in a potential deadlock ++ * in the power management core code when system resume is on-going. 
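++	 * blk_queue_pm_only() is set while only power management requests are allowed.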
+ */ +- if (sdev->sdev_state != SDEV_RUNNING) { ++ if (sdev->sdev_state != SDEV_RUNNING || ++ blk_queue_pm_only(sdev->request_queue)) { + ret = -EWOULDBLOCK; + goto unlock; + } +diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c +index 372d64756ed64..3c15f6a9e91c0 100644 +--- a/drivers/tee/amdtee/core.c ++++ b/drivers/tee/amdtee/core.c +@@ -217,12 +217,12 @@ unlock: + return rc; + } + ++/* mutex must be held by caller */ + static void destroy_session(struct kref *ref) + { + struct amdtee_session *sess = container_of(ref, struct amdtee_session, + refcount); + +- mutex_lock(&session_list_mutex); + list_del(&sess->list_node); + mutex_unlock(&session_list_mutex); + kfree(sess); +@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx, + if (arg->ret != TEEC_SUCCESS) { + pr_err("open_session failed %d\n", arg->ret); + handle_unload_ta(ta_handle); +- kref_put(&sess->refcount, destroy_session); ++ kref_put_mutex(&sess->refcount, destroy_session, ++ &session_list_mutex); + goto out; + } + +@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx, + pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS); + handle_close_session(ta_handle, session_info); + handle_unload_ta(ta_handle); +- kref_put(&sess->refcount, destroy_session); ++ kref_put_mutex(&sess->refcount, destroy_session, ++ &session_list_mutex); + rc = -ENOMEM; + goto out; + } +@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session) + handle_close_session(ta_handle, session_info); + handle_unload_ta(ta_handle); + +- kref_put(&sess->refcount, destroy_session); ++ kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex); + + return 0; + } +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c +index 86521ebb25794..69b2ca95fe37a 100644 +--- a/drivers/thunderbolt/icm.c ++++ b/drivers/thunderbolt/icm.c +@@ -41,6 +41,7 @@ + #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 + + #define ICM_TIMEOUT 5000 /* ms */ ++#define ICM_RETRIES 3 + #define ICM_APPROVE_TIMEOUT 10000 /* ms */ + #define ICM_MAX_LINK 4 + +@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) + + static int icm_request(struct tb *tb, const void *request, size_t request_size, + void *response, size_t response_size, size_t npackets, +- unsigned int timeout_msec) ++ int retries, unsigned int timeout_msec) + { + struct icm *icm = tb_priv(tb); +- int retries = 3; + + do { + struct tb_cfg_request *req; +@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) + return -ENOMEM; + + ret = icm_request(tb, &request, sizeof(request), switches, +- sizeof(*switches), npackets, ICM_TIMEOUT); ++ sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + goto err_free; + +@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) + memset(&reply, 0, sizeof(reply)); + /* Use larger timeout as establishing tunnels can take some time */ + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_APPROVE_TIMEOUT); ++ 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT); + if (ret) + return ret; + +@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) + + 
memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1022,7 +1022,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, 20000); ++ 1, 10, 2000); + if (ret) + return ret; + +@@ -1055,7 +1055,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_APPROVE_TIMEOUT); ++ 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT); + if (ret) + return ret; + +@@ -1083,7 +1083,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1112,7 +1112,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1146,7 +1146,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1172,7 +1172,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1498,7 +1498,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1524,7 +1524,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1545,7 +1545,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1606,7 +1606,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, 
sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +@@ -1628,7 +1628,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, 20000); ++ 1, ICM_RETRIES, 20000); + if (ret) + return ret; + +@@ -2300,7 +2300,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), +- 1, ICM_TIMEOUT); ++ 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c +index 9699d167d522d..55698a0978f03 100644 +--- a/drivers/thunderbolt/switch.c ++++ b/drivers/thunderbolt/switch.c +@@ -2763,6 +2763,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) + !tb_port_is_width_supported(down, 2)) + return 0; + ++ /* ++ * Both lanes need to be in CL0. Here we assume lane 0 already be in ++ * CL0 and check just for lane 1. ++ */ ++ if (tb_wait_for_port(down->dual_link_port, false) <= 0) ++ return -ENOTCONN; ++ + ret = tb_port_lane_bonding_enable(up); + if (ret) { + tb_port_warn(up, "failed to enable lane bonding\n"); +diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c +index 9a3c52f6b8c97..18e2ffd095a42 100644 +--- a/drivers/thunderbolt/xdomain.c ++++ b/drivers/thunderbolt/xdomain.c +@@ -704,6 +704,27 @@ out_unlock: + mutex_unlock(&xdomain_lock); + } + ++static void start_handshake(struct tb_xdomain *xd) ++{ ++ xd->state = XDOMAIN_STATE_INIT; ++ queue_delayed_work(xd->tb->wq, &xd->state_work, ++ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); ++} ++ ++/* Can be called from state_work */ ++static void __stop_handshake(struct tb_xdomain *xd) ++{ ++ cancel_delayed_work_sync(&xd->properties_changed_work); ++ xd->properties_changed_retries = 0; ++ xd->state_retries = 0; ++} ++ ++static void stop_handshake(struct tb_xdomain *xd) ++{ ++ cancel_delayed_work_sync(&xd->state_work); ++ __stop_handshake(xd); ++} ++ + static void tb_xdp_handle_request(struct work_struct *work) + { + struct xdomain_request_work *xw = container_of(work, typeof(*xw), work); +@@ -766,6 +787,15 @@ static void tb_xdp_handle_request(struct work_struct *work) + case UUID_REQUEST: + tb_dbg(tb, "%llx: received XDomain UUID request\n", route); + ret = tb_xdp_uuid_response(ctl, route, sequence, uuid); ++ /* ++ * If we've stopped the discovery with an error such as ++ * timing out, we will restart the handshake now that we ++ * received UUID request from the remote host. 
++ */ ++ if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) { ++ dev_dbg(&xd->dev, "restarting handshake\n"); ++ start_handshake(xd); ++ } + break; + + case LINK_STATE_STATUS_REQUEST: +@@ -1522,6 +1552,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd) + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); + } + ++static void tb_xdomain_failed(struct tb_xdomain *xd) ++{ ++ xd->state = XDOMAIN_STATE_ERROR; ++ queue_delayed_work(xd->tb->wq, &xd->state_work, ++ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); ++} ++ + static void tb_xdomain_state_work(struct work_struct *work) + { + struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work); +@@ -1548,7 +1585,7 @@ static void tb_xdomain_state_work(struct work_struct *work) + if (ret) { + if (ret == -EAGAIN) + goto retry_state; +- xd->state = XDOMAIN_STATE_ERROR; ++ tb_xdomain_failed(xd); + } else { + tb_xdomain_queue_properties_changed(xd); + if (xd->bonding_possible) +@@ -1613,7 +1650,7 @@ static void tb_xdomain_state_work(struct work_struct *work) + if (ret) { + if (ret == -EAGAIN) + goto retry_state; +- xd->state = XDOMAIN_STATE_ERROR; ++ tb_xdomain_failed(xd); + } else { + xd->state = XDOMAIN_STATE_ENUMERATED; + } +@@ -1624,6 +1661,8 @@ static void tb_xdomain_state_work(struct work_struct *work) + break; + + case XDOMAIN_STATE_ERROR: ++ dev_dbg(&xd->dev, "discovery failed, stopping handshake\n"); ++ __stop_handshake(xd); + break; + + default: +@@ -1793,21 +1832,6 @@ static void tb_xdomain_release(struct device *dev) + kfree(xd); + } + +-static void start_handshake(struct tb_xdomain *xd) +-{ +- xd->state = XDOMAIN_STATE_INIT; +- queue_delayed_work(xd->tb->wq, &xd->state_work, +- msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); +-} +- +-static void stop_handshake(struct tb_xdomain *xd) +-{ +- cancel_delayed_work_sync(&xd->properties_changed_work); +- cancel_delayed_work_sync(&xd->state_work); +- xd->properties_changed_retries = 0; +- xd->state_retries = 0; +-} +- + static int __maybe_unused tb_xdomain_suspend(struct device *dev) + { + stop_handshake(tb_to_xdomain(dev)); +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index b4e3f14b9a3d7..6ba4ef2c3949e 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -6749,7 +6749,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) + mask, 0, 1000, 1000); + + dev_err(hba->dev, "Clearing task management function with tag %d %s\n", +- tag, err ? "succeeded" : "failed"); ++ tag, err < 0 ? 
"failed" : "succeeded"); + + out: + return err; +diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c +index f9aa50ff14d42..0044897ee800d 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.c ++++ b/drivers/usb/cdns3/cdnsp-gadget.c +@@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, + unsigned long flags; + int ret; + ++ if (request->status != -EINPROGRESS) ++ return 0; ++ + if (!pep->endpoint.desc) { + dev_err(pdev->dev, + "%s: can't dequeue to disabled endpoint\n", +diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h +index 4a4dbc2c15615..81a9c9d6be08b 100644 +--- a/drivers/usb/cdns3/core.h ++++ b/drivers/usb/cdns3/core.h +@@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active); + #else /* CONFIG_PM_SLEEP */ + static inline int cdns_resume(struct cdns *cdns) + { return 0; } +-static inline int cdns_set_active(struct cdns *cdns, u8 set_active) +-{ return 0; } ++static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { } + static inline int cdns_suspend(struct cdns *cdns) + { return 0; } + #endif /* CONFIG_PM_SLEEP */ +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 0069a24bd216c..81c8f564cf878 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev) + if (udev->quirks & USB_QUIRK_NO_LPM) + return 0; + ++ /* Skip if the device BOS descriptor couldn't be read */ ++ if (!udev->bos) ++ return 0; ++ + /* USB 2.1 (and greater) devices indicate LPM support through + * their USB 2.0 Extended Capabilities BOS descriptor. + */ +@@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev) + if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER) + return; + ++ /* Skip if the device BOS descriptor couldn't be read */ ++ if (!udev->bos) ++ return; ++ + hub = usb_hub_to_struct_hub(udev->parent); + /* It doesn't take time to transition the roothub into U0, since it + * doesn't have an upstream link. +@@ -2705,13 +2713,17 @@ out_authorized: + static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev, + u32 ext_portstatus) + { +- struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap; ++ struct usb_ssp_cap_descriptor *ssp_cap; + u32 attr; + u8 speed_id; + u8 ssac; + u8 lanes; + int i; + ++ if (!hdev->bos) ++ goto out; ++ ++ ssp_cap = hdev->bos->ssp_cap; + if (!ssp_cap) + goto out; + +@@ -4187,8 +4199,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, + enum usb3_link_state state) + { + int timeout; +- __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; +- __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; ++ __u8 u1_mel; ++ __le16 u2_mel; ++ ++ /* Skip if the device BOS descriptor couldn't be read */ ++ if (!udev->bos) ++ return; ++ ++ u1_mel = udev->bos->ss_cap->bU1devExitLat; ++ u2_mel = udev->bos->ss_cap->bU2DevExitLat; + + /* If the device says it doesn't have *any* exit latency to come out of + * U1 or U2, it's probably lying. 
Assume it doesn't implement that link +diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h +index b2925856b4cb4..bc66205ca52c3 100644 +--- a/drivers/usb/core/hub.h ++++ b/drivers/usb/core/hub.h +@@ -145,7 +145,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev) + { + return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS && + le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 && +- hdev->bos->ssp_cap); ++ hdev->bos && hdev->bos->ssp_cap); + } + + static inline unsigned hub_power_on_good_delay(struct usb_hub *hub) +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 3ee70ffaf0035..57e2f4cc744f7 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc) + * XHCI driver will reset the host block. If dwc3 was configured for + * host-only mode or current role is host, then we can return early. + */ +- if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) ++ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) + return 0; + ++ /* ++ * If the dr_mode is host and the dwc->current_dr_role is not the ++ * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode ++ * isn't executed yet. Ensure the phy is ready before the controller ++ * updates the GCTL.PRTCAPDIR or other settings by soft-resetting ++ * the phy. ++ * ++ * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n ++ * is port index. If this is a multiport host, then we need to reset ++ * all active ports. ++ */ ++ if (dwc->dr_mode == USB_DR_MODE_HOST) { ++ u32 usb3_port; ++ u32 usb2_port; ++ ++ usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); ++ usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST; ++ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); ++ ++ usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); ++ usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST; ++ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); ++ ++ /* Small delay for phy reset assertion */ ++ usleep_range(1000, 2000); ++ ++ usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; ++ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); ++ ++ usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; ++ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); ++ ++ /* Wait for clock synchronization */ ++ msleep(50); ++ return 0; ++ } ++ + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + reg |= DWC3_DCTL_CSFTRST; + reg &= ~DWC3_DCTL_RUN_STOP; +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index 424bb3b666dbd..faf90a2174194 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -1171,7 +1171,8 @@ static int ncm_unwrap_ntb(struct gether *port, + struct sk_buff_head *list) + { + struct f_ncm *ncm = func_to_ncm(&port->func); +- __le16 *tmp = (void *) skb->data; ++ unsigned char *ntb_ptr = skb->data; ++ __le16 *tmp; + unsigned index, index2; + int ndp_index; + unsigned dg_len, dg_len2; +@@ -1184,6 +1185,10 @@ static int ncm_unwrap_ntb(struct gether *port, + const struct ndp_parser_opts *opts = ncm->parser_opts; + unsigned crc_len = ncm->is_crc ? 
sizeof(uint32_t) : 0; + int dgram_counter; ++ int to_process = skb->len; ++ ++parse_ntb: ++ tmp = (__le16 *)ntb_ptr; + + /* dwSignature */ + if (get_unaligned_le32(tmp) != opts->nth_sign) { +@@ -1230,7 +1235,7 @@ static int ncm_unwrap_ntb(struct gether *port, + * walk through NDP + * dwSignature + */ +- tmp = (void *)(skb->data + ndp_index); ++ tmp = (__le16 *)(ntb_ptr + ndp_index); + if (get_unaligned_le32(tmp) != ncm->ndp_sign) { + INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); + goto err; +@@ -1287,11 +1292,11 @@ static int ncm_unwrap_ntb(struct gether *port, + if (ncm->is_crc) { + uint32_t crc, crc2; + +- crc = get_unaligned_le32(skb->data + ++ crc = get_unaligned_le32(ntb_ptr + + index + dg_len - + crc_len); + crc2 = ~crc32_le(~0, +- skb->data + index, ++ ntb_ptr + index, + dg_len - crc_len); + if (crc != crc2) { + INFO(port->func.config->cdev, +@@ -1318,7 +1323,7 @@ static int ncm_unwrap_ntb(struct gether *port, + dg_len - crc_len); + if (skb2 == NULL) + goto err; +- skb_put_data(skb2, skb->data + index, ++ skb_put_data(skb2, ntb_ptr + index, + dg_len - crc_len); + + skb_queue_tail(list, skb2); +@@ -1331,10 +1336,17 @@ static int ncm_unwrap_ntb(struct gether *port, + } while (ndp_len > 2 * (opts->dgram_item_len * 2)); + } while (ndp_index); + +- dev_consume_skb_any(skb); +- + VDBG(port->func.config->cdev, + "Parsed NTB with %d frames\n", dgram_counter); ++ ++ to_process -= block_len; ++ if (to_process != 0) { ++ ntb_ptr = (unsigned char *)(ntb_ptr + block_len); ++ goto parse_ntb; ++ } ++ ++ dev_consume_skb_any(skb); ++ + return 0; + err: + skb_queue_purge(list); +diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c +index 4827e3cd38340..4c7a4f7703c21 100644 +--- a/drivers/usb/gadget/udc/udc-xilinx.c ++++ b/drivers/usb/gadget/udc/udc-xilinx.c +@@ -499,11 +499,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, + /* Get the Buffer address and copy the transmit data.*/ + eprambase = (u32 __force *)(udc->addr + ep->rambase); + if (ep->is_in) { +- memcpy(eprambase, bufferptr, bytestosend); ++ memcpy_toio((void __iomem *)eprambase, bufferptr, ++ bytestosend); + udc->write_fn(udc->addr, ep->offset + + XUSB_EP_BUF0COUNT_OFFSET, bufferlen); + } else { +- memcpy(bufferptr, eprambase, bytestosend); ++ memcpy_toio((void __iomem *)bufferptr, eprambase, ++ bytestosend); + } + /* + * Enable the buffer for transmission. +@@ -517,11 +519,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, + eprambase = (u32 __force *)(udc->addr + ep->rambase + + ep->ep_usb.maxpacket); + if (ep->is_in) { +- memcpy(eprambase, bufferptr, bytestosend); ++ memcpy_toio((void __iomem *)eprambase, bufferptr, ++ bytestosend); + udc->write_fn(udc->addr, ep->offset + + XUSB_EP_BUF1COUNT_OFFSET, bufferlen); + } else { +- memcpy(bufferptr, eprambase, bytestosend); ++ memcpy_toio((void __iomem *)bufferptr, eprambase, ++ bytestosend); + } + /* + * Enable the buffer for transmission. 
+@@ -1023,7 +1027,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req) + udc->addr); + length = req->usb_req.actual = min_t(u32, length, + EP0_MAX_PACKET); +- memcpy(corebuf, req->usb_req.buf, length); ++ memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length); + udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length); + udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); + } else { +@@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc) + + /* Load up the chapter 9 command buffer.*/ + ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET); +- memcpy(&setup, ep0rambase, 8); ++ memcpy_toio((void __iomem *)&setup, ep0rambase, 8); + + udc->setup = setup; + udc->setup.wValue = cpu_to_le16(setup.wValue); +@@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc) + (ep0->rambase << 2)); + buffer = req->usb_req.buf + req->usb_req.actual; + req->usb_req.actual = req->usb_req.actual + bytes_to_rx; +- memcpy(buffer, ep0rambase, bytes_to_rx); ++ memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx); + + if (req->usb_req.length == req->usb_req.actual) { + /* Data transfer completed get ready for Status stage */ +@@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc) + (ep0->rambase << 2)); + buffer = req->usb_req.buf + req->usb_req.actual; + req->usb_req.actual = req->usb_req.actual + length; +- memcpy(ep0rambase, buffer, length); ++ memcpy_toio((void __iomem *)ep0rambase, buffer, length); + } + udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count); + udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 281690c582cba..1239e06dfe411 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -764,7 +764,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, + static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_td *td) + { +- struct device *dev = xhci_to_hcd(xhci)->self.controller; ++ struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_segment *seg = td->bounce_seg; + struct urb *urb = td->urb; + size_t len; +@@ -3455,7 +3455,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, + static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + u32 *trb_buff_len, struct xhci_segment *seg) + { +- struct device *dev = xhci_to_hcd(xhci)->self.controller; ++ struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + unsigned int unalign; + unsigned int max_pkt; + u32 new_buff_len; +diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c +index 30a89aa8a3e7a..5401ae66894eb 100644 +--- a/drivers/usb/musb/musb_debugfs.c ++++ b/drivers/usb/musb/musb_debugfs.c +@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = { + { "IntrUsbE", MUSB_INTRUSBE, 8 }, + { "DevCtl", MUSB_DEVCTL, 8 }, + { "VControl", 0x68, 32 }, +- { "HWVers", 0x69, 16 }, ++ { "HWVers", MUSB_HWVERS, 16 }, + { "LinkInfo", MUSB_LINKINFO, 8 }, + { "VPLen", MUSB_VPLEN, 8 }, + { "HS_EOF1", MUSB_HS_EOF1, 8 }, +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c +index 9ff7d891b4b76..ef0b1589b10eb 100644 +--- a/drivers/usb/musb/musb_host.c ++++ b/drivers/usb/musb/musb_host.c +@@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, + musb_giveback(musb, urb, status); + qh->is_ready = ready; + ++ /* ++ * musb->lock had been unlocked in musb_giveback, so 
qh may ++ * be freed, need to get it again ++ */ ++ qh = musb_ep_get_qh(hw_ep, is_in); ++ + /* reclaim resources (and bandwidth) ASAP; deschedule it, and + * invalidate qh as soon as list_empty(&hep->urb_list) + */ +- if (list_empty(&qh->hep->urb_list)) { ++ if (qh && list_empty(&qh->hep->urb_list)) { + struct list_head *head; + struct dma_controller *dma = musb->dma_controller; + +@@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + * and its URB list has emptied, recycle this qh. + */ + if (ready && list_empty(&qh->hep->urb_list)) { ++ musb_ep_set_qh(qh->hw_ep, is_in, NULL); + qh->hep->hcpriv = NULL; + list_del(&qh->ring); + kfree(qh); +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c +index 7a3caf556dae9..f564d0d471bbc 100644 +--- a/drivers/usb/typec/altmodes/displayport.c ++++ b/drivers/usb/typec/altmodes/displayport.c +@@ -301,6 +301,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt, + case CMD_EXIT_MODE: + dp->data.status = 0; + dp->data.conf = 0; ++ if (dp->hpd) { ++ drm_connector_oob_hotplug_event(dp->connector_fwnode); ++ dp->hpd = false; ++ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd"); ++ } + break; + case DP_CMD_STATUS_UPDATE: + dp->data.status = *vdo; +diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c +index 384b42267f1fc..b35c6e07911e9 100644 +--- a/drivers/usb/typec/ucsi/psy.c ++++ b/drivers/usb/typec/ucsi/psy.c +@@ -37,6 +37,15 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con, + struct device *dev = con->ucsi->dev; + + device_property_read_u8(dev, "scope", &scope); ++ if (scope == POWER_SUPPLY_SCOPE_UNKNOWN) { ++ u32 mask = UCSI_CAP_ATTR_POWER_AC_SUPPLY | ++ UCSI_CAP_ATTR_BATTERY_CHARGING; ++ ++ if (con->ucsi->cap.attributes & mask) ++ scope = POWER_SUPPLY_SCOPE_SYSTEM; ++ else ++ scope = POWER_SUPPLY_SCOPE_DEVICE; ++ } + val->intval = scope; + return 0; + } +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index 47a2c73df3420..dc2dea3768fb6 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -785,6 +785,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) + if (ret < 0) { + dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n", + __func__, ret); ++ clear_bit(EVENT_PENDING, &con->ucsi->flags); + goto out_unlock; + } + +diff --git a/fs/ceph/file.c b/fs/ceph/file.c +index 02414437d8abf..882eccfd67e84 100644 +--- a/fs/ceph/file.c ++++ b/fs/ceph/file.c +@@ -2498,7 +2498,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + ret = do_splice_direct(src_file, &src_off, dst_file, + &dst_off, src_objlen, flags); + /* Abort on short copies or on error */ +- if (ret < src_objlen) { ++ if (ret < (long)src_objlen) { + dout("Failed partial copy (%zd)\n", ret); + goto out; + } +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index bad9eeb6a1a59..29384ec1a524c 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -655,9 +655,7 @@ int ceph_fill_file_size(struct inode *inode, int issued, + ci->i_truncate_seq = truncate_seq; + + /* the MDS should have revoked these caps */ +- WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL | +- CEPH_CAP_FILE_RD | +- CEPH_CAP_FILE_WR | ++ WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD | + CEPH_CAP_FILE_LAZYIO)); + /* + * If we hold relevant caps, or in the case where we're +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c +index 53b65c5300fde..f26ddfcaa5e61 100644 +--- a/fs/quota/dquot.c ++++ b/fs/quota/dquot.c 
+@@ -233,19 +233,18 @@ static void put_quota_format(struct quota_format_type *fmt) + * All dquots are placed to the end of inuse_list when first created, and this + * list is used for invalidate operation, which must look at every dquot. + * +- * When the last reference of a dquot will be dropped, the dquot will be +- * added to releasing_dquots. We'd then queue work item which would call ++ * When the last reference of a dquot is dropped, the dquot is added to ++ * releasing_dquots. We'll then queue work item which will call + * synchronize_srcu() and after that perform the final cleanup of all the +- * dquots on the list. Both releasing_dquots and free_dquots use the +- * dq_free list_head in the dquot struct. When a dquot is removed from +- * releasing_dquots, a reference count is always subtracted, and if +- * dq_count == 0 at that point, the dquot will be added to the free_dquots. ++ * dquots on the list. Each cleaned up dquot is moved to free_dquots list. ++ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot ++ * struct. + * +- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, +- * and this list is searched whenever we need an available dquot. Dquots are +- * removed from the list as soon as they are used again, and +- * dqstats.free_dquots gives the number of dquots on the list. When +- * dquot is invalidated it's completely released from memory. ++ * Unused and cleaned up dquots are in the free_dquots list and this list is ++ * searched whenever we need an available dquot. Dquots are removed from the ++ * list as soon as they are used again and dqstats.free_dquots gives the number ++ * of dquots on the list. When dquot is invalidated it's completely released ++ * from memory. + * + * Dirty dquots are added to the dqi_dirty_list of quota_info when mark + * dirtied, and this list is searched when writing dirty dquots back to +@@ -321,6 +320,7 @@ static inline void put_dquot_last(struct dquot *dquot) + static inline void put_releasing_dquots(struct dquot *dquot) + { + list_add_tail(&dquot->dq_free, &releasing_dquots); ++ set_bit(DQ_RELEASING_B, &dquot->dq_flags); + } + + static inline void remove_free_dquot(struct dquot *dquot) +@@ -328,8 +328,10 @@ static inline void remove_free_dquot(struct dquot *dquot) + if (list_empty(&dquot->dq_free)) + return; + list_del_init(&dquot->dq_free); +- if (!atomic_read(&dquot->dq_count)) ++ if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags)) + dqstats_dec(DQST_FREE_DQUOTS); ++ else ++ clear_bit(DQ_RELEASING_B, &dquot->dq_flags); + } + + static inline void put_inuse(struct dquot *dquot) +@@ -581,12 +583,6 @@ restart: + continue; + /* Wait for dquot users */ + if (atomic_read(&dquot->dq_count)) { +- /* dquot in releasing_dquots, flush and retry */ +- if (!list_empty(&dquot->dq_free)) { +- spin_unlock(&dq_list_lock); +- goto restart; +- } +- + atomic_inc(&dquot->dq_count); + spin_unlock(&dq_list_lock); + /* +@@ -605,6 +601,15 @@ restart: + * restart. */ + goto restart; + } ++ /* ++ * The last user already dropped its reference but dquot didn't ++ * get fully cleaned up yet. Restart the scan which flushes the ++ * work cleaning up released dquots. 
++ */ ++ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) { ++ spin_unlock(&dq_list_lock); ++ goto restart; ++ } + /* + * Quota now has no users and it has been written on last + * dqput() +@@ -696,6 +701,13 @@ int dquot_writeback_dquots(struct super_block *sb, int type) + dq_dirty); + + WARN_ON(!dquot_active(dquot)); ++ /* If the dquot is releasing we should not touch it */ ++ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) { ++ spin_unlock(&dq_list_lock); ++ flush_delayed_work("a_release_work); ++ spin_lock(&dq_list_lock); ++ continue; ++ } + + /* Now we have active dquot from which someone is + * holding reference so we can safely just increase +@@ -809,18 +821,18 @@ static void quota_release_workfn(struct work_struct *work) + /* Exchange the list head to avoid livelock. */ + list_replace_init(&releasing_dquots, &rls_head); + spin_unlock(&dq_list_lock); ++ synchronize_srcu(&dquot_srcu); + + restart: +- synchronize_srcu(&dquot_srcu); + spin_lock(&dq_list_lock); + while (!list_empty(&rls_head)) { + dquot = list_first_entry(&rls_head, struct dquot, dq_free); +- /* Dquot got used again? */ +- if (atomic_read(&dquot->dq_count) > 1) { +- remove_free_dquot(dquot); +- atomic_dec(&dquot->dq_count); +- continue; +- } ++ WARN_ON_ONCE(atomic_read(&dquot->dq_count)); ++ /* ++ * Note that DQ_RELEASING_B protects us from racing with ++ * invalidate_dquots() calls so we are safe to work with the ++ * dquot even after we drop dq_list_lock. ++ */ + if (dquot_dirty(dquot)) { + spin_unlock(&dq_list_lock); + /* Commit dquot before releasing */ +@@ -834,7 +846,6 @@ restart: + } + /* Dquot is inactive and clean, now move it to free list */ + remove_free_dquot(dquot); +- atomic_dec(&dquot->dq_count); + put_dquot_last(dquot); + } + spin_unlock(&dq_list_lock); +@@ -875,6 +886,7 @@ void dqput(struct dquot *dquot) + BUG_ON(!list_empty(&dquot->dq_free)); + #endif + put_releasing_dquots(dquot); ++ atomic_dec(&dquot->dq_count); + spin_unlock(&dq_list_lock); + queue_delayed_work(system_unbound_wq, "a_release_work, 1); + } +@@ -963,7 +975,7 @@ we_slept: + dqstats_inc(DQST_LOOKUPS); + } + /* Wait for dq_lock - after this we know that either dquot_release() is +- * already finished or it will be canceled due to dq_count > 1 test */ ++ * already finished or it will be canceled due to dq_count > 0 test */ + wait_on_dquot(dquot); + /* Read the dquot / allocate space in quota file */ + if (!dquot_active(dquot)) { +diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c +index 0ae5dd0829e92..6ec6c129465d3 100644 +--- a/fs/smb/server/vfs_cache.c ++++ b/fs/smb/server/vfs_cache.c +@@ -105,7 +105,7 @@ int ksmbd_query_inode_status(struct inode *inode) + ci = __ksmbd_inode_lookup(inode); + if (ci) { + ret = KSMBD_INODE_STATUS_OK; +- if (ci->m_flags & S_DEL_PENDING) ++ if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)) + ret = KSMBD_INODE_STATUS_PENDING_DELETE; + atomic_dec(&ci->m_count); + } +@@ -115,7 +115,7 @@ int ksmbd_query_inode_status(struct inode *inode) + + bool ksmbd_inode_pending_delete(struct ksmbd_file *fp) + { +- return (fp->f_ci->m_flags & S_DEL_PENDING); ++ return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)); + } + + void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp) +diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h +index be572c3a4dcdd..3dfb994312b1f 100644 +--- a/include/linux/dma-fence.h ++++ b/include/linux/dma-fence.h +@@ -548,6 +548,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence, + fence->error = error; + } + ++/** ++ * dma_fence_timestamp - 
helper to get the completion timestamp of a fence ++ * @fence: fence to get the timestamp from. ++ * ++ * After a fence is signaled the timestamp is updated with the signaling time, ++ * but setting the timestamp can race with tasks waiting for the signaling. This ++ * helper busy waits for the correct timestamp to appear. ++ */ ++static inline ktime_t dma_fence_timestamp(struct dma_fence *fence) ++{ ++ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) ++ return ktime_get(); ++ ++ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) ++ cpu_relax(); ++ ++ return fence->timestamp; ++} ++ + signed long dma_fence_wait_timeout(struct dma_fence *, + bool intr, signed long timeout); + signed long dma_fence_wait_any_timeout(struct dma_fence **fences, +diff --git a/include/linux/libata.h b/include/linux/libata.h +index a9ec8d97a715b..45910aebc3778 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -189,6 +189,7 @@ enum { + ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ + ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ + ++ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */ + ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ + ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ + ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ +@@ -311,8 +312,10 @@ enum { + ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, + ATA_EH_ENABLE_LINK = (1 << 3), + ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ ++ ATA_EH_SET_ACTIVE = (1 << 6), /* Set a device to active power mode */ + +- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, ++ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK | ++ ATA_EH_SET_ACTIVE, + ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | + ATA_EH_ENABLE_LINK, + +@@ -350,7 +353,7 @@ enum { + /* This should match the actual table size of + * ata_eh_cmd_timeout_table in libata-eh.c. + */ +- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7, ++ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, + + /* Horkage types. May be set by libata or controller on drives + (some horkage may be drive/controller pair dependent */ +diff --git a/include/linux/mcb.h b/include/linux/mcb.h +index f6efb16f9d1b4..91ec9a83149e8 100644 +--- a/include/linux/mcb.h ++++ b/include/linux/mcb.h +@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev) + struct mcb_device { + struct device dev; + struct mcb_bus *bus; +- bool is_added; + struct mcb_driver *driver; + u16 id; + int inst; +diff --git a/include/linux/quota.h b/include/linux/quota.h +index fd692b4a41d5f..07071e64abf3d 100644 +--- a/include/linux/quota.h ++++ b/include/linux/quota.h +@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type) + #define DQ_FAKE_B 3 /* no limits only usage */ + #define DQ_READ_B 4 /* dquot was read into memory */ + #define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ +-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\ ++#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting ++ * to be cleaned up */ ++#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\ + * for the mask of entries set via SETQUOTA\ + * quotactl. 
They are set under dq_data_lock\ + * and the quota format handling dquot can\ +diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h +index 0d8625d717339..3abd249ec3373 100644 +--- a/include/linux/quotaops.h ++++ b/include/linux/quotaops.h +@@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot) + { + if (test_bit(DQ_MOD_B, &dquot->dq_flags)) + return true; +- if (atomic_read(&dquot->dq_count) > 1) ++ if (atomic_read(&dquot->dq_count) > 0) + return true; + return false; + } +diff --git a/include/net/macsec.h b/include/net/macsec.h +index 5b9c61c4d3a62..65c93959c2dc5 100644 +--- a/include/net/macsec.h ++++ b/include/net/macsec.h +@@ -257,6 +257,7 @@ struct macsec_context { + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + struct { ++ bool update_pn; + unsigned char assoc_num; + u8 key[MACSEC_MAX_KEY_LEN]; + union { +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index 1b80046794451..ede2ff1da53a3 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -64,6 +64,7 @@ struct netns_ipv4 { + #endif + bool fib_has_custom_local_routes; + bool fib_offload_disabled; ++ u8 sysctl_tcp_shrink_window; + #ifdef CONFIG_IP_ROUTE_CLASSID + atomic_t fib_num_tclassid_users; + #endif +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 3052680201e57..eb3f52be115d6 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -10778,7 +10778,7 @@ static int check_return_code(struct bpf_verifier_env *env) + struct tnum enforce_attach_type_range = tnum_unknown; + const struct bpf_prog *prog = env->prog; + struct bpf_reg_state *reg; +- struct tnum range = tnum_range(0, 1); ++ struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0); + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); + int err; + struct bpf_func_state *frame = env->cur_state->frame[0]; +@@ -10826,8 +10826,8 @@ static int check_return_code(struct bpf_verifier_env *env) + return -EINVAL; + } + +- if (!tnum_in(tnum_const(0), reg->var_off)) { +- verbose_invalid_scalar(env, reg, &range, "async callback", "R0"); ++ if (!tnum_in(const_0, reg->var_off)) { ++ verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0"); + return -EINVAL; + } + return 0; +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c +index 5407241dbb45f..289cc873cb719 100644 +--- a/kernel/cgroup/cgroup-v1.c ++++ b/kernel/cgroup/cgroup-v1.c +@@ -360,10 +360,9 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, + } + css_task_iter_end(&it); + length = n; +- /* now sort & (if procs) strip out duplicates */ ++ /* now sort & strip out duplicates (tgids or recycled thread PIDs) */ + sort(array, length, sizeof(pid_t), cmppid, NULL); +- if (type == CGROUP_FILE_PROCS) +- length = pidlist_uniq(array, length); ++ length = pidlist_uniq(array, length); + + l = cgroup_pidlist_find_create(cgrp, type); + if (!l) { +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 1e1557e42d2cc..bc1a97ee40b21 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -5355,9 +5355,13 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) + list_for_each_entry(wq, &workqueues, list) { + if (!(wq->flags & WQ_UNBOUND)) + continue; ++ + /* creating multiple pwqs breaks ordering guarantee */ +- if (wq->flags & __WQ_ORDERED) +- continue; ++ if (!list_empty(&wq->pwqs)) { ++ if (wq->flags & __WQ_ORDERED_EXPLICIT) ++ continue; ++ wq->flags &= ~__WQ_ORDERED; ++ } + + ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, 
unbound_cpumask); + if (!ctx) { +diff --git a/net/can/isotp.c b/net/can/isotp.c +index 8c97f4061ffd7..545889935d39c 100644 +--- a/net/can/isotp.c ++++ b/net/can/isotp.c +@@ -925,21 +925,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) + if (!so->bound || so->tx.state == ISOTP_SHUTDOWN) + return -EADDRNOTAVAIL; + +-wait_free_buffer: +- /* we do not support multiple buffers - for now */ +- if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT)) +- return -EAGAIN; ++ while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) { ++ /* we do not support multiple buffers - for now */ ++ if (msg->msg_flags & MSG_DONTWAIT) ++ return -EAGAIN; + +- /* wait for complete transmission of current pdu */ +- err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE); +- if (err) +- goto err_event_drop; +- +- if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) { + if (so->tx.state == ISOTP_SHUTDOWN) + return -EADDRNOTAVAIL; + +- goto wait_free_buffer; ++ /* wait for complete transmission of current pdu */ ++ err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE); ++ if (err) ++ goto err_event_drop; + } + + if (!size || size > MAX_MSG_LENGTH) { +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 09feb3f1fcaa3..b9b64a2427caf 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -454,8 +454,8 @@ int ceph_tcp_connect(struct ceph_connection *con) + set_sock_callbacks(sock, con); + + con_sock_state_connecting(con); +- ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss), +- O_NONBLOCK); ++ ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss), ++ O_NONBLOCK); + if (ret == -EINPROGRESS) { + dout("connect %s EINPROGRESS sk_state = %u\n", + ceph_pr_addr(&con->peer_addr), +diff --git a/net/core/dev.c b/net/core/dev.c +index a2e3c6470ab3f..5374761f5af2c 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3274,15 +3274,19 @@ int skb_checksum_help(struct sk_buff *skb) + + offset = skb_checksum_start_offset(skb); + ret = -EINVAL; +- if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { ++ if (unlikely(offset >= skb_headlen(skb))) { + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); ++ WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n", ++ offset, skb_headlen(skb)); + goto out; + } + csum = skb_checksum(skb, offset, skb->len - offset, 0); + + offset += skb->csum_offset; +- if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) { ++ if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) { + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); ++ WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n", ++ offset + sizeof(__sum16), skb_headlen(skb)); + goto out; + } + ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index f68762ce4d8a3..73e5821584c18 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -1387,6 +1387,15 @@ static struct ctl_table ipv4_net_table[] = { + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_TWO, + }, ++ { ++ .procname = "tcp_shrink_window", ++ .data = &init_net.ipv4.sysctl_tcp_shrink_window, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, + { } + }; + +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index f9b8a4a1d2edc..5df19f93f86ab 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -3221,6 +3221,8 @@ static int __net_init tcp_sk_init(struct 
net *net) + else + net->ipv4.tcp_congestion_control = &tcp_reno; + ++ net->ipv4.sysctl_tcp_shrink_window = 0; ++ + return 0; + } + +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 5921b0f6f9f41..443b1cab25299 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -259,8 +259,8 @@ static u16 tcp_select_window(struct sock *sk) + u32 old_win = tp->rcv_wnd; + u32 cur_win = tcp_receive_window(tp); + u32 new_win = __tcp_select_window(sk); ++ struct net *net = sock_net(sk); + +- /* Never shrink the offered window */ + if (new_win < cur_win) { + /* Danger Will Robinson! + * Don't update rcv_wup/rcv_wnd here or else +@@ -269,11 +269,14 @@ static u16 tcp_select_window(struct sock *sk) + * + * Relax Will Robinson. + */ +- if (new_win == 0) +- NET_INC_STATS(sock_net(sk), +- LINUX_MIB_TCPWANTZEROWINDOWADV); +- new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); ++ if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { ++ /* Never shrink the offered window */ ++ if (new_win == 0) ++ NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV); ++ new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); ++ } + } ++ + tp->rcv_wnd = new_win; + tp->rcv_wup = tp->rcv_nxt; + +@@ -281,7 +284,7 @@ static u16 tcp_select_window(struct sock *sk) + * scaled window. + */ + if (!tp->rx_opt.rcv_wscale && +- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) ++ READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) + new_win = min(new_win, MAX_TCP_WINDOW); + else + new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); +@@ -293,10 +296,9 @@ static u16 tcp_select_window(struct sock *sk) + if (new_win == 0) { + tp->pred_flags = 0; + if (old_win) +- NET_INC_STATS(sock_net(sk), +- LINUX_MIB_TCPTOZEROWINDOWADV); ++ NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV); + } else if (old_win == 0) { +- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); ++ NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV); + } + + return new_win; +@@ -2949,6 +2951,7 @@ u32 __tcp_select_window(struct sock *sk) + { + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); ++ struct net *net = sock_net(sk); + /* MSS for the peer's data. Previous versions used mss_clamp + * here. I don't know if the value based on our guesses + * of peer's MSS is better for the performance. It's more correct +@@ -2970,6 +2973,15 @@ u32 __tcp_select_window(struct sock *sk) + if (mss <= 0) + return 0; + } ++ ++ /* Only allow window shrink if the sysctl is enabled and we have ++ * a non-zero scaling factor in effect. 
++ */ ++ if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) ++ goto shrink_window_allowed; ++ ++ /* do not allow window to shrink */ ++ + if (free_space < (full_space >> 1)) { + icsk->icsk_ack.quick = 0; + +@@ -3024,6 +3036,36 @@ u32 __tcp_select_window(struct sock *sk) + } + + return window; ++ ++shrink_window_allowed: ++ /* new window should always be an exact multiple of scaling factor */ ++ free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); ++ ++ if (free_space < (full_space >> 1)) { ++ icsk->icsk_ack.quick = 0; ++ ++ if (tcp_under_memory_pressure(sk)) ++ tcp_adjust_rcv_ssthresh(sk); ++ ++ /* if free space is too low, return a zero window */ ++ if (free_space < (allowed_space >> 4) || free_space < mss || ++ free_space < (1 << tp->rx_opt.rcv_wscale)) ++ return 0; ++ } ++ ++ if (free_space > tp->rcv_ssthresh) { ++ free_space = tp->rcv_ssthresh; ++ /* new window should always be an exact multiple of scaling factor ++ * ++ * For this case, we ALIGN "up" (increase free_space) because ++ * we know free_space is not zero here, it has been reduced from ++ * the memory-based limit, and rcv_ssthresh is not a hard limit ++ * (unlike sk_rcvbuf). ++ */ ++ free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); ++ } ++ ++ return free_space; + } + + void tcp_skb_collapse_tstamp(struct sk_buff *skb, +diff --git a/net/mctp/route.c b/net/mctp/route.c +index f51a05ec71624..68be8f2b622dd 100644 +--- a/net/mctp/route.c ++++ b/net/mctp/route.c +@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet, + { + struct mctp_route *tmp, *rt = NULL; + ++ rcu_read_lock(); ++ + list_for_each_entry_rcu(tmp, &net->mctp.routes, list) { + /* TODO: add metrics */ + if (mctp_rt_match_eid(tmp, dnet, daddr)) { +@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet, + } + } + ++ rcu_read_unlock(); ++ + return rt; + } + + static struct mctp_route *mctp_route_lookup_null(struct net *net, + struct net_device *dev) + { +- struct mctp_route *rt; ++ struct mctp_route *tmp, *rt = NULL; + +- list_for_each_entry_rcu(rt, &net->mctp.routes, list) { +- if (rt->dev->dev == dev && rt->type == RTN_LOCAL && +- refcount_inc_not_zero(&rt->refs)) +- return rt; ++ rcu_read_lock(); ++ ++ list_for_each_entry_rcu(tmp, &net->mctp.routes, list) { ++ if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL && ++ refcount_inc_not_zero(&tmp->refs)) { ++ rt = tmp; ++ break; ++ } + } + +- return NULL; ++ rcu_read_unlock(); ++ ++ return rt; + } + + static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb, +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index b6e0579e72644..881e05193ac97 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -3456,24 +3456,21 @@ static void schedule_3rdack_retransmission(struct sock *ssk) + sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); + } + +-void mptcp_subflow_process_delegated(struct sock *ssk) ++void mptcp_subflow_process_delegated(struct sock *ssk, long status) + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); + struct sock *sk = subflow->conn; + +- if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { ++ if (status & BIT(MPTCP_DELEGATE_SEND)) { + mptcp_data_lock(sk); + if (!sock_owned_by_user(sk)) + __mptcp_subflow_push_pending(sk, ssk); + else + __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); + mptcp_data_unlock(sk); +- mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND); + } +- if 
(test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) { ++ if (status & BIT(MPTCP_DELEGATE_ACK)) + schedule_3rdack_retransmission(ssk); +- mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK); +- } + } + + static int mptcp_hash(struct sock *sk) +@@ -3981,14 +3978,17 @@ static int mptcp_napi_poll(struct napi_struct *napi, int budget) + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + bh_lock_sock_nested(ssk); +- if (!sock_owned_by_user(ssk) && +- mptcp_subflow_has_delegated_action(subflow)) +- mptcp_subflow_process_delegated(ssk); +- /* ... elsewhere tcp_release_cb_override already processed +- * the action or will do at next release_sock(). +- * In both case must dequeue the subflow here - on the same +- * CPU that scheduled it. +- */ ++ if (!sock_owned_by_user(ssk)) { ++ mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); ++ } else { ++ /* tcp_release_cb_override already processed ++ * the action or will do at next release_sock(). ++ * In both case must dequeue the subflow here - on the same ++ * CPU that scheduled it. ++ */ ++ smp_wmb(); ++ clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status); ++ } + bh_unlock_sock(ssk); + sock_put(ssk); + +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 91d89a0aeb586..4ec8e0a81b5a4 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -430,9 +430,11 @@ struct mptcp_delegated_action { + + DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions); + +-#define MPTCP_DELEGATE_SEND 0 +-#define MPTCP_DELEGATE_ACK 1 ++#define MPTCP_DELEGATE_SCHEDULED 0 ++#define MPTCP_DELEGATE_SEND 1 ++#define MPTCP_DELEGATE_ACK 2 + ++#define MPTCP_DELEGATE_ACTIONS_MASK (~BIT(MPTCP_DELEGATE_SCHEDULED)) + /* MPTCP subflow context */ + struct mptcp_subflow_context { + struct list_head node;/* conn_list of subflows */ +@@ -543,23 +545,24 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow) + return subflow->map_seq + mptcp_subflow_get_map_offset(subflow); + } + +-void mptcp_subflow_process_delegated(struct sock *ssk); ++void mptcp_subflow_process_delegated(struct sock *ssk, long actions); + + static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action) + { ++ long old, set_bits = BIT(MPTCP_DELEGATE_SCHEDULED) | BIT(action); + struct mptcp_delegated_action *delegated; + bool schedule; + + /* the caller held the subflow bh socket lock */ + lockdep_assert_in_softirq(); + +- /* The implied barrier pairs with mptcp_subflow_delegated_done(), and +- * ensures the below list check sees list updates done prior to status +- * bit changes ++ /* The implied barrier pairs with tcp_release_cb_override() ++ * mptcp_napi_poll(), and ensures the below list check sees list ++ * updates done prior to delegated status bits changes + */ +- if (!test_and_set_bit(action, &subflow->delegated_status)) { +- /* still on delegated list from previous scheduling */ +- if (!list_empty(&subflow->delegated_node)) ++ old = set_mask_bits(&subflow->delegated_status, 0, set_bits); ++ if (!(old & BIT(MPTCP_DELEGATE_SCHEDULED))) { ++ if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node))) + return; + + delegated = this_cpu_ptr(&mptcp_delegated_actions); +@@ -584,20 +587,6 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated) + return ret; + } + +-static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow) +-{ +- return !!READ_ONCE(subflow->delegated_status); +-} +- +-static inline void 
mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action) +-{ +- /* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before +- * touching the status bit +- */ +- smp_wmb(); +- clear_bit(action, &subflow->delegated_status); +-} +- + int mptcp_is_enabled(const struct net *net); + unsigned int mptcp_get_add_addr_timeout(const struct net *net); + int mptcp_is_checksum_enabled(const struct net *net); +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index b93b08a75017b..d611783c2601f 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -1886,9 +1886,15 @@ static void subflow_ulp_clone(const struct request_sock *req, + static void tcp_release_cb_override(struct sock *ssk) + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); ++ long status; + +- if (mptcp_subflow_has_delegated_action(subflow)) +- mptcp_subflow_process_delegated(ssk); ++ /* process and clear all the pending actions, but leave the subflow into ++ * the napi queue. To respect locking, only the same CPU that originated ++ * the action can touch the list. mptcp_napi_poll will take care of it. ++ */ ++ status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0); ++ if (status) ++ mptcp_subflow_process_delegated(ssk, status); + + tcp_release_cb(ssk); + } +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index 6574f4e651b1a..e1dea9a820505 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -1441,7 +1441,7 @@ static int bind_mcastif_addr(struct socket *sock, struct net_device *dev) + sin.sin_addr.s_addr = addr; + sin.sin_port = 0; + +- return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); ++ return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin)); + } + + static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen, +@@ -1548,7 +1548,7 @@ static int make_receive_sock(struct netns_ipvs *ipvs, int id, + + get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); + sock->sk->sk_bound_dev_if = dev->ifindex; +- result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); ++ result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen); + if (result < 0) { + pr_err("Error binding to the multicast addr\n"); + goto error; +diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c +index 6705bb895e239..1dac28136e6a3 100644 +--- a/net/nfc/llcp_core.c ++++ b/net/nfc/llcp_core.c +@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, + + if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { + llcp_sock = tmp_sock; ++ sock_hold(&llcp_sock->sk); + break; + } + } + + read_unlock(&local->sockets.lock); + +- if (llcp_sock == NULL) +- return NULL; +- +- sock_hold(&llcp_sock->sk); +- + return llcp_sock; + } + +@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len) + + static + struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local, +- const u8 *sn, size_t sn_len) ++ const u8 *sn, size_t sn_len, ++ bool needref) + { + struct sock *sk; + struct nfc_llcp_sock *llcp_sock, *tmp_sock; +@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local, + + if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) { + llcp_sock = tmp_sock; ++ if (needref) ++ sock_hold(&llcp_sock->sk); + break; + } + } +@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, + * to this service name. 
+ */ + if (nfc_llcp_sock_from_sn(local, sock->service_name, +- sock->service_name_len) != NULL) { ++ sock->service_name_len, ++ false) != NULL) { + mutex_unlock(&local->sdp_lock); + + return LLCP_SAP_MAX; +@@ -824,16 +824,7 @@ out: + static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local, + const u8 *sn, size_t sn_len) + { +- struct nfc_llcp_sock *llcp_sock; +- +- llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len); +- +- if (llcp_sock == NULL) +- return NULL; +- +- sock_hold(&llcp_sock->sk); +- +- return llcp_sock; ++ return nfc_llcp_sock_from_sn(local, sn, sn_len, true); + } + + static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len) +@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local, + } + + llcp_sock = nfc_llcp_sock_from_sn(local, service_name, +- service_name_len); ++ service_name_len, ++ true); + if (!llcp_sock) { + sap = 0; + goto add_snl; +@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local, + + if (sap == LLCP_SAP_MAX) { + sap = 0; ++ nfc_llcp_sock_put(llcp_sock); + goto add_snl; + } + +@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local, + + pr_debug("%p %d\n", llcp_sock, sap); + ++ nfc_llcp_sock_put(llcp_sock); + add_snl: + sdp = nfc_llcp_build_sdres_tlv(tid, sap); + if (sdp == NULL) +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c +index 4ffdf2f45c444..7535afd1537e9 100644 +--- a/net/nfc/nci/core.c ++++ b/net/nfc/nci/core.c +@@ -908,6 +908,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, + return -EINVAL; + } + ++ if (protocol >= NFC_PROTO_MAX) { ++ pr_err("the requested nfc protocol is invalid\n"); ++ return -EINVAL; ++ } ++ + if (!(nci_target->supported_protocols & (1 << protocol))) { + pr_err("target does not support the requested protocol 0x%x\n", + protocol); +diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c +index d788c6d28986f..a0046e99d6df7 100644 +--- a/net/rds/tcp_connect.c ++++ b/net/rds/tcp_connect.c +@@ -145,7 +145,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) + addrlen = sizeof(sin); + } + +- ret = sock->ops->bind(sock, addr, addrlen); ++ ret = kernel_bind(sock, addr, addrlen); + if (ret) { + rdsdebug("bind failed with %d at address %pI6c\n", + ret, &conn->c_laddr); +diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c +index 7edf2e69d3fed..b576bd252fecb 100644 +--- a/net/rds/tcp_listen.c ++++ b/net/rds/tcp_listen.c +@@ -304,7 +304,7 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6) + addr_len = sizeof(*sin); + } + +- ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len); ++ ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len); + if (ret < 0) { + rdsdebug("could not bind %s listener socket: %d\n", + isv6 ? "IPv6" : "IPv4", ret); +diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h +index 4dbc237b7c19e..ee22d6f9a86aa 100644 +--- a/net/smc/smc_stats.h ++++ b/net/smc/smc_stats.h +@@ -93,13 +93,14 @@ do { \ + typeof(_smc_stats) stats = (_smc_stats); \ + typeof(_tech) t = (_tech); \ + typeof(_len) l = (_len); \ +- int _pos = fls64((l) >> 13); \ ++ int _pos; \ + typeof(_rc) r = (_rc); \ + int m = SMC_BUF_MAX - 1; \ + this_cpu_inc((*stats).smc[t].key ## _cnt); \ +- if (r <= 0) \ ++ if (r <= 0 || l <= 0) \ + break; \ +- _pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \ ++ _pos = fls64((l - 1) >> 13); \ ++ _pos = (_pos <= m) ? 
_pos : m; \ + this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \ + this_cpu_add((*stats).smc[t].key ## _bytes, r); \ + } \ +@@ -139,9 +140,12 @@ while (0) + do { \ + typeof(_len) _l = (_len); \ + typeof(_tech) t = (_tech); \ +- int _pos = fls((_l) >> 13); \ ++ int _pos; \ + int m = SMC_BUF_MAX - 1; \ +- _pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \ ++ if (_l <= 0) \ ++ break; \ ++ _pos = fls((_l - 1) >> 13); \ ++ _pos = (_pos <= m) ? _pos : m; \ + this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \ + } \ + while (0) +diff --git a/net/socket.c b/net/socket.c +index b0169168e3f4e..04cba91c7cbe5 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -3454,7 +3454,11 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd, + + int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) + { +- return sock->ops->bind(sock, addr, addrlen); ++ struct sockaddr_storage address; ++ ++ memcpy(&address, addr, addrlen); ++ ++ return sock->ops->bind(sock, (struct sockaddr *)&address, addrlen); + } + EXPORT_SYMBOL(kernel_bind); + +diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c +index c6fc50d67214c..85fb5c22529a7 100644 +--- a/security/keys/trusted-keys/trusted_core.c ++++ b/security/keys/trusted-keys/trusted_core.c +@@ -44,13 +44,12 @@ static const struct trusted_key_source trusted_key_sources[] = { + #endif + }; + +-DEFINE_STATIC_CALL_NULL(trusted_key_init, *trusted_key_sources[0].ops->init); + DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal); + DEFINE_STATIC_CALL_NULL(trusted_key_unseal, + *trusted_key_sources[0].ops->unseal); + DEFINE_STATIC_CALL_NULL(trusted_key_get_random, + *trusted_key_sources[0].ops->get_random); +-DEFINE_STATIC_CALL_NULL(trusted_key_exit, *trusted_key_sources[0].ops->exit); ++static void (*trusted_key_exit)(void); + static unsigned char migratable; + + enum { +@@ -359,19 +358,16 @@ static int __init init_trusted(void) + if (!get_random) + get_random = kernel_get_random; + +- static_call_update(trusted_key_init, +- trusted_key_sources[i].ops->init); + static_call_update(trusted_key_seal, + trusted_key_sources[i].ops->seal); + static_call_update(trusted_key_unseal, + trusted_key_sources[i].ops->unseal); + static_call_update(trusted_key_get_random, + get_random); +- static_call_update(trusted_key_exit, +- trusted_key_sources[i].ops->exit); ++ trusted_key_exit = trusted_key_sources[i].ops->exit; + migratable = trusted_key_sources[i].ops->migratable; + +- ret = static_call(trusted_key_init)(); ++ ret = trusted_key_sources[i].ops->init(); + if (!ret) + break; + } +@@ -388,7 +384,8 @@ static int __init init_trusted(void) + + static void __exit cleanup_trusted(void) + { +- static_call_cond(trusted_key_exit)(); ++ if (trusted_key_exit) ++ (*trusted_key_exit)(); + } + + late_initcall(init_trusted); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 57e07aa4e136c..14e70e2f9c881 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -4639,6 +4639,22 @@ static void alc236_fixup_hp_mute_led_coefbit2(struct hda_codec *codec, + } + } + ++static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec, ++ const struct hda_fixup *fix, ++ int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ spec->mute_led_polarity = 0; ++ spec->mute_led_coef.idx = 0x0b; ++ spec->mute_led_coef.mask = 3 << 2; ++ spec->mute_led_coef.on = 2 << 2; ++ 
spec->mute_led_coef.off = 1 << 2; ++ snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set); ++ } ++} ++ + /* turn on/off mic-mute LED per capture hook by coef bit */ + static int coef_micmute_led_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +@@ -6969,6 +6985,29 @@ static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec, + } + } + ++/* Forcibly assign NID 0x03 to HP while NID 0x02 to SPK */ ++static void alc287_fixup_bind_dacs(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ ++ static const hda_nid_t preferred_pairs[] = { ++ 0x17, 0x02, 0x21, 0x03, 0 ++ }; ++ ++ if (action != HDA_FIXUP_ACT_PRE_PROBE) ++ return; ++ ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); ++ spec->gen.preferred_dacs = preferred_pairs; ++ spec->gen.auto_mute_via_amp = 1; ++ if (spec->gen.autocfg.speaker_pins[0] != 0x14) { ++ snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, ++ 0x0); /* Make sure 0x14 was disable */ ++ } ++} ++ ++ + enum { + ALC269_FIXUP_GPIO2, + ALC269_FIXUP_SONY_VAIO, +@@ -7227,6 +7266,10 @@ enum { + ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS, + ALC236_FIXUP_DELL_DUAL_CODECS, + ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI, ++ ALC245_FIXUP_HP_MUTE_LED_COEFBIT, ++ ALC245_FIXUP_HP_X360_MUTE_LEDS, ++ ALC287_FIXUP_THINKPAD_I2S_SPK, ++ ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD, + }; + + /* A special fixup for Lenovo C940 and Yoga Duet 7; +@@ -9296,6 +9339,26 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI, + }, ++ [ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc245_fixup_hp_mute_led_coefbit, ++ }, ++ [ALC245_FIXUP_HP_X360_MUTE_LEDS] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc245_fixup_hp_mute_led_coefbit, ++ .chained = true, ++ .chain_id = ALC245_FIXUP_HP_GPIO_LED ++ }, ++ [ALC287_FIXUP_THINKPAD_I2S_SPK] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc287_fixup_bind_dacs, ++ }, ++ [ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc287_fixup_bind_dacs, ++ .chained = true, ++ .chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -9531,6 +9594,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), ++ SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), + SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), +@@ -9562,6 +9626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", 
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), ++ SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), + SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED), +@@ -9697,7 +9762,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), +- SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK), ++ SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), + SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP), +@@ -9831,14 +9896,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), +- SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), +- SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI), ++ SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), ++ SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), +@@ -9920,7 +9985,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), + 
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), + SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10), +- SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC225_FIXUP_HEADSET_JACK), ++ SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + + #if 0 +@@ -10402,6 +10467,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x17, 0x90170111}, + {0x19, 0x03a11030}, + {0x21, 0x03211020}), ++ SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK, ++ {0x17, 0x90170110}, ++ {0x19, 0x03a11030}, ++ {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x17, 0x90170110}, +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index 533250efcbd83..c494de5f5c066 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -234,6 +234,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "82V2"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "82YM"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c +index 8c86b578eba83..29af9595dac19 100644 +--- a/sound/soc/codecs/sta32x.c ++++ b/sound/soc/codecs/sta32x.c +@@ -1054,35 +1054,32 @@ static int sta32x_probe_dt(struct device *dev, struct sta32x_priv *sta32x) + of_property_read_u8(np, "st,ch3-output-mapping", + &pdata->ch3_output_mapping); + +- if (of_get_property(np, "st,fault-detect-recovery", NULL)) +- pdata->fault_detect_recovery = 1; +- if (of_get_property(np, "st,thermal-warning-recovery", NULL)) +- pdata->thermal_warning_recovery = 1; +- if (of_get_property(np, "st,thermal-warning-adjustment", NULL)) +- pdata->thermal_warning_adjustment = 1; +- if (of_get_property(np, "st,needs_esd_watchdog", NULL)) +- pdata->needs_esd_watchdog = 1; ++ pdata->fault_detect_recovery = ++ of_property_read_bool(np, "st,fault-detect-recovery"); ++ pdata->thermal_warning_recovery = ++ of_property_read_bool(np, "st,thermal-warning-recovery"); ++ pdata->thermal_warning_adjustment = ++ of_property_read_bool(np, "st,thermal-warning-adjustment"); ++ pdata->needs_esd_watchdog = ++ of_property_read_bool(np, "st,needs_esd_watchdog"); + + tmp = 140; + of_property_read_u16(np, "st,drop-compensation-ns", &tmp); + pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20; + + /* CONFE */ +- if (of_get_property(np, "st,max-power-use-mpcc", NULL)) +- pdata->max_power_use_mpcc = 1; +- +- if (of_get_property(np, "st,max-power-correction", NULL)) +- pdata->max_power_correction = 1; +- +- if (of_get_property(np, "st,am-reduction-mode", NULL)) +- pdata->am_reduction_mode = 1; +- +- if (of_get_property(np, "st,odd-pwm-speed-mode", NULL)) +- pdata->odd_pwm_speed_mode = 1; ++ pdata->max_power_use_mpcc = ++ of_property_read_bool(np, "st,max-power-use-mpcc"); ++ pdata->max_power_correction = ++ of_property_read_bool(np, "st,max-power-correction"); ++ pdata->am_reduction_mode = ++ of_property_read_bool(np, "st,am-reduction-mode"); ++ pdata->odd_pwm_speed_mode = ++ of_property_read_bool(np, "st,odd-pwm-speed-mode"); + + /* CONFF */ +- if (of_get_property(np, "st,invalid-input-detect-mute", NULL)) +- pdata->invalid_input_detect_mute = 1; ++ 
pdata->invalid_input_detect_mute = ++ of_property_read_bool(np, "st,invalid-input-detect-mute"); + + sta32x->pdata = pdata; + +diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c +index 9ed13aeb3cbdc..b033a5fcd6c04 100644 +--- a/sound/soc/codecs/sta350.c ++++ b/sound/soc/codecs/sta350.c +@@ -1106,12 +1106,12 @@ static int sta350_probe_dt(struct device *dev, struct sta350_priv *sta350) + of_property_read_u8(np, "st,ch3-output-mapping", + &pdata->ch3_output_mapping); + +- if (of_get_property(np, "st,thermal-warning-recovery", NULL)) +- pdata->thermal_warning_recovery = 1; +- if (of_get_property(np, "st,thermal-warning-adjustment", NULL)) +- pdata->thermal_warning_adjustment = 1; +- if (of_get_property(np, "st,fault-detect-recovery", NULL)) +- pdata->fault_detect_recovery = 1; ++ pdata->thermal_warning_recovery = ++ of_property_read_bool(np, "st,thermal-warning-recovery"); ++ pdata->thermal_warning_adjustment = ++ of_property_read_bool(np, "st,thermal-warning-adjustment"); ++ pdata->fault_detect_recovery = ++ of_property_read_bool(np, "st,fault-detect-recovery"); + + pdata->ffx_power_output_mode = STA350_FFX_PM_VARIABLE_DROP_COMP; + if (!of_property_read_string(np, "st,ffx-power-output-mode", +@@ -1133,41 +1133,34 @@ static int sta350_probe_dt(struct device *dev, struct sta350_priv *sta350) + of_property_read_u16(np, "st,drop-compensation-ns", &tmp); + pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20; + +- if (of_get_property(np, "st,overcurrent-warning-adjustment", NULL)) +- pdata->oc_warning_adjustment = 1; ++ pdata->oc_warning_adjustment = ++ of_property_read_bool(np, "st,overcurrent-warning-adjustment"); + + /* CONFE */ +- if (of_get_property(np, "st,max-power-use-mpcc", NULL)) +- pdata->max_power_use_mpcc = 1; +- +- if (of_get_property(np, "st,max-power-correction", NULL)) +- pdata->max_power_correction = 1; +- +- if (of_get_property(np, "st,am-reduction-mode", NULL)) +- pdata->am_reduction_mode = 1; +- +- if (of_get_property(np, "st,odd-pwm-speed-mode", NULL)) +- pdata->odd_pwm_speed_mode = 1; +- +- if (of_get_property(np, "st,distortion-compensation", NULL)) +- pdata->distortion_compensation = 1; ++ pdata->max_power_use_mpcc = ++ of_property_read_bool(np, "st,max-power-use-mpcc"); ++ pdata->max_power_correction = ++ of_property_read_bool(np, "st,max-power-correction"); ++ pdata->am_reduction_mode = ++ of_property_read_bool(np, "st,am-reduction-mode"); ++ pdata->odd_pwm_speed_mode = ++ of_property_read_bool(np, "st,odd-pwm-speed-mode"); ++ pdata->distortion_compensation = ++ of_property_read_bool(np, "st,distortion-compensation"); + + /* CONFF */ +- if (of_get_property(np, "st,invalid-input-detect-mute", NULL)) +- pdata->invalid_input_detect_mute = 1; ++ pdata->invalid_input_detect_mute = ++ of_property_read_bool(np, "st,invalid-input-detect-mute"); + + /* MISC */ +- if (of_get_property(np, "st,activate-mute-output", NULL)) +- pdata->activate_mute_output = 1; +- +- if (of_get_property(np, "st,bridge-immediate-off", NULL)) +- pdata->bridge_immediate_off = 1; +- +- if (of_get_property(np, "st,noise-shape-dc-cut", NULL)) +- pdata->noise_shape_dc_cut = 1; +- +- if (of_get_property(np, "st,powerdown-master-volume", NULL)) +- pdata->powerdown_master_vol = 1; ++ pdata->activate_mute_output = ++ of_property_read_bool(np, "st,activate-mute-output"); ++ pdata->bridge_immediate_off = ++ of_property_read_bool(np, "st,bridge-immediate-off"); ++ pdata->noise_shape_dc_cut = ++ of_property_read_bool(np, "st,noise-shape-dc-cut"); ++ pdata->powerdown_master_vol = ++ 
of_property_read_bool(np, "st,powerdown-master-volume"); + + if (!of_property_read_u8(np, "st,powerdown-delay-divider", &tmp8)) { + if (is_power_of_2(tmp8) && tmp8 >= 1 && tmp8 <= 128) +diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c +index 22143cc5afa70..f9e7122894bd2 100644 +--- a/sound/soc/codecs/tas5086.c ++++ b/sound/soc/codecs/tas5086.c +@@ -840,7 +840,7 @@ static int tas5086_probe(struct snd_soc_component *component) + snprintf(name, sizeof(name), + "ti,mid-z-channel-%d", i + 1); + +- if (of_get_property(of_node, name, NULL) != NULL) ++ if (of_property_read_bool(of_node, name)) + priv->pwm_start_mid_z |= 1 << i; + } + } +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c +index b7552b0df7c3c..96fd9095e544b 100644 +--- a/sound/soc/fsl/fsl_sai.c ++++ b/sound/soc/fsl/fsl_sai.c +@@ -710,10 +710,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir) + { + unsigned int ofs = sai->soc_data->reg_offset; + bool tx = dir == TX; +- u32 xcsr, count = 100; ++ u32 xcsr, count = 100, mask; ++ ++ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output) ++ mask = FSL_SAI_CSR_TERE; ++ else ++ mask = FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE; + + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), +- FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0); ++ mask, 0); + + /* TERE will remain set till the end of current frame */ + do { +@@ -1381,18 +1386,18 @@ static int fsl_sai_probe(struct platform_device *pdev) + sai->cpu_dai_drv.symmetric_channels = 1; + sai->cpu_dai_drv.symmetric_sample_bits = 1; + +- if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) && +- of_find_property(np, "fsl,sai-asynchronous", NULL)) { ++ if (of_property_read_bool(np, "fsl,sai-synchronous-rx") && ++ of_property_read_bool(np, "fsl,sai-asynchronous")) { + /* error out if both synchronous and asynchronous are present */ + dev_err(dev, "invalid binding for synchronous mode\n"); + return -EINVAL; + } + +- if (of_find_property(np, "fsl,sai-synchronous-rx", NULL)) { ++ if (of_property_read_bool(np, "fsl,sai-synchronous-rx")) { + /* Sync Rx with Tx */ + sai->synchronous[RX] = false; + sai->synchronous[TX] = true; +- } else if (of_find_property(np, "fsl,sai-asynchronous", NULL)) { ++ } else if (of_property_read_bool(np, "fsl,sai-asynchronous")) { + /* Discard all settings for asynchronous mode */ + sai->synchronous[RX] = false; + sai->synchronous[TX] = false; +@@ -1401,7 +1406,9 @@ static int fsl_sai_probe(struct platform_device *pdev) + sai->cpu_dai_drv.symmetric_sample_bits = 0; + } + +- if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) && ++ sai->mclk_direction_output = of_property_read_bool(np, "fsl,sai-mclk-direction-output"); ++ ++ if (sai->mclk_direction_output && + of_device_is_compatible(np, "fsl,imx6ul-sai")) { + gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr"); + if (IS_ERR(gpr)) { +@@ -1442,7 +1449,7 @@ static int fsl_sai_probe(struct platform_device *pdev) + dev_warn(dev, "Error reading SAI version: %d\n", ret); + + /* Select MCLK direction */ +- if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) && ++ if (sai->mclk_direction_output && + sai->soc_data->max_register >= FSL_SAI_MCTL) { + regmap_update_bits(sai->regmap, FSL_SAI_MCTL, + FSL_SAI_MCTL_MCLK_EN, FSL_SAI_MCTL_MCLK_EN); +@@ -1560,6 +1567,17 @@ static const struct fsl_sai_soc_data fsl_sai_imx8mm_data = { + .max_register = FSL_SAI_MCTL, + }; + ++static const struct fsl_sai_soc_data fsl_sai_imx8mn_data = { ++ .use_imx_pcm = true, ++ .use_edma = false, ++ .fifo_depth = 128, ++ 
.reg_offset = 8, ++ .mclk0_is_mclk1 = false, ++ .pins = 8, ++ .flags = 0, ++ .max_register = FSL_SAI_MDIV, ++}; ++ + static const struct fsl_sai_soc_data fsl_sai_imx8mp_data = { + .use_imx_pcm = true, + .use_edma = false, +@@ -1569,6 +1587,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8mp_data = { + .pins = 8, + .flags = 0, + .max_register = FSL_SAI_MDIV, ++ .mclk_with_tere = true, + }; + + static const struct fsl_sai_soc_data fsl_sai_imx8ulp_data = { +@@ -1592,7 +1611,7 @@ static const struct of_device_id fsl_sai_ids[] = { + { .compatible = "fsl,imx8mm-sai", .data = &fsl_sai_imx8mm_data }, + { .compatible = "fsl,imx8mp-sai", .data = &fsl_sai_imx8mp_data }, + { .compatible = "fsl,imx8ulp-sai", .data = &fsl_sai_imx8ulp_data }, +- { .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mp_data }, ++ { .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mn_data }, + { /* sentinel */ } + }; + MODULE_DEVICE_TABLE(of, fsl_sai_ids); +@@ -1656,6 +1675,10 @@ static int fsl_sai_runtime_resume(struct device *dev) + if (ret) + goto disable_rx_clk; + ++ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output) ++ regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), ++ FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE); ++ + return 0; + + disable_rx_clk: +diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h +index caad5b0ac4ff4..b4d616a44023c 100644 +--- a/sound/soc/fsl/fsl_sai.h ++++ b/sound/soc/fsl/fsl_sai.h +@@ -232,6 +232,7 @@ struct fsl_sai_soc_data { + bool use_imx_pcm; + bool use_edma; + bool mclk0_is_mclk1; ++ bool mclk_with_tere; + unsigned int fifo_depth; + unsigned int pins; + unsigned int reg_offset; +@@ -288,6 +289,7 @@ struct fsl_sai { + bool synchronous[2]; + struct fsl_sai_dl_cfg *dl_cfg; + unsigned int dl_cfg_cnt; ++ bool mclk_direction_output; + + unsigned int mclk_id[2]; + unsigned int mclk_streams; +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c +index 46a53551b955c..6af00b62a60fa 100644 +--- a/sound/soc/fsl/fsl_ssi.c ++++ b/sound/soc/fsl/fsl_ssi.c +@@ -1447,7 +1447,7 @@ static int fsl_ssi_probe_from_dt(struct fsl_ssi *ssi) + return -EINVAL; + } + strcpy(ssi->card_name, "ac97-codec"); +- } else if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) { ++ } else if (!of_property_read_bool(np, "fsl,ssi-asynchronous")) { + /* + * In synchronous mode, STCK and STFS ports are used by RX + * as well. 
So the software should limit the sample rates, +diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c +index 3f128ced41809..64a4d7e9db603 100644 +--- a/sound/soc/fsl/imx-card.c ++++ b/sound/soc/fsl/imx-card.c +@@ -563,7 +563,7 @@ static int imx_card_parse_of(struct imx_card_data *data) + link_data->cpu_sysclk_id = FSL_SAI_CLK_MAST1; + + /* sai may support mclk/bclk = 1 */ +- if (of_find_property(np, "fsl,mclk-equal-bclk", NULL)) { ++ if (of_property_read_bool(np, "fsl,mclk-equal-bclk")) { + link_data->one2one_ratio = true; + } else { + int i; +diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c +index 8811321717fbb..c719354635a3a 100644 +--- a/sound/soc/generic/simple-card-utils.c ++++ b/sound/soc/generic/simple-card-utils.c +@@ -331,7 +331,8 @@ int asoc_simple_startup(struct snd_pcm_substream *substream) + if (fixed_sysclk % props->mclk_fs) { + dev_err(rtd->dev, "fixed sysclk %u not divisible by mclk_fs %u\n", + fixed_sysclk, props->mclk_fs); +- return -EINVAL; ++ ret = -EINVAL; ++ goto codec_err; + } + ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE, + fixed_rate, fixed_rate); +diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c +index 894b6610b9e27..e22d767b6e97a 100644 +--- a/sound/soc/intel/boards/sof_es8336.c ++++ b/sound/soc/intel/boards/sof_es8336.c +@@ -807,6 +807,16 @@ static const struct platform_device_id board_ids[] = { + SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK | + SOF_ES8336_JD_INVERTED), + }, ++ { ++ .name = "mtl_es83x6_c1_h02", ++ .driver_data = (kernel_ulong_t)(SOF_ES8336_SSP_CODEC(1) | ++ SOF_NO_OF_HDMI_CAPTURE_SSP(2) | ++ SOF_HDMI_CAPTURE_1_SSP(0) | ++ SOF_HDMI_CAPTURE_2_SSP(2) | ++ SOF_SSP_HDMI_CAPTURE_PRESENT | ++ SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK | ++ SOF_ES8336_JD_INVERTED), ++ }, + { } + }; + MODULE_DEVICE_TABLE(platform, board_ids); +diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c +index 414ac90273810..985012f2003e2 100644 +--- a/sound/soc/intel/boards/sof_sdw.c ++++ b/sound/soc/intel/boards/sof_sdw.c +@@ -347,6 +347,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { + /* No Jack */ + .driver_data = (void *)SOF_SDW_TGL_HDMI, + }, ++ { ++ .callback = sof_sdw_quirk_cb, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B14"), ++ }, ++ /* No Jack */ ++ .driver_data = (void *)SOF_SDW_TGL_HDMI, ++ }, ++ + { + .callback = sof_sdw_quirk_cb, + .matches = { +diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c +index 36c361fb28a4d..d3b4689460ecf 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c +@@ -20,6 +20,16 @@ static const struct snd_soc_acpi_codecs mtl_rt5682_rt5682s_hp = { + .codecs = {"10EC5682", "RTL5682"}, + }; + ++static const struct snd_soc_acpi_codecs mtl_lt6911_hdmi = { ++ .num_codecs = 1, ++ .codecs = {"INTC10B0"} ++}; ++ ++static const struct snd_soc_acpi_codecs mtl_essx_83x6 = { ++ .num_codecs = 3, ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"}, ++}; ++ + struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = { + { + .comp_ids = &mtl_rt5682_rt5682s_hp, +@@ -28,6 +38,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = { + .quirk_data = &mtl_max98357a_amp, + .sof_tplg_filename = "sof-mtl-max98357a-rt5682.tplg", + }, ++ { ++ .comp_ids = &mtl_essx_83x6, ++ .drv_name = "sof-essx8336", ++ .sof_tplg_filename 
= "sof-mtl-es8336", /* the tplg suffix is added at run time */ ++ .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER | ++ SND_SOC_ACPI_TPLG_INTEL_SSP_MSB | ++ SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER, ++ }, + {}, + }; + EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_machines); +@@ -66,6 +84,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = { + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-mtl-rt711-rt1308-rt715.tplg", + }, ++ { ++ .comp_ids = &mtl_essx_83x6, ++ .drv_name = "mtl_es83x6_c1_h02", ++ .machine_quirk = snd_soc_acpi_codec_list, ++ .quirk_data = &mtl_lt6911_hdmi, ++ .sof_tplg_filename = "sof-mtl-es83x6-ssp1-hdmi-ssp02.tplg", ++ }, + { + .link_mask = BIT(0) | BIT(1) | BIT(3), + .links = sdw_mockup_headset_1amp_mic, +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c +index 7ade6c5ed96ff..cb7fff48959a2 100644 +--- a/sound/soc/sh/rcar/ssi.c ++++ b/sound/soc/sh/rcar/ssi.c +@@ -1208,10 +1208,10 @@ int rsnd_ssi_probe(struct rsnd_priv *priv) + goto rsnd_ssi_probe_done; + } + +- if (of_get_property(np, "shared-pin", NULL)) ++ if (of_property_read_bool(np, "shared-pin")) + rsnd_flags_set(ssi, RSND_SSI_CLK_PIN_SHARE); + +- if (of_get_property(np, "no-busif", NULL)) ++ if (of_property_read_bool(np, "no-busif")) + rsnd_flags_set(ssi, RSND_SSI_NO_BUSIF); + + ssi->irq = irq_of_parse_and_map(np, 0); +diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c +index 5698d910b26f3..6fa060cab657e 100644 +--- a/sound/soc/sof/amd/pci-rmb.c ++++ b/sound/soc/sof/amd/pci-rmb.c +@@ -54,7 +54,6 @@ static const struct sof_amd_acp_desc rembrandt_chip_info = { + .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET, + .i2s_pin_config_offset = ACP6X_I2S_PIN_CONFIG, + .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0, +- .acp_clkmux_sel = ACP6X_CLKMUX_SEL, + .fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL, + }; + +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 9105ec623120a..783a2493707ea 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -1204,6 +1204,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + cval->res = 16; + } + break; ++ case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */ ++ if (!strcmp(kctl->id.name, "Mic Capture Volume")) { ++ usb_audio_info(chip, ++ "set resolution quirk: cval->res = 16\n"); ++ cval->res = 16; ++ } ++ break; + } + } + +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 4667d543f7481..6129a62316422 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1992,7 +1992,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip, + /* mic works only when ep packet size is set to wMaxPacketSize */ + fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX; + break; +- ++ case USB_ID(0x3511, 0x2b1e): /* Opencomm2 UC USB Bluetooth dongle */ ++ /* mic works only when ep pitch control is not set */ ++ if (stream == SNDRV_PCM_STREAM_CAPTURE) ++ fp->attributes &= ~UAC_EP_CS_ATTR_PITCH_CONTROL; ++ break; + } + } + +@@ -2171,6 +2175,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { + QUIRK_FLAG_FIXED_RATE), + DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */ + QUIRK_FLAG_FIXED_RATE), ++ DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */ ++ QUIRK_FLAG_GET_SAMPLE_RATE), + + /* Vendor matches */ + VENDOR_FLG(0x045e, /* MS Lifecam */ diff --git a/patch/kernel/archive/odroidxu4-6.1/patch-6.1.59-60.patch b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.59-60.patch new file mode 100644 index 0000000000..512a62e4a1 --- /dev/null +++ 
b/patch/kernel/archive/odroidxu4-6.1/patch-6.1.59-60.patch @@ -0,0 +1,7686 @@ +diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml +index a96f143479c79..eb0e9cca70570 100644 +--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml ++++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml +@@ -59,7 +59,7 @@ properties: + maxItems: 4 + + clocks: +- minItems: 3 ++ minItems: 2 + items: + - description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock + - description: SDC MMC clock, MCLK +diff --git a/Makefile b/Makefile +index 4ad29c852e5f8..d47edcd8888e8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 1 +-SUBLEVEL = 59 ++SUBLEVEL = 60 + EXTRAVERSION = + NAME = Curry Ramen + +diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi +index d69f0f4b4990d..d2d516d113baa 100644 +--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi ++++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi +@@ -640,6 +640,7 @@ + &uart3 { + interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH + &omap4_pmx_core 0x17c>; ++ overrun-throttle-ms = <500>; + }; + + &uart4 { +diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c +index 227cf0a62800b..486aa03abbe17 100644 +--- a/arch/s390/pci/pci_dma.c ++++ b/arch/s390/pci/pci_dma.c +@@ -544,6 +544,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + s->dma_length = 0; + } + } ++ ++static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags) ++{ ++ size_t n = BITS_TO_LONGS(bits); ++ size_t bytes; ++ ++ if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes))) ++ return NULL; ++ ++ return vzalloc(bytes); ++} + + int zpci_dma_init_device(struct zpci_dev *zdev) + { +@@ -584,13 +595,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev) + zdev->end_dma - zdev->start_dma + 1); + zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1; + zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; +- zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); ++ zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL); + if (!zdev->iommu_bitmap) { + rc = -ENOMEM; + goto free_dma_table; + } + if (!s390_iommu_strict) { +- zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8); ++ zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL); + if (!zdev->lazy_bitmap) { + rc = -ENOMEM; + goto free_bitmap; +diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c +index e65f0968e0d9d..9c91cc40f4565 100644 +--- a/arch/x86/boot/compressed/sev.c ++++ b/arch/x86/boot/compressed/sev.c +@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, + return ES_OK; + } + ++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) ++{ ++ return ES_OK; ++} ++ ++static bool fault_in_kernel_space(unsigned long address) ++{ ++ return false; ++} ++ + #undef __init + #undef __pa + #define __init +diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h +index b475d9a582b88..e829fa4c6788e 100644 +--- a/arch/x86/include/asm/fpu/api.h ++++ b/arch/x86/include/asm/fpu/api.h +@@ -148,7 +148,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { + static inline void fpu_sync_guest_vmexit_xfd_state(void) { } + #endif + +-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru); ++extern void 
fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, ++ unsigned int size, u64 xfeatures, u32 pkru); + extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru); + + static inline void fpstate_set_confidential(struct fpu_guest *gfpu) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 08a84f801bfea..c1dcaa3d2d6eb 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1324,7 +1324,6 @@ struct kvm_arch { + * the thread holds the MMU lock in write mode. + */ + spinlock_t tdp_mmu_pages_lock; +- struct workqueue_struct *tdp_mmu_zap_wq; + #endif /* CONFIG_X86_64 */ + + /* +@@ -1727,7 +1726,7 @@ void kvm_mmu_vendor_module_exit(void); + + void kvm_mmu_destroy(struct kvm_vcpu *vcpu); + int kvm_mmu_create(struct kvm_vcpu *vcpu); +-int kvm_mmu_init_vm(struct kvm *kvm); ++void kvm_mmu_init_vm(struct kvm *kvm); + void kvm_mmu_uninit_vm(struct kvm *kvm); + + void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu); +diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h +index 02aac78cb21d4..184fd776cd39f 100644 +--- a/arch/x86/include/asm/svm.h ++++ b/arch/x86/include/asm/svm.h +@@ -259,6 +259,7 @@ enum avic_ipi_failure_cause { + AVIC_IPI_FAILURE_TARGET_NOT_RUNNING, + AVIC_IPI_FAILURE_INVALID_TARGET, + AVIC_IPI_FAILURE_INVALID_BACKING_PAGE, ++ AVIC_IPI_FAILURE_INVALID_IPI_VECTOR, + }; + + #define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0) +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c +index a083f9ac9e4f6..1d190761d00fd 100644 +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) + EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate); + + void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, +- unsigned int size, u32 pkru) ++ unsigned int size, u64 xfeatures, u32 pkru) + { + struct fpstate *kstate = gfpu->fpstate; + union fpregs_state *ustate = buf; + struct membuf mb = { .p = buf, .left = size }; + + if (cpu_feature_enabled(X86_FEATURE_XSAVE)) { +- __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE); ++ __copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru, ++ XSTATE_COPY_XSAVE); + } else { + memcpy(&ustate->fxsave, &kstate->regs.fxsave, + sizeof(ustate->fxsave)); +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c +index 1afbc4866b100..ebe698f8af73b 100644 +--- a/arch/x86/kernel/fpu/xstate.c ++++ b/arch/x86/kernel/fpu/xstate.c +@@ -1053,6 +1053,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate, + * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer + * @to: membuf descriptor + * @fpstate: The fpstate buffer from which to copy ++ * @xfeatures: The mask of xfeatures to save (XSAVE mode only) + * @pkru_val: The PKRU value to store in the PKRU component + * @copy_mode: The requested copy mode + * +@@ -1063,7 +1064,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate, + * It supports partial copy but @to.pos always starts from zero. 
+ */ + void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, +- u32 pkru_val, enum xstate_copy_mode copy_mode) ++ u64 xfeatures, u32 pkru_val, ++ enum xstate_copy_mode copy_mode) + { + const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr); + struct xregs_state *xinit = &init_fpstate.regs.xsave; +@@ -1087,7 +1089,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, + break; + + case XSTATE_COPY_XSAVE: +- header.xfeatures &= fpstate->user_xfeatures; ++ header.xfeatures &= fpstate->user_xfeatures & xfeatures; + break; + } + +@@ -1189,6 +1191,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, + enum xstate_copy_mode copy_mode) + { + __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate, ++ tsk->thread.fpu.fpstate->user_xfeatures, + tsk->thread.pkru, copy_mode); + } + +@@ -1540,10 +1543,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize, + fpregs_restore_userregs(); + + newfps->xfeatures = curfps->xfeatures | xfeatures; +- +- if (!guest_fpu) +- newfps->user_xfeatures = curfps->user_xfeatures | xfeatures; +- ++ newfps->user_xfeatures = curfps->user_xfeatures | xfeatures; + newfps->xfd = curfps->xfd & ~xfeatures; + + /* Do the final updates within the locked region */ +diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h +index a4ecb04d8d646..3518fb26d06b0 100644 +--- a/arch/x86/kernel/fpu/xstate.h ++++ b/arch/x86/kernel/fpu/xstate.h +@@ -43,7 +43,8 @@ enum xstate_copy_mode { + + struct membuf; + extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, +- u32 pkru_val, enum xstate_copy_mode copy_mode); ++ u64 xfeatures, u32 pkru_val, ++ enum xstate_copy_mode copy_mode); + extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, + enum xstate_copy_mode mode); + extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru); +diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c +index 7dce812ce2538..71d8698702ce3 100644 +--- a/arch/x86/kernel/sev-shared.c ++++ b/arch/x86/kernel/sev-shared.c +@@ -629,6 +629,23 @@ fail: + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); + } + ++static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt, ++ unsigned long address, ++ bool write) ++{ ++ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) { ++ ctxt->fi.vector = X86_TRAP_PF; ++ ctxt->fi.error_code = X86_PF_USER; ++ ctxt->fi.cr2 = address; ++ if (write) ++ ctxt->fi.error_code |= X86_PF_WRITE; ++ ++ return ES_EXCEPTION; ++ } ++ ++ return ES_OK; ++} ++ + static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, + void *src, char *buf, + unsigned int data_size, +@@ -636,7 +653,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, + bool backwards) + { + int i, b = backwards ? -1 : 1; +- enum es_result ret = ES_OK; ++ unsigned long address = (unsigned long)src; ++ enum es_result ret; ++ ++ ret = vc_insn_string_check(ctxt, address, false); ++ if (ret != ES_OK) ++ return ret; + + for (i = 0; i < count; i++) { + void *s = src + (i * data_size * b); +@@ -657,7 +679,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, + bool backwards) + { + int i, s = backwards ? 
-1 : 1; +- enum es_result ret = ES_OK; ++ unsigned long address = (unsigned long)dst; ++ enum es_result ret; ++ ++ ret = vc_insn_string_check(ctxt, address, true); ++ if (ret != ES_OK) ++ return ret; + + for (i = 0; i < count; i++) { + void *d = dst + (i * data_size * s); +@@ -693,6 +720,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, + static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) + { + struct insn *insn = &ctxt->insn; ++ size_t size; ++ u64 port; ++ + *exitinfo = 0; + + switch (insn->opcode.bytes[0]) { +@@ -701,7 +731,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) + case 0x6d: + *exitinfo |= IOIO_TYPE_INS; + *exitinfo |= IOIO_SEG_ES; +- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; ++ port = ctxt->regs->dx & 0xffff; + break; + + /* OUTS opcodes */ +@@ -709,41 +739,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) + case 0x6f: + *exitinfo |= IOIO_TYPE_OUTS; + *exitinfo |= IOIO_SEG_DS; +- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; ++ port = ctxt->regs->dx & 0xffff; + break; + + /* IN immediate opcodes */ + case 0xe4: + case 0xe5: + *exitinfo |= IOIO_TYPE_IN; +- *exitinfo |= (u8)insn->immediate.value << 16; ++ port = (u8)insn->immediate.value & 0xffff; + break; + + /* OUT immediate opcodes */ + case 0xe6: + case 0xe7: + *exitinfo |= IOIO_TYPE_OUT; +- *exitinfo |= (u8)insn->immediate.value << 16; ++ port = (u8)insn->immediate.value & 0xffff; + break; + + /* IN register opcodes */ + case 0xec: + case 0xed: + *exitinfo |= IOIO_TYPE_IN; +- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; ++ port = ctxt->regs->dx & 0xffff; + break; + + /* OUT register opcodes */ + case 0xee: + case 0xef: + *exitinfo |= IOIO_TYPE_OUT; +- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; ++ port = ctxt->regs->dx & 0xffff; + break; + + default: + return ES_DECODE_FAILED; + } + ++ *exitinfo |= port << 16; ++ + switch (insn->opcode.bytes[0]) { + case 0x6c: + case 0x6e: +@@ -753,12 +785,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) + case 0xee: + /* Single byte opcodes */ + *exitinfo |= IOIO_DATA_8; ++ size = 1; + break; + default: + /* Length determined by instruction parsing */ + *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 + : IOIO_DATA_32; ++ size = (insn->opnd_bytes == 2) ? 
2 : 4; + } ++ + switch (insn->addr_bytes) { + case 2: + *exitinfo |= IOIO_ADDR_16; +@@ -774,7 +809,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) + if (insn_has_rep_prefix(insn)) + *exitinfo |= IOIO_REP; + +- return ES_OK; ++ return vc_ioio_check(ctxt, (u16)port, size); + } + + static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c +index afda719dd7253..e7968c41ecf57 100644 +--- a/arch/x86/kernel/sev.c ++++ b/arch/x86/kernel/sev.c +@@ -512,6 +512,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt + return ES_OK; + } + ++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) ++{ ++ BUG_ON(size > 4); ++ ++ if (user_mode(ctxt->regs)) { ++ struct thread_struct *t = ¤t->thread; ++ struct io_bitmap *iobm = t->io_bitmap; ++ size_t idx; ++ ++ if (!iobm) ++ goto fault; ++ ++ for (idx = port; idx < port + size; ++idx) { ++ if (test_bit(idx, iobm->bitmap)) ++ goto fault; ++ } ++ } ++ ++ return ES_OK; ++ ++fault: ++ ctxt->fi.vector = X86_TRAP_GP; ++ ctxt->fi.error_code = 0; ++ ++ return ES_EXCEPTION; ++} ++ + /* Include code shared with pre-decompression boot stage */ + #include "sev-shared.c" + +@@ -1552,6 +1579,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) + return ES_DECODE_FAILED; + } + ++ if (user_mode(ctxt->regs)) ++ return ES_UNSUPPORTED; ++ + switch (mmio) { + case MMIO_WRITE: + memcpy(ghcb->shared_buffer, reg_data, bytes); +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 7b4224f5ee2de..c3ef1fc602bf9 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -338,14 +338,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) + vcpu->arch.guest_supported_xcr0 = + cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent); + +- /* +- * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if +- * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't +- * supported by the host. 
+- */ +- vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 | +- XFEATURE_MASK_FPSSE; +- + kvm_update_pv_runtime(vcpu); + + vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 68eba393842f5..7e8dbd54869a6 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -2535,13 +2535,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) + { + u32 reg = kvm_lapic_get_reg(apic, lvt_type); + int vector, mode, trig_mode; ++ int r; + + if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { + vector = reg & APIC_VECTOR_MASK; + mode = reg & APIC_MODE_MASK; + trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; +- return __apic_accept_irq(apic, mode, vector, 1, trig_mode, +- NULL); ++ ++ r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL); ++ if (r && lvt_type == APIC_LVTPC) ++ kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED); ++ return r; + } + return 0; + } +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 2a6fec4e2d196..d30325e297a03 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -5994,19 +5994,16 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, + kvm_mmu_zap_all_fast(kvm); + } + +-int kvm_mmu_init_vm(struct kvm *kvm) ++void kvm_mmu_init_vm(struct kvm *kvm) + { + struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; +- int r; + + INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); + INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); + spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); + +- r = kvm_mmu_init_tdp_mmu(kvm); +- if (r < 0) +- return r; ++ kvm_mmu_init_tdp_mmu(kvm); + + node->track_write = kvm_mmu_pte_write; + node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; +@@ -6019,8 +6016,6 @@ int kvm_mmu_init_vm(struct kvm *kvm) + + kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache; + kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO; +- +- return 0; + } + + static void mmu_free_vm_memory_caches(struct kvm *kvm) +diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h +index 582def531d4d9..0a9d5f2925c33 100644 +--- a/arch/x86/kvm/mmu/mmu_internal.h ++++ b/arch/x86/kvm/mmu/mmu_internal.h +@@ -56,7 +56,12 @@ struct kvm_mmu_page { + + bool tdp_mmu_page; + bool unsync; +- u8 mmu_valid_gen; ++ union { ++ u8 mmu_valid_gen; ++ ++ /* Only accessed under slots_lock. */ ++ bool tdp_mmu_scheduled_root_to_zap; ++ }; + bool lpage_disallowed; /* Can't be replaced by an equiv large page */ + + /* +@@ -92,13 +97,7 @@ struct kvm_mmu_page { + struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ + tdp_ptep_t ptep; + }; +- union { +- DECLARE_BITMAP(unsync_child_bitmap, 512); +- struct { +- struct work_struct tdp_mmu_async_work; +- void *tdp_mmu_async_data; +- }; +- }; ++ DECLARE_BITMAP(unsync_child_bitmap, 512); + + struct list_head lpage_disallowed_link; + #ifdef CONFIG_X86_32 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c +index 9b9fc4e834d09..c3b0f973375b4 100644 +--- a/arch/x86/kvm/mmu/tdp_mmu.c ++++ b/arch/x86/kvm/mmu/tdp_mmu.c +@@ -14,24 +14,16 @@ static bool __read_mostly tdp_mmu_enabled = true; + module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); + + /* Initializes the TDP MMU for the VM, if enabled. 
*/ +-int kvm_mmu_init_tdp_mmu(struct kvm *kvm) ++void kvm_mmu_init_tdp_mmu(struct kvm *kvm) + { +- struct workqueue_struct *wq; +- + if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled)) +- return 0; +- +- wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0); +- if (!wq) +- return -ENOMEM; ++ return; + + /* This should not be changed for the lifetime of the VM. */ + kvm->arch.tdp_mmu_enabled = true; + INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); + spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); + INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); +- kvm->arch.tdp_mmu_zap_wq = wq; +- return 1; + } + + /* Arbitrarily returns true so that this may be used in if statements. */ +@@ -57,20 +49,15 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) + * ultimately frees all roots. + */ + kvm_tdp_mmu_invalidate_all_roots(kvm); +- +- /* +- * Destroying a workqueue also first flushes the workqueue, i.e. no +- * need to invoke kvm_tdp_mmu_zap_invalidated_roots(). +- */ +- destroy_workqueue(kvm->arch.tdp_mmu_zap_wq); ++ kvm_tdp_mmu_zap_invalidated_roots(kvm); + + WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages)); + WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); + + /* + * Ensure that all the outstanding RCU callbacks to free shadow pages +- * can run before the VM is torn down. Work items on tdp_mmu_zap_wq +- * can call kvm_tdp_mmu_put_root and create new callbacks. ++ * can run before the VM is torn down. Putting the last reference to ++ * zapped roots will create new callbacks. + */ + rcu_barrier(); + } +@@ -97,46 +84,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) + tdp_mmu_free_sp(sp); + } + +-static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, +- bool shared); +- +-static void tdp_mmu_zap_root_work(struct work_struct *work) +-{ +- struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page, +- tdp_mmu_async_work); +- struct kvm *kvm = root->tdp_mmu_async_data; +- +- read_lock(&kvm->mmu_lock); +- +- /* +- * A TLB flush is not necessary as KVM performs a local TLB flush when +- * allocating a new root (see kvm_mmu_load()), and when migrating vCPU +- * to a different pCPU. Note, the local TLB flush on reuse also +- * invalidates any paging-structure-cache entries, i.e. TLB entries for +- * intermediate paging structures, that may be zapped, as such entries +- * are associated with the ASID on both VMX and SVM. +- */ +- tdp_mmu_zap_root(kvm, root, true); +- +- /* +- * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for +- * avoiding an infinite loop. By design, the root is reachable while +- * it's being asynchronously zapped, thus a different task can put its +- * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an +- * asynchronously zapped root is unavoidable. 
+- */ +- kvm_tdp_mmu_put_root(kvm, root, true); +- +- read_unlock(&kvm->mmu_lock); +-} +- +-static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root) +-{ +- root->tdp_mmu_async_data = kvm; +- INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work); +- queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work); +-} +- + void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, + bool shared) + { +@@ -222,11 +169,11 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, + #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ + __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true) + +-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \ +- for (_root = tdp_mmu_next_root(_kvm, NULL, false, false); \ ++#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \ ++ for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \ + _root; \ +- _root = tdp_mmu_next_root(_kvm, _root, false, false)) \ +- if (!kvm_lockdep_assert_mmu_lock_held(_kvm, false)) { \ ++ _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \ ++ if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \ + } else + + /* +@@ -305,7 +252,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) + * by a memslot update or by the destruction of the VM. Initialize the + * refcount to two; one reference for the vCPU, and one reference for + * the TDP MMU itself, which is held until the root is invalidated and +- * is ultimately put by tdp_mmu_zap_root_work(). ++ * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots(). + */ + refcount_set(&root->tdp_mmu_root_count, 2); + +@@ -963,7 +910,7 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush) + { + struct kvm_mmu_page *root; + +- for_each_tdp_mmu_root_yield_safe(kvm, root) ++ for_each_tdp_mmu_root_yield_safe(kvm, root, false) + flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush); + + return flush; +@@ -985,7 +932,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm) + * is being destroyed or the userspace VMM has exited. In both cases, + * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request. + */ +- for_each_tdp_mmu_root_yield_safe(kvm, root) ++ for_each_tdp_mmu_root_yield_safe(kvm, root, false) + tdp_mmu_zap_root(kvm, root, false); + } + +@@ -995,18 +942,47 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm) + */ + void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) + { +- flush_workqueue(kvm->arch.tdp_mmu_zap_wq); ++ struct kvm_mmu_page *root; ++ ++ read_lock(&kvm->mmu_lock); ++ ++ for_each_tdp_mmu_root_yield_safe(kvm, root, true) { ++ if (!root->tdp_mmu_scheduled_root_to_zap) ++ continue; ++ ++ root->tdp_mmu_scheduled_root_to_zap = false; ++ KVM_BUG_ON(!root->role.invalid, kvm); ++ ++ /* ++ * A TLB flush is not necessary as KVM performs a local TLB ++ * flush when allocating a new root (see kvm_mmu_load()), and ++ * when migrating a vCPU to a different pCPU. Note, the local ++ * TLB flush on reuse also invalidates paging-structure-cache ++ * entries, i.e. TLB entries for intermediate paging structures, ++ * that may be zapped, as such entries are associated with the ++ * ASID on both VMX and SVM. 
++ */ ++ tdp_mmu_zap_root(kvm, root, true); ++ ++ /* ++ * The referenced needs to be put *after* zapping the root, as ++ * the root must be reachable by mmu_notifiers while it's being ++ * zapped ++ */ ++ kvm_tdp_mmu_put_root(kvm, root, true); ++ } ++ ++ read_unlock(&kvm->mmu_lock); + } + + /* + * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that + * is about to be zapped, e.g. in response to a memslots update. The actual +- * zapping is performed asynchronously. Using a separate workqueue makes it +- * easy to ensure that the destruction is performed before the "fast zap" +- * completes, without keeping a separate list of invalidated roots; the list is +- * effectively the list of work items in the workqueue. ++ * zapping is done separately so that it happens with mmu_lock with read, ++ * whereas invalidating roots must be done with mmu_lock held for write (unless ++ * the VM is being destroyed). + * +- * Note, the asynchronous worker is gifted the TDP MMU's reference. ++ * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference. + * See kvm_tdp_mmu_get_vcpu_root_hpa(). + */ + void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) +@@ -1031,19 +1007,20 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) + /* + * As above, mmu_lock isn't held when destroying the VM! There can't + * be other references to @kvm, i.e. nothing else can invalidate roots +- * or be consuming roots, but walking the list of roots does need to be +- * guarded against roots being deleted by the asynchronous zap worker. ++ * or get/put references to roots. + */ +- rcu_read_lock(); +- +- list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) { ++ list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { ++ /* ++ * Note, invalid roots can outlive a memslot update! Invalid ++ * roots must be *zapped* before the memslot update completes, ++ * but a different task can acquire a reference and keep the ++ * root alive after its been zapped. 
++ */ + if (!root->role.invalid) { ++ root->tdp_mmu_scheduled_root_to_zap = true; + root->role.invalid = true; +- tdp_mmu_schedule_zap_root(kvm, root); + } + } +- +- rcu_read_unlock(); + } + + /* +diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h +index d0a9fe0770fdd..c82a8bb321bb9 100644 +--- a/arch/x86/kvm/mmu/tdp_mmu.h ++++ b/arch/x86/kvm/mmu/tdp_mmu.h +@@ -65,7 +65,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, + u64 *spte); + + #ifdef CONFIG_X86_64 +-int kvm_mmu_init_tdp_mmu(struct kvm *kvm); ++void kvm_mmu_init_tdp_mmu(struct kvm *kvm); + void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); + static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } + +@@ -86,7 +86,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) + return sp && is_tdp_mmu_page(sp) && sp->root_count; + } + #else +-static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; } ++static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {} + static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {} + static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } + static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; } +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c +index d3e66740c7c68..fb125b54ee680 100644 +--- a/arch/x86/kvm/svm/avic.c ++++ b/arch/x86/kvm/svm/avic.c +@@ -542,8 +542,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu) + case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: + WARN_ONCE(1, "Invalid backing page\n"); + break; ++ case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR: ++ /* Invalid IPI with vector < 16 */ ++ break; + default: +- pr_err("Unknown IPI interception\n"); ++ vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n"); + } + + return 1; +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c +index 8053974af326c..bc288e6bde642 100644 +--- a/arch/x86/kvm/svm/nested.c ++++ b/arch/x86/kvm/svm/nested.c +@@ -1164,6 +1164,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu) + + nested_svm_uninit_mmu_context(vcpu); + vmcb_mark_all_dirty(svm->vmcb); ++ ++ if (kvm_apicv_activated(vcpu->kvm)) ++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); + } + + kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 1931d3fcbbe09..4d6baae1ae748 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -5301,26 +5301,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, + return 0; + } + +-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, +- struct kvm_xsave *guest_xsave) +-{ +- if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) +- return; +- +- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, +- guest_xsave->region, +- sizeof(guest_xsave->region), +- vcpu->arch.pkru); +-} + + static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, + u8 *state, unsigned int size) + { ++ /* ++ * Only copy state for features that are enabled for the guest. The ++ * state itself isn't problematic, but setting bits in the header for ++ * features that are supported in *this* host but not exposed to the ++ * guest can result in KVM_SET_XSAVE failing when live migrating to a ++ * compatible host without the features that are NOT exposed to the ++ * guest. ++ * ++ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if ++ * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't ++ * supported by the host. 
++ */ ++ u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 | ++ XFEATURE_MASK_FPSSE; ++ + if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) + return; + +- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, +- state, size, vcpu->arch.pkru); ++ fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size, ++ supported_xcr0, vcpu->arch.pkru); ++} ++ ++static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, ++ struct kvm_xsave *guest_xsave) ++{ ++ return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region, ++ sizeof(guest_xsave->region)); + } + + static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, +@@ -12442,9 +12453,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) + if (ret) + goto out; + +- ret = kvm_mmu_init_vm(kvm); +- if (ret) +- goto out_page_track; ++ kvm_mmu_init_vm(kvm); + + ret = static_call(kvm_x86_vm_init)(kvm); + if (ret) +@@ -12489,7 +12498,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) + + out_uninit_mmu: + kvm_mmu_uninit_vm(kvm); +-out_page_track: + kvm_page_track_cleanup(kvm); + out: + return ret; +diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c +index c2c786eb95abc..1687483ff319e 100644 +--- a/drivers/acpi/irq.c ++++ b/drivers/acpi/irq.c +@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, + int polarity) + { + struct irq_fwspec fwspec; ++ unsigned int irq; + + fwspec.fwnode = acpi_get_gsi_domain_id(gsi); + if (WARN_ON(!fwspec.fwnode)) { +@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, + fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); + fwspec.param_count = 2; + +- return irq_create_fwspec_mapping(&fwspec); ++ irq = irq_create_fwspec_mapping(&fwspec); ++ if (!irq) ++ return -EINVAL; ++ ++ return irq; + } + EXPORT_SYMBOL_GPL(acpi_register_gsi); + +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index fbc231a3f7951..fa2fc1953fc26 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -2456,7 +2456,7 @@ static int ata_dev_config_lba(struct ata_device *dev) + { + const u16 *id = dev->id; + const char *lba_desc; +- char ncq_desc[24]; ++ char ncq_desc[32]; + int ret; + + dev->flags |= ATA_DFLAG_LBA; +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c +index 2a04dd36a4948..1eaaf01418ea7 100644 +--- a/drivers/ata/libata-eh.c ++++ b/drivers/ata/libata-eh.c +@@ -2247,7 +2247,7 @@ static void ata_eh_link_report(struct ata_link *link) + struct ata_eh_context *ehc = &link->eh_context; + struct ata_queued_cmd *qc; + const char *frozen, *desc; +- char tries_buf[6] = ""; ++ char tries_buf[16] = ""; + int tag, nr_failed = 0; + + if (ehc->i.flags & ATA_EHI_QUIET) +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index 7de1f27d0323d..df1f78abdf266 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -1572,7 +1572,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) + + /* If the user didn't specify a name match any */ + if (data) +- return !strcmp((*r)->name, data); ++ return (*r)->name && !strcmp((*r)->name, data); + else + return 1; + } +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index f2062c2a28da8..96d4f48e36011 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -3984,6 +3984,7 @@ static int btusb_probe(struct usb_interface *intf, + + if (id->driver_info & BTUSB_QCA_ROME) { + data->setup_on_usb = btusb_setup_qca; ++ hdev->shutdown = btusb_shutdown_qca; + 
hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + hdev->cmd_timeout = btusb_qca_cmd_timeout; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); +diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c +index c443c3b0a4da5..4415d850d698b 100644 +--- a/drivers/bluetooth/hci_vhci.c ++++ b/drivers/bluetooth/hci_vhci.c +@@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) + struct vhci_data *data = hci_get_drvdata(hdev); + + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); ++ ++ mutex_lock(&data->open_mutex); + skb_queue_tail(&data->readq, skb); ++ mutex_unlock(&data->open_mutex); + + wake_up_interruptible(&data->read_wait); + return 0; +diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c +index de14949a3fe5a..92c1f2baa4bff 100644 +--- a/drivers/gpio/gpio-timberdale.c ++++ b/drivers/gpio/gpio-timberdale.c +@@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, + unsigned offset, bool enabled) + { + struct timbgpio *tgpio = gpiochip_get_data(gpio); ++ unsigned long flags; + u32 reg; + +- spin_lock(&tgpio->lock); ++ spin_lock_irqsave(&tgpio->lock, flags); + reg = ioread32(tgpio->membase + offset); + + if (enabled) +@@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, + reg &= ~(1 << index); + + iowrite32(reg, tgpio->membase + offset); +- spin_unlock(&tgpio->lock); ++ spin_unlock_irqrestore(&tgpio->lock, flags); + + return 0; + } +diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c +index a429176673e7a..314dfaa633857 100644 +--- a/drivers/gpio/gpio-vf610.c ++++ b/drivers/gpio/gpio-vf610.c +@@ -30,7 +30,6 @@ struct fsl_gpio_soc_data { + + struct vf610_gpio_port { + struct gpio_chip gc; +- struct irq_chip ic; + void __iomem *base; + void __iomem *gpio_base; + const struct fsl_gpio_soc_data *sdata; +@@ -128,14 +127,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, + unsigned long mask = BIT(gpio); + u32 val; + ++ vf610_gpio_set(chip, gpio, value); ++ + if (port->sdata && port->sdata->have_paddr) { + val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); + val |= mask; + vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); + } + +- vf610_gpio_set(chip, gpio, value); +- + return pinctrl_gpio_direction_output(chip->base + gpio); + } + +@@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type) + + static void vf610_gpio_irq_mask(struct irq_data *d) + { +- struct vf610_gpio_port *port = +- gpiochip_get_data(irq_data_get_irq_chip_data(d)); +- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq); ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct vf610_gpio_port *port = gpiochip_get_data(gc); ++ irq_hw_number_t gpio_num = irqd_to_hwirq(d); ++ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num); + + vf610_gpio_writel(0, pcr_base); ++ gpiochip_disable_irq(gc, gpio_num); + } + + static void vf610_gpio_irq_unmask(struct irq_data *d) + { +- struct vf610_gpio_port *port = +- gpiochip_get_data(irq_data_get_irq_chip_data(d)); +- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq); ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct vf610_gpio_port *port = gpiochip_get_data(gc); ++ irq_hw_number_t gpio_num = irqd_to_hwirq(d); ++ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num); + +- vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET, ++ gpiochip_enable_irq(gc, gpio_num); ++ vf610_gpio_writel(port->irqc[gpio_num] << 
PORT_PCR_IRQC_OFFSET, + pcr_base); + } + +@@ -237,6 +240,18 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable) + return 0; + } + ++static const struct irq_chip vf610_irqchip = { ++ .name = "gpio-vf610", ++ .irq_ack = vf610_gpio_irq_ack, ++ .irq_mask = vf610_gpio_irq_mask, ++ .irq_unmask = vf610_gpio_irq_unmask, ++ .irq_set_type = vf610_gpio_irq_set_type, ++ .irq_set_wake = vf610_gpio_irq_set_wake, ++ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND ++ | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND, ++ GPIOCHIP_IRQ_RESOURCE_HELPERS, ++}; ++ + static void vf610_gpio_disable_clk(void *data) + { + clk_disable_unprepare(data); +@@ -249,7 +264,6 @@ static int vf610_gpio_probe(struct platform_device *pdev) + struct vf610_gpio_port *port; + struct gpio_chip *gc; + struct gpio_irq_chip *girq; +- struct irq_chip *ic; + int i; + int ret; + +@@ -315,14 +329,6 @@ static int vf610_gpio_probe(struct platform_device *pdev) + gc->direction_output = vf610_gpio_direction_output; + gc->set = vf610_gpio_set; + +- ic = &port->ic; +- ic->name = "gpio-vf610"; +- ic->irq_ack = vf610_gpio_irq_ack; +- ic->irq_mask = vf610_gpio_irq_mask; +- ic->irq_unmask = vf610_gpio_irq_unmask; +- ic->irq_set_type = vf610_gpio_irq_set_type; +- ic->irq_set_wake = vf610_gpio_irq_set_wake; +- + /* Mask all GPIO interrupts */ + for (i = 0; i < gc->ngpio; i++) + vf610_gpio_writel(0, port->base + PORT_PCR(i)); +@@ -331,7 +337,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) + vf610_gpio_writel(~0, port->base + PORT_ISFR); + + girq = &gc->irq; +- girq->chip = ic; ++ gpio_irq_chip_set_chip(girq, &vf610_irqchip); + girq->parent_handler = vf610_gpio_irq_handler; + girq->num_parents = 1; + girq->parents = devm_kcalloc(&pdev->dev, 1, +diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c +index 8472013ff38a2..0e78437c8389d 100644 +--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c ++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c +@@ -1991,6 +1991,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 1): + case IP_VERSION(11, 0, 2): ++ case IP_VERSION(11, 0, 3): + *states = ATTR_STATE_SUPPORTED; + break; + default: +diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +index b89f7f7ca1885..1b5c27ed27370 100644 +--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c ++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +@@ -673,7 +673,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge) + return container_of(bridge, struct ti_sn65dsi86, bridge); + } + +-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata) ++static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata) + { + int val; + struct mipi_dsi_host *host; +@@ -688,7 +688,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata) + if (!host) + return -EPROBE_DEFER; + +- dsi = devm_mipi_dsi_device_register_full(dev, host, &info); ++ dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info); + if (IS_ERR(dsi)) + return PTR_ERR(dsi); + +@@ -706,7 +706,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata) + + pdata->dsi = dsi; + +- return devm_mipi_dsi_attach(dev, dsi); ++ return devm_mipi_dsi_attach(&adev->dev, dsi); + } + + static int ti_sn_bridge_attach(struct drm_bridge *bridge, +@@ -1279,9 +1279,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, + struct device_node *np = pdata->dev->of_node; + int ret; + +- pdata->next_bridge = 
devm_drm_of_get_bridge(pdata->dev, np, 1, 0); ++ pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0); + if (IS_ERR(pdata->next_bridge)) +- return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge), ++ return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge), + "failed to create panel bridge\n"); + + ti_sn_bridge_parse_lanes(pdata, np); +@@ -1300,9 +1300,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, + + drm_bridge_add(&pdata->bridge); + +- ret = ti_sn_attach_host(pdata); ++ ret = ti_sn_attach_host(adev, pdata); + if (ret) { +- dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n"); ++ dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n"); + goto err_remove_bridge; + } + +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c +index 0cb646cb04ee1..d5c15292ae937 100644 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c +@@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = { + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, + }; + ++static const struct drm_dmi_panel_orientation_data gpd_onemix2s = { ++ .width = 1200, ++ .height = 1920, ++ .bios_dates = (const char * const []){ "05/21/2018", "10/26/2018", ++ "03/04/2019", NULL }, ++ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, ++}; ++ + static const struct drm_dmi_panel_orientation_data gpd_pocket = { + .width = 1200, + .height = 1920, +@@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = { + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, ++ }, { /* One Mix 2S (generic strings, also match on bios date) */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), ++ }, ++ .driver_data = (void *)&gpd_onemix2s, + }, + {} + }; +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c +index d445e2d63c9c8..d7e30d889a5ca 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c +@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err) + case 0: + case -EAGAIN: + case -ENOSPC: /* transient failure to evict? */ ++ case -ENOBUFS: /* temporarily out of fences? 
*/ + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c +index 1d0374a577a5e..fb4f0e336b60e 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c +@@ -234,6 +234,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) + npages = obj->size >> PAGE_SHIFT; + mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); + if (!mtk_gem->pages) { ++ sg_free_table(sgt); + kfree(sgt); + return -ENOMEM; + } +@@ -243,12 +244,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!mtk_gem->kvaddr) { ++ sg_free_table(sgt); + kfree(sgt); + kfree(mtk_gem->pages); + return -ENOMEM; + } +-out: ++ sg_free_table(sgt); + kfree(sgt); ++ ++out: + iosys_map_set_vaddr(map, mtk_gem->kvaddr); + + return 0; +diff --git a/drivers/hid/.kunitconfig b/drivers/hid/.kunitconfig +index 04daeff5c970e..675a8209c7aeb 100644 +--- a/drivers/hid/.kunitconfig ++++ b/drivers/hid/.kunitconfig +@@ -1,5 +1,6 @@ + CONFIG_KUNIT=y + CONFIG_USB=y + CONFIG_USB_HID=y ++CONFIG_HID_BATTERY_STRENGTH=y + CONFIG_HID_UCLOGIC=y + CONFIG_HID_KUNIT_TEST=y +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index c1873ccc7248d..9ad5e43d9961b 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -1263,6 +1263,7 @@ config HID_MCP2221 + config HID_KUNIT_TEST + tristate "KUnit tests for HID" if !KUNIT_ALL_TESTS + depends on KUNIT ++ depends on HID_BATTERY_STRENGTH + depends on HID_UCLOGIC + default KUNIT_ALL_TESTS + help +diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c +index 403506b9697e7..b346d68a06f5a 100644 +--- a/drivers/hid/hid-holtek-kbd.c ++++ b/drivers/hid/hid-holtek-kbd.c +@@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, + return -ENODEV; + + boot_hid = usb_get_intfdata(boot_interface); ++ if (list_empty(&boot_hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } + boot_hid_input = list_first_entry(&boot_hid->inputs, + struct hid_input, list); + +diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c +new file mode 100644 +index 0000000000000..77c2d45ac62a7 +--- /dev/null ++++ b/drivers/hid/hid-input-test.c +@@ -0,0 +1,80 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * HID to Linux Input mapping ++ * ++ * Copyright (c) 2022 José Expósito ++ */ ++ ++#include ++ ++static void hid_test_input_set_battery_charge_status(struct kunit *test) ++{ ++ struct hid_device *dev; ++ bool handled; ++ ++ dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL); ++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); ++ ++ handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0); ++ KUNIT_EXPECT_FALSE(test, handled); ++ KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN); ++ ++ handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 0); ++ KUNIT_EXPECT_TRUE(test, handled); ++ KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING); ++ ++ handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1); ++ KUNIT_EXPECT_TRUE(test, handled); ++ KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING); ++} ++ ++static void hid_test_input_get_battery_property(struct kunit *test) ++{ ++ struct power_supply *psy; ++ struct hid_device *dev; ++ union 
power_supply_propval val; ++ int ret; ++ ++ dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL); ++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); ++ dev->battery_avoid_query = true; ++ ++ psy = kunit_kzalloc(test, sizeof(*psy), GFP_KERNEL); ++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, psy); ++ psy->drv_data = dev; ++ ++ dev->battery_status = HID_BATTERY_UNKNOWN; ++ dev->battery_charge_status = POWER_SUPPLY_STATUS_CHARGING; ++ ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val); ++ KUNIT_EXPECT_EQ(test, ret, 0); ++ KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_UNKNOWN); ++ ++ dev->battery_status = HID_BATTERY_REPORTED; ++ dev->battery_charge_status = POWER_SUPPLY_STATUS_CHARGING; ++ ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val); ++ KUNIT_EXPECT_EQ(test, ret, 0); ++ KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_CHARGING); ++ ++ dev->battery_status = HID_BATTERY_REPORTED; ++ dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING; ++ ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val); ++ KUNIT_EXPECT_EQ(test, ret, 0); ++ KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_DISCHARGING); ++} ++ ++static struct kunit_case hid_input_tests[] = { ++ KUNIT_CASE(hid_test_input_set_battery_charge_status), ++ KUNIT_CASE(hid_test_input_get_battery_property), ++ { } ++}; ++ ++static struct kunit_suite hid_input_test_suite = { ++ .name = "hid_input", ++ .test_cases = hid_input_tests, ++}; ++ ++kunit_test_suite(hid_input_test_suite); ++ ++MODULE_DESCRIPTION("HID input KUnit tests"); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("José Expósito "); +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 77ee5e01e6111..4ba5df3c1e039 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -492,7 +492,7 @@ static int hidinput_get_battery_property(struct power_supply *psy, + if (dev->battery_status == HID_BATTERY_UNKNOWN) + val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + else +- val->intval = POWER_SUPPLY_STATUS_DISCHARGING; ++ val->intval = dev->battery_charge_status; + break; + + case POWER_SUPPLY_PROP_SCOPE: +@@ -560,6 +560,7 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, + dev->battery_max = max; + dev->battery_report_type = report_type; + dev->battery_report_id = field->report->id; ++ dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING; + + /* + * Stylus is normally not connected to the device and thus we +@@ -626,6 +627,20 @@ static void hidinput_update_battery(struct hid_device *dev, int value) + power_supply_changed(dev->battery); + } + } ++ ++static bool hidinput_set_battery_charge_status(struct hid_device *dev, ++ unsigned int usage, int value) ++{ ++ switch (usage) { ++ case HID_BAT_CHARGING: ++ dev->battery_charge_status = value ? 
++ POWER_SUPPLY_STATUS_CHARGING : ++ POWER_SUPPLY_STATUS_DISCHARGING; ++ return true; ++ } ++ ++ return false; ++} + #else /* !CONFIG_HID_BATTERY_STRENGTH */ + static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, + struct hid_field *field, bool is_percentage) +@@ -640,6 +655,12 @@ static void hidinput_cleanup_battery(struct hid_device *dev) + static void hidinput_update_battery(struct hid_device *dev, int value) + { + } ++ ++static bool hidinput_set_battery_charge_status(struct hid_device *dev, ++ unsigned int usage, int value) ++{ ++ return false; ++} + #endif /* CONFIG_HID_BATTERY_STRENGTH */ + + static bool hidinput_field_in_collection(struct hid_device *device, struct hid_field *field, +@@ -1239,6 +1260,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + hidinput_setup_battery(device, HID_INPUT_REPORT, field, true); + usage->type = EV_PWR; + return; ++ case HID_BAT_CHARGING: ++ usage->type = EV_PWR; ++ return; + } + goto unknown; + +@@ -1481,7 +1505,11 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct + return; + + if (usage->type == EV_PWR) { +- hidinput_update_battery(hid, value); ++ bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value); ++ ++ if (!handled) ++ hidinput_update_battery(hid, value); ++ + return; + } + +@@ -2346,3 +2374,7 @@ void hidinput_disconnect(struct hid_device *hid) + cancel_work_sync(&hid->led_work); + } + EXPORT_SYMBOL_GPL(hidinput_disconnect); ++ ++#ifdef CONFIG_HID_KUNIT_TEST ++#include "hid-input-test.c" ++#endif +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c +index fb427391c3b86..8d0dad12b2d37 100644 +--- a/drivers/hid/hid-logitech-hidpp.c ++++ b/drivers/hid/hid-logitech-hidpp.c +@@ -4427,6 +4427,8 @@ static const struct hid_device_id hidpp_devices[] = { + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, + { /* MX Master mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) }, ++ { /* M720 Triathlon mouse over Bluetooth */ ++ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) }, + { /* MX Ergo trackball over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) }, +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 521b2ffb42449..8db4ae05febc8 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -2144,6 +2144,10 @@ static const struct hid_device_id mt_devices[] = { + USB_DEVICE_ID_MTP_STM)}, + + /* Synaptics devices */ ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, ++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, ++ USB_VENDOR_ID_SYNAPTICS, 0xcd7e) }, ++ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xce08) }, +diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c +index 5bfc0c4504608..8a8a3dd8af0c1 100644 +--- a/drivers/hid/hid-nintendo.c ++++ b/drivers/hid/hid-nintendo.c +@@ -2011,7 +2011,9 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) + struct joycon_input_report *report; + + req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO; ++ mutex_lock(&ctlr->output_mutex); + ret = joycon_send_subcmd(ctlr, &req, 0, HZ); ++ mutex_unlock(&ctlr->output_mutex); + if (ret) { + hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret); + return ret; +@@ -2040,6 +2042,85 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) + return 0; + } + ++static 
int joycon_init(struct hid_device *hdev) ++{ ++ struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); ++ int ret = 0; ++ ++ mutex_lock(&ctlr->output_mutex); ++ /* if handshake command fails, assume ble pro controller */ ++ if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) && ++ !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { ++ hid_dbg(hdev, "detected USB controller\n"); ++ /* set baudrate for improved latency */ ++ ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); ++ if (ret) { ++ hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret); ++ goto out_unlock; ++ } ++ /* handshake */ ++ ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); ++ if (ret) { ++ hid_err(hdev, "Failed handshake; ret=%d\n", ret); ++ goto out_unlock; ++ } ++ /* ++ * Set no timeout (to keep controller in USB mode). ++ * This doesn't send a response, so ignore the timeout. ++ */ ++ joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); ++ } else if (jc_type_is_chrggrip(ctlr)) { ++ hid_err(hdev, "Failed charging grip handshake\n"); ++ ret = -ETIMEDOUT; ++ goto out_unlock; ++ } ++ ++ /* get controller calibration data, and parse it */ ++ ret = joycon_request_calibration(ctlr); ++ if (ret) { ++ /* ++ * We can function with default calibration, but it may be ++ * inaccurate. Provide a warning, and continue on. ++ */ ++ hid_warn(hdev, "Analog stick positions may be inaccurate\n"); ++ } ++ ++ /* get IMU calibration data, and parse it */ ++ ret = joycon_request_imu_calibration(ctlr); ++ if (ret) { ++ /* ++ * We can function with default calibration, but it may be ++ * inaccurate. Provide a warning, and continue on. ++ */ ++ hid_warn(hdev, "Unable to read IMU calibration data\n"); ++ } ++ ++ /* Set the reporting mode to 0x30, which is the full report mode */ ++ ret = joycon_set_report_mode(ctlr); ++ if (ret) { ++ hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); ++ goto out_unlock; ++ } ++ ++ /* Enable rumble */ ++ ret = joycon_enable_rumble(ctlr); ++ if (ret) { ++ hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); ++ goto out_unlock; ++ } ++ ++ /* Enable the IMU */ ++ ret = joycon_enable_imu(ctlr); ++ if (ret) { ++ hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); ++ goto out_unlock; ++ } ++ ++out_unlock: ++ mutex_unlock(&ctlr->output_mutex); ++ return ret; ++} ++ + /* Common handler for parsing inputs */ + static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data, + int size) +@@ -2171,85 +2252,19 @@ static int nintendo_hid_probe(struct hid_device *hdev, + + hid_device_io_start(hdev); + +- /* Initialize the controller */ +- mutex_lock(&ctlr->output_mutex); +- /* if handshake command fails, assume ble pro controller */ +- if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) && +- !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { +- hid_dbg(hdev, "detected USB controller\n"); +- /* set baudrate for improved latency */ +- ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); +- if (ret) { +- hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret); +- goto err_mutex; +- } +- /* handshake */ +- ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); +- if (ret) { +- hid_err(hdev, "Failed handshake; ret=%d\n", ret); +- goto err_mutex; +- } +- /* +- * Set no timeout (to keep controller in USB mode). +- * This doesn't send a response, so ignore the timeout. 
+- */ +- joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); +- } else if (jc_type_is_chrggrip(ctlr)) { +- hid_err(hdev, "Failed charging grip handshake\n"); +- ret = -ETIMEDOUT; +- goto err_mutex; +- } +- +- /* get controller calibration data, and parse it */ +- ret = joycon_request_calibration(ctlr); ++ ret = joycon_init(hdev); + if (ret) { +- /* +- * We can function with default calibration, but it may be +- * inaccurate. Provide a warning, and continue on. +- */ +- hid_warn(hdev, "Analog stick positions may be inaccurate\n"); +- } +- +- /* get IMU calibration data, and parse it */ +- ret = joycon_request_imu_calibration(ctlr); +- if (ret) { +- /* +- * We can function with default calibration, but it may be +- * inaccurate. Provide a warning, and continue on. +- */ +- hid_warn(hdev, "Unable to read IMU calibration data\n"); +- } +- +- /* Set the reporting mode to 0x30, which is the full report mode */ +- ret = joycon_set_report_mode(ctlr); +- if (ret) { +- hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); +- goto err_mutex; +- } +- +- /* Enable rumble */ +- ret = joycon_enable_rumble(ctlr); +- if (ret) { +- hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); +- goto err_mutex; +- } +- +- /* Enable the IMU */ +- ret = joycon_enable_imu(ctlr); +- if (ret) { +- hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); +- goto err_mutex; ++ hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret); ++ goto err_close; + } + + ret = joycon_read_info(ctlr); + if (ret) { + hid_err(hdev, "Failed to retrieve controller info; ret=%d\n", + ret); +- goto err_mutex; ++ goto err_close; + } + +- mutex_unlock(&ctlr->output_mutex); +- + /* Initialize the leds */ + ret = joycon_leds_create(ctlr); + if (ret) { +@@ -2275,8 +2290,6 @@ static int nintendo_hid_probe(struct hid_device *hdev, + hid_dbg(hdev, "probe - success\n"); + return 0; + +-err_mutex: +- mutex_unlock(&ctlr->output_mutex); + err_close: + hid_hw_close(hdev); + err_stop: +@@ -2306,6 +2319,20 @@ static void nintendo_hid_remove(struct hid_device *hdev) + hid_hw_stop(hdev); + } + ++#ifdef CONFIG_PM ++ ++static int nintendo_hid_resume(struct hid_device *hdev) ++{ ++ int ret = joycon_init(hdev); ++ ++ if (ret) ++ hid_err(hdev, "Failed to restore controller after resume"); ++ ++ return ret; ++} ++ ++#endif ++ + static const struct hid_device_id nintendo_hid_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, + USB_DEVICE_ID_NINTENDO_PROCON) }, +@@ -2327,6 +2354,10 @@ static struct hid_driver nintendo_hid_driver = { + .probe = nintendo_hid_probe, + .remove = nintendo_hid_remove, + .raw_event = nintendo_hid_event, ++ ++#ifdef CONFIG_PM ++ .resume = nintendo_hid_resume, ++#endif + }; + module_hid_driver(nintendo_hid_driver); + +diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c +index 313904be5f3bd..57ff09f18c371 100644 +--- a/drivers/i2c/i2c-mux.c ++++ b/drivers/i2c/i2c-mux.c +@@ -341,7 +341,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc, + priv->adap.lock_ops = &i2c_parent_lock_ops; + + /* Sanity check on class */ +- if (i2c_mux_parent_classes(parent) & class) ++ if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED) + dev_err(&parent->dev, + "Segment %d behind mux can't share classes with ancestors\n", + chan_id); +diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c +index 80eff7090f14a..faf680140c178 100644 +--- a/drivers/iio/adc/ad7192.c ++++ b/drivers/iio/adc/ad7192.c +@@ -177,7 +177,7 @@ struct ad7192_chip_info { + struct ad7192_state { + const struct ad7192_chip_info 
*chip_info; + struct regulator *avdd; +- struct regulator *dvdd; ++ struct regulator *vref; + struct clk *mclk; + u16 int_vref_mv; + u32 fclk; +@@ -1011,24 +1011,34 @@ static int ad7192_probe(struct spi_device *spi) + if (ret) + return ret; + +- st->dvdd = devm_regulator_get(&spi->dev, "dvdd"); +- if (IS_ERR(st->dvdd)) +- return PTR_ERR(st->dvdd); ++ ret = devm_regulator_get_enable(&spi->dev, "dvdd"); ++ if (ret) ++ return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n"); + +- ret = regulator_enable(st->dvdd); +- if (ret) { +- dev_err(&spi->dev, "Failed to enable specified DVdd supply\n"); +- return ret; +- } ++ st->vref = devm_regulator_get_optional(&spi->dev, "vref"); ++ if (IS_ERR(st->vref)) { ++ if (PTR_ERR(st->vref) != -ENODEV) ++ return PTR_ERR(st->vref); + +- ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->dvdd); +- if (ret) +- return ret; ++ ret = regulator_get_voltage(st->avdd); ++ if (ret < 0) ++ return dev_err_probe(&spi->dev, ret, ++ "Device tree error, AVdd voltage undefined\n"); ++ } else { ++ ret = regulator_enable(st->vref); ++ if (ret) { ++ dev_err(&spi->dev, "Failed to enable specified Vref supply\n"); ++ return ret; ++ } + +- ret = regulator_get_voltage(st->avdd); +- if (ret < 0) { +- dev_err(&spi->dev, "Device tree error, reference voltage undefined\n"); +- return ret; ++ ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref); ++ if (ret) ++ return ret; ++ ++ ret = regulator_get_voltage(st->vref); ++ if (ret < 0) ++ return dev_err_probe(&spi->dev, ret, ++ "Device tree error, Vref voltage undefined\n"); + } + st->int_vref_mv = ret / 1000; + +diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +index d98f7e4d202c1..1ddce991fb3f4 100644 +--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c ++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +@@ -190,8 +190,11 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev, + /* + * Ignore samples if the buffer is not set: it is needed if the ODR is + * set but the buffer is not enabled yet. ++ * ++ * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer ++ * is not enabled. + */ +- if (!iio_buffer_enabled(indio_dev)) ++ if (iio_device_claim_buffer_mode(indio_dev) < 0) + return 0; + + out = (s16 *)st->samples; +@@ -210,6 +213,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev, + iio_push_to_buffers_with_timestamp(indio_dev, st->samples, + timestamp + delta); + ++ iio_device_release_buffer_mode(indio_dev); + return 0; + } + EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data); +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index f3f8392623a46..c9614982cb671 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -2084,6 +2084,44 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev) + } + EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); + ++/** ++ * iio_device_claim_buffer_mode - Keep device in buffer mode ++ * @indio_dev: the iio_dev associated with the device ++ * ++ * If the device is in buffer mode it is guaranteed to stay ++ * that way until iio_device_release_buffer_mode() is called. ++ * ++ * Use with iio_device_release_buffer_mode(). ++ * ++ * Returns: 0 on success, -EBUSY on failure. 
++ */ ++int iio_device_claim_buffer_mode(struct iio_dev *indio_dev) ++{ ++ mutex_lock(&indio_dev->mlock); ++ ++ if (iio_buffer_enabled(indio_dev)) ++ return 0; ++ ++ mutex_unlock(&indio_dev->mlock); ++ return -EBUSY; ++} ++EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode); ++ ++/** ++ * iio_device_release_buffer_mode - releases claim on buffer mode ++ * @indio_dev: the iio_dev associated with the device ++ * ++ * Release the claim. Device is no longer guaranteed to stay ++ * in buffer mode. ++ * ++ * Use with iio_device_claim_buffer_mode(). ++ */ ++void iio_device_release_buffer_mode(struct iio_dev *indio_dev) ++{ ++ mutex_unlock(&indio_dev->mlock); ++} ++EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode); ++ + /** + * iio_device_get_current_mode() - helper function providing read-only access to + * the opaque @currentmode variable +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c +index 89cd48fcec79f..4a4bab9aa7263 100644 +--- a/drivers/mmc/core/mmc.c ++++ b/drivers/mmc/core/mmc.c +@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card) + case 3: /* MMC v3.1 - v3.3 */ + case 4: /* MMC v4 */ + card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); +- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); ++ card->cid.oemid = UNSTUFF_BITS(resp, 104, 8); + card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); + card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); + card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c +index f64b9ac76a5cd..5914516df2f7f 100644 +--- a/drivers/mmc/core/sdio.c ++++ b/drivers/mmc/core/sdio.c +@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host) + } + err = mmc_sdio_reinit_card(host); + } else if (mmc_card_wake_sdio_irq(host)) { +- /* We may have switched to 1-bit mode during suspend */ ++ /* ++ * We may have switched to 1-bit mode during suspend, ++ * need to hold retuning, because tuning only supprt ++ * 4-bit mode or 8 bit mode. 
++ */ ++ mmc_retune_hold_now(host); + err = sdio_enable_4bit_bus(host->card); ++ mmc_retune_release(host); + } + + if (err) +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c +index 1a0d4dc24717c..70e414027155d 100644 +--- a/drivers/mmc/host/mtk-sd.c ++++ b/drivers/mmc/host/mtk-sd.c +@@ -655,11 +655,11 @@ static void msdc_reset_hw(struct msdc_host *host) + u32 val; + + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST); +- readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0); ++ readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0); + + sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR); +- readl_poll_timeout(host->base + MSDC_FIFOCS, val, +- !(val & MSDC_FIFOCS_CLR), 0, 0); ++ readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val, ++ !(val & MSDC_FIFOCS_CLR), 0, 0); + + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); +diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c +index 4d509f6561887..c580ba089a261 100644 +--- a/drivers/mmc/host/sdhci-pci-gli.c ++++ b/drivers/mmc/host/sdhci-pci-gli.c +@@ -756,42 +756,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg) + return value; + } + +-#ifdef CONFIG_PM_SLEEP +-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip) +-{ +- struct sdhci_pci_slot *slot = chip->slots[0]; +- +- pci_free_irq_vectors(slot->chip->pdev); +- gli_pcie_enable_msi(slot); +- +- return sdhci_pci_resume_host(chip); +-} +- +-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip) +-{ +- struct sdhci_pci_slot *slot = chip->slots[0]; +- int ret; +- +- ret = sdhci_pci_gli_resume(chip); +- if (ret) +- return ret; +- +- return cqhci_resume(slot->host->mmc); +-} +- +-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip) +-{ +- struct sdhci_pci_slot *slot = chip->slots[0]; +- int ret; +- +- ret = cqhci_suspend(slot->host->mmc); +- if (ret) +- return ret; +- +- return sdhci_suspend_host(slot->host); +-} +-#endif +- + static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) + { +@@ -1040,6 +1004,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip) + } + #endif + ++#ifdef CONFIG_PM_SLEEP ++static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip) ++{ ++ struct sdhci_pci_slot *slot = chip->slots[0]; ++ ++ pci_free_irq_vectors(slot->chip->pdev); ++ gli_pcie_enable_msi(slot); ++ ++ return sdhci_pci_resume_host(chip); ++} ++ ++static int gl9763e_resume(struct sdhci_pci_chip *chip) ++{ ++ struct sdhci_pci_slot *slot = chip->slots[0]; ++ int ret; ++ ++ ret = sdhci_pci_gli_resume(chip); ++ if (ret) ++ return ret; ++ ++ ret = cqhci_resume(slot->host->mmc); ++ if (ret) ++ return ret; ++ ++ /* ++ * Disable LPM negotiation to bring device back in sync ++ * with its runtime_pm state. ++ */ ++ gl9763e_set_low_power_negotiation(slot, false); ++ ++ return 0; ++} ++ ++static int gl9763e_suspend(struct sdhci_pci_chip *chip) ++{ ++ struct sdhci_pci_slot *slot = chip->slots[0]; ++ int ret; ++ ++ /* ++ * Certain SoCs can suspend only with the bus in low- ++ * power state, notably x86 SoCs when using S0ix. ++ * Re-enable LPM negotiation to allow entering L1 state ++ * and entering system suspend. 
++ */ ++ gl9763e_set_low_power_negotiation(slot, true); ++ ++ ret = cqhci_suspend(slot->host->mmc); ++ if (ret) ++ goto err_suspend; ++ ++ ret = sdhci_suspend_host(slot->host); ++ if (ret) ++ goto err_suspend_host; ++ ++ return 0; ++ ++err_suspend_host: ++ cqhci_resume(slot->host->mmc); ++err_suspend: ++ gl9763e_set_low_power_negotiation(slot, false); ++ return ret; ++} ++#endif ++ + static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot) + { + struct pci_dev *pdev = slot->chip->pdev; +@@ -1147,8 +1175,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = { + .probe_slot = gli_probe_slot_gl9763e, + .ops = &sdhci_gl9763e_ops, + #ifdef CONFIG_PM_SLEEP +- .resume = sdhci_cqhci_gli_resume, +- .suspend = sdhci_cqhci_gli_suspend, ++ .resume = gl9763e_resume, ++ .suspend = gl9763e_suspend, + #endif + #ifdef CONFIG_PM + .runtime_suspend = gl9763e_runtime_suspend, +diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c +index c73854da51363..19dad5a23f944 100644 +--- a/drivers/mtd/maps/physmap-core.c ++++ b/drivers/mtd/maps/physmap-core.c +@@ -552,6 +552,17 @@ static int physmap_flash_probe(struct platform_device *dev) + if (info->probe_type) { + info->mtds[i] = do_map_probe(info->probe_type, + &info->maps[i]); ++ ++ /* Fall back to mapping region as ROM */ ++ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) && ++ strcmp(info->probe_type, "map_rom")) { ++ dev_warn(&dev->dev, ++ "map_probe() failed for type %s\n", ++ info->probe_type); ++ ++ info->mtds[i] = do_map_probe("map_rom", ++ &info->maps[i]); ++ } + } else { + int j; + +diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c +index ec7e6eeac55f9..e6ffe87a599eb 100644 +--- a/drivers/mtd/nand/raw/arasan-nand-controller.c ++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c +@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0); + dma_addr_t dma_addr; ++ u8 status; + int ret; + struct anfc_op nfc_op = { + .pkt_reg = +@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, + } + + /* Spare data is not protected */ +- if (oob_required) ++ if (oob_required) { + ret = nand_write_oob_std(chip, page); ++ if (ret) ++ return ret; ++ } + +- return ret; ++ /* Check write status on the chip side */ ++ ret = nand_status_op(chip, &status); ++ if (ret) ++ return ret; ++ ++ if (status & NAND_STATUS_FAIL) ++ return -EIO; ++ ++ return 0; + } + + static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, +diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c +index a57a1501449aa..d527c03630bce 100644 +--- a/drivers/mtd/nand/raw/marvell_nand.c ++++ b/drivers/mtd/nand/raw/marvell_nand.c +@@ -1154,6 +1154,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, + .ndcb[2] = NDCB2_ADDR5_PAGE(page), + }; + unsigned int oob_bytes = lt->spare_bytes + (raw ? 
lt->ecc_bytes : 0); ++ u8 status; + int ret; + + /* NFCv2 needs more information about the operation being executed */ +@@ -1187,7 +1188,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, + + ret = marvell_nfc_wait_op(chip, + PSEC_TO_MSEC(sdr->tPROG_max)); +- return ret; ++ if (ret) ++ return ret; ++ ++ /* Check write status on the chip side */ ++ ret = nand_status_op(chip, &status); ++ if (ret) ++ return ret; ++ ++ if (status & NAND_STATUS_FAIL) ++ return -EIO; ++ ++ return 0; + } + + static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip, +@@ -1616,6 +1628,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, + int data_len = lt->data_bytes; + int spare_len = lt->spare_bytes; + int chunk, ret; ++ u8 status; + + marvell_nfc_select_target(chip, chip->cur_cs); + +@@ -1652,6 +1665,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, + if (ret) + return ret; + ++ /* Check write status on the chip side */ ++ ret = nand_status_op(chip, &status); ++ if (ret) ++ return ret; ++ ++ if (status & NAND_STATUS_FAIL) ++ return -EIO; ++ + return 0; + } + +diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c +index 3c6f6aff649f8..7bcece135715d 100644 +--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c ++++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c +@@ -513,6 +513,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip, + u32 addr1 = 0, addr2 = 0, row; + u32 cmd_addr; + int i, ret; ++ u8 status; + + ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB); + if (ret) +@@ -565,6 +566,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip, + if (ret) + goto disable_ecc_engine; + ++ /* Check write status on the chip side */ ++ ret = nand_status_op(chip, &status); ++ if (ret) ++ goto disable_ecc_engine; ++ ++ if (status & NAND_STATUS_FAIL) ++ ret = -EIO; ++ + disable_ecc_engine: + pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS); + +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c +index 198a44794d2dc..fbf36cbcbb18d 100644 +--- a/drivers/mtd/nand/raw/qcom_nandc.c ++++ b/drivers/mtd/nand/raw/qcom_nandc.c +@@ -3310,7 +3310,7 @@ err_nandc_alloc: + err_aon_clk: + clk_disable_unprepare(nandc->core_clk); + err_core_clk: +- dma_unmap_resource(dev, res->start, resource_size(res), ++ dma_unmap_resource(dev, nandc->base_dma, resource_size(res), + DMA_BIDIRECTIONAL, 0); + return ret; + } +diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c +index 50b7295bc9222..12601bc4227a7 100644 +--- a/drivers/mtd/nand/spi/micron.c ++++ b/drivers/mtd/nand/spi/micron.c +@@ -12,7 +12,7 @@ + + #define SPINAND_MFR_MICRON 0x2c + +-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4) ++#define MICRON_STATUS_ECC_MASK GENMASK(6, 4) + #define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4) + #define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4) + #define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 7a3c7a74af04a..b170a3d8d007e 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -3990,7 +3990,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb, + if (likely(n <= hlen)) + return data; + else if (skb && likely(pskb_may_pull(skb, n))) +- return skb->head; ++ return skb->data; + + return NULL; + } +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 
72374b066f64a..cd1f240c90f39 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -617,17 +617,16 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); + if (!priv->master_mii_bus) { +- of_node_put(dn); +- return -EPROBE_DEFER; ++ err = -EPROBE_DEFER; ++ goto err_of_node_put; + } + +- get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + priv->slave_mii_bus = mdiobus_alloc(); + if (!priv->slave_mii_bus) { +- of_node_put(dn); +- return -ENOMEM; ++ err = -ENOMEM; ++ goto err_put_master_mii_bus_dev; + } + + priv->slave_mii_bus->priv = priv; +@@ -684,11 +683,17 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + } + + err = mdiobus_register(priv->slave_mii_bus); +- if (err && dn) { +- mdiobus_free(priv->slave_mii_bus); +- of_node_put(dn); +- } ++ if (err && dn) ++ goto err_free_slave_mii_bus; + ++ return 0; ++ ++err_free_slave_mii_bus: ++ mdiobus_free(priv->slave_mii_bus); ++err_put_master_mii_bus_dev: ++ put_device(&priv->master_mii_bus->dev); ++err_of_node_put: ++ of_node_put(dn); + return err; + } + +@@ -696,6 +701,7 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) + { + mdiobus_unregister(priv->slave_mii_bus); + mdiobus_free(priv->slave_mii_bus); ++ put_device(&priv->master_mii_bus->dev); + of_node_put(priv->master_mii_dn); + } + +diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +index a4256087ac828..5e45bef4fd34f 100644 +--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c ++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +@@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev, + struct sock *sk, long *timeo_p) + { + DEFINE_WAIT_FUNC(wait, woken_wake_function); +- int err = 0; ++ int ret, err = 0; + long current_timeo; + long vm_wait = 0; + bool noblock; +@@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev, + + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + sk->sk_write_pending++; +- sk_wait_event(sk, ¤t_timeo, sk->sk_err || +- (sk->sk_shutdown & SEND_SHUTDOWN) || +- (csk_mem_free(cdev, sk) && !vm_wait), &wait); ++ ret = sk_wait_event(sk, ¤t_timeo, sk->sk_err || ++ (sk->sk_shutdown & SEND_SHUTDOWN) || ++ (csk_mem_free(cdev, sk) && !vm_wait), ++ &wait); + sk->sk_write_pending--; ++ if (ret < 0) ++ goto do_error; + + if (vm_wait) { + vm_wait -= current_timeo; +@@ -1438,6 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int copied = 0; + int target; + long timeo; ++ int ret; + + buffers_freed = 0; + +@@ -1513,7 +1517,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + if (copied >= target) + break; + chtls_cleanup_rbuf(sk, copied); +- sk_wait_data(sk, &timeo, NULL); ++ ret = sk_wait_data(sk, &timeo, NULL); ++ if (ret < 0) { ++ copied = copied ? 
: ret; ++ goto unlock; ++ } + continue; + found_ok_skb: + if (!skb->len) { +@@ -1608,6 +1616,8 @@ skip_copy: + + if (buffers_freed) + chtls_cleanup_rbuf(sk, copied); ++ ++unlock: + release_sock(sk); + return copied; + } +@@ -1624,6 +1634,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, + int copied = 0; + size_t avail; /* amount of available data in current skb */ + long timeo; ++ int ret; + + lock_sock(sk); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); +@@ -1675,7 +1686,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, + release_sock(sk); + lock_sock(sk); + } else { +- sk_wait_data(sk, &timeo, NULL); ++ ret = sk_wait_data(sk, &timeo, NULL); ++ if (ret < 0) { ++ /* here 'copied' is 0 due to previous checks */ ++ copied = ret; ++ break; ++ } + } + + if (unlikely(peek_seq != tp->copied_seq)) { +@@ -1746,6 +1762,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int copied = 0; + long timeo; + int target; /* Read at least this many bytes */ ++ int ret; + + buffers_freed = 0; + +@@ -1837,7 +1854,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + if (copied >= target) + break; + chtls_cleanup_rbuf(sk, copied); +- sk_wait_data(sk, &timeo, NULL); ++ ret = sk_wait_data(sk, &timeo, NULL); ++ if (ret < 0) { ++ copied = copied ? : ret; ++ goto unlock; ++ } + continue; + + found_ok_skb: +@@ -1906,6 +1927,7 @@ skip_copy: + if (buffers_freed) + chtls_cleanup_rbuf(sk, copied); + ++unlock: + release_sock(sk); + return copied; + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c +index 82e06272158df..6266756b47b9d 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_common.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c +@@ -1082,7 +1082,7 @@ void i40e_clear_hw(struct i40e_hw *hw) + I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; + j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> + I40E_PFLAN_QALLOC_LASTQ_SHIFT; +- if (val & I40E_PFLAN_QALLOC_VALID_MASK) ++ if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue) + num_queues = (j - base_queue) + 1; + else + num_queues = 0; +@@ -1092,7 +1092,7 @@ void i40e_clear_hw(struct i40e_hw *hw) + I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; + j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> + I40E_PF_VT_PFALLOC_LASTVF_SHIFT; +- if (val & I40E_PF_VT_PFALLOC_VALID_MASK) ++ if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i) + num_vfs = (j - i) + 1; + else + num_vfs = 0; +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c +index 7276badfa19ea..c051503c3a892 100644 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c +@@ -1100,8 +1100,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) + + ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & + ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | +- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & +- ICE_AQ_VSI_Q_OPT_RSS_HASH_M); ++ (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + } + + static void +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 3f98781e74b28..f0f39364819ac 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -6,6 +6,7 @@ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include ++#include + #include "ice.h" + #include "ice_base.h" + #include "ice_lib.h" +@@ -4681,6 +4682,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) + return -EINVAL; + } + ++ 
/* when under a kdump kernel initiate a reset before enabling the ++ * device in order to clear out any pending DMA transactions. These ++ * transactions can cause some systems to machine check when doing ++ * the pcim_enable_device() below. ++ */ ++ if (is_kdump_kernel()) { ++ pci_save_state(pdev); ++ pci_clear_master(pdev); ++ err = pcie_flr(pdev); ++ if (err) ++ return err; ++ pci_restore_state(pdev); ++ } ++ + /* this driver uses devres, see + * Documentation/driver-api/driver-model/devres.rst + */ +@@ -4708,7 +4723,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) + return err; + } + +- pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + pf->pdev = pdev; +@@ -5001,7 +5015,6 @@ err_init_pf_unroll: + ice_devlink_destroy_regions(pf); + ice_deinit_hw(hw); + err_exit_unroll: +- pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + return err; + } +@@ -5127,7 +5140,6 @@ static void ice_remove(struct pci_dev *pdev) + ice_reset(&pf->hw, ICE_RESET_PFR); + pci_wait_for_pending_transaction(pdev); + ice_clear_interrupt_scheme(pf); +- pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } + +diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h +index d3b17aa1d1a83..43c05b41627f7 100644 +--- a/drivers/net/ethernet/intel/igc/igc.h ++++ b/drivers/net/ethernet/intel/igc/igc.h +@@ -183,9 +183,11 @@ struct igc_adapter { + u32 max_frame_size; + u32 min_frame_size; + ++ int tc_setup_type; + ktime_t base_time; + ktime_t cycle_time; + bool qbv_enable; ++ u32 qbv_config_change_errors; + + /* OS defined structs */ + struct pci_dev *pdev; +@@ -228,6 +230,10 @@ struct igc_adapter { + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; ++ /* Access to ptp_tx_skb and ptp_tx_start are protected by the ++ * ptp_tx_lock. 
++ */ ++ spinlock_t ptp_tx_lock; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; +@@ -429,7 +435,6 @@ enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, +- __IGC_PTP_TX_IN_PROGRESS, + }; + + enum igc_tx_flags { +diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c +index a15927e772720..a1d815af507d9 100644 +--- a/drivers/net/ethernet/intel/igc/igc_base.c ++++ b/drivers/net/ethernet/intel/igc/igc_base.c +@@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw) + rd32(IGC_MPC); + } + ++bool igc_is_device_id_i225(struct igc_hw *hw) ++{ ++ switch (hw->device_id) { ++ case IGC_DEV_ID_I225_LM: ++ case IGC_DEV_ID_I225_V: ++ case IGC_DEV_ID_I225_I: ++ case IGC_DEV_ID_I225_K: ++ case IGC_DEV_ID_I225_K2: ++ case IGC_DEV_ID_I225_LMVP: ++ case IGC_DEV_ID_I225_IT: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++bool igc_is_device_id_i226(struct igc_hw *hw) ++{ ++ switch (hw->device_id) { ++ case IGC_DEV_ID_I226_LM: ++ case IGC_DEV_ID_I226_V: ++ case IGC_DEV_ID_I226_K: ++ case IGC_DEV_ID_I226_IT: ++ return true; ++ default: ++ return false; ++ } ++} ++ + static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, +diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h +index 52849f5e8048d..9f3827eda157c 100644 +--- a/drivers/net/ethernet/intel/igc/igc_base.h ++++ b/drivers/net/ethernet/intel/igc/igc_base.h +@@ -7,6 +7,8 @@ + /* forward declaration */ + void igc_rx_fifo_flush_base(struct igc_hw *hw); + void igc_power_down_phy_copper_base(struct igc_hw *hw); ++bool igc_is_device_id_i225(struct igc_hw *hw); ++bool igc_is_device_id_i226(struct igc_hw *hw); + + /* Transmit Descriptor - Advanced */ + union igc_adv_tx_desc { +diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h +index 90ca01889cd82..efdabcbd66ddd 100644 +--- a/drivers/net/ethernet/intel/igc/igc_defines.h ++++ b/drivers/net/ethernet/intel/igc/igc_defines.h +@@ -515,6 +515,7 @@ + /* Transmit Scheduling */ + #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 + #define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 ++#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 + + #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 + #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index 9166fde40c772..e23b95edb05ef 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -67,6 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), ++ IGC_STAT("qbv_config_change_errors", qbv_config_change_errors), + }; + + #define IGC_NETDEV_STAT(_net_stat) { \ +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index 1ac836a55cd31..4b6f882b380dc 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -1606,9 +1606,10 @@ done: + * the other timer registers before skipping the + * timestamping request. 
+ */ +- if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && +- !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, +- &adapter->state)) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&adapter->ptp_tx_lock, flags); ++ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && !adapter->ptp_tx_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + +@@ -1617,6 +1618,8 @@ done: + } else { + adapter->tx_hwtstamp_skipped++; + } ++ ++ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + if (skb_vlan_tag_present(skb)) { +@@ -6035,6 +6038,7 @@ static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) + { + int queue_uses[IGC_MAX_TX_QUEUES] = { }; ++ struct igc_hw *hw = &adapter->hw; + struct timespec64 now; + size_t n; + +@@ -6047,8 +6051,10 @@ static bool validate_schedule(struct igc_adapter *adapter, + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. ++ * Note: Limitation above is no longer in i226. + */ +- if (!is_base_time_past(qopt->base_time, &now)) ++ if (!is_base_time_past(qopt->base_time, &now) && ++ igc_is_device_id_i225(hw)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { +@@ -6103,6 +6109,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter) + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; ++ adapter->qbv_config_change_errors = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; +@@ -6118,6 +6125,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) + { + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; ++ struct igc_hw *hw = &adapter->hw; + u32 start_time = 0, end_time = 0; + size_t n; + int i; +@@ -6130,7 +6138,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, + if (qopt->base_time < 0) + return -ERANGE; + +- if (adapter->base_time) ++ if (igc_is_device_id_i225(hw) && adapter->base_time) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) +@@ -6283,6 +6291,8 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + { + struct igc_adapter *adapter = netdev_priv(dev); + ++ adapter->tc_setup_type = type; ++ + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c +index d96cdccdc1e1e..14cd7f995280d 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c +@@ -622,6 +622,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + return 0; + } + ++/* Requires adapter->ptp_tx_lock held by caller. */ + static void igc_ptp_tx_timeout(struct igc_adapter *adapter) + { + struct igc_hw *hw = &adapter->hw; +@@ -629,7 +630,6 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter) + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; +- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. 
*/ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +@@ -637,20 +637,20 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter) + + void igc_ptp_tx_hang(struct igc_adapter *adapter) + { +- bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + +- IGC_PTP_TX_TIMEOUT); ++ unsigned long flags; + +- if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) +- return; ++ spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + +- /* If we haven't received a timestamp within the timeout, it is +- * reasonable to assume that it will never occur, so we can unlock the +- * timestamp bit when this occurs. +- */ +- if (timeout) { +- cancel_work_sync(&adapter->ptp_tx_work); +- igc_ptp_tx_timeout(adapter); +- } ++ if (!adapter->ptp_tx_skb) ++ goto unlock; ++ ++ if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT)) ++ goto unlock; ++ ++ igc_ptp_tx_timeout(adapter); ++ ++unlock: ++ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + /** +@@ -660,6 +660,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter) + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. ++ * ++ * Context: Expects adapter->ptp_tx_lock to be held by caller. + */ + static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) + { +@@ -695,13 +697,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + +- /* Clear the lock early before calling skb_tstamp_tx so that +- * applications are not woken up before the lock bit is clear. We use +- * a copy of the skb pointer to ensure other threads can't change it +- * while we're notifying the stack. +- */ + adapter->ptp_tx_skb = NULL; +- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); +@@ -712,24 +708,33 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) + * igc_ptp_tx_work + * @work: pointer to work struct + * +- * This work function polls the TSYNCTXCTL valid bit to determine when a +- * timestamp has been taken for the current stored skb. ++ * This work function checks the TSYNCTXCTL valid bit to determine when ++ * a timestamp has been taken for the current stored skb. 
+ */ + static void igc_ptp_tx_work(struct work_struct *work) + { + struct igc_adapter *adapter = container_of(work, struct igc_adapter, + ptp_tx_work); + struct igc_hw *hw = &adapter->hw; ++ unsigned long flags; + u32 tsynctxctl; + +- if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) +- return; ++ spin_lock_irqsave(&adapter->ptp_tx_lock, flags); ++ ++ if (!adapter->ptp_tx_skb) ++ goto unlock; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); +- if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) +- return; ++ tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0; ++ if (!tsynctxctl) { ++ WARN_ONCE(1, "Received a TSTAMP interrupt but no TSTAMP is ready.\n"); ++ goto unlock; ++ } + + igc_ptp_tx_hwtstamp(adapter); ++ ++unlock: ++ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + /** +@@ -978,6 +983,7 @@ void igc_ptp_init(struct igc_adapter *adapter) + return; + } + ++ spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work); + +@@ -1042,7 +1048,6 @@ void igc_ptp_suspend(struct igc_adapter *adapter) + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; +- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); +diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c +index 356c7455c5cee..725db36e399d2 100644 +--- a/drivers/net/ethernet/intel/igc/igc_tsn.c ++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c +@@ -2,6 +2,7 @@ + /* Copyright (c) 2019 Intel Corporation */ + + #include "igc.h" ++#include "igc_hw.h" + #include "igc_tsn.h" + + static bool is_any_launchtime(struct igc_adapter *adapter) +@@ -62,7 +63,8 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | +- IGC_TQAVCTRL_ENHANCED_QAV); ++ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); ++ + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { +@@ -82,25 +84,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) + static int igc_tsn_enable_offload(struct igc_adapter *adapter) + { + struct igc_hw *hw = &adapter->hw; ++ bool tsn_mode_reconfig = false; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + +- cycle = adapter->cycle_time; +- base_time = adapter->base_time; +- + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + +- tqavctrl = rd32(IGC_TQAVCTRL); +- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; +- wr32(IGC_TQAVCTRL, tqavctrl); +- +- wr32(IGC_QBVCYCLET_S, cycle); +- wr32(IGC_QBVCYCLET, cycle); +- + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; +@@ -203,21 +196,58 @@ skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + ++ tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; ++ ++ if (tqavctrl & IGC_TQAVCTRL_TRANSMIT_MODE_TSN) ++ tsn_mode_reconfig = true; ++ ++ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; ++ ++ cycle = adapter->cycle_time; ++ base_time = adapter->base_time; ++ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); +- + if (ktime_compare(systim, base_time) > 0) { +- s64 n; ++ s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + +- n = div64_s64(ktime_sub_ns(systim, 
base_time), cycle); + base_time = ktime_add_ns(base_time, (n + 1) * cycle); ++ ++ /* Increase the counter if scheduling into the past while ++ * Gate Control List (GCL) is running. ++ */ ++ if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && ++ (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && ++ tsn_mode_reconfig) ++ adapter->qbv_config_change_errors++; ++ } else { ++ /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit ++ * has to be configured before the cycle time and base time. ++ * Tx won't hang if there is a GCL is already running, ++ * so in this case we don't need to set FutScdDis. ++ */ ++ if (igc_is_device_id_i226(hw) && ++ !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) ++ tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS; + } + +- baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); ++ wr32(IGC_TQAVCTRL, tqavctrl); + ++ wr32(IGC_QBVCYCLET_S, cycle); ++ wr32(IGC_QBVCYCLET, cycle); ++ ++ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + wr32(IGC_BASET_H, baset_h); ++ ++ /* In i226, Future base time is only supported when FutScdDis bit ++ * is enabled and only active for re-configuration. ++ * In this case, initialize the base time with zero to create ++ * "re-configuration" scenario then only set the desired base time. ++ */ ++ if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) ++ wr32(IGC_BASET_L, 0); + wr32(IGC_BASET_L, baset_l); + + return 0; +@@ -244,17 +274,14 @@ int igc_tsn_reset(struct igc_adapter *adapter) + + int igc_tsn_offload_apply(struct igc_adapter *adapter) + { +- int err; ++ struct igc_hw *hw = &adapter->hw; + +- if (netif_running(adapter->netdev)) { ++ if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) { + schedule_work(&adapter->reset_task); + return 0; + } + +- err = igc_tsn_enable_offload(adapter); +- if (err < 0) +- return err; ++ igc_tsn_reset(adapter); + +- adapter->flags = igc_tsn_new_flags(adapter); + return 0; + } +diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +index 61354f7985035..e171097c13654 100644 +--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c ++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +@@ -707,20 +707,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb, + hw_desc->dptr = tx_buffer->sglist_dma; + } + +- /* Flush the hw descriptor before writing to doorbell */ +- wmb(); +- +- /* Ring Doorbell to notify the NIC there is a new packet */ +- writel(1, iq->doorbell_reg); ++ netdev_tx_sent_queue(iq->netdev_q, skb->len); ++ skb_tx_timestamp(skb); + atomic_inc(&iq->instr_pending); + wi++; + if (wi == iq->max_count) + wi = 0; + iq->host_write_index = wi; ++ /* Flush the hw descriptor before writing to doorbell */ ++ wmb(); + +- netdev_tx_sent_queue(iq->netdev_q, skb->len); ++ /* Ring Doorbell to notify the NIC there is a new packet */ ++ writel(1, iq->doorbell_reg); + iq->stats.instr_posted++; +- skb_tx_timestamp(skb); + return NETDEV_TX_OK; + + dma_map_sg_err: +diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h +index ddec1627f1a7b..8d0bacf4e49cc 100644 +--- a/drivers/net/ethernet/marvell/sky2.h ++++ b/drivers/net/ethernet/marvell/sky2.h +@@ -2195,7 +2195,7 @@ struct rx_ring_info { + struct sk_buff *skb; + dma_addr_t data_addr; + DEFINE_DMA_UNMAP_LEN(data_size); +- dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; ++ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; + }; + + enum flow_control { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c 
b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +index c4e40834e3ff9..374c0011a127b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +@@ -821,7 +821,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work) + + mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); + if (tracer->owner) { +- tracer->owner = false; ++ mlx5_fw_tracer_ownership_acquire(tracer); + return; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +index cd15d36b1507e..907ad6ffe7275 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +@@ -23,7 +23,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv, + + route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex); + +- if (!route_dev || !netif_is_ovs_master(route_dev)) ++ if (!route_dev || !netif_is_ovs_master(route_dev) || ++ attr->parse_attr->filter_dev == e->out_dev) + goto out; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 4b9d567c8f473..48939c72b5925 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -969,11 +969,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) + return ERR_PTR(err); + } + +-static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) ++static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw) + { +- MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); +- mlx5_eq_notifier_register(esw->dev, &esw->nb); +- + if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { + MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler, + ESW_FUNCTIONS_CHANGED); +@@ -981,13 +978,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) + } + } + +-static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) ++static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw) + { + if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) + mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); + +- mlx5_eq_notifier_unregister(esw->dev, &esw->nb); +- + flush_workqueue(esw->work_queue); + } + +@@ -1273,6 +1268,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) + + mlx5_eswitch_update_num_of_vfs(esw, num_vfs); + ++ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); ++ mlx5_eq_notifier_register(esw->dev, &esw->nb); ++ + if (esw->mode == MLX5_ESWITCH_LEGACY) { + err = esw_legacy_enable(esw); + } else { +@@ -1285,7 +1283,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) + + esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED; + +- mlx5_eswitch_event_handlers_register(esw); ++ mlx5_eswitch_event_handler_register(esw); + + esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", + esw->mode == MLX5_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", +@@ -1394,7 +1392,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) + */ + mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY); + +- mlx5_eswitch_event_handlers_unregister(esw); ++ mlx5_eq_notifier_unregister(esw->dev, &esw->nb); ++ mlx5_eswitch_event_handler_unregister(esw); + + esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", + esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c +index ed274f033626d..810df65cdf085 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c +@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt, + static int qed_ll2_alloc_buffer(struct qed_dev *cdev, + u8 **data, dma_addr_t *phys_addr) + { +- *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC); ++ size_t size = cdev->ll2->rx_size + NET_SKB_PAD + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ ++ *data = kmalloc(size, GFP_ATOMIC); + if (!(*data)) { + DP_INFO(cdev, "Failed to allocate LL2 buffer data\n"); + return -ENOMEM; +@@ -2590,7 +2593,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) + INIT_LIST_HEAD(&cdev->ll2->list); + spin_lock_init(&cdev->ll2->lock); + +- cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN + ++ cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN + + L1_CACHE_BYTES + params->mtu; + + /* Allocate memory for LL2. +diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c +index 6cebf3aaa621f..dc5b27cb48fb0 100644 +--- a/drivers/net/phy/bcm7xxx.c ++++ b/drivers/net/phy/bcm7xxx.c +@@ -907,6 +907,9 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev) + .name = _name, \ + /* PHY_BASIC_FEATURES */ \ + .flags = PHY_IS_INTERNAL, \ ++ .get_sset_count = bcm_phy_get_sset_count, \ ++ .get_strings = bcm_phy_get_strings, \ ++ .get_stats = bcm7xxx_28nm_get_phy_stats, \ + .probe = bcm7xxx_28nm_probe, \ + .remove = bcm7xxx_28nm_remove, \ + .config_init = bcm7xxx_16nm_ephy_config_init, \ +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 7544df1ff50ec..d373953ddc300 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -3056,10 +3056,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, + struct net *net = sock_net(&tfile->sk); + struct tun_struct *tun; + void __user* argp = (void __user*)arg; +- unsigned int ifindex, carrier; ++ unsigned int carrier; + struct ifreq ifr; + kuid_t owner; + kgid_t group; ++ int ifindex; + int sndbuf; + int vnet_hdr_sz; + int le; +@@ -3115,7 +3116,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, + ret = -EFAULT; + if (copy_from_user(&ifindex, argp, sizeof(ifindex))) + goto unlock; +- ++ ret = -EINVAL; ++ if (ifindex < 0) ++ goto unlock; + ret = 0; + tfile->ifindex = ifindex; + goto unlock; +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c +index 563ecd27b93ea..17da42fe605c3 100644 +--- a/drivers/net/usb/smsc95xx.c ++++ b/drivers/net/usb/smsc95xx.c +@@ -897,7 +897,7 @@ static int smsc95xx_reset(struct usbnet *dev) + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n"); +- return ret; ++ return -ETIMEDOUT; + } + + ret = smsc95xx_set_mac_address(dev); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +index 542cfcad6e0e6..2d01f6226b7c6 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c ++++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +@@ -1585,6 +1585,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); ++ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); + + /* inform mac80211 about what happened with the frame */ + switch (status & TX_STATUS_MSK) { +@@ -1936,6 +1937,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, + */ + if (!is_flush) + info->flags |= IEEE80211_TX_STAT_ACK; ++ else ++ info->flags &= ~IEEE80211_TX_STAT_ACK; + } + + /* +diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +index 7351acac6932d..54ab8b54369ba 100644 +--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c ++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +@@ -921,6 +921,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, + while (tlv_buf_left >= sizeof(*tlv_rxba)) { + tlv_type = le16_to_cpu(tlv_rxba->header.type); + tlv_len = le16_to_cpu(tlv_rxba->header.len); ++ if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) { ++ mwifiex_dbg(priv->adapter, WARN, ++ "TLV size (%zu) overflows event_buf buf_left=%d\n", ++ size_add(sizeof(tlv_rxba->header), tlv_len), ++ tlv_buf_left); ++ return; ++ } ++ + if (tlv_type != TLV_TYPE_RXBA_SYNC) { + mwifiex_dbg(priv->adapter, ERROR, + "Wrong TLV id=0x%x\n", tlv_type); +@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, + + tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num); + tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len); ++ if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) { ++ mwifiex_dbg(priv->adapter, WARN, ++ "TLV size (%zu) overflows event_buf buf_left=%d\n", ++ size_add(sizeof(*tlv_rxba), tlv_bitmap_len), ++ tlv_buf_left); ++ return; ++ } ++ + mwifiex_dbg(priv->adapter, INFO, + "%pM tid=%d seq_num=%d bitmap_len=%d\n", + tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, +diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c +index 8224675f8de25..b33004a4bcb5a 100644 +--- a/drivers/nvme/host/ioctl.c ++++ b/drivers/nvme/host/ioctl.c +@@ -32,9 +32,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf, + if (!buf) + goto out; + +- ret = -EFAULT; +- if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len)) +- goto out_free_meta; ++ if (req_op(req) == REQ_OP_DRV_OUT) { ++ ret = -EFAULT; ++ if (copy_from_user(buf, ubuf, len)) ++ goto out_free_meta; ++ } else { ++ memset(buf, 0, len); ++ } + + bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); + if (IS_ERR(bip)) { +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 64990a2cfd0a7..886c3fc9578e4 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3439,7 +3439,8 @@ static const struct pci_device_id nvme_id_table[] = { + { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES | +- NVME_QUIRK_IGNORE_DEV_SUBNQN, }, ++ NVME_QUIRK_IGNORE_DEV_SUBNQN | ++ NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES, }, +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c +index c478480f54aa2..aa1734e2fd44e 100644 +--- a/drivers/nvme/host/rdma.c ++++ b/drivers/nvme/host/rdma.c +@@ -643,6 +643,9 @@ static void __nvme_rdma_stop_queue(struct 
nvme_rdma_queue *queue) + + static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) + { ++ if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) ++ return; ++ + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) + __nvme_rdma_stop_queue(queue); +diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c +index 7970a7640e585..fbae76cdc2546 100644 +--- a/drivers/nvme/target/fabrics-cmd-auth.c ++++ b/drivers/nvme/target/fabrics-cmd-auth.c +@@ -337,19 +337,21 @@ done: + __func__, ctrl->cntlid, req->sq->qid, + status, req->error_loc); + req->cqe->result.u64 = 0; +- nvmet_req_complete(req, status); + if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && + req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { + unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120; + + mod_delayed_work(system_wq, &req->sq->auth_expired_work, + auth_expire_secs * HZ); +- return; ++ goto complete; + } + /* Final states, clear up variables */ + nvmet_auth_sq_free(req->sq); + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) + nvmet_ctrl_fatal_error(ctrl); ++ ++complete: ++ nvmet_req_complete(req, status); + } + + static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al) +@@ -527,11 +529,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) + kfree(d); + done: + req->cqe->result.u64 = 0; +- nvmet_req_complete(req, status); ++ + if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) + nvmet_auth_sq_free(req->sq); + else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { + nvmet_auth_sq_free(req->sq); + nvmet_ctrl_fatal_error(ctrl); + } ++ nvmet_req_complete(req, status); + } +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index 5e29da94f72d6..355d80323b836 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -345,6 +345,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) + + static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) + { ++ queue->rcv_state = NVMET_TCP_RECV_ERR; + if (status == -EPIPE || status == -ECONNRESET) + kernel_sock_shutdown(queue->sock, SHUT_RDWR); + else +@@ -871,15 +872,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) + iov.iov_len = sizeof(*icresp); + ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); + if (ret < 0) +- goto free_crypto; ++ return ret; /* queue removal will cleanup */ + + queue->state = NVMET_TCP_Q_LIVE; + nvmet_prepare_receive_pdu(queue); + return 0; +-free_crypto: +- if (queue->hdr_digest || queue->data_digest) +- nvmet_tcp_free_crypto(queue); +- return ret; + } + + static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, +diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c +index 3cd4d51c247c3..67802f9e40ba0 100644 +--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c ++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c +@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x) + { + struct phy_mdm6600 *ddata = phy_get_drvdata(x); + struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE]; +- int error; + + if (!ddata->enabled) + return -ENODEV; + +- error = pinctrl_pm_select_default_state(ddata->dev); +- if (error) +- dev_warn(ddata->dev, "%s: error with default_state: %i\n", +- __func__, error); +- + gpiod_set_value_cansleep(enable_gpio, 1); + + /* Allow aggressive PM for USB, it's only needed for n_gsm port */ 
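The phy-mapphone-mdm6600 hunks around this point move the pinctrl default/sleep state selection out of the PHY power_on()/power_off() callbacks and into the device power-off path, so the modem's reset line keeps its padconf pull-up while the SoC idles. For context, a minimal sketch of how pinctrl_pm_select_default_state() and pinctrl_pm_select_sleep_state() are typically paired with runtime PM; the foo_* names are hypothetical and this is an illustration, not code from the patch:

#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver: park the pins whenever the device idles. */
static int foo_runtime_suspend(struct device *dev)
{
        /* Select the "sleep" pinctrl state, e.g. to enable pull-ups. */
        return pinctrl_pm_select_sleep_state(dev);
}

static int foo_runtime_resume(struct device *dev)
{
        /* Restore the functional ("default") pin mux before use. */
        return pinctrl_pm_select_default_state(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
        RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

The patch applies the same idea by hand: the sleep state is entered from phy_mdm6600_device_power_off() rather than from a dedicated PM callback, since the pull-up is only wanted once the modem is fully shut down.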
+@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x) + + gpiod_set_value_cansleep(enable_gpio, 0); + +- error = pinctrl_pm_select_sleep_state(ddata->dev); +- if (error) +- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n", +- __func__, error); +- + return 0; + } + +@@ -456,6 +445,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata) + { + struct gpio_desc *reset_gpio = + ddata->ctrl_gpios[PHY_MDM6600_RESET]; ++ int error; + + ddata->enabled = false; + phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ); +@@ -471,6 +461,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata) + } else { + dev_err(ddata->dev, "Timed out powering down\n"); + } ++ ++ /* ++ * Keep reset gpio high with padconf internal pull-up resistor to ++ * prevent modem from waking up during deeper SoC idle states. The ++ * gpio bank lines can have glitches if not in the always-on wkup ++ * domain. ++ */ ++ error = pinctrl_pm_select_sleep_state(ddata->dev); ++ if (error) ++ dev_warn(ddata->dev, "%s: error with sleep_state: %i\n", ++ __func__, error); + } + + static void phy_mdm6600_deferred_power_on(struct work_struct *work) +@@ -571,12 +572,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev) + ddata->dev = &pdev->dev; + platform_set_drvdata(pdev, ddata); + +- /* Active state selected in phy_mdm6600_power_on() */ +- error = pinctrl_pm_select_sleep_state(ddata->dev); +- if (error) +- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n", +- __func__, error); +- + error = phy_mdm6600_init_lines(ddata); + if (error) + return error; +@@ -627,10 +622,12 @@ idle: + pm_runtime_put_autosuspend(ddata->dev); + + cleanup: +- if (error < 0) ++ if (error < 0) { + phy_mdm6600_device_power_off(ddata); +- pm_runtime_disable(ddata->dev); +- pm_runtime_dont_use_autosuspend(ddata->dev); ++ pm_runtime_disable(ddata->dev); ++ pm_runtime_dont_use_autosuspend(ddata->dev); ++ } ++ + return error; + } + +@@ -639,6 +636,7 @@ static int phy_mdm6600_remove(struct platform_device *pdev) + struct phy_mdm6600 *ddata = platform_get_drvdata(pdev); + struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET]; + ++ pm_runtime_get_noresume(ddata->dev); + pm_runtime_dont_use_autosuspend(ddata->dev); + pm_runtime_put_sync(ddata->dev); + pm_runtime_disable(ddata->dev); +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c +index 27e41873c04ff..9e57f4c62e609 100644 +--- a/drivers/pinctrl/core.c ++++ b/drivers/pinctrl/core.c +@@ -1007,20 +1007,17 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev, + + static struct pinctrl *find_pinctrl(struct device *dev) + { +- struct pinctrl *entry, *p = NULL; ++ struct pinctrl *p; + + mutex_lock(&pinctrl_list_mutex); +- +- list_for_each_entry(entry, &pinctrl_list, node) { +- if (entry->dev == dev) { +- p = entry; +- kref_get(&p->users); +- break; ++ list_for_each_entry(p, &pinctrl_list, node) ++ if (p->dev == dev) { ++ mutex_unlock(&pinctrl_list_mutex); ++ return p; + } +- } + + mutex_unlock(&pinctrl_list_mutex); +- return p; ++ return NULL; + } + + static void pinctrl_free(struct pinctrl *p, bool inlist); +@@ -1129,6 +1126,7 @@ struct pinctrl *pinctrl_get(struct device *dev) + p = find_pinctrl(dev); + if (p) { + dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n"); ++ kref_get(&p->users); + return p; + } + +diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c +index fbf2e11fd6ce7..37c761f577149 100644 +--- 
a/drivers/platform/surface/surface_platform_profile.c ++++ b/drivers/platform/surface/surface_platform_profile.c +@@ -159,8 +159,7 @@ static int surface_platform_profile_probe(struct ssam_device *sdev) + set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices); + set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices); + +- platform_profile_register(&tpd->handler); +- return 0; ++ return platform_profile_register(&tpd->handler); + } + + static void surface_platform_profile_remove(struct ssam_device *sdev) +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c +index d85d895fee894..df1db54d4e183 100644 +--- a/drivers/platform/x86/asus-nb-wmi.c ++++ b/drivers/platform/x86/asus-nb-wmi.c +@@ -531,6 +531,9 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) + static const struct key_entry asus_nb_wmi_keymap[] = { + { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } }, + { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } }, ++ { KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } }, ++ { KE_IGNORE, 0x2b, }, /* PrintScreen (also send via PS/2) on newer models */ ++ { KE_IGNORE, 0x2c, }, /* CapsLock (also send via PS/2) on newer models */ + { KE_KEY, 0x30, { KEY_VOLUMEUP } }, + { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0x32, { KEY_MUTE } }, +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 36effe04c6f33..49dd55b8e8faf 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -3268,7 +3268,6 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) + { + unsigned int key_value = 1; + bool autorelease = 1; +- int orig_code = code; + + if (asus->driver->key_filter) { + asus->driver->key_filter(asus->driver, &code, &key_value, +@@ -3277,16 +3276,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) + return; + } + +- if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) +- code = ASUS_WMI_BRN_UP; +- else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) +- code = ASUS_WMI_BRN_DOWN; +- +- if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) { +- if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { +- asus_wmi_backlight_notify(asus, orig_code); +- return; +- } ++ if (acpi_video_get_backlight_type() == acpi_backlight_vendor && ++ code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) { ++ asus_wmi_backlight_notify(asus, code); ++ return; + } + + if (code == NOTIFY_KBD_BRTUP) { +diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h +index a478ebfd34dfa..fc41d1b1bb7f8 100644 +--- a/drivers/platform/x86/asus-wmi.h ++++ b/drivers/platform/x86/asus-wmi.h +@@ -18,7 +18,7 @@ + #include + + #define ASUS_WMI_KEY_IGNORE (-1) +-#define ASUS_WMI_BRN_DOWN 0x20 ++#define ASUS_WMI_BRN_DOWN 0x2e + #define ASUS_WMI_BRN_UP 0x2f + + struct module; +diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +index fa8f14c925ec3..9b12fe8e95c91 100644 +--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c ++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +@@ -153,7 +153,7 @@ show_uncore_data(initial_max_freq_khz); + + static int create_attr_group(struct uncore_data *data, char *name) + { +- int ret, index = 0; ++ int ret, freq, index = 0; + + init_attribute_rw(max_freq_khz); + init_attribute_rw(min_freq_khz); +@@ -165,7 +165,11 @@ static int 
create_attr_group(struct uncore_data *data, char *name) + data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr; + data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr; + data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr; +- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr; ++ ++ ret = uncore_read_freq(data, &freq); ++ if (!ret) ++ data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr; ++ + data->uncore_attrs[index] = NULL; + + data->uncore_attr_group.name = name; +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c +index 68e66b60445c3..9a92d515abb9b 100644 +--- a/drivers/platform/x86/touchscreen_dmi.c ++++ b/drivers/platform/x86/touchscreen_dmi.c +@@ -740,6 +740,21 @@ static const struct ts_dmi_data pipo_w11_data = { + .properties = pipo_w11_props, + }; + ++static const struct property_entry positivo_c4128b_props[] = { ++ PROPERTY_ENTRY_U32("touchscreen-min-x", 4), ++ PROPERTY_ENTRY_U32("touchscreen-min-y", 13), ++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1915), ++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1269), ++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"), ++ PROPERTY_ENTRY_U32("silead,max-fingers", 10), ++ { } ++}; ++ ++static const struct ts_dmi_data positivo_c4128b_data = { ++ .acpi_name = "MSSL1680:00", ++ .properties = positivo_c4128b_props, ++}; ++ + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 32), + PROPERTY_ENTRY_U32("touchscreen-min-y", 16), +@@ -1457,6 +1472,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { + DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"), + }, + }, ++ { ++ /* Positivo C4128B */ ++ .driver_data = (void *)&positivo_c4128b_data, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"), ++ }, ++ }, + { + /* Point of View mobii wintab p800w (v2.0) */ + .driver_data = (void *)&pov_mobii_wintab_p800w_v20_data, +diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig +index a8c46ba5878fe..54201f0374104 100644 +--- a/drivers/power/reset/Kconfig ++++ b/drivers/power/reset/Kconfig +@@ -299,7 +299,7 @@ config NVMEM_REBOOT_MODE + + config POWER_MLXBF + tristate "Mellanox BlueField power handling driver" +- depends on (GPIO_MLXBF2 && ACPI) ++ depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI + help + This driver supports reset or low power mode handling for Mellanox BlueField. 
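The intel uncore-frequency hunk just above changes create_attr_group() to expose current_freq_khz only when a trial uncore_read_freq() succeeds, so hardware without the backing register never publishes a sysfs file that can only return errors. A minimal sketch of that probe-before-publish shape, with hypothetical qux_* names standing in for the driver's helpers:

#include <linux/device.h>
#include <linux/sysfs.h>

struct qux_data {
        struct device_attribute base_attr;      /* always exposed */
        struct device_attribute cur_attr;       /* optional */
        struct attribute *attrs[3];             /* NULL-terminated */
};

/* Stub standing in for a hardware read; 0 means success. */
static int qux_read_value(struct qux_data *data, int *value)
{
        *value = 0;
        return 0;
}

static void qux_fill_attrs(struct qux_data *data)
{
        int value, index = 0;

        data->attrs[index++] = &data->base_attr.attr;

        /* Publish the optional attribute only if a read works now. */
        if (!qux_read_value(data, &value))
                data->attrs[index++] = &data->cur_attr.attr;

        data->attrs[index] = NULL;              /* sysfs terminator */
}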
+ +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index f6a95f72af18d..34d3d82819064 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -5725,15 +5725,11 @@ wash: + mutex_lock(®ulator_list_mutex); + regulator_ena_gpio_free(rdev); + mutex_unlock(®ulator_list_mutex); +- put_device(&rdev->dev); +- rdev = NULL; + clean: + if (dangling_of_gpiod) + gpiod_put(config->ena_gpiod); +- if (rdev && rdev->dev.of_node) +- of_node_put(rdev->dev.of_node); +- kfree(rdev); + kfree(config); ++ put_device(&rdev->dev); + rinse: + if (dangling_cfg_gpiod) + gpiod_put(cfg->ena_gpiod); +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c +index c7db953985002..98a14c1f3d672 100644 +--- a/drivers/s390/cio/css.c ++++ b/drivers/s390/cio/css.c +@@ -233,17 +233,19 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, + */ + ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31)); + if (ret) +- goto err; ++ goto err_lock; + /* + * But we don't have such restrictions imposed on the stuff that + * is handled by the streaming API. + */ + ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64)); + if (ret) +- goto err; ++ goto err_lock; + + return sch; + ++err_lock: ++ kfree(sch->lock); + err: + kfree(sch); + return ERR_PTR(ret); +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c +index adc85e250822c..2e21f74a24705 100644 +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -32,6 +32,7 @@ + #include "8250.h" + + #define DEFAULT_CLK_SPEED 48000000 ++#define OMAP_UART_REGSHIFT 2 + + #define UART_ERRATA_i202_MDR1_ACCESS (1 << 0) + #define OMAP_UART_WER_HAS_TX_WAKEUP (1 << 1) +@@ -109,6 +110,7 @@ + #define UART_OMAP_RX_LVL 0x19 + + struct omap8250_priv { ++ void __iomem *membase; + int line; + u8 habit; + u8 mdr1; +@@ -152,9 +154,9 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p); + static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { } + #endif + +-static u32 uart_read(struct uart_8250_port *up, u32 reg) ++static u32 uart_read(struct omap8250_priv *priv, u32 reg) + { +- return readl(up->port.membase + (reg << up->port.regshift)); ++ return readl(priv->membase + (reg << OMAP_UART_REGSHIFT)); + } + + /* +@@ -538,7 +540,7 @@ static void omap_serial_fill_features_erratas(struct uart_8250_port *up, + u32 mvr, scheme; + u16 revision, major, minor; + +- mvr = uart_read(up, UART_OMAP_MVER); ++ mvr = uart_read(priv, UART_OMAP_MVER); + + /* Check revision register scheme */ + scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT; +@@ -1319,7 +1321,7 @@ static int omap8250_probe(struct platform_device *pdev) + UPF_HARD_FLOW; + up.port.private_data = priv; + +- up.port.regshift = 2; ++ up.port.regshift = OMAP_UART_REGSHIFT; + up.port.fifosize = 64; + up.tx_loadsz = 64; + up.capabilities = UART_CAP_FIFO; +@@ -1381,6 +1383,8 @@ static int omap8250_probe(struct platform_device *pdev) + DEFAULT_CLK_SPEED); + } + ++ priv->membase = membase; ++ priv->line = -ENODEV; + priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; + cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency); +@@ -1388,6 +1392,8 @@ static int omap8250_probe(struct platform_device *pdev) + + spin_lock_init(&priv->rx_dma_lock); + ++ platform_set_drvdata(pdev, priv); ++ + device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); +@@ -1449,7 +1455,6 @@ static int omap8250_probe(struct platform_device *pdev) + 
goto err; + } + priv->line = ret; +- platform_set_drvdata(pdev, priv); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; +@@ -1471,17 +1476,17 @@ static int omap8250_remove(struct platform_device *pdev) + if (err) + return err; + ++ serial8250_unregister_port(priv->line); ++ priv->line = -ENODEV; + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + flush_work(&priv->qos_work); + pm_runtime_disable(&pdev->dev); +- serial8250_unregister_port(priv->line); + cpu_latency_qos_remove_request(&priv->pm_qos_request); + device_init_wakeup(&pdev->dev, false); + return 0; + } + +-#ifdef CONFIG_PM_SLEEP + static int omap8250_prepare(struct device *dev) + { + struct omap8250_priv *priv = dev_get_drvdata(dev); +@@ -1505,7 +1510,7 @@ static int omap8250_suspend(struct device *dev) + { + struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); +- int err; ++ int err = 0; + + serial8250_suspend_port(priv->line); + +@@ -1515,7 +1520,8 @@ static int omap8250_suspend(struct device *dev) + if (!device_may_wakeup(dev)) + priv->wer = 0; + serial_out(up, UART_OMAP_WER, priv->wer); +- err = pm_runtime_force_suspend(dev); ++ if (uart_console(&up->port) && console_suspend_enabled) ++ err = pm_runtime_force_suspend(dev); + flush_work(&priv->qos_work); + + return err; +@@ -1524,11 +1530,15 @@ static int omap8250_suspend(struct device *dev) + static int omap8250_resume(struct device *dev) + { + struct omap8250_priv *priv = dev_get_drvdata(dev); ++ struct uart_8250_port *up = serial8250_get_port(priv->line); + int err; + +- err = pm_runtime_force_resume(dev); +- if (err) +- return err; ++ if (uart_console(&up->port) && console_suspend_enabled) { ++ err = pm_runtime_force_resume(dev); ++ if (err) ++ return err; ++ } ++ + serial8250_resume_port(priv->line); + /* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */ + pm_runtime_mark_last_busy(dev); +@@ -1536,12 +1546,7 @@ static int omap8250_resume(struct device *dev) + + return 0; + } +-#else +-#define omap8250_prepare NULL +-#define omap8250_complete NULL +-#endif + +-#ifdef CONFIG_PM + static int omap8250_lost_context(struct uart_8250_port *up) + { + u32 val; +@@ -1557,11 +1562,15 @@ static int omap8250_lost_context(struct uart_8250_port *up) + return 0; + } + ++static void uart_write(struct omap8250_priv *priv, u32 reg, u32 val) ++{ ++ writel(val, priv->membase + (reg << OMAP_UART_REGSHIFT)); ++} ++ + /* TODO: in future, this should happen via API in drivers/reset/ */ + static int omap8250_soft_reset(struct device *dev) + { + struct omap8250_priv *priv = dev_get_drvdata(dev); +- struct uart_8250_port *up = serial8250_get_port(priv->line); + int timeout = 100; + int sysc; + int syss; +@@ -1575,20 +1584,20 @@ static int omap8250_soft_reset(struct device *dev) + * needing omap8250_soft_reset() quirk. Do it in two writes as + * recommended in the comment for omap8250_update_scr(). 
+ */ +- serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1); +- serial_out(up, UART_OMAP_SCR, ++ uart_write(priv, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1); ++ uart_write(priv, UART_OMAP_SCR, + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL); + +- sysc = serial_in(up, UART_OMAP_SYSC); ++ sysc = uart_read(priv, UART_OMAP_SYSC); + + /* softreset the UART */ + sysc |= OMAP_UART_SYSC_SOFTRESET; +- serial_out(up, UART_OMAP_SYSC, sysc); ++ uart_write(priv, UART_OMAP_SYSC, sysc); + + /* By experiments, 1us enough for reset complete on AM335x */ + do { + udelay(1); +- syss = serial_in(up, UART_OMAP_SYSS); ++ syss = uart_read(priv, UART_OMAP_SYSS); + } while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE)); + + if (!timeout) { +@@ -1602,23 +1611,10 @@ static int omap8250_soft_reset(struct device *dev) + static int omap8250_runtime_suspend(struct device *dev) + { + struct omap8250_priv *priv = dev_get_drvdata(dev); +- struct uart_8250_port *up; ++ struct uart_8250_port *up = NULL; + +- /* In case runtime-pm tries this before we are setup */ +- if (!priv) +- return 0; +- +- up = serial8250_get_port(priv->line); +- /* +- * When using 'no_console_suspend', the console UART must not be +- * suspended. Since driver suspend is managed by runtime suspend, +- * preventing runtime suspend (by returning error) will keep device +- * active during suspend. +- */ +- if (priv->is_suspending && !console_suspend_enabled) { +- if (uart_console(&up->port)) +- return -EBUSY; +- } ++ if (priv->line >= 0) ++ up = serial8250_get_port(priv->line); + + if (priv->habit & UART_ERRATA_CLOCK_DISABLE) { + int ret; +@@ -1627,13 +1623,15 @@ static int omap8250_runtime_suspend(struct device *dev) + if (ret) + return ret; + +- /* Restore to UART mode after reset (for wakeup) */ +- omap8250_update_mdr1(up, priv); +- /* Restore wakeup enable register */ +- serial_out(up, UART_OMAP_WER, priv->wer); ++ if (up) { ++ /* Restore to UART mode after reset (for wakeup) */ ++ omap8250_update_mdr1(up, priv); ++ /* Restore wakeup enable register */ ++ serial_out(up, UART_OMAP_WER, priv->wer); ++ } + } + +- if (up->dma && up->dma->rxchan) ++ if (up && up->dma && up->dma->rxchan) + omap_8250_rx_dma_flush(up); + + priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; +@@ -1645,25 +1643,21 @@ static int omap8250_runtime_suspend(struct device *dev) + static int omap8250_runtime_resume(struct device *dev) + { + struct omap8250_priv *priv = dev_get_drvdata(dev); +- struct uart_8250_port *up; ++ struct uart_8250_port *up = NULL; + +- /* In case runtime-pm tries this before we are setup */ +- if (!priv) +- return 0; +- +- up = serial8250_get_port(priv->line); ++ if (priv->line >= 0) ++ up = serial8250_get_port(priv->line); + +- if (omap8250_lost_context(up)) ++ if (up && omap8250_lost_context(up)) + omap8250_restore_regs(up); + +- if (up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) ++ if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) + omap_8250_rx_dma(up); + + priv->latency = priv->calc_latency; + schedule_work(&priv->qos_work); + return 0; + } +-#endif + + #ifdef CONFIG_SERIAL_8250_OMAP_TTYO_FIXUP + static int __init omap8250_console_fixup(void) +@@ -1706,17 +1700,17 @@ console_initcall(omap8250_console_fixup); + #endif + + static const struct dev_pm_ops omap8250_dev_pm_ops = { +- SET_SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume) +- SET_RUNTIME_PM_OPS(omap8250_runtime_suspend, ++ SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume) ++ RUNTIME_PM_OPS(omap8250_runtime_suspend, + omap8250_runtime_resume, 
NULL) +- .prepare = omap8250_prepare, +- .complete = omap8250_complete, ++ .prepare = pm_sleep_ptr(omap8250_prepare), ++ .complete = pm_sleep_ptr(omap8250_complete), + }; + + static struct platform_driver omap8250_platform_driver = { + .driver = { + .name = "omap8250", +- .pm = &omap8250_dev_pm_ops, ++ .pm = pm_ptr(&omap8250_dev_pm_ops), + .of_match_table = omap8250_dt_ids, + }, + .probe = omap8250_probe, +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 2cc5c68c8689f..d4e57f9017db9 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -48,8 +48,6 @@ static struct lock_class_key port_lock_key; + */ + #define RS485_MAX_RTS_DELAY 100 /* msecs */ + +-static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, +- const struct ktermios *old_termios); + static void uart_wait_until_sent(struct tty_struct *tty, int timeout); + static void uart_change_pm(struct uart_state *state, + enum uart_pm_state pm_state); +@@ -177,6 +175,52 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) + uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS); + } + ++/* Caller holds port mutex */ ++static void uart_change_line_settings(struct tty_struct *tty, struct uart_state *state, ++ const struct ktermios *old_termios) ++{ ++ struct uart_port *uport = uart_port_check(state); ++ struct ktermios *termios; ++ int hw_stopped; ++ ++ /* ++ * If we have no tty, termios, or the port does not exist, ++ * then we can't set the parameters for this port. ++ */ ++ if (!tty || uport->type == PORT_UNKNOWN) ++ return; ++ ++ termios = &tty->termios; ++ uport->ops->set_termios(uport, termios, old_termios); ++ ++ /* ++ * Set modem status enables based on termios cflag ++ */ ++ spin_lock_irq(&uport->lock); ++ if (termios->c_cflag & CRTSCTS) ++ uport->status |= UPSTAT_CTS_ENABLE; ++ else ++ uport->status &= ~UPSTAT_CTS_ENABLE; ++ ++ if (termios->c_cflag & CLOCAL) ++ uport->status &= ~UPSTAT_DCD_ENABLE; ++ else ++ uport->status |= UPSTAT_DCD_ENABLE; ++ ++ /* reset sw-assisted CTS flow control based on (possibly) new mode */ ++ hw_stopped = uport->hw_stopped; ++ uport->hw_stopped = uart_softcts_mode(uport) && ++ !(uport->ops->get_mctrl(uport) & TIOCM_CTS); ++ if (uport->hw_stopped) { ++ if (!hw_stopped) ++ uport->ops->stop_tx(uport); ++ } else { ++ if (hw_stopped) ++ __uart_start(tty); ++ } ++ spin_unlock_irq(&uport->lock); ++} ++ + /* + * Startup the port. This will be called once per open. All calls + * will be serialised by the per-port mutex. +@@ -232,7 +276,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + /* + * Initialise the hardware port settings. + */ +- uart_change_speed(tty, state, NULL); ++ uart_change_line_settings(tty, state, NULL); + + /* + * Setup the RTS and DTR signals once the +@@ -485,52 +529,6 @@ uart_get_divisor(struct uart_port *port, unsigned int baud) + } + EXPORT_SYMBOL(uart_get_divisor); + +-/* Caller holds port mutex */ +-static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, +- const struct ktermios *old_termios) +-{ +- struct uart_port *uport = uart_port_check(state); +- struct ktermios *termios; +- int hw_stopped; +- +- /* +- * If we have no tty, termios, or the port does not exist, +- * then we can't set the parameters for this port. 
+- */ +- if (!tty || uport->type == PORT_UNKNOWN) +- return; +- +- termios = &tty->termios; +- uport->ops->set_termios(uport, termios, old_termios); +- +- /* +- * Set modem status enables based on termios cflag +- */ +- spin_lock_irq(&uport->lock); +- if (termios->c_cflag & CRTSCTS) +- uport->status |= UPSTAT_CTS_ENABLE; +- else +- uport->status &= ~UPSTAT_CTS_ENABLE; +- +- if (termios->c_cflag & CLOCAL) +- uport->status &= ~UPSTAT_DCD_ENABLE; +- else +- uport->status |= UPSTAT_DCD_ENABLE; +- +- /* reset sw-assisted CTS flow control based on (possibly) new mode */ +- hw_stopped = uport->hw_stopped; +- uport->hw_stopped = uart_softcts_mode(uport) && +- !(uport->ops->get_mctrl(uport) & TIOCM_CTS); +- if (uport->hw_stopped) { +- if (!hw_stopped) +- uport->ops->stop_tx(uport); +- } else { +- if (hw_stopped) +- __uart_start(tty); +- } +- spin_unlock_irq(&uport->lock); +-} +- + static int uart_put_char(struct tty_struct *tty, unsigned char c) + { + struct uart_state *state = tty->driver_data; +@@ -994,7 +992,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, + current->comm, + tty_name(port->tty)); + } +- uart_change_speed(tty, state, NULL); ++ uart_change_line_settings(tty, state, NULL); + } + } else { + retval = uart_startup(tty, state, 1); +@@ -1389,12 +1387,18 @@ static void uart_set_rs485_termination(struct uart_port *port, + static int uart_rs485_config(struct uart_port *port) + { + struct serial_rs485 *rs485 = &port->rs485; ++ unsigned long flags; + int ret; + ++ if (!(rs485->flags & SER_RS485_ENABLED)) ++ return 0; ++ + uart_sanitize_serial_rs485(port, rs485); + uart_set_rs485_termination(port, rs485); + ++ spin_lock_irqsave(&port->lock, flags); + ret = port->rs485_config(port, NULL, rs485); ++ spin_unlock_irqrestore(&port->lock, flags); + if (ret) + memset(rs485, 0, sizeof(*rs485)); + +@@ -1656,7 +1660,7 @@ static void uart_set_termios(struct tty_struct *tty, + goto out; + } + +- uart_change_speed(tty, state, old_termios); ++ uart_change_line_settings(tty, state, old_termios); + /* reload cflag from termios; port driver may have overridden flags */ + cflag = tty->termios.c_cflag; + +@@ -2456,12 +2460,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) + ret = ops->startup(uport); + if (ret == 0) { + if (tty) +- uart_change_speed(tty, state, NULL); ++ uart_change_line_settings(tty, state, NULL); ++ uart_rs485_config(uport); + spin_lock_irq(&uport->lock); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, uport->mctrl); +- else +- uart_rs485_config(uport); + ops->start_tx(uport); + spin_unlock_irq(&uport->lock); + tty_port_set_initialized(port, 1); +@@ -2570,10 +2573,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, + port->mctrl &= TIOCM_DTR; + if (!(port->rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); +- else +- uart_rs485_config(port); + spin_unlock_irqrestore(&port->lock, flags); + ++ uart_rs485_config(port); ++ + /* + * If this driver supports console, and it hasn't been + * successfully registered yet, try to re-register it. 
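The serial_core.c hunks above make uart_rs485_config() safe to call unconditionally: it returns early unless SER_RS485_ENABLED is set and now takes the port spinlock around the driver's rs485_config() hook, which lets uart_resume_port() and uart_configure_port() invoke it outside their own locked regions. A condensed sketch of the resulting pattern; bar_apply_rs485() is a hypothetical stand-in for the static helper, not the kernel's exact code:

#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static int bar_apply_rs485(struct uart_port *port)
{
        struct serial_rs485 *rs485 = &port->rs485;
        unsigned long flags;
        int ret;

        if (!(rs485->flags & SER_RS485_ENABLED))
                return 0;       /* RS-485 disabled: nothing to program */

        /* Exclude interrupt-context users of the port while writing. */
        spin_lock_irqsave(&port->lock, flags);
        ret = port->rs485_config(port, NULL, rs485);
        spin_unlock_irqrestore(&port->lock, flags);

        if (ret)
                memset(rs485, 0, sizeof(*rs485));       /* revert state */
        return ret;
}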
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 832d3ba9368ff..8edd0375e0a8a 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -329,6 +329,7 @@ static struct platform_driver onboard_hub_driver = {
+ 
+ /************************** USB driver **************************/
+ 
++#define VENDOR_ID_GENESYS 0x05e3
+ #define VENDOR_ID_MICROCHIP 0x0424
+ #define VENDOR_ID_REALTEK 0x0bda
+ #define VENDOR_ID_TI 0x0451
+@@ -405,6 +406,10 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+ }
+ 
+ static const struct usb_device_id onboard_hub_id_table[] = {
++ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
++ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
++ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
++ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index 2cde54b69eede..d023fb90b4118 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -22,11 +22,23 @@ static const struct onboard_hub_pdata ti_tusb8041_data = {
+ .reset_us = 3000,
+ };
+ 
++static const struct onboard_hub_pdata genesys_gl850g_data = {
++ .reset_us = 3,
++};
++
++static const struct onboard_hub_pdata genesys_gl852g_data = {
++ .reset_us = 50,
++};
++
+ static const struct of_device_id onboard_hub_match[] = {
++ { .compatible = "usb424,2412", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
++ { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
++ { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
++ { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
+ { .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
+ { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
+ { .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f13930b4534c1..b9dd714a3ae69 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5829E_ESIM 0x81e4
+ #define DELL_PRODUCT_5829E 0x81e6
+ 
++#define DELL_PRODUCT_FM101R 0x8213
++#define DELL_PRODUCT_FM101R_ESIM 0x8215
++
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+ #define KYOCERA_PRODUCT_KPC680 0x180a
+@@ -1108,6 +1111,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+ .driver_info = RSVD(0) | RSVD(6) },
++ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
++ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID,
ANYDATA_PRODUCT_ADU_620UW) }, +@@ -1290,6 +1295,7 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ + .driver_info = NCTRL(0) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */ + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), + .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), +@@ -2262,6 +2268,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ + { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, + { } /* Terminating entry */ +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index 1a327eb3580b4..e08688844f1e1 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -563,18 +563,30 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, + u64 search_start; + int ret; + +- if (test_bit(BTRFS_ROOT_DELETING, &root->state)) +- btrfs_err(fs_info, +- "COW'ing blocks on a fs root that's being dropped"); +- +- if (trans->transaction != fs_info->running_transaction) +- WARN(1, KERN_CRIT "trans %llu running %llu\n", +- trans->transid, +- fs_info->running_transaction->transid); ++ if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) { ++ btrfs_abort_transaction(trans, -EUCLEAN); ++ btrfs_crit(fs_info, ++ "attempt to COW block %llu on root %llu that is being deleted", ++ buf->start, btrfs_root_id(root)); ++ return -EUCLEAN; ++ } + +- if (trans->transid != fs_info->generation) +- WARN(1, KERN_CRIT "trans %llu running %llu\n", +- trans->transid, fs_info->generation); ++ /* ++ * COWing must happen through a running transaction, which always ++ * matches the current fs generation (it's a transaction with a state ++ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs ++ * into error state to prevent the commit of any transaction. ++ */ ++ if (unlikely(trans->transaction != fs_info->running_transaction || ++ trans->transid != fs_info->generation)) { ++ btrfs_abort_transaction(trans, -EUCLEAN); ++ btrfs_crit(fs_info, ++"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu", ++ buf->start, btrfs_root_id(root), trans->transid, ++ fs_info->running_transaction->transid, ++ fs_info->generation); ++ return -EUCLEAN; ++ } + + if (!should_cow_block(trans, root, buf)) { + *cow_ret = buf; +@@ -686,8 +698,22 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, + int progress_passed = 0; + struct btrfs_disk_key disk_key; + +- WARN_ON(trans->transaction != fs_info->running_transaction); +- WARN_ON(trans->transid != fs_info->generation); ++ /* ++ * COWing must happen through a running transaction, which always ++ * matches the current fs generation (it's a transaction with a state ++ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs ++ * into error state to prevent the commit of any transaction. 
++ */ ++ if (unlikely(trans->transaction != fs_info->running_transaction || ++ trans->transid != fs_info->generation)) { ++ btrfs_abort_transaction(trans, -EUCLEAN); ++ btrfs_crit(fs_info, ++"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu", ++ parent->start, btrfs_root_id(root), trans->transid, ++ fs_info->running_transaction->transid, ++ fs_info->generation); ++ return -EUCLEAN; ++ } + + parent_nritems = btrfs_header_nritems(parent); + blocksize = fs_info->nodesize; +diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c +index 36a3debe94930..e08e3852c4788 100644 +--- a/fs/btrfs/delayed-ref.c ++++ b/fs/btrfs/delayed-ref.c +@@ -141,24 +141,17 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) + * Transfer bytes to our delayed refs rsv + * + * @fs_info: the filesystem +- * @src: source block rsv to transfer from + * @num_bytes: number of bytes to transfer + * +- * This transfers up to the num_bytes amount from the src rsv to the ++ * This transfers up to the num_bytes amount, previously reserved, to the + * delayed_refs_rsv. Any extra bytes are returned to the space info. + */ + void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, +- struct btrfs_block_rsv *src, + u64 num_bytes) + { + struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; + u64 to_free = 0; + +- spin_lock(&src->lock); +- src->reserved -= num_bytes; +- src->size -= num_bytes; +- spin_unlock(&src->lock); +- + spin_lock(&delayed_refs_rsv->lock); + if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) { + u64 delta = delayed_refs_rsv->size - +diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h +index d6304b690ec4a..712a6315e956b 100644 +--- a/fs/btrfs/delayed-ref.h ++++ b/fs/btrfs/delayed-ref.h +@@ -383,7 +383,6 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans); + int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, + enum btrfs_reserve_flush_enum flush); + void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, +- struct btrfs_block_rsv *src, + u64 num_bytes); + int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans); + bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 08ff10a81cb90..2a7c9088fe1f8 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -1663,12 +1663,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, + parent = ref->parent; + ref_root = ref->root; + +- if (node->ref_mod != 1) { ++ if (unlikely(node->ref_mod != 1)) { + btrfs_err(trans->fs_info, +- "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu", ++ "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu", + node->bytenr, node->ref_mod, node->action, ref_root, + parent); +- return -EIO; ++ return -EUCLEAN; + } + if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { + BUG_ON(!extent_op || !extent_op->update_flags); +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 9e323420c96d3..9474265ee7ea3 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -3869,7 +3869,7 @@ static void get_block_group_info(struct list_head *groups_list, + static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, + void __user *arg) + { +- struct btrfs_ioctl_space_args space_args; ++ struct btrfs_ioctl_space_args space_args = { 0 }; + 
struct btrfs_ioctl_space_info space; + struct btrfs_ioctl_space_info *dest; + struct btrfs_ioctl_space_info *dest_orig; +@@ -5223,7 +5223,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat + + if (compat) { + #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) +- struct btrfs_ioctl_send_args_32 args32; ++ struct btrfs_ioctl_send_args_32 args32 = { 0 }; + + ret = copy_from_user(&args32, argp, sizeof(args32)); + if (ret) +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 1193214ba8c10..60db4c3b82fa1 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -614,14 +614,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, + reloc_reserved = true; + } + +- ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush); ++ ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush); + if (ret) + goto reserve_fail; + if (delayed_refs_bytes) { +- btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv, +- delayed_refs_bytes); ++ btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes); + num_bytes -= delayed_refs_bytes; + } ++ btrfs_block_rsv_add_bytes(rsv, num_bytes, true); + + if (rsv->space_info->force_alloc) + do_chunk_alloc = true; +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index c03ff6a5a7f6b..7c33b28c02aeb 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -4767,7 +4767,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, + struct extent_buffer *leaf; + int slot; + int ins_nr = 0; +- int start_slot; ++ int start_slot = 0; + int ret; + + if (!(inode->flags & BTRFS_INODE_PREALLOC)) +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index a40ebd2321d01..e62b4c139a72d 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -5139,7 +5139,7 @@ static void init_alloc_chunk_ctl_policy_regular( + ASSERT(space_info); + + ctl->max_chunk_size = READ_ONCE(space_info->chunk_size); +- ctl->max_stripe_size = ctl->max_chunk_size; ++ ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G); + + if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) + ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index d387708977a50..a5c31a479aacc 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -1522,10 +1522,15 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, + + if (wbc->pages_skipped) { + /* +- * writeback is not making progress due to locked +- * buffers. Skip this inode for now. ++ * Writeback is not making progress due to locked buffers. ++ * Skip this inode for now. Although having skipped pages ++ * is odd for clean inodes, it can happen for some ++ * filesystems so handle that gracefully. + */ +- redirty_tail_locked(inode, wb); ++ if (inode->i_state & I_DIRTY_ALL) ++ redirty_tail_locked(inode, wb); ++ else ++ inode_cgwb_move_to_attached(inode, wb); + return; + } + +diff --git a/fs/namei.c b/fs/namei.c +index 4248647f1ab24..5e1c2ab2ae709 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -187,7 +187,7 @@ getname_flags(const char __user *filename, int flags, int *empty) + } + } + +- result->refcnt = 1; ++ atomic_set(&result->refcnt, 1); + /* The empty path is special. 
*/ + if (unlikely(!len)) { + if (empty) +@@ -248,7 +248,7 @@ getname_kernel(const char * filename) + memcpy((char *)result->name, filename, len); + result->uptr = NULL; + result->aname = NULL; +- result->refcnt = 1; ++ atomic_set(&result->refcnt, 1); + audit_getname(result); + + return result; +@@ -259,9 +259,10 @@ void putname(struct filename *name) + if (IS_ERR(name)) + return; + +- BUG_ON(name->refcnt <= 0); ++ if (WARN_ON_ONCE(!atomic_read(&name->refcnt))) ++ return; + +- if (--name->refcnt > 0) ++ if (!atomic_dec_and_test(&name->refcnt)) + return; + + if (name->name != name->iname) { +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 5c69a6e9ab3e1..81bbafab18a99 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -2520,9 +2520,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, + return i; + } + +-static int +-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) ++static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) + { ++ struct pnfs_layout_hdr *lo; + struct nfs4_flexfile_layout *ff_layout; + const int dev_count = PNFS_LAYOUTSTATS_MAXDEV; + +@@ -2533,11 +2533,14 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) + return -ENOMEM; + + spin_lock(&args->inode->i_lock); +- ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout); +- args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr, +- &args->devinfo[0], +- dev_count, +- NFS4_FF_OP_LAYOUTSTATS); ++ lo = NFS_I(args->inode)->layout; ++ if (lo && pnfs_layout_is_valid(lo)) { ++ ff_layout = FF_LAYOUT_FROM_HDR(lo); ++ args->num_dev = ff_layout_mirror_prepare_stats( ++ &ff_layout->generic_hdr, &args->devinfo[0], dev_count, ++ NFS4_FF_OP_LAYOUTSTATS); ++ } else ++ args->num_dev = 0; + spin_unlock(&args->inode->i_lock); + if (!args->num_dev) { + kfree(args->devinfo); +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c +index d903ea10410c2..5a8fe0e57a3d3 100644 +--- a/fs/nfs/nfs42proc.c ++++ b/fs/nfs/nfs42proc.c +@@ -81,7 +81,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, + if (status == 0) { + if (nfs_should_remove_suid(inode)) { + spin_lock(&inode->i_lock); +- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); ++ nfs_set_cache_invalid(inode, ++ NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE); + spin_unlock(&inode->i_lock); + } + status = nfs_post_op_update_inode_force_wcc(inode, +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index e1297c6bcfbe2..5cf53def987e5 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -8875,8 +8875,6 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre + /* Save the EXCHANGE_ID verifier session trunk tests */ + memcpy(clp->cl_confirm.data, argp->verifier.data, + sizeof(clp->cl_confirm.data)); +- if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS) +- set_bit(NFS_CS_DS, &clp->cl_flags); + out: + trace_nfs4_exchange_id(clp, status); + rpc_put_task(task); +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index a5db5158c6345..1ffb1068216b6 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -2634,31 +2634,44 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo, + return mode == 0; + } + +-static int +-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data) ++static int pnfs_layout_return_unused_byserver(struct nfs_server *server, ++ void *data) + { + const struct pnfs_layout_range *range = data; ++ const struct cred *cred; + struct 
pnfs_layout_hdr *lo; + struct inode *inode; ++ nfs4_stateid stateid; ++ enum pnfs_iomode iomode; ++ + restart: + rcu_read_lock(); + list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) { +- if (!pnfs_layout_can_be_returned(lo) || ++ inode = lo->plh_inode; ++ if (!inode || !pnfs_layout_can_be_returned(lo) || + test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) + continue; +- inode = lo->plh_inode; + spin_lock(&inode->i_lock); +- if (!pnfs_should_return_unused_layout(lo, range)) { ++ if (!lo->plh_inode || ++ !pnfs_should_return_unused_layout(lo, range)) { + spin_unlock(&inode->i_lock); + continue; + } ++ pnfs_get_layout_hdr(lo); ++ pnfs_set_plh_return_info(lo, range->iomode, 0); ++ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, ++ range, 0) != 0 || ++ !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) { ++ spin_unlock(&inode->i_lock); ++ rcu_read_unlock(); ++ pnfs_put_layout_hdr(lo); ++ cond_resched(); ++ goto restart; ++ } + spin_unlock(&inode->i_lock); +- inode = pnfs_grab_inode_layout_hdr(lo); +- if (!inode) +- continue; + rcu_read_unlock(); +- pnfs_mark_layout_for_return(inode, range); +- iput(inode); ++ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false); ++ pnfs_put_layout_hdr(lo); + cond_resched(); + goto restart; + } +diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c +index 829b62d3bb889..9c0fc3a29d0c9 100644 +--- a/fs/ntfs3/fsntfs.c ++++ b/fs/ntfs3/fsntfs.c +@@ -2428,10 +2428,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim) + { + CLST end, i, zone_len, zlen; + struct wnd_bitmap *wnd = &sbi->used.bitmap; ++ bool dirty = false; + + down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS); + if (!wnd_is_used(wnd, lcn, len)) { +- ntfs_set_state(sbi, NTFS_DIRTY_ERROR); ++ /* mark volume as dirty out of wnd->rw_lock */ ++ dirty = true; + + end = lcn + len; + len = 0; +@@ -2485,6 +2487,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim) + + out: + up_write(&wnd->rw_lock); ++ if (dirty) ++ ntfs_set_state(sbi, NTFS_DIRTY_ERROR); + } + + /* +diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c +index 495cfb37962fa..b89a33f5761ef 100644 +--- a/fs/ntfs3/index.c ++++ b/fs/ntfs3/index.c +@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx, + u32 total = le32_to_cpu(hdr->total); + u16 offs[128]; + ++ if (unlikely(!cmp)) ++ return NULL; ++ + fill_table: + if (end > total) + return NULL; +diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c +index f5d3092f478c5..df15e00c2a3a0 100644 +--- a/fs/ntfs3/xattr.c ++++ b/fs/ntfs3/xattr.c +@@ -209,7 +209,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer, + size = le32_to_cpu(info->size); + + /* Enumerate all xattrs. 
*/ +- for (ret = 0, off = 0; off < size; off += ea_size) { ++ ret = 0; ++ for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) { + ea = Add2Ptr(ea_all, off); + ea_size = unpacked_ea_size(ea); + +@@ -217,6 +218,10 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer, + break; + + if (buffer) { ++ /* Check if we can use field ea->name */ ++ if (off + ea_size > size) ++ break; ++ + if (ret + ea->name_len + 1 > bytes_per_buffer) { + err = -ERANGE; + goto out; +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index e6d711f42607b..86d4b6975dbcb 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -300,7 +300,7 @@ static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry, + { + struct iattr attr = { + .ia_valid = +- ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, ++ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME, + .ia_atime = stat->atime, + .ia_mtime = stat->mtime, + }; +diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h +index 1c2bde0ead736..678f741a7b330 100644 +--- a/include/linux/fprobe.h ++++ b/include/linux/fprobe.h +@@ -13,6 +13,8 @@ + * @nmissed: The counter for missing events. + * @flags: The status flag. + * @rethook: The rethook data structure. (internal data) ++ * @entry_data_size: The private data storage size. ++ * @nr_maxactive: The max number of active functions. + * @entry_handler: The callback function for function entry. + * @exit_handler: The callback function for function exit. + */ +@@ -29,9 +31,13 @@ struct fprobe { + unsigned long nmissed; + unsigned int flags; + struct rethook *rethook; ++ size_t entry_data_size; ++ int nr_maxactive; + +- void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); +- void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); ++ void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, ++ struct pt_regs *regs, void *entry_data); ++ void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, ++ struct pt_regs *regs, void *entry_data); + }; + + /* This fprobe is soft-disabled. 
*/ +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 26ea1a0a59a10..dc745317e1bdb 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -2735,7 +2735,7 @@ struct audit_names; + struct filename { + const char *name; /* pointer to actual string */ + const __user char *uptr; /* original userland pointer */ +- int refcnt; ++ atomic_t refcnt; + struct audit_names *aname; + const char iname[]; + }; +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 784dd6b6046eb..58f5ab29c11a7 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -312,6 +312,7 @@ struct hid_item { + #define HID_DG_LATENCYMODE 0x000d0060 + + #define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065 ++#define HID_BAT_CHARGING 0x00850044 + + #define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 + +@@ -612,6 +613,7 @@ struct hid_device { /* device report descriptor */ + __s32 battery_max; + __s32 battery_report_type; + __s32 battery_report_id; ++ __s32 battery_charge_status; + enum hid_battery_status battery_status; + bool battery_avoid_query; + ktime_t battery_ratelimit_time; +diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h +index f0ec8a5e5a7a9..9d3bd6379eb87 100644 +--- a/include/linux/iio/iio.h ++++ b/include/linux/iio/iio.h +@@ -629,6 +629,8 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, + int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); + int iio_device_claim_direct_mode(struct iio_dev *indio_dev); + void iio_device_release_direct_mode(struct iio_dev *indio_dev); ++int iio_device_claim_buffer_mode(struct iio_dev *indio_dev); ++void iio_device_release_buffer_mode(struct iio_dev *indio_dev); + + extern struct bus_type iio_bus_type; + +diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h +index 649faac31ddb1..0cd33be7142ad 100644 +--- a/include/linux/kallsyms.h ++++ b/include/linux/kallsyms.h +@@ -69,6 +69,8 @@ static inline void *dereference_symbol_descriptor(void *ptr) + int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + unsigned long), + void *data); ++int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), ++ const char *name, void *data); + + /* Lookup the address for a symbol. Returns 0 if not found. */ + unsigned long kallsyms_lookup_name(const char *name); +@@ -168,6 +170,12 @@ static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct + { + return -EOPNOTSUPP; + } ++ ++static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), ++ const char *name, void *data) ++{ ++ return -EOPNOTSUPP; ++} + #endif /*CONFIG_KALLSYMS*/ + + static inline void print_ip_sym(const char *loglvl, unsigned long ip) +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index 63fae3c7ae430..1578a4de1f3cb 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -694,6 +694,7 @@ struct perf_event { + /* The cumulative AND of all event_caps for events in this group. 
*/ + int group_caps; + ++ unsigned int group_generation; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index ddbcbf9ccb2ce..583aebd8c1e01 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -348,7 +348,7 @@ struct hci_dev { + struct list_head list; + struct mutex lock; + +- char name[8]; ++ const char *name; + unsigned long flags; + __u16 id; + __u8 bus; +diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h +index 2d5fcda1bcd05..082f89531b889 100644 +--- a/include/net/bluetooth/hci_mon.h ++++ b/include/net/bluetooth/hci_mon.h +@@ -56,7 +56,7 @@ struct hci_mon_new_index { + __u8 type; + __u8 bus; + bdaddr_t bdaddr; +- char name[8]; ++ char name[8] __nonstring; + } __packed; + #define HCI_MON_NEW_INDEX_SIZE 16 + +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index f0c13864180e2..15de07d365405 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -154,6 +154,7 @@ struct fib_info { + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; ++ bool pfsrc_removed; + struct nexthop *nh; + struct rcu_head rcu; + struct fib_nh fib_nh[]; +diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h +index bd7c3be4af5d7..423b52eca908d 100644 +--- a/include/net/netns/xfrm.h ++++ b/include/net/netns/xfrm.h +@@ -50,6 +50,7 @@ struct netns_xfrm { + struct list_head policy_all; + struct hlist_head *policy_byidx; + unsigned int policy_idx_hmask; ++ unsigned int idx_generator; + struct hlist_head policy_inexact[XFRM_POLICY_MAX]; + struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX]; + unsigned int policy_count[XFRM_POLICY_MAX * 2]; +diff --git a/include/net/sock.h b/include/net/sock.h +index fe695e8bfe289..a1fcbb2a8a2ce 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -333,7 +333,7 @@ struct sk_filter; + * @sk_cgrp_data: cgroup data for this cgroup + * @sk_memcg: this socket's memory cgroup association + * @sk_write_pending: a write to stream socket waits to start +- * @sk_wait_pending: number of threads blocked on this socket ++ * @sk_disconnects: number of disconnect operations performed on this sock + * @sk_state_change: callback to indicate change in the state of the sock + * @sk_data_ready: callback to indicate there is data to be processed + * @sk_write_space: callback to indicate there is bf sending space available +@@ -426,7 +426,7 @@ struct sock { + unsigned int sk_napi_id; + #endif + int sk_rcvbuf; +- int sk_wait_pending; ++ int sk_disconnects; + + struct sk_filter __rcu *sk_filter; + union { +@@ -1185,8 +1185,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk) + } + + #define sk_wait_event(__sk, __timeo, __condition, __wait) \ +- ({ int __rc; \ +- __sk->sk_wait_pending++; \ ++ ({ int __rc, __dis = __sk->sk_disconnects; \ + release_sock(__sk); \ + __rc = __condition; \ + if (!__rc) { \ +@@ -1196,8 +1195,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk) + } \ + sched_annotate_sleep(); \ + lock_sock(__sk); \ +- __sk->sk_wait_pending--; \ +- __rc = __condition; \ ++ __rc = __dis == __sk->sk_disconnects ? 
__condition : -EPIPE; \ + __rc; \ + }) + +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 9ebb54122bb71..548c75c8a34c7 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -141,6 +141,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); + #define TCP_RTO_MAX ((unsigned)(120*HZ)) + #define TCP_RTO_MIN ((unsigned)(HZ/5)) + #define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */ ++ ++#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */ ++ + #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ + #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now + * used as a fallback RTO for the +diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h +index 5eaa1fa991715..833143d0992e0 100644 +--- a/include/trace/events/neigh.h ++++ b/include/trace/events/neigh.h +@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create, + ), + + TP_fast_assign( +- struct in6_addr *pin6; + __be32 *p32; + + __entry->family = tbl->family; +@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create, + __entry->entries = atomic_read(&tbl->gc_entries); + __entry->created = n != NULL; + __entry->gc_exempt = exempt_from_gc; +- pin6 = (struct in6_addr *)__entry->primary_key6; + p32 = (__be32 *)__entry->primary_key4; + + if (tbl->family == AF_INET) +@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create, + + #if IS_ENABLED(CONFIG_IPV6) + if (tbl->family == AF_INET6) { ++ struct in6_addr *pin6; ++ + pin6 = (struct in6_addr *)__entry->primary_key6; + *pin6 = *(struct in6_addr *)pkey; + } +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index a2240f54fc224..c5f41fc75d543 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -2208,7 +2208,7 @@ __audit_reusename(const __user char *uptr) + if (!n->name) + continue; + if (n->name->uptr == uptr) { +- n->name->refcnt++; ++ atomic_inc(&n->name->refcnt); + return n->name; + } + } +@@ -2237,7 +2237,7 @@ void __audit_getname(struct filename *name) + n->name = name; + n->name_len = AUDIT_NAME_FULL; + name->aname = n; +- name->refcnt++; ++ atomic_inc(&name->refcnt); + } + + static inline int audit_copy_fcaps(struct audit_names *name, +@@ -2369,7 +2369,7 @@ out_alloc: + return; + if (name) { + n->name = name; +- name->refcnt++; ++ atomic_inc(&name->refcnt); + } + + out: +@@ -2496,7 +2496,7 @@ void __audit_inode_child(struct inode *parent, + if (found_parent) { + found_child->name = found_parent->name; + found_child->name_len = AUDIT_NAME_FULL; +- found_child->name->refcnt++; ++ atomic_inc(&found_child->name->refcnt); + } + } + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index db1065daabb62..2b8315a948a2c 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -1962,6 +1962,7 @@ static void perf_group_attach(struct perf_event *event) + + list_add_tail(&event->sibling_list, &group_leader->sibling_list); + group_leader->nr_siblings++; ++ group_leader->group_generation++; + + perf_event__header_size(group_leader); + +@@ -2156,6 +2157,7 @@ static void perf_group_detach(struct perf_event *event) + if (leader != event) { + list_del_init(&event->sibling_list); + event->group_leader->nr_siblings--; ++ event->group_leader->group_generation++; + goto out; + } + +@@ -5279,7 +5281,7 @@ static int __perf_read_group_add(struct perf_event *leader, + u64 read_format, u64 *values) + { + struct perf_event_context *ctx = leader->ctx; +- struct perf_event *sub; ++ struct perf_event *sub, *parent; + unsigned long flags; + int n = 1; /* skip @nr */ + int ret; +@@ -5289,6 +5291,33 @@ 
static int __perf_read_group_add(struct perf_event *leader,
+ return ret;
+ 
+ raw_spin_lock_irqsave(&ctx->lock, flags);
++ /*
++ * Verify the grouping between the parent and child (inherited)
++ * events is still intact.
++ *
++ * Specifically:
++ * - leader->ctx->lock pins leader->sibling_list
++ * - parent->child_mutex pins parent->child_list
++ * - parent->ctx->mutex pins parent->sibling_list
++ *
++ * Because parent->ctx != leader->ctx (and child_list nests inside
++ * ctx->mutex), group destruction is not atomic between children, also
++ * see perf_event_release_kernel(). Additionally, parent can grow the
++ * group.
++ *
++ * Therefore it is possible to have parent and child groups in a
++ * different configuration and summing over such a beast makes no sense
++ * whatsoever.
++ *
++ * Reject this.
++ */
++ parent = leader->parent;
++ if (parent &&
++ (parent->group_generation != leader->group_generation ||
++ parent->nr_siblings != leader->nr_siblings)) {
++ ret = -ECHILD;
++ goto unlock;
++ }
+ 
+ /*
+ * Since we co-schedule groups, {enabled,running} times of siblings
+@@ -5322,8 +5351,9 @@ static int __perf_read_group_add(struct perf_event *leader,
+ values[n++] = atomic64_read(&sub->lost_samples);
+ }
+ 
++unlock:
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+- return 0;
++ return ret;
+ }
+ 
+ static int perf_read_group(struct perf_event *event,
+@@ -5342,10 +5372,6 @@ static int perf_read_group(struct perf_event *event,
+ 
+ values[0] = 1 + leader->nr_siblings;
+ 
+- /*
+- * By locking the child_mutex of the leader we effectively
+- * lock the child list of all siblings.. XXX explain how.
+- */
+ mutex_lock(&leader->child_mutex);
+ 
+ ret = __perf_read_group_add(leader, read_format, values);
+@@ -13267,6 +13293,7 @@ static int inherit_group(struct perf_event *parent_event,
+ !perf_get_aux_event(child_ctr, leader))
+ return -EINVAL;
+ }
++ leader->group_generation = parent_event->group_generation;
+ return 0;
+ }
+ 
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index ad3cccb0970f8..824bcc7b5dbc3 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -197,6 +197,16 @@ static int compare_symbol_name(const char *name, char *namebuf)
+ return strcmp(name, namebuf);
+ }
+ 
++static unsigned int get_symbol_seq(int index)
++{
++ unsigned int i, seq = 0;
++
++ for (i = 0; i < 3; i++)
++ seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i];
++
++ return seq;
++}
++
+ static int kallsyms_lookup_names(const char *name,
+ unsigned int *start,
+ unsigned int *end)
+@@ -211,7 +221,7 @@ static int kallsyms_lookup_names(const char *name,
+ 
+ while (low <= high) {
+ mid = low + (high - low) / 2;
+- seq = kallsyms_seqs_of_names[mid];
++ seq = get_symbol_seq(mid);
+ off = get_symbol_offset(seq);
+ kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+ ret = compare_symbol_name(name, namebuf);
+@@ -228,7 +238,7 @@ static int kallsyms_lookup_names(const char *name,
+ 
+ low = mid;
+ while (low) {
+- seq = kallsyms_seqs_of_names[low - 1];
++ seq = get_symbol_seq(low - 1);
+ off = get_symbol_offset(seq);
+ kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+ if (compare_symbol_name(name, namebuf))
+@@ -240,7 +250,7 @@ static int kallsyms_lookup_names(const char *name,
+ if (end) {
+ high = mid;
+ while (high < kallsyms_num_syms - 1) {
+- seq = kallsyms_seqs_of_names[high + 1];
++ seq = get_symbol_seq(high + 1);
+ off = get_symbol_offset(seq);
+ kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+ if (compare_symbol_name(name, namebuf))
+@@ -265,7 +275,7 @@ unsigned
long kallsyms_lookup_name(const char *name) + + ret = kallsyms_lookup_names(name, &i, NULL); + if (!ret) +- return kallsyms_sym_address(kallsyms_seqs_of_names[i]); ++ return kallsyms_sym_address(get_symbol_seq(i)); + + return module_kallsyms_lookup_name(name); + } +@@ -293,6 +303,24 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + return 0; + } + ++int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), ++ const char *name, void *data) ++{ ++ int ret; ++ unsigned int i, start, end; ++ ++ ret = kallsyms_lookup_names(name, &start, &end); ++ if (ret) ++ return 0; ++ ++ for (i = start; !ret && i <= end; i++) { ++ ret = fn(data, kallsyms_sym_address(get_symbol_seq(i))); ++ cond_resched(); ++ } ++ ++ return ret; ++} ++ + static unsigned long get_symbol_pos(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset) +diff --git a/kernel/kallsyms_internal.h b/kernel/kallsyms_internal.h +index a04b7a5cb1e3e..27fabdcc40f57 100644 +--- a/kernel/kallsyms_internal.h ++++ b/kernel/kallsyms_internal.h +@@ -26,6 +26,6 @@ extern const char kallsyms_token_table[] __weak; + extern const u16 kallsyms_token_index[] __weak; + + extern const unsigned int kallsyms_markers[] __weak; +-extern const unsigned int kallsyms_seqs_of_names[] __weak; ++extern const u8 kallsyms_seqs_of_names[] __weak; + + #endif // LINUX_KALLSYMS_INTERNAL_H_ +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index 1207c78f85c11..853a07618a3cf 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -345,7 +345,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time, + * Except when the rq is capped by uclamp_max. + */ + if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) && +- sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) { ++ sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq && ++ !sg_policy->need_freq_update) { + next_f = sg_policy->next_freq; + + /* Restore cached freq as next_freq has changed */ +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index 8c77c54e6348b..f4a494a457c52 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -2646,7 +2646,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, + + static void + kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, +- struct pt_regs *regs) ++ struct pt_regs *regs, void *data) + { + struct bpf_kprobe_multi_link *link; + +diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c +index 1322247ce6488..f386d6bd8e0e3 100644 +--- a/kernel/trace/fprobe.c ++++ b/kernel/trace/fprobe.c +@@ -17,14 +17,16 @@ + struct fprobe_rethook_node { + struct rethook_node node; + unsigned long entry_ip; ++ char data[]; + }; + + static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) + { + struct fprobe_rethook_node *fpr; +- struct rethook_node *rh; ++ struct rethook_node *rh = NULL; + struct fprobe *fp; ++ void *entry_data = NULL; + int bit; + + fp = container_of(ops, struct fprobe, ops); +@@ -37,9 +39,6 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + return; + } + +- if (fp->entry_handler) +- fp->entry_handler(fp, ip, ftrace_get_regs(fregs)); +- + if (fp->exit_handler) { + rh = rethook_try_get(fp->rethook); + if (!rh) { +@@ -48,9 +47,16 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + } + fpr = container_of(rh, struct fprobe_rethook_node, node); + 
fpr->entry_ip = ip;
+- rethook_hook(rh, ftrace_get_regs(fregs), true);
++ if (fp->entry_data_size)
++ entry_data = fpr->data;
+ }
+ 
++ if (fp->entry_handler)
++ fp->entry_handler(fp, ip, ftrace_get_regs(fregs), entry_data);
++
++ if (rh)
++ rethook_hook(rh, ftrace_get_regs(fregs), true);
++
+ out:
+ ftrace_test_recursion_unlock(bit);
+ }
+@@ -81,7 +87,8 @@ static void fprobe_exit_handler(struct rethook_node *rh, void *data,
+ 
+ fpr = container_of(rh, struct fprobe_rethook_node, node);
+ 
+- fp->exit_handler(fp, fpr->entry_ip, regs);
++ fp->exit_handler(fp, fpr->entry_ip, regs,
++ fp->entry_data_size ? (void *)fpr->data : NULL);
+ }
+ NOKPROBE_SYMBOL(fprobe_exit_handler);
+ 
+@@ -127,7 +134,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ {
+ int i, size;
+ 
+- if (num < 0)
++ if (num <= 0)
+ return -EINVAL;
+ 
+ if (!fp->exit_handler) {
+@@ -136,9 +143,12 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ }
+ 
+ /* Initialize rethook if needed */
+- size = num * num_possible_cpus() * 2;
+- if (size < 0)
+- return -E2BIG;
++ if (fp->nr_maxactive)
++ size = fp->nr_maxactive;
++ else
++ size = num * num_possible_cpus() * 2;
++ if (size <= 0)
++ return -EINVAL;
+ 
+ fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
+ if (!fp->rethook)
+@@ -146,7 +156,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ for (i = 0; i < size; i++) {
+ struct fprobe_rethook_node *node;
+ 
+- node = kzalloc(sizeof(*node), GFP_KERNEL);
++ node = kzalloc(sizeof(*node) + fp->entry_data_size, GFP_KERNEL);
+ if (!node) {
+ rethook_free(fp->rethook);
+ fp->rethook = NULL;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 9da418442a063..2e3dce5e2575e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2777,6 +2777,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
+ update_event_fields(call, map[i]);
+ }
+ }
++ cond_resched();
+ }
+ up_write(&trace_event_sem);
+ }
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 5a75b039e5860..22852029c6924 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -705,6 +705,25 @@ static struct notifier_block trace_kprobe_module_nb = {
+ .priority = 1 /* Invoked after kprobe module callback */
+ };
+ 
++static int count_symbols(void *data, unsigned long unused)
++{
++ unsigned int *count = data;
++
++ (*count)++;
++
++ return 0;
++}
++
++static unsigned int number_of_same_symbols(char *func_name)
++{
++ unsigned int count;
++
++ count = 0;
++ kallsyms_on_each_match_symbol(count_symbols, func_name, &count);
++
++ return count;
++}
++
+ static int __trace_kprobe_create(int argc, const char *argv[])
+ {
+ /*
+@@ -834,6 +853,31 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ }
+ }
+ 
++ if (symbol && !strchr(symbol, ':')) {
++ unsigned int count;
++
++ count = number_of_same_symbols(symbol);
++ if (count > 1) {
++ /*
++ * Users should use ADDR to remove the ambiguity of
++ * using KSYM only.
++ */
++ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
++ ret = -EADDRNOTAVAIL;
++
++ goto error;
++ } else if (count == 0) {
++ /*
++ * We can return ENOENT earlier than when registering the
++ * kprobe.
++ */
++ trace_probe_log_err(0, BAD_PROBE_ADDR);
++ ret = -ENOENT;
++
++ goto error;
++ }
++ }
++
+ trace_probe_log_set_index(0);
+ if (event) {
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
+@@ -1744,6 +1788,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
+ }
+ 
+ #ifdef CONFIG_PERF_EVENTS
++
+ /* create a trace_kprobe, but don't add it to global lists */
+ struct trace_event_call *
+ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+@@ -1754,6 +1799,24 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+ int ret;
+ char *event;
+ 
++ if (func) {
++ unsigned int count;
++
++ count = number_of_same_symbols(func);
++ if (count > 1)
++ /*
++ * Users should use addr to remove the ambiguity of
++ * using func only.
++ */
++ return ERR_PTR(-EADDRNOTAVAIL);
++ else if (count == 0)
++ /*
++ * We can return ENOENT earlier than when registering the
++ * kprobe.
++ */
++ return ERR_PTR(-ENOENT);
++ }
++
+ /*
+ * local trace_kprobes are not added to dyn_event, so they are never
+ * searched in find_trace_kprobe(). Therefore, there is no concern of
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index f41c330bd60f1..f48b3ed20b095 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -404,6 +404,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ C(BAD_MAXACT, "Invalid maxactive number"), \
+ C(MAXACT_TOO_BIG, "Maxactive is too big"), \
+ C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
++ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
+ C(BAD_RETPROBE, "Retprobe address must be an function entry"), \
+ C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
+ C(NO_GROUP_NAME, "Group name is not specified"), \
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 12dfe6691dd52..4db0199651f56 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1219,13 +1219,16 @@ config DEBUG_TIMEKEEPING
+ config DEBUG_PREEMPT
+ bool "Debug preemptible kernel"
+ depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
+- default y
+ help
+ If you say Y here then the kernel will use a debug variant of the
+ commonly used smp_processor_id() function and will print warnings
+ if kernel code uses it in a preemption-unsafe way. Also, the kernel
+ will detect preemption count underflows.
+ 
++ This option has the potential to introduce high runtime overhead,
++ depending on workload as it triggers debugging routines for each
++ this_cpu operation. It should only be used for debugging purposes.
++ + menu "Lock Debugging (spinlocks, mutexes, etc...)" + + config LOCK_DEBUGGING_SUPPORT +diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c +index e0381b3ec410c..34fa5a5bbda1f 100644 +--- a/lib/test_fprobe.c ++++ b/lib/test_fprobe.c +@@ -30,7 +30,8 @@ static noinline u32 fprobe_selftest_target2(u32 value) + return (value / div_factor) + 1; + } + +-static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) ++static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, ++ struct pt_regs *regs, void *data) + { + KUNIT_EXPECT_FALSE(current_test, preemptible()); + /* This can be called on the fprobe_selftest_target and the fprobe_selftest_target2 */ +@@ -39,7 +40,8 @@ static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct + entry_val = (rand1 / div_factor); + } + +-static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) ++static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip, ++ struct pt_regs *regs, void *data) + { + unsigned long ret = regs_return_value(regs); + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 61059571c8779..728be9307f526 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1583,6 +1583,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + return ERR_PTR(-EOPNOTSUPP); + } + ++ /* Reject outgoing connection to device with same BD ADDR against ++ * CVE-2020-26555 ++ */ ++ if (!bacmp(&hdev->bdaddr, dst)) { ++ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", ++ dst); ++ return ERR_PTR(-ECONNREFUSED); ++ } ++ + acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + if (!acl) { + acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); +@@ -2355,34 +2364,41 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, + if (!test_bit(HCI_CONN_AUTH, &conn->flags)) + goto auth; + +- /* An authenticated FIPS approved combination key has sufficient +- * security for security level 4. */ +- if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 && +- sec_level == BT_SECURITY_FIPS) +- goto encrypt; +- +- /* An authenticated combination key has sufficient security for +- security level 3. */ +- if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 || +- conn->key_type == HCI_LK_AUTH_COMBINATION_P256) && +- sec_level == BT_SECURITY_HIGH) +- goto encrypt; +- +- /* An unauthenticated combination key has sufficient security for +- security level 1 and 2. */ +- if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 || +- conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) && +- (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW)) +- goto encrypt; +- +- /* A combination key has always sufficient security for the security +- levels 1 or 2. High security level requires the combination key +- is generated using maximum PIN code length (16). +- For pre 2.1 units. */ +- if (conn->key_type == HCI_LK_COMBINATION && +- (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW || +- conn->pin_length == 16)) +- goto encrypt; ++ switch (conn->key_type) { ++ case HCI_LK_AUTH_COMBINATION_P256: ++ /* An authenticated FIPS approved combination key has ++ * sufficient security for security level 4 or lower. ++ */ ++ if (sec_level <= BT_SECURITY_FIPS) ++ goto encrypt; ++ break; ++ case HCI_LK_AUTH_COMBINATION_P192: ++ /* An authenticated combination key has sufficient security for ++ * security level 3 or lower. 
++ */
++ if (sec_level <= BT_SECURITY_HIGH)
++ goto encrypt;
++ break;
++ case HCI_LK_UNAUTH_COMBINATION_P192:
++ case HCI_LK_UNAUTH_COMBINATION_P256:
++ /* An unauthenticated combination key has sufficient security
++ * for security level 2 or lower.
++ */
++ if (sec_level <= BT_SECURITY_MEDIUM)
++ goto encrypt;
++ break;
++ case HCI_LK_COMBINATION:
++ /* A combination key always has sufficient security for the
++ * security levels 2 or lower. High security level requires the
++ * combination key to be generated using maximum PIN code length
++ * (16). For pre 2.1 units.
++ */
++ if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
++ goto encrypt;
++ break;
++ default:
++ break;
++ }
+ 
+ auth:
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d13b498f148cc..6a1db678d032f 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2616,7 +2616,11 @@ int hci_register_dev(struct hci_dev *hdev)
+ if (id < 0)
+ return id;
+ 
+- snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
++ error = dev_set_name(&hdev->dev, "hci%u", id);
++ if (error)
++ return error;
++
++ hdev->name = dev_name(&hdev->dev);
+ hdev->id = id;
+ 
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+@@ -2638,8 +2642,6 @@ int hci_register_dev(struct hci_dev *hdev)
+ if (!IS_ERR_OR_NULL(bt_debugfs))
+ hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+ 
+- dev_set_name(&hdev->dev, "%s", hdev->name);
+-
+ error = device_add(&hdev->dev);
+ if (error < 0)
+ goto err_wqueue;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index e4d8857716eb7..c86a45344fe28 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -25,6 +25,8 @@
+ /* Bluetooth HCI event handling. */
+ 
+ #include 
++#include 
++#include 
+ 
+ #include 
+ #include 
+@@ -3277,6 +3279,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ 
+ bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
+ 
++ /* Reject incoming connection from device with same BD ADDR against
++ * CVE-2020-26555
++ */
++ if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
++ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
++ &ev->bdaddr);
++ hci_reject_conn(hdev, &ev->bdaddr);
++ return;
++ }
++
+ mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
+ &flags);
+ 
+@@ -4686,6 +4698,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
+ if (!conn)
+ goto unlock;
+ 
++ /* Ignore NULL link key against CVE-2020-26555 */
++ if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
++ bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
++ &ev->bdaddr);
++ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
++ hci_conn_drop(conn);
++ goto unlock;
++ }
++
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(conn);
+@@ -5221,8 +5242,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
+ * available, then do not declare that OOB data is
+ * present.
+ */
+- if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+- !memcmp(data->hash256, ZERO_KEY, 16))
++ if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
++ !crypto_memneq(data->hash256, ZERO_KEY, 16))
+ return 0x00;
+ 
+ return 0x02;
+@@ -5232,8 +5253,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
+ * not supported by the hardware, then check that if
+ * P-192 data values are present.
+ */ +- if (!memcmp(data->rand192, ZERO_KEY, 16) || +- !memcmp(data->hash192, ZERO_KEY, 16)) ++ if (!crypto_memneq(data->rand192, ZERO_KEY, 16) || ++ !crypto_memneq(data->hash192, ZERO_KEY, 16)) + return 0x00; + + return 0x01; +@@ -5250,7 +5271,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); +- if (!conn) ++ if (!conn || !hci_conn_ssp_enabled(conn)) + goto unlock; + + hci_conn_hold(conn); +@@ -5497,7 +5518,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); +- if (!conn) ++ if (!conn || !hci_conn_ssp_enabled(conn)) + goto unlock; + + /* Reset the authentication requirement to unknown */ +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c +index 1d249d839819d..484fc2a8e4baa 100644 +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -439,7 +439,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) + ni->type = hdev->dev_type; + ni->bus = hdev->bus; + bacpy(&ni->bdaddr, &hdev->bdaddr); +- memcpy(ni->name, hdev->name, 8); ++ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name, ++ strnlen(hdev->name, sizeof(ni->name)), '\0'); + + opcode = cpu_to_le16(HCI_MON_NEW_INDEX); + break; +diff --git a/net/core/dev.c b/net/core/dev.c +index 5374761f5af2c..0d5aa820fd830 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -345,7 +345,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name) + static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node) + { + list_del(&name_node->list); +- netdev_name_node_del(name_node); + kfree(name_node->name); + netdev_name_node_free(name_node); + } +@@ -364,6 +363,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) + if (name_node == dev->name_node || name_node->dev != dev) + return -EINVAL; + ++ netdev_name_node_del(name_node); ++ synchronize_rcu(); + __netdev_name_node_alt_destroy(name_node); + + return 0; +@@ -380,6 +381,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev) + /* Device list insertion */ + static void list_netdevice(struct net_device *dev) + { ++ struct netdev_name_node *name_node; + struct net *net = dev_net(dev); + + ASSERT_RTNL(); +@@ -391,6 +393,9 @@ static void list_netdevice(struct net_device *dev) + dev_index_hash(net, dev->ifindex)); + write_unlock(&dev_base_lock); + ++ netdev_for_each_altname(dev, name_node) ++ netdev_name_node_add(net, name_node); ++ + dev_base_seq_inc(net); + } + +@@ -399,8 +404,13 @@ static void list_netdevice(struct net_device *dev) + */ + static void unlist_netdevice(struct net_device *dev, bool lock) + { ++ struct netdev_name_node *name_node; ++ + ASSERT_RTNL(); + ++ netdev_for_each_altname(dev, name_node) ++ netdev_name_node_del(name_node); ++ + /* Unlink dev from the device chain */ + if (lock) + write_lock(&dev_base_lock); +@@ -1053,7 +1063,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf) + + for_each_netdev(net, d) { + struct netdev_name_node *name_node; +- list_for_each_entry(name_node, &d->name_node->list, list) { ++ ++ netdev_for_each_altname(d, name_node) { + if (!sscanf(name_node->name, name, &i)) + continue; + if (i < 0 || i >= max_netdevices) +@@ -1090,6 +1101,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf) + return -ENFILE; + } + ++static int dev_prep_valid_name(struct net *net, 
struct net_device *dev, ++ const char *want_name, char *out_name) ++{ ++ int ret; ++ ++ if (!dev_valid_name(want_name)) ++ return -EINVAL; ++ ++ if (strchr(want_name, '%')) { ++ ret = __dev_alloc_name(net, want_name, out_name); ++ return ret < 0 ? ret : 0; ++ } else if (netdev_name_in_use(net, want_name)) { ++ return -EEXIST; ++ } else if (out_name != want_name) { ++ strscpy(out_name, want_name, IFNAMSIZ); ++ } ++ ++ return 0; ++} ++ + static int dev_alloc_name_ns(struct net *net, + struct net_device *dev, + const char *name) +@@ -1127,19 +1158,13 @@ EXPORT_SYMBOL(dev_alloc_name); + static int dev_get_valid_name(struct net *net, struct net_device *dev, + const char *name) + { +- BUG_ON(!net); +- +- if (!dev_valid_name(name)) +- return -EINVAL; +- +- if (strchr(name, '%')) +- return dev_alloc_name_ns(net, dev, name); +- else if (netdev_name_in_use(net, name)) +- return -EEXIST; +- else if (dev->name != name) +- strscpy(dev->name, name, IFNAMSIZ); ++ char buf[IFNAMSIZ]; ++ int ret; + +- return 0; ++ ret = dev_prep_valid_name(net, dev, name, buf); ++ if (ret >= 0) ++ strscpy(dev->name, buf, IFNAMSIZ); ++ return ret; + } + + /** +@@ -10930,7 +10955,9 @@ EXPORT_SYMBOL(unregister_netdev); + int __dev_change_net_namespace(struct net_device *dev, struct net *net, + const char *pat, int new_ifindex) + { ++ struct netdev_name_node *name_node; + struct net *net_old = dev_net(dev); ++ char new_name[IFNAMSIZ] = {}; + int err, new_nsid; + + ASSERT_RTNL(); +@@ -10957,10 +10984,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, + /* We get here if we can't use the current device name */ + if (!pat) + goto out; +- err = dev_get_valid_name(net, dev, pat); ++ err = dev_prep_valid_name(net, dev, pat, new_name); + if (err < 0) + goto out; + } ++ /* Check that none of the altnames conflicts. */ ++ err = -EEXIST; ++ netdev_for_each_altname(dev, name_node) ++ if (netdev_name_in_use(net, name_node->name)) ++ goto out; + + /* Check that new_ifindex isn't used yet. 
*/ + err = -EBUSY; +@@ -11025,6 +11057,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, + kobject_uevent(&dev->dev.kobj, KOBJ_ADD); + netdev_adjacent_add_links(dev); + ++ if (new_name[0]) /* Rename the netdev to prepared name */ ++ strscpy(dev->name, new_name, IFNAMSIZ); ++ + /* Fixup kobjects */ + err = device_rename(&dev->dev, dev->name); + WARN_ON(err); +diff --git a/net/core/dev.h b/net/core/dev.h +index cbb8a925175a2..9ca91457c197e 100644 +--- a/net/core/dev.h ++++ b/net/core/dev.h +@@ -61,6 +61,9 @@ struct netdev_name_node { + int netdev_get_name(struct net *net, char *name, int ifindex); + int dev_change_name(struct net_device *dev, const char *newname); + ++#define netdev_for_each_altname(dev, namenode) \ ++ list_for_each_entry((namenode), &(dev)->name_node->list, list) ++ + int netdev_name_node_alt_create(struct net_device *dev, const char *name); + int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); + +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index c3763056c554a..471d4effa8b49 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v) + seq_puts(seq, " Flags: "); + + for (i = 0; i < NR_PKT_FLAGS; i++) { +- if (i == F_FLOW_SEQ) ++ if (i == FLOW_SEQ_SHIFT) + if (!pkt_dev->cflows) + continue; + +- if (pkt_dev->flags & (1 << i)) ++ if (pkt_dev->flags & (1 << i)) { + seq_printf(seq, "%s ", pkt_flag_names[i]); +- else if (i == F_FLOW_SEQ) +- seq_puts(seq, "FLOW_RND "); +- + #ifdef CONFIG_XFRM +- if (i == F_IPSEC && pkt_dev->spi) +- seq_printf(seq, "spi:%u", pkt_dev->spi); ++ if (i == IPSEC_SHIFT && pkt_dev->spi) ++ seq_printf(seq, "spi:%u ", pkt_dev->spi); + #endif ++ } else if (i == FLOW_SEQ_SHIFT) { ++ seq_puts(seq, "FLOW_RND "); ++ } + } + + seq_puts(seq, "\n"); +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 9d4507aa736b7..854b3fd66b1be 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -5394,13 +5394,11 @@ static unsigned int + rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, + enum netdev_offload_xstats_type type) + { +- bool enabled = netdev_offload_xstats_enabled(dev, type); +- + return nla_total_size(0) + + /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ + nla_total_size(sizeof(u8)) + + /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ +- (enabled ? 
nla_total_size(sizeof(u8)) : 0) + ++ nla_total_size(sizeof(u8)) + + 0; + } + +diff --git a/net/core/stream.c b/net/core/stream.c +index 5b05b889d31af..051aa71a8ad0f 100644 +--- a/net/core/stream.c ++++ b/net/core/stream.c +@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close); + */ + int sk_stream_wait_memory(struct sock *sk, long *timeo_p) + { +- int err = 0; ++ int ret, err = 0; + long vm_wait = 0; + long current_timeo = *timeo_p; + DEFINE_WAIT_FUNC(wait, woken_wake_function); +@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) + + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + sk->sk_write_pending++; +- sk_wait_event(sk, ¤t_timeo, READ_ONCE(sk->sk_err) || +- (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) || +- (sk_stream_memory_free(sk) && +- !vm_wait), &wait); ++ ret = sk_wait_event(sk, ¤t_timeo, READ_ONCE(sk->sk_err) || ++ (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) || ++ (sk_stream_memory_free(sk) && !vm_wait), ++ &wait); + sk->sk_write_pending--; ++ if (ret < 0) ++ goto do_error; + + if (vm_wait) { + vm_wait -= current_timeo; +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c +index 04853c83c85c4..5d379df90c826 100644 +--- a/net/ipv4/af_inet.c ++++ b/net/ipv4/af_inet.c +@@ -589,7 +589,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) + + add_wait_queue(sk_sleep(sk), &wait); + sk->sk_write_pending += writebias; +- sk->sk_wait_pending++; + + /* Basic assumption: if someone sets sk->sk_err, he _must_ + * change state of the socket from TCP_SYN_*. +@@ -605,7 +604,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) + } + remove_wait_queue(sk_sleep(sk), &wait); + sk->sk_write_pending -= writebias; +- sk->sk_wait_pending--; + return timeo; + } + +@@ -634,6 +632,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, + return -EINVAL; + + if (uaddr->sa_family == AF_UNSPEC) { ++ sk->sk_disconnects++; + err = sk->sk_prot->disconnect(sk, flags); + sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; + goto out; +@@ -688,6 +687,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, + int writebias = (sk->sk_protocol == IPPROTO_TCP) && + tcp_sk(sk)->fastopen_req && + tcp_sk(sk)->fastopen_req->data ? 1 : 0; ++ int dis = sk->sk_disconnects; + + /* Error code is set above */ + if (!timeo || !inet_wait_for_connect(sk, timeo, writebias)) +@@ -696,6 +696,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, + err = sock_intr_errno(timeo); + if (signal_pending(current)) + goto out; ++ ++ if (dis != sk->sk_disconnects) { ++ err = -EPIPE; ++ goto out; ++ } + } + + /* Connection was closed by RST, timeout, ICMP error +@@ -717,6 +722,7 @@ out: + sock_error: + err = sock_error(sk) ? 
: -ECONNABORTED; + sock->state = SS_UNCONNECTED; ++ sk->sk_disconnects++; + if (sk->sk_prot->disconnect(sk, flags)) + sock->state = SS_DISCONNECTING; + goto out; +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c +index 2d094d417ecae..e2546961add3e 100644 +--- a/net/ipv4/esp4.c ++++ b/net/ipv4/esp4.c +@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb) + skb->csum = csum_block_sub(skb->csum, csumdiff, + skb->len - trimlen); + } +- pskb_trim(skb, skb->len - trimlen); ++ ret = pskb_trim(skb, skb->len - trimlen); ++ if (unlikely(ret)) ++ return ret; + + ret = nexthdr[1]; + +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index eafa4a0335157..5eb1b8d302bbd 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc, + unsigned char scope) + { + struct fib_nh *nh; ++ __be32 saddr; + + if (nhc->nhc_family != AF_INET) + return inet_select_addr(nhc->nhc_dev, 0, scope); + + nh = container_of(nhc, struct fib_nh, nh_common); +- nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope); +- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); ++ saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope); + +- return nh->nh_saddr; ++ WRITE_ONCE(nh->nh_saddr, saddr); ++ WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid)); ++ ++ return saddr; + } + + __be32 fib_result_prefsrc(struct net *net, struct fib_result *res) +@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res) + struct fib_nh *nh; + + nh = container_of(nhc, struct fib_nh, nh_common); +- if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid)) +- return nh->nh_saddr; ++ if (READ_ONCE(nh->nh_saddr_genid) == ++ atomic_read(&net->ipv4.dev_addr_genid)) ++ return READ_ONCE(nh->nh_saddr); + } + + return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope); +@@ -1887,6 +1891,7 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local) + continue; + if (fi->fib_prefsrc == local) { + fi->fib_flags |= RTNH_F_DEAD; ++ fi->pfsrc_removed = true; + ret++; + } + } +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index d13fb9e76b971..9bdfdab906fe0 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -2027,6 +2027,7 @@ void fib_table_flush_external(struct fib_table *tb) + int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) + { + struct trie *t = (struct trie *)tb->tb_data; ++ struct nl_info info = { .nl_net = net }; + struct key_vector *pn = t->kv; + unsigned long cindex = 1; + struct hlist_node *tmp; +@@ -2089,6 +2090,9 @@ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) + + fib_notify_alias_delete(net, n->key, &n->leaf, fa, + NULL); ++ if (fi->pfsrc_removed) ++ rtmsg_fib(RTM_DELROUTE, htonl(n->key), fa, ++ KEYLENGTH - fa->fa_slen, tb->tb_id, &info, 0); + hlist_del_rcu(&fa->fa_list); + fib_release_info(fa->fa_info); + alias_free_mem_rcu(fa); +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 62a3b103f258a..80ce0112e24b4 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -1143,7 +1143,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, + if (newsk) { + struct inet_connection_sock *newicsk = inet_csk(newsk); + +- newsk->sk_wait_pending = 0; + inet_sk_set_state(newsk, TCP_SYN_RECV); + newicsk->icsk_bind_hash = NULL; + newicsk->icsk_bind2_hash = NULL; +diff --git 
a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c +index d79de4b95186b..62d9472ac8bca 100644 +--- a/net/ipv4/inet_hashtables.c ++++ b/net/ipv4/inet_hashtables.c +@@ -148,8 +148,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2, + const struct sock *sk) + { + #if IS_ENABLED(CONFIG_IPV6) +- if (sk->sk_family != tb2->family) +- return false; ++ if (sk->sk_family != tb2->family) { ++ if (sk->sk_family == AF_INET) ++ return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) && ++ tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr; ++ ++ return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) && ++ sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr; ++ } + + if (sk->sk_family == AF_INET6) + return ipv6_addr_equal(&tb2->v6_rcv_saddr, +@@ -799,19 +805,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb, + tb->l3mdev != l3mdev) + return false; + +-#if IS_ENABLED(CONFIG_IPV6) +- if (sk->sk_family != tb->family) { +- if (sk->sk_family == AF_INET) +- return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) && +- tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr; +- +- return false; +- } +- +- if (sk->sk_family == AF_INET6) +- return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); +-#endif +- return tb->rcv_saddr == sk->sk_rcv_saddr; ++ return inet_bind2_bucket_addr_match(tb, sk); + } + + bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net, +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 96fdde6e42b1b..288678f17ccaf 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -827,7 +827,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, + */ + if (!skb_queue_empty(&sk->sk_receive_queue)) + break; +- sk_wait_data(sk, &timeo, NULL); ++ ret = sk_wait_data(sk, &timeo, NULL); ++ if (ret < 0) ++ break; + if (signal_pending(current)) { + ret = sock_intr_errno(timeo); + break; +@@ -2549,7 +2551,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, + __sk_flush_backlog(sk); + } else { + tcp_cleanup_rbuf(sk, copied); +- sk_wait_data(sk, &timeo, last); ++ err = sk_wait_data(sk, &timeo, last); ++ if (err < 0) { ++ err = copied ? : err; ++ goto out; ++ } + } + + if ((flags & MSG_PEEK) && +@@ -3073,12 +3079,6 @@ int tcp_disconnect(struct sock *sk, int flags) + int old_state = sk->sk_state; + u32 seq; + +- /* Deny disconnect if other threads are blocked in sk_wait_event() +- * or inet_wait_for_connect(). 
+- */ +- if (sk->sk_wait_pending) +- return -EBUSY; +- + if (old_state != TCP_CLOSE) + tcp_set_state(sk, TCP_CLOSE); + +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index f53380fd89bcf..f8037d142bb75 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -302,6 +302,10 @@ msg_bytes_ready: + } + + data = tcp_msg_wait_data(sk, psock, timeo); ++ if (data < 0) { ++ copied = data; ++ goto unlock; ++ } + if (data && !sk_psock_queue_empty(psock)) + goto msg_bytes_ready; + copied = -EAGAIN; +@@ -312,6 +316,8 @@ out: + tcp_rcv_space_adjust(sk); + if (copied > 0) + __tcp_cleanup_rbuf(sk, copied); ++ ++unlock: + release_sock(sk); + sk_psock_put(sk, psock); + return copied; +@@ -346,6 +352,10 @@ msg_bytes_ready: + + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + data = tcp_msg_wait_data(sk, psock, timeo); ++ if (data < 0) { ++ ret = data; ++ goto unlock; ++ } + if (data) { + if (!sk_psock_queue_empty(psock)) + goto msg_bytes_ready; +@@ -356,6 +366,8 @@ msg_bytes_ready: + copied = -EAGAIN; + } + ret = copied; ++ ++unlock: + release_sock(sk); + sk_psock_put(sk, psock); + return ret; +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 5df19f93f86ab..7ebbbe561e402 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -1818,6 +1818,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, + #ifdef CONFIG_TLS_DEVICE + tail->decrypted != skb->decrypted || + #endif ++ !mptcp_skb_can_collapse(tail, skb) || + thtail->doff != th->doff || + memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th))) + goto no_coalesce; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 443b1cab25299..cc7ed86fb0a57 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2489,6 +2489,18 @@ static bool tcp_pacing_check(struct sock *sk) + return true; + } + ++static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk) ++{ ++ const struct rb_node *node = sk->tcp_rtx_queue.rb_node; ++ ++ /* No skb in the rtx queue. */ ++ if (!node) ++ return true; ++ ++ /* Only one skb in rtx queue. */ ++ return !node->rb_left && !node->rb_right; ++} ++ + /* TCP Small Queues : + * Control number of packets in qdisc/devices to two packets / or ~1 ms. + * (These limits are doubled for retransmits) +@@ -2526,12 +2538,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, + limit += extra_bytes; + } + if (refcount_read(&sk->sk_wmem_alloc) > limit) { +- /* Always send skb if rtx queue is empty. ++ /* Always send skb if rtx queue is empty or has one skb. + * No need to wait for TX completion to call us back, + * after softirq/tasklet schedule. + * This helps when TX completions are delayed too much. + */ +- if (tcp_rtx_queue_empty(sk)) ++ if (tcp_rtx_queue_empty_or_single_skb(sk)) + return false; + + set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); +@@ -2735,7 +2747,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) + { + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); +- u32 timeout, rto_delta_us; ++ u32 timeout, timeout_us, rto_delta_us; + int early_retrans; + + /* Don't do any loss probe on a Fast Open connection before 3WHS +@@ -2759,11 +2771,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) + * sample is available then probe after TCP_TIMEOUT_INIT. 
+ */ + if (tp->srtt_us) { +- timeout = usecs_to_jiffies(tp->srtt_us >> 2); ++ timeout_us = tp->srtt_us >> 2; + if (tp->packets_out == 1) +- timeout += TCP_RTO_MIN; ++ timeout_us += tcp_rto_min_us(sk); + else +- timeout += TCP_TIMEOUT_MIN; ++ timeout_us += TCP_TIMEOUT_MIN_US; ++ timeout = usecs_to_jiffies(timeout_us); + } else { + timeout = TCP_TIMEOUT_INIT; + } +diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c +index 50abaa941387d..c085793691102 100644 +--- a/net/ipv4/tcp_recovery.c ++++ b/net/ipv4/tcp_recovery.c +@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk) + tp->rack.advanced = 0; + tcp_rack_detect_loss(sk, &timeout); + if (timeout) { +- timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN; ++ timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, + timeout, inet_csk(sk)->icsk_rto); + } +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 14ed868680c6a..c2dcb5c613b6b 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb) + skb->csum = csum_block_sub(skb->csum, csumdiff, + skb->len - trimlen); + } +- pskb_trim(skb, skb->len - trimlen); ++ ret = pskb_trim(skb, skb->len - trimlen); ++ if (unlikely(ret)) ++ return ret; + + ret = nexthdr[1]; + +diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c +index ea435eba30534..f0053087d2e47 100644 +--- a/net/ipv6/xfrm6_policy.c ++++ b/net/ipv6/xfrm6_policy.c +@@ -118,11 +118,11 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) + { + struct xfrm_dst *xdst = (struct xfrm_dst *)dst; + +- if (likely(xdst->u.rt6.rt6i_idev)) +- in6_dev_put(xdst->u.rt6.rt6i_idev); + dst_destroy_metrics_generic(dst); + if (xdst->u.rt6.rt6i_uncached_list) + rt6_uncached_list_del(&xdst->u.rt6); ++ if (likely(xdst->u.rt6.rt6i_idev)) ++ in6_dev_put(xdst->u.rt6.rt6i_idev); + xfrm_dst_destroy(xdst); + } + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 0167413d56972..ee9f455bb2d18 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1748,7 +1748,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, + /* VHT can override some HT caps such as the A-MSDU max length */ + if (params->vht_capa) + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, +- params->vht_capa, link_sta); ++ params->vht_capa, NULL, ++ link_sta); + + if (params->he_capa) + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c +index 9dffc30795887..79d2c55052897 100644 +--- a/net/mac80211/ibss.c ++++ b/net/mac80211/ibss.c +@@ -1068,7 +1068,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, + &chandef); + memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, +- &cap_ie, ++ &cap_ie, NULL, + &sta->deflink); + if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap))) + rates_updated |= true; +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 27479bbb093ac..99a976ea17498 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -2062,6 +2062,7 @@ void + ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_vht_cap *vht_cap_ie, ++ const struct ieee80211_vht_cap *vht_cap_ie2, + struct link_sta_info *link_sta); + enum ieee80211_sta_rx_bandwidth + ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta); +diff --git a/net/mac80211/mesh_plink.c 
b/net/mac80211/mesh_plink.c +index ddfe5102b9a43..bd0b7c189adfa 100644 +--- a/net/mac80211/mesh_plink.c ++++ b/net/mac80211/mesh_plink.c +@@ -443,7 +443,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, + changed |= IEEE80211_RC_BW_CHANGED; + + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, +- elems->vht_cap_elem, ++ elems->vht_cap_elem, NULL, + &sta->deflink); + + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap, +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index dc9e7eb7dd857..c07645c999f9a 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -4083,10 +4083,33 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, + elems->ht_cap_elem, + link_sta); + +- if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) ++ if (elems->vht_cap_elem && ++ !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) { ++ const struct ieee80211_vht_cap *bss_vht_cap = NULL; ++ const struct cfg80211_bss_ies *ies; ++ ++ /* ++ * Cisco AP module 9115 with FW 17.3 has a bug and sends a ++ * too large maximum MPDU length in the association response ++ * (indicating 12k) that it cannot actually process ... ++ * Work around that. ++ */ ++ rcu_read_lock(); ++ ies = rcu_dereference(cbss->ies); ++ if (ies) { ++ const struct element *elem; ++ ++ elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY, ++ ies->data, ies->len); ++ if (elem && elem->datalen >= sizeof(*bss_vht_cap)) ++ bss_vht_cap = (const void *)elem->data; ++ } ++ + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, + elems->vht_cap_elem, +- link_sta); ++ bss_vht_cap, link_sta); ++ rcu_read_unlock(); ++ } + + if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) && + elems->he_cap) { +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index 2f9e1abdf375d..2db103a56a28f 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -680,7 +680,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) + } + + if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED && +- !ieee80211_is_deauth(hdr->frame_control))) ++ !ieee80211_is_deauth(hdr->frame_control)) && ++ tx->skb->protocol != tx->sdata->control_port_protocol) + return TX_DROP; + + if (!skip_hw && tx->key && +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c +index 803de58814852..f7526be8a1c7e 100644 +--- a/net/mac80211/vht.c ++++ b/net/mac80211/vht.c +@@ -4,7 +4,7 @@ + * + * Portions of this file + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH +- * Copyright (C) 2018 - 2022 Intel Corporation ++ * Copyright (C) 2018 - 2023 Intel Corporation + */ + + #include +@@ -116,12 +116,14 @@ void + ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_vht_cap *vht_cap_ie, ++ const struct ieee80211_vht_cap *vht_cap_ie2, + struct link_sta_info *link_sta) + { + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap; + struct ieee80211_sta_vht_cap own_cap; + u32 cap_info, i; + bool have_80mhz; ++ u32 mpdu_len; + + memset(vht_cap, 0, sizeof(*vht_cap)); + +@@ -317,11 +319,21 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + + link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta); + ++ /* ++ * Work around the Cisco 9115 FW 17.3 bug by taking the min of ++ * both reported MPDU lengths. 
++ */ ++ mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK; ++ if (vht_cap_ie2) ++ mpdu_len = min_t(u32, mpdu_len, ++ le32_get_bits(vht_cap_ie2->vht_cap_info, ++ IEEE80211_VHT_CAP_MAX_MPDU_MASK)); ++ + /* + * FIXME - should the amsdu len be per link? store per link + * and maintain a minimum? + */ +- switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { ++ switch (mpdu_len) { + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: + link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454; + break; +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 881e05193ac97..0eb20274459c8 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -1342,7 +1342,7 @@ alloc_skb: + if (copy == 0) { + u64 snd_una = READ_ONCE(msk->snd_una); + +- if (snd_una != msk->snd_nxt) { ++ if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { + tcp_remove_empty_skb(ssk); + return 0; + } +@@ -1350,11 +1350,6 @@ alloc_skb: + zero_window_probe = true; + data_seq = snd_una - 1; + copy = 1; +- +- /* all mptcp-level data is acked, no skbs should be present into the +- * ssk write queue +- */ +- WARN_ON_ONCE(reuse_skb); + } + + copy = min_t(size_t, copy, info->limit - info->sent); +@@ -1383,7 +1378,6 @@ alloc_skb: + if (reuse_skb) { + TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; + mpext->data_len += copy; +- WARN_ON_ONCE(zero_window_probe); + goto out; + } + +@@ -2374,6 +2368,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk) + #define MPTCP_CF_PUSH BIT(1) + #define MPTCP_CF_FASTCLOSE BIT(2) + ++/* be sure to send a reset only if the caller asked for it, also ++ * clean completely the subflow status when the subflow reaches ++ * TCP_CLOSE state ++ */ ++static void __mptcp_subflow_disconnect(struct sock *ssk, ++ struct mptcp_subflow_context *subflow, ++ unsigned int flags) ++{ ++ if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || ++ (flags & MPTCP_CF_FASTCLOSE)) { ++ /* The MPTCP code never wait on the subflow sockets, TCP-level ++ * disconnect should never fail ++ */ ++ WARN_ON_ONCE(tcp_disconnect(ssk, 0)); ++ mptcp_subflow_ctx_reset(subflow); ++ } else { ++ tcp_shutdown(ssk, SEND_SHUTDOWN); ++ } ++} ++ + /* subflow sockets can be either outgoing (connect) or incoming + * (accept). + * +@@ -2411,7 +2425,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); + + if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) { +- /* be sure to force the tcp_disconnect() path, ++ /* be sure to force the tcp_close path + * to generate the egress reset + */ + ssk->sk_lingertime = 0; +@@ -2421,12 +2435,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + + need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); + if (!dispose_it) { +- /* The MPTCP code never wait on the subflow sockets, TCP-level +- * disconnect should never fail +- */ +- WARN_ON_ONCE(tcp_disconnect(ssk, 0)); ++ __mptcp_subflow_disconnect(ssk, subflow, flags); + msk->subflow->state = SS_UNCONNECTED; +- mptcp_subflow_ctx_reset(subflow); + release_sock(ssk); + + goto out; +@@ -3107,12 +3117,6 @@ static int mptcp_disconnect(struct sock *sk, int flags) + { + struct mptcp_sock *msk = mptcp_sk(sk); + +- /* Deny disconnect if other threads are blocked in sk_wait_event() +- * or inet_wait_for_connect(). +- */ +- if (sk->sk_wait_pending) +- return -EBUSY; +- + /* We are on the fastopen error path. 
We can't call straight into the + * subflows cleanup code due to lock nesting (we are already under + * msk->firstsocket lock). +@@ -3180,7 +3184,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk, + inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); + #endif + +- nsk->sk_wait_pending = 0; + __mptcp_init_sock(nsk); + + msk = mptcp_sk(nsk); +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c +index d7de2ecb287eb..f44f2eaf32172 100644 +--- a/net/netfilter/nft_payload.c ++++ b/net/netfilter/nft_payload.c +@@ -132,7 +132,7 @@ void nft_payload_eval(const struct nft_expr *expr, + + switch (priv->base) { + case NFT_PAYLOAD_LL_HEADER: +- if (!skb_mac_header_was_set(skb)) ++ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0) + goto err; + + if (skb_vlan_tag_present(skb)) { +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c +index 2660ceab3759d..e34662f4a71e0 100644 +--- a/net/netfilter/nft_set_rbtree.c ++++ b/net/netfilter/nft_set_rbtree.c +@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net, + nft_rbtree_interval_end(this)) { + parent = parent->rb_right; + continue; ++ } else if (nft_set_elem_expired(&rbe->ext)) { ++ break; + } else if (!nft_set_elem_active(&rbe->ext, genmask)) { + parent = parent->rb_left; + continue; +diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c +index 0935527d1d12b..b68150c971d0b 100644 +--- a/net/nfc/nci/spi.c ++++ b/net/nfc/nci/spi.c +@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge) + int ret; + + skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL); ++ if (!skb) ++ return -ENOMEM; + + /* add the NCI SPI header to the start of the buffer */ + hdr = skb_push(skb, NCI_SPI_HDR_LEN); +diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c +index f5afc9bcdee65..2cc95c8dc4c7b 100644 +--- a/net/rfkill/rfkill-gpio.c ++++ b/net/rfkill/rfkill-gpio.c +@@ -98,13 +98,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev) + + rfkill->clk = devm_clk_get(&pdev->dev, NULL); + +- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); ++ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + rfkill->reset_gpio = gpio; + +- gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW); ++ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c +index 61d52594ff6d8..54dddc2ff5025 100644 +--- a/net/sched/sch_hfsc.c ++++ b/net/sched/sch_hfsc.c +@@ -903,6 +903,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc, + cl->cl_flags |= HFSC_USC; + } + ++static void ++hfsc_upgrade_rt(struct hfsc_class *cl) ++{ ++ cl->cl_fsc = cl->cl_rsc; ++ rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total); ++ cl->cl_flags |= HFSC_FSC; ++} ++ + static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = { + [TCA_HFSC_RSC] = { .len = sizeof(struct tc_service_curve) }, + [TCA_HFSC_FSC] = { .len = sizeof(struct tc_service_curve) }, +@@ -1012,10 +1020,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + if (parent == NULL) + return -ENOENT; + } +- if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) { +- NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC"); +- return -EINVAL; +- } + + if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0) + return -EINVAL; +@@ -1066,6 +1070,12 @@ hfsc_change_class(struct Qdisc 
*sch, u32 classid, u32 parentid, + cl->cf_tree = RB_ROOT; + + sch_tree_lock(sch); ++ /* Check if the inner class is a misconfigured 'rt' */ ++ if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) { ++ NL_SET_ERR_MSG(extack, ++ "Forced curve change on parent 'rt' to 'sc'"); ++ hfsc_upgrade_rt(parent); ++ } + qdisc_class_hash_insert(&q->clhash, &cl->cl_common); + list_add_tail(&cl->siblings, &parent->children); + if (parent->level == 0) +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c +index f774d840759d6..4ea41d6e36969 100644 +--- a/net/smc/af_smc.c ++++ b/net/smc/af_smc.c +@@ -1187,6 +1187,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc, + struct smc_clc_first_contact_ext *fce = + (struct smc_clc_first_contact_ext *) + (((u8 *)clc_v2) + sizeof(*clc_v2)); ++ struct net *net = sock_net(&smc->sk); + + if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1) + return 0; +@@ -1195,7 +1196,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc, + memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN); + ini->smcrv2.uses_gateway = false; + } else { +- if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr, ++ if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr, + smc_ib_gid_to_ipv4(aclc->r0.lcl.gid), + ini->smcrv2.nexthop_mac, + &ini->smcrv2.uses_gateway)) +@@ -2322,7 +2323,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc, + smc_find_ism_store_rc(rc, ini); + return (!rc) ? 0 : ini->rc; + } +- return SMC_CLC_DECL_NOSMCDEV; ++ return prfx_rc; + } + + /* listen worker: finish RDMA setup */ +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c +index 854772dd52fd1..ace8611735321 100644 +--- a/net/smc/smc_ib.c ++++ b/net/smc/smc_ib.c +@@ -193,7 +193,7 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) + return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; + } + +-int smc_ib_find_route(__be32 saddr, __be32 daddr, ++int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr, + u8 nexthop_mac[], u8 *uses_gateway) + { + struct neighbour *neigh = NULL; +@@ -205,7 +205,7 @@ int smc_ib_find_route(__be32 saddr, __be32 daddr, + + if (daddr == cpu_to_be32(INADDR_NONE)) + goto out; +- rt = ip_route_output_flow(&init_net, &fl4, NULL); ++ rt = ip_route_output_flow(net, &fl4, NULL); + if (IS_ERR(rt)) + goto out; + if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET) +@@ -235,6 +235,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev, + if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) { + struct in_device *in_dev = __in_dev_get_rcu(ndev); ++ struct net *net = dev_net(ndev); + const struct in_ifaddr *ifa; + bool subnet_match = false; + +@@ -248,7 +249,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev, + } + if (!subnet_match) + goto out; +- if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr, ++ if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr, + smcrv2->daddr, + smcrv2->nexthop_mac, + &smcrv2->uses_gateway)) +diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h +index 034295676e881..ebcb05ede7f55 100644 +--- a/net/smc/smc_ib.h ++++ b/net/smc/smc_ib.h +@@ -113,7 +113,7 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk, + int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, + unsigned short vlan_id, u8 gid[], u8 *sgid_index, + struct smc_init_info_smcrv2 *smcrv2); +-int smc_ib_find_route(__be32 saddr, __be32 daddr, ++int smc_ib_find_route(struct net *net, __be32 saddr, __be32 
daddr, + u8 nexthop_mac[], u8 *uses_gateway); + bool smc_ib_is_valid_local_systemid(void); + int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb); +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index f2e7302a4d96b..338a443fa47b2 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -96,8 +96,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx) + + int wait_on_pending_writer(struct sock *sk, long *timeo) + { +- int rc = 0; + DEFINE_WAIT_FUNC(wait, woken_wake_function); ++ int ret, rc = 0; + + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +@@ -111,9 +111,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo) + break; + } + +- if (sk_wait_event(sk, timeo, +- !READ_ONCE(sk->sk_write_pending), &wait)) ++ ret = sk_wait_event(sk, timeo, ++ !READ_ONCE(sk->sk_write_pending), &wait); ++ if (ret) { ++ if (ret < 0) ++ rc = ret; + break; ++ } + } + remove_wait_queue(sk_sleep(sk), &wait); + return rc; +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 9be00ebbb2341..2af72d349192e 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -1296,6 +1296,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + DEFINE_WAIT_FUNC(wait, woken_wake_function); ++ int ret = 0; + long timeo; + + timeo = sock_rcvtimeo(sk, nonblock); +@@ -1307,6 +1308,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, + if (sk->sk_err) + return sock_error(sk); + ++ if (ret < 0) ++ return ret; ++ + if (!skb_queue_empty(&sk->sk_receive_queue)) { + tls_strp_check_rcv(&ctx->strp); + if (tls_strp_msg_ready(ctx)) +@@ -1325,10 +1329,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, + released = true; + add_wait_queue(sk_sleep(sk), &wait); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); +- sk_wait_event(sk, &timeo, +- tls_strp_msg_ready(ctx) || +- !sk_psock_queue_empty(psock), +- &wait); ++ ret = sk_wait_event(sk, &timeo, ++ tls_strp_msg_ready(ctx) || ++ !sk_psock_queue_empty(psock), ++ &wait); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + remove_wait_queue(sk_sleep(sk), &wait); + +@@ -1851,13 +1855,11 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot, + return sk_flush_backlog(sk); + } + +-static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, +- bool nonblock) ++static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx, ++ bool nonblock) + { + long timeo; +- int err; +- +- lock_sock(sk); ++ int ret; + + timeo = sock_rcvtimeo(sk, nonblock); + +@@ -1867,30 +1869,36 @@ static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, + ctx->reader_contended = 1; + + add_wait_queue(&ctx->wq, &wait); +- sk_wait_event(sk, &timeo, +- !READ_ONCE(ctx->reader_present), &wait); ++ ret = sk_wait_event(sk, &timeo, ++ !READ_ONCE(ctx->reader_present), &wait); + remove_wait_queue(&ctx->wq, &wait); + +- if (timeo <= 0) { +- err = -EAGAIN; +- goto err_unlock; +- } +- if (signal_pending(current)) { +- err = sock_intr_errno(timeo); +- goto err_unlock; +- } ++ if (timeo <= 0) ++ return -EAGAIN; ++ if (signal_pending(current)) ++ return sock_intr_errno(timeo); ++ if (ret < 0) ++ return ret; + } + + WRITE_ONCE(ctx->reader_present, 1); + + return 0; ++} + +-err_unlock: +- release_sock(sk); ++static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, ++ bool nonblock) ++{ ++ int err; ++ ++ lock_sock(sk); ++ err = 
tls_rx_reader_acquire(sk, ctx, nonblock); ++ if (err) ++ release_sock(sk); + return err; + } + +-static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx) ++static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx) + { + if (unlikely(ctx->reader_contended)) { + if (wq_has_sleeper(&ctx->wq)) +@@ -1902,6 +1910,11 @@ static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx) + } + + WRITE_ONCE(ctx->reader_present, 0); ++} ++ ++static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx) ++{ ++ tls_rx_reader_release(sk, ctx); + release_sock(sk); + } + +diff --git a/net/wireless/core.c b/net/wireless/core.c +index 2c79604672062..bf2f1f583fb12 100644 +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -1618,7 +1618,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work) + list_add_tail(&work->entry, &rdev->wiphy_work_list); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + +- schedule_work(&rdev->wiphy_work); ++ queue_work(system_unbound_wq, &rdev->wiphy_work); + } + EXPORT_SYMBOL_GPL(wiphy_work_queue); + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 1d993a490ac4b..b19b5acfaf3a9 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -8289,7 +8289,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb, + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; +- struct mesh_config cfg; ++ struct mesh_config cfg = {}; + u32 mask; + int err; + +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index e5c1510c098fd..b7e1631b3d80d 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -876,6 +876,10 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) + !cfg80211_find_ssid_match(ap, request)) + continue; + ++ if (!is_broadcast_ether_addr(request->bssid) && ++ !ether_addr_equal(request->bssid, ap->bssid)) ++ continue; ++ + if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid) + continue; + +diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c +index d71dbe822096a..85501b77f4e37 100644 +--- a/net/xfrm/xfrm_interface_core.c ++++ b/net/xfrm/xfrm_interface_core.c +@@ -379,8 +379,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err) + skb->dev = dev; + + if (err) { +- dev->stats.rx_errors++; +- dev->stats.rx_dropped++; ++ DEV_STATS_INC(dev, rx_errors); ++ DEV_STATS_INC(dev, rx_dropped); + + return 0; + } +@@ -425,7 +425,6 @@ static int + xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + { + struct xfrm_if *xi = netdev_priv(dev); +- struct net_device_stats *stats = &xi->dev->stats; + struct dst_entry *dst = skb_dst(skb); + unsigned int length = skb->len; + struct net_device *tdev; +@@ -464,7 +463,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + tdev = dst->dev; + + if (tdev == dev) { +- stats->collisions++; ++ DEV_STATS_INC(dev, collisions); + net_warn_ratelimited("%s: Local routing loop detected!\n", + dev->name); + goto tx_err_dst_release; +@@ -503,13 +502,13 @@ xmit: + if (net_xmit_eval(err) == 0) { + dev_sw_netstats_tx_add(dev, 1, length); + } else { +- stats->tx_errors++; +- stats->tx_aborted_errors++; ++ DEV_STATS_INC(dev, tx_errors); ++ DEV_STATS_INC(dev, tx_aborted_errors); + } + + return 0; + tx_err_link_failure: +- stats->tx_carrier_errors++; ++ DEV_STATS_INC(dev, tx_carrier_errors); + 
dst_link_failure(skb); + tx_err_dst_release: + dst_release(dst); +@@ -519,7 +518,6 @@ tx_err_dst_release: + static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct xfrm_if *xi = netdev_priv(dev); +- struct net_device_stats *stats = &xi->dev->stats; + struct dst_entry *dst = skb_dst(skb); + struct flowi fl; + int ret; +@@ -536,7 +534,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) + dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6); + if (dst->error) { + dst_release(dst); +- stats->tx_carrier_errors++; ++ DEV_STATS_INC(dev, tx_carrier_errors); + goto tx_err; + } + skb_dst_set(skb, dst); +@@ -552,7 +550,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) + fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; + rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4); + if (IS_ERR(rt)) { +- stats->tx_carrier_errors++; ++ DEV_STATS_INC(dev, tx_carrier_errors); + goto tx_err; + } + skb_dst_set(skb, &rt->dst); +@@ -571,8 +569,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) + return NETDEV_TX_OK; + + tx_err: +- stats->tx_errors++; +- stats->tx_dropped++; ++ DEV_STATS_INC(dev, tx_errors); ++ DEV_STATS_INC(dev, tx_dropped); + kfree_skb(skb); + return NETDEV_TX_OK; + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index e65de78cb61bf..e47c670c7e2cd 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -850,7 +850,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net, + struct hlist_node *newpos = NULL; + bool matches_s, matches_d; + +- if (!policy->bydst_reinsert) ++ if (policy->walk.dead || !policy->bydst_reinsert) + continue; + + WARN_ON_ONCE(policy->family != family); +@@ -1255,8 +1255,11 @@ static void xfrm_hash_rebuild(struct work_struct *work) + struct xfrm_pol_inexact_bin *bin; + u8 dbits, sbits; + ++ if (policy->walk.dead) ++ continue; ++ + dir = xfrm_policy_id2dir(policy->index); +- if (policy->walk.dead || dir >= XFRM_POLICY_MAX) ++ if (dir >= XFRM_POLICY_MAX) + continue; + + if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) { +@@ -1371,8 +1374,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild); + * of an absolute inpredictability of ordering of rules. This will not pass. 
*/ + static u32 xfrm_gen_index(struct net *net, int dir, u32 index) + { +- static u32 idx_generator; +- + for (;;) { + struct hlist_head *list; + struct xfrm_policy *p; +@@ -1380,8 +1381,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index) + int found; + + if (!index) { +- idx = (idx_generator | dir); +- idx_generator += 8; ++ idx = (net->xfrm.idx_generator | dir); ++ net->xfrm.idx_generator += 8; + } else { + idx = index; + index = 0; +@@ -1790,9 +1791,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid) + + again: + list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) { ++ if (pol->walk.dead) ++ continue; ++ + dir = xfrm_policy_id2dir(pol->index); +- if (pol->walk.dead || +- dir >= XFRM_POLICY_MAX || ++ if (dir >= XFRM_POLICY_MAX || + pol->type != type) + continue; + +@@ -3138,7 +3141,7 @@ no_transform: + } + + for (i = 0; i < num_pols; i++) +- pols[i]->curlft.use_time = ktime_get_real_seconds(); ++ WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds()); + + if (num_xfrms < 0) { + /* Prohibit the flow */ +diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c +index e22da8573116e..dd794990ad7ec 100644 +--- a/samples/fprobe/fprobe_example.c ++++ b/samples/fprobe/fprobe_example.c +@@ -48,7 +48,8 @@ static void show_backtrace(void) + stack_trace_print(stacks, len, 24); + } + +-static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) ++static void sample_entry_handler(struct fprobe *fp, unsigned long ip, ++ struct pt_regs *regs, void *data) + { + if (use_trace) + /* +@@ -63,7 +64,8 @@ static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_ + show_backtrace(); + } + +-static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) ++static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs, ++ void *data) + { + unsigned long rip = instruction_pointer(regs); + +diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c +index 80aab2aa72246..ff8cce1757849 100644 +--- a/scripts/kallsyms.c ++++ b/scripts/kallsyms.c +@@ -602,7 +602,10 @@ static void write_src(void) + sort_symbols_by_name(); + output_label("kallsyms_seqs_of_names"); + for (i = 0; i < table_cnt; i++) +- printf("\t.long\t%u\n", table[i]->seq); ++ printf("\t.byte 0x%02x, 0x%02x, 0x%02x\n", ++ (unsigned char)(table[i]->seq >> 16), ++ (unsigned char)(table[i]->seq >> 8), ++ (unsigned char)(table[i]->seq >> 0)); + printf("\n"); + + output_label("kallsyms_token_table"); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 14e70e2f9c881..0163d4c7fdda8 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -7006,6 +7006,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec, + 0x0); /* Make sure 0x14 was disable */ + } + } ++/* Fix none verb table of Headset Mic pin */ ++static void alc_fixup_headset_mic(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ static const struct hda_pintbl pincfgs[] = { ++ { 0x19, 0x03a1103c }, ++ { } ++ }; ++ ++ switch (action) { ++ case HDA_FIXUP_ACT_PRE_PROBE: ++ snd_hda_apply_pincfgs(codec, pincfgs); ++ alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12); ++ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; ++ break; ++ } ++} + + + enum { +@@ -7270,6 +7288,7 @@ enum { + ALC245_FIXUP_HP_X360_MUTE_LEDS, + ALC287_FIXUP_THINKPAD_I2S_SPK, + ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD, ++ 
ALC2XX_FIXUP_HEADSET_MIC, + }; + + /* A special fixup for Lenovo C940 and Yoga Duet 7; +@@ -9359,6 +9378,10 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI, + }, ++ [ALC2XX_FIXUP_HEADSET_MIC] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_headset_mic, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -9626,6 +9649,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), ++ SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), + SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED), +@@ -9694,6 +9718,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), + SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), ++ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), +@@ -10633,6 +10658,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = { + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, + {0x19, 0x40000000}, + {0x1a, 0x40000000}), ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC, ++ {0x19, 0x40000000}), + {} + }; + +diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c +index 9c10200ff34b2..5b5b7c267a616 100644 +--- a/sound/soc/codecs/wcd938x-sdw.c ++++ b/sound/soc/codecs/wcd938x-sdw.c +@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev, + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + +- return component_add(dev, &wcd938x_sdw_component_ops); ++ ret = component_add(dev, &wcd938x_sdw_component_ops); ++ if (ret) ++ goto err_disable_rpm; ++ ++ return 0; ++ ++err_disable_rpm: ++ pm_runtime_disable(dev); ++ pm_runtime_set_suspended(dev); ++ pm_runtime_dont_use_autosuspend(dev); ++ ++ return ret; ++} ++ ++static int wcd9380_remove(struct sdw_slave *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ ++ component_del(dev, &wcd938x_sdw_component_ops); ++ ++ pm_runtime_disable(dev); ++ pm_runtime_set_suspended(dev); ++ pm_runtime_dont_use_autosuspend(dev); ++ ++ return 0; + } + + static const struct sdw_device_id wcd9380_slave_id[] = { +@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = { + + static struct sdw_driver wcd9380_codec_driver = { + .probe = wcd9380_probe, ++ .remove = wcd9380_remove, + .ops = &wcd9380_slave_ops, + .id_table = wcd9380_slave_id, + .driver = { +diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c +index 2316481c2541b..c3964aa00b288 100644 +--- a/sound/soc/codecs/wcd938x.c ++++ b/sound/soc/codecs/wcd938x.c +@@ -3441,7 +3441,8 @@ static 
int wcd938x_bind(struct device *dev) + wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode); + if (!wcd938x->rxdev) { + dev_err(dev, "could not find slave with matching of node\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_unbind; + } + wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev); + wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x; +@@ -3449,46 +3450,47 @@ static int wcd938x_bind(struct device *dev) + wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode); + if (!wcd938x->txdev) { + dev_err(dev, "could not find txslave with matching of node\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_put_rxdev; + } + wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev); + wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x; + wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev); +- if (!wcd938x->tx_sdw_dev) { +- dev_err(dev, "could not get txslave with matching of dev\n"); +- return -EINVAL; +- } + + /* As TX is main CSR reg interface, which should not be suspended first. + * expicilty add the dependency link */ + if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME)) { + dev_err(dev, "could not devlink tx and rx\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_put_txdev; + } + + if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME)) { + dev_err(dev, "could not devlink wcd and tx\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_remove_rxtx_link; + } + + if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME)) { + dev_err(dev, "could not devlink wcd and rx\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_remove_tx_link; + } + + wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL); + if (!wcd938x->regmap) { + dev_err(dev, "could not get TX device regmap\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_remove_rx_link; + } + + ret = wcd938x_irq_init(wcd938x, dev); + if (ret) { + dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret); +- return ret; ++ goto err_remove_rx_link; + } + + wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq; +@@ -3497,27 +3499,45 @@ static int wcd938x_bind(struct device *dev) + ret = wcd938x_set_micbias_data(wcd938x); + if (ret < 0) { + dev_err(dev, "%s: bad micbias pdata\n", __func__); +- return ret; ++ goto err_remove_rx_link; + } + + ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x, + wcd938x_dais, ARRAY_SIZE(wcd938x_dais)); +- if (ret) ++ if (ret) { + dev_err(dev, "%s: Codec registration failed\n", + __func__); ++ goto err_remove_rx_link; ++ } + +- return ret; ++ return 0; ++ ++err_remove_rx_link: ++ device_link_remove(dev, wcd938x->rxdev); ++err_remove_tx_link: ++ device_link_remove(dev, wcd938x->txdev); ++err_remove_rxtx_link: ++ device_link_remove(wcd938x->rxdev, wcd938x->txdev); ++err_put_txdev: ++ put_device(wcd938x->txdev); ++err_put_rxdev: ++ put_device(wcd938x->rxdev); ++err_unbind: ++ component_unbind_all(dev, wcd938x); + ++ return ret; + } + + static void wcd938x_unbind(struct device *dev) + { + struct wcd938x_priv *wcd938x = dev_get_drvdata(dev); + ++ snd_soc_unregister_component(dev); + device_link_remove(dev, wcd938x->txdev); + device_link_remove(dev, wcd938x->rxdev); + device_link_remove(wcd938x->rxdev, wcd938x->txdev); +- snd_soc_unregister_component(dev); ++ put_device(wcd938x->txdev); ++ put_device(wcd938x->rxdev); + component_unbind_all(dev, wcd938x); + } + +diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c +index 430dd446321e5..452f0caf415b9 100644 +--- 
a/sound/soc/pxa/pxa-ssp.c ++++ b/sound/soc/pxa/pxa-ssp.c +@@ -779,7 +779,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai) + if (IS_ERR(priv->extclk)) { + ret = PTR_ERR(priv->extclk); + if (ret == -EPROBE_DEFER) +- return ret; ++ goto err_priv; + + priv->extclk = NULL; + } +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc +new file mode 100644 +index 0000000000000..bc9514428dbaf +--- /dev/null ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc +@@ -0,0 +1,13 @@ ++#!/bin/sh ++# SPDX-License-Identifier: GPL-2.0 ++# description: Test failure of registering kprobe on non unique symbol ++# requires: kprobe_events ++ ++SYMBOL='name_show' ++ ++# We skip this test on kernel where SYMBOL is unique or does not exist. ++if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then ++ exit_unsupported ++fi ++ ++! echo "p:test_non_unique ${SYMBOL}" > kprobe_events +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 7b20878a1af59..ea6fc59e9f62f 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -1413,7 +1413,9 @@ chk_rst_nr() + count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx") + if [ -z "$count" ]; then + echo -n "[skip]" +- elif [ $count -lt $rst_tx ]; then ++ # accept more rst than expected except if we don't expect any ++ elif { [ $rst_tx -ne 0 ] && [ $count -lt $rst_tx ]; } || ++ { [ $rst_tx -eq 0 ] && [ $count -ne 0 ]; }; then + echo "[fail] got $count MP_RST[s] TX expected $rst_tx" + fail_test + dump_stats=1 +@@ -1425,7 +1427,9 @@ chk_rst_nr() + count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx") + if [ -z "$count" ]; then + echo -n "[skip]" +- elif [ "$count" -lt "$rst_rx" ]; then ++ # accept more rst than expected except if we don't expect any ++ elif { [ $rst_rx -ne 0 ] && [ $count -lt $rst_rx ]; } || ++ { [ $rst_rx -eq 0 ] && [ $count -ne 0 ]; }; then + echo "[fail] got $count MP_RST[s] RX expected $rst_rx" + fail_test + dump_stats=1 +@@ -2259,6 +2263,7 @@ remove_tests() + run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow + chk_join_nr 1 1 1 + chk_rm_nr 1 1 ++ chk_rst_nr 0 0 + fi + + # multiple subflows, remove +@@ -2270,6 +2275,7 @@ remove_tests() + run_tests $ns1 $ns2 10.0.1.1 0 0 -2 slow + chk_join_nr 2 2 2 + chk_rm_nr 2 2 ++ chk_rst_nr 0 0 + fi + + # single address, remove +@@ -2281,6 +2287,7 @@ remove_tests() + chk_join_nr 1 1 1 + chk_add_nr 1 1 + chk_rm_nr 1 1 invert ++ chk_rst_nr 0 0 + fi + + # subflow and signal, remove +@@ -2293,6 +2300,7 @@ remove_tests() + chk_join_nr 2 2 2 + chk_add_nr 1 1 + chk_rm_nr 1 1 ++ chk_rst_nr 0 0 + fi + + # subflows and signal, remove +@@ -2306,6 +2314,7 @@ remove_tests() + chk_join_nr 3 3 3 + chk_add_nr 1 1 + chk_rm_nr 2 2 ++ chk_rst_nr 0 0 + fi + + # addresses remove +@@ -2319,6 +2328,7 @@ remove_tests() + chk_join_nr 3 3 3 + chk_add_nr 3 3 + chk_rm_nr 3 3 invert ++ chk_rst_nr 0 0 + fi + + # invalid addresses remove +@@ -2332,6 +2342,7 @@ remove_tests() + chk_join_nr 1 1 1 + chk_add_nr 3 3 + chk_rm_nr 3 1 invert ++ chk_rst_nr 0 0 + fi + + # subflows and signal, flush +@@ -2345,6 +2356,7 @@ remove_tests() + chk_join_nr 3 3 3 + chk_add_nr 1 1 + chk_rm_nr 1 3 invert simult ++ chk_rst_nr 0 0 + fi + + # subflows flush +@@ -2362,6 +2374,7 @@ remove_tests() + else + chk_rm_nr 3 3 + fi ++ chk_rst_nr 0 0 + fi + + # addresses flush +@@ -2375,6 +2388,7 @@ remove_tests() + chk_join_nr 3 3 3 
+ chk_add_nr 3 3 + chk_rm_nr 3 3 invert simult ++ chk_rst_nr 0 0 + fi + + # invalid addresses flush +@@ -2388,6 +2402,7 @@ remove_tests() + chk_join_nr 1 1 1 + chk_add_nr 3 3 + chk_rm_nr 3 1 invert ++ chk_rst_nr 0 0 + fi + + # remove id 0 subflow +@@ -2398,6 +2413,7 @@ remove_tests() + run_tests $ns1 $ns2 10.0.1.1 0 0 -9 slow + chk_join_nr 1 1 1 + chk_rm_nr 1 1 ++ chk_rst_nr 0 0 + fi + + # remove id 0 address +@@ -2409,6 +2425,7 @@ remove_tests() + chk_join_nr 1 1 1 + chk_add_nr 1 1 + chk_rm_nr 1 1 invert ++ chk_rst_nr 0 0 invert + fi + } + +diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh +index 7ce46700a3ae3..52054a09d575c 100755 +--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh ++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh +@@ -3,6 +3,8 @@ + # + # OVS kernel module self tests + ++trap ovs_exit_sig EXIT TERM INT ERR ++ + # Kselftest framework requirement - SKIP code is 4. + ksft_skip=4 + +@@ -115,7 +117,7 @@ run_test() { + fi + + if python3 ovs-dpctl.py -h 2>&1 | \ +- grep "Need to install the python" >/dev/null 2>&1; then ++ grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then + stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}" + return $ksft_skip + fi +diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +index 5d467d1993cb1..e787a1f967b0d 100644 +--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py ++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +@@ -17,8 +17,10 @@ try: + from pyroute2.netlink import nla + from pyroute2.netlink.exceptions import NetlinkError + from pyroute2.netlink.generic import GenericNetlinkSocket ++ import pyroute2 ++ + except ModuleNotFoundError: +- print("Need to install the python pyroute2 package.") ++ print("Need to install the python pyroute2 package >= 0.6.") + sys.exit(0) + + +@@ -280,6 +282,12 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()): + + + def main(argv): ++ # version check for pyroute2 ++ prverscheck = pyroute2.__version__.split(".") ++ if int(prverscheck[0]) == 0 and int(prverscheck[1]) < 6: ++ print("Need to upgrade the python pyroute2 package to >= 0.6.") ++ sys.exit(0) ++ + parser = argparse.ArgumentParser() + parser.add_argument( + "-v", +diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh +index bb34329e02a7f..5267c88496d51 100755 +--- a/tools/testing/selftests/netfilter/nft_audit.sh ++++ b/tools/testing/selftests/netfilter/nft_audit.sh +@@ -11,6 +11,12 @@ nft --version >/dev/null 2>&1 || { + exit $SKIP_RC + } + ++# Run everything in a separate network namespace ++[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; } ++ ++# give other scripts a chance to finish - audit_logread sees all activity ++sleep 1 ++ + logfile=$(mktemp) + rulefile=$(mktemp) + echo "logging into $logfile" +diff --git a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh +index a5cb4b09a46c4..0899019a7fcb4 100644 +--- a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh ++++ b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh +@@ -25,7 +25,7 @@ if [[ "$1" == "-cgroup-v2" ]]; then + fi + + if [[ $cgroup2 ]]; then +- cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') ++ cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}') + if [[ -z "$cgroup_path" ]]; then + cgroup_path=/dev/cgroup/memory + mount -t cgroup2 
none $cgroup_path +@@ -33,7 +33,7 @@ if [[ $cgroup2 ]]; then + fi + echo "+hugetlb" >$cgroup_path/cgroup.subtree_control + else +- cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') ++ cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') + if [[ -z "$cgroup_path" ]]; then + cgroup_path=/dev/cgroup/memory + mount -t cgroup memory,hugetlb $cgroup_path +diff --git a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh +index bf2d2a684edfd..14d26075c8635 100644 +--- a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh ++++ b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh +@@ -20,7 +20,7 @@ fi + + + if [[ $cgroup2 ]]; then +- CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') ++ CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}') + if [[ -z "$CGROUP_ROOT" ]]; then + CGROUP_ROOT=/dev/cgroup/memory + mount -t cgroup2 none $CGROUP_ROOT +@@ -28,7 +28,7 @@ if [[ $cgroup2 ]]; then + fi + echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control + else +- CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') ++ CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') + if [[ -z "$CGROUP_ROOT" ]]; then + CGROUP_ROOT=/dev/cgroup/memory + mount -t cgroup memory,hugetlb $CGROUP_ROOT