From 7282020937525dd50f3ada72de022959eb15b6dc Mon Sep 17 00:00:00 2001 From: Julian Sikorski Date: Thu, 5 Jun 2025 09:00:16 +0000 Subject: [PATCH] Update odroidxu4-current to 6.6.93 --- .../odroidxu4-6.6/patch-6.6.88-89.patch | 6884 ++++++ .../odroidxu4-6.6/patch-6.6.89-90.patch | 5797 +++++ .../odroidxu4-6.6/patch-6.6.90-91.patch | 4952 ++++ .../odroidxu4-6.6/patch-6.6.91-92.patch | 4627 ++++ .../odroidxu4-6.6/patch-6.6.92-93.patch | 20602 ++++++++++++++++ 5 files changed, 42862 insertions(+) create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.88-89.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.89-90.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.90-91.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.91-92.patch create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.92-93.patch diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.88-89.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.88-89.patch new file mode 100644 index 0000000000..56b336c431 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.88-89.patch @@ -0,0 +1,6884 @@ +diff --git a/Documentation/scheduler/sched-capacity.rst b/Documentation/scheduler/sched-capacity.rst +index e2c1cf7431588e..de414b33dd2abd 100644 +--- a/Documentation/scheduler/sched-capacity.rst ++++ b/Documentation/scheduler/sched-capacity.rst +@@ -39,14 +39,15 @@ per Hz, leading to:: + ------------------- + + Two different capacity values are used within the scheduler. A CPU's +-``capacity_orig`` is its maximum attainable capacity, i.e. its maximum +-attainable performance level. A CPU's ``capacity`` is its ``capacity_orig`` to +-which some loss of available performance (e.g. time spent handling IRQs) is +-subtracted. ++``original capacity`` is its maximum attainable capacity, i.e. its maximum ++attainable performance level. This original capacity is returned by ++the function arch_scale_cpu_capacity(). A CPU's ``capacity`` is its ``original ++capacity`` to which some loss of available performance (e.g. time spent ++handling IRQs) is subtracted. + + Note that a CPU's ``capacity`` is solely intended to be used by the CFS class, +-while ``capacity_orig`` is class-agnostic. The rest of this document will use +-the term ``capacity`` interchangeably with ``capacity_orig`` for the sake of ++while ``original capacity`` is class-agnostic. The rest of this document will use ++the term ``capacity`` interchangeably with ``original capacity`` for the sake of + brevity. 
+
+ 1.3 Platform examples
+diff --git a/Makefile b/Makefile
+index b1dfe3df7dfc9d..23e90df5785c84 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 88
++SUBLEVEL = 89
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi
+index 39110c1232e0da..db10b4b46cca9d 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000.dtsi
+@@ -196,13 +196,6 @@ key-power {
+ wakeup-event-action = <EV_ACT_ASSERTED>;
+ wakeup-source;
+ };
+-
+- key-suspend {
+- label = "Suspend";
+- gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
+- linux,input-type = <EV_KEY>;
+- linux,code = <KEY_SLEEP>;
+- };
+ };
+
+ fan: pwm-fan {
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 623cf80639decc..25aa993abebcea 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -59,6 +59,7 @@ config LOONGARCH
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF
++ select ARCH_USE_MEMTEST
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
+index f3ddaed9ef7f08..a5b63c84f8541a 100644
+--- a/arch/loongarch/include/asm/ptrace.h
++++ b/arch/loongarch/include/asm/ptrace.h
+@@ -33,9 +33,9 @@ struct pt_regs {
+ unsigned long __last[];
+ } __aligned(8);
+
+-static inline int regs_irqs_disabled(struct pt_regs *regs)
++static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+ {
+- return arch_irqs_disabled_flags(regs->csr_prmd);
++ return !(regs->csr_prmd & CSR_PRMD_PIE);
+ }
+
+ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
+index d59052c03d9b7e..2b4b99b4e6c94e 100644
+--- a/arch/loongarch/kernel/traps.c
++++ b/arch/loongarch/kernel/traps.c
+@@ -527,9 +527,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ #else
++ bool pie = regs_irqs_disabled(regs);
+ unsigned int *pc;
+
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_enable();
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+@@ -556,7 +557,7 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ out:
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_disable();
+ #endif
+ irqentry_exit(regs, state);
+@@ -588,12 +589,13 @@ static void bug_handler(struct pt_regs *regs)
+ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ {
+ bool user = user_mode(regs);
++ bool pie = regs_irqs_disabled(regs);
+ unsigned long era = exception_era(regs);
+ u64 badv = 0, lower = 0, upper = ULONG_MAX;
+ union loongarch_instruction insn;
+ irqentry_state_t state = irqentry_enter(regs);
+
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ local_irq_enable();
+
+ current->thread.trap_nr = read_csr_excode();
+@@ -659,7 +661,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
+
+ out:
+- if (regs->csr_prmd & CSR_PRMD_PIE)
++ if (!pie)
+ 
local_irq_disable(); + + irqentry_exit(regs, state); +@@ -677,11 +679,12 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs) + asmlinkage void noinstr do_bp(struct pt_regs *regs) + { + bool user = user_mode(regs); ++ bool pie = regs_irqs_disabled(regs); + unsigned int opcode, bcode; + unsigned long era = exception_era(regs); + irqentry_state_t state = irqentry_enter(regs); + +- if (regs->csr_prmd & CSR_PRMD_PIE) ++ if (!pie) + local_irq_enable(); + + if (__get_inst(&opcode, (u32 *)era, user)) +@@ -747,7 +750,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs) + } + + out: +- if (regs->csr_prmd & CSR_PRMD_PIE) ++ if (!pie) + local_irq_disable(); + + irqentry_exit(regs, state); +@@ -982,6 +985,7 @@ static void init_restore_lbt(void) + + asmlinkage void noinstr do_lbt(struct pt_regs *regs) + { ++ bool pie = regs_irqs_disabled(regs); + irqentry_state_t state = irqentry_enter(regs); + + /* +@@ -991,7 +995,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs) + * (including the user using 'MOVGR2GCSR' to turn on TM, which + * will not trigger the BTE), we need to check PRMD first. + */ +- if (regs->csr_prmd & CSR_PRMD_PIE) ++ if (!pie) + local_irq_enable(); + + if (!cpu_has_lbt) { +@@ -1005,7 +1009,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs) + preempt_enable(); + + out: +- if (regs->csr_prmd & CSR_PRMD_PIE) ++ if (!pie) + local_irq_disable(); + + irqentry_exit(regs, state); +diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c +index 1e76fcb83093dd..41308429f44612 100644 +--- a/arch/loongarch/mm/hugetlbpage.c ++++ b/arch/loongarch/mm/hugetlbpage.c +@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + pmd = pmd_offset(pud, addr); + } + } +- return (pte_t *) pmd; ++ return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd; + } + + int pmd_huge(pmd_t pmd) +diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c +index 4dd53427f65785..a5bf96993bb1a6 100644 +--- a/arch/loongarch/mm/init.c ++++ b/arch/loongarch/mm/init.c +@@ -64,9 +64,6 @@ void __init paging_init(void) + { + unsigned long max_zone_pfns[MAX_NR_ZONES]; + +-#ifdef CONFIG_ZONE_DMA +- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; +-#endif + #ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; + #endif +diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h +index 696b40beb774f5..8494466740ccad 100644 +--- a/arch/mips/include/asm/mips-cm.h ++++ b/arch/mips/include/asm/mips-cm.h +@@ -47,6 +47,16 @@ extern phys_addr_t __mips_cm_phys_base(void); + */ + extern int mips_cm_is64; + ++/* ++ * mips_cm_is_l2_hci_broken - determine if HCI is broken ++ * ++ * Some CM reports show that Hardware Cache Initialization is ++ * complete, but in reality it's not the case. They also incorrectly ++ * indicate that Hardware Cache Initialization is supported. This ++ * flags allows warning about this broken feature. ++ */ ++extern bool mips_cm_is_l2_hci_broken; ++ + /** + * mips_cm_error_report - Report CM cache errors + */ +@@ -85,6 +95,18 @@ static inline bool mips_cm_present(void) + #endif + } + ++/** ++ * mips_cm_update_property - update property from the device tree ++ * ++ * Retrieve the properties from the device tree if a CM node exist and ++ * update the internal variable based on this. 
++ */
++#ifdef CONFIG_MIPS_CM
++extern void mips_cm_update_property(void);
++#else
++static inline void mips_cm_update_property(void) {}
++#endif
++
+ /**
+ * mips_cm_has_l2sync - determine whether an L2-only sync region is present
+ *
+diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
+index 3f00788b08718d..4f75160f08949f 100644
+--- a/arch/mips/kernel/mips-cm.c
++++ b/arch/mips/kernel/mips-cm.c
+@@ -5,6 +5,7 @@
+ */
+
+ #include <linux/errno.h>
++#include <linux/of.h>
+ #include <linux/percpu.h>
+ #include <linux/spinlock.h>
+
+@@ -14,6 +15,7 @@
+ void __iomem *mips_gcr_base;
+ void __iomem *mips_cm_l2sync_base;
+ int mips_cm_is64;
++bool mips_cm_is_l2_hci_broken;
+
+ static char *cm2_tr[8] = {
+ "mem", "gcr", "gic", "mmio",
+@@ -243,6 +245,18 @@ static void mips_cm_probe_l2sync(void)
+ mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
+ }
+
++void mips_cm_update_property(void)
++{
++ struct device_node *cm_node;
++
++ cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
++ if (!cm_node)
++ return;
++ pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
++ mips_cm_is_l2_hci_broken = true;
++ of_node_put(cm_node);
++}
++
+ int mips_cm_probe(void)
+ {
+ phys_addr_t addr;
+diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
+index 0f9b3b5914cf69..b70b67adb855f6 100644
+--- a/arch/parisc/kernel/pdt.c
++++ b/arch/parisc/kernel/pdt.c
+@@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
+ #define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
+ #define PDT_ADDR_SINGLE_ERR 1UL
+
++#ifdef CONFIG_PROC_FS
+ /* report PDT entries via /proc/meminfo */
+ void arch_report_meminfo(struct seq_file *m)
+ {
+@@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
+ seq_printf(m, "PDT_cur_entries: %7lu\n",
+ pdt_status.pdt_entries);
+ }
++#endif
+
+ static int get_info_pat_new(void)
+ {
+diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
+index 721ec275ce57e3..231d777d936c2d 100644
+--- a/arch/riscv/include/asm/alternative-macros.h
++++ b/arch/riscv/include/asm/alternative-macros.h
+@@ -115,24 +115,19 @@
+ \old_c
+ .endm
+
+-#define _ALTERNATIVE_CFG(old_c, ...) \
+- ALTERNATIVE_CFG old_c
+-
+-#define _ALTERNATIVE_CFG_2(old_c, ...) \
+- ALTERNATIVE_CFG old_c
++#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
++#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
+
+ #else /* !__ASSEMBLY__ */
+
+-#define __ALTERNATIVE_CFG(old_c) \
+- old_c "\n"
++#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
++#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
+
+-#define _ALTERNATIVE_CFG(old_c, ...) \
+- __ALTERNATIVE_CFG(old_c)
++#endif /* __ASSEMBLY__ */
+
+-#define _ALTERNATIVE_CFG_2(old_c, ...) \
+- __ALTERNATIVE_CFG(old_c)
++#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
++#define _ALTERNATIVE_CFG_2(old_c, ...) 
__ALTERNATIVE_CFG_2(old_c) + +-#endif /* __ASSEMBLY__ */ + #endif /* CONFIG_RISCV_ALTERNATIVE */ + + /* +diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c +index b16352083ff987..f0be263b334ced 100644 +--- a/arch/s390/kvm/intercept.c ++++ b/arch/s390/kvm/intercept.c +@@ -94,7 +94,7 @@ static int handle_validity(struct kvm_vcpu *vcpu) + + vcpu->stat.exit_validity++; + trace_kvm_s390_intercept_validity(vcpu, viwhy); +- KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy, ++ KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy, + current->pid, vcpu->kvm); + + /* do not warn on invalid runtime instrumentation mode */ +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c +index efaebba5ee19c7..fe4841104ed924 100644 +--- a/arch/s390/kvm/interrupt.c ++++ b/arch/s390/kvm/interrupt.c +@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm) + if (!gi->origin) + return; + gisa_clear_ipm(gi->origin); +- VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin); ++ VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin); + } + + void kvm_s390_gisa_init(struct kvm *kvm) +@@ -3178,7 +3178,7 @@ void kvm_s390_gisa_init(struct kvm *kvm) + gi->timer.function = gisa_vcpu_kicker; + memset(gi->origin, 0, sizeof(struct kvm_s390_gisa)); + gi->origin->next_alert = (u32)virt_to_phys(gi->origin); +- VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin); ++ VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin); + } + + void kvm_s390_gisa_enable(struct kvm *kvm) +@@ -3219,7 +3219,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm) + process_gib_alert_list(); + hrtimer_cancel(&gi->timer); + gi->origin = NULL; +- VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa); ++ VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa); + } + + void kvm_s390_gisa_disable(struct kvm *kvm) +@@ -3468,7 +3468,7 @@ int __init kvm_s390_gib_init(u8 nisc) + } + } + +- KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc); ++ KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc); + goto out; + + out_unreg_gal: +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c +index 348d030d2660ca..890d850f51f076 100644 +--- a/arch/s390/kvm/kvm-s390.c ++++ b/arch/s390/kvm/kvm-s390.c +@@ -990,7 +990,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att + } + mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); +- VM_EVENT(kvm, 3, "New guest asce: 0x%pK", ++ VM_EVENT(kvm, 3, "New guest asce: 0x%p", + (void *) kvm->arch.gmap->asce); + break; + } +@@ -3418,7 +3418,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) + kvm_s390_gisa_init(kvm); + INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); + kvm->arch.pv.set_aside = NULL; +- KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); ++ KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); + + return 0; + out_err: +@@ -3481,7 +3481,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) + kvm_s390_destroy_adapters(kvm); + kvm_s390_clear_float_irqs(kvm); + kvm_s390_vsie_destroy(kvm); +- KVM_EVENT(3, "vm 0x%pK destroyed", kvm); ++ KVM_EVENT(3, "vm 0x%p destroyed", kvm); + } + + /* Section: vcpu related */ +@@ -3602,7 +3602,7 @@ static int sca_switch_to_extended(struct kvm *kvm) + + free_page((unsigned long)old_sca); + +- VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", ++ VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)", + old_sca, kvm->arch.sca); + return 0; + } +@@ -3974,7 +3974,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) + goto 
out_free_sie_block; + } + +- VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", ++ VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p", + vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); + trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); + +diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h +index 6f0209d45164f0..9c5f546a2e1a3c 100644 +--- a/arch/s390/kvm/trace-s390.h ++++ b/arch/s390/kvm/trace-s390.h +@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu, + __entry->sie_block = sie_block; + ), + +- TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK", ++ TP_printk("create cpu %d at 0x%p, sie block at 0x%p", + __entry->id, __entry->vcpu, __entry->sie_block) + ); + +@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css, + __entry->kvm = kvm; + ), + +- TP_printk("enabling channel I/O support (kvm @ %pK)\n", ++ TP_printk("enabling channel I/O support (kvm @ %p)\n", + __entry->kvm) + ); + +diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S +index 2143358d0c4c74..78fd2442b49dcd 100644 +--- a/arch/x86/entry/entry.S ++++ b/arch/x86/entry/entry.S +@@ -16,7 +16,7 @@ + + SYM_FUNC_START(entry_ibpb) + movl $MSR_IA32_PRED_CMD, %ecx +- movl $PRED_CMD_IBPB, %eax ++ movl _ASM_RIP(x86_pred_cmd), %eax + xorl %edx, %edx + wrmsr + +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c +index 1458ccaa6a0579..ad63bd408cd900 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -623,7 +623,7 @@ int x86_pmu_hw_config(struct perf_event *event) + if (event->attr.type == event->pmu->type) + event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; + +- if (!event->attr.freq && x86_pmu.limit_period) { ++ if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) { + s64 left = event->attr.sample_period; + x86_pmu.limit_period(event, &left); + if (left > event->attr.sample_period) +diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h +index ca8eed1d496ab4..2bec0c89a95c27 100644 +--- a/arch/x86/include/asm/asm.h ++++ b/arch/x86/include/asm/asm.h +@@ -229,9 +229,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP); + #define _ASM_EXTABLE_UA(from, to) \ + _ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS) + +-#define _ASM_EXTABLE_CPY(from, to) \ +- _ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY) +- + #define _ASM_EXTABLE_FAULT(from, to) \ + _ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT) + +diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h +index 991e31cfde94cc..afad9c0b07e0c8 100644 +--- a/arch/x86/include/asm/extable_fixup_types.h ++++ b/arch/x86/include/asm/extable_fixup_types.h +@@ -36,7 +36,7 @@ + #define EX_TYPE_DEFAULT 1 + #define EX_TYPE_FAULT 2 + #define EX_TYPE_UACCESS 3 +-#define EX_TYPE_COPY 4 ++/* unused, was: #define EX_TYPE_COPY 4 */ + #define EX_TYPE_CLEAR_FS 5 + #define EX_TYPE_FPU_RESTORE 6 + #define EX_TYPE_BPF 7 +diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h +index f81a851c46dca5..652c0137e909f3 100644 +--- a/arch/x86/include/asm/intel-family.h ++++ b/arch/x86/include/asm/intel-family.h +@@ -159,6 +159,8 @@ + #define INTEL_FAM6_GRANITERAPIDS_D 0xAE + #define INTEL_GRANITERAPIDS_D IFM(6, 0xAE) + ++#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */ ++ + /* "Hybrid" Processors (P-Core/E-Core) */ + + #define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */ +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 7df458a6553eb2..78545f7e9cc6ca 100644 +--- 
a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1574,7 +1574,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void) + rrsba_disabled = true; + } + +-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) ++static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) + { + /* + * Similar to context switches, there are two types of RSB attacks +@@ -1598,27 +1598,30 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_ + */ + switch (mode) { + case SPECTRE_V2_NONE: +- return; ++ break; + +- case SPECTRE_V2_EIBRS_LFENCE: + case SPECTRE_V2_EIBRS: ++ case SPECTRE_V2_EIBRS_LFENCE: ++ case SPECTRE_V2_EIBRS_RETPOLINE: + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { +- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); + pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); ++ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); + } +- return; ++ break; + +- case SPECTRE_V2_EIBRS_RETPOLINE: + case SPECTRE_V2_RETPOLINE: + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_IBRS: ++ pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); ++ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); + setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); +- pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); +- return; +- } ++ break; + +- pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); +- dump_stack(); ++ default: ++ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); ++ dump_stack(); ++ break; ++ } + } + + /* +@@ -1844,10 +1847,7 @@ static void __init spectre_v2_select_mitigation(void) + * + * FIXME: Is this pointless for retbleed-affected AMD? + */ +- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); +- pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); +- +- spectre_v2_determine_rsb_fill_type_at_vmexit(mode); ++ spectre_v2_select_rsb_mitigation(mode); + + /* + * Retpoline protects the kernel, but doesn't protect firmware. IBRS +diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c +index c4477162c07d13..9c5754229d6ed3 100644 +--- a/arch/x86/kernel/cpu/mce/severity.c ++++ b/arch/x86/kernel/cpu/mce/severity.c +@@ -288,14 +288,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs) + copy_user = is_copy_from_user(regs); + instrumentation_end(); + +- switch (fixup_type) { +- case EX_TYPE_UACCESS: +- case EX_TYPE_COPY: +- if (!copy_user) +- return IN_KERNEL; +- m->kflags |= MCE_IN_KERNEL_COPYIN; +- fallthrough; ++ if (copy_user) { ++ m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV; ++ return IN_KERNEL_RECOV; ++ } + ++ switch (fixup_type) { + case EX_TYPE_FAULT_MCE_SAFE: + case EX_TYPE_DEFAULT_MCE_SAFE: + m->kflags |= MCE_IN_KERNEL_RECOV; +diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c +index 80e262bb627fe1..cb9852ad609893 100644 +--- a/arch/x86/kernel/i8253.c ++++ b/arch/x86/kernel/i8253.c +@@ -46,7 +46,8 @@ bool __init pit_timer_init(void) + * VMMs otherwise steal CPU time just to pointlessly waggle + * the (masked) IRQ. 
+ */ +- clockevent_i8253_disable(); ++ scoped_guard(irq) ++ clockevent_i8253_disable(); + return false; + } + clockevent_i8253_init(true); +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c +index 4b74ea91f4e6bb..6970b11a6b4c62 100644 +--- a/arch/x86/kvm/svm/avic.c ++++ b/arch/x86/kvm/svm/avic.c +@@ -820,7 +820,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) + * Allocating new amd_iommu_pi_data, which will get + * add to the per-vcpu ir_list. + */ +- ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT); ++ ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT); + if (!ir) { + ret = -ENOMEM; + goto out; +@@ -896,6 +896,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + { + struct kvm_kernel_irq_routing_entry *e; + struct kvm_irq_routing_table *irq_rt; ++ bool enable_remapped_mode = true; + int idx, ret = 0; + + if (!kvm_arch_has_assigned_device(kvm) || +@@ -933,6 +934,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + kvm_vcpu_apicv_active(&svm->vcpu)) { + struct amd_iommu_pi_data pi; + ++ enable_remapped_mode = false; ++ + /* Try to enable guest_mode in IRTE */ + pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & + AVIC_HPA_MASK); +@@ -951,33 +954,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + */ + if (!ret && pi.is_guest_mode) + svm_ir_list_add(svm, &pi); +- } else { +- /* Use legacy mode in IRTE */ +- struct amd_iommu_pi_data pi; +- +- /** +- * Here, pi is used to: +- * - Tell IOMMU to use legacy mode for this interrupt. +- * - Retrieve ga_tag of prior interrupt remapping data. +- */ +- pi.prev_ga_tag = 0; +- pi.is_guest_mode = false; +- ret = irq_set_vcpu_affinity(host_irq, &pi); +- +- /** +- * Check if the posted interrupt was previously +- * setup with the guest_mode by checking if the ga_tag +- * was cached. If so, we need to clean up the per-vcpu +- * ir_list. +- */ +- if (!ret && pi.prev_ga_tag) { +- int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag); +- struct kvm_vcpu *vcpu; +- +- vcpu = kvm_get_vcpu_by_id(kvm, id); +- if (vcpu) +- svm_ir_list_del(to_svm(vcpu), &pi); +- } + } + + if (!ret && svm) { +@@ -993,6 +969,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + } + + ret = 0; ++ if (enable_remapped_mode) { ++ /* Use legacy mode in IRTE */ ++ struct amd_iommu_pi_data pi; ++ ++ /** ++ * Here, pi is used to: ++ * - Tell IOMMU to use legacy mode for this interrupt. ++ * - Retrieve ga_tag of prior interrupt remapping data. ++ */ ++ pi.prev_ga_tag = 0; ++ pi.is_guest_mode = false; ++ ret = irq_set_vcpu_affinity(host_irq, &pi); ++ ++ /** ++ * Check if the posted interrupt was previously ++ * setup with the guest_mode by checking if the ga_tag ++ * was cached. If so, we need to clean up the per-vcpu ++ * ir_list. 
++ */ ++ if (!ret && pi.prev_ga_tag) { ++ int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag); ++ struct kvm_vcpu *vcpu; ++ ++ vcpu = kvm_get_vcpu_by_id(kvm, id); ++ if (vcpu) ++ svm_ir_list_del(to_svm(vcpu), &pi); ++ } ++ } + out: + srcu_read_unlock(&kvm->irq_srcu, idx); + return ret; +diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c +index af662312fd0778..b54e0cb86e5d61 100644 +--- a/arch/x86/kvm/vmx/posted_intr.c ++++ b/arch/x86/kvm/vmx/posted_intr.c +@@ -274,6 +274,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + { + struct kvm_kernel_irq_routing_entry *e; + struct kvm_irq_routing_table *irq_rt; ++ bool enable_remapped_mode = true; + struct kvm_lapic_irq irq; + struct kvm_vcpu *vcpu; + struct vcpu_data vcpu_info; +@@ -312,21 +313,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + + kvm_set_msi_irq(kvm, e, &irq); + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || +- !kvm_irq_is_postable(&irq)) { +- /* +- * Make sure the IRTE is in remapped mode if +- * we don't handle it in posted mode. +- */ +- ret = irq_set_vcpu_affinity(host_irq, NULL); +- if (ret < 0) { +- printk(KERN_INFO +- "failed to back to remapped mode, irq: %u\n", +- host_irq); +- goto out; +- } +- ++ !kvm_irq_is_postable(&irq)) + continue; +- } + + vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); + vcpu_info.vector = irq.vector; +@@ -334,11 +322,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, + vcpu_info.vector, vcpu_info.pi_desc_addr, set); + +- if (set) +- ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); +- else +- ret = irq_set_vcpu_affinity(host_irq, NULL); ++ if (!set) ++ continue; + ++ enable_remapped_mode = false; ++ ++ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); + if (ret < 0) { + printk(KERN_INFO "%s: failed to update PI IRTE\n", + __func__); +@@ -346,6 +335,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, + } + } + ++ if (enable_remapped_mode) ++ ret = irq_set_vcpu_affinity(host_irq, NULL); ++ + ret = 0; + out: + srcu_read_unlock(&kvm->irq_srcu, idx); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 2a2dbeb56897d8..f67fe8a65820c8 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -13297,7 +13297,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, + bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) + { +- if (new->type != KVM_IRQ_ROUTING_MSI) ++ if (old->type != KVM_IRQ_ROUTING_MSI || ++ new->type != KVM_IRQ_ROUTING_MSI) + return true; + + return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); +diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c +index 271dcb2deabc31..2354c0156e51c9 100644 +--- a/arch/x86/mm/extable.c ++++ b/arch/x86/mm/extable.c +@@ -163,13 +163,6 @@ static bool ex_handler_uaccess(const struct exception_table_entry *fixup, + return ex_handler_default(fixup, regs); + } + +-static bool ex_handler_copy(const struct exception_table_entry *fixup, +- struct pt_regs *regs, int trapnr) +-{ +- WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. 
Non-canonical address?"); +- return ex_handler_fault(fixup, regs, trapnr); +-} +- + static bool ex_handler_msr(const struct exception_table_entry *fixup, + struct pt_regs *regs, bool wrmsr, bool safe, int reg) + { +@@ -267,8 +260,6 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, + return ex_handler_fault(e, regs, trapnr); + case EX_TYPE_UACCESS: + return ex_handler_uaccess(e, regs, trapnr, fault_addr); +- case EX_TYPE_COPY: +- return ex_handler_copy(e, regs, trapnr); + case EX_TYPE_CLEAR_FS: + return ex_handler_clear_fs(e, regs); + case EX_TYPE_FPU_RESTORE: +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index df1794a5e38a57..4872bb082b1935 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -392,9 +392,9 @@ static void cond_mitigation(struct task_struct *next) + prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec); + + /* +- * Avoid user/user BTB poisoning by flushing the branch predictor +- * when switching between processes. This stops one process from +- * doing Spectre-v2 attacks on another. ++ * Avoid user->user BTB/RSB poisoning by flushing them when switching ++ * between processes. This stops one process from doing Spectre-v2 ++ * attacks on another. + * + * Both, the conditional and the always IBPB mode use the mm + * pointer to avoid the IBPB when switching between tasks of the +diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S +index c4365a05ab83b3..fc46b4dfbd7475 100644 +--- a/arch/x86/platform/pvh/head.S ++++ b/arch/x86/platform/pvh/head.S +@@ -100,7 +100,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen) + xor %edx, %edx + wrmsr + +- call xen_prepare_pvh ++ /* Call xen_prepare_pvh() via the kernel virtual mapping */ ++ leaq xen_prepare_pvh(%rip), %rax ++ subq phys_base(%rip), %rax ++ addq $__START_KERNEL_map, %rax ++ ANNOTATE_RETPOLINE_SAFE ++ call *%rax + + /* startup_64 expects boot_params in %rsi. 
*/
+ mov $_pa(pvh_bootparams), %rsi
+diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
+index 5b84b0f7cc178f..3378670286535a 100644
+--- a/crypto/crypto_null.c
++++ b/crypto/crypto_null.c
+@@ -17,10 +17,10 @@
+ #include <crypto/internal/skcipher.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+-#include <linux/mm.h>
++#include <linux/spinlock.h>
+ #include <linux/string.h>
+
+-static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
++static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
+ static struct crypto_sync_skcipher *crypto_default_null_skcipher;
+ static int crypto_default_null_skcipher_refcnt;
+
+@@ -152,23 +152,32 @@ MODULE_ALIAS_CRYPTO("cipher_null");
+
+ struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
+ {
++ struct crypto_sync_skcipher *ntfm = NULL;
+ struct crypto_sync_skcipher *tfm;
+
+- mutex_lock(&crypto_default_null_skcipher_lock);
++ spin_lock_bh(&crypto_default_null_skcipher_lock);
+ tfm = crypto_default_null_skcipher;
+
+ if (!tfm) {
+- tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
+- if (IS_ERR(tfm))
+- goto unlock;
+-
+- crypto_default_null_skcipher = tfm;
++ spin_unlock_bh(&crypto_default_null_skcipher_lock);
++
++ ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
++ if (IS_ERR(ntfm))
++ return ntfm;
++
++ spin_lock_bh(&crypto_default_null_skcipher_lock);
++ tfm = crypto_default_null_skcipher;
++ if (!tfm) {
++ tfm = ntfm;
++ ntfm = NULL;
++ crypto_default_null_skcipher = tfm;
++ }
+ }
+
+ crypto_default_null_skcipher_refcnt++;
++ spin_unlock_bh(&crypto_default_null_skcipher_lock);
+
+-unlock:
+- mutex_unlock(&crypto_default_null_skcipher_lock);
++ crypto_free_sync_skcipher(ntfm);
+
+ return tfm;
+ }
+@@ -176,12 +185,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
+
+ void crypto_put_default_null_skcipher(void)
+ {
+- mutex_lock(&crypto_default_null_skcipher_lock);
++ struct crypto_sync_skcipher *tfm = NULL;
++
++ spin_lock_bh(&crypto_default_null_skcipher_lock);
+ if (!--crypto_default_null_skcipher_refcnt) {
+- crypto_free_sync_skcipher(crypto_default_null_skcipher);
++ tfm = crypto_default_null_skcipher;
+ crypto_default_null_skcipher = NULL;
+ }
+- mutex_unlock(&crypto_default_null_skcipher_lock);
++ spin_unlock_bh(&crypto_default_null_skcipher_lock);
++
++ crypto_free_sync_skcipher(tfm);
+ }
+ EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 115994dfefec1e..77d6af61158936 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
+ },
+ },
++ /*
++ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
++ */
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
++ }
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
++ }
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
++ }
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
++ }
++ },
+ { },
+ };
+
+diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
+index a35dd0e41c2704..f73ce6e13065dd 100644
+--- a/drivers/acpi/pptt.c
++++ b/drivers/acpi/pptt.c
+@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
+ node_entry = ACPI_PTR_DIFF(node, table_hdr);
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, 
+ sizeof(struct acpi_table_pptt)); +- proc_sz = sizeof(struct acpi_pptt_processor *); ++ proc_sz = sizeof(struct acpi_pptt_processor); + + while ((unsigned long)entry + proc_sz < table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; +@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); +- proc_sz = sizeof(struct acpi_pptt_processor *); ++ proc_sz = sizeof(struct acpi_pptt_processor); + + /* find the processor structure associated with this cpuid */ + while ((unsigned long)entry + proc_sz < table_end) { +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 5377d094bf7548..6a1460d35447cc 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -2354,8 +2354,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev, + */ + put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); + +- if (dev->flags & ATA_DFLAG_CDL) +- buf[4] = 0x02; /* Support T2A and T2B pages */ ++ if (dev->flags & ATA_DFLAG_CDL_ENABLED) ++ buf[4] = 0x02; /* T2A and T2B pages enabled */ + else + buf[4] = 0; + +@@ -3764,12 +3764,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc, + } + + /* +- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode ++ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode + * page) into a SET FEATURES command. + */ +-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, +- const u8 *buf, int len, +- u16 *fp) ++static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, ++ const u8 *buf, int len, u16 *fp) + { + struct ata_device *dev = qc->dev; + struct ata_taskfile *tf = &qc->tf; +@@ -3787,17 +3786,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, + /* Check cdl_ctrl */ + switch (buf[0] & 0x03) { + case 0: +- /* Disable CDL */ ++ /* Disable CDL if it is enabled */ ++ if (!(dev->flags & ATA_DFLAG_CDL_ENABLED)) ++ return 0; ++ ata_dev_dbg(dev, "Disabling CDL\n"); + cdl_action = 0; + dev->flags &= ~ATA_DFLAG_CDL_ENABLED; + break; + case 0x02: +- /* Enable CDL T2A/T2B: NCQ priority must be disabled */ ++ /* ++ * Enable CDL if not already enabled. Since this is mutually ++ * exclusive with NCQ priority, allow this only if NCQ priority ++ * is disabled. 
++ */ ++ if (dev->flags & ATA_DFLAG_CDL_ENABLED) ++ return 0; + if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { + ata_dev_err(dev, + "NCQ priority must be disabled to enable CDL\n"); + return -EINVAL; + } ++ ata_dev_dbg(dev, "Enabling CDL\n"); + cdl_action = 1; + dev->flags |= ATA_DFLAG_CDL_ENABLED; + break; +diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c +index d56a5d508ccd7b..8b690f59df27d6 100644 +--- a/drivers/auxdisplay/hd44780.c ++++ b/drivers/auxdisplay/hd44780.c +@@ -313,13 +313,13 @@ static int hd44780_probe(struct platform_device *pdev) + fail3: + kfree(hd); + fail2: +- kfree(lcd); ++ charlcd_free(lcd); + fail1: + kfree(hdc); + return ret; + } + +-static int hd44780_remove(struct platform_device *pdev) ++static void hd44780_remove(struct platform_device *pdev) + { + struct charlcd *lcd = platform_get_drvdata(pdev); + struct hd44780_common *hdc = lcd->drvdata; +@@ -328,8 +328,7 @@ static int hd44780_remove(struct platform_device *pdev) + kfree(hdc->hd44780); + kfree(lcd->drvdata); + +- kfree(lcd); +- return 0; ++ charlcd_free(lcd); + } + + static const struct of_device_id hd44780_of_match[] = { +@@ -340,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match); + + static struct platform_driver hd44780_driver = { + .probe = hd44780_probe, +- .remove = hd44780_remove, ++ .remove_new = hd44780_remove, + .driver = { + .name = "hd44780", + .of_match_table = hd44780_of_match, +diff --git a/drivers/base/base.h b/drivers/base/base.h +index a8e3d8165232fd..0b491449b022a1 100644 +--- a/drivers/base/base.h ++++ b/drivers/base/base.h +@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp) + kset_put(&sp->subsys); + } + ++struct subsys_private *bus_to_subsys(const struct bus_type *bus); + struct subsys_private *class_to_subsys(const struct class *class); + + struct driver_private { +@@ -179,6 +180,22 @@ int driver_add_groups(struct device_driver *drv, const struct attribute_group ** + void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); + void device_driver_detach(struct device *dev); + ++static inline void device_set_driver(struct device *dev, const struct device_driver *drv) ++{ ++ /* ++ * Majority (all?) read accesses to dev->driver happens either ++ * while holding device lock or in bus/driver code that is only ++ * invoked when the device is bound to a driver and there is no ++ * concern of the pointer being changed while it is being read. ++ * However when reading device's uevent file we read driver pointer ++ * without taking device lock (so we do not block there for ++ * arbitrary amount of time). We use WRITE_ONCE() here to prevent ++ * tearing so that READ_ONCE() can safely be used in uevent code. ++ */ ++ // FIXME - this cast should not be needed "soon" ++ WRITE_ONCE(dev->driver, (struct device_driver *)drv); ++} ++ + int devres_release_all(struct device *dev); + void device_block_probing(void); + void device_unblock_probing(void); +diff --git a/drivers/base/bus.c b/drivers/base/bus.c +index d4361ad3b433f5..b97e13a52c3308 100644 +--- a/drivers/base/bus.c ++++ b/drivers/base/bus.c +@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, + * NULL. A call to subsys_put() must be done when finished with the pointer in + * order for it to be properly freed. 
+ */ +-static struct subsys_private *bus_to_subsys(const struct bus_type *bus) ++struct subsys_private *bus_to_subsys(const struct bus_type *bus) + { + struct subsys_private *sp = NULL; + struct kobject *kobj; +diff --git a/drivers/base/core.c b/drivers/base/core.c +index 8e2caa9eb5cd41..a192ce5bb8f902 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -2570,6 +2570,35 @@ static const char *dev_uevent_name(const struct kobject *kobj) + return NULL; + } + ++/* ++ * Try filling "DRIVER=" uevent variable for a device. Because this ++ * function may race with binding and unbinding the device from a driver, ++ * we need to be careful. Binding is generally safe, at worst we miss the ++ * fact that the device is already bound to a driver (but the driver ++ * information that is delivered through uevents is best-effort, it may ++ * become obsolete as soon as it is generated anyways). Unbinding is more ++ * risky as driver pointer is transitioning to NULL, so READ_ONCE() should ++ * be used to make sure we are dealing with the same pointer, and to ++ * ensure that driver structure is not going to disappear from under us ++ * we take bus' drivers klist lock. The assumption that only registered ++ * driver can be bound to a device, and to unregister a driver bus code ++ * will take the same lock. ++ */ ++static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env) ++{ ++ struct subsys_private *sp = bus_to_subsys(dev->bus); ++ ++ if (sp) { ++ scoped_guard(spinlock, &sp->klist_drivers.k_lock) { ++ struct device_driver *drv = READ_ONCE(dev->driver); ++ if (drv) ++ add_uevent_var(env, "DRIVER=%s", drv->name); ++ } ++ ++ subsys_put(sp); ++ } ++} ++ + static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) + { + const struct device *dev = kobj_to_dev(kobj); +@@ -2601,8 +2630,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) + if (dev->type && dev->type->name) + add_uevent_var(env, "DEVTYPE=%s", dev->type->name); + +- if (dev->driver) +- add_uevent_var(env, "DRIVER=%s", dev->driver->name); ++ /* Add "DRIVER=%s" variable if the device is bound to a driver */ ++ dev_driver_uevent(dev, env); + + /* Add common DT information about the device */ + of_device_uevent(dev, env); +@@ -2672,11 +2701,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, + if (!env) + return -ENOMEM; + +- /* Synchronize with really_probe() */ +- device_lock(dev); + /* let the kset specific function add its keys */ + retval = kset->uevent_ops->uevent(&dev->kobj, env); +- device_unlock(dev); + if (retval) + goto out; + +@@ -3691,7 +3717,7 @@ int device_add(struct device *dev) + device_pm_remove(dev); + dpm_sysfs_remove(dev); + DPMError: +- dev->driver = NULL; ++ device_set_driver(dev, NULL); + bus_remove_device(dev); + BusError: + device_remove_attrs(dev); +diff --git a/drivers/base/dd.c b/drivers/base/dd.c +index 0c3725c3eefa46..7e2fb159bb895b 100644 +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev) + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; +- dev->driver = NULL; ++ device_set_driver(dev, NULL); + dev_set_drvdata(dev, NULL); + if (dev->pm_domain && dev->pm_domain->dismiss) + dev->pm_domain->dismiss(dev); +@@ -629,7 +629,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) + } + + re_probe: +- dev->driver = drv; ++ device_set_driver(dev, drv); + + /* If using pinctrl, bind 
pins now before probing */ + ret = pinctrl_bind_pins(dev); +@@ -1014,7 +1014,7 @@ static int __device_attach(struct device *dev, bool allow_async) + if (ret == 0) + ret = 1; + else { +- dev->driver = NULL; ++ device_set_driver(dev, NULL); + ret = 0; + } + } else { +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index 8a6c1146df00fd..455e2a2b149f4b 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -441,7 +441,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, + cmd->iocb.ki_filp = file; + cmd->iocb.ki_complete = lo_rw_aio_complete; + cmd->iocb.ki_flags = IOCB_DIRECT; +- cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); ++ cmd->iocb.ki_ioprio = req_get_ioprio(rq); + + if (rw == ITER_SOURCE) + ret = call_write_iter(file, &cmd->iocb, &iter); +diff --git a/drivers/char/misc.c b/drivers/char/misc.c +index f7dd455dd0dd3c..dda466f9181acf 100644 +--- a/drivers/char/misc.c ++++ b/drivers/char/misc.c +@@ -315,7 +315,7 @@ static int __init misc_init(void) + goto fail_remove; + + err = -EIO; +- if (register_chrdev(MISC_MAJOR, "misc", &misc_fops)) ++ if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops)) + goto fail_printk; + return 0; + +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 796ab9a4e48fa1..80e0f485170a8f 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -1612,8 +1612,8 @@ static void handle_control_message(struct virtio_device *vdev, + break; + case VIRTIO_CONSOLE_RESIZE: { + struct { +- __u16 rows; +- __u16 cols; ++ __virtio16 rows; ++ __virtio16 cols; + } size; + + if (!is_console_port(port)) +@@ -1621,7 +1621,8 @@ static void handle_control_message(struct virtio_device *vdev, + + memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), + sizeof(size)); +- set_console_size(port, size.rows, size.cols); ++ set_console_size(port, virtio16_to_cpu(vdev, size.rows), ++ virtio16_to_cpu(vdev, size.cols)); + + port->cons.hvc->irq_requested = 1; + resize_console(port); +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 5bbd036f5295f5..8474099e2cac19 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -5216,6 +5216,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) + if (!clkspec) + return ERR_PTR(-EINVAL); + ++ /* Check if node in clkspec is in disabled/fail state */ ++ if (!of_device_is_available(clkspec->np)) ++ return ERR_PTR(-ENOENT); ++ + mutex_lock(&of_clk_mutex); + list_for_each_entry(provider, &of_clk_providers, link) { + if (provider->node == clkspec->np) { +diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c +index 6c6bc79b2e9cec..865d47800791bd 100644 +--- a/drivers/clk/renesas/r9a07g043-cpg.c ++++ b/drivers/clk/renesas/r9a07g043-cpg.c +@@ -14,6 +14,17 @@ + + #include "rzg2l-cpg.h" + ++/* Specific registers. */ ++#define CPG_PL2SDHI_DSEL (0x218) ++ ++/* Clock select configuration. */ ++#define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2) ++#define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2) ++ ++/* Clock status configuration. 
*/ ++#define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1) ++#define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1) ++ + enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2, +@@ -75,8 +86,12 @@ static const struct clk_div_table dtable_1_32[] = { + + /* Mux clock tables */ + static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; ++#ifdef CONFIG_ARM64 + static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; +-static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; ++#endif ++static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" }; ++ ++static const u32 mtable_sdhi[] = { 1, 2, 3 }; + + static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { + /* External Clock Inputs */ +@@ -120,11 +135,18 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { + DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32), + DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), + DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1), ++#ifdef CONFIG_ARM64 + DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2), ++#endif ++#ifdef CONFIG_RISCV ++ DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1), ++#endif + DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), + DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), +- DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi), +- DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi), ++ DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi, ++ mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), ++ DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi, ++ mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), + DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4), + DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4), + }; +diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c +index c597414a94d8a0..48404cafea3f51 100644 +--- a/drivers/clk/renesas/r9a07g044-cpg.c ++++ b/drivers/clk/renesas/r9a07g044-cpg.c +@@ -15,6 +15,17 @@ + + #include "rzg2l-cpg.h" + ++/* Specific registers. */ ++#define CPG_PL2SDHI_DSEL (0x218) ++ ++/* Clock select configuration. */ ++#define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2) ++#define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2) ++ ++/* Clock status configuration. 
*/ ++#define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1) ++#define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1) ++ + enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R9A07G054_CLK_DRP_A, +@@ -95,9 +106,11 @@ static const struct clk_div_table dtable_16_128[] = { + static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; + static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" }; + static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; +-static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; ++static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" }; + static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" }; + ++static const u32 mtable_sdhi[] = { 1, 2, 3 }; ++ + static const struct { + struct cpg_core_clk common[56]; + #ifdef CONFIG_CLK_R9A07G054 +@@ -163,8 +176,10 @@ static const struct { + DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2), + DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), + DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), +- DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi), +- DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi), ++ DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi, ++ mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), ++ DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi, ++ mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), + DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4), + DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4), + DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8), +diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c +index f8dbb092b9f1b2..77eefb6ee4538a 100644 +--- a/drivers/clk/renesas/rzg2l-cpg.c ++++ b/drivers/clk/renesas/rzg2l-cpg.c +@@ -56,15 +56,37 @@ + #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) + #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) + ++#define CPG_WEN_BIT BIT(16) ++ + #define MAX_VCLK_FREQ (148500000) + +-struct sd_hw_data { ++/** ++ * struct clk_hw_data - clock hardware data ++ * @hw: clock hw ++ * @conf: clock configuration (register offset, shift, width) ++ * @sconf: clock status configuration (register offset, shift, width) ++ * @priv: CPG private data structure ++ */ ++struct clk_hw_data { + struct clk_hw hw; + u32 conf; ++ u32 sconf; + struct rzg2l_cpg_priv *priv; + }; + +-#define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw) ++#define to_clk_hw_data(_hw) container_of(_hw, struct clk_hw_data, hw) ++ ++/** ++ * struct sd_mux_hw_data - SD MUX clock hardware data ++ * @hw_data: clock hw data ++ * @mtable: clock mux table ++ */ ++struct sd_mux_hw_data { ++ struct clk_hw_data hw_data; ++ const u32 *mtable; ++}; ++ ++#define to_sd_mux_hw_data(_hw) container_of(_hw, struct sd_mux_hw_data, hw_data) + + struct rzg2l_pll5_param { + u32 pl5_fracin; +@@ -121,6 +143,76 @@ static void rzg2l_cpg_del_clk_provider(void *data) + of_clk_del_provider(data); + } + ++/* Must be called in atomic context. 
*/ ++static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf) ++{ ++ u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf); ++ u32 off = GET_REG_OFFSET(conf); ++ u32 val; ++ ++ return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200); ++} ++ ++int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, ++ void *data) ++{ ++ struct clk_notifier_data *cnd = data; ++ struct clk_hw *hw = __clk_get_hw(cnd->clk); ++ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); ++ struct rzg2l_cpg_priv *priv = clk_hw_data->priv; ++ u32 off = GET_REG_OFFSET(clk_hw_data->conf); ++ u32 shift = GET_SHIFT(clk_hw_data->conf); ++ const u32 clk_src_266 = 3; ++ unsigned long flags; ++ int ret; ++ ++ if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266)) ++ return NOTIFY_DONE; ++ ++ spin_lock_irqsave(&priv->rmw_lock, flags); ++ ++ /* ++ * As per the HW manual, we should not directly switch from 533 MHz to ++ * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) ++ * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, ++ * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 ++ * (400 MHz)). ++ * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock ++ * switching register is prohibited. ++ * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and ++ * the index to value mapping is done by adding 1 to the index. ++ */ ++ ++ writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off); ++ ++ /* Wait for the update done. */ ++ ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); ++ ++ spin_unlock_irqrestore(&priv->rmw_lock, flags); ++ ++ if (ret) ++ dev_err(priv->dev, "failed to switch to safe clk source\n"); ++ ++ return notifier_from_errno(ret); ++} ++ ++static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core, ++ struct rzg2l_cpg_priv *priv) ++{ ++ struct notifier_block *nb; ++ ++ if (!core->notifier) ++ return 0; ++ ++ nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL); ++ if (!nb) ++ return -ENOMEM; ++ ++ nb->notifier_call = core->notifier; ++ ++ return clk_notifier_register(hw->clk, nb); ++} ++ + static struct clk * __init + rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core, + struct clk **clks, +@@ -183,63 +275,44 @@ rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core, + + static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index) + { +- struct sd_hw_data *hwdata = to_sd_hw_data(hw); +- struct rzg2l_cpg_priv *priv = hwdata->priv; +- u32 off = GET_REG_OFFSET(hwdata->conf); +- u32 shift = GET_SHIFT(hwdata->conf); +- const u32 clk_src_266 = 2; +- u32 msk, val, bitmask; ++ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); ++ struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data); ++ struct rzg2l_cpg_priv *priv = clk_hw_data->priv; ++ u32 off = GET_REG_OFFSET(clk_hw_data->conf); ++ u32 shift = GET_SHIFT(clk_hw_data->conf); + unsigned long flags; ++ u32 val; + int ret; + +- /* +- * As per the HW manual, we should not directly switch from 533 MHz to +- * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) +- * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, +- * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 +- * (400 MHz)). +- * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock +- * switching register is prohibited. 
+- * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and +- * the index to value mapping is done by adding 1 to the index. +- */ +- bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16; +- msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS; ++ val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index); ++ + spin_lock_irqsave(&priv->rmw_lock, flags); +- if (index != clk_src_266) { +- writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off); +- +- ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val, +- !(val & msk), 10, +- CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); +- if (ret) +- goto unlock; +- } + +- writel(bitmask | ((index + 1) << shift), priv->base + off); ++ writel((CPG_WEN_BIT | val) << shift, priv->base + off); ++ ++ /* Wait for the update done. */ ++ ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); + +- ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val, +- !(val & msk), 10, +- CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); +-unlock: + spin_unlock_irqrestore(&priv->rmw_lock, flags); + + if (ret) +- dev_err(priv->dev, "failed to switch clk source\n"); ++ dev_err(priv->dev, "Failed to switch parent\n"); + + return ret; + } + + static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw) + { +- struct sd_hw_data *hwdata = to_sd_hw_data(hw); +- struct rzg2l_cpg_priv *priv = hwdata->priv; +- u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf)); ++ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); ++ struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data); ++ struct rzg2l_cpg_priv *priv = clk_hw_data->priv; ++ u32 val; + +- val >>= GET_SHIFT(hwdata->conf); +- val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0); ++ val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf)); ++ val >>= GET_SHIFT(clk_hw_data->conf); ++ val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0); + +- return val ? val - 1 : 0; ++ return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val); + } + + static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = { +@@ -253,31 +326,40 @@ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core, + void __iomem *base, + struct rzg2l_cpg_priv *priv) + { +- struct sd_hw_data *clk_hw_data; ++ struct sd_mux_hw_data *sd_mux_hw_data; + struct clk_init_data init; + struct clk_hw *clk_hw; + int ret; + +- clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); +- if (!clk_hw_data) ++ sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL); ++ if (!sd_mux_hw_data) + return ERR_PTR(-ENOMEM); + +- clk_hw_data->priv = priv; +- clk_hw_data->conf = core->conf; ++ sd_mux_hw_data->hw_data.priv = priv; ++ sd_mux_hw_data->hw_data.conf = core->conf; ++ sd_mux_hw_data->hw_data.sconf = core->sconf; ++ sd_mux_hw_data->mtable = core->mtable; + + init.name = GET_SHIFT(core->conf) ? 
"sd1" : "sd0"; + init.ops = &rzg2l_cpg_sd_clk_mux_ops; +- init.flags = 0; ++ init.flags = core->flag; + init.num_parents = core->num_parents; + init.parent_names = core->parent_names; + +- clk_hw = &clk_hw_data->hw; ++ clk_hw = &sd_mux_hw_data->hw_data.hw; + clk_hw->init = &init; + + ret = devm_clk_hw_register(priv->dev, clk_hw); + if (ret) + return ERR_PTR(ret); + ++ ret = rzg2l_register_notifier(clk_hw, core, priv); ++ if (ret) { ++ dev_err(priv->dev, "Failed to register notifier for %s\n", ++ core->name); ++ return ERR_PTR(ret); ++ } ++ + return clk_hw->clk; + } + +diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h +index 91e9c2569f801b..e662459cc6d963 100644 +--- a/drivers/clk/renesas/rzg2l-cpg.h ++++ b/drivers/clk/renesas/rzg2l-cpg.h +@@ -9,6 +9,8 @@ + #ifndef __RENESAS_RZG2L_CPG_H__ + #define __RENESAS_RZG2L_CPG_H__ + ++#include ++ + #define CPG_SIPLL5_STBY (0x140) + #define CPG_SIPLL5_CLK1 (0x144) + #define CPG_SIPLL5_CLK3 (0x14C) +@@ -19,7 +21,6 @@ + #define CPG_PL2_DDIV (0x204) + #define CPG_PL3A_DDIV (0x208) + #define CPG_PL6_DDIV (0x210) +-#define CPG_PL2SDHI_DSEL (0x218) + #define CPG_CLKSTATUS (0x280) + #define CPG_PL3_SSEL (0x408) + #define CPG_PL6_SSEL (0x414) +@@ -43,8 +44,6 @@ + #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28) + #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29) + +-#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200 +- + /* n = 0/1/2 for PLL1/4/6 */ + #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n)) + #define CPG_SAMPLL_CLK2(n) (0x08 + (16 * n)) +@@ -69,9 +68,6 @@ + #define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1) + #define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1) + +-#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2) +-#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2) +- + #define EXTAL_FREQ_IN_MEGA_HZ (24) + + /** +@@ -90,10 +86,13 @@ struct cpg_core_clk { + unsigned int mult; + unsigned int type; + unsigned int conf; ++ unsigned int sconf; + const struct clk_div_table *dtable; ++ const u32 *mtable; + const char * const *parent_names; +- int flag; +- int mux_flags; ++ notifier_fn_t notifier; ++ u32 flag; ++ u32 mux_flags; + int num_parents; + }; + +@@ -151,10 +150,11 @@ enum clk_types { + .parent_names = _parent_names, \ + .num_parents = ARRAY_SIZE(_parent_names), \ + .mux_flags = CLK_MUX_READ_ONLY) +-#define DEF_SD_MUX(_name, _id, _conf, _parent_names) \ +- DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \ ++#define DEF_SD_MUX(_name, _id, _conf, _sconf, _parent_names, _mtable, _clk_flags, _notifier) \ ++ DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, .sconf = _sconf, \ + .parent_names = _parent_names, \ +- .num_parents = ARRAY_SIZE(_parent_names)) ++ .num_parents = ARRAY_SIZE(_parent_names), \ ++ .mtable = _mtable, .flag = _clk_flags, .notifier = _notifier) + #define DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \ + DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent) + #define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \ +@@ -273,4 +273,6 @@ extern const struct rzg2l_cpg_info r9a07g044_cpg_info; + extern const struct rzg2l_cpg_info r9a07g054_cpg_info; + extern const struct rzg2l_cpg_info r9a09g011_cpg_info; + ++int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, void *data); ++ + #endif +diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c +index 951c23fa0369ea..75dce1ff24193b 100644 +--- a/drivers/comedi/drivers/jr3_pci.c ++++ b/drivers/comedi/drivers/jr3_pci.c +@@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev) + struct 
jr3_pci_dev_private *devpriv = dev->private; + + if (devpriv) +- del_timer_sync(&devpriv->timer); ++ timer_shutdown_sync(&devpriv->timer); + + comedi_pci_detach(dev); + } +diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c +index 021f423705e1b1..9ba6b09775f617 100644 +--- a/drivers/cpufreq/apple-soc-cpufreq.c ++++ b/drivers/cpufreq/apple-soc-cpufreq.c +@@ -103,11 +103,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] = { + + static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu) + { +- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); +- struct apple_cpu_priv *priv = policy->driver_data; ++ struct cpufreq_policy *policy; ++ struct apple_cpu_priv *priv; + struct cpufreq_frequency_table *p; + unsigned int pstate; + ++ policy = cpufreq_cpu_get_raw(cpu); ++ if (unlikely(!policy)) ++ return 0; ++ ++ priv = policy->driver_data; ++ + if (priv->info->cur_pstate_mask) { + u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS); + +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c +index c8447ecad797e7..aa34af940cb53b 100644 +--- a/drivers/cpufreq/cppc_cpufreq.c ++++ b/drivers/cpufreq/cppc_cpufreq.c +@@ -773,7 +773,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) + int ret; + + if (!policy) +- return -ENODEV; ++ return 0; + + cpu_data = policy->driver_data; + +diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c +index 079940c69ee0ba..e4989764efe2a8 100644 +--- a/drivers/cpufreq/scmi-cpufreq.c ++++ b/drivers/cpufreq/scmi-cpufreq.c +@@ -33,11 +33,17 @@ static const struct scmi_perf_proto_ops *perf_ops; + + static unsigned int scmi_cpufreq_get_rate(unsigned int cpu) + { +- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); +- struct scmi_data *priv = policy->driver_data; ++ struct cpufreq_policy *policy; ++ struct scmi_data *priv; + unsigned long rate; + int ret; + ++ policy = cpufreq_cpu_get_raw(cpu); ++ if (unlikely(!policy)) ++ return 0; ++ ++ priv = policy->driver_data; ++ + ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false); + if (ret) + return 0; +diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c +index bfc2e65e1e5022..2aef39bff7d6f5 100644 +--- a/drivers/cpufreq/scpi-cpufreq.c ++++ b/drivers/cpufreq/scpi-cpufreq.c +@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops; + + static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) + { +- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); +- struct scpi_data *priv = policy->driver_data; +- unsigned long rate = clk_get_rate(priv->clk); ++ struct cpufreq_policy *policy; ++ struct scpi_data *priv; ++ unsigned long rate; ++ ++ policy = cpufreq_cpu_get_raw(cpu); ++ if (unlikely(!policy)) ++ return 0; ++ ++ priv = policy->driver_data; ++ rate = clk_get_rate(priv->clk); + + return rate / 1000; + } +diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c +index c77f482d2a97e9..5bc809146ffea0 100644 +--- a/drivers/crypto/atmel-sha204a.c ++++ b/drivers/crypto/atmel-sha204a.c +@@ -107,6 +107,12 @@ static int atmel_sha204a_probe(struct i2c_client *client) + i2c_priv->hwrng.name = dev_name(&client->dev); + i2c_priv->hwrng.read = atmel_sha204a_rng_read; + ++ /* ++ * According to review by Bill Cox [1], this HWRNG has very low entropy. 
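/*
 * Context for the quality value set above: struct hwrng::quality tells
 * the core how many bits of entropy to credit per 1024 bits of output,
 * so a value of 1 means the device contributes almost nothing to the
 * entropy pool while remaining usable. A hedged sketch of a driver
 * derating itself this way; everything named example_* is hypothetical:
 */
#include <linux/hw_random.h>

static int example_rng_read(struct hwrng *rng, void *data, size_t max,
			    bool wait)
{
	/* A real driver would fill 'data' from the device here. */
	return 0;	/* number of bytes placed in 'data' */
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
	.quality = 1,	/* credit ~0.1% entropy: 1 bit per 1024 bits */
};

/* Registered with devm_hwrng_register(dev, &example_rng), as above. */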
++ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html ++ */ ++ i2c_priv->hwrng.quality = 1; ++ + ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng); + if (ret) + dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); +diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c +index 0caa57dafc525a..b1e60542351a66 100644 +--- a/drivers/crypto/ccp/sp-pci.c ++++ b/drivers/crypto/ccp/sp-pci.c +@@ -577,6 +577,7 @@ static const struct pci_device_id sp_pci_table[] = { + { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, + { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, + { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, ++ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] }, + { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, + { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, + /* Last entry must be zero */ +diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c +index bab4592db647f7..92ef68849fbea8 100644 +--- a/drivers/cxl/core/regs.c ++++ b/drivers/cxl/core/regs.c +@@ -478,7 +478,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri + resource_size_t rcrb = ri->base; + void __iomem *addr; + u32 bar0, bar1; +- u16 cmd; + u32 id; + + if (which == CXL_RCRB_UPSTREAM) +@@ -500,7 +499,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri + } + + id = readl(addr + PCI_VENDOR_ID); +- cmd = readw(addr + PCI_COMMAND); + bar0 = readl(addr + PCI_BASE_ADDRESS_0); + bar1 = readl(addr + PCI_BASE_ADDRESS_1); + iounmap(addr); +@@ -515,8 +513,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri + dev_err(dev, "Failed to access Downstream Port RCRB\n"); + return CXL_RESOURCE_NONE; + } +- if (!(cmd & PCI_COMMAND_MEMORY)) +- return CXL_RESOURCE_NONE; + /* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */ + if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO)) + return CXL_RESOURCE_NONE; +diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c +index d1fcdd1f9aaed3..373282beeb6068 100644 +--- a/drivers/dma-buf/udmabuf.c ++++ b/drivers/dma-buf/udmabuf.c +@@ -214,7 +214,7 @@ static long udmabuf_create(struct miscdevice *device, + if (!ubuf) + return -ENOMEM; + +- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; ++ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; + for (i = 0; i < head->count; i++) { + if (!IS_ALIGNED(list[i].offset, PAGE_SIZE)) + goto err; +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c +index ffe621695e472b..78b8a97b236376 100644 +--- a/drivers/dma/dmatest.c ++++ b/drivers/dma/dmatest.c +@@ -827,9 +827,9 @@ static int dmatest_func(void *data) + } else { + dma_async_issue_pending(chan); + +- wait_event_freezable_timeout(thread->done_wait, +- done->done, +- msecs_to_jiffies(params->timeout)); ++ wait_event_timeout(thread->done_wait, ++ done->done, ++ msecs_to_jiffies(params->timeout)); + + status = dma_async_is_tx_complete(chan, cookie, NULL, + NULL); +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index cec9e8f29bbdfe..a0a2a0f75bba46 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -247,6 +247,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np, + { "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" }, + { "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" }, + #endif ++#if IS_ENABLED(CONFIG_MMC_ATMELMCI) ++ { 
"atmel,hsmci", "cd-gpios", "cd-inverted" }, ++#endif + #if IS_ENABLED(CONFIG_PCI_IMX6) + { "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" }, + { "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" }, +@@ -272,9 +275,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np, + #if IS_ENABLED(CONFIG_REGULATOR_GPIO) + { "regulator-gpio", "enable-gpio", "enable-active-high" }, + { "regulator-gpio", "enable-gpios", "enable-active-high" }, +-#endif +-#if IS_ENABLED(CONFIG_MMC_ATMELMCI) +- { "atmel,hsmci", "cd-gpios", "cd-inverted" }, + #endif + }; + unsigned int i; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 28f2b4022d34e3..e6bc590533194d 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2789,16 +2789,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, + for (k = 0; k < dc_state->stream_count; k++) { + bundle->stream_update.stream = dc_state->streams[k]; + +- for (m = 0; m < dc_state->stream_status->plane_count; m++) { ++ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { + bundle->surface_updates[m].surface = +- dc_state->stream_status->plane_states[m]; ++ dc_state->stream_status[k].plane_states[m]; + bundle->surface_updates[m].surface->force_full_update = + true; + } + + update_planes_and_stream_adapter(dm->dc, + UPDATE_TYPE_FULL, +- dc_state->stream_status->plane_count, ++ dc_state->stream_status[k].plane_count, + dc_state->streams[k], + &bundle->stream_update, + bundle->surface_updates); +@@ -9590,6 +9590,9 @@ static bool should_reset_plane(struct drm_atomic_state *state, + if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset) + return true; + ++ if (amdgpu_in_reset(adev) && state->allow_modeset) ++ return true; ++ + /* Exit early if we know that we're adding or removing the plane. 
*/ + if (old_plane_state->crtc != new_plane_state->crtc) + return true; +diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c +index 70a25949142c0d..74b0c85944bd61 100644 +--- a/drivers/iio/adc/ad7768-1.c ++++ b/drivers/iio/adc/ad7768-1.c +@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = { + .channel = 0, + .scan_index = 0, + .scan_type = { +- .sign = 'u', ++ .sign = 's', + .realbits = 24, + .storagebits = 32, + .shift = 8, +@@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev, + return ret; + + ret = ad7768_scan_direct(indio_dev); +- if (ret >= 0) +- *val = ret; + + iio_device_release_direct_mode(indio_dev); + if (ret < 0) + return ret; ++ *val = sign_extend32(ret, chan->scan_type.realbits - 1); + + return IIO_VAL_INT; + +diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c +index 11155e0fb8395c..35d777976c2952 100644 +--- a/drivers/infiniband/hw/qib/qib_fs.c ++++ b/drivers/infiniband/hw/qib/qib_fs.c +@@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry, + struct inode *inode = new_inode(dir->i_sb); + + if (!inode) { ++ dput(dentry); + error = -EPERM; + goto bail; + } +diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c +index 95bd7c25ba6f36..83c5d786686d07 100644 +--- a/drivers/iommu/amd/iommu.c ++++ b/drivers/iommu/amd/iommu.c +@@ -3619,7 +3619,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) + * we should not modify the IRTE + */ + if (!dev_data || !dev_data->use_vapic) +- return 0; ++ return -EINVAL; + + ir_data->cfg = irqd_cfg(data); + pi_data->ir_data = ir_data; +diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c +index d83c2c85962c37..683e8721e3b498 100644 +--- a/drivers/irqchip/irq-gic-v2m.c ++++ b/drivers/irqchip/irq-gic-v2m.c +@@ -454,7 +454,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, + #ifdef CONFIG_ACPI + static int acpi_num_msi; + +-static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) ++static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) + { + struct v2m_data *data; + +diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c +index 82102a4c5d6883..f8215a8f656a46 100644 +--- a/drivers/mailbox/pcc.c ++++ b/drivers/mailbox/pcc.c +@@ -313,6 +313,10 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p) + int ret; + + pchan = chan->con_priv; ++ ++ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack)) ++ return IRQ_NONE; ++ + if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE && + !pchan->chan_in_use) + return IRQ_NONE; +@@ -330,13 +334,16 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p) + return IRQ_NONE; + } + +- if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack)) +- return IRQ_NONE; +- ++ /* ++ * Clear this flag after updating interrupt ack register and just ++ * before mbox_chan_received_data() which might call pcc_send_data() ++ * where the flag is set again to start new transfer. This is ++ * required to avoid any possible race in updatation of this flag. 
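/*
 * The ordering constraint described in the comment above, in
 * miniature: the in-use flag must drop before the receive callback
 * runs, because the callback may immediately start a new transfer that
 * sets the flag again, and a late clear would wipe out the new
 * transfer's state. A sketch with hypothetical example_* types, not
 * the PCC code itself:
 */
#include <linux/types.h>

struct example_chan {
	bool in_use;
};

static void example_client_callback(struct example_chan *ch)
{
	/* A real client may kick off the next transfer right here. */
	ch->in_use = true;
}

static void example_complete_transfer(struct example_chan *ch)
{
	ch->in_use = false;		/* retire the old transfer first */
	example_client_callback(ch);	/* may set in_use for a new one */
	/* No writes to ch->in_use past this point. */
}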
++ */ ++ pchan->chan_in_use = false; + mbox_chan_received_data(chan, NULL); + + check_and_ack(pchan, chan); +- pchan->chan_in_use = false; + + return IRQ_HANDLED; + } +diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c +index 1ae37e693de045..d080e21df666d9 100644 +--- a/drivers/mcb/mcb-parse.c ++++ b/drivers/mcb/mcb-parse.c +@@ -101,7 +101,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, + + ret = mcb_device_register(bus, mdev); + if (ret < 0) +- goto err; ++ return ret; + + return 0; + +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 65309da1dca340..8b25287c89ed6d 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -2061,14 +2061,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio) + if (!rdev_set_badblocks(rdev, sect, s, 0)) + abort = 1; + } +- if (abort) { +- conf->recovery_disabled = +- mddev->recovery_disabled; +- set_bit(MD_RECOVERY_INTR, &mddev->recovery); +- md_done_sync(mddev, r1_bio->sectors, 0); +- put_buf(r1_bio); ++ if (abort) + return 0; +- } ++ + /* Try next page */ + sectors -= s; + sect += s; +@@ -2207,10 +2202,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) + int disks = conf->raid_disks * 2; + struct bio *wbio; + +- if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) +- /* ouch - failed to read all of that. */ +- if (!fix_sync_read_error(r1_bio)) ++ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { ++ /* ++ * ouch - failed to read all of that. ++ * No need to fix read error for check/repair ++ * because all member disks are read. ++ */ ++ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) || ++ !fix_sync_read_error(r1_bio)) { ++ conf->recovery_disabled = mddev->recovery_disabled; ++ set_bit(MD_RECOVERY_INTR, &mddev->recovery); ++ md_done_sync(mddev, r1_bio->sectors, 0); ++ put_buf(r1_bio); + return; ++ } ++ } + + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + process_checks(r1_bio); +diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c +index 807551a5143b78..15d863f97cbf96 100644 +--- a/drivers/media/test-drivers/vimc/vimc-streamer.c ++++ b/drivers/media/test-drivers/vimc/vimc-streamer.c +@@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream) + continue; + + sd = media_entity_to_v4l2_subdev(ved->ent); ++ /* ++ * Do not call .s_stream() to stop an already ++ * stopped/unstarted subdev. ++ */ ++ if (!v4l2_subdev_is_streaming(sd)) ++ continue; + v4l2_subdev_call(sd, video, s_stream, 0); + } + } +diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c +index a32ef739eb4490..5f115438d07228 100644 +--- a/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/drivers/media/v4l2-core/v4l2-subdev.c +@@ -363,12 +363,8 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable) + * The .s_stream() operation must never be called to start or stop an + * already started or stopped subdev. Catch offenders but don't return + * an error yet to avoid regressions. +- * +- * As .s_stream() is mutually exclusive with the .enable_streams() and +- * .disable_streams() operation, we can use the enabled_streams field +- * to store the subdev streaming state. + */ +- if (WARN_ON(!!sd->enabled_streams == !!enable)) ++ if (WARN_ON(sd->s_stream_enabled == !!enable)) + return 0; + + ret = sd->ops->video->s_stream(sd, enable); +@@ -379,7 +375,7 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable) + } + + if (!ret) { +- sd->enabled_streams = enable ? 
BIT(0) : 0; ++ sd->s_stream_enabled = enable; + + #if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) { +@@ -1929,37 +1925,43 @@ static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask) + { + struct device *dev = sd->entity.graph_obj.mdev->dev; +- unsigned int i; + int ret; + + /* + * The subdev doesn't implement pad-based stream enable, fall back +- * on the .s_stream() operation. This can only be done for subdevs that +- * have a single source pad, as sd->enabled_streams is global to the +- * subdev. ++ * to the .s_stream() operation. + */ + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + +- for (i = 0; i < sd->entity.num_pads; ++i) { +- if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) +- return -EOPNOTSUPP; +- } ++ /* ++ * .s_stream() means there is no streams support, so the only allowed ++ * stream is the implicit stream 0. ++ */ ++ if (streams_mask != BIT_ULL(0)) ++ return -EOPNOTSUPP; ++ ++ /* ++ * We use a 64-bit bitmask for tracking enabled pads, so only subdevices ++ * with 64 pads or less can be supported. ++ */ ++ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) ++ return -EOPNOTSUPP; + +- if (sd->enabled_streams & streams_mask) { +- dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n", +- streams_mask, sd->entity.name, pad); ++ if (sd->enabled_pads & BIT_ULL(pad)) { ++ dev_dbg(dev, "pad %u already enabled on %s\n", ++ pad, sd->entity.name); + return -EALREADY; + } + +- /* Start streaming when the first streams are enabled. */ +- if (!sd->enabled_streams) { ++ /* Start streaming when the first pad is enabled. */ ++ if (!sd->enabled_pads) { + ret = v4l2_subdev_call(sd, video, s_stream, 1); + if (ret) + return ret; + } + +- sd->enabled_streams |= streams_mask; ++ sd->enabled_pads |= BIT_ULL(pad); + + return 0; + } +@@ -2046,37 +2048,43 @@ static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad, + u64 streams_mask) + { + struct device *dev = sd->entity.graph_obj.mdev->dev; +- unsigned int i; + int ret; + + /* +- * If the subdev doesn't implement pad-based stream enable, fall back +- * on the .s_stream() operation. This can only be done for subdevs that +- * have a single source pad, as sd->enabled_streams is global to the +- * subdev. ++ * If the subdev doesn't implement pad-based stream enable, fall back ++ * to the .s_stream() operation. + */ + if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return -EOPNOTSUPP; + +- for (i = 0; i < sd->entity.num_pads; ++i) { +- if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) +- return -EOPNOTSUPP; +- } ++ /* ++ * .s_stream() means there is no streams support, so the only allowed ++ * stream is the implicit stream 0. ++ */ ++ if (streams_mask != BIT_ULL(0)) ++ return -EOPNOTSUPP; ++ ++ /* ++ * We use a 64-bit bitmask for tracking enabled pads, so only subdevices ++ * with 64 pads or less can be supported. ++ */ ++ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) ++ return -EOPNOTSUPP; + +- if ((sd->enabled_streams & streams_mask) != streams_mask) { +- dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n", +- streams_mask, sd->entity.name, pad); ++ if (!(sd->enabled_pads & BIT_ULL(pad))) { ++ dev_dbg(dev, "pad %u already disabled on %s\n", ++ pad, sd->entity.name); + return -EALREADY; + } + + /* Stop streaming when the last streams are disabled. 
*/ +- if (!(sd->enabled_streams & ~streams_mask)) { ++ if (!(sd->enabled_pads & ~BIT_ULL(pad))) { + ret = v4l2_subdev_call(sd, video, s_stream, 0); + if (ret) + return ret; + } + +- sd->enabled_streams &= ~streams_mask; ++ sd->enabled_pads &= ~BIT_ULL(pad); + + return 0; + } +@@ -2232,6 +2240,31 @@ void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + } + EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event); + ++bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd) ++{ ++ struct v4l2_subdev_state *state; ++ ++ if (!v4l2_subdev_has_op(sd, pad, enable_streams)) ++ return sd->s_stream_enabled; ++ ++ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) ++ return !!sd->enabled_pads; ++ ++ state = v4l2_subdev_get_locked_active_state(sd); ++ ++ for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { ++ const struct v4l2_subdev_stream_config *cfg; ++ ++ cfg = &state->stream_configs.configs[i]; ++ ++ if (cfg->enabled) ++ return true; ++ } ++ ++ return false; ++} ++EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); ++ + int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) + { + #if IS_REACHABLE(CONFIG_LEDS_CLASS) +diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c +index 5b861dbff27e9a..6c24426104ba6f 100644 +--- a/drivers/misc/lkdtm/perms.c ++++ b/drivers/misc/lkdtm/perms.c +@@ -28,6 +28,13 @@ static const unsigned long rodata = 0xAA55AA55; + /* This is marked __ro_after_init, so it should ultimately be .rodata. */ + static unsigned long ro_after_init __ro_after_init = 0x55AA5500; + ++/* ++ * This is a pointer to do_nothing() which is initialized at runtime rather ++ * than build time to avoid objtool IBT validation warnings caused by an ++ * inlined unrolled memcpy() in execute_location(). ++ */ ++static void __ro_after_init *do_nothing_ptr; ++ + /* + * This just returns to the caller. It is designed to be copied into + * non-executable memory regions. 
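/*
 * The s_stream() fallback earlier in this patch tracks enabled pads in
 * a single u64, which is why it rejects pad numbers of 64 and above up
 * front. The same idiom reduced to its core; example_* is a
 * hypothetical helper, not the V4L2 code itself:
 */
#include <linux/bits.h>
#include <linux/types.h>

static bool example_mark_pad_enabled(u64 *enabled_pads, unsigned int pad)
{
	/* A u64 bitmask can only describe 64 pads. */
	if (pad >= sizeof(*enabled_pads) * BITS_PER_BYTE)
		return false;

	/* Mirror the -EALREADY check: refuse a double enable. */
	if (*enabled_pads & BIT_ULL(pad))
		return false;

	*enabled_pads |= BIT_ULL(pad);
	return true;
}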
+@@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write) + { + void (*func)(void); + func_desc_t fdesc; +- void *do_nothing_text = dereference_function_descriptor(do_nothing); + +- pr_info("attempting ok execution at %px\n", do_nothing_text); ++ pr_info("attempting ok execution at %px\n", do_nothing_ptr); + do_nothing(); + + if (write == CODE_WRITE) { +- memcpy(dst, do_nothing_text, EXEC_SIZE); ++ memcpy(dst, do_nothing_ptr, EXEC_SIZE); + flush_icache_range((unsigned long)dst, + (unsigned long)dst + EXEC_SIZE); + } +@@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void) + + void __init lkdtm_perms_init(void) + { ++ do_nothing_ptr = dereference_function_descriptor(do_nothing); ++ + /* Make sure we can write to __ro_after_init values during __init */ + ro_after_init |= 0xAA; + } +diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c +index 3c1359d8d4e692..55b892f982e93e 100644 +--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c ++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c +@@ -37,6 +37,7 @@ + struct pci1xxxx_gpio { + struct auxiliary_device *aux_dev; + void __iomem *reg_base; ++ raw_spinlock_t wa_lock; + struct gpio_chip gpio; + spinlock_t lock; + int irq_base; +@@ -164,7 +165,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data) + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); +- pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true); ++ writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio)); + spin_unlock_irqrestore(&priv->lock, flags); + } + +@@ -254,6 +255,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) + struct pci1xxxx_gpio *priv = dev_id; + struct gpio_chip *gc = &priv->gpio; + unsigned long int_status = 0; ++ unsigned long wa_flags; + unsigned long flags; + u8 pincount; + int bit; +@@ -277,7 +279,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) + writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank)); + spin_unlock_irqrestore(&priv->lock, flags); + irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32))); +- handle_nested_irq(irq); ++ raw_spin_lock_irqsave(&priv->wa_lock, wa_flags); ++ generic_handle_irq(irq); ++ raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags); + } + } + spin_lock_irqsave(&priv->lock, flags); +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index a4668ddd94551a..4adfa5af162f1d 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -117,6 +117,7 @@ + + #define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ + ++#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ + #define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ + + /* +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 6826cc50d29f36..93b98a7f4c7fd9 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -124,6 +124,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + + {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + ++ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + + /* required last entry */ +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index 945d08531de376..82808cc373f68b 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -1866,7 +1866,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, + if (!(cqhci_readl(cq_host, CQHCI_CAP) 
& CQHCI_CAP_CS)) + return 0; + +- ice = of_qcom_ice_get(dev); ++ ice = devm_of_qcom_ice_get(dev); + if (ice == ERR_PTR(-EOPNOTSUPP)) { + dev_warn(dev, "Disabling inline encryption support\n"); + ice = NULL; +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c +index 90ab2f1058ce0e..2d18a03d927421 100644 +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -2596,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds) + struct mt7530_priv *priv = ds->priv; + int ret, i; + ++ ds->assisted_learning_on_cpu_port = true; ++ ds->mtu_enforcement_ingress = true; ++ + mt753x_trap_frames(priv); + + /* Enable and reset MIB counters */ +@@ -2735,9 +2738,6 @@ mt7531_setup(struct dsa_switch *ds) + + mt7531_setup_common(ds); + +- ds->assisted_learning_on_cpu_port = true; +- ds->mtu_enforcement_ingress = true; +- + return 0; + } + +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index da7260e505a2e4..ef52d1ae27d694 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -5047,6 +5047,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { + .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, + .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, + .port_tag_remap = mv88e6095_port_tag_remap, ++ .port_set_policy = mv88e6352_port_set_policy, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, +@@ -5071,8 +5072,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, + .reset = mv88e6352_g1_reset, +- .vtu_getnext = mv88e6185_g1_vtu_getnext, +- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, ++ .vtu_getnext = mv88e6352_g1_vtu_getnext, ++ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, ++ .stu_getnext = mv88e6352_g1_stu_getnext, ++ .stu_loadpurge = mv88e6352_g1_stu_loadpurge, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, +@@ -5097,6 +5100,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { + .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, + .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, + .port_tag_remap = mv88e6095_port_tag_remap, ++ .port_set_policy = mv88e6352_port_set_policy, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, +@@ -5120,8 +5124,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, + .reset = mv88e6352_g1_reset, +- .vtu_getnext = mv88e6185_g1_vtu_getnext, +- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, ++ .vtu_getnext = mv88e6352_g1_vtu_getnext, ++ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, ++ .stu_getnext = mv88e6352_g1_stu_getnext, ++ .stu_loadpurge = mv88e6352_g1_stu_loadpurge, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, +@@ -5713,7 +5719,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, +- .atu_move_port_mask = 0x1f, ++ .atu_move_port_mask = 0xf, + .g1_irqs = 9, + .g2_irqs = 10, + .pvt = true, +@@ -6114,9 +6120,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { + .num_databases = 4096, + .num_macs = 8192, + .num_ports = 
7, +- .num_internal_phys = 5, ++ .num_internal_phys = 2, ++ .internal_phys_offset = 3, + .num_gpio = 15, + .max_vid = 4095, ++ .max_sid = 63, + .port_base_addr = 0x10, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, +@@ -6139,9 +6147,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { + .num_databases = 4096, + .num_macs = 8192, + .num_ports = 7, +- .num_internal_phys = 5, ++ .num_internal_phys = 2, ++ .internal_phys_offset = 3, + .num_gpio = 15, + .max_vid = 4095, ++ .max_sid = 63, + .port_base_addr = 0x10, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, +@@ -6150,6 +6160,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { + .g1_irqs = 8, + .g2_irqs = 10, + .atu_move_port_mask = 0xf, ++ .pvt = true, + .multi_chip = true, + .edsa_support = MV88E6XXX_EDSA_SUPPORTED, + .ptp_support = true, +@@ -6172,7 +6183,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, +- .atu_move_port_mask = 0x1f, ++ .atu_move_port_mask = 0xf, + .g1_irqs = 9, + .g2_irqs = 10, + .pvt = true, +diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c +index ea773cfa0af67b..733f133d69e75f 100644 +--- a/drivers/net/ethernet/amd/pds_core/adminq.c ++++ b/drivers/net/ethernet/amd/pds_core/adminq.c +@@ -5,11 +5,6 @@ + + #include "core.h" + +-struct pdsc_wait_context { +- struct pdsc_qcq *qcq; +- struct completion wait_completion; +-}; +- + static int pdsc_process_notifyq(struct pdsc_qcq *qcq) + { + union pds_core_notifyq_comp *comp; +@@ -110,10 +105,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq) + q_info = &q->info[q->tail_idx]; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + +- /* Copy out the completion data */ +- memcpy(q_info->dest, comp, sizeof(*comp)); +- +- complete_all(&q_info->wc->wait_completion); ++ if (!completion_done(&q_info->completion)) { ++ memcpy(q_info->dest, comp, sizeof(*comp)); ++ complete(&q_info->completion); ++ } + + if (cq->tail_idx == cq->num_descs - 1) + cq->done_color = !cq->done_color; +@@ -166,8 +161,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data) + static int __pdsc_adminq_post(struct pdsc *pdsc, + struct pdsc_qcq *qcq, + union pds_core_adminq_cmd *cmd, +- union pds_core_adminq_comp *comp, +- struct pdsc_wait_context *wc) ++ union pds_core_adminq_comp *comp) + { + struct pdsc_queue *q = &qcq->q; + struct pdsc_q_info *q_info; +@@ -209,9 +203,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc, + /* Post the request */ + index = q->head_idx; + q_info = &q->info[index]; +- q_info->wc = wc; + q_info->dest = comp; + memcpy(q_info->desc, cmd, sizeof(*cmd)); ++ reinit_completion(&q_info->completion); + + dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n", + q->head_idx, q->tail_idx); +@@ -235,16 +229,13 @@ int pdsc_adminq_post(struct pdsc *pdsc, + union pds_core_adminq_comp *comp, + bool fast_poll) + { +- struct pdsc_wait_context wc = { +- .wait_completion = +- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion), +- }; + unsigned long poll_interval = 1; + unsigned long poll_jiffies; + unsigned long time_limit; + unsigned long time_start; + unsigned long time_done; + unsigned long remaining; ++ struct completion *wc; + int err = 0; + int index; + +@@ -254,20 +245,19 @@ int pdsc_adminq_post(struct pdsc *pdsc, + return -ENXIO; + } + +- wc.qcq = &pdsc->adminqcq; +- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc); ++ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp); + if (index < 0) { + err = index; + goto 
err_out; + } + ++ wc = &pdsc->adminqcq.q.info[index].completion; + time_start = jiffies; + time_limit = time_start + HZ * pdsc->devcmd_timeout; + do { + /* Timeslice the actual wait to catch IO errors etc early */ + poll_jiffies = msecs_to_jiffies(poll_interval); +- remaining = wait_for_completion_timeout(&wc.wait_completion, +- poll_jiffies); ++ remaining = wait_for_completion_timeout(wc, poll_jiffies); + if (remaining) + break; + +@@ -296,9 +286,11 @@ int pdsc_adminq_post(struct pdsc *pdsc, + dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n", + __func__, jiffies_to_msecs(time_done - time_start)); + +- /* Check the results */ +- if (time_after_eq(time_done, time_limit)) ++ /* Check the results and clear an un-completed timeout */ ++ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) { + err = -ETIMEDOUT; ++ complete(wc); ++ } + + dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index); + dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, +diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c +index fd1a5149c00319..fb7a5403e630db 100644 +--- a/drivers/net/ethernet/amd/pds_core/auxbus.c ++++ b/drivers/net/ethernet/amd/pds_core/auxbus.c +@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev, + dev_dbg(pf->dev, "%s: %s opcode %d\n", + __func__, dev_name(&padev->aux_dev.dev), req->opcode); + +- if (pf->state) +- return -ENXIO; +- + /* Wrap the client's request */ + cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD; + cmd.client_request.client_id = cpu_to_le16(padev->client_id); +diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c +index eb73c921dc1ed9..b3fa867c8ccd91 100644 +--- a/drivers/net/ethernet/amd/pds_core/core.c ++++ b/drivers/net/ethernet/amd/pds_core/core.c +@@ -169,8 +169,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa) + q->base = base; + q->base_pa = base_pa; + +- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) ++ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) { + cur->desc = base + (i * q->desc_size); ++ init_completion(&cur->completion); ++ } + } + + static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa) +diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h +index f410f7d132056b..858bebf7977624 100644 +--- a/drivers/net/ethernet/amd/pds_core/core.h ++++ b/drivers/net/ethernet/amd/pds_core/core.h +@@ -96,7 +96,7 @@ struct pdsc_q_info { + unsigned int bytes; + unsigned int nbufs; + struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS]; +- struct pdsc_wait_context *wc; ++ struct completion completion; + void *dest; + }; + +diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c +index 971d4278280d65..0032e8e3518117 100644 +--- a/drivers/net/ethernet/amd/pds_core/devlink.c ++++ b/drivers/net/ethernet/amd/pds_core/devlink.c +@@ -101,7 +101,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL, + .fw_control.oper = PDS_CORE_FW_GET_LIST, + }; +- struct pds_core_fw_list_info fw_list; ++ struct pds_core_fw_list_info fw_list = {}; + struct pdsc *pdsc = devlink_priv(dl); + union pds_core_dev_comp comp; + char buf[32]; +@@ -114,8 +114,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + if (!err) + memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); + mutex_unlock(&pdsc->devcmd_lock); +- if (err 
&& err != -EIO) +- return err; + + listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names)); + for (i = 0; i < listlen; i++) { +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index c201ea20e40476..dc89dbc13b251f 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3949,11 +3949,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); + + if (mtk_is_netsys_v3_or_greater(eth)) { +- /* PSE should not drop port1, port8 and port9 packets */ +- mtk_w32(eth, 0x00000302, PSE_DROP_CFG); ++ /* PSE dummy page mechanism */ ++ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) | ++ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ); ++ ++ /* PSE free buffer drop threshold */ ++ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8)); ++ ++ /* PSE should not drop port8, port9 and port13 packets from ++ * WDMA Tx ++ */ ++ mtk_w32(eth, 0x00002300, PSE_DROP_CFG); ++ ++ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx ++ * ring full ++ */ ++ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0)); ++ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1)); ++ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2)); + + /* GDM and CDM Threshold */ +- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); ++ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES); + mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); + + /* Disable GDM1 RX CRC stripping */ +@@ -3970,7 +3986,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) + mtk_w32(eth, 0x00000300, PSE_DROP_CFG); + + /* PSE should drop packets to port 8/9 on WDMA Rx ring full */ +- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); ++ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0)); + + /* PSE Free Queue Flow Control */ + mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +index 403219d987eff5..d1c7b5f1ee4a9c 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -149,7 +149,15 @@ + #define PSE_FQFC_CFG1 0x100 + #define PSE_FQFC_CFG2 0x104 + #define PSE_DROP_CFG 0x108 +-#define PSE_PPE0_DROP 0x110 ++#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4)) ++ ++/* PSE Last FreeQ Page Request Control */ ++#define PSE_DUMY_REQ 0x10C ++/* PSE_DUMY_REQ is not a typo but actually called like that also in ++ * MediaTek's datasheet ++ */ ++#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x)) ++#define DUMMY_PAGE_THR 0x1 + + /* PSE Input Queue Reservation Register*/ + #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) +diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c +index 0b88635f4fbca9..623607fd2cefd3 100644 +--- a/drivers/net/phy/microchip.c ++++ b/drivers/net/phy/microchip.c +@@ -31,47 +31,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page) + return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); + } + +-static int lan88xx_phy_config_intr(struct phy_device *phydev) +-{ +- int rc; +- +- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { +- /* unmask all source and clear them before enable */ +- rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF); +- rc = phy_read(phydev, LAN88XX_INT_STS); +- rc = phy_write(phydev, LAN88XX_INT_MASK, +- LAN88XX_INT_MASK_MDINTPIN_EN_ | +- LAN88XX_INT_MASK_LINK_CHANGE_); +- } else { +- rc = phy_write(phydev, LAN88XX_INT_MASK, 0); +- if (rc) +- return rc; +- +- /* Ack interrupts after they have been disabled */ +- rc = phy_read(phydev, LAN88XX_INT_STS); +- } +- 
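/*
 * Why fw_list gains a "= {}" initializer in the pds_core hunk above:
 * if the device command fails, the structure is never filled by
 * memcpy_fromio(), yet the min()/loop after it still reads the slot
 * count. Zero-initializing makes the failure path read harmless
 * zeroes. The same shape, reduced to a hypothetical example_list type
 * and an ordinary memcpy():
 */
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/printk.h>
#include <linux/string.h>

struct example_list {
	u8 num_slots;
	char names[4][16];	/* assumed NUL-terminated by the producer */
};

static void example_report(bool io_ok, const void *src)
{
	struct example_list list = {};	/* sane defaults if the read fails */
	size_t i, n;

	if (io_ok)
		memcpy(&list, src, sizeof(list));

	/* On failure num_slots is 0, so the loop safely does nothing. */
	n = min_t(size_t, list.num_slots, ARRAY_SIZE(list.names));
	for (i = 0; i < n; i++)
		pr_info("fw: %s\n", list.names[i]);
}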
+- return rc < 0 ? rc : 0; +-} +- +-static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev) +-{ +- int irq_status; +- +- irq_status = phy_read(phydev, LAN88XX_INT_STS); +- if (irq_status < 0) { +- phy_error(phydev); +- return IRQ_NONE; +- } +- +- if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_)) +- return IRQ_NONE; +- +- phy_trigger_machine(phydev); +- +- return IRQ_HANDLED; +-} +- + static int lan88xx_suspend(struct phy_device *phydev) + { + struct lan88xx_priv *priv = phydev->priv; +@@ -392,8 +351,9 @@ static struct phy_driver microchip_phy_driver[] = { + .config_aneg = lan88xx_config_aneg, + .link_change_notify = lan88xx_link_change_notify, + +- .config_intr = lan88xx_phy_config_intr, +- .handle_interrupt = lan88xx_handle_interrupt, ++ /* Interrupt handling is broken, do not define related ++ * functions to force polling. ++ */ + + .suspend = lan88xx_suspend, + .resume = genphy_resume, +diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c +index f550576eb9dae7..6f9d8da76c4dfb 100644 +--- a/drivers/net/phy/phy_led_triggers.c ++++ b/drivers/net/phy/phy_led_triggers.c +@@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy) + if (!phy->phy_num_led_triggers) + return 0; + +- phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev, +- sizeof(*phy->led_link_trigger), +- GFP_KERNEL); ++ phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger), ++ GFP_KERNEL); + if (!phy->led_link_trigger) { + err = -ENOMEM; + goto out_clear; +@@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy) + if (err) + goto out_free_link; + +- phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev, +- phy->phy_num_led_triggers, +- sizeof(struct phy_led_trigger), +- GFP_KERNEL); ++ phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers, ++ sizeof(struct phy_led_trigger), ++ GFP_KERNEL); + if (!phy->phy_led_triggers) { + err = -ENOMEM; + goto out_unreg_link; +@@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy) + out_unreg: + while (i--) + phy_led_trigger_unregister(&phy->phy_led_triggers[i]); +- devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); ++ kfree(phy->phy_led_triggers); + out_unreg_link: + phy_led_trigger_unregister(phy->led_link_trigger); + out_free_link: +- devm_kfree(&phy->mdio.dev, phy->led_link_trigger); ++ kfree(phy->led_link_trigger); + phy->led_link_trigger = NULL; + out_clear: + phy->phy_num_led_triggers = 0; +@@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy) + + for (i = 0; i < phy->phy_num_led_triggers; i++) + phy_led_trigger_unregister(&phy->phy_led_triggers[i]); ++ kfree(phy->phy_led_triggers); ++ phy->phy_led_triggers = NULL; + +- if (phy->led_link_trigger) ++ if (phy->led_link_trigger) { + phy_led_trigger_unregister(phy->led_link_trigger); ++ kfree(phy->led_link_trigger); ++ phy->led_link_trigger = NULL; ++ } + } + EXPORT_SYMBOL_GPL(phy_led_triggers_unregister); +diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c +index 616ecc38d1726c..5f470499e60024 100644 +--- a/drivers/net/vmxnet3/vmxnet3_xdp.c ++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c +@@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, + + xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq); + xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset, +- rbi->len, false); ++ rcd->len, false); + xdp_buff_clear_frags_flag(&xdp); + + xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog); +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c 
+index bcb5651f18e0f5..0115f8f5b7245f 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_TX: +- get_page(pdata); + xdpf = xdp_convert_buff_to_frame(xdp); ++ if (unlikely(!xdpf)) { ++ trace_xdp_exception(queue->info->netdev, prog, act); ++ break; ++ } ++ get_page(pdata); + err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); +- if (unlikely(!err)) ++ if (unlikely(err <= 0)) { ++ if (err < 0) ++ trace_xdp_exception(queue->info->netdev, prog, act); + xdp_return_frame_rx_napi(xdpf); +- else if (unlikely(err < 0)) +- trace_xdp_exception(queue->info->netdev, prog, act); ++ } + break; + case XDP_REDIRECT: + get_page(pdata); + err = xdp_do_redirect(queue->info->netdev, xdp, prog); + *need_xdp_flush = true; +- if (unlikely(err)) ++ if (unlikely(err)) { + trace_xdp_exception(queue->info->netdev, prog, act); ++ xdp_return_buff(xdp); ++ } + break; + case XDP_PASS: + case XDP_DROP: +diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c +index d687e8c2cc78dc..63ceed89b62ef9 100644 +--- a/drivers/ntb/hw/amd/ntb_hw_amd.c ++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c +@@ -1318,6 +1318,7 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = { + { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] }, ++ { PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] }, + { 0, } + }; +diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c +index 48823b53ede3e9..22aaa60d2d3846 100644 +--- a/drivers/ntb/hw/idt/ntb_hw_idt.c ++++ b/drivers/ntb/hw/idt/ntb_hw_idt.c +@@ -1041,7 +1041,7 @@ static inline char *idt_get_mw_name(enum idt_mw_type mw_type) + static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, + unsigned char *mw_cnt) + { +- struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws; ++ struct idt_mw_cfg *mws; + const struct idt_ntb_bar *bars; + enum idt_mw_type mw_type; + unsigned char widx, bidx, en_cnt; +@@ -1049,6 +1049,11 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, + int aprt_size; + u32 data; + ++ mws = devm_kcalloc(&ndev->ntb.pdev->dev, IDT_MAX_NR_MWS, ++ sizeof(*mws), GFP_KERNEL); ++ if (!mws) ++ return ERR_PTR(-ENOMEM); ++ + /* Retrieve the array of the BARs registers */ + bars = portdata_tbl[port].bars; + +@@ -1103,16 +1108,7 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, + } + } + +- /* Allocate memory for memory window descriptors */ +- ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws), +- GFP_KERNEL); +- if (!ret_mws) +- return ERR_PTR(-ENOMEM); +- +- /* Copy the info of detected memory windows */ +- memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws)); +- +- return ret_mws; ++ return mws; + } + + /* +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index f00665ad0c11a3..c6b0637e61debd 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -3972,6 +3972,15 @@ static void nvme_scan_work(struct work_struct *work) + nvme_scan_ns_sequential(ctrl); + } + mutex_unlock(&ctrl->scan_lock); ++ ++ /* Requeue if we have missed AENs */ ++ if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) ++ nvme_queue_scan(ctrl); ++#ifdef CONFIG_NVME_MULTIPATH ++ else if 
(ctrl->ana_log_buf) ++ /* Re-read the ANA log page to not miss updates */ ++ queue_work(nvme_wq, &ctrl->ana_work); ++#endif + } + + /* +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 32283301199f01..119afdfe4b91e9 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -426,7 +426,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) + struct nvme_ns *ns; + + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) +- return NULL; ++ return false; + + list_for_each_entry_srcu(ns, &head->list, siblings, + srcu_read_lock_held(&head->srcu)) { +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c +index d40d5a4ea932e0..570c58d2b5a585 100644 +--- a/drivers/nvme/target/fc.c ++++ b/drivers/nvme/target/fc.c +@@ -1030,33 +1030,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) + struct nvmet_fc_hostport *newhost, *match = NULL; + unsigned long flags; + ++ /* ++ * Caller holds a reference on tgtport. ++ */ ++ + /* if LLDD not implemented, leave as NULL */ + if (!hosthandle) + return NULL; + +- /* +- * take reference for what will be the newly allocated hostport if +- * we end up using a new allocation +- */ +- if (!nvmet_fc_tgtport_get(tgtport)) +- return ERR_PTR(-EINVAL); +- + spin_lock_irqsave(&tgtport->lock, flags); + match = nvmet_fc_match_hostport(tgtport, hosthandle); + spin_unlock_irqrestore(&tgtport->lock, flags); + +- if (match) { +- /* no new allocation - release reference */ +- nvmet_fc_tgtport_put(tgtport); ++ if (match) + return match; +- } + + newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); +- if (!newhost) { +- /* no new allocation - release reference */ +- nvmet_fc_tgtport_put(tgtport); ++ if (!newhost) + return ERR_PTR(-ENOMEM); +- } + + spin_lock_irqsave(&tgtport->lock, flags); + match = nvmet_fc_match_hostport(tgtport, hosthandle); +@@ -1065,6 +1056,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) + kfree(newhost); + newhost = match; + } else { ++ nvmet_fc_tgtport_get(tgtport); + newhost->tgtport = tgtport; + newhost->hosthandle = hosthandle; + INIT_LIST_HEAD(&newhost->host_list); +@@ -1099,7 +1091,8 @@ static void + nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) + { + nvmet_fc_tgtport_get(assoc->tgtport); +- queue_work(nvmet_wq, &assoc->del_work); ++ if (!queue_work(nvmet_wq, &assoc->del_work)) ++ nvmet_fc_tgtport_put(assoc->tgtport); + } + + static struct nvmet_fc_tgt_assoc * +diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c +index b278ab4338ceb5..d5c1b2a126a560 100644 +--- a/drivers/of/resolver.c ++++ b/drivers/of/resolver.c +@@ -262,25 +262,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups, + */ + int of_resolve_phandles(struct device_node *overlay) + { +- struct device_node *child, *local_fixups, *refnode; +- struct device_node *tree_symbols, *overlay_fixups; ++ struct device_node *child, *refnode; ++ struct device_node *overlay_fixups; ++ struct device_node __free(device_node) *local_fixups = NULL; + struct property *prop; + const char *refpath; + phandle phandle, phandle_delta; + int err; + +- tree_symbols = NULL; +- + if (!overlay) { + pr_err("null overlay\n"); +- err = -EINVAL; +- goto out; ++ return -EINVAL; + } + + if (!of_node_check_flag(overlay, OF_DETACHED)) { + pr_err("overlay not detached\n"); +- err = -EINVAL; +- goto out; ++ return -EINVAL; + } + + phandle_delta = live_tree_max_phandle() + 1; +@@ -292,7 +289,7 @@ int of_resolve_phandles(struct device_node 
*overlay) + + err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta); + if (err) +- goto out; ++ return err; + + overlay_fixups = NULL; + +@@ -301,16 +298,13 @@ int of_resolve_phandles(struct device_node *overlay) + overlay_fixups = child; + } + +- if (!overlay_fixups) { +- err = 0; +- goto out; +- } ++ if (!overlay_fixups) ++ return 0; + +- tree_symbols = of_find_node_by_path("/__symbols__"); ++ struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__"); + if (!tree_symbols) { + pr_err("no symbols in root of device tree.\n"); +- err = -EINVAL; +- goto out; ++ return -EINVAL; + } + + for_each_property_of_node(overlay_fixups, prop) { +@@ -324,14 +318,12 @@ int of_resolve_phandles(struct device_node *overlay) + if (err) { + pr_err("node label '%s' not found in live devicetree symbols table\n", + prop->name); +- goto out; ++ return err; + } + + refnode = of_find_node_by_path(refpath); +- if (!refnode) { +- err = -ENOENT; +- goto out; +- } ++ if (!refnode) ++ return -ENOENT; + + phandle = refnode->phandle; + of_node_put(refnode); +@@ -341,11 +333,8 @@ int of_resolve_phandles(struct device_node *overlay) + break; + } + +-out: + if (err) + pr_err("overlay phandle fixup failed: %d\n", err); +- of_node_put(tree_symbols); +- + return err; + } + EXPORT_SYMBOL_GPL(of_resolve_phandles); +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 8e5d818c29a983..b7cec139d816ba 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -885,6 +885,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) + resource_size_t offset, next_offset; + LIST_HEAD(resources); + struct resource *res, *next_res; ++ bool bus_registered = false; + char addr[64], *fmt; + const char *name; + int err; +@@ -948,6 +949,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) + name = dev_name(&bus->dev); + + err = device_register(&bus->dev); ++ bus_registered = true; + if (err) + goto unregister; + +@@ -1031,12 +1033,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) + unregister: + put_device(&bridge->dev); + device_del(&bridge->dev); +- + free: + #ifdef CONFIG_PCI_DOMAINS_GENERIC + pci_bus_release_domain_nr(bus, parent); + #endif +- kfree(bus); ++ if (bus_registered) ++ put_device(&bus->dev); ++ else ++ kfree(bus); ++ + return err; + } + +diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c +index c5d733216508e8..df660b7e1300ca 100644 +--- a/drivers/pinctrl/renesas/pinctrl-rza2.c ++++ b/drivers/pinctrl/renesas/pinctrl-rza2.c +@@ -243,6 +243,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv) + int ret; + + chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np); ++ if (!chip.label) ++ return -ENOMEM; ++ + chip.parent = priv->dev; + chip.ngpio = priv->npins; + +diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c +index 374d80dc6d17ab..bec22a001a5dd5 100644 +--- a/drivers/regulator/rk808-regulator.c ++++ b/drivers/regulator/rk808-regulator.c +@@ -267,8 +267,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = { + + static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode) + { +- int rid = rdev_get_id(rdev); +- int ctr_bit, reg; ++ unsigned int rid = rdev_get_id(rdev); ++ unsigned int ctr_bit, reg; + + reg = RK806_POWER_FPWM_EN0 + rid / 8; + ctr_bit = rid % 8; +diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c +index 905986c616559b..73848f764559b4 100644 +--- 
a/drivers/rtc/rtc-pcf85063.c ++++ b/drivers/rtc/rtc-pcf85063.c +@@ -35,6 +35,7 @@ + #define PCF85063_REG_CTRL1_CAP_SEL BIT(0) + #define PCF85063_REG_CTRL1_STOP BIT(5) + #define PCF85063_REG_CTRL1_EXT_TEST BIT(7) ++#define PCF85063_REG_CTRL1_SWR 0x58 + + #define PCF85063_REG_CTRL2 0x01 + #define PCF85063_CTRL2_AF BIT(6) +@@ -589,7 +590,7 @@ static int pcf85063_probe(struct i2c_client *client) + + i2c_set_clientdata(client, pcf85063); + +- err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp); ++ err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp); + if (err) { + dev_err(&client->dev, "RTC chip is not present\n"); + return err; +@@ -599,6 +600,22 @@ static int pcf85063_probe(struct i2c_client *client) + if (IS_ERR(pcf85063->rtc)) + return PTR_ERR(pcf85063->rtc); + ++ /* ++ * If a Power loss is detected, SW reset the device. ++ * From PCF85063A datasheet: ++ * There is a low probability that some devices will have corruption ++ * of the registers after the automatic power-on reset... ++ */ ++ if (tmp & PCF85063_REG_SC_OS) { ++ dev_warn(&client->dev, ++ "POR issue detected, sending a SW reset\n"); ++ err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1, ++ PCF85063_REG_CTRL1_SWR); ++ if (err < 0) ++ dev_warn(&client->dev, ++ "SW reset failed, trying to continue\n"); ++ } ++ + err = pcf85063_load_capacitance(pcf85063, client->dev.of_node, + config->force_cap_7000 ? 7000 : 0); + if (err < 0) +diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c +index e5d947c763ea5d..6a030ba38bf360 100644 +--- a/drivers/s390/char/sclp_con.c ++++ b/drivers/s390/char/sclp_con.c +@@ -263,6 +263,19 @@ static struct console sclp_console = + .index = 0 /* ttyS0 */ + }; + ++/* ++ * Release allocated pages. ++ */ ++static void __init __sclp_console_free_pages(void) ++{ ++ struct list_head *page, *p; ++ ++ list_for_each_safe(page, p, &sclp_con_pages) { ++ list_del(page); ++ free_page((unsigned long)page); ++ } ++} ++ + /* + * called by console_init() in drivers/char/tty_io.c at boot-time. + */ +@@ -282,6 +295,10 @@ sclp_console_init(void) + /* Allocate pages for output buffering */ + for (i = 0; i < sclp_console_pages; i++) { + page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); ++ if (!page) { ++ __sclp_console_free_pages(); ++ return -ENOMEM; ++ } + list_add_tail(page, &sclp_con_pages); + } + sclp_conbuf = NULL; +diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c +index 892c18d2f87e90..d3edacb6ee148b 100644 +--- a/drivers/s390/char/sclp_tty.c ++++ b/drivers/s390/char/sclp_tty.c +@@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = { + .flush_buffer = sclp_tty_flush_buffer, + }; + ++/* Release allocated pages. 
*/ ++static void __init __sclp_tty_free_pages(void) ++{ ++ struct list_head *page, *p; ++ ++ list_for_each_safe(page, p, &sclp_tty_pages) { ++ list_del(page); ++ free_page((unsigned long)page); ++ } ++} ++ + static int __init + sclp_tty_init(void) + { +@@ -516,6 +527,7 @@ sclp_tty_init(void) + for (i = 0; i < MAX_KMEM_PAGES; i++) { + page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (page == NULL) { ++ __sclp_tty_free_pages(); + tty_driver_kref_put(driver); + return -ENOMEM; + } +diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c +index f78c5f8a49ffac..7e64661d215bd2 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_main.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c +@@ -911,8 +911,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work, + container_of(work, typeof(*phy), works[event]); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; ++ struct asd_sas_port *sas_port = sas_phy->port; ++ struct hisi_sas_port *port = phy->port; ++ struct device *dev = hisi_hba->dev; ++ struct domain_device *port_dev; + int phy_no = sas_phy->id; + ++ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) && ++ sas_port && port && (port->id != phy->port_id)) { ++ dev_info(dev, "phy%d's hw port id changed from %d to %llu\n", ++ phy_no, port->id, phy->port_id); ++ port_dev = sas_port->port_dev; ++ if (port_dev && !dev_is_expander(port_dev->dev_type)) { ++ /* ++ * Set the device state to gone to block ++ * sending IO to the device. ++ */ ++ set_bit(SAS_DEV_GONE, &port_dev->state); ++ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); ++ return; ++ } ++ } ++ + phy->wait_phyup_cnt = 0; + if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) + hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); +diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c +index ee2da8e49d4cfb..a9d6dac4133466 100644 +--- a/drivers/scsi/pm8001/pm8001_sas.c ++++ b/drivers/scsi/pm8001/pm8001_sas.c +@@ -719,6 +719,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) + spin_lock_irqsave(&pm8001_ha->lock, flags); + } + PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); ++ pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; + pm8001_free_dev(pm8001_dev); + } else { + pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); +diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c +index 22bdce0bc32792..9c0142a010bac1 100644 +--- a/drivers/scsi/scsi.c ++++ b/drivers/scsi/scsi.c +@@ -693,26 +693,23 @@ void scsi_cdl_check(struct scsi_device *sdev) + */ + int scsi_cdl_enable(struct scsi_device *sdev, bool enable) + { +- struct scsi_mode_data data; +- struct scsi_sense_hdr sshdr; +- struct scsi_vpd *vpd; +- bool is_ata = false; + char buf[64]; ++ bool is_ata; + int ret; + + if (!sdev->cdl_supported) + return -EOPNOTSUPP; + + rcu_read_lock(); +- vpd = rcu_dereference(sdev->vpd_pg89); +- if (vpd) +- is_ata = true; ++ is_ata = rcu_dereference(sdev->vpd_pg89); + rcu_read_unlock(); + + /* + * For ATA devices, CDL needs to be enabled with a SET FEATURES command. 
+ */ + if (is_ata) { ++ struct scsi_mode_data data; ++ struct scsi_sense_hdr sshdr; + char *buf_data; + int len; + +@@ -721,16 +718,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) + if (ret) + return -EINVAL; + +- /* Enable CDL using the ATA feature page */ ++ /* Enable or disable CDL using the ATA feature page */ + len = min_t(size_t, sizeof(buf), + data.length - data.header_length - + data.block_descriptor_length); + buf_data = buf + data.header_length + + data.block_descriptor_length; +- if (enable) +- buf_data[4] = 0x02; +- else +- buf_data[4] = 0; ++ ++ /* ++ * If we want to enable CDL and CDL is already enabled on the ++ * device, do nothing. This avoids needlessly resetting the CDL ++ * statistics on the device as that is implied by the CDL enable ++ * action. Similar to this, there is no need to do anything if ++ * we want to disable CDL and CDL is already disabled. ++ */ ++ if (enable) { ++ if ((buf_data[4] & 0x03) == 0x02) ++ goto out; ++ buf_data[4] &= ~0x03; ++ buf_data[4] |= 0x02; ++ } else { ++ if ((buf_data[4] & 0x03) == 0x00) ++ goto out; ++ buf_data[4] &= ~0x03; ++ } + + ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, + &data, &sshdr); +@@ -742,6 +753,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) + } + } + ++out: + sdev->cdl_enable = enable; + + return 0; +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index e6dc2c556fde9e..bd75e3ebc14da3 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1152,8 +1152,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request); + */ + static void scsi_cleanup_rq(struct request *rq) + { ++ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); ++ ++ cmd->flags = 0; ++ + if (rq->rq_flags & RQF_DONTPREP) { +- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); ++ scsi_mq_uninit_cmd(cmd); + rq->rq_flags &= ~RQF_DONTPREP; + } + } +diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c +index fbab7fe5c652b9..d6e205e3812a96 100644 +--- a/drivers/soc/qcom/ice.c ++++ b/drivers/soc/qcom/ice.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -328,6 +329,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev) + } + EXPORT_SYMBOL_GPL(of_qcom_ice_get); + ++static void qcom_ice_put(const struct qcom_ice *ice) ++{ ++ struct platform_device *pdev = to_platform_device(ice->dev); ++ ++ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice")) ++ platform_device_put(pdev); ++} ++ ++static void devm_of_qcom_ice_put(struct device *dev, void *res) ++{ ++ qcom_ice_put(*(struct qcom_ice **)res); ++} ++ ++/** ++ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from ++ * a DT node. ++ * @dev: device pointer for the consumer device. ++ * ++ * This function will provide an ICE instance either by creating one for the ++ * consumer device if its DT node provides the 'ice' reg range and the 'ice' ++ * clock (for legacy DT style). On the other hand, if consumer provides a ++ * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already ++ * be created and so this function will return that instead. ++ * ++ * Return: ICE pointer on success, NULL if there is no ICE data provided by the ++ * consumer or ERR_PTR() on error. 
++ */ ++struct qcom_ice *devm_of_qcom_ice_get(struct device *dev) ++{ ++ struct qcom_ice *ice, **dr; ++ ++ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL); ++ if (!dr) ++ return ERR_PTR(-ENOMEM); ++ ++ ice = of_qcom_ice_get(dev); ++ if (!IS_ERR_OR_NULL(ice)) { ++ *dr = ice; ++ devres_add(dev, dr); ++ } else { ++ devres_free(dr); ++ } ++ ++ return ice; ++} ++EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get); ++ + static int qcom_ice_probe(struct platform_device *pdev) + { + struct qcom_ice *engine; +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c +index daa32bde615561..da4442954375b1 100644 +--- a/drivers/spi/spi-imx.c ++++ b/drivers/spi/spi-imx.c +@@ -1614,10 +1614,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller, + struct spi_device *spi, + struct spi_transfer *transfer) + { ++ int ret; + struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); + unsigned long hz_per_byte, byte_limit; + +- spi_imx_setupxfer(spi, transfer); ++ ret = spi_imx_setupxfer(spi, transfer); ++ if (ret < 0) ++ return ret; + transfer->effective_speed_hz = spi_imx->spi_bus_clk; + + /* flush rxfifo before transfer */ +diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c +index d1afa4140e8a26..e3c236025a7b3b 100644 +--- a/drivers/spi/spi-tegra210-quad.c ++++ b/drivers/spi/spi-tegra210-quad.c +@@ -1117,9 +1117,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, + (&tqspi->xfer_completion, + QSPI_DMA_TIMEOUT); + +- if (WARN_ON(ret == 0)) { +- dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n", +- ret); ++ if (WARN_ON_ONCE(ret == 0)) { ++ dev_err_ratelimited(tqspi->dev, ++ "QSPI Transfer failed with timeout\n"); + if (tqspi->is_curr_dma_xfer && + (tqspi->cur_direction & DATA_DIR_TX)) + dmaengine_terminate_all +diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c +index 7c3310a2b28a41..b92a8a5b2e8c97 100644 +--- a/drivers/thunderbolt/tb.c ++++ b/drivers/thunderbolt/tb.c +@@ -1370,11 +1370,15 @@ static void tb_scan_port(struct tb_port *port) + goto out_rpm_put; + } + +- tb_retimer_scan(port, true); +- + sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, + tb_downstream_route(port)); + if (IS_ERR(sw)) { ++ /* ++ * Make the downstream retimers available even if there ++ * is no router connected. ++ */ ++ tb_retimer_scan(port, true); ++ + /* + * If there is an error accessing the connected switch + * it may be connected to another domain. Also we allow +@@ -1424,6 +1428,14 @@ static void tb_scan_port(struct tb_port *port) + upstream_port = tb_upstream_port(sw); + tb_configure_link(port, upstream_port, sw); + ++ /* ++ * Scan for downstream retimers. We only scan them after the ++ * router has been enumerated to avoid issues with certain ++ * Pluggable devices that expect the host to enumerate them ++ * within certain timeout. ++ */ ++ tb_retimer_scan(port, true); ++ + /* + * CL0s and CL1 are enabled and supported together. + * Silently ignore CLx enabling in case CLx is not supported. 
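The thunderbolt/tb.c hunks above move tb_retimer_scan() from before router allocation to after the router has been enumerated, keeping an early scan only for the case where no router is connected. A minimal sketch of the resulting control flow — tb_scan_port_flow() is a hypothetical condensation of tb_scan_port(), with error handling, runtime-PM and CLx details elided; it is illustrative only and not part of the applied patch:

static void tb_scan_port_flow(struct tb_port *port)
{
	struct tb_switch *sw;

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/* No router behind this port: expose retimers anyway. */
		tb_retimer_scan(port, true);
		return;
	}

	/* ... add and configure the router first ... */
	tb_configure_link(port, tb_upstream_port(sw), sw);

	/*
	 * Scan retimers only after the router is enumerated, so
	 * pluggable devices that expect the host to enumerate them
	 * within a certain timeout are not kept waiting.
	 */
	tb_retimer_scan(port, true);
}
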
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index 90953e679e386a..76b6429fb9e92e 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -1741,6 +1741,12 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device, + if (!device->port.membase) + return -ENODEV; + ++ /* Disable DM / single-character modes */ ++ msm_write(&device->port, 0, UARTDM_DMEN); ++ msm_write(&device->port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR); ++ msm_write(&device->port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR); ++ msm_write(&device->port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR); ++ + device->con->write = msm_serial_early_write_dm; + return 0; + } +diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c +index d195c5de52e78f..e86b00873d0ea6 100644 +--- a/drivers/tty/serial/sifive.c ++++ b/drivers/tty/serial/sifive.c +@@ -562,8 +562,11 @@ static void sifive_serial_break_ctl(struct uart_port *port, int break_state) + static int sifive_serial_startup(struct uart_port *port) + { + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); ++ unsigned long flags; + ++ uart_port_lock_irqsave(&ssp->port, &flags); + __ssp_enable_rxwm(ssp); ++ uart_port_unlock_irqrestore(&ssp->port, flags); + + return 0; + } +@@ -571,9 +574,12 @@ static int sifive_serial_startup(struct uart_port *port) + static void sifive_serial_shutdown(struct uart_port *port) + { + struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); ++ unsigned long flags; + ++ uart_port_lock_irqsave(&ssp->port, &flags); + __ssp_disable_rxwm(ssp); + __ssp_disable_txwm(ssp); ++ uart_port_unlock_irqrestore(&ssp->port, flags); + } + + /** +diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c +index da8c1734d33358..411109a5ebbffd 100644 +--- a/drivers/ufs/core/ufs-mcq.c ++++ b/drivers/ufs/core/ufs-mcq.c +@@ -632,13 +632,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) + unsigned long flags; + int err; + +- if (!ufshcd_cmd_inflight(lrbp->cmd)) { +- dev_err(hba->dev, +- "%s: skip abort. cmd at tag %d already completed.\n", +- __func__, tag); +- return FAILED; +- } +- + /* Skip task abort in case previous aborts failed and report failure */ + if (lrbp->req_abort_skip) { + dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", +@@ -647,6 +640,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) + } + + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); ++ if (!hwq) { ++ dev_err(hba->dev, "%s: skip abort. 
cmd at tag %d already completed.\n", ++ __func__, tag); ++ return FAILED; ++ } + + if (ufshcd_mcq_sqe_search(hba, hwq, tag)) { + /* +diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c +index d138b66d5e350b..f61126189876e9 100644 +--- a/drivers/ufs/host/ufs-exynos.c ++++ b/drivers/ufs/host/ufs-exynos.c +@@ -990,9 +990,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) + exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4); + exynos_ufs_set_unipro_pclk_div(ufs); + ++ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); ++ + /* unipro */ + exynos_ufs_config_unipro(ufs); + ++ if (ufs->drv_data->pre_link) ++ ufs->drv_data->pre_link(ufs); ++ + /* m-phy */ + exynos_ufs_phy_init(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { +@@ -1000,11 +1005,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) + exynos_ufs_config_phy_cap_attr(ufs); + } + +- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); +- +- if (ufs->drv_data->pre_link) +- ufs->drv_data->pre_link(ufs); +- + return 0; + } + +diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c +index 51ed40529f9a7b..c6417ef074a478 100644 +--- a/drivers/ufs/host/ufs-qcom.c ++++ b/drivers/ufs/host/ufs-qcom.c +@@ -121,7 +121,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host) + struct device *dev = hba->dev; + struct qcom_ice *ice; + +- ice = of_qcom_ice_get(dev); ++ ice = devm_of_qcom_ice_get(dev); + if (ice == ERR_PTR(-EOPNOTSUPP)) { + dev_warn(dev, "Disabling inline encryption support\n"); + ice = NULL; +diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c +index b1b46c7c63f8b3..05e8414c31df4c 100644 +--- a/drivers/usb/cdns3/cdns3-gadget.c ++++ b/drivers/usb/cdns3/cdns3-gadget.c +@@ -1962,6 +1962,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) + unsigned int bit; + unsigned long reg; + ++ local_bh_disable(); + spin_lock_irqsave(&priv_dev->lock, flags); + + reg = readl(&priv_dev->regs->usb_ists); +@@ -2003,6 +2004,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) + irqend: + writel(~0, &priv_dev->regs->ep_ien); + spin_unlock_irqrestore(&priv_dev->lock, flags); ++ local_bh_enable(); + + return ret; + } +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c +index b3cbca361a9696..73d5b9466676c4 100644 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c +@@ -328,6 +328,13 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) + return ret; + } + ++static void ci_hdrc_imx_disable_regulator(void *arg) ++{ ++ struct ci_hdrc_imx_data *data = arg; ++ ++ regulator_disable(data->hsic_pad_regulator); ++} ++ + static int ci_hdrc_imx_probe(struct platform_device *pdev) + { + struct ci_hdrc_imx_data *data; +@@ -386,6 +393,13 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) + "Failed to enable HSIC pad regulator\n"); + goto err_put; + } ++ ret = devm_add_action_or_reset(dev, ++ ci_hdrc_imx_disable_regulator, data); ++ if (ret) { ++ dev_err(dev, ++ "Failed to add regulator devm action\n"); ++ goto err_put; ++ } + } + } + +@@ -424,11 +438,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) + + ret = imx_get_clks(dev); + if (ret) +- goto disable_hsic_regulator; ++ goto qos_remove_request; + + ret = imx_prepare_enable_clks(dev); + if (ret) +- goto disable_hsic_regulator; ++ goto qos_remove_request; + + data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0); + if (IS_ERR(data->phy)) { +@@ -458,7 
+472,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) + of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) { + pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL; + data->override_phy_control = true; +- usb_phy_init(pdata.usb_phy); ++ ret = usb_phy_init(pdata.usb_phy); ++ if (ret) { ++ dev_err(dev, "Failed to init phy\n"); ++ goto err_clk; ++ } + } + + if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM) +@@ -467,7 +485,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) + ret = imx_usbmisc_init(data->usbmisc_data); + if (ret) { + dev_err(dev, "usbmisc init failed, ret=%d\n", ret); +- goto err_clk; ++ goto phy_shutdown; + } + + data->ci_pdev = ci_hdrc_add_device(dev, +@@ -476,7 +494,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) + if (IS_ERR(data->ci_pdev)) { + ret = PTR_ERR(data->ci_pdev); + dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n"); +- goto err_clk; ++ goto phy_shutdown; + } + + if (data->usbmisc_data) { +@@ -510,17 +528,18 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) + + disable_device: + ci_hdrc_remove_device(data->ci_pdev); ++phy_shutdown: ++ if (data->override_phy_control) ++ usb_phy_shutdown(data->phy); + err_clk: + imx_disable_unprepare_clks(dev); +-disable_hsic_regulator: +- if (data->hsic_pad_regulator) +- /* don't overwrite original ret (cf. EPROBE_DEFER) */ +- regulator_disable(data->hsic_pad_regulator); ++qos_remove_request: + if (pdata.flags & CI_HDRC_PMQOS) + cpu_latency_qos_remove_request(&data->pm_qos_req); + data->ci_pdev = NULL; + err_put: +- put_device(data->usbmisc_data->dev); ++ if (data->usbmisc_data) ++ put_device(data->usbmisc_data->dev); + return ret; + } + +@@ -541,10 +560,9 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev) + imx_disable_unprepare_clks(&pdev->dev); + if (data->plat_data->flags & CI_HDRC_PMQOS) + cpu_latency_qos_remove_request(&data->pm_qos_req); +- if (data->hsic_pad_regulator) +- regulator_disable(data->hsic_pad_regulator); + } +- put_device(data->usbmisc_data->dev); ++ if (data->usbmisc_data) ++ put_device(data->usbmisc_data->dev); + } + + static void ci_hdrc_imx_shutdown(struct platform_device *pdev) +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c +index 6830be4419e20a..559c121f092300 100644 +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -726,7 +726,7 @@ static int wdm_open(struct inode *inode, struct file *file) + rv = -EBUSY; + goto out; + } +- ++ smp_rmb(); /* ordered against wdm_wwan_port_stop() */ + rv = usb_autopm_get_interface(desc->intf); + if (rv < 0) { + dev_err(&desc->intf->dev, "Error autopm - %d\n", rv); +@@ -829,6 +829,7 @@ static struct usb_class_driver wdm_class = { + static int wdm_wwan_port_start(struct wwan_port *port) + { + struct wdm_device *desc = wwan_port_get_drvdata(port); ++ int rv; + + /* The interface is both exposed via the WWAN framework and as a + * legacy usbmisc chardev. 
If chardev is already open, just fail +@@ -848,7 +849,15 @@ static int wdm_wwan_port_start(struct wwan_port *port) + wwan_port_txon(port); + + /* Start getting events */ +- return usb_submit_urb(desc->validity, GFP_KERNEL); ++ rv = usb_submit_urb(desc->validity, GFP_KERNEL); ++ if (rv < 0) { ++ wwan_port_txoff(port); ++ desc->manage_power(desc->intf, 0); ++ /* this must be last lest we race with chardev open */ ++ clear_bit(WDM_WWAN_IN_USE, &desc->flags); ++ } ++ ++ return rv; + } + + static void wdm_wwan_port_stop(struct wwan_port *port) +@@ -859,8 +868,10 @@ static void wdm_wwan_port_stop(struct wwan_port *port) + poison_urbs(desc); + desc->manage_power(desc->intf, 0); + clear_bit(WDM_READ, &desc->flags); +- clear_bit(WDM_WWAN_IN_USE, &desc->flags); + unpoison_urbs(desc); ++ smp_wmb(); /* ordered against wdm_open() */ ++ /* this must be last lest we open a poisoned device */ ++ clear_bit(WDM_WWAN_IN_USE, &desc->flags); + } + + static void wdm_wwan_port_tx_complete(struct urb *urb) +@@ -868,7 +879,7 @@ static void wdm_wwan_port_tx_complete(struct urb *urb) + struct sk_buff *skb = urb->context; + struct wdm_device *desc = skb_shinfo(skb)->destructor_arg; + +- usb_autopm_put_interface(desc->intf); ++ usb_autopm_put_interface_async(desc->intf); + wwan_port_txon(desc->wwanp); + kfree_skb(skb); + } +@@ -898,7 +909,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb) + req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE); + req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; + req->wValue = 0; +- req->wIndex = desc->inum; ++ req->wIndex = desc->inum; /* already converted */ + req->wLength = cpu_to_le16(skb->len); + + skb_shinfo(skb)->destructor_arg = desc; +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 6926bd639ec6ff..4903c733d37ae7 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -369,6 +369,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + ++ /* SanDisk Corp. 
SanDisk 3.2Gen1 */ ++ { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, ++ + /* Realforce 87U Keyboard */ + { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, + +@@ -383,6 +386,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x0904, 0x6103), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + ++ /* Silicon Motion Flash Drive */ ++ { USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT }, ++ + /* Sound Devices USBPre2 */ + { USB_DEVICE(0x0926, 0x0202), .driver_info = + USB_QUIRK_ENDPOINT_IGNORE }, +@@ -536,6 +542,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x2040, 0x7200), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + ++ /* VLI disk */ ++ { USB_DEVICE(0x2109, 0x0711), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* Raydium Touchscreen */ + { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM }, + +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 052852f8014676..54a4ee2b90b7f4 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -148,11 +148,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = { + {} + }; + ++/* ++ * Intel Merrifield SoC uses these endpoints for tracing and they cannot ++ * be re-allocated if being used because the side band flow control signals ++ * are hard wired to certain endpoints: ++ * - 1 High BW Bulk IN (IN#1) (RTIT) ++ * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8) ++ */ ++static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 }; ++ + static const struct property_entry dwc3_pci_mrfld_properties[] = { + PROPERTY_ENTRY_STRING("dr_mode", "otg"), + PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), + PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"), + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), ++ PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints), + PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + {} +diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c +index d19a5d2d65adb9..ae30aa50a58257 100644 +--- a/drivers/usb/dwc3/dwc3-xilinx.c ++++ b/drivers/usb/dwc3/dwc3-xilinx.c +@@ -207,15 +207,13 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) + + skip_usb3_phy: + /* ulpi reset via gpio-modepin or gpio-framework driver */ +- reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); ++ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(reset_gpio)) { + return dev_err_probe(dev, PTR_ERR(reset_gpio), + "Failed to request reset GPIO\n"); + } + + if (reset_gpio) { +- /* Toggle ulpi to reset the phy. 
*/ +- gpiod_set_value_cansleep(reset_gpio, 1); + usleep_range(5000, 10000); + gpiod_set_value_cansleep(reset_gpio, 0); + usleep_range(5000, 10000); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index fdaace1564f96f..f51d743bb3ecc6 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -548,6 +548,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) + int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index) + { + struct dwc3_gadget_ep_cmd_params params; ++ struct dwc3_ep *dep; + u32 cmd; + int i; + int ret; +@@ -564,8 +565,13 @@ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index) + return ret; + + /* Reset resource allocation flags */ +- for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++) +- dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED; ++ for (i = resource_index; i < dwc->num_eps; i++) { ++ dep = dwc->eps[i]; ++ if (!dep) ++ continue; ++ ++ dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED; ++ } + + return 0; + } +@@ -752,9 +758,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc) + + dwc->last_fifo_depth = fifo_depth; + /* Clear existing TXFIFO for all IN eps except ep0 */ +- for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); +- num += 2) { ++ for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) { + dep = dwc->eps[num]; ++ if (!dep) ++ continue; ++ + /* Don't change TXFRAMNUM on usb31 version */ + size = DWC3_IP_IS(DWC3) ? 0 : + dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) & +@@ -3670,6 +3678,8 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep, + + for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { + dep = dwc->eps[i]; ++ if (!dep) ++ continue; + + if (!(dep->flags & DWC3_EP_ENABLED)) + continue; +@@ -3858,6 +3868,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, + u8 epnum = event->endpoint_number; + + dep = dwc->eps[epnum]; ++ if (!dep) { ++ dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum); ++ return; ++ } + + if (!(dep->flags & DWC3_EP_ENABLED)) { + if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED)) +@@ -4570,6 +4584,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) + if (!count) + return IRQ_NONE; + ++ if (count > evt->length) { ++ dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n", ++ count, evt->length); ++ return IRQ_NONE; ++ } ++ + evt->count = count; + evt->flags |= DWC3_EVENT_PENDING; + +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c +index 573109ca5b7990..a09f72772e6e95 100644 +--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c ++++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c +@@ -548,6 +548,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx) + d->vhub = vhub; + d->index = idx; + d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1); ++ if (!d->name) ++ return -ENOMEM; ++ + d->regs = vhub->regs + 0x100 + 0x10 * idx; + + ast_vhub_init_ep0(vhub, &d->ep0, d); +diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c +index a219260ad3e6c2..cc1f579f02de1c 100644 +--- a/drivers/usb/host/max3421-hcd.c ++++ b/drivers/usb/host/max3421-hcd.c +@@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi) + usb_put_hcd(hcd); + } + ++static const struct spi_device_id max3421_spi_ids[] = { ++ { "max3421" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(spi, max3421_spi_ids); ++ + static const struct of_device_id max3421_of_match_table[] = { + { .compatible = 
"maxim,max3421", }, + {}, +@@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table); + static struct spi_driver max3421_driver = { + .probe = max3421_probe, + .remove = max3421_remove, ++ .id_table = max3421_spi_ids, + .driver = { + .name = "max3421-hcd", + .of_match_table = max3421_of_match_table, +diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c +index 900ea0d368e034..9f0a6b27e47cb6 100644 +--- a/drivers/usb/host/ohci-pci.c ++++ b/drivers/usb/host/ohci-pci.c +@@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) + return 0; + } + ++static int ohci_quirk_loongson(struct usb_hcd *hcd) ++{ ++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller); ++ ++ /* ++ * Loongson's LS7A OHCI controller (rev 0x02) has a ++ * flaw. MMIO register with offset 0x60/64 is treated ++ * as legacy PS2-compatible keyboard/mouse interface. ++ * Since OHCI only use 4KB BAR resource, LS7A OHCI's ++ * 32KB BAR is wrapped around (the 2nd 4KB BAR space ++ * is the same as the 1st 4KB internally). So add 4KB ++ * offset (0x1000) to the OHCI registers as a quirk. ++ */ ++ if (pdev->revision == 0x2) ++ hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */ ++ ++ return 0; ++} ++ + static int ohci_quirk_qemu(struct usb_hcd *hcd) + { + struct ohci_hcd *ohci = hcd_to_ohci(hcd); +@@ -224,6 +243,10 @@ static const struct pci_device_id ohci_pci_quirks[] = { + PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), + .driver_data = (unsigned long)ohci_quirk_amd700, + }, ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24), ++ .driver_data = (unsigned long)ohci_quirk_loongson, ++ }, + { + .vendor = PCI_VENDOR_ID_APPLE, + .device = 0x003f, +diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c +index 87f1597a0e5ab7..257e4d79971fda 100644 +--- a/drivers/usb/host/xhci-mvebu.c ++++ b/drivers/usb/host/xhci-mvebu.c +@@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + + return 0; + } +- +-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) +-{ +- struct xhci_hcd *xhci = hcd_to_xhci(hcd); +- +- /* Without reset on resume, the HC won't work at all */ +- xhci->quirks |= XHCI_RESET_ON_RESUME; +- +- return 0; +-} +diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h +index 3be021793cc8b0..9d26e22c48422f 100644 +--- a/drivers/usb/host/xhci-mvebu.h ++++ b/drivers/usb/host/xhci-mvebu.h +@@ -12,16 +12,10 @@ struct usb_hcd; + + #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) + int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); +-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); + #else + static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + { + return 0; + } +- +-static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) +-{ +- return 0; +-} + #endif + #endif /* __LINUX_XHCI_MVEBU_H */ +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index d68e9abcdc69a6..8832e0cedadaff 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { + }; + + static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { +- .init_quirk = xhci_mvebu_a3700_init_quirk, ++ .quirks = XHCI_RESET_ON_RESUME, + }; + + static const struct xhci_plat_priv xhci_plat_brcm = { +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 4a081685a1953e..cb944396294516 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1214,16 +1214,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, 
int slot_id, + * Stopped state, but it will soon change to Running. + * + * Assume this bug on unexpected Stop Endpoint failures. +- * Keep retrying until the EP starts and stops again, on +- * chips where this is known to help. Wait for 100ms. ++ * Keep retrying until the EP starts and stops again. + */ +- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) +- break; + fallthrough; + case EP_STATE_RUNNING: + /* Race, HW handled stop ep cmd before ep was running */ + xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", + GET_EP_CTX_STATE(ep_ctx)); ++ /* ++ * Don't retry forever if we guessed wrong or a defective HC never starts ++ * the EP or says 'Running' but fails the command. We must give back TDs. ++ */ ++ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) ++ break; + + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); + if (!command) { +@@ -3876,7 +3879,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + * enqueue a No Op TRB, this can prevent the Setup and Data Stage + * TRB to be breaked by the Link TRB. + */ +- if (trb_is_link(ep_ring->enqueue + 1)) { ++ if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) { + field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state; + queue_trb(xhci, ep_ring, false, 0, 0, + TRB_INTR_TARGET(0), field); +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index b8e2bfd4282809..b583b31ea5e72e 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1093,6 +1093,8 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) }, ++ /* Abacus Electrics */ ++ { USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 52be47d684ea66..9acb6f83732763 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -442,6 +442,11 @@ + #define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ + #define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ + ++/* ++ * Abacus Electrics ++ */ ++#define ABACUS_OPTICAL_PROBE_PID 0xf458 /* ABACUS ELECTRICS Optical Probe */ ++ + /* + * Oceanic product ids + */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index a9f95bb35bb0f5..5d669511609892 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -611,6 +611,7 @@ static void option_instat_callback(struct urb *urb); + /* Sierra Wireless products */ + #define SIERRA_VENDOR_ID 0x1199 + #define SIERRA_PRODUCT_EM9191 0x90d3 ++#define SIERRA_PRODUCT_EM9291 0x90e3 + + /* UNISOC (Spreadtrum) products */ + #define UNISOC_VENDOR_ID 0x1782 +@@ -2432,6 +2433,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 
0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c +index 24b8772a345e2f..bac5ab6377ae4b 100644 +--- a/drivers/usb/serial/usb-serial-simple.c ++++ b/drivers/usb/serial/usb-serial-simple.c +@@ -101,6 +101,11 @@ DEVICE(nokia, NOKIA_IDS); + { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ + DEVICE_N(novatel_gps, NOVATEL_IDS, 3); + ++/* OWON electronic test and measurement equipment driver */ ++#define OWON_IDS() \ ++ { USB_DEVICE(0x5345, 0x1234) } /* HDS200 oscilloscopes and others */ ++DEVICE(owon, OWON_IDS); ++ + /* Siemens USB/MPI adapter */ + #define SIEMENS_IDS() \ + { USB_DEVICE(0x908, 0x0004) } +@@ -135,6 +140,7 @@ static struct usb_serial_driver * const serial_drivers[] = { + &motorola_tetra_device, + &nokia_device, + &novatel_gps_device, ++ &owon_device, + &siemens_mpi_device, + &suunto_device, + &vivopay_device, +@@ -154,6 +160,7 @@ static const struct usb_device_id id_table[] = { + MOTOROLA_TETRA_IDS(), + NOKIA_IDS(), + NOVATEL_IDS(), ++ OWON_IDS(), + SIEMENS_IDS(), + SUUNTO_IDS(), + VIVOPAY_IDS(), +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index 1f8c9b16a0fb85..d460d71b425783 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -83,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_LUNS), + ++/* Reported-by: Oliver Neukum */ ++UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160, ++ "ADATA", ++ "Portable HDD CH94", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_ATA_1X), ++ + /* Reported-by: Benjamin Tissoires */ + UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, + "Initio Corporation", +diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig +index d43153fec18ea8..af5c214b220699 100644 +--- a/drivers/xen/Kconfig ++++ b/drivers/xen/Kconfig +@@ -278,7 +278,7 @@ config XEN_PRIVCMD_IRQFD + + config XEN_ACPI_PROCESSOR + tristate "Xen ACPI processor" +- depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ ++ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ + default m + help + This ACPI processor uploads Power Management information to the Xen +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 68092b64e29eac..e794606e7c780b 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -2225,15 +2225,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, + * will always return true. + * So here we need to do extra page alignment for + * filemap_range_has_page(). ++ * ++ * And do not decrease page_lockend right now, as it can be 0. + */ + const u64 page_lockstart = round_up(lockstart, PAGE_SIZE); +- const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1; ++ const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE); + + while (1) { + truncate_pagecache_range(inode, lockstart, lockend); + + lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, + cached_state); ++ /* The same page or adjacent pages. */ ++ if (page_lockend <= page_lockstart) ++ break; + /* + * We can't have ordered extents in the range, nor dirty/writeback + * pages, because we have locked the inode's VFS lock in exclusive +@@ -2245,7 +2250,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, + * we do, unlock the range and retry. 
+ */ + if (!filemap_range_has_page(inode->i_mapping, page_lockstart, +- page_lockend)) ++ page_lockend - 1)) + break; + + unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index db6977c15c2828..f0befbeb6cb833 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -2319,7 +2319,7 @@ static int fill_fscrypt_truncate(struct inode *inode, + + /* Try to writeback the dirty pagecaches */ + if (issued & (CEPH_CAP_FILE_BUFFER)) { +- loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1; ++ loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1; + + ret = filemap_write_and_wait_range(inode->i_mapping, + orig_pos, lend); +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c +index 6fe3c941b56514..4d6ba140276b5f 100644 +--- a/fs/ext4/block_validity.c ++++ b/fs/ext4/block_validity.c +@@ -351,10 +351,9 @@ int ext4_check_blockref(const char *function, unsigned int line, + { + __le32 *bref = p; + unsigned int blk; ++ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; + +- if (ext4_has_feature_journal(inode->i_sb) && +- (inode->i_ino == +- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) ++ if (journal && inode == journal->j_inode) + return 0; + + while (bref < p+max) { +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index ddfeaf19bff1ba..d3d28e65872027 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -378,10 +378,11 @@ static int __check_block_validity(struct inode *inode, const char *func, + unsigned int line, + struct ext4_map_blocks *map) + { +- if (ext4_has_feature_journal(inode->i_sb) && +- (inode->i_ino == +- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) ++ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; ++ ++ if (journal && inode == journal->j_inode) + return 0; ++ + if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { + ext4_error_inode(inode, func, line, map->m_pblk, + "lblock %lu mapped to illegal pblock %llu " +@@ -5478,7 +5479,7 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + oldsize & (inode->i_sb->s_blocksize - 1)) { + error = ext4_inode_attach_jinode(inode); + if (error) +- goto err_out; ++ goto out_mmap_sem; + } + + handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c +index e7e6701806ad26..7ffdf0d037fae0 100644 +--- a/fs/iomap/buffered-io.c ++++ b/fs/iomap/buffered-io.c +@@ -224,7 +224,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, + } + + /* truncate len if we find any trailing uptodate block(s) */ +- for ( ; i <= last; i++) { ++ while (++i <= last) { + if (ifs_block_is_uptodate(ifs, i)) { + plen -= (last - i + 1) * block_size; + last = i - 1; +diff --git a/fs/namespace.c b/fs/namespace.c +index 671e266b8fc5d2..5a885d35efe937 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2439,56 +2439,62 @@ static struct mountpoint *do_lock_mount(struct path *path, bool beneath) + struct vfsmount *mnt = path->mnt; + struct dentry *dentry; + struct mountpoint *mp = ERR_PTR(-ENOENT); ++ struct path under = {}; + + for (;;) { +- struct mount *m; ++ struct mount *m = real_mount(mnt); + + if (beneath) { +- m = real_mount(mnt); ++ path_put(&under); + read_seqlock_excl(&mount_lock); +- dentry = dget(m->mnt_mountpoint); ++ under.mnt = mntget(&m->mnt_parent->mnt); ++ under.dentry = dget(m->mnt_mountpoint); + read_sequnlock_excl(&mount_lock); ++ dentry = under.dentry; + } else { + dentry = path->dentry; + } + + inode_lock(dentry->d_inode); +- if 
(unlikely(cant_mount(dentry))) { +- inode_unlock(dentry->d_inode); +- goto out; +- } +- + namespace_lock(); + +- if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) { ++ if (unlikely(cant_mount(dentry) || !is_mounted(mnt))) ++ break; // not to be mounted on ++ ++ if (beneath && unlikely(m->mnt_mountpoint != dentry || ++ &m->mnt_parent->mnt != under.mnt)) { + namespace_unlock(); + inode_unlock(dentry->d_inode); +- goto out; ++ continue; // got moved + } + + mnt = lookup_mnt(path); +- if (likely(!mnt)) ++ if (unlikely(mnt)) { ++ namespace_unlock(); ++ inode_unlock(dentry->d_inode); ++ path_put(path); ++ path->mnt = mnt; ++ path->dentry = dget(mnt->mnt_root); ++ continue; // got overmounted ++ } ++ mp = get_mountpoint(dentry); ++ if (IS_ERR(mp)) + break; +- +- namespace_unlock(); +- inode_unlock(dentry->d_inode); +- if (beneath) +- dput(dentry); +- path_put(path); +- path->mnt = mnt; +- path->dentry = dget(mnt->mnt_root); +- } +- +- mp = get_mountpoint(dentry); +- if (IS_ERR(mp)) { +- namespace_unlock(); +- inode_unlock(dentry->d_inode); ++ if (beneath) { ++ /* ++ * @under duplicates the references that will stay ++ * at least until namespace_unlock(), so the path_put() ++ * below is safe (and OK to do under namespace_lock - ++ * we are not dropping the final references here). ++ */ ++ path_put(&under); ++ } ++ return mp; + } +- +-out: ++ namespace_unlock(); ++ inode_unlock(dentry->d_inode); + if (beneath) +- dput(dentry); +- ++ path_put(&under); + return mp; + } + +@@ -2499,14 +2505,11 @@ static inline struct mountpoint *lock_mount(struct path *path) + + static void unlock_mount(struct mountpoint *where) + { +- struct dentry *dentry = where->m_dentry; +- ++ inode_unlock(where->m_dentry->d_inode); + read_seqlock_excl(&mount_lock); + put_mountpoint(where); + read_sequnlock_excl(&mount_lock); +- + namespace_unlock(); +- inode_unlock(dentry->d_inode); + } + + static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) +diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c +index 2ecd0303f9421b..4aea458216117f 100644 +--- a/fs/ntfs3/file.c ++++ b/fs/ntfs3/file.c +@@ -335,6 +335,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count, + } + + if (extend_init && !is_compressed(ni)) { ++ WARN_ON(ni->i_valid >= pos); + err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos); + if (err) + goto out; +diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c +index c2a98b2736645d..f04922eb45d4c9 100644 +--- a/fs/smb/client/sess.c ++++ b/fs/smb/client/sess.c +@@ -732,6 +732,22 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp) + *pbcc_area = bcc_ptr; + } + ++static void ++ascii_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp) ++{ ++ char *bcc_ptr = *pbcc_area; ++ ++ strcpy(bcc_ptr, "Linux version "); ++ bcc_ptr += strlen("Linux version "); ++ strcpy(bcc_ptr, init_utsname()->release); ++ bcc_ptr += strlen(init_utsname()->release) + 1; ++ ++ strcpy(bcc_ptr, CIFS_NETWORK_OPSYS); ++ bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1; ++ ++ *pbcc_area = bcc_ptr; ++} ++ + static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, + const struct nls_table *nls_cp) + { +@@ -756,6 +772,25 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, + *pbcc_area = bcc_ptr; + } + ++static void ascii_domain_string(char **pbcc_area, struct cifs_ses *ses, ++ const struct nls_table *nls_cp) ++{ ++ char *bcc_ptr = *pbcc_area; ++ int len; ++ ++ /* copy domain */ ++ if (ses->domainName != NULL) { ++ len = 
strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); ++ if (WARN_ON_ONCE(len < 0)) ++ len = CIFS_MAX_DOMAINNAME_LEN - 1; ++ bcc_ptr += len; ++ } /* else we send a null domain name so server will default to its own domain */ ++ *bcc_ptr = 0; ++ bcc_ptr++; ++ ++ *pbcc_area = bcc_ptr; ++} ++ + static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, + const struct nls_table *nls_cp) + { +@@ -801,25 +836,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, + *bcc_ptr = 0; + bcc_ptr++; /* account for null termination */ + +- /* copy domain */ +- if (ses->domainName != NULL) { +- len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); +- if (WARN_ON_ONCE(len < 0)) +- len = CIFS_MAX_DOMAINNAME_LEN - 1; +- bcc_ptr += len; +- } /* else we send a null domain name so server will default to its own domain */ +- *bcc_ptr = 0; +- bcc_ptr++; +- + /* BB check for overflow here */ + +- strcpy(bcc_ptr, "Linux version "); +- bcc_ptr += strlen("Linux version "); +- strcpy(bcc_ptr, init_utsname()->release); +- bcc_ptr += strlen(init_utsname()->release) + 1; +- +- strcpy(bcc_ptr, CIFS_NETWORK_OPSYS); +- bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1; ++ ascii_domain_string(&bcc_ptr, ses, nls_cp); ++ ascii_oslm_strings(&bcc_ptr, nls_cp); + + *pbcc_area = bcc_ptr; + } +@@ -1622,7 +1642,7 @@ sess_auth_kerberos(struct sess_data *sess_data) + sess_data->iov[1].iov_len = msg->secblob_len; + pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); + +- if (ses->capabilities & CAP_UNICODE) { ++ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) { + /* unicode strings must be word aligned */ + if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { + *bcc_ptr = 0; +@@ -1631,8 +1651,8 @@ sess_auth_kerberos(struct sess_data *sess_data) + unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); + unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp); + } else { +- /* BB: is this right? */ +- ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); ++ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp); ++ ascii_domain_string(&bcc_ptr, ses, sess_data->nls_cp); + } + + sess_data->iov[2].iov_len = (long) bcc_ptr - +diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c +index bc1bac36c1b291..caa1d852ece49c 100644 +--- a/fs/smb/client/smb1ops.c ++++ b/fs/smb/client/smb1ops.c +@@ -597,6 +597,42 @@ static int cifs_query_path_info(const unsigned int xid, + CIFSSMBClose(xid, tcon, fid.netfid); + } + ++#ifdef CONFIG_CIFS_XATTR ++ /* ++ * For WSL CHR and BLK reparse points it is required to fetch ++ * EA $LXDEV which contains major and minor device numbers. ++ */ ++ if (!rc && data->reparse_point) { ++ struct smb2_file_full_ea_info *ea; ++ ++ ea = (struct smb2_file_full_ea_info *)data->wsl.eas; ++ rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV, ++ &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1], ++ SMB2_WSL_XATTR_DEV_SIZE, cifs_sb); ++ if (rc == SMB2_WSL_XATTR_DEV_SIZE) { ++ ea->next_entry_offset = cpu_to_le32(0); ++ ea->flags = 0; ++ ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN; ++ ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE); ++ memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1); ++ data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + ++ SMB2_WSL_XATTR_DEV_SIZE; ++ rc = 0; ++ } else if (rc >= 0) { ++ /* It is an error if EA $LXDEV has wrong size. */ ++ rc = -EINVAL; ++ } else { ++ /* ++ * In all other cases ignore error if fetching ++ * of EA $LXDEV failed. 
It is needed only for ++ * WSL CHR and BLK reparse points and wsl_to_fattr() ++ * handle the case when EA is missing. ++ */ ++ rc = 0; ++ } ++ } ++#endif ++ + return rc; + } + +diff --git a/fs/splice.c b/fs/splice.c +index d983d375ff1130..6f9b06bbb860ac 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -45,7 +45,7 @@ + * here if set to avoid blocking other users of this pipe if splice is + * being done on it. + */ +-static noinline void noinline pipe_clear_nowait(struct file *file) ++static noinline void pipe_clear_nowait(struct file *file) + { + fmode_t fmode = READ_ONCE(file->f_mode); + +diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h +index b9caa01dfac485..adec808b371a11 100644 +--- a/include/linux/energy_model.h ++++ b/include/linux/energy_model.h +@@ -243,7 +243,6 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, + scale_cpu = arch_scale_cpu_capacity(cpu); + ps = &pd->table[pd->nr_perf_states - 1]; + +- max_util = map_util_perf(max_util); + max_util = min(max_util, allowed_cpu_cap); + freq = map_util_freq(max_util, ps->frequency, scale_cpu); + +diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h +index ab2a7ef61d420f..b4fcd0164048ed 100644 +--- a/include/media/v4l2-subdev.h ++++ b/include/media/v4l2-subdev.h +@@ -1038,10 +1038,11 @@ struct v4l2_subdev_platform_data { + * @active_state: Active state for the subdev (NULL for subdevs tracking the + * state internally). Initialized by calling + * v4l2_subdev_init_finalize(). +- * @enabled_streams: Bitmask of enabled streams used by +- * v4l2_subdev_enable_streams() and +- * v4l2_subdev_disable_streams() helper functions for fallback +- * cases. ++ * @enabled_pads: Bitmask of enabled pads used by v4l2_subdev_enable_streams() ++ * and v4l2_subdev_disable_streams() helper functions for ++ * fallback cases. ++ * @s_stream_enabled: Tracks whether streaming has been enabled with s_stream. ++ * This is only for call_s_stream() internal use. + * + * Each instance of a subdev driver should create this struct, either + * stand-alone or embedded in a larger struct. +@@ -1089,7 +1090,8 @@ struct v4l2_subdev { + * doesn't support it. + */ + struct v4l2_subdev_state *active_state; +- u64 enabled_streams; ++ u64 enabled_pads; ++ bool s_stream_enabled; + }; + + +@@ -1916,4 +1918,17 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; + void v4l2_subdev_notify_event(struct v4l2_subdev *sd, + const struct v4l2_event *ev); + ++/** ++ * v4l2_subdev_is_streaming() - Returns if the subdevice is streaming ++ * @sd: The subdevice ++ * ++ * v4l2_subdev_is_streaming() tells if the subdevice is currently streaming. ++ * "Streaming" here means whether .s_stream() or .enable_streams() has been ++ * successfully called, and the streaming has not yet been disabled. ++ * ++ * If the subdevice implements .enable_streams() this function must be called ++ * while holding the active state lock. 
++ */ ++bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd); ++ + #endif /* _V4L2_SUBDEV_H */ +diff --git a/include/soc/qcom/ice.h b/include/soc/qcom/ice.h +index 5870a94599a258..d5f6a228df6594 100644 +--- a/include/soc/qcom/ice.h ++++ b/include/soc/qcom/ice.h +@@ -34,4 +34,6 @@ int qcom_ice_program_key(struct qcom_ice *ice, + int slot); + int qcom_ice_evict_key(struct qcom_ice *ice, int slot); + struct qcom_ice *of_qcom_ice_get(struct device *dev); ++struct qcom_ice *devm_of_qcom_ice_get(struct device *dev); ++ + #endif /* __QCOM_ICE_H__ */ +diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h +index c1fb1355d3094b..1e7b0bef95f525 100644 +--- a/include/trace/stages/stage3_trace_output.h ++++ b/include/trace/stages/stage3_trace_output.h +@@ -119,6 +119,14 @@ + trace_print_array_seq(p, array, count, el_size); \ + }) + ++#undef __print_dynamic_array ++#define __print_dynamic_array(array, el_size) \ ++ ({ \ ++ __print_array(__get_dynamic_array(array), \ ++ __get_dynamic_array_len(array) / (el_size), \ ++ (el_size)); \ ++ }) ++ + #undef __print_hex_dump + #define __print_hex_dump(prefix_str, prefix_type, \ + rowsize, groupsize, buf, len, ascii) \ +diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h +index bcb960d16fc0ed..fcd564a590f434 100644 +--- a/include/trace/stages/stage7_class_define.h ++++ b/include/trace/stages/stage7_class_define.h +@@ -22,6 +22,7 @@ + #undef __get_rel_cpumask + #undef __get_rel_sockaddr + #undef __print_array ++#undef __print_dynamic_array + #undef __print_hex_dump + #undef __get_buf + +diff --git a/init/Kconfig b/init/Kconfig +index 1105cb53f391ab..8b630143c720f6 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -689,7 +689,7 @@ endmenu # "CPU/Task time and stats accounting" + + config CPU_ISOLATION + bool "CPU isolation" +- depends on SMP || COMPILE_TEST ++ depends on SMP + default y + help + Make sure that CPUs running critical tasks are not disturbed by +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index efa7849b82c184..3ce93418e0151d 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -1247,21 +1247,22 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync) + while (node) { + req = container_of(node, struct io_kiocb, io_task_work.node); + node = node->next; +- if (sync && last_ctx != req->ctx) { ++ if (last_ctx != req->ctx) { + if (last_ctx) { +- flush_delayed_work(&last_ctx->fallback_work); ++ if (sync) ++ flush_delayed_work(&last_ctx->fallback_work); + percpu_ref_put(&last_ctx->refs); + } + last_ctx = req->ctx; + percpu_ref_get(&last_ctx->refs); + } +- if (llist_add(&req->io_task_work.node, +- &req->ctx->fallback_llist)) +- schedule_delayed_work(&req->ctx->fallback_work, 1); ++ if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist)) ++ schedule_delayed_work(&last_ctx->fallback_work, 1); + } + + if (last_ctx) { +- flush_delayed_work(&last_ctx->fallback_work); ++ if (sync) ++ flush_delayed_work(&last_ctx->fallback_work); + percpu_ref_put(&last_ctx->refs); + } + } +@@ -1916,7 +1917,7 @@ struct io_wq_work *io_wq_free_work(struct io_wq_work *work) + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + struct io_kiocb *nxt = NULL; + +- if (req_ref_put_and_test(req)) { ++ if (req_ref_put_and_test_atomic(req)) { + if (req->flags & IO_REQ_LINK_FLAGS) + nxt = io_req_find_next(req); + io_free_req(req); +diff --git a/io_uring/refs.h b/io_uring/refs.h +index 1336de3f2a30aa..21a379b0f22d61 100644 +--- 
a/io_uring/refs.h ++++ b/io_uring/refs.h +@@ -17,6 +17,13 @@ static inline bool req_ref_inc_not_zero(struct io_kiocb *req) + return atomic_inc_not_zero(&req->refs); + } + ++static inline bool req_ref_put_and_test_atomic(struct io_kiocb *req) ++{ ++ WARN_ON_ONCE(!(data_race(req->flags) & REQ_F_REFCOUNT)); ++ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); ++ return atomic_dec_and_test(&req->refs); ++} ++ + static inline bool req_ref_put_and_test(struct io_kiocb *req) + { + if (likely(!(req->flags & REQ_F_REFCOUNT))) +diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c +index ee1c7b77096e7b..fbbf3b6b9f8353 100644 +--- a/kernel/bpf/bpf_cgrp_storage.c ++++ b/kernel/bpf/bpf_cgrp_storage.c +@@ -162,6 +162,7 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup, + void *, value, u64, flags, gfp_t, gfp_flags) + { + struct bpf_local_storage_data *sdata; ++ bool nobusy; + + WARN_ON_ONCE(!bpf_rcu_lock_held()); + if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE)) +@@ -170,21 +171,21 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup, + if (!cgroup) + return (unsigned long)NULL; + +- if (!bpf_cgrp_storage_trylock()) +- return (unsigned long)NULL; ++ nobusy = bpf_cgrp_storage_trylock(); + +- sdata = cgroup_storage_lookup(cgroup, map, true); ++ sdata = cgroup_storage_lookup(cgroup, map, nobusy); + if (sdata) + goto unlock; + + /* only allocate new storage, when the cgroup is refcounted */ + if (!percpu_ref_is_dying(&cgroup->self.refcnt) && +- (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) ++ (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) + sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map, + value, BPF_NOEXIST, gfp_flags); + + unlock: +- bpf_cgrp_storage_unlock(); ++ if (nobusy) ++ bpf_cgrp_storage_unlock(); + return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data; + } + +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index d6a4102312fadd..e443506b0a65a1 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -20106,6 +20106,33 @@ BTF_ID(func, __rcu_read_unlock) + #endif + BTF_SET_END(btf_id_deny) + ++/* fexit and fmod_ret can't be used to attach to __noreturn functions. ++ * Currently, we must manually list all __noreturn functions here. Once a more ++ * robust solution is implemented, this workaround can be removed. 
++ */ ++BTF_SET_START(noreturn_deny) ++#ifdef CONFIG_IA32_EMULATION ++BTF_ID(func, __ia32_sys_exit) ++BTF_ID(func, __ia32_sys_exit_group) ++#endif ++#ifdef CONFIG_KUNIT ++BTF_ID(func, __kunit_abort) ++BTF_ID(func, kunit_try_catch_throw) ++#endif ++#ifdef CONFIG_MODULES ++BTF_ID(func, __module_put_and_kthread_exit) ++#endif ++#ifdef CONFIG_X86_64 ++BTF_ID(func, __x64_sys_exit) ++BTF_ID(func, __x64_sys_exit_group) ++#endif ++BTF_ID(func, do_exit) ++BTF_ID(func, do_group_exit) ++BTF_ID(func, kthread_complete_and_exit) ++BTF_ID(func, kthread_exit) ++BTF_ID(func, make_task_dead) ++BTF_SET_END(noreturn_deny) ++ + static bool can_be_sleepable(struct bpf_prog *prog) + { + if (prog->type == BPF_PROG_TYPE_TRACING) { +@@ -20194,6 +20221,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) + } else if (prog->type == BPF_PROG_TYPE_TRACING && + btf_id_set_contains(&btf_id_deny, btf_id)) { + return -EINVAL; ++ } else if ((prog->expected_attach_type == BPF_TRACE_FEXIT || ++ prog->expected_attach_type == BPF_MODIFY_RETURN) && ++ btf_id_set_contains(&noreturn_deny, btf_id)) { ++ verbose(env, "Attaching fexit/fmod_ret to __noreturn functions is rejected.\n"); ++ return -EINVAL; + } + + key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); +diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c +index f005c66f378c32..a600819799637b 100644 +--- a/kernel/dma/contiguous.c ++++ b/kernel/dma/contiguous.c +@@ -70,8 +70,7 @@ struct cma *dma_contiguous_default_area; + * Users, who want to set the size of global CMA area for their system + * should use cma= kernel parameter. + */ +-static const phys_addr_t size_bytes __initconst = +- (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M; ++#define size_bytes ((phys_addr_t)CMA_SIZE_MBYTES * SZ_1M) + static phys_addr_t size_cmdline __initdata = -1; + static phys_addr_t base_cmdline __initdata; + static phys_addr_t limit_cmdline __initdata; +diff --git a/kernel/events/core.c b/kernel/events/core.c +index b710976fb01b17..987807b1040ae0 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -13419,6 +13419,9 @@ inherit_event(struct perf_event *parent_event, + if (IS_ERR(child_event)) + return child_event; + ++ get_ctx(child_ctx); ++ child_event->ctx = child_ctx; ++ + pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event); + if (IS_ERR(pmu_ctx)) { + free_event(child_event); +@@ -13441,8 +13444,6 @@ inherit_event(struct perf_event *parent_event, + return NULL; + } + +- get_ctx(child_ctx); +- + /* + * Make the child state follow the state of the parent event, + * not its attr.disabled bit. We hold the parent's mutex, +@@ -13463,7 +13464,6 @@ inherit_event(struct perf_event *parent_event, + local64_set(&hwc->period_left, sample_period); + } + +- child_event->ctx = child_ctx; + child_event->overflow_handler = parent_event->overflow_handler; + child_event->overflow_handler_context + = parent_event->overflow_handler_context; +diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig +index 33a2e991f60814..b411315ecd3c4b 100644 +--- a/kernel/module/Kconfig ++++ b/kernel/module/Kconfig +@@ -229,6 +229,7 @@ comment "Do not forget to sign required modules with scripts/sign-file" + choice + prompt "Which hash algorithm should modules be signed with?" + depends on MODULE_SIG || IMA_APPRAISE_MODSIG ++ default MODULE_SIG_SHA512 + help + This determines which sort of hashing algorithm will be used during + signature generation. 
This algorithm _must_ be built into the kernel +diff --git a/kernel/panic.c b/kernel/panic.c +index ef9f9a4e928de6..d7973e97547482 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -763,9 +763,15 @@ device_initcall(register_warn_debugfs); + */ + __visible noinstr void __stack_chk_fail(void) + { ++ unsigned long flags; ++ + instrumentation_begin(); ++ flags = user_access_save(); ++ + panic("stack-protector: Kernel stack is corrupted in: %pB", + __builtin_return_address(0)); ++ ++ user_access_restore(flags); + instrumentation_end(); + } + EXPORT_SYMBOL(__stack_chk_fail); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 8c5f75af07db0e..760a6c3781cbfc 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7406,18 +7406,13 @@ int sched_core_idle_cpu(int cpu) + * required to meet deadlines. + */ + unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, +- enum cpu_util_type type, +- struct task_struct *p) ++ unsigned long *min, ++ unsigned long *max) + { +- unsigned long dl_util, util, irq, max; ++ unsigned long util, irq, scale; + struct rq *rq = cpu_rq(cpu); + +- max = arch_scale_cpu_capacity(cpu); +- +- if (!uclamp_is_used() && +- type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { +- return max; +- } ++ scale = arch_scale_cpu_capacity(cpu); + + /* + * Early check to see if IRQ/steal time saturates the CPU, can be +@@ -7425,45 +7420,49 @@ unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, + * update_irq_load_avg(). + */ + irq = cpu_util_irq(rq); +- if (unlikely(irq >= max)) +- return max; ++ if (unlikely(irq >= scale)) { ++ if (min) ++ *min = scale; ++ if (max) ++ *max = scale; ++ return scale; ++ } ++ ++ if (min) { ++ /* ++ * The minimum utilization returns the highest level between: ++ * - the computed DL bandwidth needed with the IRQ pressure which ++ * steals time to the deadline task. ++ * - The minimum performance requirement for CFS and/or RT. ++ */ ++ *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN)); ++ ++ /* ++ * When an RT task is runnable and uclamp is not used, we must ++ * ensure that the task will run at maximum compute capacity. ++ */ ++ if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt)) ++ *min = max(*min, scale); ++ } + + /* + * Because the time spend on RT/DL tasks is visible as 'lost' time to + * CFS tasks and we use the same metric to track the effective + * utilization (PELT windows are synchronized) we can directly add them + * to obtain the CPU's actual utilization. +- * +- * CFS and RT utilization can be boosted or capped, depending on +- * utilization clamp constraints requested by currently RUNNABLE +- * tasks. +- * When there are no CFS RUNNABLE tasks, clamps are released and +- * frequency will be gracefully reduced with the utilization decay. + */ + util = util_cfs + cpu_util_rt(rq); +- if (type == FREQUENCY_UTIL) +- util = uclamp_rq_util_with(rq, util, p); +- +- dl_util = cpu_util_dl(rq); ++ util += cpu_util_dl(rq); + + /* +- * For frequency selection we do not make cpu_util_dl() a permanent part +- * of this sum because we want to use cpu_bw_dl() later on, but we need +- * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such +- * that we select f_max when there is no idle time. +- * +- * NOTE: numerical errors or stop class might cause us to not quite hit +- * saturation when we should -- something for later. ++ * The maximum hint is a soft bandwidth requirement, which can be lower ++ * than the actual utilization because of uclamp_max requirements. 
+ */ +- if (util + dl_util >= max) +- return max; ++ if (max) ++ *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX)); + +- /* +- * OTOH, for energy computation we need the estimated running time, so +- * include util_dl and ignore dl_bw. +- */ +- if (type == ENERGY_UTIL) +- util += dl_util; ++ if (util >= scale) ++ return scale; + + /* + * There is still idle time; further improve the number by using the +@@ -7474,28 +7473,15 @@ unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, + * U' = irq + --------- * U + * max + */ +- util = scale_irq_capacity(util, irq, max); ++ util = scale_irq_capacity(util, irq, scale); + util += irq; + +- /* +- * Bandwidth required by DEADLINE must always be granted while, for +- * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism +- * to gracefully reduce the frequency when no tasks show up for longer +- * periods of time. +- * +- * Ideally we would like to set bw_dl as min/guaranteed freq and util + +- * bw_dl as requested freq. However, cpufreq is not yet ready for such +- * an interface. So, we only do the latter for now. +- */ +- if (type == FREQUENCY_UTIL) +- util += cpu_bw_dl(rq); +- +- return min(max, util); ++ return min(scale, util); + } + + unsigned long sched_cpu_util(int cpu) + { +- return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL); ++ return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL); + } + #endif /* CONFIG_SMP */ + +@@ -10048,7 +10034,7 @@ void __init sched_init(void) + #ifdef CONFIG_SMP + rq->sd = NULL; + rq->rd = NULL; +- rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; ++ rq->cpu_capacity = SCHED_CAPACITY_SCALE; + rq->balance_callback = &balance_push_callback; + rq->active_balance = 0; + rq->next_balance = jiffies; +diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c +index 57c92d751bcd73..95baa12a10293e 100644 +--- a/kernel/sched/cpudeadline.c ++++ b/kernel/sched/cpudeadline.c +@@ -131,7 +131,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, + if (!dl_task_fits_capacity(p, cpu)) { + cpumask_clear_cpu(cpu, later_mask); + +- cap = capacity_orig_of(cpu); ++ cap = arch_scale_cpu_capacity(cpu); + + if (cap > max_cap || + (cpu == task_cpu(p) && cap == max_cap)) { +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index 259521b179aa11..776be0549162c9 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -47,7 +47,7 @@ struct sugov_cpu { + u64 last_update; + + unsigned long util; +- unsigned long bw_dl; ++ unsigned long bw_min; + + /* The field below is for single-CPU policies only: */ + #ifdef CONFIG_NO_HZ_COMMON +@@ -81,9 +81,20 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) + if (!cpufreq_this_cpu_can_update(sg_policy->policy)) + return false; + +- if (unlikely(sg_policy->limits_changed)) { +- sg_policy->limits_changed = false; ++ if (unlikely(READ_ONCE(sg_policy->limits_changed))) { ++ WRITE_ONCE(sg_policy->limits_changed, false); + sg_policy->need_freq_update = true; ++ ++ /* ++ * The above limits_changed update must occur before the reads ++ * of policy limits in cpufreq_driver_resolve_freq() or a policy ++ * limits update might be missed, so use a memory barrier to ++ * ensure it. ++ * ++ * This pairs with the write memory barrier in sugov_limits(). ++ */ ++ smp_mb(); ++ + return true; + } + +@@ -155,7 +166,6 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, + unsigned int freq = arch_scale_freq_invariant() ? 
+ policy->cpuinfo.max_freq : policy->cur; + +- util = map_util_perf(util); + freq = map_util_freq(util, freq, max); + + if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) +@@ -165,14 +175,30 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, + return cpufreq_driver_resolve_freq(policy, freq); + } + ++unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, ++ unsigned long min, ++ unsigned long max) ++{ ++ /* Add dvfs headroom to actual utilization */ ++ actual = map_util_perf(actual); ++ /* Actually we don't need to target the max performance */ ++ if (actual < max) ++ max = actual; ++ ++ /* ++ * Ensure at least minimum performance while providing more compute ++ * capacity when possible. ++ */ ++ return max(min, max); ++} ++ + static void sugov_get_util(struct sugov_cpu *sg_cpu) + { +- unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu); +- struct rq *rq = cpu_rq(sg_cpu->cpu); ++ unsigned long min, max, util = cpu_util_cfs_boost(sg_cpu->cpu); + +- sg_cpu->bw_dl = cpu_bw_dl(rq); +- sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util, +- FREQUENCY_UTIL, NULL); ++ util = effective_cpu_util(sg_cpu->cpu, util, &min, &max); ++ sg_cpu->bw_min = min; ++ sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max); + } + + /** +@@ -318,8 +344,8 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } + */ + static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) + { +- if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) +- sg_cpu->sg_policy->limits_changed = true; ++ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min) ++ WRITE_ONCE(sg_cpu->sg_policy->limits_changed, true); + } + + static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, +@@ -419,8 +445,8 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time, + sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util) + sg_cpu->util = prev_util; + +- cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl), +- map_util_perf(sg_cpu->util), max_cap); ++ cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min, ++ sg_cpu->util, max_cap); + + sg_cpu->sg_policy->last_freq_update_time = time; + } +@@ -829,7 +855,16 @@ static void sugov_limits(struct cpufreq_policy *policy) + mutex_unlock(&sg_policy->work_lock); + } + +- sg_policy->limits_changed = true; ++ /* ++ * The limits_changed update below must take place before the updates ++ * of policy limits in cpufreq_set_policy() or a policy limits update ++ * might be missed, so use a memory barrier to ensure it. ++ * ++ * This pairs with the memory barrier in sugov_should_update_freq(). 
++ */ ++ smp_wmb(); ++ ++ WRITE_ONCE(sg_policy->limits_changed, true); + } + + struct cpufreq_governor schedutil_gov = { +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 6c639e48e49a97..a15cf7969953a5 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -132,7 +132,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask) + int i; + + for_each_cpu_and(i, mask, cpu_active_mask) +- cap += capacity_orig_of(i); ++ cap += arch_scale_cpu_capacity(i); + + return cap; + } +@@ -144,7 +144,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask) + static inline unsigned long dl_bw_capacity(int i) + { + if (!sched_asym_cpucap_active() && +- capacity_orig_of(i) == SCHED_CAPACITY_SCALE) { ++ arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) { + return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; + } else { + RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 2808dbdd03847e..268e2a49b964e0 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4951,7 +4951,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq, + * To avoid overestimation of actual task utilization, skip updates if + * we cannot grant there is idle time in this CPU. + */ +- if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) ++ if (task_util(p) > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)))) + return; + + /* +@@ -4999,14 +4999,14 @@ static inline int util_fits_cpu(unsigned long util, + return fits; + + /* +- * We must use capacity_orig_of() for comparing against uclamp_min and ++ * We must use arch_scale_cpu_capacity() for comparing against uclamp_min and + * uclamp_max. We only care about capacity pressure (by using + * capacity_of()) for comparing against the real util. + * + * If a task is boosted to 1024 for example, we don't want a tiny + * pressure to skew the check whether it fits a CPU or not. + * +- * Similarly if a task is capped to capacity_orig_of(little_cpu), it ++ * Similarly if a task is capped to arch_scale_cpu_capacity(little_cpu), it + * should fit a little cpu even if there's some pressure. + * + * Only exception is for thermal pressure since it has a direct impact +@@ -5018,7 +5018,7 @@ static inline int util_fits_cpu(unsigned long util, + * For uclamp_max, we can tolerate a drop in performance level as the + * goal is to cap the task. So it's okay if it's getting less. + */ +- capacity_orig = capacity_orig_of(cpu); ++ capacity_orig = arch_scale_cpu_capacity(cpu); + capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu); + + /* +@@ -7515,7 +7515,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) + * Look for the CPU with best capacity. + */ + else if (fits < 0) +- cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu)); ++ cpu_cap = arch_scale_cpu_capacity(cpu) - thermal_load_avg(cpu_rq(cpu)); + + /* + * First, select CPU which fits better (-1 being better than 0). 
+@@ -7757,7 +7757,7 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost) + util = max(util, util_est); + } + +- return min(util, capacity_orig_of(cpu)); ++ return min(util, arch_scale_cpu_capacity(cpu)); + } + + unsigned long cpu_util_cfs(int cpu) +@@ -7859,7 +7859,7 @@ static inline void eenv_pd_busy_time(struct energy_env *eenv, + for_each_cpu(cpu, pd_cpus) { + unsigned long util = cpu_util(cpu, p, -1, 0); + +- busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL); ++ busy_time += effective_cpu_util(cpu, util, NULL, NULL); + } + + eenv->pd_busy_time = min(eenv->pd_cap, busy_time); +@@ -7882,7 +7882,7 @@ eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus, + for_each_cpu(cpu, pd_cpus) { + struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; + unsigned long util = cpu_util(cpu, p, dst_cpu, 1); +- unsigned long eff_util; ++ unsigned long eff_util, min, max; + + /* + * Performance domain frequency: utilization clamping +@@ -7891,7 +7891,23 @@ eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus, + * NOTE: in case RT tasks are running, by default the + * FREQUENCY_UTIL's utilization can be max OPP. + */ +- eff_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk); ++ eff_util = effective_cpu_util(cpu, util, &min, &max); ++ ++ /* Task's uclamp can modify min and max value */ ++ if (tsk && uclamp_is_used()) { ++ min = max(min, uclamp_eff_value(p, UCLAMP_MIN)); ++ ++ /* ++ * If there is no active max uclamp constraint, ++ * directly use task's one, otherwise keep max. ++ */ ++ if (uclamp_rq_is_idle(cpu_rq(cpu))) ++ max = uclamp_eff_value(p, UCLAMP_MAX); ++ else ++ max = max(max, uclamp_eff_value(p, UCLAMP_MAX)); ++ } ++ ++ eff_util = sugov_effective_cpu_perf(cpu, eff_util, min, max); + max_util = max(max_util, eff_util); + } + +@@ -9544,8 +9560,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) + unsigned long capacity = scale_rt_capacity(cpu); + struct sched_group *sdg = sd->groups; + +- cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); +- + if (!capacity) + capacity = 1; + +@@ -9621,7 +9635,7 @@ static inline int + check_cpu_capacity(struct rq *rq, struct sched_domain *sd) + { + return ((rq->cpu_capacity * sd->imbalance_pct) < +- (rq->cpu_capacity_orig * 100)); ++ (arch_scale_cpu_capacity(cpu_of(rq)) * 100)); + } + + /* +@@ -9632,7 +9646,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) + static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) + { + return rq->misfit_task_load && +- (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || ++ (arch_scale_cpu_capacity(rq->cpu) < rq->rd->max_cpu_capacity || + check_cpu_capacity(rq, sd)); + } + +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index b89223a973168f..91b1ee0d81fce4 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -519,7 +519,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) + min_cap = uclamp_eff_value(p, UCLAMP_MIN); + max_cap = uclamp_eff_value(p, UCLAMP_MAX); + +- cpu_cap = capacity_orig_of(cpu); ++ cpu_cap = arch_scale_cpu_capacity(cpu); + + return cpu_cap >= min(min_cap, max_cap); + } +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index d48c6a292a83db..60dc51f43dd91f 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1048,7 +1048,6 @@ struct rq { + struct sched_domain __rcu *sd; + + unsigned long cpu_capacity; +- unsigned long cpu_capacity_orig; + + struct balance_callback *balance_callback; + +@@ -2985,29 +2984,14 @@ static inline void 
cpufreq_update_util(struct rq *rq, unsigned int flags) {} + #endif + + #ifdef CONFIG_SMP +-static inline unsigned long capacity_orig_of(int cpu) +-{ +- return cpu_rq(cpu)->cpu_capacity_orig; +-} ++unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, ++ unsigned long *min, ++ unsigned long *max); + +-/** +- * enum cpu_util_type - CPU utilization type +- * @FREQUENCY_UTIL: Utilization used to select frequency +- * @ENERGY_UTIL: Utilization used during energy calculation +- * +- * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time +- * need to be aggregated differently depending on the usage made of them. This +- * enum is used within effective_cpu_util() to differentiate the types of +- * utilization expected by the callers, and adjust the aggregation accordingly. +- */ +-enum cpu_util_type { +- FREQUENCY_UTIL, +- ENERGY_UTIL, +-}; ++unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, ++ unsigned long min, ++ unsigned long max); + +-unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, +- enum cpu_util_type type, +- struct task_struct *p); + + /* + * Verify the fitness of task @p to run on @cpu taking into account the +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index 2ed884bb362137..c61698cff0f3a8 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -2486,12 +2486,15 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att + /* Attach the domains */ + rcu_read_lock(); + for_each_cpu(i, cpu_map) { ++ unsigned long capacity; ++ + rq = cpu_rq(i); + sd = *per_cpu_ptr(d.sd, i); + ++ capacity = arch_scale_cpu_capacity(i); + /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */ +- if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) +- WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); ++ if (capacity > READ_ONCE(d.rd->max_cpu_capacity)) ++ WRITE_ONCE(d.rd->max_cpu_capacity, capacity); + + cpu_attach_domain(sd, d.rd, i); + } +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c +index 7f2b17fc8ce403..ecdb8c2b2cab21 100644 +--- a/kernel/time/tick-common.c ++++ b/kernel/time/tick-common.c +@@ -495,6 +495,7 @@ void tick_resume(void) + + #ifdef CONFIG_SUSPEND + static DEFINE_RAW_SPINLOCK(tick_freeze_lock); ++static DEFINE_WAIT_OVERRIDE_MAP(tick_freeze_map, LD_WAIT_SLEEP); + static unsigned int tick_freeze_depth; + + /** +@@ -514,9 +515,22 @@ void tick_freeze(void) + if (tick_freeze_depth == num_online_cpus()) { + trace_suspend_resume(TPS("timekeeping_freeze"), + smp_processor_id(), true); ++ /* ++ * All other CPUs have their interrupts disabled and are ++ * suspended to idle. Other tasks have been frozen so there ++ * is no scheduling happening. This means that there is no ++ * concurrency in the system at this point. Therefore it is ++ * okay to acquire a sleeping lock on PREEMPT_RT, such as a ++ * spinlock, because the lock cannot be held by other CPUs ++ * or threads and acquiring it cannot block. ++ * ++ * Inform lockdep about the situation. ++ */ ++ lock_map_acquire_try(&tick_freeze_map); + system_state = SYSTEM_SUSPEND; + sched_clock_suspend(); + timekeeping_suspend(); ++ lock_map_release(&tick_freeze_map); + } else { + tick_suspend_local(); + } +@@ -538,8 +552,16 @@ void tick_unfreeze(void) + raw_spin_lock(&tick_freeze_lock); + + if (tick_freeze_depth == num_online_cpus()) { ++ /* ++ * Similar to tick_freeze(). 
On resumption the first CPU may ++ * acquire uncontended sleeping locks while other CPUs block on ++ * tick_freeze_lock. ++ */ ++ lock_map_acquire_try(&tick_freeze_map); + timekeeping_resume(); + sched_clock_resume(); ++ lock_map_release(&tick_freeze_map); ++ + system_state = SYSTEM_RUNNING; + trace_suspend_resume(TPS("timekeeping_freeze"), + smp_processor_id(), false); +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index 545393601be8ce..97f660a8ddc73d 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -400,7 +400,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { + .arg2_type = ARG_CONST_SIZE, + }; + +-static void __set_printk_clr_event(void) ++static void __set_printk_clr_event(struct work_struct *work) + { + /* + * This program might be calling bpf_trace_printk, +@@ -413,10 +413,11 @@ static void __set_printk_clr_event(void) + if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) + pr_warn_ratelimited("could not enable bpf_trace_printk events"); + } ++static DECLARE_WORK(set_printk_work, __set_printk_clr_event); + + const struct bpf_func_proto *bpf_get_trace_printk_proto(void) + { +- __set_printk_clr_event(); ++ schedule_work(&set_printk_work); + return &bpf_trace_printk_proto; + } + +@@ -459,7 +460,7 @@ static const struct bpf_func_proto bpf_trace_vprintk_proto = { + + const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) + { +- __set_printk_clr_event(); ++ schedule_work(&set_printk_work); + return &bpf_trace_vprintk_proto; + } + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 1a936978c2b1a6..5f74e9f9c8a734 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -470,6 +470,7 @@ static void test_event_printk(struct trace_event_call *call) + case '%': + continue; + case 'p': ++ do_pointer: + /* Find dereferencing fields */ + switch (fmt[i + 1]) { + case 'B': case 'R': case 'r': +@@ -498,6 +499,12 @@ static void test_event_printk(struct trace_event_call *call) + continue; + if (fmt[i + j] == '*') { + star = true; ++ /* Handle %*pbl case */ ++ if (!j && fmt[i + 1] == 'p') { ++ arg++; ++ i++; ++ goto do_pointer; ++ } + continue; + } + if ((fmt[i + j] == 's')) { +diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c +index 2062be1f2e80f6..f90f2b9842ec4f 100644 +--- a/lib/test_ubsan.c ++++ b/lib/test_ubsan.c +@@ -35,18 +35,22 @@ static void test_ubsan_shift_out_of_bounds(void) + + static void test_ubsan_out_of_bounds(void) + { +- volatile int i = 4, j = 5, k = -1; +- volatile char above[4] = { }; /* Protect surrounding memory. */ +- volatile int arr[4]; +- volatile char below[4] = { }; /* Protect surrounding memory. */ ++ int i = 4, j = 4, k = -1; ++ volatile struct { ++ char above[4]; /* Protect surrounding memory. */ ++ int arr[4]; ++ char below[4]; /* Protect surrounding memory. 
*/ ++ } data; + +- above[0] = below[0]; ++ OPTIMIZER_HIDE_VAR(i); ++ OPTIMIZER_HIDE_VAR(j); ++ OPTIMIZER_HIDE_VAR(k); + + UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above"); +- arr[j] = i; ++ data.arr[j] = i; + + UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below"); +- arr[k] = i; ++ data.arr[k] = i; + } + + enum ubsan_test_enum { +diff --git a/net/9p/client.c b/net/9p/client.c +index d841d82e908fe3..cf73fe306219a9 100644 +--- a/net/9p/client.c ++++ b/net/9p/client.c +@@ -1547,7 +1547,8 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, + struct p9_client *clnt = fid->clnt; + struct p9_req_t *req; + int count = iov_iter_count(to); +- int rsize, received, non_zc = 0; ++ u32 rsize, received; ++ bool non_zc = false; + char *dataptr; + + *err = 0; +@@ -1570,7 +1571,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, + 0, 11, "dqd", fid->fid, + offset, rsize); + } else { +- non_zc = 1; ++ non_zc = true; + req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, + rsize); + } +@@ -1591,11 +1592,11 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, + return 0; + } + if (rsize < received) { +- pr_err("bogus RREAD count (%d > %d)\n", received, rsize); ++ pr_err("bogus RREAD count (%u > %u)\n", received, rsize); + received = rsize; + } + +- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received); ++ p9_debug(P9_DEBUG_9P, "<<< RREAD count %u\n", received); + + if (non_zc) { + int n = copy_to_iter(dataptr, received, to); +@@ -1622,9 +1623,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) + *err = 0; + + while (iov_iter_count(from)) { +- int count = iov_iter_count(from); +- int rsize = fid->iounit; +- int written; ++ size_t count = iov_iter_count(from); ++ u32 rsize = fid->iounit; ++ u32 written; + + if (!rsize || rsize > clnt->msize - P9_IOHDRSZ) + rsize = clnt->msize - P9_IOHDRSZ; +@@ -1632,7 +1633,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) + if (count < rsize) + rsize = count; + +- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n", ++ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %u (/%zu)\n", + fid->fid, offset, rsize, count); + + /* Don't bother zerocopy for small IO (< 1024) */ +@@ -1658,11 +1659,11 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) + break; + } + if (rsize < written) { +- pr_err("bogus RWRITE count (%d > %d)\n", written, rsize); ++ pr_err("bogus RWRITE count (%u > %u)\n", written, rsize); + written = rsize; + } + +- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written); ++ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %u\n", written); + + p9_req_put(clnt, req); + iov_iter_revert(from, count - written - iov_iter_count(from)); +@@ -2049,7 +2050,8 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate); + + int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) + { +- int err, rsize, non_zc = 0; ++ int err, non_zc = 0; ++ u32 rsize; + struct p9_client *clnt; + struct p9_req_t *req; + char *dataptr; +@@ -2058,7 +2060,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) + + iov_iter_kvec(&to, ITER_DEST, &kv, 1, count); + +- p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", ++ p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %u\n", + fid->fid, offset, count); + + clnt = fid->clnt; +@@ -2093,11 +2095,11 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) + goto free_and_error; + } + if (rsize < 
count) { +- pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); ++ pr_err("bogus RREADDIR count (%u > %u)\n", count, rsize); + count = rsize; + } + +- p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); ++ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %u\n", count); + + if (non_zc) + memmove(data, dataptr, count); +diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c +index 4417a18b3e951a..f63586c9ce0216 100644 +--- a/net/core/lwtunnel.c ++++ b/net/core/lwtunnel.c +@@ -332,6 +332,8 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) + struct dst_entry *dst; + int ret; + ++ local_bh_disable(); ++ + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); +@@ -347,8 +349,10 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) + lwtstate = dst->lwtstate; + + if (lwtstate->type == LWTUNNEL_ENCAP_NONE || +- lwtstate->type > LWTUNNEL_ENCAP_MAX) +- return 0; ++ lwtstate->type > LWTUNNEL_ENCAP_MAX) { ++ ret = 0; ++ goto out; ++ } + + ret = -EOPNOTSUPP; + rcu_read_lock(); +@@ -363,11 +367,13 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) + if (ret == -EOPNOTSUPP) + goto drop; + +- return ret; ++ goto out; + + drop: + kfree_skb(skb); + ++out: ++ local_bh_enable(); + return ret; + } + EXPORT_SYMBOL_GPL(lwtunnel_output); +@@ -379,6 +385,8 @@ int lwtunnel_xmit(struct sk_buff *skb) + struct dst_entry *dst; + int ret; + ++ local_bh_disable(); ++ + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); +@@ -395,8 +403,10 @@ int lwtunnel_xmit(struct sk_buff *skb) + lwtstate = dst->lwtstate; + + if (lwtstate->type == LWTUNNEL_ENCAP_NONE || +- lwtstate->type > LWTUNNEL_ENCAP_MAX) +- return 0; ++ lwtstate->type > LWTUNNEL_ENCAP_MAX) { ++ ret = 0; ++ goto out; ++ } + + ret = -EOPNOTSUPP; + rcu_read_lock(); +@@ -411,11 +421,13 @@ int lwtunnel_xmit(struct sk_buff *skb) + if (ret == -EOPNOTSUPP) + goto drop; + +- return ret; ++ goto out; + + drop: + kfree_skb(skb); + ++out: ++ local_bh_enable(); + return ret; + } + EXPORT_SYMBOL_GPL(lwtunnel_xmit); +@@ -427,6 +439,8 @@ int lwtunnel_input(struct sk_buff *skb) + struct dst_entry *dst; + int ret; + ++ DEBUG_NET_WARN_ON_ONCE(!in_softirq()); ++ + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); +diff --git a/net/core/selftests.c b/net/core/selftests.c +index acb1ee97bbd324..7af99d07762ea0 100644 +--- a/net/core/selftests.c ++++ b/net/core/selftests.c +@@ -100,10 +100,10 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, + ehdr->h_proto = htons(ETH_P_IP); + + if (attr->tcp) { ++ memset(thdr, 0, sizeof(*thdr)); + thdr->source = htons(attr->sport); + thdr->dest = htons(attr->dport); + thdr->doff = sizeof(struct tcphdr) / 4; +- thdr->check = 0; + } else { + uhdr->source = htons(attr->sport); + uhdr->dest = htons(attr->dport); +@@ -144,10 +144,18 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, + attr->id = net_test_next_id; + shdr->id = net_test_next_id++; + +- if (attr->size) +- skb_put(skb, attr->size); +- if (attr->max_size && attr->max_size > skb->len) +- skb_put(skb, attr->max_size - skb->len); ++ if (attr->size) { ++ void *payload = skb_put(skb, attr->size); ++ ++ memset(payload, 0, attr->size); ++ } ++ ++ if (attr->max_size && attr->max_size > skb->len) { ++ size_t pad_len = attr->max_size - skb->len; ++ void *pad = skb_put(skb, pad_len); ++ ++ memset(pad, 0, pad_len); ++ 
} + + skb->csum = 0; + skb->ip_summed = CHECKSUM_PARTIAL; +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c +index 880c5f16b29ccf..371255e624332f 100644 +--- a/net/sched/sch_hfsc.c ++++ b/net/sched/sch_hfsc.c +@@ -958,6 +958,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + + if (cl != NULL) { + int old_flags; ++ int len = 0; + + if (parentid) { + if (cl->cl_parent && +@@ -988,9 +989,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + if (usc != NULL) + hfsc_change_usc(cl, usc, cur_time); + ++ if (cl->qdisc->q.qlen != 0) ++ len = qdisc_peek_len(cl->qdisc); ++ /* Check queue length again since some qdisc implementations ++ * (e.g., netem/codel) might empty the queue during the peek ++ * operation. ++ */ + if (cl->qdisc->q.qlen != 0) { +- int len = qdisc_peek_len(cl->qdisc); +- + if (cl->cl_flags & HFSC_RSC) { + if (old_flags & HFSC_RSC) + update_ed(cl, len); +@@ -1633,10 +1638,16 @@ hfsc_dequeue(struct Qdisc *sch) + if (cl->qdisc->q.qlen != 0) { + /* update ed */ + next_len = qdisc_peek_len(cl->qdisc); +- if (realtime) +- update_ed(cl, next_len); +- else +- update_d(cl, next_len); ++ /* Check queue length again since some qdisc implementations ++ * (e.g., netem/codel) might empty the queue during the peek ++ * operation. ++ */ ++ if (cl->qdisc->q.qlen != 0) { ++ if (realtime) ++ update_ed(cl, next_len); ++ else ++ update_d(cl, next_len); ++ } + } else { + /* the class becomes passive */ + eltree_remove(cl); +diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c +index 77a3d016cadec1..ddc3e4e5e18d78 100644 +--- a/net/tipc/monitor.c ++++ b/net/tipc/monitor.c +@@ -716,7 +716,8 @@ void tipc_mon_reinit_self(struct net *net) + if (!mon) + continue; + write_lock_bh(&mon->lock); +- mon->self->addr = tipc_own_addr(net); ++ if (mon->self) ++ mon->self->addr = tipc_own_addr(net); + write_unlock_bh(&mon->lock); + } + } +diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h +index 1c6b843b8c4eeb..06be777b3b14b7 100644 +--- a/samples/trace_events/trace-events-sample.h ++++ b/samples/trace_events/trace-events-sample.h +@@ -302,6 +302,7 @@ TRACE_EVENT(foo_bar, + __bitmask( cpus, num_possible_cpus() ) + __cpumask( cpum ) + __vstring( vstr, fmt, va ) ++ __string_len( lstr, foo, bar / 2 < strlen(foo) ? bar / 2 : strlen(foo) ) + ), + + TP_fast_assign( +@@ -310,12 +311,14 @@ TRACE_EVENT(foo_bar, + memcpy(__get_dynamic_array(list), lst, + __length_of(lst) * sizeof(int)); + __assign_str(str, string); ++ __assign_str(lstr, foo); + __assign_vstr(vstr, fmt, va); + __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus()); + __assign_cpumask(cpum, cpumask_bits(mask)); + ), + +- TP_printk("foo %s %d %s %s %s %s (%s) (%s) %s", __entry->foo, __entry->bar, ++ TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl", ++ __entry->foo, __entry->bar, + + /* + * Notice here the use of some helper functions. 
This includes: +@@ -359,8 +362,17 @@ TRACE_EVENT(foo_bar, + __print_array(__get_dynamic_array(list), + __get_dynamic_array_len(list) / sizeof(int), + sizeof(int)), +- __get_str(str), __get_bitmask(cpus), __get_cpumask(cpum), +- __get_str(vstr)) ++ ++/* A shortcut is to use __print_dynamic_array for dynamic arrays */ ++ ++ __print_dynamic_array(list, sizeof(int)), ++ ++ __get_str(str), __get_str(lstr), ++ __get_bitmask(cpus), __get_cpumask(cpum), ++ __get_str(vstr), ++ __get_dynamic_array_len(cpus), ++ __get_dynamic_array_len(cpus), ++ __get_dynamic_array(cpus)) + ); + + /* +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index 44f20b1b853a50..4aecfb0a0ef6ae 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -268,7 +268,7 @@ objtool-args-$(CONFIG_SLS) += --sls + objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval + objtool-args-$(CONFIG_HAVE_STATIC_CALL_INLINE) += --static-call + objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess +-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable ++objtool-args-$(or $(CONFIG_GCOV_KERNEL),$(CONFIG_KCOV)) += --no-unreachable + objtool-args-$(CONFIG_PREFIX_SYMBOLS) += --prefix=$(CONFIG_FUNCTION_PADDING_BYTES) + + objtool-args = $(objtool-args-y) \ +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c +index 1b6e376f3833cb..fe222c4b74c006 100644 +--- a/sound/soc/codecs/wcd934x.c ++++ b/sound/soc/codecs/wcd934x.c +@@ -2281,7 +2281,7 @@ static irqreturn_t wcd934x_slim_irq_handler(int irq, void *data) + { + struct wcd934x_codec *wcd = data; + unsigned long status = 0; +- int i, j, port_id; ++ unsigned int i, j, port_id; + unsigned int val, int_val = 0; + irqreturn_t ret = IRQ_NONE; + bool tx; +diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c +index ff9f6a1c95df19..40b6a837f66bbc 100644 +--- a/sound/soc/qcom/apq8016_sbc.c ++++ b/sound/soc/qcom/apq8016_sbc.c +@@ -343,4 +343,4 @@ module_platform_driver(apq8016_sbc_platform_driver); + + MODULE_AUTHOR("Srinivas Kandagatla variant->exit) + drvdata->variant->exit(pdev); +- +- +- return 0; + } + EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove); + +@@ -1307,4 +1304,4 @@ void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev) + EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_shutdown); + + MODULE_DESCRIPTION("QTi LPASS CPU Driver"); +-MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/qcom/lpass-hdmi.c b/sound/soc/qcom/lpass-hdmi.c +index 24b1a7523adb90..ce753ebc08945a 100644 +--- a/sound/soc/qcom/lpass-hdmi.c ++++ b/sound/soc/qcom/lpass-hdmi.c +@@ -251,4 +251,4 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_hdmi_dai_ops = { + EXPORT_SYMBOL_GPL(asoc_qcom_lpass_hdmi_dai_ops); + + MODULE_DESCRIPTION("QTi LPASS HDMI Driver"); +-MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/qcom/lpass-ipq806x.c b/sound/soc/qcom/lpass-ipq806x.c +index 2c97f295e39400..2a82684c04de45 100644 +--- a/sound/soc/qcom/lpass-ipq806x.c ++++ b/sound/soc/qcom/lpass-ipq806x.c +@@ -172,9 +172,9 @@ static struct platform_driver ipq806x_lpass_cpu_platform_driver = { + .of_match_table = of_match_ptr(ipq806x_lpass_cpu_device_id), + }, + .probe = asoc_qcom_lpass_cpu_platform_probe, +- .remove = asoc_qcom_lpass_cpu_platform_remove, ++ .remove_new = asoc_qcom_lpass_cpu_platform_remove, + }; + module_platform_driver(ipq806x_lpass_cpu_platform_driver); + + MODULE_DESCRIPTION("QTi LPASS CPU Driver"); +-MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/qcom/lpass-platform.c 
b/sound/soc/qcom/lpass-platform.c +index 73e3d39bd24c30..f918d9e16dc041 100644 +--- a/sound/soc/qcom/lpass-platform.c ++++ b/sound/soc/qcom/lpass-platform.c +@@ -1383,4 +1383,4 @@ int asoc_qcom_lpass_platform_register(struct platform_device *pdev) + EXPORT_SYMBOL_GPL(asoc_qcom_lpass_platform_register); + + MODULE_DESCRIPTION("QTi LPASS Platform Driver"); +-MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c +index d16c0d83aaad92..98faf82c22568e 100644 +--- a/sound/soc/qcom/lpass-sc7180.c ++++ b/sound/soc/qcom/lpass-sc7180.c +@@ -315,11 +315,11 @@ static struct platform_driver sc7180_lpass_cpu_platform_driver = { + .pm = &sc7180_lpass_pm_ops, + }, + .probe = asoc_qcom_lpass_cpu_platform_probe, +- .remove = asoc_qcom_lpass_cpu_platform_remove, ++ .remove_new = asoc_qcom_lpass_cpu_platform_remove, + .shutdown = asoc_qcom_lpass_cpu_platform_shutdown, + }; + + module_platform_driver(sc7180_lpass_cpu_platform_driver); + + MODULE_DESCRIPTION("SC7180 LPASS CPU DRIVER"); +-MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/qcom/lpass-sc7280.c b/sound/soc/qcom/lpass-sc7280.c +index 6b2eb25ed9390c..97b9053ed3b027 100644 +--- a/sound/soc/qcom/lpass-sc7280.c ++++ b/sound/soc/qcom/lpass-sc7280.c +@@ -445,7 +445,7 @@ static struct platform_driver sc7280_lpass_cpu_platform_driver = { + .pm = &sc7280_lpass_pm_ops, + }, + .probe = asoc_qcom_lpass_cpu_platform_probe, +- .remove = asoc_qcom_lpass_cpu_platform_remove, ++ .remove_new = asoc_qcom_lpass_cpu_platform_remove, + .shutdown = asoc_qcom_lpass_cpu_platform_shutdown, + }; + +diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h +index ea12f02eca55f6..5caec24555ea2e 100644 +--- a/sound/soc/qcom/lpass.h ++++ b/sound/soc/qcom/lpass.h +@@ -399,8 +399,8 @@ struct lpass_pcm_data { + }; + + /* register the platform driver from the CPU DAI driver */ +-int asoc_qcom_lpass_platform_register(struct platform_device *); +-int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev); ++int asoc_qcom_lpass_platform_register(struct platform_device *pdev); ++void asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev); + void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev); + int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev); + extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops; +diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c +index 919e326b9462b3..fcef53b97ff98a 100644 +--- a/sound/soc/qcom/qdsp6/q6afe.c ++++ b/sound/soc/qcom/qdsp6/q6afe.c +@@ -552,13 +552,13 @@ struct q6afe_port { + }; + + struct afe_cmd_remote_lpass_core_hw_vote_request { +- uint32_t hw_block_id; +- char client_name[8]; ++ uint32_t hw_block_id; ++ char client_name[8]; + } __packed; + + struct afe_cmd_remote_lpass_core_hw_devote_request { +- uint32_t hw_block_id; +- uint32_t client_handle; ++ uint32_t hw_block_id; ++ uint32_t client_handle; + } __packed; + + +diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c +index def05ce58d176e..179f4f7386dd00 100644 +--- a/sound/soc/qcom/qdsp6/q6apm-dai.c ++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c +@@ -64,20 +64,16 @@ struct q6apm_dai_rtd { + phys_addr_t phys; + unsigned int pcm_size; + unsigned int pcm_count; +- unsigned int pos; /* Buffer position */ + unsigned int periods; + unsigned int bytes_sent; + unsigned int bytes_received; + unsigned int copied_total; + uint16_t bits_per_sample; +- uint16_t source; /* Encoding source bit mask */ 
+- uint16_t session_id; ++ snd_pcm_uframes_t queue_ptr; + bool next_track; + enum stream_state state; + struct q6apm_graph *graph; + spinlock_t lock; +- uint32_t initial_samples_drop; +- uint32_t trailing_samples_drop; + bool notify_on_drain; + }; + +@@ -127,25 +123,16 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo + { + struct q6apm_dai_rtd *prtd = priv; + struct snd_pcm_substream *substream = prtd->substream; +- unsigned long flags; + + switch (opcode) { + case APM_CLIENT_EVENT_CMD_EOS_DONE: + prtd->state = Q6APM_STREAM_STOPPED; + break; + case APM_CLIENT_EVENT_DATA_WRITE_DONE: +- spin_lock_irqsave(&prtd->lock, flags); +- prtd->pos += prtd->pcm_count; +- spin_unlock_irqrestore(&prtd->lock, flags); + snd_pcm_period_elapsed(substream); +- if (prtd->state == Q6APM_STREAM_RUNNING) +- q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0); + + break; + case APM_CLIENT_EVENT_DATA_READ_DONE: +- spin_lock_irqsave(&prtd->lock, flags); +- prtd->pos += prtd->pcm_count; +- spin_unlock_irqrestore(&prtd->lock, flags); + snd_pcm_period_elapsed(substream); + if (prtd->state == Q6APM_STREAM_RUNNING) + q6apm_read(prtd->graph); +@@ -251,7 +238,6 @@ static int q6apm_dai_prepare(struct snd_soc_component *component, + } + + prtd->pcm_count = snd_pcm_lib_period_bytes(substream); +- prtd->pos = 0; + /* rate and channels are sent to audio driver */ + ret = q6apm_graph_media_format_shmem(prtd->graph, &cfg); + if (ret < 0) { +@@ -297,6 +283,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component, + return 0; + } + ++static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ struct q6apm_dai_rtd *prtd = runtime->private_data; ++ int i, ret = 0, avail_periods; ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size; ++ for (i = 0; i < avail_periods; i++) { ++ ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, NO_TIMESTAMP); ++ if (ret < 0) { ++ dev_err(component->dev, "Error queuing playback buffer %d\n", ret); ++ return ret; ++ } ++ prtd->queue_ptr += runtime->period_size; ++ } ++ } ++ ++ return ret; ++} ++ + static int q6apm_dai_trigger(struct snd_soc_component *component, + struct snd_pcm_substream *substream, int cmd) + { +@@ -308,9 +315,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component, + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: +- /* start writing buffers for playback only as we already queued capture buffers */ +- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) +- ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0); + break; + case SNDRV_PCM_TRIGGER_STOP: + /* TODO support be handled via SoftPause Module */ +@@ -432,16 +436,12 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component, + struct snd_pcm_runtime *runtime = substream->runtime; + struct q6apm_dai_rtd *prtd = runtime->private_data; + snd_pcm_uframes_t ptr; +- unsigned long flags; + +- spin_lock_irqsave(&prtd->lock, flags); +- if (prtd->pos == prtd->pcm_size) +- prtd->pos = 0; +- +- ptr = bytes_to_frames(runtime, prtd->pos); +- spin_unlock_irqrestore(&prtd->lock, flags); ++ ptr = q6apm_get_hw_pointer(prtd->graph, substream->stream) * runtime->period_size; ++ if (ptr) ++ return ptr - 1; + +- return ptr; ++ return 0; + } + + static int q6apm_dai_hw_params(struct snd_soc_component 
*component, +@@ -656,8 +656,6 @@ static int q6apm_dai_compr_set_params(struct snd_soc_component *component, + prtd->pcm_size = runtime->fragments * runtime->fragment_size; + prtd->bits_per_sample = 16; + +- prtd->pos = 0; +- + if (prtd->next_track != true) { + memcpy(&prtd->codec, codec, sizeof(*codec)); + +@@ -721,14 +719,12 @@ static int q6apm_dai_compr_set_metadata(struct snd_soc_component *component, + + switch (metadata->key) { + case SNDRV_COMPRESS_ENCODER_PADDING: +- prtd->trailing_samples_drop = metadata->value[0]; + q6apm_remove_trailing_silence(component->dev, prtd->graph, +- prtd->trailing_samples_drop); ++ metadata->value[0]); + break; + case SNDRV_COMPRESS_ENCODER_DELAY: +- prtd->initial_samples_drop = metadata->value[0]; + q6apm_remove_initial_silence(component->dev, prtd->graph, +- prtd->initial_samples_drop); ++ metadata->value[0]); + break; + default: + ret = -EINVAL; +@@ -840,6 +836,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = { + .hw_params = q6apm_dai_hw_params, + .pointer = q6apm_dai_pointer, + .trigger = q6apm_dai_trigger, ++ .ack = q6apm_dai_ack, + .compress_ops = &q6apm_dai_compress_ops, + .use_dai_pcm_id = true, + }; +diff --git a/sound/soc/qcom/qdsp6/q6asm.h b/sound/soc/qcom/qdsp6/q6asm.h +index 394604c349432f..a33d92c7bd6bff 100644 +--- a/sound/soc/qcom/qdsp6/q6asm.h ++++ b/sound/soc/qcom/qdsp6/q6asm.h +@@ -36,16 +36,16 @@ enum { + #define ASM_LAST_BUFFER_FLAG BIT(30) + + struct q6asm_flac_cfg { +- u32 sample_rate; +- u32 ext_sample_rate; +- u32 min_frame_size; +- u32 max_frame_size; +- u16 stream_info_present; +- u16 min_blk_size; +- u16 max_blk_size; +- u16 ch_cfg; +- u16 sample_size; +- u16 md5_sum; ++ u32 sample_rate; ++ u32 ext_sample_rate; ++ u32 min_frame_size; ++ u32 max_frame_size; ++ u16 stream_info_present; ++ u16 min_blk_size; ++ u16 max_blk_size; ++ u16 ch_cfg; ++ u16 sample_size; ++ u16 md5_sum; + }; + + struct q6asm_wma_cfg { +diff --git a/sound/soc/qcom/qdsp6/topology.c b/sound/soc/qcom/qdsp6/topology.c +index 130b22a34fb3b5..70572c83e1017d 100644 +--- a/sound/soc/qcom/qdsp6/topology.c ++++ b/sound/soc/qcom/qdsp6/topology.c +@@ -545,6 +545,7 @@ static struct audioreach_module *audioreach_parse_common_tokens(struct q6apm *ap + + if (mod) { + int pn, id = 0; ++ + mod->module_id = module_id; + mod->max_ip_port = max_ip_port; + mod->max_op_port = max_op_port; +@@ -1271,7 +1272,7 @@ int audioreach_tplg_init(struct snd_soc_component *component) + + ret = request_firmware(&fw, tplg_fw_name, dev); + if (ret < 0) { +- dev_err(dev, "tplg firmware loading %s failed %d \n", tplg_fw_name, ret); ++ dev_err(dev, "tplg firmware loading %s failed %d\n", tplg_fw_name, ret); + goto err; + } + +diff --git a/sound/soc/qcom/sc7180.c b/sound/soc/qcom/sc7180.c +index d1fd40e3f7a9d8..1367752f2b63a6 100644 +--- a/sound/soc/qcom/sc7180.c ++++ b/sound/soc/qcom/sc7180.c +@@ -428,4 +428,4 @@ static struct platform_driver sc7180_snd_driver = { + module_platform_driver(sc7180_snd_driver); + + MODULE_DESCRIPTION("sc7180 ASoC Machine Driver"); +-MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c +index 6e5f194bc34b06..d5cc967992d161 100644 +--- a/sound/soc/qcom/sc8280xp.c ++++ b/sound/soc/qcom/sc8280xp.c +@@ -174,4 +174,4 @@ static struct platform_driver snd_sc8280xp_driver = { + module_platform_driver(snd_sc8280xp_driver); + MODULE_AUTHOR("Srinivas Kandagatla substreams) + return -ENOMEM; + ++ /* ++ * Initialize critical substream fields early in case we hit an ++ * error path and 
end up trying to clean up uninitialized structures ++ * elsewhere. ++ */ ++ for (i = 0; i < snd->nsubstreams; ++i) { ++ struct virtio_pcm_substream *vss = &snd->substreams[i]; ++ ++ vss->snd = snd; ++ vss->sid = i; ++ INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed); ++ init_waitqueue_head(&vss->msg_empty); ++ spin_lock_init(&vss->lock); ++ } ++ + info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; +@@ -350,12 +365,6 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd) + struct virtio_pcm_substream *vss = &snd->substreams[i]; + struct virtio_pcm *vpcm; + +- vss->snd = snd; +- vss->sid = i; +- INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed); +- init_waitqueue_head(&vss->msg_empty); +- spin_lock_init(&vss->lock); +- + rc = virtsnd_pcm_build_hw(vss, &info[i]); + if (rc) + goto on_exit; +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c +index 90ae2ea61324cc..174e076e56af2a 100644 +--- a/tools/bpf/bpftool/prog.c ++++ b/tools/bpf/bpftool/prog.c +@@ -1924,6 +1924,7 @@ static int do_loader(int argc, char **argv) + + obj = bpf_object__open_file(file, &open_opts); + if (!obj) { ++ err = -1; + p_err("failed to open object file"); + goto err_close_obj; + } +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index 8ba5bcfd5cd572..a1b14378bab045 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -1225,12 +1225,15 @@ static const char *uaccess_safe_builtin[] = { + "__ubsan_handle_load_invalid_value", + /* STACKLEAK */ + "stackleak_track_stack", ++ /* TRACE_BRANCH_PROFILING */ ++ "ftrace_likely_update", ++ /* STACKPROTECTOR */ ++ "__stack_chk_fail", + /* misc */ + "csum_partial_copy_generic", + "copy_mc_fragile", + "copy_mc_fragile_handle_tail", + "copy_mc_enhanced_fast_string", +- "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ + "rep_stos_alternative", + "rep_movs_alternative", + "__copy_user_nocache", +@@ -1549,6 +1552,8 @@ static int add_jump_destinations(struct objtool_file *file) + unsigned long dest_off; + + for_each_insn(file, insn) { ++ struct symbol *func = insn_func(insn); ++ + if (insn->jump_dest) { + /* + * handle_group_alt() may have previously set +@@ -1572,7 +1577,7 @@ static int add_jump_destinations(struct objtool_file *file) + } else if (reloc->sym->return_thunk) { + add_return_call(file, insn, true); + continue; +- } else if (insn_func(insn)) { ++ } else if (func) { + /* + * External sibling call or internal sibling call with + * STT_FUNC reloc. +@@ -1605,6 +1610,15 @@ static int add_jump_destinations(struct objtool_file *file) + continue; + } + ++ /* ++ * GCOV/KCOV dead code can jump to the end of the ++ * function/section. ++ */ ++ if (file->ignore_unreachables && func && ++ dest_sec == insn->sec && ++ dest_off == func->offset + func->len) ++ continue; ++ + WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx", + dest_sec->name, dest_off); + return -1; +@@ -1613,8 +1627,7 @@ static int add_jump_destinations(struct objtool_file *file) + /* + * Cross-function jump. + */ +- if (insn_func(insn) && insn_func(jump_dest) && +- insn_func(insn) != insn_func(jump_dest)) { ++ if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) { + + /* + * For GCC 8+, create parent/child links for any cold +@@ -1631,10 +1644,10 @@ static int add_jump_destinations(struct objtool_file *file) + * case where the parent function's only reference to a + * subfunction is through a jump table. 
+ */ +- if (!strstr(insn_func(insn)->name, ".cold") && ++ if (!strstr(func->name, ".cold") && + strstr(insn_func(jump_dest)->name, ".cold")) { +- insn_func(insn)->cfunc = insn_func(jump_dest); +- insn_func(jump_dest)->pfunc = insn_func(insn); ++ func->cfunc = insn_func(jump_dest); ++ insn_func(jump_dest)->pfunc = func; + } + } + +@@ -3569,6 +3582,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, + !strncmp(func->name, "__pfx_", 6)) + return 0; + ++ if (file->ignore_unreachables) ++ return 0; ++ + WARN("%s() falls through to next function %s()", + func->name, insn_func(insn)->name); + return 1; +@@ -3788,6 +3804,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, + if (!next_insn) { + if (state.cfi.cfa.base == CFI_UNDEFINED) + return 0; ++ if (file->ignore_unreachables) ++ return 0; ++ + WARN("%s: unexpected end of section", sec->name); + return 1; + } +@@ -3940,6 +3959,9 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn) + break; + } + ++ if (insn->dead_end) ++ return 0; ++ + if (!next) { + WARN_INSN(insn, "teh end!"); + return -1; +diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c +index e949a43a614508..efabfcbe0b498c 100644 +--- a/tools/testing/selftests/mincore/mincore_selftest.c ++++ b/tools/testing/selftests/mincore/mincore_selftest.c +@@ -261,9 +261,6 @@ TEST(check_file_mmap) + TH_LOG("No read-ahead pages found in memory"); + } + +- EXPECT_LT(i, vec_size) { +- TH_LOG("Read-ahead pages reached the end of the file"); +- } + /* + * End of the readahead window. The rest of the pages shouldn't + * be in memory. +diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh +new file mode 100755 +index 00000000000000..1f2b642381d179 +--- /dev/null ++++ b/tools/testing/selftests/ublk/test_stripe_04.sh +@@ -0,0 +1,24 @@ ++#!/bin/bash ++# SPDX-License-Identifier: GPL-2.0 ++ ++. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh ++ ++TID="stripe_04" ++ERR_CODE=0 ++ ++_prep_test "stripe" "mkfs & mount & umount on zero copy" ++ ++backfile_0=$(_create_backfile 256M) ++backfile_1=$(_create_backfile 256M) ++dev_id=$(_add_ublk_dev -t stripe -z -q 2 "$backfile_0" "$backfile_1") ++_check_add_dev $TID $? "$backfile_0" "$backfile_1" ++ ++_mkfs_mount_test /dev/ublkb"${dev_id}" ++ERR_CODE=$? 
++ ++_cleanup_test "stripe" ++ ++_remove_backfile "$backfile_0" ++_remove_backfile "$backfile_1" ++ ++_show_result $TID $ERR_CODE diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.89-90.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.89-90.patch new file mode 100644 index 0000000000..ef784cc2c4 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.89-90.patch @@ -0,0 +1,5797 @@ +diff --git a/Makefile b/Makefile +index 23e90df5785c84..587a1586e76db8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 89 ++SUBLEVEL = 90 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi +index f2386dcb9ff2c0..dda4fa91b2f2cc 100644 +--- a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi ++++ b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi +@@ -40,6 +40,9 @@ ethphy1: ethernet-phy@1 { + reg = <1>; + interrupt-parent = <&gpio4>; + interrupts = <16 IRQ_TYPE_LEVEL_LOW>; ++ micrel,led-mode = <1>; ++ clocks = <&clks IMX6UL_CLK_ENET_REF>; ++ clock-names = "rmii-ref"; + status = "okay"; + }; + }; +diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi +index 5268a43218415f..ce5409acae1ce0 100644 +--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi ++++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi +@@ -73,14 +73,13 @@ scmi_reset: protocol@16 { + }; + + intc: interrupt-controller@4ac00000 { +- compatible = "arm,cortex-a7-gic"; ++ compatible = "arm,gic-400"; + #interrupt-cells = <3>; +- #address-cells = <1>; + interrupt-controller; + reg = <0x0 0x4ac10000 0x0 0x1000>, +- <0x0 0x4ac20000 0x0 0x2000>, +- <0x0 0x4ac40000 0x0 0x2000>, +- <0x0 0x4ac60000 0x0 0x2000>; ++ <0x0 0x4ac20000 0x0 0x20000>, ++ <0x0 0x4ac40000 0x0 0x20000>, ++ <0x0 0x4ac60000 0x0 0x20000>; + }; + + psci { +diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c +index ecfbff6991bb5d..edc4c727783d82 100644 +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void) + static const struct midr_range spectre_bhb_k132_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), ++ {}, + }; + static const struct midr_range spectre_bhb_k38_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), ++ {}, + }; + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), +diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c +index 6ce427b58836c5..ecd27b48d61f9d 100644 +--- a/arch/parisc/math-emu/driver.c ++++ b/arch/parisc/math-emu/driver.c +@@ -103,9 +103,19 @@ handle_fpe(struct pt_regs *regs) + + memcpy(regs->fr, frcopy, sizeof regs->fr); + if (signalcode != 0) { +- force_sig_fault(signalcode >> 24, signalcode & 0xffffff, +- (void __user *) regs->iaoq[0]); +- return -1; ++ int sig = signalcode >> 24; ++ ++ if (sig == SIGFPE) { ++ /* ++ * Clear floating point trap bit to avoid trapping ++ * again on the first floating-point instruction in ++ * the userspace signal handler. ++ */ ++ regs->fr[0] &= ~(1ULL << 38); ++ } ++ force_sig_fault(sig, signalcode & 0xffffff, ++ (void __user *) regs->iaoq[0]); ++ return -1; + } + + return signalcode ? 
-1 : 0; +diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper +index 352d7de24018fb..ddb02cf0caaf59 100755 +--- a/arch/powerpc/boot/wrapper ++++ b/arch/powerpc/boot/wrapper +@@ -234,10 +234,8 @@ fi + + # suppress some warnings in recent ld versions + nowarn="-z noexecstack" +-if ! ld_is_lld; then +- if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then +- nowarn="$nowarn --no-warn-rwx-segments" +- fi ++if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then ++ nowarn="$nowarn --no-warn-rwx-segments" + fi + + platformo=$object/"$platform".o +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c +index c6a4ac766b2bf9..28460e33408084 100644 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c +@@ -1056,6 +1056,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in + pmd_t *pmd; + pte_t *pte; + ++ /* ++ * Make sure we align the start vmemmap addr so that we calculate ++ * the correct start_pfn in altmap boundary check to decided whether ++ * we should use altmap or RAM based backing memory allocation. Also ++ * the address need to be aligned for set_pte operation. ++ ++ * If the start addr is already PMD_SIZE aligned we will try to use ++ * a pmd mapping. We don't want to be too aggressive here beacause ++ * that will cause more allocations in RAM. So only if the namespace ++ * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping. ++ */ ++ ++ start = ALIGN_DOWN(start, PAGE_SIZE); + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + +@@ -1081,8 +1094,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in + * in altmap block allocation failures, in which case + * we fallback to RAM for vmemmap allocation. + */ +- if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) || +- altmap_cross_boundary(altmap, addr, PMD_SIZE))) { ++ if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap && ++ altmap_cross_boundary(altmap, addr, PMD_SIZE))) { + /* + * make sure we don't create altmap mappings + * covering things outside the device. 
+diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h +index 9f5d6e14c40553..7228e266b9a1ae 100644 +--- a/arch/riscv/include/asm/patch.h ++++ b/arch/riscv/include/asm/patch.h +@@ -9,7 +9,7 @@ + int patch_insn_write(void *addr, const void *insn, size_t len); + int patch_text_nosync(void *addr, const void *insns, size_t len); + int patch_text_set_nosync(void *addr, u8 c, size_t len); +-int patch_text(void *addr, u32 *insns, int ninsns); ++int patch_text(void *addr, u32 *insns, size_t len); + + extern int riscv_patch_in_stop_machine; + +diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c +index 78387d843aa56b..aeda87240dbc1e 100644 +--- a/arch/riscv/kernel/patch.c ++++ b/arch/riscv/kernel/patch.c +@@ -19,7 +19,7 @@ + struct patch_insn { + void *addr; + u32 *insns; +- int ninsns; ++ size_t len; + atomic_t cpu_count; + }; + +@@ -234,14 +234,10 @@ NOKPROBE_SYMBOL(patch_text_nosync); + static int patch_text_cb(void *data) + { + struct patch_insn *patch = data; +- unsigned long len; +- int i, ret = 0; ++ int ret = 0; + + if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { +- for (i = 0; ret == 0 && i < patch->ninsns; i++) { +- len = GET_INSN_LENGTH(patch->insns[i]); +- ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len); +- } ++ ret = patch_insn_write(patch->addr, patch->insns, patch->len); + /* + * Make sure the patching store is effective *before* we + * increment the counter which releases all waiting CPUs +@@ -262,13 +258,13 @@ static int patch_text_cb(void *data) + } + NOKPROBE_SYMBOL(patch_text_cb); + +-int patch_text(void *addr, u32 *insns, int ninsns) ++int patch_text(void *addr, u32 *insns, size_t len) + { + int ret; + struct patch_insn patch = { + .addr = addr, + .insns = insns, +- .ninsns = ninsns, ++ .len = len, + .cpu_count = ATOMIC_INIT(0), + }; + +diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c +index 4fbc70e823f0fa..297427ffc4e043 100644 +--- a/arch/riscv/kernel/probes/kprobes.c ++++ b/arch/riscv/kernel/probes/kprobes.c +@@ -23,13 +23,13 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); + + static void __kprobes arch_prepare_ss_slot(struct kprobe *p) + { ++ size_t len = GET_INSN_LENGTH(p->opcode); + u32 insn = __BUG_INSN_32; +- unsigned long offset = GET_INSN_LENGTH(p->opcode); + +- p->ainsn.api.restore = (unsigned long)p->addr + offset; ++ p->ainsn.api.restore = (unsigned long)p->addr + len; + +- patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1); +- patch_text_nosync((void *)p->ainsn.api.insn + offset, &insn, 1); ++ patch_text_nosync(p->ainsn.api.insn, &p->opcode, len); ++ patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn)); + } + + static void __kprobes arch_prepare_simulate(struct kprobe *p) +@@ -116,16 +116,18 @@ void *alloc_insn_page(void) + /* install breakpoint in text */ + void __kprobes arch_arm_kprobe(struct kprobe *p) + { +- u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ? +- __BUG_INSN_32 : __BUG_INSN_16; ++ size_t len = GET_INSN_LENGTH(p->opcode); ++ u32 insn = len == 4 ? 
__BUG_INSN_32 : __BUG_INSN_16; + +- patch_text(p->addr, &insn, 1); ++ patch_text(p->addr, &insn, len); + } + + /* remove breakpoint from text */ + void __kprobes arch_disarm_kprobe(struct kprobe *p) + { +- patch_text(p->addr, &p->opcode, 1); ++ size_t len = GET_INSN_LENGTH(p->opcode); ++ ++ patch_text(p->addr, &p->opcode, len); + } + + void __kprobes arch_remove_kprobe(struct kprobe *p) +diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c +index 26eeb397363193..16eb4cd11cbd67 100644 +--- a/arch/riscv/net/bpf_jit_comp64.c ++++ b/arch/riscv/net/bpf_jit_comp64.c +@@ -14,6 +14,7 @@ + #include "bpf_jit.h" + + #define RV_FENTRY_NINSNS 2 ++#define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4) + + #define RV_REG_TCC RV_REG_A6 + #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ +@@ -681,7 +682,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, + if (ret) + return ret; + +- if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4)) ++ if (memcmp(ip, old_insns, RV_FENTRY_NBYTES)) + return -EFAULT; + + ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call); +@@ -690,8 +691,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, + + cpus_read_lock(); + mutex_lock(&text_mutex); +- if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4)) +- ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS); ++ if (memcmp(ip, new_insns, RV_FENTRY_NBYTES)) ++ ret = patch_text(ip, new_insns, RV_FENTRY_NBYTES); + mutex_unlock(&text_mutex); + cpus_read_unlock(); + +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 66d5782df18f8c..835c9febb6a854 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -4206,7 +4206,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) + arr[pebs_enable] = (struct perf_guest_switch_msr){ + .msr = MSR_IA32_PEBS_ENABLE, + .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, +- .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, ++ .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable, + }; + + if (arr[pebs_enable].host) { +diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h +index 9b419f0de713cc..e59ded9761663e 100644 +--- a/arch/x86/include/asm/kvm-x86-ops.h ++++ b/arch/x86/include/asm/kvm-x86-ops.h +@@ -48,6 +48,7 @@ KVM_X86_OP(set_idt) + KVM_X86_OP(get_gdt) + KVM_X86_OP(set_gdt) + KVM_X86_OP(sync_dirty_debug_regs) ++KVM_X86_OP(set_dr6) + KVM_X86_OP(set_dr7) + KVM_X86_OP(cache_reg) + KVM_X86_OP(get_rflags) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 39672561c6be87..5dfb8cc9616e55 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1595,6 +1595,7 @@ struct kvm_x86_ops { + void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); + void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); + void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu); ++ void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value); + void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); + void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); + unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 1d06b8fc15a85c..29c1be65cb71a0 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -2014,11 +2014,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) + svm->asid = sd->next_asid++; + } + +-static void 
svm_set_dr6(struct vcpu_svm *svm, unsigned long value) ++static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) + { +- struct vmcb *vmcb = svm->vmcb; ++ struct vmcb *vmcb = to_svm(vcpu)->vmcb; + +- if (svm->vcpu.arch.guest_state_protected) ++ if (vcpu->arch.guest_state_protected) + return; + + if (unlikely(value != vmcb->save.dr6)) { +@@ -4220,10 +4220,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) + * Run with all-zero DR6 unless needed, so that we can get the exact cause + * of a #DB. + */ +- if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) +- svm_set_dr6(svm, vcpu->arch.dr6); +- else +- svm_set_dr6(svm, DR6_ACTIVE_LOW); ++ if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) ++ svm_set_dr6(vcpu, DR6_ACTIVE_LOW); + + clgi(); + kvm_load_guest_xsave_state(vcpu); +@@ -5002,6 +5000,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { + .set_idt = svm_set_idt, + .get_gdt = svm_get_gdt, + .set_gdt = svm_set_gdt, ++ .set_dr6 = svm_set_dr6, + .set_dr7 = svm_set_dr7, + .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, + .cache_reg = svm_cache_reg, +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 52098844290ad4..e5a2c230110ea4 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -5617,6 +5617,12 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) + set_debugreg(DR6_RESERVED, 6); + } + ++static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) ++{ ++ lockdep_assert_irqs_disabled(); ++ set_debugreg(vcpu->arch.dr6, 6); ++} ++ + static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) + { + vmcs_writel(GUEST_DR7, val); +@@ -7356,10 +7362,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) + vmx->loaded_vmcs->host_state.cr4 = cr4; + } + +- /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */ +- if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) +- set_debugreg(vcpu->arch.dr6, 6); +- + /* When single-stepping over STI and MOV SS, we must clear the + * corresponding interruptibility bits in the guest state. Otherwise + * vmentry fails as it then expects bit 14 (BS) in pending debug +@@ -8292,6 +8294,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { + .set_idt = vmx_set_idt, + .get_gdt = vmx_get_gdt, + .set_gdt = vmx_set_gdt, ++ .set_dr6 = vmx_set_dr6, + .set_dr7 = vmx_set_dr7, + .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, + .cache_reg = vmx_cache_reg, +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index f67fe8a65820c8..1eeb01afa40ba9 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -10772,6 +10772,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + set_debugreg(vcpu->arch.eff_db[1], 1); + set_debugreg(vcpu->arch.eff_db[2], 2); + set_debugreg(vcpu->arch.eff_db[3], 3); ++ /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. 
*/ ++ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) ++ static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6); + } else if (unlikely(hw_breakpoint_active())) { + set_debugreg(0, 7); + } +diff --git a/drivers/base/module.c b/drivers/base/module.c +index a33663d92256d8..955582b34e54af 100644 +--- a/drivers/base/module.c ++++ b/drivers/base/module.c +@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, struct device_driver *drv) + if (mod) + mk = &mod->mkobj; + else if (drv->mod_name) { +- struct kobject *mkobj; +- +- /* Lookup built-in module entry in /sys/modules */ +- mkobj = kset_find_obj(module_kset, drv->mod_name); +- if (mkobj) { +- mk = container_of(mkobj, struct module_kobject, kobj); ++ /* Lookup or create built-in module entry in /sys/modules */ ++ mk = lookup_or_create_module_kobject(drv->mod_name); ++ if (mk) { + /* remember our module structure */ + drv->p->mkobj = mk; +- /* kset_find_obj took a reference */ +- kobject_put(mkobj); ++ /* lookup_or_create_module_kobject took a reference */ ++ kobject_put(&mk->kobj); + } + } + +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index bc3f63f1ccd863..d6195565ef7aeb 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -3521,22 +3521,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev) + bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err); + } + +-/* +- * ==0: not a dump pkt. +- * < 0: fails to handle a dump pkt +- * > 0: otherwise. +- */ ++/* Return: 0 on success, negative errno on failure. */ + static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- int ret = 1; ++ int ret = 0; + u8 pkt_type; + u8 *sk_ptr; + unsigned int sk_len; + u16 seqno; + u32 dump_size; + +- struct hci_event_hdr *event_hdr; +- struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; +@@ -3546,30 +3540,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + sk_len = skb->len; + + if (pkt_type == HCI_ACLDATA_PKT) { +- acl_hdr = hci_acl_hdr(skb); +- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) +- return 0; + sk_ptr += HCI_ACL_HDR_SIZE; + sk_len -= HCI_ACL_HDR_SIZE; +- event_hdr = (struct hci_event_hdr *)sk_ptr; +- } else { +- event_hdr = hci_event_hdr(skb); + } + +- if ((event_hdr->evt != HCI_VENDOR_PKT) +- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) +- return 0; +- + sk_ptr += HCI_EVENT_HDR_SIZE; + sk_len -= HCI_EVENT_HDR_SIZE; + + dump_hdr = (struct qca_dump_hdr *)sk_ptr; +- if ((sk_len < offsetof(struct qca_dump_hdr, data)) +- || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) +- || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) +- return 0; +- +- /*it is dump pkt now*/ + seqno = le16_to_cpu(dump_hdr->seqno); + if (seqno == 0) { + set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); +@@ -3643,17 +3621,84 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + return ret; + } + ++/* Return: true if the ACL packet is a dump packet, false otherwise. 
*/ ++static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) ++{ ++ u8 *sk_ptr; ++ unsigned int sk_len; ++ ++ struct hci_event_hdr *event_hdr; ++ struct hci_acl_hdr *acl_hdr; ++ struct qca_dump_hdr *dump_hdr; ++ ++ sk_ptr = skb->data; ++ sk_len = skb->len; ++ ++ acl_hdr = hci_acl_hdr(skb); ++ if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) ++ return false; ++ ++ sk_ptr += HCI_ACL_HDR_SIZE; ++ sk_len -= HCI_ACL_HDR_SIZE; ++ event_hdr = (struct hci_event_hdr *)sk_ptr; ++ ++ if ((event_hdr->evt != HCI_VENDOR_PKT) || ++ (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) ++ return false; ++ ++ sk_ptr += HCI_EVENT_HDR_SIZE; ++ sk_len -= HCI_EVENT_HDR_SIZE; ++ ++ dump_hdr = (struct qca_dump_hdr *)sk_ptr; ++ if ((sk_len < offsetof(struct qca_dump_hdr, data)) || ++ (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || ++ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) ++ return false; ++ ++ return true; ++} ++ ++/* Return: true if the event packet is a dump packet, false otherwise. */ ++static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) ++{ ++ u8 *sk_ptr; ++ unsigned int sk_len; ++ ++ struct hci_event_hdr *event_hdr; ++ struct qca_dump_hdr *dump_hdr; ++ ++ sk_ptr = skb->data; ++ sk_len = skb->len; ++ ++ event_hdr = hci_event_hdr(skb); ++ ++ if ((event_hdr->evt != HCI_VENDOR_PKT) ++ || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) ++ return false; ++ ++ sk_ptr += HCI_EVENT_HDR_SIZE; ++ sk_len -= HCI_EVENT_HDR_SIZE; ++ ++ dump_hdr = (struct qca_dump_hdr *)sk_ptr; ++ if ((sk_len < offsetof(struct qca_dump_hdr, data)) || ++ (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || ++ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) ++ return false; ++ ++ return true; ++} ++ + static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- if (handle_dump_pkt_qca(hdev, skb)) +- return 0; ++ if (acl_pkt_is_dump_qca(hdev, skb)) ++ return handle_dump_pkt_qca(hdev, skb); + return hci_recv_frame(hdev, skb); + } + + static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- if (handle_dump_pkt_qca(hdev, skb)) +- return 0; ++ if (evt_pkt_is_dump_qca(hdev, skb)) ++ return handle_dump_pkt_qca(hdev, skb); + return hci_recv_frame(hdev, skb); + } + +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 0ac0998152ce81..6682f422cadd90 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -534,16 +534,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) + EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); + + static unsigned int __resolve_freq(struct cpufreq_policy *policy, +- unsigned int target_freq, unsigned int relation) ++ unsigned int target_freq, ++ unsigned int min, unsigned int max, ++ unsigned int relation) + { + unsigned int idx; + +- target_freq = clamp_val(target_freq, policy->min, policy->max); ++ target_freq = clamp_val(target_freq, min, max); + + if (!policy->freq_table) + return target_freq; + +- idx = cpufreq_frequency_table_target(policy, target_freq, relation); ++ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); + policy->cached_resolved_idx = idx; + policy->cached_target_freq = target_freq; + return policy->freq_table[idx].frequency; +@@ -563,7 +565,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy, + unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq) + { +- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE); ++ unsigned int min = 
READ_ONCE(policy->min); ++ unsigned int max = READ_ONCE(policy->max); ++ ++ /* ++ * If this function runs in parallel with cpufreq_set_policy(), it may ++ * read policy->min before the update and policy->max after the update ++ * or the other way around, so there is no ordering guarantee. ++ * ++ * Resolve this by always honoring the max (in case it comes from ++ * thermal throttling or similar). ++ */ ++ if (unlikely(min > max)) ++ min = max; ++ ++ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); + } + EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); + +@@ -2335,7 +2351,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, + if (cpufreq_disabled()) + return -ENODEV; + +- target_freq = __resolve_freq(policy, target_freq, relation); ++ target_freq = __resolve_freq(policy, target_freq, policy->min, ++ policy->max, relation); + + pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", + policy->cpu, target_freq, relation, old_target_freq); +@@ -2625,11 +2642,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, + * Resolve policy min/max to available frequencies. It ensures + * no frequency resolution will neither overshoot the requested maximum + * nor undershoot the requested minimum. ++ * ++ * Avoid storing intermediate values in policy->max or policy->min and ++ * compiler optimizations around them because they may be accessed ++ * concurrently by cpufreq_driver_resolve_freq() during the update. + */ +- policy->min = new_data.min; +- policy->max = new_data.max; +- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); +- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); ++ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, ++ new_data.min, new_data.max, ++ CPUFREQ_RELATION_H)); ++ new_data.min = __resolve_freq(policy, new_data.min, new_data.min, ++ new_data.max, CPUFREQ_RELATION_L); ++ WRITE_ONCE(policy->min, new_data.min > policy->max ? 
policy->max : new_data.min); ++ + trace_cpu_frequency_limits(policy); + + policy->cached_target_freq = UINT_MAX; +diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c +index c52d19d67557f5..65cbd5ecbaf83d 100644 +--- a/drivers/cpufreq/cpufreq_ondemand.c ++++ b/drivers/cpufreq/cpufreq_ondemand.c +@@ -77,7 +77,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, + return freq_next; + } + +- index = cpufreq_frequency_table_target(policy, freq_next, relation); ++ index = cpufreq_frequency_table_target(policy, freq_next, policy->min, ++ policy->max, relation); + freq_req = freq_table[index].frequency; + freq_reduc = freq_req * od_tuners->powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; +diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c +index c17dc51a5a022d..94de089b145394 100644 +--- a/drivers/cpufreq/freq_table.c ++++ b/drivers/cpufreq/freq_table.c +@@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) + EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); + + int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, +- unsigned int target_freq, +- unsigned int relation) ++ unsigned int target_freq, unsigned int min, ++ unsigned int max, unsigned int relation) + { + struct cpufreq_frequency_table optimal = { + .driver_data = ~0, +@@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, + cpufreq_for_each_valid_entry_idx(pos, table, i) { + freq = pos->frequency; + +- if ((freq < policy->min) || (freq > policy->max)) ++ if (freq < min || freq > max) + continue; + if (freq == target_freq) { + optimal.driver_data = i; +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c +index 8b31cd54bdb6de..e04fd1a7e9aaea 100644 +--- a/drivers/edac/altera_edac.c ++++ b/drivers/edac/altera_edac.c +@@ -98,7 +98,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) + if (status & priv->ecc_stat_ce_mask) { + regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset, + &err_addr); +- if (priv->ecc_uecnt_offset) ++ if (priv->ecc_cecnt_offset) + regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset, + &err_count); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, +@@ -1015,9 +1015,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, + } + } + +- /* Interrupt mode set to every SBERR */ +- regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, +- ALTR_A10_ECC_INTMODE); + /* Enable ECC */ + ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + + ALTR_A10_ECC_CTRL_OFST)); +@@ -2138,6 +2135,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev) + return PTR_ERR(edac->ecc_mgr_map); + } + ++ /* Set irq mask for DDR SBE to avoid any pending irq before registration */ ++ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, ++ (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0)); ++ + edac->irq_chip.name = pdev->dev.of_node->name; + edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; + edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; +diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h +index 3727e72c8c2e70..7248d24c4908d7 100644 +--- a/drivers/edac/altera_edac.h ++++ b/drivers/edac/altera_edac.h +@@ -249,6 +249,8 @@ struct altr_sdram_mc_data { + #define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94 + #define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98 + #define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1) ++#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16) ++#define 
A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17) + + #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C + #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 +diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c +index 7cd6b1564e8018..7c2db3f017651b 100644 +--- a/drivers/firmware/arm_ffa/driver.c ++++ b/drivers/firmware/arm_ffa/driver.c +@@ -225,7 +225,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, + memcpy(buffer + idx, drv_info->rx_buffer + idx * sz, + buf_sz); + +- ffa_rx_release(); ++ if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY)) ++ ffa_rx_release(); + + mutex_unlock(&drv_info->rx_lock); + +diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c +index dcf774d3edfe4c..51eeaf14367dac 100644 +--- a/drivers/firmware/arm_scmi/bus.c ++++ b/drivers/firmware/arm_scmi/bus.c +@@ -240,6 +240,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent, + if (!dev) + return NULL; + ++ /* Drop the refcnt bumped implicitly by device_find_child */ ++ put_device(dev); ++ + return to_scmi_dev(dev); + } + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +index 2ad9f900a85749..a048022d9865a7 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +@@ -172,7 +172,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + struct mod_hdcp_display_adjustment display_adjust; + unsigned int conn_index = aconnector->base.index; + +- mutex_lock(&hdcp_w->mutex); ++ guard(mutex)(&hdcp_w->mutex); ++ drm_connector_get(&aconnector->base); ++ if (hdcp_w->aconnector[conn_index]) ++ drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; + + memset(&link_adjust, 0, sizeof(link_adjust)); +@@ -209,7 +212,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); + + process_output(hdcp_w); +- mutex_unlock(&hdcp_w->mutex); + } + + static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, +@@ -220,8 +222,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, + struct drm_connector_state *conn_state = aconnector->base.state; + unsigned int conn_index = aconnector->base.index; + +- mutex_lock(&hdcp_w->mutex); +- hdcp_w->aconnector[conn_index] = aconnector; ++ guard(mutex)(&hdcp_w->mutex); + + /* the removal of display will invoke auth reset -> hdcp destroy and + * we'd expect the Content Protection (CP) property changed back to +@@ -237,9 +238,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, + } + + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); +- ++ if (hdcp_w->aconnector[conn_index]) { ++ drm_connector_put(&hdcp_w->aconnector[conn_index]->base); ++ hdcp_w->aconnector[conn_index] = NULL; ++ } + process_output(hdcp_w); +- mutex_unlock(&hdcp_w->mutex); + } + + void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +@@ -247,7 +250,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + unsigned int conn_index; + +- mutex_lock(&hdcp_w->mutex); ++ guard(mutex)(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + +@@ -256,11 +259,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde + for 
(conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + hdcp_w->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; ++ if (hdcp_w->aconnector[conn_index]) { ++ drm_connector_put(&hdcp_w->aconnector[conn_index]->base); ++ hdcp_w->aconnector[conn_index] = NULL; ++ } + } + + process_output(hdcp_w); +- +- mutex_unlock(&hdcp_w->mutex); + } + + void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +@@ -277,7 +282,7 @@ static void event_callback(struct work_struct *work) + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + +- mutex_lock(&hdcp_work->mutex); ++ guard(mutex)(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->callback_dwork); + +@@ -285,8 +290,6 @@ static void event_callback(struct work_struct *work) + &hdcp_work->output); + + process_output(hdcp_work); +- +- mutex_unlock(&hdcp_work->mutex); + } + + static void event_property_update(struct work_struct *work) +@@ -323,7 +326,7 @@ static void event_property_update(struct work_struct *work) + continue; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); +- mutex_lock(&hdcp_work->mutex); ++ guard(mutex)(&hdcp_work->mutex); + + if (conn_state->commit) { + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, +@@ -355,7 +358,6 @@ static void event_property_update(struct work_struct *work) + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); + } +- mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } + } +@@ -368,7 +370,7 @@ static void event_property_validate(struct work_struct *work) + struct amdgpu_dm_connector *aconnector; + unsigned int conn_index; + +- mutex_lock(&hdcp_work->mutex); ++ guard(mutex)(&hdcp_work->mutex); + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; + conn_index++) { +@@ -408,8 +410,6 @@ static void event_property_validate(struct work_struct *work) + schedule_work(&hdcp_work->property_update_work); + } + } +- +- mutex_unlock(&hdcp_work->mutex); + } + + static void event_watchdog_timer(struct work_struct *work) +@@ -420,7 +420,7 @@ static void event_watchdog_timer(struct work_struct *work) + struct hdcp_workqueue, + watchdog_timer_dwork); + +- mutex_lock(&hdcp_work->mutex); ++ guard(mutex)(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + +@@ -429,8 +429,6 @@ static void event_watchdog_timer(struct work_struct *work) + &hdcp_work->output); + + process_output(hdcp_work); +- +- mutex_unlock(&hdcp_work->mutex); + } + + static void event_cpirq(struct work_struct *work) +@@ -439,13 +437,11 @@ static void event_cpirq(struct work_struct *work) + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + +- mutex_lock(&hdcp_work->mutex); ++ guard(mutex)(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); +- +- mutex_unlock(&hdcp_work->mutex); + } + + void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) +@@ -479,7 +475,7 @@ static bool enable_assr(void *handle, struct dc_link *link) + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf; + +- mutex_lock(&psp->dtm_context.mutex); ++ guard(mutex)(&psp->dtm_context.mutex); + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; +@@ -494,8 +490,6 @@ static bool enable_assr(void *handle, struct 
dc_link *link) + res = false; + } + +- mutex_unlock(&psp->dtm_context.mutex); +- + return res; + } + +@@ -504,6 +498,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; ++ unsigned int conn_index = aconnector->base.index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; +@@ -557,13 +552,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) + (!!aconnector->base.state) ? + aconnector->base.state->hdcp_content_type : -1); + +- mutex_lock(&hdcp_w->mutex); ++ guard(mutex)(&hdcp_w->mutex); + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); +- ++ drm_connector_get(&aconnector->base); ++ if (hdcp_w->aconnector[conn_index]) ++ drm_connector_put(&hdcp_w->aconnector[conn_index]->base); ++ hdcp_w->aconnector[conn_index] = aconnector; + process_output(hdcp_w); +- mutex_unlock(&hdcp_w->mutex); +- + } + + /** +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c +index 1d22dba69b2753..b943221b238f87 100644 +--- a/drivers/gpu/drm/drm_file.c ++++ b/drivers/gpu/drm/drm_file.c +@@ -1015,6 +1015,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) + struct drm_file *file = f->private_data; + struct drm_device *dev = file->minor->dev; + struct drm_printer p = drm_seq_file_printer(m); ++ int idx; ++ ++ if (!drm_dev_enter(dev, &idx)) ++ return; + + drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name); + drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id); +@@ -1029,6 +1033,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) + + if (dev->driver->show_fdinfo) + dev->driver->show_fdinfo(&p, file); ++ ++ drm_dev_exit(idx); + } + EXPORT_SYMBOL(drm_show_fdinfo); + +diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h +index 298ad38e6c7df6..c36d956b9b824f 100644 +--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h ++++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h +@@ -25,6 +25,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp); + + int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id); + void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); ++bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); + + #else + static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp) +@@ -36,8 +37,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp) + return 0; + } + +-#endif ++static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) ++{ ++ return false; ++} + +-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); ++#endif + + #endif /*__INTEL_PXP_GSCCS_H__ */ +diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c +index 2a942dc6a6dc23..2a82119eb58ed8 100644 +--- a/drivers/gpu/drm/meson/meson_vclk.c ++++ b/drivers/gpu/drm/meson/meson_vclk.c +@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, + FREQ_1000_1001(params[i].pixel_freq)); + DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n", + i, params[i].phy_freq, +- FREQ_1000_1001(params[i].phy_freq/1000)*1000); ++ FREQ_1000_1001(params[i].phy_freq/10)*10); + /* Match strict frequency */ + if (phy_freq == params[i].phy_freq && + 
vclk_freq == params[i].vclk_freq) + return MODE_OK; + /* Match 1000/1001 variant */ +- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) && ++ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) && + vclk_freq == FREQ_1000_1001(params[i].vclk_freq)) + return MODE_OK; + } +@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, + + for (freq = 0 ; params[freq].pixel_freq ; ++freq) { + if ((phy_freq == params[freq].phy_freq || +- phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) && ++ phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) && + (vclk_freq == params[freq].vclk_freq || + vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) { + if (vclk_freq != params[freq].vclk_freq) +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c +index 03eacb22648ef7..1bfa312d6fb857 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fence.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c +@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) + while (!list_empty(&fctx->pending)) { + fence = list_entry(fctx->pending.next, typeof(*fence), head); + +- if (error) ++ if (error && !dma_fence_is_signaled_locked(&fence->base)) + dma_fence_set_error(&fence->base, error); + + if (nouveau_fence_signal(fence)) +diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c +index 5d4f04a3c6d322..b44b36bd565ea4 100644 +--- a/drivers/i2c/busses/i2c-imx-lpi2c.c ++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c +@@ -616,9 +616,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev) + return 0; + + rpm_disable: +- pm_runtime_put(&pdev->dev); +- pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); ++ pm_runtime_put_sync(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); + + return ret; + } +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c +index ef3fae113dd643..2e7a12f306510c 100644 +--- a/drivers/iommu/amd/init.c ++++ b/drivers/iommu/amd/init.c +@@ -3682,6 +3682,14 @@ static int __init parse_ivrs_acpihid(char *str) + while (*uid == '0' && *(uid + 1)) + uid++; + ++ if (strlen(hid) >= ACPIHID_HID_LEN) { ++ pr_err("Invalid command line: hid is too long\n"); ++ return 1; ++ } else if (strlen(uid) >= ACPIHID_UID_LEN) { ++ pr_err("Invalid command line: uid is too long\n"); ++ return 1; ++ } ++ + i = early_acpihid_map_size++; + memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); + memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +index 6cecbac0e6babf..f2260f45728e79 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +@@ -1443,26 +1443,37 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) + return 0; + } + ++static int arm_smmu_streams_cmp_key(const void *lhs, const struct rb_node *rhs) ++{ ++ struct arm_smmu_stream *stream_rhs = ++ rb_entry(rhs, struct arm_smmu_stream, node); ++ const u32 *sid_lhs = lhs; ++ ++ if (*sid_lhs < stream_rhs->id) ++ return -1; ++ if (*sid_lhs > stream_rhs->id) ++ return 1; ++ return 0; ++} ++ ++static int arm_smmu_streams_cmp_node(struct rb_node *lhs, ++ const struct rb_node *rhs) ++{ ++ return arm_smmu_streams_cmp_key( ++ &rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs); ++} ++ + static struct arm_smmu_master * + arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid) + { + struct rb_node *node; +- struct 
arm_smmu_stream *stream; + + lockdep_assert_held(&smmu->streams_mutex); + +- node = smmu->streams.rb_node; +- while (node) { +- stream = rb_entry(node, struct arm_smmu_stream, node); +- if (stream->id < sid) +- node = node->rb_right; +- else if (stream->id > sid) +- node = node->rb_left; +- else +- return stream->master; +- } +- +- return NULL; ++ node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key); ++ if (!node) ++ return NULL; ++ return rb_entry(node, struct arm_smmu_stream, node)->master; + } + + /* IRQ and event handlers */ +@@ -2575,8 +2586,6 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, + { + int i; + int ret = 0; +- struct arm_smmu_stream *new_stream, *cur_stream; +- struct rb_node **new_node, *parent_node = NULL; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); + + master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams), +@@ -2587,9 +2596,10 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, + + mutex_lock(&smmu->streams_mutex); + for (i = 0; i < fwspec->num_ids; i++) { ++ struct arm_smmu_stream *new_stream = &master->streams[i]; ++ struct rb_node *existing; + u32 sid = fwspec->ids[i]; + +- new_stream = &master->streams[i]; + new_stream->id = sid; + new_stream->master = master; + +@@ -2598,28 +2608,23 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, + break; + + /* Insert into SID tree */ +- new_node = &(smmu->streams.rb_node); +- while (*new_node) { +- cur_stream = rb_entry(*new_node, struct arm_smmu_stream, +- node); +- parent_node = *new_node; +- if (cur_stream->id > new_stream->id) { +- new_node = &((*new_node)->rb_left); +- } else if (cur_stream->id < new_stream->id) { +- new_node = &((*new_node)->rb_right); +- } else { +- dev_warn(master->dev, +- "stream %u already in tree\n", +- cur_stream->id); +- ret = -EINVAL; +- break; +- } +- } +- if (ret) +- break; ++ existing = rb_find_add(&new_stream->node, &smmu->streams, ++ arm_smmu_streams_cmp_node); ++ if (existing) { ++ struct arm_smmu_master *existing_master = ++ rb_entry(existing, struct arm_smmu_stream, node) ++ ->master; ++ ++ /* Bridged PCI devices may end up with duplicated IDs */ ++ if (existing_master == master) ++ continue; + +- rb_link_node(&new_stream->node, parent_node, new_node); +- rb_insert_color(&new_stream->node, &smmu->streams); ++ dev_warn(master->dev, ++ "stream %u already in tree from dev %s\n", sid, ++ dev_name(existing_master->dev)); ++ ret = -EINVAL; ++ break; ++ } + } + + if (ret) { +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index d6381c00bb8ddc..6a745616d85a4b 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -4855,6 +4855,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx); + ++/* QM57/QS57 integrated gfx malfunctions with dmar */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx); ++ + /* Broadwell igfx malfunctions with dmar */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx); +@@ -4932,7 +4935,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) + } + } + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); + 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); + +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index 3f1029c0825e95..f2b3a4e2e54fc8 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -566,6 +566,18 @@ int iommu_probe_device(struct device *dev) + mutex_lock(&iommu_probe_device_lock); + ret = __iommu_probe_device(dev, NULL); + mutex_unlock(&iommu_probe_device_lock); ++ ++ /* ++ * The dma_configure replay paths need bus_iommu_probe() to ++ * finish before they can call arch_setup_dma_ops() ++ */ ++ if (IS_ENABLED(CONFIG_IOMMU_DMA) && !ret && dev->iommu_group) { ++ mutex_lock(&dev->iommu_group->mutex); ++ if (!dev->iommu_group->default_domain && ++ !dev_iommu_ops(dev)->set_platform_dma_ops) ++ ret = -EPROBE_DEFER; ++ mutex_unlock(&dev->iommu_group->mutex); ++ } + if (ret) + return ret; + +@@ -3149,6 +3161,12 @@ int iommu_device_use_default_domain(struct device *dev) + return 0; + + mutex_lock(&group->mutex); ++ /* We may race against bus_iommu_probe() finalising groups here */ ++ if (IS_ENABLED(CONFIG_IOMMU_DMA) && !group->default_domain && ++ !dev_iommu_ops(dev)->set_platform_dma_ops) { ++ ret = -EPROBE_DEFER; ++ goto unlock_out; ++ } + if (group->owner_cnt) { + if (group->owner || !iommu_is_default_domain(group) || + !xa_empty(&group->pasid_array)) { +diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c +index 7124565234a586..0807e4aca933fb 100644 +--- a/drivers/irqchip/irq-qcom-mpm.c ++++ b/drivers/irqchip/irq-qcom-mpm.c +@@ -226,6 +226,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, + if (ret) + return ret; + ++ if (pin == GPIO_NO_WAKE_IRQ) ++ return irq_domain_disconnect_hierarchy(domain, virq); ++ + ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, + &qcom_mpm_chip, priv); + if (ret) +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index 30ddfb21f65818..2d3afeaf886877 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -68,6 +68,8 @@ + #define LIST_DIRTY 1 + #define LIST_SIZE 2 + ++#define SCAN_RESCHED_CYCLE 16 ++ + /*--------------------------------------------------------------*/ + + /* +@@ -2387,7 +2389,12 @@ static void __scan(struct dm_bufio_client *c) + + atomic_long_dec(&c->need_shrink); + freed++; +- cond_resched(); ++ ++ if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) { ++ dm_bufio_unlock(c); ++ cond_resched(); ++ dm_bufio_lock(c); ++ } + } + } + } +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c +index eb2b44f4a61f08..1e27a5bce2d942 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -4687,7 +4687,7 @@ static void dm_integrity_dtr(struct dm_target *ti) + BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); + BUG_ON(!list_empty(&ic->wait_list)); + +- if (ic->mode == 'B') ++ if (ic->mode == 'B' && ic->bitmap_flush_work.work.func) + cancel_delayed_work_sync(&ic->bitmap_flush_work); + if (ic->metadata_wq) + destroy_workqueue(ic->metadata_wq); +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index fd84e06670e8d7..319bd10548e9ad 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -500,8 +500,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv) + gfp = GFP_NOIO; + } + argv = kmalloc_array(new_size, sizeof(*argv), gfp); +- if (argv && old_argv) { +- memcpy(argv, old_argv, *size * sizeof(*argv)); ++ if (argv) { ++ if (old_argv) ++ memcpy(argv, old_argv, *size * 
sizeof(*argv)); + *size = new_size; + } + +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c +index c675dec587efb2..597b00e8c9539d 100644 +--- a/drivers/mmc/host/renesas_sdhi_core.c ++++ b/drivers/mmc/host/renesas_sdhi_core.c +@@ -1107,26 +1107,26 @@ int renesas_sdhi_probe(struct platform_device *pdev, + num_irqs = platform_irq_count(pdev); + if (num_irqs < 0) { + ret = num_irqs; +- goto eirq; ++ goto edisclk; + } + + /* There must be at least one IRQ source */ + if (!num_irqs) { + ret = -ENXIO; +- goto eirq; ++ goto edisclk; + } + + for (i = 0; i < num_irqs; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; +- goto eirq; ++ goto edisclk; + } + + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, + dev_name(&pdev->dev), host); + if (ret) +- goto eirq; ++ goto edisclk; + } + + ret = tmio_mmc_host_probe(host); +@@ -1138,8 +1138,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, + + return ret; + +-eirq: +- tmio_mmc_host_remove(host); + edisclk: + renesas_sdhi_clk_disable(host); + efree: +diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c +index 8d27933c3733b1..f91f25578f075b 100644 +--- a/drivers/net/dsa/ocelot/felix_vsc9959.c ++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c +@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) + struct tc_taprio_qopt_offload *taprio; + struct ocelot_port *ocelot_port; + struct timespec64 base_ts; +- int port; ++ int i, port; + u32 val; + + mutex_lock(&ocelot->fwd_domain_lock); +@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) + QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, + QSYS_PARAM_CFG_REG_3); + ++ for (i = 0; i < taprio->num_entries; i++) ++ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]); ++ + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL); +diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c +index fb7a5403e630db..889a18962270aa 100644 +--- a/drivers/net/ethernet/amd/pds_core/auxbus.c ++++ b/drivers/net/ethernet/amd/pds_core/auxbus.c +@@ -172,48 +172,57 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf, + return padev; + } + +-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf) ++void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf, ++ struct pds_auxiliary_dev **pd_ptr) + { + struct pds_auxiliary_dev *padev; +- int err = 0; ++ ++ if (!*pd_ptr) ++ return; + + mutex_lock(&pf->config_lock); + +- padev = pf->vfs[cf->vf_id].padev; +- if (padev) { +- pds_client_unregister(pf, padev->client_id); +- auxiliary_device_delete(&padev->aux_dev); +- auxiliary_device_uninit(&padev->aux_dev); +- padev->client_id = 0; +- } +- pf->vfs[cf->vf_id].padev = NULL; ++ padev = *pd_ptr; ++ pds_client_unregister(pf, padev->client_id); ++ auxiliary_device_delete(&padev->aux_dev); ++ auxiliary_device_uninit(&padev->aux_dev); ++ *pd_ptr = NULL; + + mutex_unlock(&pf->config_lock); +- return err; + } + +-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf) ++int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf, ++ enum pds_core_vif_types vt, ++ struct pds_auxiliary_dev **pd_ptr) + { + struct pds_auxiliary_dev *padev; +- enum pds_core_vif_types vt; + char devname[PDS_DEVNAME_LEN]; ++ unsigned long mask; + u16 vt_support; + int client_id; + int err = 0; + ++ if (!cf) ++ return -ENODEV; ++ ++ if (vt >= PDS_DEV_TYPE_MAX) ++ return -EINVAL; ++ 
+ mutex_lock(&pf->config_lock); + +- /* We only support vDPA so far, so it is the only one to +- * be verified that it is available in the Core device and +- * enabled in the devlink param. In the future this might +- * become a loop for several VIF types. +- */ ++ mask = BIT_ULL(PDSC_S_FW_DEAD) | ++ BIT_ULL(PDSC_S_STOPPING_DRIVER); ++ if (cf->state & mask) { ++ dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n", ++ __func__, cf->state); ++ err = -ENXIO; ++ goto out_unlock; ++ } + + /* Verify that the type is supported and enabled. It is not + * an error if there is no auxbus device support for this + * VF, it just means something else needs to happen with it. + */ +- vt = PDS_DEV_TYPE_VDPA; + vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]); + if (!(vt_support && + pf->viftype_status[vt].supported && +@@ -239,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf) + err = PTR_ERR(padev); + goto out_unlock; + } +- pf->vfs[cf->vf_id].padev = padev; ++ *pd_ptr = padev; + + out_unlock: + mutex_unlock(&pf->config_lock); +diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h +index 858bebf7977624..61ee607ee48ace 100644 +--- a/drivers/net/ethernet/amd/pds_core/core.h ++++ b/drivers/net/ethernet/amd/pds_core/core.h +@@ -300,8 +300,11 @@ void pdsc_health_thread(struct work_struct *work); + int pdsc_register_notify(struct notifier_block *nb); + void pdsc_unregister_notify(struct notifier_block *nb); + void pdsc_notify(unsigned long event, void *data); +-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf); +-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf); ++int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf, ++ enum pds_core_vif_types vt, ++ struct pds_auxiliary_dev **pd_ptr); ++void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf, ++ struct pds_auxiliary_dev **pd_ptr); + + void pdsc_process_adminq(struct pdsc_qcq *qcq); + void pdsc_work_thread(struct work_struct *work); +diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c +index f0e39ab4004503..e65a1632df505d 100644 +--- a/drivers/net/ethernet/amd/pds_core/dev.c ++++ b/drivers/net/ethernet/amd/pds_core/dev.c +@@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code) + return -ERANGE; + case PDS_RC_BAD_ADDR: + return -EFAULT; ++ case PDS_RC_BAD_PCI: ++ return -ENXIO; + case PDS_RC_EOPCODE: + case PDS_RC_EINTR: + case PDS_RC_DEV_CMD: +@@ -65,7 +67,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc) + /* Firmware is useful only if the running bit is set and + * fw_status != 0xff (bad PCI read) + */ +- return (pdsc->fw_status != 0xff) && ++ return (pdsc->fw_status != PDS_RC_BAD_PCI) && + (pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING); + } + +@@ -131,6 +133,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) + unsigned long max_wait; + unsigned long duration; + int timeout = 0; ++ bool running; + int done = 0; + int err = 0; + int status; +@@ -139,6 +142,10 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) + max_wait = start_time + (max_seconds * HZ); + + while (!done && !timeout) { ++ running = pdsc_is_fw_running(pdsc); ++ if (!running) ++ break; ++ + done = pdsc_devcmd_done(pdsc); + if (done) + break; +@@ -155,7 +162,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) + dev_dbg(dev, "DEVCMD %d %s after %ld secs\n", + opcode, pdsc_devcmd_str(opcode), duration / HZ); + +- if (!done || timeout) { ++ if ((!done || 
timeout) && running) { + dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n", + opcode, pdsc_devcmd_str(opcode), done, timeout, + max_seconds); +diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c +index 0032e8e3518117..bee70e46e34c68 100644 +--- a/drivers/net/ethernet/amd/pds_core/devlink.c ++++ b/drivers/net/ethernet/amd/pds_core/devlink.c +@@ -55,8 +55,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id, + for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) { + struct pdsc *vf = pdsc->vfs[vf_id].vf; + +- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) : +- pdsc_auxbus_dev_del(vf, pdsc); ++ if (ctx->val.vbool) ++ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id, ++ &pdsc->vfs[vf_id].padev); ++ else ++ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev); + } + + return err; +diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c +index eddbf0acdde77f..76652e0e5b6d9c 100644 +--- a/drivers/net/ethernet/amd/pds_core/main.c ++++ b/drivers/net/ethernet/amd/pds_core/main.c +@@ -189,7 +189,8 @@ static int pdsc_init_vf(struct pdsc *vf) + devl_unlock(dl); + + pf->vfs[vf->vf_id].vf = vf; +- err = pdsc_auxbus_dev_add(vf, pf); ++ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA, ++ &pf->vfs[vf->vf_id].padev); + if (err) { + devl_lock(dl); + devl_unregister(dl); +@@ -415,7 +416,7 @@ static void pdsc_remove(struct pci_dev *pdev) + + pf = pdsc_get_pf_struct(pdsc->pdev); + if (!IS_ERR(pf)) { +- pdsc_auxbus_dev_del(pdsc, pf); ++ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev); + pf->vfs[pdsc->vf_id].vf = NULL; + } + } else { +@@ -475,6 +476,15 @@ static void pdsc_reset_prepare(struct pci_dev *pdev) + pdsc_stop_health_thread(pdsc); + pdsc_fw_down(pdsc); + ++ if (pdev->is_virtfn) { ++ struct pdsc *pf; ++ ++ pf = pdsc_get_pf_struct(pdsc->pdev); ++ if (!IS_ERR(pf)) ++ pdsc_auxbus_dev_del(pdsc, pf, ++ &pf->vfs[pdsc->vf_id].padev); ++ } ++ + pdsc_unmap_bars(pdsc); + pci_release_regions(pdev); + pci_disable_device(pdev); +@@ -510,6 +520,15 @@ static void pdsc_reset_done(struct pci_dev *pdev) + + pdsc_fw_up(pdsc); + pdsc_restart_health_thread(pdsc); ++ ++ if (pdev->is_virtfn) { ++ struct pdsc *pf; ++ ++ pf = pdsc_get_pf_struct(pdsc->pdev); ++ if (!IS_ERR(pf)) ++ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA, ++ &pf->vfs[pdsc->vf_id].padev); ++ } + } + + static const struct pci_error_handlers pdsc_err_handler = { +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +index 230726d7b74f63..d41b58fad37bbf 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, + } + + /* Set up the header page info */ +- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, +- XGBE_SKB_ALLOC_SIZE); ++ if (pdata->netdev->features & NETIF_F_RXCSUM) { ++ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, ++ XGBE_SKB_ALLOC_SIZE); ++ } else { ++ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, ++ pdata->rx_buf_size); ++ } + + /* Set up the buffer page info */ + xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +index f393228d41c7be..f1b0fb02b3cd14 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct 
xgbe_prv_data *pdata) + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); + } + ++static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < pdata->channel_count; i++) { ++ if (!pdata->channel[i]->rx_ring) ++ break; ++ ++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); ++ } ++} ++ + static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, + unsigned int index, unsigned int val) + { +@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata) + xgbe_config_tx_coalesce(pdata); + xgbe_config_rx_buffer_size(pdata); + xgbe_config_tso_mode(pdata); +- xgbe_config_sph_mode(pdata); +- xgbe_config_rss(pdata); ++ ++ if (pdata->netdev->features & NETIF_F_RXCSUM) { ++ xgbe_config_sph_mode(pdata); ++ xgbe_config_rss(pdata); ++ } ++ + desc_if->wrapper_tx_desc_init(pdata); + desc_if->wrapper_rx_desc_init(pdata); + xgbe_enable_dma_interrupts(pdata); +@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) + hw_if->disable_vxlan = xgbe_disable_vxlan; + hw_if->set_vxlan_id = xgbe_set_vxlan_id; + ++ /* For Split Header*/ ++ hw_if->enable_sph = xgbe_config_sph_mode; ++ hw_if->disable_sph = xgbe_disable_sph_mode; ++ + DBGPR("<--xgbe_init_function_ptrs\n"); + } +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +index 6b73648b377936..34d45cebefb5d3 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev, + if (ret) + return ret; + +- if ((features & NETIF_F_RXCSUM) && !rxcsum) ++ if ((features & NETIF_F_RXCSUM) && !rxcsum) { ++ hw_if->enable_sph(pdata); ++ hw_if->enable_vxlan(pdata); + hw_if->enable_rx_csum(pdata); +- else if (!(features & NETIF_F_RXCSUM) && rxcsum) ++ schedule_work(&pdata->restart_work); ++ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) { ++ hw_if->disable_sph(pdata); ++ hw_if->disable_vxlan(pdata); + hw_if->disable_rx_csum(pdata); ++ schedule_work(&pdata->restart_work); ++ } + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) + hw_if->enable_rx_vlan_stripping(pdata); +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h +index ad136ed493ed1f..173f4dad470f55 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe.h ++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h +@@ -865,6 +865,10 @@ struct xgbe_hw_if { + void (*enable_vxlan)(struct xgbe_prv_data *); + void (*disable_vxlan)(struct xgbe_prv_data *); + void (*set_vxlan_id)(struct xgbe_prv_data *); ++ ++ /* For Split Header */ ++ void (*enable_sph)(struct xgbe_prv_data *pdata); ++ void (*disable_sph)(struct xgbe_prv_data *pdata); + }; + + /* This structure represents implementation specific routines for an +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +index c067898820360e..32813cdd5aa5cb 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +@@ -66,20 +66,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, + } + } + ++ if (cmn_req->req_type == ++ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) ++ info->dest_buf_size += len; ++ + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { +- memcpy(info->dest_buf + off, dma_buf, len); ++ u16 copylen = min_t(u16, len, ++ info->dest_buf_size - off); ++ ++ 
memcpy(info->dest_buf + off, dma_buf, copylen); ++ if (copylen < len) ++ break; + } else { + rc = -ENOBUFS; ++ if (cmn_req->req_type == ++ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { ++ kfree(info->dest_buf); ++ info->dest_buf = NULL; ++ } + break; + } + } + +- if (cmn_req->req_type == +- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) +- info->dest_buf_size += len; +- + if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) + break; + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +index 2e7ddbca9d53b1..dcedafa4d2e14f 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +@@ -1393,6 +1393,17 @@ static int bnxt_get_regs_len(struct net_device *dev) + return reg_len; + } + ++#define BNXT_PCIE_32B_ENTRY(start, end) \ ++ { offsetof(struct pcie_ctx_hw_stats, start), \ ++ offsetof(struct pcie_ctx_hw_stats, end) } ++ ++static const struct { ++ u16 start; ++ u16 end; ++} bnxt_pcie_32b_entries[] = { ++ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), ++}; ++ + static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *_p) + { +@@ -1424,12 +1435,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, + req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); + if (!rc) { +- __le64 *src = (__le64 *)hw_pcie_stats; +- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); +- int i; +- +- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) +- dst[i] = le64_to_cpu(src[i]); ++ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); ++ u8 *src = (u8 *)hw_pcie_stats; ++ int i, j; ++ ++ for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) { ++ if (i >= bnxt_pcie_32b_entries[j].start && ++ i <= bnxt_pcie_32b_entries[j].end) { ++ u32 *dst32 = (u32 *)(dst + i); ++ ++ *dst32 = le32_to_cpu(*(__le32 *)(src + i)); ++ i += 4; ++ if (i > bnxt_pcie_32b_entries[j].end && ++ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) ++ j++; ++ } else { ++ u64 *dst64 = (u64 *)(dst + i); ++ ++ *dst64 = le64_to_cpu(*(__le64 *)(src + i)); ++ i += 8; ++ } ++ } + } + hwrm_req_drop(bp, req); + } +diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c +index db6615aa921b19..ce46f3ac3b5a18 100644 +--- a/drivers/net/ethernet/dlink/dl2k.c ++++ b/drivers/net/ethernet/dlink/dl2k.c +@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev) + eth_hw_addr_set(dev, psrom->mac_addr); + + if (np->chip_id == CHIP_IP1000A) { +- np->led_mode = psrom->led_mode; ++ np->led_mode = le16_to_cpu(psrom->led_mode); + return 0; + } + +diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h +index 195dc6cfd8955c..0e33e2eaae9606 100644 +--- a/drivers/net/ethernet/dlink/dl2k.h ++++ b/drivers/net/ethernet/dlink/dl2k.h +@@ -335,7 +335,7 @@ typedef struct t_SROM { + u16 sub_system_id; /* 0x06 */ + u16 pci_base_1; /* 0x08 (IP1000A only) */ + u16 pci_base_2; /* 0x0a (IP1000A only) */ +- u16 led_mode; /* 0x0c (IP1000A only) */ ++ __le16 led_mode; /* 0x0c (IP1000A only) */ + u16 reserved1[9]; /* 0x0e-0x1f */ + u8 mac_addr[6]; /* 0x20-0x25 */ + u8 reserved2[10]; /* 0x26-0x2f */ +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 2d6b50903c923d..7261838a09db63 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -695,7 +695,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, + txq->bd.cur = 
bdp; + + /* Trigger transmission start */ +- writel(0, txq->bd.reg_desc_active); ++ if (!(fep->quirks & FEC_QUIRK_ERR007885) || ++ !readl(txq->bd.reg_desc_active) || ++ !readl(txq->bd.reg_desc_active) || ++ !readl(txq->bd.reg_desc_active) || ++ !readl(txq->bd.reg_desc_active)) ++ writel(0, txq->bd.reg_desc_active); + + return 0; + } +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +index 4f385a18d288e4..36206273453f3a 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { + .name = "tm_qset", + .cmd = HNAE3_DBG_CMD_TM_QSET, + .dentry = HNS3_DBG_DENTRY_TM, +- .buf_len = HNS3_DBG_READ_LEN, ++ .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +index 801801e8803e9f..0ed01f4d680618 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, + writel(mask_en, tqp_vector->mask_addr); + } + +-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) ++static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector) + { + napi_enable(&tqp_vector->napi); + enable_irq(tqp_vector->vector_irq); +- +- /* enable vector */ +- hns3_mask_vector_irq(tqp_vector, 1); + } + +-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) ++static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector) + { +- /* disable vector */ +- hns3_mask_vector_irq(tqp_vector, 0); +- + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); + cancel_work_sync(&tqp_vector->rx_group.dim.work); +@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev) + return 0; + } + ++static void hns3_enable_irqs_and_tqps(struct net_device *netdev) ++{ ++ struct hns3_nic_priv *priv = netdev_priv(netdev); ++ struct hnae3_handle *h = priv->ae_handle; ++ u16 i; ++ ++ for (i = 0; i < priv->vector_num; i++) ++ hns3_irq_enable(&priv->tqp_vector[i]); ++ ++ for (i = 0; i < priv->vector_num; i++) ++ hns3_mask_vector_irq(&priv->tqp_vector[i], 1); ++ ++ for (i = 0; i < h->kinfo.num_tqps; i++) ++ hns3_tqp_enable(h->kinfo.tqp[i]); ++} ++ ++static void hns3_disable_irqs_and_tqps(struct net_device *netdev) ++{ ++ struct hns3_nic_priv *priv = netdev_priv(netdev); ++ struct hnae3_handle *h = priv->ae_handle; ++ u16 i; ++ ++ for (i = 0; i < h->kinfo.num_tqps; i++) ++ hns3_tqp_disable(h->kinfo.tqp[i]); ++ ++ for (i = 0; i < priv->vector_num; i++) ++ hns3_mask_vector_irq(&priv->tqp_vector[i], 0); ++ ++ for (i = 0; i < priv->vector_num; i++) ++ hns3_irq_disable(&priv->tqp_vector[i]); ++} ++ + static int hns3_nic_net_up(struct net_device *netdev) + { + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; +- int i, j; + int ret; + + ret = hns3_nic_reset_all_ring(h); +@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev) + + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + +- /* enable the vectors */ +- for (i = 0; i < priv->vector_num; i++) +- hns3_vector_enable(&priv->tqp_vector[i]); +- +- /* enable rcb */ +- for (j = 0; j < h->kinfo.num_tqps; j++) +- hns3_tqp_enable(h->kinfo.tqp[j]); ++ hns3_enable_irqs_and_tqps(netdev); + + /* start 
the ae_dev */ + ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; + if (ret) { + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); +- while (j--) +- hns3_tqp_disable(h->kinfo.tqp[j]); +- +- for (j = i - 1; j >= 0; j--) +- hns3_vector_disable(&priv->tqp_vector[j]); ++ hns3_disable_irqs_and_tqps(netdev); + } + + return ret; +@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h) + static void hns3_nic_net_down(struct net_device *netdev) + { + struct hns3_nic_priv *priv = netdev_priv(netdev); +- struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops; +- int i; + +- /* disable vectors */ +- for (i = 0; i < priv->vector_num; i++) +- hns3_vector_disable(&priv->tqp_vector[i]); +- +- /* disable rcb */ +- for (i = 0; i < h->kinfo.num_tqps; i++) +- hns3_tqp_disable(h->kinfo.tqp[i]); ++ hns3_disable_irqs_and_tqps(netdev); + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; +@@ -5870,8 +5877,6 @@ int hns3_set_channels(struct net_device *netdev, + void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) + { + struct hns3_nic_priv *priv = netdev_priv(ndev); +- struct hnae3_handle *h = priv->ae_handle; +- int i; + + if (!if_running) + return; +@@ -5882,11 +5887,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) + netif_carrier_off(ndev); + netif_tx_disable(ndev); + +- for (i = 0; i < priv->vector_num; i++) +- hns3_vector_disable(&priv->tqp_vector[i]); +- +- for (i = 0; i < h->kinfo.num_tqps; i++) +- hns3_tqp_disable(h->kinfo.tqp[i]); ++ hns3_disable_irqs_and_tqps(ndev); + + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet + * during reset process, because driver may not be able +@@ -5902,7 +5903,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) + { + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; +- int i; + + if (!if_running) + return; +@@ -5918,11 +5918,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) + + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + +- for (i = 0; i < priv->vector_num; i++) +- hns3_vector_enable(&priv->tqp_vector[i]); +- +- for (i = 0; i < h->kinfo.num_tqps; i++) +- hns3_tqp_enable(h->kinfo.tqp[i]); ++ hns3_enable_irqs_and_tqps(ndev); + + netif_tx_wake_all_queues(ndev); + +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +index ddc691424c8163..9a806ac727cf5b 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +@@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) + ptp->info.settime64 = hclge_ptp_settime; + + ptp->info.n_alarm = 0; ++ ++ spin_lock_init(&ptp->lock); ++ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; ++ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; ++ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; ++ hdev->ptp = ptp; ++ + ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); + if (IS_ERR(ptp->clock)) { + dev_err(&hdev->pdev->dev, +@@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) + return -ENODEV; + } + +- spin_lock_init(&ptp->lock); +- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; +- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; +- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; +- hdev->ptp = ptp; +- + return 0; + } + +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +index 69bfcfb148def4..1ba0b57c7a72d7 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +@@ -1257,9 +1257,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) + rtnl_unlock(); + } + +-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) ++static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable) + { +- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, +@@ -1268,6 +1267,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + } + ++static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) ++{ ++ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); ++ int ret; ++ ++ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable); ++ if (ret) ++ return ret; ++ ++ hdev->rxvtag_strip_en = enable; ++ return 0; ++} ++ + static int hclgevf_reset_tqp(struct hnae3_handle *handle) + { + #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U +@@ -2143,12 +2155,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) + tc_valid, tc_size); + } + +-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) ++static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev, ++ bool rxvtag_strip_en) + { + struct hnae3_handle *nic = &hdev->nic; + int ret; + +- ret = hclgevf_en_hw_strip_rxvtag(nic, true); ++ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to enable rx vlan offload, ret = %d\n", ret); +@@ -2815,7 +2828,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) + if (ret) + return ret; + +- ret = hclgevf_init_vlan_config(hdev); ++ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); +@@ -2928,7 +2941,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) + goto err_config; + } + +- ret = hclgevf_init_vlan_config(hdev); ++ ret = hclgevf_init_vlan_config(hdev, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +index cccef32284616b..0208425ab594f5 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +@@ -253,6 +253,7 @@ struct hclgevf_dev { + int *vector_irq; + + bool gro_en; ++ bool rxvtag_strip_en; + + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +index 3ca5f44dea26eb..88c1acd5e8f05d 100644 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +@@ -1824,6 +1824,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) + pf = vf->pf; + dev = ice_pf_to_dev(pf); + vf_vsi = ice_get_vf_vsi(vf); ++ if (!vf_vsi) { ++ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); ++ v_ret = VIRTCHNL_STATUS_ERR_PARAM; ++ goto err_exit; ++ } + + #define ICE_VF_MAX_FDIR_FILTERS 128 + if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) || +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c 
b/drivers/net/ethernet/intel/igc/igc_ptp.c +index b6bb01a486d9d8..a82af96e6bd12f 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c +@@ -1237,6 +1237,8 @@ void igc_ptp_reset(struct igc_adapter *adapter) + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + ++ mutex_lock(&adapter->ptm_lock); ++ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { +@@ -1255,7 +1257,6 @@ void igc_ptp_reset(struct igc_adapter *adapter) + if (!igc_is_crosststamp_supported(adapter)) + break; + +- mutex_lock(&adapter->ptm_lock); + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + +@@ -1279,7 +1280,6 @@ void igc_ptp_reset(struct igc_adapter *adapter) + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + + igc_ptm_reset(hw); +- mutex_unlock(&adapter->ptm_lock); + break; + default: + /* No work to do. */ +@@ -1296,5 +1296,7 @@ void igc_ptp_reset(struct igc_adapter *adapter) + out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ++ mutex_unlock(&adapter->ptm_lock); ++ + wrfl(); + } +diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +index 6f1fe7e283d4eb..7a30095b3486f3 100644 +--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c ++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +@@ -917,7 +917,7 @@ static void octep_hb_timeout_task(struct work_struct *work) + miss_cnt); + rtnl_lock(); + if (netif_running(oct->netdev)) +- octep_stop(oct->netdev); ++ dev_close(oct->netdev); + rtnl_unlock(); + } + +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index dc89dbc13b251f..d2ec8f642c2fa0 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2180,14 +2180,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, + ring->data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; + release_desc: ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { ++ if (unlikely(dma_addr == DMA_MAPPING_ERROR)) ++ addr64 = FIELD_GET(RX_DMA_ADDR64_MASK, ++ rxd->rxd2); ++ else ++ addr64 = RX_DMA_PREP_ADDR64(dma_addr); ++ } ++ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + rxd->rxd2 = RX_DMA_LSO; + else +- rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); +- +- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && +- likely(dma_addr != DMA_MAPPING_ERROR)) +- rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); ++ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64; + + ring->calc_idx = idx; + done++; +diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c +index 25989c79c92e61..c2ab87828d8589 100644 +--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c ++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c +@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) + struct net_device *ndev = priv->ndev; + unsigned int head = ring->head; + unsigned int entry = ring->tail; ++ unsigned long flags; + + while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) { + ret = mtk_star_tx_complete_one(priv); +@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) + netif_wake_queue(ndev); + + if (napi_complete(napi)) { +- spin_lock(&priv->lock); ++ spin_lock_irqsave(&priv->lock, flags); + mtk_star_enable_dma_irq(priv, false, 
true); +- spin_unlock(&priv->lock); ++ spin_unlock_irqrestore(&priv->lock, flags); + } + + return 0; +@@ -1341,16 +1342,16 @@ static int mtk_star_rx(struct mtk_star_priv *priv, int budget) + static int mtk_star_rx_poll(struct napi_struct *napi, int budget) + { + struct mtk_star_priv *priv; ++ unsigned long flags; + int work_done = 0; + + priv = container_of(napi, struct mtk_star_priv, rx_napi); + + work_done = mtk_star_rx(priv, budget); +- if (work_done < budget) { +- napi_complete_done(napi, work_done); +- spin_lock(&priv->lock); ++ if (work_done < budget && napi_complete_done(napi, work_done)) { ++ spin_lock_irqsave(&priv->lock, flags); + mtk_star_enable_dma_irq(priv, true, false); +- spin_unlock(&priv->lock); ++ spin_unlock_irqrestore(&priv->lock, flags); + } + + return work_done; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 7eba3a5bb97cae..326c72b3df8671 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -3499,7 +3499,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + int err; + + mutex_init(&esw->offloads.termtbl_mutex); +- mlx5_rdma_enable_roce(esw->dev); ++ err = mlx5_rdma_enable_roce(esw->dev); ++ if (err) ++ goto err_roce; + + err = mlx5_esw_host_number_init(esw); + if (err) +@@ -3560,6 +3562,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + esw_offloads_metadata_uninit(esw); + err_metadata: + mlx5_rdma_disable_roce(esw->dev); ++err_roce: + mutex_destroy(&esw->offloads.termtbl_mutex); + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +index a42f6cd99b7448..5c552b71e371c5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid * + + static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) + { ++ u8 mac[ETH_ALEN] = {}; + union ib_gid gid; +- u8 mac[ETH_ALEN]; + + mlx5_rdma_make_default_gid(dev, &gid); + return mlx5_core_roce_gid_set(dev, 0, +@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) + mlx5_nic_vport_disable_roce(dev); + } + +-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) ++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) + { + int err; + + if (!MLX5_CAP_GEN(dev, roce)) +- return; ++ return 0; + + err = mlx5_nic_vport_enable_roce(dev); + if (err) { + mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err); +- return; ++ return err; + } + + err = mlx5_rdma_add_roce_addr(dev); +@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) + goto del_roce_addr; + } + +- return; ++ return err; + + del_roce_addr: + mlx5_rdma_del_roce_addr(dev); + disable_roce: + mlx5_nic_vport_disable_roce(dev); ++ return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h +index 750cff2a71a4bb..3d9e76c3d42fb1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h +@@ -8,12 +8,12 @@ + + #ifdef CONFIG_MLX5_ESWITCH + +-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); ++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); + void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev); + + #else /* CONFIG_MLX5_ESWITCH */ + +-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev 
*dev) {} ++static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; } + static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {} + + #endif /* CONFIG_MLX5_ESWITCH */ +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c +index 92010bfe5e4133..5d2ceff72784f2 100644 +--- a/drivers/net/ethernet/microchip/lan743x_main.c ++++ b/drivers/net/ethernet/microchip/lan743x_main.c +@@ -1949,6 +1949,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, + if (nr_frags <= 0) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; ++ tx->frame_last = tx->frame_first; + } + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); +@@ -2018,6 +2019,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, + tx->frame_first = 0; + tx->frame_data0 = 0; + tx->frame_tail = 0; ++ tx->frame_last = 0; + return -ENOMEM; + } + +@@ -2058,16 +2060,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, + TX_DESC_DATA0_DTYPE_DATA_) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; ++ tx->frame_last = tx->frame_tail; + } + +- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; +- buffer_info = &tx->buffer_info[tx->frame_tail]; ++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last]; ++ buffer_info = &tx->buffer_info[tx->frame_last]; + buffer_info->skb = skb; + if (time_stamp) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; + if (ignore_sync) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + ++ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx->last_tail = tx->frame_tail; +diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h +index 3b2c6046eb3ad5..b6c83c68241e63 100644 +--- a/drivers/net/ethernet/microchip/lan743x_main.h ++++ b/drivers/net/ethernet/microchip/lan743x_main.h +@@ -974,6 +974,7 @@ struct lan743x_tx { + u32 frame_first; + u32 frame_data0; + u32 frame_tail; ++ u32 frame_last; + + struct lan743x_tx_buffer_info *buffer_info; + +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c +index f6aa5d6b6597e0..252d8e6f18c3cc 100644 +--- a/drivers/net/ethernet/mscc/ocelot.c ++++ b/drivers/net/ethernet/mscc/ocelot.c +@@ -453,9 +453,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot, + return VLAN_N_VID - bridge_num - 1; + } + ++/** ++ * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID ++ * ++ * @ocelot: Switch private data structure ++ * @port: Index of ingress port ++ * ++ * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN ++ * component conformance" suggest that a C-VLAN component should only recognize ++ * and filter on C-Tags, and an S-VLAN component should only recognize and ++ * process based on C-Tags. ++ * ++ * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN ++ * components are largely represented by a bridge with vlan_protocol 802.1Q, ++ * and S-VLAN components by a bridge with vlan_protocol 802.1ad. 
++ * ++ * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware ++ * design is non-conformant, because the switch assigns each frame to a VLAN ++ * based on an entirely different question, as detailed in figure "Basic VLAN ++ * Classification Flow" from its manual and reproduced below. ++ * ++ * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register ++ * if VLAN_AWARE_ENA[port] and frame has outer tag then: ++ * if VLAN_INNER_TAG_ENA[port] and frame has inner tag then: ++ * TAG_TYPE = (Frame.InnerTPID <> 0x8100) ++ * Set PCP, DEI, VID to values from inner VLAN header ++ * else: ++ * TAG_TYPE = (Frame.OuterTPID <> 0x8100) ++ * Set PCP, DEI, VID to values from outer VLAN header ++ * if VID == 0 then: ++ * VID = VLAN_CFG.VLAN_VID ++ * ++ * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN ++ * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1 ++ * (if 802.1ad). It will classify based on whichever of the tags is "outer", no ++ * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]). ++ * ++ * In the VLAN Table, the TAG_TYPE information is not accessible - just the ++ * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs: ++ * C-VLAN X, and S-VLAN X. ++ * ++ * Whereas the Linux bridge behavior is to only filter on frames with a TPID ++ * equal to the vlan_protocol, and treat everything else as VLAN-untagged. ++ * ++ * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5, ++ * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame ++ * should be treated as 802.1Q-untagged, and classified to the PVID of that ++ * bridge port. Not to VID=3, and not to VID=5. ++ * ++ * The VCAP IS1 TCAM has everything we need to overwrite the choices made in ++ * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key, ++ * and it can modify the classified VID in the action. Thus, for each port ++ * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to ++ * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q ++ * PVID of the port. This effectively makes it appear to the outside world as ++ * if those packets were processed as VLAN-untagged. ++ * ++ * The rule needs to be updated each time the bridge PVID changes, and needs ++ * to be deleted if the bridge PVID is deleted, or if the port becomes ++ * VLAN-unaware. ++ */ ++static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port) ++{ ++ unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port); ++ struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1]; ++ struct ocelot_port *ocelot_port = ocelot->ports[port]; ++ const struct ocelot_bridge_vlan *pvid_vlan; ++ struct ocelot_vcap_filter *filter; ++ int err, val, pcp, dei; ++ bool vid_replace_ena; ++ u16 vid; ++ ++ pvid_vlan = ocelot_port->pvid_vlan; ++ vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan; ++ ++ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie, ++ false); ++ if (!vid_replace_ena) { ++ /* If the reclassification filter doesn't need to exist, delete ++ * it if it was previously installed, and exit doing nothing ++ * otherwise. ++ */ ++ if (filter) ++ return ocelot_vcap_filter_del(ocelot, filter); ++ ++ return 0; ++ } ++ ++ /* The reclassification rule must apply. See if it already exists ++ * or if it must be created. 
++ */ ++ ++ /* Treating as VLAN-untagged means using as classified VID equal to ++ * the bridge PVID, and PCP/DEI set to the port default QoS values. ++ */ ++ vid = pvid_vlan->vid; ++ val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port); ++ pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val); ++ dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL); ++ ++ if (filter) { ++ bool changed = false; ++ ++ /* Filter exists, just update it */ ++ if (filter->action.vid != vid) { ++ filter->action.vid = vid; ++ changed = true; ++ } ++ if (filter->action.pcp != pcp) { ++ filter->action.pcp = pcp; ++ changed = true; ++ } ++ if (filter->action.dei != dei) { ++ filter->action.dei = dei; ++ changed = true; ++ } ++ ++ if (!changed) ++ return 0; ++ ++ return ocelot_vcap_filter_replace(ocelot, filter); ++ } ++ ++ /* Filter doesn't exist, create it */ ++ filter = kzalloc(sizeof(*filter), GFP_KERNEL); ++ if (!filter) ++ return -ENOMEM; ++ ++ filter->key_type = OCELOT_VCAP_KEY_ANY; ++ filter->ingress_port_mask = BIT(port); ++ filter->vlan.tpid = OCELOT_VCAP_BIT_1; ++ filter->prio = 1; ++ filter->id.cookie = cookie; ++ filter->id.tc_offload = false; ++ filter->block_id = VCAP_IS1; ++ filter->type = OCELOT_VCAP_FILTER_OFFLOAD; ++ filter->lookup = 0; ++ filter->action.vid_replace_ena = true; ++ filter->action.pcp_dei_ena = true; ++ filter->action.vid = vid; ++ filter->action.pcp = pcp; ++ filter->action.dei = dei; ++ ++ err = ocelot_vcap_filter_add(ocelot, filter, NULL); ++ if (err) ++ kfree(filter); ++ ++ return err; ++} ++ + /* Default vlan to clasify for untagged frames (may be zero) */ +-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, +- const struct ocelot_bridge_vlan *pvid_vlan) ++static int ocelot_port_set_pvid(struct ocelot *ocelot, int port, ++ const struct ocelot_bridge_vlan *pvid_vlan) + { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); +@@ -475,15 +624,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, + * happens automatically), but also 802.1p traffic which gets + * classified to VLAN 0, but that is always in our RX filter, so it + * would get accepted were it not for this setting. ++ * ++ * Also, we only support the bridge 802.1Q VLAN protocol, so ++ * 802.1ad-tagged frames (carrying S-Tags) should be considered ++ * 802.1Q-untagged, and also dropped. 
+ */ + if (!pvid_vlan && ocelot_port->vlan_aware) + val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | +- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; ++ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA | ++ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA; + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | +- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, ++ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA | ++ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA, + ANA_PORT_DROP_CFG, port); ++ ++ return ocelot_update_vlan_reclassify_rule(ocelot, port); + } + + static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot, +@@ -631,7 +788,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, + ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, + ANA_PORT_VLAN_CFG, port); + +- ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); ++ err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); ++ if (err) ++ return err; ++ + ocelot_port_manage_port_tag(ocelot, port); + + return 0; +@@ -670,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare); + int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, + bool untagged) + { ++ struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + /* Ignore VID 0 added to our RX filter by the 8021q module, since +@@ -684,9 +845,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, + return err; + + /* Default ingress vlan classification */ +- if (pvid) +- ocelot_port_set_pvid(ocelot, port, +- ocelot_bridge_vlan_find(ocelot, vid)); ++ if (pvid) { ++ err = ocelot_port_set_pvid(ocelot, port, ++ ocelot_bridge_vlan_find(ocelot, vid)); ++ if (err) ++ return err; ++ } else if (ocelot_port->pvid_vlan && ++ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) { ++ err = ocelot_port_set_pvid(ocelot, port, NULL); ++ if (err) ++ return err; ++ } + + /* Untagged egress vlan clasification */ + ocelot_port_manage_port_tag(ocelot, port); +@@ -712,8 +881,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) + return err; + + /* Ingress */ +- if (del_pvid) +- ocelot_port_set_pvid(ocelot, port, NULL); ++ if (del_pvid) { ++ err = ocelot_port_set_pvid(ocelot, port, NULL); ++ if (err) ++ return err; ++ } + + /* Egress */ + ocelot_port_manage_port_tag(ocelot, port); +@@ -2607,7 +2779,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio) + ANA_PORT_QOS_CFG, + port); + +- return 0; ++ return ocelot_update_vlan_reclassify_rule(ocelot, port); + } + EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio); + +diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c +index 73cdec5ca6a34d..5734b86aed5b53 100644 +--- a/drivers/net/ethernet/mscc/ocelot_vcap.c ++++ b/drivers/net/ethernet/mscc/ocelot_vcap.c +@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix, + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged); ++ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid); + vcap_key_set(vcap, &data, VCAP_IS1_HK_VID, + tag->vid.value, tag->vid.mask); + vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP, +diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c +index 8f67c39f479eef..060a566bc6aae1 100644 +--- a/drivers/net/ethernet/vertexcom/mse102x.c ++++ b/drivers/net/ethernet/vertexcom/mse102x.c +@@ -6,6 +6,7 @@ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " 
fmt + ++#include + #include + #include + #include +@@ -33,7 +34,7 @@ + #define CMD_CTR (0x2 << CMD_SHIFT) + + #define CMD_MASK GENMASK(15, CMD_SHIFT) +-#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) ++#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0) + + #define DET_CMD_LEN 4 + #define DET_SOF_LEN 2 +@@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, + } + + static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, +- unsigned int frame_len) ++ unsigned int frame_len, bool drop) + { + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; +@@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; ++ } else if (drop) { ++ netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__); ++ ret = -EINVAL; + } else if (*sof != cpu_to_be16(DET_SOF)) { + netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", + __func__, *sof); +@@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) + struct sk_buff *skb; + unsigned int rxalign; + unsigned int rxlen; ++ bool drop = false; + __be16 rx = 0; + u16 cmd_resp; + u8 *rxpkt; +@@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, cmd_resp); + mse->stats.invalid_rts++; +- return; ++ drop = true; ++ goto drop; + } + + net_dbg_ratelimited("%s: Unexpected response to first CMD\n", +@@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) + } + + rxlen = cmd_resp & LEN_MASK; +- if (!rxlen) { +- net_dbg_ratelimited("%s: No frame length defined\n", __func__); ++ if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) { ++ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__, ++ rxlen); + mse->stats.invalid_len++; +- return; ++ drop = true; + } + ++ /* In case of a invalid CMD_RTS, the frame must be consumed anyway. ++ * So assume the maximum possible frame length. ++ */ ++drop: ++ if (drop) ++ rxlen = VLAN_ETH_FRAME_LEN; ++ + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); + skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); + if (!skb) +@@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) + * They are copied, but ignored. + */ + rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; +- if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { ++ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { + mse->ndev->stats.rx_errors++; + dev_kfree_skb(skb); + return; +@@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse) + static int mse102x_net_open(struct net_device *ndev) + { + struct mse102x_net *mse = netdev_priv(ndev); ++ struct mse102x_net_spi *mses = to_mse102x_spi(mse); + int ret; + + ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, +@@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev) + + netif_carrier_on(ndev); + ++ /* The SPI interrupt can stuck in case of pending packet(s). ++ * So poll for possible packet(s) to re-arm the interrupt. 
++ */ ++ mutex_lock(&mses->lock); ++ mse102x_rx_pkt_spi(mse); ++ mutex_unlock(&mses->lock); ++ + netif_dbg(mse, ifup, ndev, "network device up\n"); + + return 0; +diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c +index 76188575ca1fcf..19153d44800a94 100644 +--- a/drivers/net/mdio/mdio-mux-meson-gxl.c ++++ b/drivers/net/mdio/mdio-mux-meson-gxl.c +@@ -17,6 +17,7 @@ + #define REG2_LEDACT GENMASK(23, 22) + #define REG2_LEDLINK GENMASK(25, 24) + #define REG2_DIV4SEL BIT(27) ++#define REG2_REVERSED BIT(28) + #define REG2_ADCBYPASS BIT(30) + #define REG2_CLKINSEL BIT(31) + #define ETH_REG3 0x4 +@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv) + * The only constraint is that it must match the one in + * drivers/net/phy/meson-gxl.c to properly match the PHY. + */ +- writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), ++ writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), + priv->regs + ETH_REG2); + + /* Enable the internal phy */ +diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c +index bb0bf141587274..7b3739b29c8f72 100644 +--- a/drivers/net/usb/rndis_host.c ++++ b/drivers/net/usb/rndis_host.c +@@ -630,16 +630,6 @@ static const struct driver_info zte_rndis_info = { + .tx_fixup = rndis_tx_fixup, + }; + +-static const struct driver_info wwan_rndis_info = { +- .description = "Mobile Broadband RNDIS device", +- .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, +- .bind = rndis_bind, +- .unbind = rndis_unbind, +- .status = rndis_status, +- .rx_fixup = rndis_rx_fixup, +- .tx_fixup = rndis_tx_fixup, +-}; +- + /*-------------------------------------------------------------------------*/ + + static const struct usb_device_id products [] = { +@@ -676,11 +666,9 @@ static const struct usb_device_id products [] = { + USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), + .driver_info = (unsigned long) &rndis_info, + }, { +- /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and +- * Telit FN990A (RNDIS) +- */ ++ /* Novatel Verizon USB730L */ + USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), +- .driver_info = (unsigned long)&wwan_rndis_info, ++ .driver_info = (unsigned long) &rndis_info, + }, + { }, // END + }; +diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c +index 6e6e9f05509ab0..06d19e90eadb59 100644 +--- a/drivers/net/vxlan/vxlan_vnifilter.c ++++ b/drivers/net/vxlan/vxlan_vnifilter.c +@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, + * default dst remote_ip previously added for this vni + */ + if (!vxlan_addr_any(&vninode->remote_ip) || +- !vxlan_addr_any(&dst->remote_ip)) ++ !vxlan_addr_any(&dst->remote_ip)) { ++ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, ++ vninode->vni); ++ ++ spin_lock_bh(&vxlan->hash_lock[hash_index]); + __vxlan_fdb_delete(vxlan, all_zeros_mac, + (vxlan_addr_any(&vninode->remote_ip) ? 
+ dst->remote_ip : vninode->remote_ip), +@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, + vninode->vni, vninode->vni, + dst->remote_ifindex, + true); ++ spin_unlock_bh(&vxlan->hash_lock[hash_index]); ++ } + + if (vxlan->dev->flags & IFF_UP) { + if (vxlan_addr_multicast(&vninode->remote_ip) && +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +index 2178675ae1a44d..6f64a05debd2cb 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +@@ -903,14 +903,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) + } + + /* 1) Prepare USB boot loader for runtime image */ +- brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); ++ err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); ++ if (err) ++ goto fail; + + rdlstate = le32_to_cpu(state.state); + rdlbytes = le32_to_cpu(state.bytes); + + /* 2) Check we are in the Waiting state */ + if (rdlstate != DL_WAITING) { +- brcmf_err("Failed to DL_START\n"); ++ brcmf_err("Invalid DL state: %u\n", rdlstate); + err = -EINVAL; + goto fail; + } +diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c +index 506d2f31efb5af..7ebc0df0944cb5 100644 +--- a/drivers/net/wireless/purelifi/plfxlc/mac.c ++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c +@@ -103,7 +103,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw) + void plfxlc_mac_release(struct plfxlc_mac *mac) + { + plfxlc_chip_release(&mac->chip); +- lockdep_assert_held(&mac->lock); + } + + int plfxlc_op_start(struct ieee80211_hw *hw) +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index a763df0200ab46..fdde38903ebcd5 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3377,7 +3377,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) + + dev_info(dev->ctrl.device, "restart after slot reset\n"); + pci_restore_state(pdev); +- if (!nvme_try_sched_reset(&dev->ctrl)) ++ if (nvme_try_sched_reset(&dev->ctrl)) + nvme_unquiesce_io_queues(&dev->ctrl); + return PCI_ERS_RESULT_RECOVERED; + } +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c +index 84db7f4f861cb1..5b76670f34be29 100644 +--- a/drivers/nvme/host/tcp.c ++++ b/drivers/nvme/host/tcp.c +@@ -1710,7 +1710,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) + cancel_work_sync(&queue->io_work); + } + +-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) ++static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid) + { + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; +@@ -1724,6 +1724,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) + mutex_unlock(&queue->queue_lock); + } + ++static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid) ++{ ++ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); ++ struct nvme_tcp_queue *queue = &ctrl->queues[qid]; ++ int timeout = 100; ++ ++ while (timeout > 0) { ++ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || ++ !sk_wmem_alloc_get(queue->sock->sk)) ++ return; ++ msleep(2); ++ timeout -= 2; ++ } ++ dev_warn(nctrl->device, ++ "qid %d: timeout draining sock wmem allocation expired\n", ++ qid); ++} ++ ++static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) ++{ ++ nvme_tcp_stop_queue_nowait(nctrl, qid); ++ nvme_tcp_wait_queue(nctrl, qid); ++} ++ ++ + static 
void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) + { + write_lock_bh(&queue->sock->sk->sk_callback_lock); +@@ -1790,7 +1815,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) + int i; + + for (i = 1; i < ctrl->queue_count; i++) +- nvme_tcp_stop_queue(ctrl, i); ++ nvme_tcp_stop_queue_nowait(ctrl, i); ++ for (i = 1; i < ctrl->queue_count; i++) ++ nvme_tcp_wait_queue(ctrl, i); + } + + static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, +diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c +index 822a750b064b27..cedfbd4258631d 100644 +--- a/drivers/pci/controller/dwc/pci-imx6.c ++++ b/drivers/pci/controller/dwc/pci-imx6.c +@@ -1283,7 +1283,8 @@ static int imx6_pcie_probe(struct platform_device *pdev) + case IMX8MQ_EP: + if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) + imx6_pcie->controller_id = 1; +- ++ fallthrough; ++ case IMX7D: + imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, + "pciephy"); + if (IS_ERR(imx6_pcie->pciephy_reset)) { +diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c +index 70907e8f3ea96d..946a546cd9dd01 100644 +--- a/drivers/platform/x86/amd/pmc/pmc.c ++++ b/drivers/platform/x86/amd/pmc/pmc.c +@@ -823,10 +823,9 @@ static void amd_pmc_s2idle_check(void) + struct smu_metrics table; + int rc; + +- /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */ +- if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) && +- table.s0i3_last_entry_status) +- usleep_range(10000, 20000); ++ /* Avoid triggering OVP */ ++ if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status) ++ msleep(2500); + + /* Dump the IdleMask before we add to the STB */ + amd_pmc_idlemask_read(pdev, pdev->dev, NULL); +diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +index a3b25253b6fdeb..2c9c5cc7d854ed 100644 +--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c ++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +@@ -121,15 +121,13 @@ static int uncore_event_cpu_online(unsigned int cpu) + { + struct uncore_data *data; + int target; ++ int ret; + + /* Check if there is an online cpu in the package for uncore MSR */ + target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + +- /* Use this CPU on this die as a control CPU */ +- cpumask_set_cpu(cpu, &uncore_cpu_mask); +- + data = uncore_get_instance(cpu); + if (!data) + return 0; +@@ -138,7 +136,14 @@ static int uncore_event_cpu_online(unsigned int cpu) + data->die_id = topology_die_id(cpu); + data->domain_id = UNCORE_DOMAIN_ID_INVALID; + +- return uncore_freq_add_entry(data, cpu); ++ ret = uncore_freq_add_entry(data, cpu); ++ if (ret) ++ return ret; ++ ++ /* Use this CPU on this die as a control CPU */ ++ cpumask_set_cpu(cpu, &uncore_cpu_mask); ++ ++ return 0; + } + + static int uncore_event_cpu_offline(unsigned int cpu) +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c +index 460f232dad508b..147d7052794f77 100644 +--- a/drivers/spi/spi-tegra114.c ++++ b/drivers/spi/spi-tegra114.c +@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) + u32 inactive_cycles; + u8 cs_state; + +- if (setup->unit != SPI_DELAY_UNIT_SCK || +- hold->unit != SPI_DELAY_UNIT_SCK || +- inactive->unit != SPI_DELAY_UNIT_SCK) { ++ if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) || ++ (hold->unit && 
hold->unit != SPI_DELAY_UNIT_SCK) || ++ (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) { + dev_err(&spi->dev, + "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", + SPI_DELAY_UNIT_SCK); +diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c +index 99baa60ef50fe9..15a8402ee8a17a 100644 +--- a/drivers/usb/host/xhci-debugfs.c ++++ b/drivers/usb/host/xhci-debugfs.c +@@ -693,7 +693,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci) + "command-ring", + xhci->debugfs_root); + +- xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring, ++ xhci_debugfs_create_ring_dir(xhci, &xhci->interrupters[0]->event_ring, + "event-ring", + xhci->debugfs_root); + +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 0df5d807a77e8f..a2b6a922077ee3 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -1880,9 +1880,10 @@ int xhci_bus_resume(struct usb_hcd *hcd) + int slot_id; + int sret; + u32 next_state; +- u32 temp, portsc; ++ u32 portsc; + struct xhci_hub *rhub; + struct xhci_port **ports; ++ bool disabled_irq = false; + + rhub = xhci_get_rhub(hcd); + ports = rhub->ports; +@@ -1898,17 +1899,20 @@ int xhci_bus_resume(struct usb_hcd *hcd) + return -ESHUTDOWN; + } + +- /* delay the irqs */ +- temp = readl(&xhci->op_regs->command); +- temp &= ~CMD_EIE; +- writel(temp, &xhci->op_regs->command); +- + /* bus specific resume for ports we suspended at bus_suspend */ +- if (hcd->speed >= HCD_USB3) ++ if (hcd->speed >= HCD_USB3) { + next_state = XDEV_U0; +- else ++ } else { + next_state = XDEV_RESUME; +- ++ if (bus_state->bus_suspended) { ++ /* ++ * prevent port event interrupts from interfering ++ * with usb2 port resume process ++ */ ++ xhci_disable_interrupter(xhci->interrupters[0]); ++ disabled_irq = true; ++ } ++ } + port_index = max_ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); +@@ -1977,11 +1981,9 @@ int xhci_bus_resume(struct usb_hcd *hcd) + (void) readl(&xhci->op_regs->command); + + bus_state->next_statechange = jiffies + msecs_to_jiffies(5); +- /* re-enable irqs */ +- temp = readl(&xhci->op_regs->command); +- temp |= CMD_EIE; +- writel(temp, &xhci->op_regs->command); +- temp = readl(&xhci->op_regs->command); ++ /* re-enable interrupter */ ++ if (disabled_irq) ++ xhci_enable_interrupter(xhci->interrupters[0]); + + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index fbc486546b8533..22cca89efbfd72 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -29,6 +29,7 @@ + static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, + unsigned int cycle_state, + unsigned int max_packet, ++ unsigned int num, + gfp_t flags) + { + struct xhci_segment *seg; +@@ -60,6 +61,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, + for (i = 0; i < TRBS_PER_SEGMENT; i++) + seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE); + } ++ seg->num = num; + seg->dma = dma; + seg->next = NULL; + +@@ -316,6 +318,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring, + */ + ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; + } ++EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); + + /* Allocate segments and link them for a ring */ + static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, +@@ -324,6 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) + { + struct 
xhci_segment *prev; ++ unsigned int num = 0; + bool chain_links; + + /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */ +@@ -331,16 +335,17 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, + (type == TYPE_ISOC && + (xhci->quirks & XHCI_AMD_0x96_HOST))); + +- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); ++ prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags); + if (!prev) + return -ENOMEM; +- num_segs--; ++ num++; + + *first = prev; +- while (num_segs > 0) { ++ while (num < num_segs) { + struct xhci_segment *next; + +- next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); ++ next = xhci_segment_alloc(xhci, cycle_state, max_packet, num, ++ flags); + if (!next) { + prev = *first; + while (prev) { +@@ -353,7 +358,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, + xhci_link_segments(prev, next, type, chain_links); + + prev = next; +- num_segs--; ++ num++; + } + xhci_link_segments(prev, *first, type, chain_links); + *last = prev; +@@ -1799,23 +1804,13 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, + } + + static void +-xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) ++xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) + { +- struct device *dev = xhci_to_hcd(xhci)->self.sysdev; +- size_t erst_size; +- u64 tmp64; + u32 tmp; + + if (!ir) + return; + +- erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries; +- if (ir->erst.entries) +- dma_free_coherent(dev, erst_size, +- ir->erst.entries, +- ir->erst.erst_dma_addr); +- ir->erst.entries = NULL; +- + /* + * Clean out interrupter registers except ERSTBA. Clearing either the + * low or high 32 bits of ERSTBA immediately causes the controller to +@@ -1826,19 +1821,60 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) + tmp &= ERST_SIZE_MASK; + writel(tmp, &ir->ir_set->erst_size); + +- tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); +- tmp64 &= (u64) ERST_PTR_MASK; +- xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue); ++ xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); + } ++} + +- /* free interrrupter event ring */ ++static void ++xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) ++{ ++ struct device *dev = xhci_to_hcd(xhci)->self.sysdev; ++ size_t erst_size; ++ ++ if (!ir) ++ return; ++ ++ erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries; ++ if (ir->erst.entries) ++ dma_free_coherent(dev, erst_size, ++ ir->erst.entries, ++ ir->erst.erst_dma_addr); ++ ir->erst.entries = NULL; ++ ++ /* free interrupter event ring */ + if (ir->event_ring) + xhci_ring_free(xhci, ir->event_ring); ++ + ir->event_ring = NULL; + + kfree(ir); + } + ++void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir) ++{ ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ unsigned int intr_num; ++ ++ spin_lock_irq(&xhci->lock); ++ ++ /* interrupter 0 is primary interrupter, don't touch it */ ++ if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) { ++ xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n"); ++ spin_unlock_irq(&xhci->lock); ++ return; ++ } ++ ++ intr_num = ir->intr_num; ++ ++ xhci_remove_interrupter(xhci, ir); ++ xhci->interrupters[intr_num] = NULL; ++ ++ spin_unlock_irq(&xhci->lock); ++ ++ xhci_free_interrupter(xhci, ir); ++} ++EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter); ++ + void xhci_mem_cleanup(struct xhci_hcd *xhci) + { + struct device *dev = 
xhci_to_hcd(xhci)->self.sysdev; +@@ -1846,9 +1882,14 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + + cancel_delayed_work_sync(&xhci->cmd_timer); + +- xhci_free_interrupter(xhci, xhci->interrupter); +- xhci->interrupter = NULL; +- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring"); ++ for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) { ++ if (xhci->interrupters[i]) { ++ xhci_remove_interrupter(xhci, xhci->interrupters[i]); ++ xhci_free_interrupter(xhci, xhci->interrupters[i]); ++ xhci->interrupters[i] = NULL; ++ } ++ } ++ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters"); + + if (xhci->cmd_ring) + xhci_ring_free(xhci, xhci->cmd_ring); +@@ -1918,6 +1959,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + for (i = 0; i < xhci->num_port_caps; i++) + kfree(xhci->port_caps[i].psi); + kfree(xhci->port_caps); ++ kfree(xhci->interrupters); + xhci->num_port_caps = 0; + + xhci->usb2_rhub.ports = NULL; +@@ -1926,6 +1968,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + xhci->rh_bw = NULL; + xhci->ext_caps = NULL; + xhci->port_caps = NULL; ++ xhci->interrupters = NULL; + + xhci->page_size = 0; + xhci->page_shift = 0; +@@ -1935,7 +1978,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + + static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir) + { +- u64 temp; + dma_addr_t deq; + + deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, +@@ -1943,15 +1985,12 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter + if (!deq) + xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n"); + /* Update HC event ring dequeue pointer */ +- temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); +- temp &= ERST_PTR_MASK; + /* Don't clear the EHB bit (which is RW1C) because + * there might be more events to service. 
+ */ +- temp &= ~ERST_EHB; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "// Write event ring dequeue pointer, preserving EHB bit"); +- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, ++ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK), + &ir->ir_set->erst_dequeue); + } + +@@ -2236,18 +2275,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) + } + + static struct xhci_interrupter * +-xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags) ++xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) + { + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_interrupter *ir; ++ unsigned int max_segs; + int ret; + ++ if (!segs) ++ segs = ERST_DEFAULT_SEGS; ++ ++ max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2)); ++ segs = min(segs, max_segs); ++ + ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev)); + if (!ir) + return NULL; + +- ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, +- 0, flags); ++ ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags); + if (!ir->event_ring) { + xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); + kfree(ir); +@@ -2278,12 +2323,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + return -EINVAL; + } + ++ if (xhci->interrupters[intr_num]) { ++ xhci_warn(xhci, "Interrupter %d\n already set up", intr_num); ++ return -EINVAL; ++ } ++ ++ xhci->interrupters[intr_num] = ir; ++ ir->intr_num = intr_num; + ir->ir_set = &xhci->run_regs->ir_set[intr_num]; + + /* set ERST count with the number of entries in the segment table */ + erst_size = readl(&ir->ir_set->erst_size); + erst_size &= ERST_SIZE_MASK; +- erst_size |= ERST_NUM_SEGS; ++ erst_size |= ir->event_ring->num_segs; + writel(erst_size, &ir->ir_set->erst_size); + + erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); +@@ -2300,10 +2352,58 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + return 0; + } + ++struct xhci_interrupter * ++xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, ++ u32 imod_interval) ++{ ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct xhci_interrupter *ir; ++ unsigned int i; ++ int err = -ENOSPC; ++ ++ if (!xhci->interrupters || xhci->max_interrupters <= 1) ++ return NULL; ++ ++ ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL); ++ if (!ir) ++ return NULL; ++ ++ spin_lock_irq(&xhci->lock); ++ ++ /* Find available secondary interrupter, interrupter 0 is reserved for primary */ ++ for (i = 1; i < xhci->max_interrupters; i++) { ++ if (xhci->interrupters[i] == NULL) { ++ err = xhci_add_interrupter(xhci, ir, i); ++ break; ++ } ++ } ++ ++ spin_unlock_irq(&xhci->lock); ++ ++ if (err) { ++ xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n", ++ xhci->max_interrupters); ++ xhci_free_interrupter(xhci, ir); ++ return NULL; ++ } ++ ++ err = xhci_set_interrupter_moderation(ir, imod_interval); ++ if (err) ++ xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n", ++ i, imod_interval); ++ ++ xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", ++ i, xhci->max_interrupters); ++ ++ return ir; ++} ++EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter); ++ + int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + { +- dma_addr_t dma; ++ struct xhci_interrupter *ir; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; ++ dma_addr_t dma; + unsigned int val, val2; + u64 val_64; + u32 page_size, temp; +@@ -2428,11 +2528,14 @@ int 
xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + /* Allocate and set up primary interrupter 0 with an event ring. */ + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Allocating primary event ring"); +- xhci->interrupter = xhci_alloc_interrupter(xhci, flags); +- if (!xhci->interrupter) ++ xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters), ++ flags, dev_to_node(dev)); ++ ++ ir = xhci_alloc_interrupter(xhci, 0, flags); ++ if (!ir) + goto fail; + +- if (xhci_add_interrupter(xhci, xhci->interrupter, 0)) ++ if (xhci_add_interrupter(xhci, ir, 0)) + goto fail; + + xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index cb944396294516..5a53280fa2edfd 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3167,7 +3167,7 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + return; + + /* Update HC event ring dequeue pointer */ +- temp_64 &= ERST_DESI_MASK; ++ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; + temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); + } + +@@ -3225,7 +3225,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) + writel(status, &xhci->op_regs->status); + + /* This is the handler of the primary interrupter */ +- ir = xhci->interrupter; ++ ir = xhci->interrupters[0]; + if (!hcd->msi_enabled) { + u32 irq_pending; + irq_pending = readl(&ir->ir_set->irq_pending); +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 70e6c240a5409f..ce38cd2435c8c3 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -297,7 +297,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) + xhci_info(xhci, "Fault detected\n"); + } + +-static int xhci_enable_interrupter(struct xhci_interrupter *ir) ++int xhci_enable_interrupter(struct xhci_interrupter *ir) + { + u32 iman; + +@@ -310,7 +310,7 @@ static int xhci_enable_interrupter(struct xhci_interrupter *ir) + return 0; + } + +-static int xhci_disable_interrupter(struct xhci_interrupter *ir) ++int xhci_disable_interrupter(struct xhci_interrupter *ir) + { + u32 iman; + +@@ -323,6 +323,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir) + return 0; + } + ++/* interrupt moderation interval imod_interval in nanoseconds */ ++int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, ++ u32 imod_interval) ++{ ++ u32 imod; ++ ++ if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250) ++ return -EINVAL; ++ ++ imod = readl(&ir->ir_set->irq_control); ++ imod &= ~ER_IRQ_INTERVAL_MASK; ++ imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK; ++ writel(imod, &ir->ir_set->irq_control); ++ ++ return 0; ++} ++ + static void compliance_mode_recovery(struct timer_list *t) + { + struct xhci_hcd *xhci; +@@ -457,7 +474,7 @@ static int xhci_init(struct usb_hcd *hcd) + + static int xhci_run_finished(struct xhci_hcd *xhci) + { +- struct xhci_interrupter *ir = xhci->interrupter; ++ struct xhci_interrupter *ir = xhci->interrupters[0]; + unsigned long flags; + u32 temp; + +@@ -505,11 +522,10 @@ static int xhci_run_finished(struct xhci_hcd *xhci) + */ + int xhci_run(struct usb_hcd *hcd) + { +- u32 temp; + u64 temp_64; + int ret; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); +- struct xhci_interrupter *ir = xhci->interrupter; ++ struct xhci_interrupter *ir = xhci->interrupters[0]; + /* Start the xHCI host controller running only after the USB 2.0 roothub + * is setup. 
+ */ +@@ -525,12 +541,7 @@ int xhci_run(struct usb_hcd *hcd) + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "ERST deq = 64'h%0lx", (long unsigned int) temp_64); + +- xhci_dbg_trace(xhci, trace_xhci_dbg_init, +- "// Set the interrupt modulation register"); +- temp = readl(&ir->ir_set->irq_control); +- temp &= ~ER_IRQ_INTERVAL_MASK; +- temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; +- writel(temp, &ir->ir_set->irq_control); ++ xhci_set_interrupter_moderation(ir, xhci->imod_interval); + + if (xhci->quirks & XHCI_NEC_HOST) { + struct xhci_command *command; +@@ -573,7 +584,7 @@ void xhci_stop(struct usb_hcd *hcd) + { + u32 temp; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); +- struct xhci_interrupter *ir = xhci->interrupter; ++ struct xhci_interrupter *ir = xhci->interrupters[0]; + + mutex_lock(&xhci->mutex); + +@@ -669,36 +680,51 @@ EXPORT_SYMBOL_GPL(xhci_shutdown); + #ifdef CONFIG_PM + static void xhci_save_registers(struct xhci_hcd *xhci) + { +- struct xhci_interrupter *ir = xhci->interrupter; ++ struct xhci_interrupter *ir; ++ unsigned int i; + + xhci->s3.command = readl(&xhci->op_regs->command); + xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); + xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); + xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); + +- if (!ir) +- return; ++ /* save both primary and all secondary interrupters */ ++ /* fixme, shold we lock to prevent race with remove secondary interrupter? */ ++ for (i = 0; i < xhci->max_interrupters; i++) { ++ ir = xhci->interrupters[i]; ++ if (!ir) ++ continue; + +- ir->s3_erst_size = readl(&ir->ir_set->erst_size); +- ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); +- ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); +- ir->s3_irq_pending = readl(&ir->ir_set->irq_pending); +- ir->s3_irq_control = readl(&ir->ir_set->irq_control); ++ ir->s3_erst_size = readl(&ir->ir_set->erst_size); ++ ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); ++ ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); ++ ir->s3_irq_pending = readl(&ir->ir_set->irq_pending); ++ ir->s3_irq_control = readl(&ir->ir_set->irq_control); ++ } + } + + static void xhci_restore_registers(struct xhci_hcd *xhci) + { +- struct xhci_interrupter *ir = xhci->interrupter; ++ struct xhci_interrupter *ir; ++ unsigned int i; + + writel(xhci->s3.command, &xhci->op_regs->command); + writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); + xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); + writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); +- writel(ir->s3_erst_size, &ir->ir_set->erst_size); +- xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base); +- xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); +- writel(ir->s3_irq_pending, &ir->ir_set->irq_pending); +- writel(ir->s3_irq_control, &ir->ir_set->irq_control); ++ ++ /* FIXME should we lock to protect against freeing of interrupters */ ++ for (i = 0; i < xhci->max_interrupters; i++) { ++ ir = xhci->interrupters[i]; ++ if (!ir) ++ continue; ++ ++ writel(ir->s3_erst_size, &ir->ir_set->erst_size); ++ xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base); ++ xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); ++ writel(ir->s3_irq_pending, &ir->ir_set->irq_pending); ++ writel(ir->s3_irq_control, &ir->ir_set->irq_control); ++ } + } + + static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +@@ -1061,7 +1087,7 @@ int xhci_resume(struct xhci_hcd *xhci, 
pm_message_t msg) + xhci_dbg(xhci, "// Disabling event ring interrupts\n"); + temp = readl(&xhci->op_regs->status); + writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); +- xhci_disable_interrupter(xhci->interrupter); ++ xhci_disable_interrupter(xhci->interrupters[0]); + + xhci_dbg(xhci, "cleaning up memory\n"); + xhci_mem_cleanup(xhci); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index df87e8bcb7d246..74bdd035d756a4 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1293,6 +1293,7 @@ struct xhci_segment { + union xhci_trb *trbs; + /* private to HCD */ + struct xhci_segment *next; ++ unsigned int num; + dma_addr_t dma; + /* Max packet sized bounce buffer for td-fragmant alignment */ + dma_addr_t bounce_dma; +@@ -1422,12 +1423,8 @@ struct urb_priv { + struct xhci_td td[]; + }; + +-/* +- * Each segment table entry is 4*32bits long. 1K seems like an ok size: +- * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table, +- * meaning 64 ring segments. +- * Initial allocated size of the ERST, in number of entries */ +-#define ERST_NUM_SEGS 1 ++/* Number of Event Ring segments to allocate, when amount is not specified. (spec allows 32k) */ ++#define ERST_DEFAULT_SEGS 2 + /* Poll every 60 seconds */ + #define POLL_TIMEOUT 60 + /* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ +@@ -1552,7 +1549,7 @@ struct xhci_hcd { + struct reset_control *reset; + /* data structures */ + struct xhci_device_context_array *dcbaa; +- struct xhci_interrupter *interrupter; ++ struct xhci_interrupter **interrupters; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; + #define CMD_RING_STATE_RUNNING (1 << 0) +@@ -1869,6 +1866,11 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, + int type, gfp_t flags); + void xhci_free_container_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); ++struct xhci_interrupter * ++xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, ++ u32 imod_interval); ++void xhci_remove_secondary_interrupter(struct usb_hcd ++ *hcd, struct xhci_interrupter *ir); + + /* xHCI host controller glue */ + typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); +@@ -1904,6 +1906,10 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); ++int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, ++ u32 imod_interval); ++int xhci_enable_interrupter(struct xhci_interrupter *ir); ++int xhci_disable_interrupter(struct xhci_interrupter *ir); + + /* xHCI ring, segment, TRB, and TD functions */ + dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 11c4d69177f0ca..48d2579236729d 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -2058,12 +2058,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode, + + /* + * If the found extent starts after requested offset, then +- * adjust extent_end to be right before this extent begins ++ * adjust cur_offset to be right before this extent begins. 
+ */ + if (found_key.offset > cur_offset) { +- extent_end = found_key.offset; +- extent_type = 0; +- goto must_cow; ++ if (cow_start == (u64)-1) ++ cow_start = cur_offset; ++ cur_offset = found_key.offset; ++ goto next_slot; + } + + /* +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index 0af3535e08f308..4536b6fcfa0256 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -2932,6 +2932,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, + req->CreateContextsOffset = cpu_to_le32( + sizeof(struct smb2_create_req) + + iov[1].iov_len); ++ le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len); + pc_buf = iov[n_iov-1].iov_base; + } + +diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c +index 5345d2417c7fc9..f4b20b80af0620 100644 +--- a/fs/smb/server/auth.c ++++ b/fs/smb/server/auth.c +@@ -546,7 +546,19 @@ int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob, + retval = -ENOMEM; + goto out; + } +- sess->user = user; ++ ++ if (!sess->user) { ++ /* First successful authentication */ ++ sess->user = user; ++ } else { ++ if (!ksmbd_compare_user(sess->user, user)) { ++ ksmbd_debug(AUTH, "different user tried to reuse session\n"); ++ retval = -EPERM; ++ ksmbd_free_user(user); ++ goto out; ++ } ++ ksmbd_free_user(user); ++ } + + memcpy(sess->sess_key, resp->payload, resp->session_key_len); + memcpy(out_blob, resp->payload + resp->session_key_len, +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index d41d67ec5ee51a..13750a5e5ba02e 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1599,11 +1599,6 @@ static int krb5_authenticate(struct ksmbd_work *work, + if (prev_sess_id && prev_sess_id != sess->id) + destroy_previous_session(conn, sess->user, prev_sess_id); + +- if (sess->state == SMB2_SESSION_VALID) { +- ksmbd_free_user(sess->user); +- sess->user = NULL; +- } +- + retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, + out_blob, &out_len); + if (retval) { +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index 035e627f94f62d..17de12a98f858a 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -1430,6 +1430,7 @@ struct bpf_prog_aux { + bool sleepable; + bool tail_call_reachable; + bool xdp_has_frags; ++ bool changes_pkt_data; + /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ + const struct btf_type *attach_func_proto; + /* function name for valid attach_btf_id */ +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h +index 92919d52f7e1b2..32e89758176be8 100644 +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -573,6 +573,7 @@ struct bpf_subprog_info { + bool tail_call_reachable; + bool has_ld_abs; + bool is_async_cb; ++ bool changes_pkt_data; + }; + + struct bpf_verifier_env; +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h +index 9ca4211c063f39..184a84dd467ec7 100644 +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -787,8 +787,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, + int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); + + int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, +- unsigned int target_freq, +- unsigned int relation); ++ unsigned int target_freq, unsigned int min, ++ unsigned int max, unsigned int relation); + int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, + unsigned int freq); + +@@ -853,12 +853,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy 
*policy, + return best; + } + +-/* Works only on sorted freq-tables */ +-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, +- unsigned int target_freq, +- bool efficiencies) ++static inline int find_index_l(struct cpufreq_policy *policy, ++ unsigned int target_freq, ++ unsigned int min, unsigned int max, ++ bool efficiencies) + { +- target_freq = clamp_val(target_freq, policy->min, policy->max); ++ target_freq = clamp_val(target_freq, min, max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_al(policy, target_freq, +@@ -868,6 +868,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + efficiencies); + } + ++/* Works only on sorted freq-tables */ ++static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, ++ unsigned int target_freq, ++ bool efficiencies) ++{ ++ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies); ++} ++ + /* Find highest freq at or below target in a table in ascending order */ + static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, + unsigned int target_freq, +@@ -921,12 +929,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, + return best; + } + +-/* Works only on sorted freq-tables */ +-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, +- unsigned int target_freq, +- bool efficiencies) ++static inline int find_index_h(struct cpufreq_policy *policy, ++ unsigned int target_freq, ++ unsigned int min, unsigned int max, ++ bool efficiencies) + { +- target_freq = clamp_val(target_freq, policy->min, policy->max); ++ target_freq = clamp_val(target_freq, min, max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ah(policy, target_freq, +@@ -936,6 +944,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + efficiencies); + } + ++/* Works only on sorted freq-tables */ ++static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, ++ unsigned int target_freq, ++ bool efficiencies) ++{ ++ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies); ++} ++ + /* Find closest freq to target in a table in ascending order */ + static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, + unsigned int target_freq, +@@ -1006,12 +1022,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, + return best; + } + +-/* Works only on sorted freq-tables */ +-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, +- unsigned int target_freq, +- bool efficiencies) ++static inline int find_index_c(struct cpufreq_policy *policy, ++ unsigned int target_freq, ++ unsigned int min, unsigned int max, ++ bool efficiencies) + { +- target_freq = clamp_val(target_freq, policy->min, policy->max); ++ target_freq = clamp_val(target_freq, min, max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ac(policy, target_freq, +@@ -1021,7 +1037,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + efficiencies); + } + +-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) ++/* Works only on sorted freq-tables */ ++static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, ++ unsigned int target_freq, ++ bool efficiencies) ++{ ++ return find_index_c(policy, target_freq, 
policy->min, policy->max, efficiencies); ++} ++ ++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, ++ unsigned int min, unsigned int max, ++ int idx) + { + unsigned int freq; + +@@ -1030,11 +1056,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) + + freq = policy->freq_table[idx].frequency; + +- return freq == clamp_val(freq, policy->min, policy->max); ++ return freq == clamp_val(freq, min, max); + } + + static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + unsigned int target_freq, ++ unsigned int min, ++ unsigned int max, + unsigned int relation) + { + bool efficiencies = policy->efficiencies_available && +@@ -1045,29 +1073,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + relation &= ~CPUFREQ_RELATION_E; + + if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) +- return cpufreq_table_index_unsorted(policy, target_freq, +- relation); ++ return cpufreq_table_index_unsorted(policy, target_freq, min, ++ max, relation); + retry: + switch (relation) { + case CPUFREQ_RELATION_L: +- idx = cpufreq_table_find_index_l(policy, target_freq, +- efficiencies); ++ idx = find_index_l(policy, target_freq, min, max, efficiencies); + break; + case CPUFREQ_RELATION_H: +- idx = cpufreq_table_find_index_h(policy, target_freq, +- efficiencies); ++ idx = find_index_h(policy, target_freq, min, max, efficiencies); + break; + case CPUFREQ_RELATION_C: +- idx = cpufreq_table_find_index_c(policy, target_freq, +- efficiencies); ++ idx = find_index_c(policy, target_freq, min, max, efficiencies); + break; + default: + WARN_ON_ONCE(1); + return 0; + } + +- /* Limit frequency index to honor policy->min/max */ +- if (!cpufreq_is_in_limits(policy, idx) && efficiencies) { ++ /* Limit frequency index to honor min and max */ ++ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) { + efficiencies = false; + goto retry; + } +diff --git a/include/linux/filter.h b/include/linux/filter.h +index 5090e940ba3e46..adf65eacade062 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -915,7 +915,7 @@ bool bpf_jit_needs_zext(void); + bool bpf_jit_supports_subprog_tailcalls(void); + bool bpf_jit_supports_kfunc_call(void); + bool bpf_jit_supports_far_kfunc_call(void); +-bool bpf_helper_changes_pkt_data(void *func); ++bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id); + + static inline bool bpf_dump_raw_ok(const struct cred *cred) + { +diff --git a/include/linux/module.h b/include/linux/module.h +index a98e188cf37b81..f2a8624eef1eca 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -162,6 +162,8 @@ extern void cleanup_module(void); + #define __INITRODATA_OR_MODULE __INITRODATA + #endif /*CONFIG_MODULES*/ + ++struct module_kobject *lookup_or_create_module_kobject(const char *name); ++ + /* Generic info of form tag = "info" */ + #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) + +diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h +index e838a2b90440ca..17a87c1a55d7c7 100644 +--- a/include/linux/pds/pds_core_if.h ++++ b/include/linux/pds/pds_core_if.h +@@ -79,6 +79,7 @@ enum pds_core_status_code { + PDS_RC_EVFID = 31, /* VF ID does not exist */ + PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */ + PDS_RC_ECLIENT = 33, /* No such client id */ ++ PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */ + }; + + /** +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 
5f11f987334190..f7d392d849be56 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -685,6 +685,11 @@ typedef unsigned int sk_buff_data_t; + typedef unsigned char *sk_buff_data_t; + #endif + ++enum skb_tstamp_type { ++ SKB_CLOCK_REALTIME, ++ SKB_CLOCK_MONOTONIC, ++}; ++ + /** + * DOC: Basic sk_buff geometry + * +@@ -804,10 +809,8 @@ typedef unsigned char *sk_buff_data_t; + * @dst_pending_confirm: need to confirm neighbour + * @decrypted: Decrypted SKB + * @slow_gro: state present at GRO time, slower prepare step required +- * @mono_delivery_time: When set, skb->tstamp has the +- * delivery_time in mono clock base (i.e. EDT). Otherwise, the +- * skb->tstamp has the (rcv) timestamp at ingress and +- * delivery_time at egress. ++ * @tstamp_type: When set, skb->tstamp has the ++ * delivery_time clock base of skb->tstamp. + * @napi_id: id of the NAPI struct this skb came from + * @sender_cpu: (aka @napi_id) source CPU in XPS + * @alloc_cpu: CPU which did the skb allocation. +@@ -935,7 +938,7 @@ struct sk_buff { + /* private: */ + __u8 __mono_tc_offset[0]; + /* public: */ +- __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ ++ __u8 tstamp_type:1; /* See skb_tstamp_type */ + #ifdef CONFIG_NET_XGRESS + __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ + __u8 tc_skip_classify:1; +@@ -4189,7 +4192,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb, + static inline void __net_timestamp(struct sk_buff *skb) + { + skb->tstamp = ktime_get_real(); +- skb->mono_delivery_time = 0; ++ skb->tstamp_type = SKB_CLOCK_REALTIME; + } + + static inline ktime_t net_timedelta(ktime_t t) +@@ -4198,10 +4201,33 @@ static inline ktime_t net_timedelta(ktime_t t) + } + + static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, +- bool mono) ++ u8 tstamp_type) + { + skb->tstamp = kt; +- skb->mono_delivery_time = kt && mono; ++ ++ if (kt) ++ skb->tstamp_type = tstamp_type; ++ else ++ skb->tstamp_type = SKB_CLOCK_REALTIME; ++} ++ ++static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb, ++ ktime_t kt, clockid_t clockid) ++{ ++ u8 tstamp_type = SKB_CLOCK_REALTIME; ++ ++ switch (clockid) { ++ case CLOCK_REALTIME: ++ break; ++ case CLOCK_MONOTONIC: ++ tstamp_type = SKB_CLOCK_MONOTONIC; ++ break; ++ default: ++ WARN_ON_ONCE(1); ++ kt = 0; ++ } ++ ++ skb_set_delivery_time(skb, kt, tstamp_type); + } + + DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); +@@ -4211,8 +4237,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); + */ + static inline void skb_clear_delivery_time(struct sk_buff *skb) + { +- if (skb->mono_delivery_time) { +- skb->mono_delivery_time = 0; ++ if (skb->tstamp_type) { ++ skb->tstamp_type = SKB_CLOCK_REALTIME; + if (static_branch_unlikely(&netstamp_needed_key)) + skb->tstamp = ktime_get_real(); + else +@@ -4222,7 +4248,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb) + + static inline void skb_clear_tstamp(struct sk_buff *skb) + { +- if (skb->mono_delivery_time) ++ if (skb->tstamp_type) + return; + + skb->tstamp = 0; +@@ -4230,7 +4256,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb) + + static inline ktime_t skb_tstamp(const struct sk_buff *skb) + { +- if (skb->mono_delivery_time) ++ if (skb->tstamp_type) + return 0; + + return skb->tstamp; +@@ -4238,7 +4264,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb) + + static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) + { +- if (!skb->mono_delivery_time && skb->tstamp) ++ if (skb->tstamp_type != 
SKB_CLOCK_MONOTONIC && skb->tstamp) + return skb->tstamp; + + if (static_branch_unlikely(&netstamp_needed_key) || cond) +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h +index 153960663ce4c2..5af6eb14c5db15 100644 +--- a/include/net/inet_frag.h ++++ b/include/net/inet_frag.h +@@ -76,7 +76,7 @@ struct frag_v6_compare_key { + * @stamp: timestamp of the last received fragment + * @len: total length of the original datagram + * @meat: length of received fragments so far +- * @mono_delivery_time: stamp has a mono delivery time (EDT) ++ * @tstamp_type: stamp has a mono delivery time (EDT) + * @flags: fragment queue flags + * @max_size: maximum received fragment size + * @fqdir: pointer to struct fqdir +@@ -97,7 +97,7 @@ struct inet_frag_queue { + ktime_t stamp; + int len; + int meat; +- u8 mono_delivery_time; ++ u8 tstamp_type; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; +diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h +index c601a4598b0da8..eb19668a06db17 100644 +--- a/include/soc/mscc/ocelot_vcap.h ++++ b/include/soc/mscc/ocelot_vcap.h +@@ -13,6 +13,7 @@ + */ + #define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port)) + #define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port) ++#define OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port) ((ocelot)->num_phys_ports + (port)) + #define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port) + #define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port)) + #define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2) +@@ -499,6 +500,7 @@ struct ocelot_vcap_key_vlan { + struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ + enum ocelot_vcap_bit dei; /* DEI */ + enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ ++ enum ocelot_vcap_bit tpid; + }; + + struct ocelot_vcap_key_etype { +diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h +index d099ae27f8491a..682499b871eac4 100644 +--- a/include/sound/ump_convert.h ++++ b/include/sound/ump_convert.h +@@ -19,7 +19,7 @@ struct ump_cvt_to_ump_bank { + /* context for converting from MIDI1 byte stream to UMP packet */ + struct ump_cvt_to_ump { + /* MIDI1 intermediate buffer */ +- unsigned char buf[4]; ++ unsigned char buf[6]; /* up to 6 bytes for SysEx */ + int len; + int cmd_bytes; + +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index 02f327f05fd619..81fd1bb9941644 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -2893,7 +2893,7 @@ void __weak bpf_jit_compile(struct bpf_prog *prog) + { + } + +-bool __weak bpf_helper_changes_pkt_data(void *func) ++bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id) + { + return false; + } +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index e443506b0a65a1..756e179a1efe3e 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2636,16 +2636,36 @@ static int cmp_subprogs(const void *a, const void *b) + ((struct bpf_subprog_info *)b)->start; + } + ++/* Find subprogram that contains instruction at 'off' */ ++static struct bpf_subprog_info *find_containing_subprog(struct bpf_verifier_env *env, int off) ++{ ++ struct bpf_subprog_info *vals = env->subprog_info; ++ int l, r, m; ++ ++ if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0) ++ return NULL; ++ ++ l = 0; ++ r = env->subprog_cnt - 1; ++ while (l < r) { ++ m = l + (r - l + 1) / 2; ++ if (vals[m].start <= off) ++ l = m; ++ else ++ r = m - 1; ++ } ++ return &vals[l]; ++} ++ ++/* Find subprogram that starts exactly at 'off' 
*/ + static int find_subprog(struct bpf_verifier_env *env, int off) + { + struct bpf_subprog_info *p; + +- p = bsearch(&off, env->subprog_info, env->subprog_cnt, +- sizeof(env->subprog_info[0]), cmp_subprogs); +- if (!p) ++ p = find_containing_subprog(env, off); ++ if (!p || p->start != off) + return -ENOENT; + return p - env->subprog_info; +- + } + + static int add_subprog(struct bpf_verifier_env *env, int off) +@@ -9344,6 +9364,8 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + + if (env->log.level & BPF_LOG_LEVEL) + verbose(env, "Func#%d is global and valid. Skipping.\n", subprog); ++ if (env->subprog_info[subprog].changes_pkt_data) ++ clear_all_pkt_pointers(env); + clear_caller_saved_regs(env, caller->regs); + + /* All global functions return a 64-bit SCALAR_VALUE */ +@@ -9987,7 +10009,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn + } + + /* With LD_ABS/IND some JITs save/restore skb from r1. */ +- changes_data = bpf_helper_changes_pkt_data(fn->func); ++ changes_data = bpf_helper_changes_pkt_data(func_id); + if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { + verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", + func_id_name(func_id), func_id); +@@ -15094,6 +15116,29 @@ static int check_return_code(struct bpf_verifier_env *env) + return 0; + } + ++static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off) ++{ ++ struct bpf_subprog_info *subprog; ++ ++ subprog = find_containing_subprog(env, off); ++ subprog->changes_pkt_data = true; ++} ++ ++/* 't' is an index of a call-site. ++ * 'w' is a callee entry point. ++ * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED. ++ * Rely on DFS traversal order and absence of recursive calls to guarantee that ++ * callee's change_pkt_data marks would be correct at that moment. ++ */ ++static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w) ++{ ++ struct bpf_subprog_info *caller, *callee; ++ ++ caller = find_containing_subprog(env, t); ++ callee = find_containing_subprog(env, w); ++ caller->changes_pkt_data |= callee->changes_pkt_data; ++} ++ + /* non-recursive DFS pseudo code + * 1 procedure DFS-iterative(G,v): + * 2 label v as discovered +@@ -15227,6 +15272,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, + bool visit_callee) + { + int ret, insn_sz; ++ int w; + + insn_sz = bpf_is_ldimm64(&insns[t]) ? 
2 : 1; + ret = push_insn(t, t + insn_sz, FALLTHROUGH, env); +@@ -15238,8 +15284,10 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, + mark_jmp_point(env, t + insn_sz); + + if (visit_callee) { ++ w = t + insns[t].imm + 1; + mark_prune_point(env, t); +- ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); ++ merge_callee_effects(env, t, w); ++ ret = push_insn(t, w, BRANCH, env); + } + return ret; + } +@@ -15291,6 +15339,8 @@ static int visit_insn(int t, struct bpf_verifier_env *env) + mark_prune_point(env, t); + mark_jmp_point(env, t); + } ++ if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm)) ++ mark_subprog_changes_pkt_data(env, t); + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { + struct bpf_kfunc_call_arg_meta meta; + +@@ -15412,6 +15462,7 @@ static int check_cfg(struct bpf_verifier_env *env) + } + } + ret = 0; /* cfg looks good */ ++ env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data; + + err_free: + kvfree(insn_state); +@@ -18572,6 +18623,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) + } + func[i]->aux->num_exentries = num_exentries; + func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; ++ func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data; + func[i] = bpf_int_jit_compile(func[i]); + if (!func[i]->jited) { + err = -ENOTSUPP; +@@ -19856,6 +19908,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, + } + if (tgt_prog) { + struct bpf_prog_aux *aux = tgt_prog->aux; ++ bool tgt_changes_pkt_data; + + if (bpf_prog_is_dev_bound(prog->aux) && + !bpf_prog_dev_bound_match(prog, tgt_prog)) { +@@ -19884,6 +19937,14 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, + "Extension programs should be JITed\n"); + return -EINVAL; + } ++ tgt_changes_pkt_data = aux->func ++ ? 
aux->func[subprog]->aux->changes_pkt_data ++ : aux->changes_pkt_data; ++ if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) { ++ bpf_log(log, ++ "Extension program changes packet data, while original does not\n"); ++ return -EINVAL; ++ } + } + if (!tgt_prog->jited) { + bpf_log(log, "Can attach to only JITed progs\n"); +@@ -20343,10 +20404,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 + if (ret < 0) + goto skip_full_check; + +- ret = check_attach_btf_id(env); +- if (ret) +- goto skip_full_check; +- + ret = resolve_pseudo_ldimm64(env); + if (ret < 0) + goto skip_full_check; +@@ -20361,6 +20418,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 + if (ret < 0) + goto skip_full_check; + ++ ret = check_attach_btf_id(env); ++ if (ret) ++ goto skip_full_check; ++ + ret = do_check_subprogs(env); + ret = ret ?: do_check_main(env); + +diff --git a/kernel/params.c b/kernel/params.c +index 2d4a0564697e83..c7aed3c51cd538 100644 +--- a/kernel/params.c ++++ b/kernel/params.c +@@ -759,7 +759,7 @@ void destroy_params(const struct kernel_param *params, unsigned num) + params[i].ops->free(params[i].arg); + } + +-static struct module_kobject * __init locate_module_kobject(const char *name) ++struct module_kobject __modinit * lookup_or_create_module_kobject(const char *name) + { + struct module_kobject *mk; + struct kobject *kobj; +@@ -801,7 +801,7 @@ static void __init kernel_add_sysfs_param(const char *name, + struct module_kobject *mk; + int err; + +- mk = locate_module_kobject(name); ++ mk = lookup_or_create_module_kobject(name); + if (!mk) + return; + +@@ -872,7 +872,7 @@ static void __init version_sysfs_builtin(void) + int err; + + for (vattr = __start___modver; vattr < __stop___modver; vattr++) { +- mk = locate_module_kobject(vattr->module_name); ++ mk = lookup_or_create_module_kobject(vattr->module_name); + if (mk) { + err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); + WARN_ON_ONCE(err); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index a41c99350a5bf7..95868c31573007 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -7027,13 +7027,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, + /* Copy the data into the page, so we can start over. 
*/ + ret = trace_seq_to_buffer(&iter->seq, + page_address(spd.pages[i]), +- trace_seq_used(&iter->seq)); ++ min((size_t)trace_seq_used(&iter->seq), ++ PAGE_SIZE)); + if (ret < 0) { + __free_page(spd.pages[i]); + break; + } + spd.partial[i].offset = 0; +- spd.partial[i].len = trace_seq_used(&iter->seq); ++ spd.partial[i].len = ret; + + trace_seq_init(&iter->seq); + } +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index 2b948d35fb59ea..448ee37ae2450a 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -950,11 +950,12 @@ enum print_line_t print_event_fields(struct trace_iterator *iter, + struct trace_event_call *call; + struct list_head *head; + ++ lockdep_assert_held_read(&trace_event_sem); ++ + /* ftrace defined events have separate call structures */ + if (event->type <= __TRACE_LAST_TYPE) { + bool found = false; + +- down_read(&trace_event_sem); + list_for_each_entry(call, &ftrace_events, list) { + if (call->event.type == event->type) { + found = true; +@@ -964,7 +965,6 @@ enum print_line_t print_event_fields(struct trace_iterator *iter, + if (call->event.type > __TRACE_LAST_TYPE) + break; + } +- up_read(&trace_event_sem); + if (!found) { + trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type); + goto out; +diff --git a/mm/memblock.c b/mm/memblock.c +index e8a2a1537d6a85..047dce35cf6e0e 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -2122,11 +2122,14 @@ static void __init memmap_init_reserved_pages(void) + struct memblock_region *region; + phys_addr_t start, end; + int nid; ++ unsigned long max_reserved; + + /* + * set nid on all reserved pages and also treat struct + * pages for the NOMAP regions as PageReserved + */ ++repeat: ++ max_reserved = memblock.reserved.max; + for_each_mem_region(region) { + nid = memblock_get_region_node(region); + start = region->base; +@@ -2135,8 +2138,15 @@ static void __init memmap_init_reserved_pages(void) + if (memblock_is_nomap(region)) + reserve_bootmem_region(start, end, nid); + +- memblock_set_node(start, end, &memblock.reserved, nid); ++ memblock_set_node(start, region->size, &memblock.reserved, nid); + } ++ /* ++ * 'max' is changed means memblock.reserved has been doubled its ++ * array, which may result a new reserved region before current ++ * 'start'. Now we should repeat the procedure to set its node id. 
++ */ ++ if (max_reserved != memblock.reserved.max) ++ goto repeat; + + /* initialize struct pages for the reserved regions */ + for_each_reserved_mem_region(region) { +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index d4dcdb2370cc98..72ee41b894a520 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -7386,6 +7386,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, + return -ENOMEM; + /* Init rx_len */ + conn->rx_len = len; ++ ++ skb_set_delivery_time(conn->rx_skb, skb->tstamp, ++ skb->tstamp_type); + } + + /* Copy as much as the rx_skb can hold */ +diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c +index 6ef04f9fe481be..4fbfbafdfa0274 100644 +--- a/net/bridge/netfilter/nf_conntrack_bridge.c ++++ b/net/bridge/netfilter/nf_conntrack_bridge.c +@@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, + struct sk_buff *)) + { + int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; +- bool mono_delivery_time = skb->mono_delivery_time; ++ u8 tstamp_type = skb->tstamp_type; + unsigned int hlen, ll_rs, mtu; + ktime_t tstamp = skb->tstamp; + struct ip_frag_state state; +@@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, + if (iter.frag) + ip_fraglist_prepare(skb, &iter); + +- skb_set_delivery_time(skb, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb, tstamp, tstamp_type); + err = output(net, sk, data, skb); + if (err || !iter.frag) + break; +@@ -113,7 +113,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, + goto blackhole; + } + +- skb_set_delivery_time(skb2, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb2, tstamp, tstamp_type); + err = output(net, sk, data, skb2); + if (err) + goto blackhole; +diff --git a/net/core/dev.c b/net/core/dev.c +index c31a7f7bedf3db..4006fd164b7bc7 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2189,7 +2189,7 @@ EXPORT_SYMBOL(net_disable_timestamp); + static inline void net_timestamp_set(struct sk_buff *skb) + { + skb->tstamp = 0; +- skb->mono_delivery_time = 0; ++ skb->tstamp_type = SKB_CLOCK_REALTIME; + if (static_branch_unlikely(&netstamp_needed_key)) + skb->tstamp = ktime_get_real(); + } +diff --git a/net/core/filter.c b/net/core/filter.c +index 39eef3370d800e..066277b91a1be8 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -7734,13 +7734,13 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, + if (!tstamp) + return -EINVAL; + skb->tstamp = tstamp; +- skb->mono_delivery_time = 1; ++ skb->tstamp_type = SKB_CLOCK_MONOTONIC; + break; + case BPF_SKB_TSTAMP_UNSPEC: + if (tstamp) + return -EINVAL; + skb->tstamp = 0; +- skb->mono_delivery_time = 0; ++ skb->tstamp_type = SKB_CLOCK_REALTIME; + break; + default: + return -EINVAL; +@@ -7868,42 +7868,37 @@ static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = { + + #endif /* CONFIG_INET */ + +-bool bpf_helper_changes_pkt_data(void *func) +-{ +- if (func == bpf_skb_vlan_push || +- func == bpf_skb_vlan_pop || +- func == bpf_skb_store_bytes || +- func == bpf_skb_change_proto || +- func == bpf_skb_change_head || +- func == sk_skb_change_head || +- func == bpf_skb_change_tail || +- func == sk_skb_change_tail || +- func == bpf_skb_adjust_room || +- func == sk_skb_adjust_room || +- func == bpf_skb_pull_data || +- func == sk_skb_pull_data || +- func == bpf_clone_redirect || +- func == bpf_l3_csum_replace || +- func == bpf_l4_csum_replace || +- func == 
bpf_xdp_adjust_head || +- func == bpf_xdp_adjust_meta || +- func == bpf_msg_pull_data || +- func == bpf_msg_push_data || +- func == bpf_msg_pop_data || +- func == bpf_xdp_adjust_tail || +-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) +- func == bpf_lwt_seg6_store_bytes || +- func == bpf_lwt_seg6_adjust_srh || +- func == bpf_lwt_seg6_action || +-#endif +-#ifdef CONFIG_INET +- func == bpf_sock_ops_store_hdr_opt || +-#endif +- func == bpf_lwt_in_push_encap || +- func == bpf_lwt_xmit_push_encap) ++bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id) ++{ ++ switch (func_id) { ++ case BPF_FUNC_clone_redirect: ++ case BPF_FUNC_l3_csum_replace: ++ case BPF_FUNC_l4_csum_replace: ++ case BPF_FUNC_lwt_push_encap: ++ case BPF_FUNC_lwt_seg6_action: ++ case BPF_FUNC_lwt_seg6_adjust_srh: ++ case BPF_FUNC_lwt_seg6_store_bytes: ++ case BPF_FUNC_msg_pop_data: ++ case BPF_FUNC_msg_pull_data: ++ case BPF_FUNC_msg_push_data: ++ case BPF_FUNC_skb_adjust_room: ++ case BPF_FUNC_skb_change_head: ++ case BPF_FUNC_skb_change_proto: ++ case BPF_FUNC_skb_change_tail: ++ case BPF_FUNC_skb_pull_data: ++ case BPF_FUNC_skb_store_bytes: ++ case BPF_FUNC_skb_vlan_pop: ++ case BPF_FUNC_skb_vlan_push: ++ case BPF_FUNC_store_hdr_opt: ++ case BPF_FUNC_xdp_adjust_head: ++ case BPF_FUNC_xdp_adjust_meta: ++ case BPF_FUNC_xdp_adjust_tail: ++ /* tail-called program could call any of the above */ ++ case BPF_FUNC_tail_call: + return true; +- +- return false; ++ default: ++ return false; ++ } + } + + const struct bpf_func_proto bpf_event_output_data_proto __weak; +@@ -9443,7 +9438,7 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, + TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg, + TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2); +- /* skb->tc_at_ingress && skb->mono_delivery_time, ++ /* skb->tc_at_ingress && skb->tstamp_type, + * read 0 as the (rcv) timestamp. + */ + *insn++ = BPF_MOV64_IMM(value_reg, 0); +@@ -9468,7 +9463,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, + * the bpf prog is aware the tstamp could have delivery time. + * Thus, write skb->tstamp as is if tstamp_type_access is true. + * Otherwise, writing at ingress will have to clear the +- * mono_delivery_time bit also. ++ * skb->tstamp_type bit also. 
+ */ + if (!prog->tstamp_type_access) { + __u8 tmp_reg = BPF_REG_AX; +@@ -9478,7 +9473,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); + /* goto */ + *insn++ = BPF_JMP_A(2); +- /* : mono_delivery_time */ ++ /* : skb->tstamp_type */ + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET); + } +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c +index 6dd960ec558cf6..ba0455ad770193 100644 +--- a/net/ieee802154/6lowpan/reassembly.c ++++ b/net/ieee802154/6lowpan/reassembly.c +@@ -130,7 +130,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq, + goto err; + + fq->q.stamp = skb->tstamp; +- fq->q.mono_delivery_time = skb->mono_delivery_time; ++ fq->q.tstamp_type = skb->tstamp_type; + if (frag_type == LOWPAN_DISPATCH_FRAG1) + fq->q.flags |= INET_FRAG_FIRST_IN; + +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c +index c88c9034d63004..496308c0238485 100644 +--- a/net/ipv4/inet_fragment.c ++++ b/net/ipv4/inet_fragment.c +@@ -619,7 +619,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, + skb_mark_not_on_list(head); + head->prev = NULL; + head->tstamp = q->stamp; +- head->mono_delivery_time = q->mono_delivery_time; ++ head->tstamp_type = q->tstamp_type; + + if (sk) + refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc); +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index 877d1e03150c77..484edc8513e4b7 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -360,7 +360,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) + qp->iif = dev->ifindex; + + qp->q.stamp = skb->tstamp; +- qp->q.mono_delivery_time = skb->mono_delivery_time; ++ qp->q.tstamp_type = skb->tstamp_type; + qp->q.meat += skb->len; + qp->ecn |= ecn; + add_frag_mem_limit(qp->q.fqdir, skb->truesize); +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 765bd3f2a84089..b8cfe6afc84b88 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -764,7 +764,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + { + struct iphdr *iph; + struct sk_buff *skb2; +- bool mono_delivery_time = skb->mono_delivery_time; ++ u8 tstamp_type = skb->tstamp_type; + struct rtable *rt = skb_rtable(skb); + unsigned int mtu, hlen, ll_rs; + struct ip_fraglist_iter iter; +@@ -856,7 +856,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + } + } + +- skb_set_delivery_time(skb, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb, tstamp, tstamp_type); + err = output(net, sk, skb); + + if (!err) +@@ -912,7 +912,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + /* + * Put this fragment into the sending queue. 
+ */ +- skb_set_delivery_time(skb2, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb2, tstamp, tstamp_type); + err = output(net, sk, skb2); + if (err) + goto fail; +@@ -1648,7 +1648,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, + arg->csumoffset) = csum_fold(csum_add(nskb->csum, + arg->csum)); + nskb->ip_summed = CHECKSUM_NONE; +- nskb->mono_delivery_time = !!transmit_time; ++ if (transmit_time) ++ nskb->tstamp_type = SKB_CLOCK_MONOTONIC; + if (txhash) + skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4); + ip_push_pending_frames(sk, &fl4); +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 3771ed22c2f56f..560273e7f77365 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1266,7 +1266,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, + tp = tcp_sk(sk); + prior_wstamp = tp->tcp_wstamp_ns; + tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); +- skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); ++ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); + if (clone_it) { + oskb = skb; + +@@ -1607,7 +1607,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, + + skb_split(skb, buff, len); + +- skb_set_delivery_time(buff, skb->tstamp, true); ++ skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC); + tcp_fragment_tstamp(skb, buff); + + old_factor = tcp_skb_pcount(skb); +@@ -2703,7 +2703,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { + /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ + tp->tcp_wstamp_ns = tp->tcp_clock_cache; +- skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); ++ skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); + list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); + tcp_init_tso_segs(skb, mss_now); + goto repair; /* Skip network transmission */ +@@ -3688,11 +3688,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, + #ifdef CONFIG_SYN_COOKIES + if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) + skb_set_delivery_time(skb, cookie_init_timestamp(req, now), +- true); ++ SKB_CLOCK_MONOTONIC); + else + #endif + { +- skb_set_delivery_time(skb, now, true); ++ skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC); + if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ + tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); + } +@@ -3741,7 +3741,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, + bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb, + synack_type, &opts); + +- skb_set_delivery_time(skb, now, true); ++ skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC); + tcp_add_tx_delay(skb, tp); + + return skb; +@@ -3923,7 +3923,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) + + err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); + +- skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true); ++ skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC); + + /* Now full SYN+DATA was cloned and sent (or not), + * remove the SYN from the original skb (syn_data) +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index 2ab16139c197b3..132cfc3b2c847b 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -247,6 +247,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs) + return segs; + } + ++static void 
__udpv6_gso_segment_csum(struct sk_buff *seg, ++ struct in6_addr *oldip, ++ const struct in6_addr *newip, ++ __be16 *oldport, __be16 newport) ++{ ++ struct udphdr *uh = udp_hdr(seg); ++ ++ if (ipv6_addr_equal(oldip, newip) && *oldport == newport) ++ return; ++ ++ if (uh->check) { ++ inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32, ++ newip->s6_addr32, true); ++ ++ inet_proto_csum_replace2(&uh->check, seg, *oldport, newport, ++ false); ++ if (!uh->check) ++ uh->check = CSUM_MANGLED_0; ++ } ++ ++ *oldip = *newip; ++ *oldport = newport; ++} ++ ++static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs) ++{ ++ const struct ipv6hdr *iph; ++ const struct udphdr *uh; ++ struct ipv6hdr *iph2; ++ struct sk_buff *seg; ++ struct udphdr *uh2; ++ ++ seg = segs; ++ uh = udp_hdr(seg); ++ iph = ipv6_hdr(seg); ++ uh2 = udp_hdr(seg->next); ++ iph2 = ipv6_hdr(seg->next); ++ ++ if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) && ++ ipv6_addr_equal(&iph->saddr, &iph2->saddr) && ++ ipv6_addr_equal(&iph->daddr, &iph2->daddr)) ++ return segs; ++ ++ while ((seg = seg->next)) { ++ uh2 = udp_hdr(seg); ++ iph2 = ipv6_hdr(seg); ++ ++ __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr, ++ &uh2->source, uh->source); ++ __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr, ++ &uh2->dest, uh->dest); ++ } ++ ++ return segs; ++} ++ + static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, + netdev_features_t features, + bool is_ipv6) +@@ -259,7 +315,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, + + udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss); + +- return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb); ++ if (is_ipv6) ++ return __udpv6_gso_segment_list_csum(skb); ++ else ++ return __udpv4_gso_segment_list_csum(skb); + } + + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index cd89a2b35dfb56..c86d5dca29df01 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -864,7 +864,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + struct rt6_info *rt = dst_rt6_info(skb_dst(skb)); + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? + inet6_sk(skb->sk) : NULL; +- bool mono_delivery_time = skb->mono_delivery_time; ++ u8 tstamp_type = skb->tstamp_type; + struct ip6_frag_state state; + unsigned int mtu, hlen, nexthdr_offset; + ktime_t tstamp = skb->tstamp; +@@ -958,7 +958,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + if (iter.frag) + ip6_fraglist_prepare(skb, &iter); + +- skb_set_delivery_time(skb, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb, tstamp, tstamp_type); + err = output(net, sk, skb); + if (!err) + IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), +@@ -1019,7 +1019,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + /* + * Put this fragment into the sending queue. 
+ */ +- skb_set_delivery_time(frag, tstamp, mono_delivery_time); ++ skb_set_delivery_time(frag, tstamp, tstamp_type); + err = output(net, sk, frag); + if (err) + goto fail; +diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c +index 857713d7a38a54..7c4af48d529e1e 100644 +--- a/net/ipv6/netfilter.c ++++ b/net/ipv6/netfilter.c +@@ -126,7 +126,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + struct sk_buff *)) + { + int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; +- bool mono_delivery_time = skb->mono_delivery_time; ++ u8 tstamp_type = skb->tstamp_type; + ktime_t tstamp = skb->tstamp; + struct ip6_frag_state state; + u8 *prevhdr, nexthdr = 0; +@@ -192,7 +192,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + if (iter.frag) + ip6_fraglist_prepare(skb, &iter); + +- skb_set_delivery_time(skb, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb, tstamp, tstamp_type); + err = output(net, sk, data, skb); + if (err || !iter.frag) + break; +@@ -225,7 +225,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + goto blackhole; + } + +- skb_set_delivery_time(skb2, tstamp, mono_delivery_time); ++ skb_set_delivery_time(skb2, tstamp, tstamp_type); + err = output(net, sk, data, skb2); + if (err) + goto blackhole; +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c +index c78b13ea5b196a..82e51b2ec4f512 100644 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c +@@ -268,7 +268,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, + fq->iif = dev->ifindex; + + fq->q.stamp = skb->tstamp; +- fq->q.mono_delivery_time = skb->mono_delivery_time; ++ fq->q.tstamp_type = skb->tstamp_type; + fq->q.meat += skb->len; + fq->ecn |= ecn; + if (payload_len > fq->q.max_size) +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c +index 2af98edef87ee0..cb219d4bdf25ed 100644 +--- a/net/ipv6/reassembly.c ++++ b/net/ipv6/reassembly.c +@@ -198,7 +198,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, + fq->iif = dev->ifindex; + + fq->q.stamp = skb->tstamp; +- fq->q.mono_delivery_time = skb->mono_delivery_time; ++ fq->q.tstamp_type = skb->tstamp_type; + fq->q.meat += skb->len; + fq->ecn |= ecn; + add_frag_mem_limit(fq->q.fqdir, skb->truesize); +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index f285e52b8b8579..624ab1424eba7d 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -934,7 +934,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 + mark = inet_twsk(sk)->tw_mark; + else + mark = READ_ONCE(sk->sk_mark); +- skb_set_delivery_time(buff, tcp_transmit_time(sk), true); ++ skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC); + } + if (txhash) { + /* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */ +diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c +index b0455fda7d0baf..ac87fcff4795e8 100644 +--- a/net/sched/act_bpf.c ++++ b/net/sched/act_bpf.c +@@ -54,8 +54,8 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb, + bpf_compute_data_pointers(skb); + filter_res = bpf_prog_run(filter, skb); + } +- if (unlikely(!skb->tstamp && skb->mono_delivery_time)) +- skb->mono_delivery_time = 0; ++ if (unlikely(!skb->tstamp && skb->tstamp_type)) ++ skb->tstamp_type = SKB_CLOCK_REALTIME; + if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK) + skb_orphan(skb); + +diff --git a/net/sched/cls_bpf.c 
b/net/sched/cls_bpf.c +index 382c7a71f81f2d..db7151c6b70b79 100644 +--- a/net/sched/cls_bpf.c ++++ b/net/sched/cls_bpf.c +@@ -104,8 +104,8 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb, + bpf_compute_data_pointers(skb); + filter_res = bpf_prog_run(prog->filter, skb); + } +- if (unlikely(!skb->tstamp && skb->mono_delivery_time)) +- skb->mono_delivery_time = 0; ++ if (unlikely(!skb->tstamp && skb->tstamp_type)) ++ skb->tstamp_type = SKB_CLOCK_REALTIME; + + if (prog->exts_integrated) { + res->class = 0; +diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c +index 19901e77cd3b7f..9b36955e32b142 100644 +--- a/net/sched/sch_drr.c ++++ b/net/sched/sch_drr.c +@@ -35,6 +35,11 @@ struct drr_sched { + struct Qdisc_class_hash clhash; + }; + ++static bool cl_is_active(struct drr_class *cl) ++{ ++ return !list_empty(&cl->alist); ++} ++ + static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) + { + struct drr_sched *q = qdisc_priv(sch); +@@ -105,6 +110,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + return -ENOBUFS; + + gnet_stats_basic_sync_init(&cl->bstats); ++ INIT_LIST_HEAD(&cl->alist); + cl->common.classid = classid; + cl->quantum = quantum; + cl->qdisc = qdisc_create_dflt(sch->dev_queue, +@@ -229,7 +235,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg) + { + struct drr_class *cl = (struct drr_class *)arg; + +- list_del(&cl->alist); ++ list_del_init(&cl->alist); + } + + static int drr_dump_class(struct Qdisc *sch, unsigned long arg, +@@ -336,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct drr_sched *q = qdisc_priv(sch); + struct drr_class *cl; + int err = 0; +- bool first; + + cl = drr_classify(skb, sch, &err); + if (cl == NULL) { +@@ -346,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, + return err; + } + +- first = !cl->qdisc->q.qlen; + err = qdisc_enqueue(skb, cl->qdisc, to_free); + if (unlikely(err != NET_XMIT_SUCCESS)) { + if (net_xmit_drop_count(err)) { +@@ -356,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, + return err; + } + +- if (first) { ++ if (!cl_is_active(cl)) { + list_add_tail(&cl->alist, &q->active); + cl->deficit = cl->quantum; + } +@@ -390,7 +394,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) + if (unlikely(skb == NULL)) + goto out; + if (cl->qdisc->q.qlen == 0) +- list_del(&cl->alist); ++ list_del_init(&cl->alist); + + bstats_update(&cl->bstats, skb); + qdisc_bstats_update(sch, skb); +@@ -431,7 +435,7 @@ static void drr_reset_qdisc(struct Qdisc *sch) + for (i = 0; i < q->clhash.hashsize; i++) { + hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { + if (cl->qdisc->q.qlen) +- list_del(&cl->alist); ++ list_del_init(&cl->alist); + qdisc_reset(cl->qdisc); + } + } +diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c +index 9fd70462b41d5a..9da86db4d2c2fe 100644 +--- a/net/sched/sch_ets.c ++++ b/net/sched/sch_ets.c +@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = { + [TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 }, + }; + ++static bool cl_is_active(struct ets_class *cl) ++{ ++ return !list_empty(&cl->alist); ++} ++ + static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr, + unsigned int *quantum, + struct netlink_ext_ack *extack) +@@ -293,7 +298,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg) + * to remove them. 
+ */ + if (!ets_class_is_strict(q, cl) && sch->q.qlen) +- list_del(&cl->alist); ++ list_del_init(&cl->alist); + } + + static int ets_class_dump(struct Qdisc *sch, unsigned long arg, +@@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct ets_sched *q = qdisc_priv(sch); + struct ets_class *cl; + int err = 0; +- bool first; + + cl = ets_classify(skb, sch, &err); + if (!cl) { +@@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + return err; + } + +- first = !cl->qdisc->q.qlen; + err = qdisc_enqueue(skb, cl->qdisc, to_free); + if (unlikely(err != NET_XMIT_SUCCESS)) { + if (net_xmit_drop_count(err)) { +@@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + return err; + } + +- if (first && !ets_class_is_strict(q, cl)) { ++ if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) { + list_add_tail(&cl->alist, &q->active); + cl->deficit = cl->quantum; + } +@@ -488,7 +491,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch) + if (unlikely(!skb)) + goto out; + if (cl->qdisc->q.qlen == 0) +- list_del(&cl->alist); ++ list_del_init(&cl->alist); + return ets_qdisc_dequeue_skb(sch, skb); + } + +@@ -657,7 +660,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, + } + for (i = q->nbands; i < oldbands; i++) { + if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) +- list_del(&q->classes[i].alist); ++ list_del_init(&q->classes[i].alist); + qdisc_tree_flush_backlog(q->classes[i].qdisc); + } + q->nstrict = nstrict; +@@ -713,7 +716,7 @@ static void ets_qdisc_reset(struct Qdisc *sch) + + for (band = q->nstrict; band < q->nbands; band++) { + if (q->classes[band].qdisc->q.qlen) +- list_del(&q->classes[band].alist); ++ list_del_init(&q->classes[band].alist); + } + for (band = 0; band < q->nbands; band++) + qdisc_reset(q->classes[band].qdisc); +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c +index 371255e624332f..5d9cccfac4a155 100644 +--- a/net/sched/sch_hfsc.c ++++ b/net/sched/sch_hfsc.c +@@ -203,7 +203,10 @@ eltree_insert(struct hfsc_class *cl) + static inline void + eltree_remove(struct hfsc_class *cl) + { +- rb_erase(&cl->el_node, &cl->sched->eligible); ++ if (!RB_EMPTY_NODE(&cl->el_node)) { ++ rb_erase(&cl->el_node, &cl->sched->eligible); ++ RB_CLEAR_NODE(&cl->el_node); ++ } + } + + static inline void +@@ -1224,7 +1227,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) + /* vttree is now handled in update_vf() so that update_vf(cl, 0, 0) + * needs to be called explicitly to remove a class from vttree. 
+ */ +- update_vf(cl, 0, 0); ++ if (cl->cl_nactive) ++ update_vf(cl, 0, 0); + if (cl->cl_flags & HFSC_RSC) + eltree_remove(cl); + } +@@ -1566,7 +1570,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + return err; + } + +- if (first) { ++ if (first && !cl->cl_nactive) { + if (cl->cl_flags & HFSC_RSC) + init_ed(cl, len); + if (cl->cl_flags & HFSC_FSC) +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 19035ef8387fed..9a3f7ea80b34b9 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -1485,6 +1485,8 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) + { + struct htb_class *cl = (struct htb_class *)arg; + ++ if (!cl->prio_activity) ++ return; + htb_deactivate(qdisc_priv(sch), cl); + } + +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c +index 546c10adcacdea..5e557b960bde33 100644 +--- a/net/sched/sch_qfq.c ++++ b/net/sched/sch_qfq.c +@@ -202,6 +202,11 @@ struct qfq_sched { + */ + enum update_reason {enqueue, requeue}; + ++static bool cl_is_active(struct qfq_class *cl) ++{ ++ return !list_empty(&cl->alist); ++} ++ + static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) + { + struct qfq_sched *q = qdisc_priv(sch); +@@ -347,7 +352,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) + struct qfq_aggregate *agg = cl->agg; + + +- list_del(&cl->alist); /* remove from RR queue of the aggregate */ ++ list_del_init(&cl->alist); /* remove from RR queue of the aggregate */ + if (list_empty(&agg->active)) /* agg is now inactive */ + qfq_deactivate_agg(q, agg); + } +@@ -477,6 +482,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + gnet_stats_basic_sync_init(&cl->bstats); + cl->common.classid = classid; + cl->deficit = lmax; ++ INIT_LIST_HEAD(&cl->alist); + + cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + classid, NULL); +@@ -985,7 +991,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg, + cl->deficit -= (int) len; + + if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ +- list_del(&cl->alist); ++ list_del_init(&cl->alist); + else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) { + cl->deficit += agg->lmax; + list_move_tail(&cl->alist, &agg->active); +@@ -1217,7 +1223,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct qfq_class *cl; + struct qfq_aggregate *agg; + int err = 0; +- bool first; + + cl = qfq_classify(skb, sch, &err); + if (cl == NULL) { +@@ -1239,7 +1244,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + } + + gso_segs = skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; +- first = !cl->qdisc->q.qlen; + err = qdisc_enqueue(skb, cl->qdisc, to_free); + if (unlikely(err != NET_XMIT_SUCCESS)) { + pr_debug("qfq_enqueue: enqueue failed %d\n", err); +@@ -1255,8 +1259,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + ++sch->q.qlen; + + agg = cl->agg; +- /* if the queue was not empty, then done here */ +- if (!first) { ++ /* if the class is active, then done here */ ++ if (cl_is_active(cl)) { + if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && + list_first_entry(&agg->active, struct qfq_class, alist) + == cl && cl->deficit < len) +@@ -1418,6 +1422,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) + struct qfq_sched *q = qdisc_priv(sch); + struct qfq_class *cl = (struct qfq_class *)arg; + ++ if (list_empty(&cl->alist)) ++ return; + qfq_deactivate_class(q, cl); + } + +diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c +index 619a817ee91cb8..4c1318ce8ae58f 100644 +--- a/sound/soc/codecs/ak4613.c ++++ b/sound/soc/codecs/ak4613.c +@@ -840,14 +840,14 @@ static void ak4613_parse_of(struct ak4613_priv *priv, + /* Input 1 - 2 */ + for (i = 0; i < 2; i++) { + snprintf(prop, sizeof(prop), "asahi-kasei,in%d-single-end", i + 1); +- if (!of_get_property(np, prop, NULL)) ++ if (!of_property_read_bool(np, prop)) + priv->ic |= 1 << i; + } + + /* Output 1 - 6 */ + for (i = 0; i < 6; i++) { + snprintf(prop, sizeof(prop), "asahi-kasei,out%d-single-end", i + 1); +- if (!of_get_property(np, prop, NULL)) ++ if (!of_property_read_bool(np, prop)) + priv->oc |= 1 << i; + } + +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index e65fe3a7c3e42c..7eea70eea68b47 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -2935,7 +2935,7 @@ int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop) + unsigned int i, nb_controls; + int ret; + +- if (!of_property_read_bool(dev->of_node, prop)) ++ if (!of_property_present(dev->of_node, prop)) + return 0; + + strings = devm_kcalloc(dev, nb_controls_max, +@@ -3009,23 +3009,17 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np, + if (rx_mask) + snd_soc_of_get_slot_mask(np, "dai-tdm-slot-rx-mask", rx_mask); + +- if (of_property_read_bool(np, "dai-tdm-slot-num")) { +- ret = of_property_read_u32(np, "dai-tdm-slot-num", &val); +- if (ret) +- return ret; +- +- if (slots) +- *slots = val; +- } +- +- if (of_property_read_bool(np, "dai-tdm-slot-width")) { +- ret = of_property_read_u32(np, "dai-tdm-slot-width", &val); +- if (ret) +- return ret; ++ ret = of_property_read_u32(np, "dai-tdm-slot-num", &val); ++ if (ret && ret != -EINVAL) ++ return ret; ++ if (!ret && slots) ++ *slots = val; + +- if (slot_width) +- *slot_width = val; +- } ++ ret = of_property_read_u32(np, "dai-tdm-slot-width", &val); ++ if (ret && ret != -EINVAL) ++ return ret; ++ if (!ret && slot_width) ++ *slot_width = val; + + return 0; + } +@@ -3249,10 +3243,10 @@ unsigned int snd_soc_daifmt_parse_format(struct device_node *np, + * SND_SOC_DAIFMT_INV_MASK area + */ + snprintf(prop, sizeof(prop), "%sbitclock-inversion", prefix); +- bit = !!of_get_property(np, prop, NULL); ++ bit = of_property_read_bool(np, prop); + + snprintf(prop, sizeof(prop), "%sframe-inversion", prefix); +- frame = !!of_get_property(np, prop, NULL); ++ frame = of_property_read_bool(np, prop); + + switch ((bit << 4) + frame) { + case 0x11: +@@ -3289,12 +3283,12 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np, + * check "[prefix]frame-master" + */ + 
snprintf(prop, sizeof(prop), "%sbitclock-master", prefix); +- bit = !!of_get_property(np, prop, NULL); ++ bit = of_property_present(np, prop); + if (bit && bitclkmaster) + *bitclkmaster = of_parse_phandle(np, prop, 0); + + snprintf(prop, sizeof(prop), "%sframe-master", prefix); +- frame = !!of_get_property(np, prop, NULL); ++ frame = of_property_present(np, prop); + if (frame && framemaster) + *framemaster = of_parse_phandle(np, prop, 0); + +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 60248a6820aacc..30e93f9aad7624 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1534,10 +1534,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream, + /* + * Filter for systems with 'component_chaining' enabled. + * This helps to avoid unnecessary re-configuration of an +- * already active BE on such systems. ++ * already active BE on such systems and ensures the BE DAI ++ * widget is powered ON after hw_params() BE DAI callback. + */ + if (fe->card->component_chaining && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE)) + continue; + +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c +index 68aa174be12d71..f71f6ff3e2b0f7 100644 +--- a/sound/usb/endpoint.c ++++ b/sound/usb/endpoint.c +@@ -926,14 +926,21 @@ static int endpoint_set_interface(struct snd_usb_audio *chip, + { + int altset = set ? ep->altsetting : 0; + int err; ++ int retries = 0; ++ const int max_retries = 5; + + if (ep->iface_ref->altset == altset) + return 0; + + usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n", + ep->iface, altset, ep->ep_num); ++retry: + err = usb_set_interface(chip->dev, ep->iface, altset); + if (err < 0) { ++ if (err == -EPROTO && ++retries <= max_retries) { ++ msleep(5 * (1 << (retries - 1))); ++ goto retry; ++ } + usb_audio_err_ratelimited( + chip, "%d:%d: usb_set_interface failed (%d)\n", + ep->iface, altset, err); +diff --git a/sound/usb/format.c b/sound/usb/format.c +index 3b3a5ea6fcbfc0..f33d25a4e4cc7c 100644 +--- a/sound/usb/format.c ++++ b/sound/usb/format.c +@@ -263,7 +263,8 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof + } + + /* Jabra Evolve 65 headset */ +- if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) { ++ if (chip->usb_id == USB_ID(0x0b0e, 0x030b) || ++ chip->usb_id == USB_ID(0x0b0e, 0x030c)) { + /* only 48kHz for playback while keeping 16kHz for capture */ + if (fp->nr_rates != 1) + return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000); +diff --git a/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c b/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c +new file mode 100644 +index 00000000000000..7526de3790814c +--- /dev/null ++++ b/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c +@@ -0,0 +1,107 @@ ++// SPDX-License-Identifier: GPL-2.0 ++#include "bpf/libbpf.h" ++#include "changes_pkt_data_freplace.skel.h" ++#include "changes_pkt_data.skel.h" ++#include ++ ++static void print_verifier_log(const char *log) ++{ ++ if (env.verbosity >= VERBOSE_VERY) ++ fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log); ++} ++ ++static void test_aux(const char *main_prog_name, ++ const char *to_be_replaced, ++ const char *replacement, ++ bool expect_load) ++{ ++ struct changes_pkt_data_freplace *freplace = NULL; ++ struct bpf_program *freplace_prog = NULL; ++ struct bpf_program *main_prog = 
NULL; ++ LIBBPF_OPTS(bpf_object_open_opts, opts); ++ struct changes_pkt_data *main = NULL; ++ char log[16*1024]; ++ int err; ++ ++ opts.kernel_log_buf = log; ++ opts.kernel_log_size = sizeof(log); ++ if (env.verbosity >= VERBOSE_SUPER) ++ opts.kernel_log_level = 1 | 2 | 4; ++ main = changes_pkt_data__open_opts(&opts); ++ if (!ASSERT_OK_PTR(main, "changes_pkt_data__open")) ++ goto out; ++ main_prog = bpf_object__find_program_by_name(main->obj, main_prog_name); ++ if (!ASSERT_OK_PTR(main_prog, "main_prog")) ++ goto out; ++ bpf_program__set_autoload(main_prog, true); ++ err = changes_pkt_data__load(main); ++ print_verifier_log(log); ++ if (!ASSERT_OK(err, "changes_pkt_data__load")) ++ goto out; ++ freplace = changes_pkt_data_freplace__open_opts(&opts); ++ if (!ASSERT_OK_PTR(freplace, "changes_pkt_data_freplace__open")) ++ goto out; ++ freplace_prog = bpf_object__find_program_by_name(freplace->obj, replacement); ++ if (!ASSERT_OK_PTR(freplace_prog, "freplace_prog")) ++ goto out; ++ bpf_program__set_autoload(freplace_prog, true); ++ bpf_program__set_autoattach(freplace_prog, true); ++ bpf_program__set_attach_target(freplace_prog, ++ bpf_program__fd(main_prog), ++ to_be_replaced); ++ err = changes_pkt_data_freplace__load(freplace); ++ print_verifier_log(log); ++ if (expect_load) { ++ ASSERT_OK(err, "changes_pkt_data_freplace__load"); ++ } else { ++ ASSERT_ERR(err, "changes_pkt_data_freplace__load"); ++ ASSERT_HAS_SUBSTR(log, "Extension program changes packet data", "error log"); ++ } ++ ++out: ++ changes_pkt_data_freplace__destroy(freplace); ++ changes_pkt_data__destroy(main); ++} ++ ++/* There are two global subprograms in both changes_pkt_data.skel.h: ++ * - one changes packet data; ++ * - another does not. ++ * It is ok to freplace subprograms that change packet data with those ++ * that either do or do not. It is only ok to freplace subprograms ++ * that do not change packet data with those that do not as well. ++ * The below tests check outcomes for each combination of such freplace. ++ * Also test a case when main subprogram itself is replaced and is a single ++ * subprogram in a program. 
++ */ ++void test_changes_pkt_data_freplace(void) ++{ ++ struct { ++ const char *main; ++ const char *to_be_replaced; ++ bool changes; ++ } mains[] = { ++ { "main_with_subprogs", "changes_pkt_data", true }, ++ { "main_with_subprogs", "does_not_change_pkt_data", false }, ++ { "main_changes", "main_changes", true }, ++ { "main_does_not_change", "main_does_not_change", false }, ++ }; ++ struct { ++ const char *func; ++ bool changes; ++ } replacements[] = { ++ { "changes_pkt_data", true }, ++ { "does_not_change_pkt_data", false } ++ }; ++ char buf[64]; ++ ++ for (int i = 0; i < ARRAY_SIZE(mains); ++i) { ++ for (int j = 0; j < ARRAY_SIZE(replacements); ++j) { ++ snprintf(buf, sizeof(buf), "%s_with_%s", ++ mains[i].to_be_replaced, replacements[j].func); ++ if (!test__start_subtest(buf)) ++ continue; ++ test_aux(mains[i].main, mains[i].to_be_replaced, replacements[j].func, ++ mains[i].changes || !replacements[j].changes); ++ } ++ } ++} +diff --git a/tools/testing/selftests/bpf/progs/changes_pkt_data.c b/tools/testing/selftests/bpf/progs/changes_pkt_data.c +new file mode 100644 +index 00000000000000..43cada48b28ad4 +--- /dev/null ++++ b/tools/testing/selftests/bpf/progs/changes_pkt_data.c +@@ -0,0 +1,39 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++ ++__noinline ++long changes_pkt_data(struct __sk_buff *sk) ++{ ++ return bpf_skb_pull_data(sk, 0); ++} ++ ++__noinline __weak ++long does_not_change_pkt_data(struct __sk_buff *sk) ++{ ++ return 0; ++} ++ ++SEC("?tc") ++int main_with_subprogs(struct __sk_buff *sk) ++{ ++ changes_pkt_data(sk); ++ does_not_change_pkt_data(sk); ++ return 0; ++} ++ ++SEC("?tc") ++int main_changes(struct __sk_buff *sk) ++{ ++ bpf_skb_pull_data(sk, 0); ++ return 0; ++} ++ ++SEC("?tc") ++int main_does_not_change(struct __sk_buff *sk) ++{ ++ return 0; ++} ++ ++char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c b/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c +new file mode 100644 +index 00000000000000..f9a622705f1b3b +--- /dev/null ++++ b/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++ ++SEC("?freplace") ++long changes_pkt_data(struct __sk_buff *sk) ++{ ++ return bpf_skb_pull_data(sk, 0); ++} ++ ++SEC("?freplace") ++long does_not_change_pkt_data(struct __sk_buff *sk) ++{ ++ return 0; ++} ++ ++char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c +index ee76b51005abe7..3c8f6646e33dae 100644 +--- a/tools/testing/selftests/bpf/progs/verifier_sock.c ++++ b/tools/testing/selftests/bpf/progs/verifier_sock.c +@@ -50,6 +50,13 @@ struct { + __uint(map_flags, BPF_F_NO_PREALLOC); + } sk_storage_map SEC(".maps"); + ++struct { ++ __uint(type, BPF_MAP_TYPE_PROG_ARRAY); ++ __uint(max_entries, 1); ++ __uint(key_size, sizeof(__u32)); ++ __uint(value_size, sizeof(__u32)); ++} jmp_table SEC(".maps"); ++ + SEC("cgroup/skb") + __description("skb->sk: no NULL check") + __failure __msg("invalid mem access 'sock_common_or_null'") +@@ -977,4 +984,53 @@ l1_%=: r0 = *(u8*)(r7 + 0); \ + : __clobber_all); + } + ++__noinline ++long skb_pull_data2(struct __sk_buff *sk, __u32 len) ++{ ++ return bpf_skb_pull_data(sk, len); ++} ++ ++__noinline ++long skb_pull_data1(struct __sk_buff *sk, __u32 len) ++{ ++ return skb_pull_data2(sk, len); ++} ++ ++/* global function calls bpf_skb_pull_data(), which invalidates packet 
++ * pointers established before global function call. ++ */ ++SEC("tc") ++__failure __msg("invalid mem access") ++int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk) ++{ ++ int *p = (void *)(long)sk->data; ++ ++ if ((void *)(p + 1) > (void *)(long)sk->data_end) ++ return TCX_DROP; ++ skb_pull_data1(sk, 0); ++ *p = 42; /* this is unsafe */ ++ return TCX_PASS; ++} ++ ++__noinline ++int tail_call(struct __sk_buff *sk) ++{ ++ bpf_tail_call_static(sk, &jmp_table, 0); ++ return 0; ++} ++ ++/* Tail calls invalidate packet pointers. */ ++SEC("tc") ++__failure __msg("invalid mem access") ++int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk) ++{ ++ int *p = (void *)(long)sk->data; ++ ++ if ((void *)(p + 1) > (void *)(long)sk->data_end) ++ return TCX_DROP; ++ tail_call(sk); ++ *p = 42; /* this is unsafe */ ++ return TCX_PASS; ++} ++ + char _license[] SEC("license") = "GPL"; diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.90-91.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.90-91.patch new file mode 100644 index 0000000000..8cffc8d8b2 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.90-91.patch @@ -0,0 +1,4952 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 657bdee28d845a..0426ec112155ec 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -514,6 +514,7 @@ Description: information about CPUs heterogeneity. + + What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/gather_data_sampling ++ /sys/devices/system/cpu/vulnerabilities/indirect_target_selection + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds +diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst +index ff0b440ef2dc90..d2caa390395e5b 100644 +--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -22,3 +22,4 @@ are configurable at compile, boot or run time. + srso + gather_data_sampling + reg-file-data-sampling ++ indirect-target-selection +diff --git a/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst +new file mode 100644 +index 00000000000000..d9ca64108d2332 +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst +@@ -0,0 +1,168 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++Indirect Target Selection (ITS) ++=============================== ++ ++ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were ++released before Alder Lake. ITS may allow an attacker to control the prediction ++of indirect branches and RETs located in the lower half of a cacheline. ++ ++ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium). ++ ++Scope of Impact ++--------------- ++- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be ++ predicted with unintended target corresponding to a branch in the guest. ++ ++- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native ++ gadgets. ++ ++- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect ++ branches may still be predicted with targets corresponding to direct branches ++ executed prior to the IBPB. 
This is fixed by the IPU 2025.1 microcode, which
++ should be available via distro updates. Alternatively, microcode can be
++ obtained from Intel's github repository [#f1]_.
++
++Affected CPUs
++-------------
++Below is the list of ITS-affected CPUs [#f2]_ [#f3]_:
++
++ ======================== ============ ==================== ===============
++ Common name Family_Model eIBRS Intra-mode BTI
++ Guest/Host Isolation
++ ======================== ============ ==================== ===============
++ SKYLAKE_X (step >= 6) 06_55H Affected Affected
++ ICELAKE_X 06_6AH Not affected Affected
++ ICELAKE_D 06_6CH Not affected Affected
++ ICELAKE_L 06_7EH Not affected Affected
++ TIGERLAKE_L 06_8CH Not affected Affected
++ TIGERLAKE 06_8DH Not affected Affected
++ KABYLAKE_L (step >= 12) 06_8EH Affected Affected
++ KABYLAKE (step >= 13) 06_9EH Affected Affected
++ COMETLAKE 06_A5H Affected Affected
++ COMETLAKE_L 06_A6H Affected Affected
++ ROCKETLAKE 06_A7H Not affected Affected
++ ======================== ============ ==================== ===============
++
++- All affected CPUs enumerate the Enhanced IBRS feature.
++- IBPB isolation is affected on all ITS-affected CPUs, and needs a microcode
++ update for mitigation.
++- None of the affected CPUs enumerate BHI_CTRL, which was introduced in Golden
++ Cove (Alder Lake and Sapphire Rapids). This can help guests determine the
++ host's affected status.
++- Intel Atom CPUs are not affected by ITS.
++
++Mitigation
++----------
++As only the indirect branches and RETs that have their last byte of instruction
++in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
++the mitigation is to not allow indirect branches in the lower half.
++
++This is achieved by relying on existing retpoline support in the kernel, and in
++compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly
++added ITS-safe thunks. These safe thunks consist of an indirect branch in the
++second half of the cacheline. Not all retpoline sites are patched to thunks; if
++a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
++indirect branch.
++
++Dynamic thunks
++~~~~~~~~~~~~~~
++From a dynamically allocated pool of safe thunks, each vulnerable site is
++replaced with a new thunk, such that each gets a unique address. This could
++improve the branch prediction accuracy. Also, it is a defense-in-depth measure
++against aliasing.
++
++Note, for simplicity, indirect branches in eBPF programs are always replaced
++with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
++in the future this can be changed to use dynamic thunks.
++
++All vulnerable RETs are replaced with a static thunk; they do not use dynamic
++thunks. This is because RETs mostly get their prediction from the RSB, which
++does not depend on the source address. RETs that underflow the RSB may benefit
++from dynamic thunks. But RETs significantly outnumber indirect branches, and
++any benefit from a unique source address could be outweighed by the increased
++icache footprint and iTLB pressure.
++
++Retpoline
++~~~~~~~~~
++The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
++reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
++to safe thunks, unless the user requested the RSB-stuffing mitigation.
++
++RSB Stuffing
++~~~~~~~~~~~~
++RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
++attacks, and it also mitigates RETs that are vulnerable to ITS.
++
++Mitigation in guests
++^^^^^^^^^^^^^^^^^^^^
++All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration
++and Family/Model of the guest. This is because the eIBRS feature could be hidden
++from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which
++indicates that the guest is running on an unaffected host.
++
++To prevent guests from unnecessarily deploying the mitigation on unaffected
++platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
++a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
++is not set by any hardware, but is **intended to be synthesized by VMMs** for
++guests as per the host's affected status.
++
++Mitigation options
++^^^^^^^^^^^^^^^^^^
++The ITS mitigation can be controlled using the "indirect_target_selection"
++kernel parameter. The available options are:
++
++ ======== ===================================================================
++ on (default) Deploy the "Aligned branch/return thunks" mitigation.
++ If spectre_v2 mitigation enables retpoline, aligned-thunks are only
++ deployed for the affected RET instructions. Retpoline mitigates
++ indirect branches.
++
++ off Disable ITS mitigation.
++
++ vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation
++ part of ITS. Otherwise, mitigation is not deployed. This option is
++ useful when host userspace is not in the threat model, and only
++ attacks from guest to host are considered.
++
++ stuff Deploy RSB-fill mitigation when retpoline is also deployed.
++ Otherwise, deploy the default mitigation. When retpoline mitigation
++ is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
++ ITS.
++
++ force Force the ITS bug and deploy the default mitigation.
++ ======== ===================================================================
++
++Sysfs reporting
++---------------
++
++The sysfs file showing ITS mitigation status is:
++
++ /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
++
++Note, microcode mitigation status is not reported in this file.
++
++The possible values in this file are:
++
++.. list-table::
++
++ * - Not affected
++ - The processor is not vulnerable.
++ * - Vulnerable
++ - System is vulnerable and no mitigation has been applied.
++ * - Vulnerable, KVM: Not affected
++ - System is vulnerable to intra-mode BTI, but not affected by eIBRS
++ guest/host isolation.
++ * - Mitigation: Aligned branch/return thunks
++ - The mitigation is enabled; affected indirect branches and RETs are
++ relocated to safe thunks.
++ * - Mitigation: Retpolines, Stuffing RSB
++ - The mitigation is enabled using retpoline and RSB stuffing.
++
++References
++----------
++.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
++
++.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
++
++.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 184f2f96f6a547..f95734ceb82b86 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2060,6 +2060,23 @@
+ different crypto accelerators. This option can be used
+ to achieve best performance for particular HW.
+ ++ indirect_target_selection= [X86,Intel] Mitigation control for Indirect ++ Target Selection(ITS) bug in Intel CPUs. Updated ++ microcode is also required for a fix in IBPB. ++ ++ on: Enable mitigation (default). ++ off: Disable mitigation. ++ force: Force the ITS bug and deploy default ++ mitigation. ++ vmexit: Only deploy mitigation if CPU is affected by ++ guest/host isolation part of ITS. ++ stuff: Deploy RSB-fill mitigation when retpoline is ++ also deployed. Otherwise, deploy the default ++ mitigation. ++ ++ For details see: ++ Documentation/admin-guide/hw-vuln/indirect-target-selection.rst ++ + init= [KNL] + Format: + Run specified binary instead of /sbin/init as init +@@ -3331,6 +3348,7 @@ + expose users to several CPU vulnerabilities. + Equivalent to: if nokaslr then kpti=0 [ARM64] + gather_data_sampling=off [X86] ++ indirect_target_selection=off [X86] + kvm.nx_huge_pages=off [X86] + l1tf=off [X86] + mds=off [X86] +diff --git a/Makefile b/Makefile +index 587a1586e76db8..a6a1942e2d00a9 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 90 ++SUBLEVEL = 91 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi +index 6c48fa4b0d0c4f..6457d2c377017a 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi ++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi +@@ -148,6 +148,19 @@ reg_usdhc2_vmmc: regulator-usdhc2 { + startup-delay-us = <20000>; + }; + ++ reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc { ++ compatible = "regulator-gpio"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usdhc2_vsel>; ++ gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>; ++ regulator-max-microvolt = <3300000>; ++ regulator-min-microvolt = <1800000>; ++ states = <1800000 0x1>, ++ <3300000 0x0>; ++ regulator-name = "PMIC_USDHC_VSELECT"; ++ vin-supply = <®_nvcc_sd>; ++ }; ++ + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; +@@ -266,7 +279,7 @@ &gpio1 { + "SODIMM_19", + "", + "", +- "", ++ "PMIC_USDHC_VSELECT", + "", + "", + "", +@@ -787,6 +800,7 @@ &usdhc2 { + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>; + pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>; + vmmc-supply = <®_usdhc2_vmmc>; ++ vqmmc-supply = <®_usdhc2_vqmmc>; + }; + + &wdog1 { +@@ -1209,13 +1223,17 @@ pinctrl_usdhc2_pwr_en: usdhc2pwrengrp { + ; /* SODIMM 76 */ + }; + ++ pinctrl_usdhc2_vsel: usdhc2vselgrp { ++ fsl,pins = ++ ; /* PMIC_USDHC_VSELECT */ ++ }; ++ + /* + * Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the + * on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here. 
+ */ + pinctrl_usdhc2: usdhc2grp { + fsl,pins = +- , + , /* SODIMM 78 */ + , /* SODIMM 74 */ + , /* SODIMM 80 */ +@@ -1226,7 +1244,6 @@ pinctrl_usdhc2: usdhc2grp { + + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = +- , + , + , + , +@@ -1237,7 +1254,6 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = +- , + , + , + , +@@ -1249,7 +1265,6 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + /* Avoid backfeeding with removed card power */ + pinctrl_usdhc2_sleep: usdhc2slpgrp { + fsl,pins = +- , + , + , + , +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 2a4e686e633c62..8a6b7feca3e428 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -81,6 +81,7 @@ + #define ARM_CPU_PART_CORTEX_A78AE 0xD42 + #define ARM_CPU_PART_CORTEX_X1 0xD44 + #define ARM_CPU_PART_CORTEX_A510 0xD46 ++#define ARM_CPU_PART_CORTEX_X1C 0xD4C + #define ARM_CPU_PART_CORTEX_A520 0xD80 + #define ARM_CPU_PART_CORTEX_A710 0xD47 + #define ARM_CPU_PART_CORTEX_A715 0xD4D +@@ -166,6 +167,7 @@ + #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) + #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) + #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) ++#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C) + #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520) + #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) + #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715) +diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h +index 0ccf51afde31a6..12c0278294e3f6 100644 +--- a/arch/arm64/include/asm/insn.h ++++ b/arch/arm64/include/asm/insn.h +@@ -687,6 +687,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result, + } + #endif + u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type); ++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type); + + s32 aarch64_get_branch_offset(u32 insn); + u32 aarch64_set_branch_offset(u32 insn, s32 offset); +diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h +index 0c2b47673922e3..32475d19c15f44 100644 +--- a/arch/arm64/include/asm/spectre.h ++++ b/arch/arm64/include/asm/spectre.h +@@ -97,6 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void); + + enum mitigation_state arm64_get_spectre_bhb_state(void); + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); ++extern bool __nospectre_bhb; ++u8 get_spectre_bhb_loop_value(void); ++bool is_spectre_bhb_fw_mitigated(void); + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); + bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); + +diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c +index edc4c727783d82..28c48bc9c09538 100644 +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -891,6 +891,7 @@ static u8 spectre_bhb_loop_affected(void) + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +@@ -998,6 +999,11 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, + return true; + } + ++u8 
get_spectre_bhb_loop_value(void) ++{ ++ return max_bhb_k; ++} ++ + static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) + { + const char *v = arm64_get_bp_hardening_vector(slot); +@@ -1015,7 +1021,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) + isb(); + } + +-static bool __read_mostly __nospectre_bhb; ++bool __read_mostly __nospectre_bhb; + static int __init parse_spectre_bhb_param(char *str) + { + __nospectre_bhb = true; +@@ -1093,6 +1099,11 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) + update_mitigation_state(&spectre_bhb_state, state); + } + ++bool is_spectre_bhb_fw_mitigated(void) ++{ ++ return test_bit(BHB_FW, &system_bhb_mitigations); ++} ++ + /* Patched to NOP when enabled */ + void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, + __le32 *origptr, +diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c +index a635ab83fee359..7232b1e70a125f 100644 +--- a/arch/arm64/lib/insn.c ++++ b/arch/arm64/lib/insn.c +@@ -5,6 +5,7 @@ + * + * Copyright (C) 2014-2016 Zi Shen Lim + */ ++#include + #include + #include + #include +@@ -1471,43 +1472,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); + } + +-u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) ++static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type) + { +- u32 opt; +- u32 insn; +- + switch (type) { + case AARCH64_INSN_MB_SY: +- opt = 0xf; +- break; ++ return 0xf; + case AARCH64_INSN_MB_ST: +- opt = 0xe; +- break; ++ return 0xe; + case AARCH64_INSN_MB_LD: +- opt = 0xd; +- break; ++ return 0xd; + case AARCH64_INSN_MB_ISH: +- opt = 0xb; +- break; ++ return 0xb; + case AARCH64_INSN_MB_ISHST: +- opt = 0xa; +- break; ++ return 0xa; + case AARCH64_INSN_MB_ISHLD: +- opt = 0x9; +- break; ++ return 0x9; + case AARCH64_INSN_MB_NSH: +- opt = 0x7; +- break; ++ return 0x7; + case AARCH64_INSN_MB_NSHST: +- opt = 0x6; +- break; ++ return 0x6; + case AARCH64_INSN_MB_NSHLD: +- opt = 0x5; +- break; ++ return 0x5; + default: +- pr_err("%s: unknown dmb type %d\n", __func__, type); ++ pr_err("%s: unknown barrier type %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } ++} ++ ++u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) ++{ ++ u32 opt; ++ u32 insn; ++ ++ opt = __get_barrier_crm_val(type); ++ if (opt == AARCH64_BREAK_FAULT) ++ return AARCH64_BREAK_FAULT; + + insn = aarch64_insn_get_dmb_value(); + insn &= ~GENMASK(11, 8); +@@ -1515,3 +1514,18 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) + + return insn; + } ++ ++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type) ++{ ++ u32 opt, insn; ++ ++ opt = __get_barrier_crm_val(type); ++ if (opt == AARCH64_BREAK_FAULT) ++ return AARCH64_BREAK_FAULT; ++ ++ insn = aarch64_insn_get_dsb_base_value(); ++ insn &= ~GENMASK(11, 8); ++ insn |= (opt << 8); ++ ++ return insn; ++} +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c +index 5074bd1d37b5f6..75523c1be07350 100644 +--- a/arch/arm64/net/bpf_jit_comp.c ++++ b/arch/arm64/net/bpf_jit_comp.c +@@ -7,6 +7,7 @@ + + #define pr_fmt(fmt) "bpf_jit: " fmt + ++#include + #include + #include + #include +@@ -17,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -653,7 +655,51 @@ static void build_plt(struct jit_ctx *ctx) + plt->target = (u64)&dummy_tramp; + } + +-static void build_epilogue(struct jit_ctx *ctx) ++/* Clobbers BPF registers 1-4, aka x0-x3 */ ++static 
void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx) ++{ ++ const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */ ++ u8 k = get_spectre_bhb_loop_value(); ++ ++ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) || ++ cpu_mitigations_off() || __nospectre_bhb || ++ arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) ++ return; ++ ++ if (capable(CAP_SYS_ADMIN)) ++ return; ++ ++ if (supports_clearbhb(SCOPE_SYSTEM)) { ++ emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx); ++ return; ++ } ++ ++ if (k) { ++ emit_a64_mov_i64(r1, k, ctx); ++ emit(A64_B(1), ctx); ++ emit(A64_SUBS_I(true, r1, r1, 1), ctx); ++ emit(A64_B_(A64_COND_NE, -2), ctx); ++ emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx); ++ emit(aarch64_insn_get_isb_value(), ctx); ++ } ++ ++ if (is_spectre_bhb_fw_mitigated()) { ++ emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR, ++ ARM_SMCCC_ARCH_WORKAROUND_3), ctx); ++ switch (arm_smccc_1_1_get_conduit()) { ++ case SMCCC_CONDUIT_HVC: ++ emit(aarch64_insn_get_hvc_value(), ctx); ++ break; ++ case SMCCC_CONDUIT_SMC: ++ emit(aarch64_insn_get_smc_value(), ctx); ++ break; ++ default: ++ pr_err_once("Firmware mitigation enabled with unknown conduit\n"); ++ } ++ } ++} ++ ++static void build_epilogue(struct jit_ctx *ctx, bool was_classic) + { + const u8 r0 = bpf2a64[BPF_REG_0]; + const u8 r6 = bpf2a64[BPF_REG_6]; +@@ -675,10 +721,13 @@ static void build_epilogue(struct jit_ctx *ctx) + emit(A64_POP(r8, r9, A64_SP), ctx); + emit(A64_POP(r6, r7, A64_SP), ctx); + ++ if (was_classic) ++ build_bhb_mitigation(ctx); ++ + /* Restore FP/LR registers */ + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + +- /* Set return value */ ++ /* Move the return value from bpf:r0 (aka x7) to x0 */ + emit(A64_MOV(1, A64_R(0), r0), ctx); + + /* Authenticate lr */ +@@ -1586,7 +1635,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + } + + ctx.epilogue_offset = ctx.idx; +- build_epilogue(&ctx); ++ build_epilogue(&ctx, was_classic); + build_plt(&ctx); + + extable_align = __alignof__(struct exception_table_entry); +@@ -1622,7 +1671,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + goto out_off; + } + +- build_epilogue(&ctx); ++ build_epilogue(&ctx, was_classic); + build_plt(&ctx); + + /* 3. Extra pass to validate JITed code. */ +diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h +index 4a2b40ce39e091..841612913f0d1b 100644 +--- a/arch/mips/include/asm/ptrace.h ++++ b/arch/mips/include/asm/ptrace.h +@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs, + + /* Query offset/name of register from its name/offset */ + extern int regs_query_register_offset(const char *name); +-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) ++#define MAX_REG_OFFSET \ ++ (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) + + /** + * regs_get_register() - get register value from its offset +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index d874ea22512b5c..4372657ab0d6fa 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2610,6 +2610,17 @@ config MITIGATION_SPECTRE_BHI + indirect branches. + See + ++config MITIGATION_ITS ++ bool "Enable Indirect Target Selection mitigation" ++ depends on CPU_SUP_INTEL && X86_64 ++ depends on RETPOLINE && RETHUNK ++ default y ++ help ++ Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in ++ BPU on some Intel CPUs that may allow Spectre V2 style attacks. If ++ disabled, mitigation cannot be enabled via cmdline. 
++ See ++ + endif + + config ARCH_HAS_ADD_PAGES +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index 2192b6c33ea009..1f9e508ac075c3 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -1569,7 +1569,9 @@ SYM_CODE_END(rewind_stack_and_make_dead) + * ORC to unwind properly. + * + * The alignment is for performance and not for safety, and may be safely +- * refactored in the future if needed. ++ * refactored in the future if needed. The .skips are for safety, to ensure ++ * that all RETs are in the second half of a cacheline to mitigate Indirect ++ * Target Selection, rather than taking the slowpath via its_return_thunk. + */ + SYM_FUNC_START(clear_bhb_loop) + push %rbp +@@ -1579,10 +1581,22 @@ SYM_FUNC_START(clear_bhb_loop) + call 1f + jmp 5f + .align 64, 0xcc ++ /* ++ * Shift instructions so that the RET is in the upper half of the ++ * cacheline and don't take the slowpath to its_return_thunk. ++ */ ++ .skip 32 - (.Lret1 - 1f), 0xcc + ANNOTATE_INTRA_FUNCTION_CALL + 1: call 2f +- RET ++.Lret1: RET + .align 64, 0xcc ++ /* ++ * As above shift instructions for RET at .Lret2 as well. ++ * ++ * This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc ++ * but some Clang versions (e.g. 18) don't like this. ++ */ ++ .skip 32 - 18, 0xcc + 2: movl $5, %eax + 3: jmp 4f + nop +@@ -1590,7 +1604,7 @@ SYM_FUNC_START(clear_bhb_loop) + jnz 3b + sub $1, %ecx + jnz 1b +- RET ++.Lret2: RET + 5: lfence + pop %rbp + RET +diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h +index cb9ce0f9e78e05..6740b839153a04 100644 +--- a/arch/x86/include/asm/alternative.h ++++ b/arch/x86/include/asm/alternative.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + #define ALT_FLAGS_SHIFT 16 + +@@ -130,6 +131,37 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog, + } + #endif + ++#ifdef CONFIG_MITIGATION_ITS ++extern void its_init_mod(struct module *mod); ++extern void its_fini_mod(struct module *mod); ++extern void its_free_mod(struct module *mod); ++extern u8 *its_static_thunk(int reg); ++#else /* CONFIG_MITIGATION_ITS */ ++static inline void its_init_mod(struct module *mod) { } ++static inline void its_fini_mod(struct module *mod) { } ++static inline void its_free_mod(struct module *mod) { } ++static inline u8 *its_static_thunk(int reg) ++{ ++ WARN_ONCE(1, "ITS not compiled in"); ++ ++ return NULL; ++} ++#endif ++ ++#if defined(CONFIG_RETHUNK) && defined(CONFIG_OBJTOOL) ++extern bool cpu_wants_rethunk(void); ++extern bool cpu_wants_rethunk_at(void *addr); ++#else ++static __always_inline bool cpu_wants_rethunk(void) ++{ ++ return false; ++} ++static __always_inline bool cpu_wants_rethunk_at(void *addr) ++{ ++ return false; ++} ++#endif ++ + #ifdef CONFIG_SMP + extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 55d18eef6775a6..8a2482651a6f1e 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -468,6 +468,7 @@ + #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */ + #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */ + #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ ++#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */ + + /* + * BUG word(s) +@@ 
-518,4 +519,6 @@ + #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */ + #define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */ + #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ ++#define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */ ++#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h +index 0ee6ed0ff2bf20..79c35947e20cb7 100644 +--- a/arch/x86/include/asm/microcode.h ++++ b/arch/x86/include/asm/microcode.h +@@ -17,10 +17,12 @@ struct ucode_cpu_info { + void load_ucode_bsp(void); + void load_ucode_ap(void); + void microcode_bsp_resume(void); ++bool __init microcode_loader_disabled(void); + #else + static inline void load_ucode_bsp(void) { } + static inline void load_ucode_ap(void) { } + static inline void microcode_bsp_resume(void) { } ++static inline bool __init microcode_loader_disabled(void) { return false; } + #endif + + extern unsigned long initrd_start_early; +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 623bb48774d44c..9fbad4cb971bff 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -180,6 +180,14 @@ + * VERW clears CPU Register + * File. + */ ++#define ARCH_CAP_ITS_NO BIT_ULL(62) /* ++ * Not susceptible to ++ * Indirect Target Selection. ++ * This bit is not set by ++ * HW, but is synthesized by ++ * VMMs for guests to know ++ * their affected status. ++ */ + + #define ARCH_CAP_XAPIC_DISABLE BIT(21) /* + * IA32_XAPIC_DISABLE_STATUS MSR +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index ee642d26e30457..bc4fa6d09d29d9 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -219,9 +219,8 @@ + .endm + + /* +- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call +- * to the retpoline thunk with a CS prefix when the register requires +- * a RAX prefix byte to encode. Also see apply_retpolines(). ++ * Emits a conditional CS prefix that is compatible with ++ * -mindirect-branch-cs-prefix. 
+ */ + .macro __CS_PREFIX reg:req + .irp rs,r8,r9,r10,r11,r12,r13,r14,r15 +@@ -365,10 +364,14 @@ + ".long 999b\n\t" \ + ".popsection\n\t" + ++#define ITS_THUNK_SIZE 64 ++ + typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; ++typedef u8 its_thunk_t[ITS_THUNK_SIZE]; + extern retpoline_thunk_t __x86_indirect_thunk_array[]; + extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; + extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; ++extern its_thunk_t __x86_indirect_its_thunk_array[]; + + #ifdef CONFIG_RETHUNK + extern void __x86_return_thunk(void); +@@ -392,6 +395,12 @@ static inline void srso_return_thunk(void) {} + static inline void srso_alias_return_thunk(void) {} + #endif + ++#ifdef CONFIG_MITIGATION_ITS ++extern void its_return_thunk(void); ++#else ++static inline void its_return_thunk(void) {} ++#endif ++ + extern void retbleed_return_thunk(void); + extern void srso_return_thunk(void); + extern void srso_alias_return_thunk(void); +@@ -412,11 +421,6 @@ extern void (*x86_return_thunk)(void); + #ifdef CONFIG_CALL_DEPTH_TRACKING + extern void __x86_return_skl(void); + +-static inline void x86_set_skl_return_thunk(void) +-{ +- x86_return_thunk = &__x86_return_skl; +-} +- + #define CALL_DEPTH_ACCOUNT \ + ALTERNATIVE("", \ + __stringify(INCREMENT_CALL_DEPTH), \ +@@ -429,7 +433,6 @@ DECLARE_PER_CPU(u64, __x86_stuffs_count); + DECLARE_PER_CPU(u64, __x86_ctxsw_count); + #endif + #else +-static inline void x86_set_skl_return_thunk(void) {} + + #define CALL_DEPTH_ACCOUNT "" + +@@ -454,20 +457,23 @@ static inline void x86_set_skl_return_thunk(void) {} + + #ifdef CONFIG_X86_64 + ++/* ++ * Emits a conditional CS prefix that is compatible with ++ * -mindirect-branch-cs-prefix. ++ */ ++#define __CS_PREFIX(reg) \ ++ ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \ ++ ".ifc \\rs," reg "\n" \ ++ ".byte 0x2e\n" \ ++ ".endif\n" \ ++ ".endr\n" ++ + /* + * Inline asm uses the %V modifier which is only in newer GCC + * which is ensured when CONFIG_RETPOLINE is defined. + */ +-# define CALL_NOSPEC \ +- ALTERNATIVE_2( \ +- ANNOTATE_RETPOLINE_SAFE \ +- "call *%[thunk_target]\n", \ +- "call __x86_indirect_thunk_%V[thunk_target]\n", \ +- X86_FEATURE_RETPOLINE, \ +- "lfence;\n" \ +- ANNOTATE_RETPOLINE_SAFE \ +- "call *%[thunk_target]\n", \ +- X86_FEATURE_RETPOLINE_LFENCE) ++#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \ ++ "call __x86_indirect_thunk_%V[thunk_target]\n" + + # define THUNK_TARGET(addr) [thunk_target] "r" (addr) + +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index aae7456ece0700..4817e424d69658 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -18,6 +18,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -30,6 +32,8 @@ + #include + #include + #include ++#include ++#include + + int __read_mostly alternatives_patched; + +@@ -123,6 +127,135 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] = + #endif + }; + ++#ifdef CONFIG_MITIGATION_ITS ++ ++#ifdef CONFIG_MODULES ++static struct module *its_mod; ++static void *its_page; ++static unsigned int its_offset; ++ ++/* Initialize a thunk with the "jmp *reg; int3" instructions. 
*/ ++static void *its_init_thunk(void *thunk, int reg) ++{ ++ u8 *bytes = thunk; ++ int i = 0; ++ ++ if (reg >= 8) { ++ bytes[i++] = 0x41; /* REX.B prefix */ ++ reg -= 8; ++ } ++ bytes[i++] = 0xff; ++ bytes[i++] = 0xe0 + reg; /* jmp *reg */ ++ bytes[i++] = 0xcc; ++ ++ return thunk; ++} ++ ++void its_init_mod(struct module *mod) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) ++ return; ++ ++ mutex_lock(&text_mutex); ++ its_mod = mod; ++ its_page = NULL; ++} ++ ++void its_fini_mod(struct module *mod) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) ++ return; ++ ++ WARN_ON_ONCE(its_mod != mod); ++ ++ its_mod = NULL; ++ its_page = NULL; ++ mutex_unlock(&text_mutex); ++ ++ for (int i = 0; i < mod->its_num_pages; i++) { ++ void *page = mod->its_page_array[i]; ++ set_memory_rox((unsigned long)page, 1); ++ } ++} ++ ++void its_free_mod(struct module *mod) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) ++ return; ++ ++ for (int i = 0; i < mod->its_num_pages; i++) { ++ void *page = mod->its_page_array[i]; ++ module_memfree(page); ++ } ++ kfree(mod->its_page_array); ++} ++ ++DEFINE_FREE(its_execmem, void *, if (_T) module_memfree(_T)); ++ ++static void *its_alloc(void) ++{ ++ void *page __free(its_execmem) = module_alloc(PAGE_SIZE); ++ ++ if (!page) ++ return NULL; ++ ++ if (its_mod) { ++ void *tmp = krealloc(its_mod->its_page_array, ++ (its_mod->its_num_pages+1) * sizeof(void *), ++ GFP_KERNEL); ++ if (!tmp) ++ return NULL; ++ ++ its_mod->its_page_array = tmp; ++ its_mod->its_page_array[its_mod->its_num_pages++] = page; ++ } ++ ++ return no_free_ptr(page); ++} ++ ++static void *its_allocate_thunk(int reg) ++{ ++ int size = 3 + (reg / 8); ++ void *thunk; ++ ++ if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) { ++ its_page = its_alloc(); ++ if (!its_page) { ++ pr_err("ITS page allocation failed\n"); ++ return NULL; ++ } ++ memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE); ++ its_offset = 32; ++ } ++ ++ /* ++ * If the indirect branch instruction will be in the lower half ++ * of a cacheline, then update the offset to reach the upper half. ++ */ ++ if ((its_offset + size - 1) % 64 < 32) ++ its_offset = ((its_offset - 1) | 0x3F) + 33; ++ ++ thunk = its_page + its_offset; ++ its_offset += size; ++ ++ set_memory_rw((unsigned long)its_page, 1); ++ thunk = its_init_thunk(thunk, reg); ++ set_memory_rox((unsigned long)its_page, 1); ++ ++ return thunk; ++} ++ ++#else /* CONFIG_MODULES */ ++ ++static void *its_allocate_thunk(int reg) ++{ ++ return NULL; ++} ++ ++#endif /* CONFIG_MODULES */ ++ ++#endif /* CONFIG_MITIGATION_ITS */ ++ + /* + * Fill the buffer with a single effective instruction of size @len. 
+ * +@@ -521,7 +654,8 @@ static int emit_indirect(int op, int reg, u8 *bytes) + return i; + } + +-static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) ++static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes, ++ void *call_dest, void *jmp_dest) + { + u8 op = insn->opcode.bytes[0]; + int i = 0; +@@ -542,7 +676,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 + switch (op) { + case CALL_INSN_OPCODE: + __text_gen_insn(bytes+i, op, addr+i, +- __x86_indirect_call_thunk_array[reg], ++ call_dest, + CALL_INSN_SIZE); + i += CALL_INSN_SIZE; + break; +@@ -550,7 +684,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 + case JMP32_INSN_OPCODE: + clang_jcc: + __text_gen_insn(bytes+i, op, addr+i, +- __x86_indirect_jump_thunk_array[reg], ++ jmp_dest, + JMP32_INSN_SIZE); + i += JMP32_INSN_SIZE; + break; +@@ -565,6 +699,39 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 + return i; + } + ++static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) ++{ ++ return __emit_trampoline(addr, insn, bytes, ++ __x86_indirect_call_thunk_array[reg], ++ __x86_indirect_jump_thunk_array[reg]); ++} ++ ++#ifdef CONFIG_MITIGATION_ITS ++static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes) ++{ ++ u8 *thunk = __x86_indirect_its_thunk_array[reg]; ++ u8 *tmp = its_allocate_thunk(reg); ++ ++ if (tmp) ++ thunk = tmp; ++ ++ return __emit_trampoline(addr, insn, bytes, thunk, thunk); ++} ++ ++/* Check if an indirect branch is at ITS-unsafe address */ ++static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) ++ return false; ++ ++ /* Indirect branch opcode is 2 or 3 bytes depending on reg */ ++ addr += 1 + reg / 8; ++ ++ /* Lower-half of the cacheline? */ ++ return !(addr & 0x20); ++} ++#endif ++ + /* + * Rewrite the compiler generated retpoline thunk calls. + * +@@ -639,6 +806,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) + bytes[i++] = 0xe8; /* LFENCE */ + } + ++#ifdef CONFIG_MITIGATION_ITS ++ /* ++ * Check if the address of last byte of emitted-indirect is in ++ * lower-half of the cacheline. Such branches need ITS mitigation. ++ */ ++ if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg)) ++ return emit_its_trampoline(addr, insn, reg, bytes); ++#endif ++ + ret = emit_indirect(op, reg, bytes + i); + if (ret < 0) + return ret; +@@ -710,6 +886,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) + + #ifdef CONFIG_RETHUNK + ++bool cpu_wants_rethunk(void) ++{ ++ return cpu_feature_enabled(X86_FEATURE_RETHUNK); ++} ++ ++bool cpu_wants_rethunk_at(void *addr) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_RETHUNK)) ++ return false; ++ if (x86_return_thunk != its_return_thunk) ++ return true; ++ ++ return !((unsigned long)addr & 0x20); ++} ++ + /* + * Rewrite the compiler generated return thunk tail-calls. + * +@@ -726,7 +917,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes) + int i = 0; + + /* Patch the custom return thunks... 
*/ +- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { ++ if (cpu_wants_rethunk_at(addr)) { + i = JMP32_INSN_SIZE; + __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); + } else { +@@ -743,7 +934,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) + { + s32 *s; + +- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) ++ if (cpu_wants_rethunk()) + static_call_force_reinit(); + + for (s = start; s < end; s++) { +@@ -1258,6 +1449,13 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + static void poison_cfi(void *addr) { } + #endif + ++u8 *its_static_thunk(int reg) ++{ ++ u8 *thunk = __x86_indirect_its_thunk_array[reg]; ++ ++ return thunk; ++} ++ + #endif + + void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, +@@ -1575,6 +1773,8 @@ static noinline void __init alt_reloc_selftest(void) + + void __init alternative_instructions(void) + { ++ u64 ibt; ++ + int3_selftest(); + + /* +@@ -1612,6 +1812,9 @@ void __init alternative_instructions(void) + */ + paravirt_set_cap(); + ++ /* Keep CET-IBT disabled until caller/callee are patched */ ++ ibt = ibt_save(/*disable*/ true); ++ + /* + * First patch paravirt functions, such that we overwrite the indirect + * call with the direct call. +@@ -1645,6 +1848,8 @@ void __init alternative_instructions(void) + */ + apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end); + ++ ibt_restore(ibt); ++ + #ifdef CONFIG_SMP + /* Patch to UP if other cpus not imminent. */ + if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) { +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 78545f7e9cc6ca..07b45bbf6348de 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -49,6 +49,7 @@ static void __init srbds_select_mitigation(void); + static void __init l1d_flush_select_mitigation(void); + static void __init srso_select_mitigation(void); + static void __init gds_select_mitigation(void); ++static void __init its_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR without task-specific bits set */ + u64 x86_spec_ctrl_base; +@@ -67,6 +68,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex); + + void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk; + ++static void __init set_return_thunk(void *thunk) ++{ ++ if (x86_return_thunk != __x86_return_thunk) ++ pr_warn("x86/bugs: return thunk changed\n"); ++ ++ x86_return_thunk = thunk; ++} ++ + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ + static void update_spec_ctrl(u64 val) + { +@@ -175,6 +184,7 @@ void __init cpu_select_mitigations(void) + */ + srso_select_mitigation(); + gds_select_mitigation(); ++ its_select_mitigation(); + } + + /* +@@ -1102,7 +1112,7 @@ static void __init retbleed_select_mitigation(void) + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); + +- x86_return_thunk = retbleed_return_thunk; ++ set_return_thunk(retbleed_return_thunk); + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) +@@ -1136,7 +1146,9 @@ static void __init retbleed_select_mitigation(void) + case RETBLEED_MITIGATION_STUFF: + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); +- x86_set_skl_return_thunk(); ++#ifdef CONFIG_CALL_DEPTH_TRACKING ++ set_return_thunk(&__x86_return_skl); ++#endif + break; + + default: +@@ -1170,6 +1182,146 @@ static void __init retbleed_select_mitigation(void) + pr_info("%s\n", retbleed_strings[retbleed_mitigation]); + } + ++#undef 
pr_fmt ++#define pr_fmt(fmt) "ITS: " fmt ++ ++enum its_mitigation_cmd { ++ ITS_CMD_OFF, ++ ITS_CMD_ON, ++ ITS_CMD_VMEXIT, ++ ITS_CMD_RSB_STUFF, ++}; ++ ++enum its_mitigation { ++ ITS_MITIGATION_OFF, ++ ITS_MITIGATION_VMEXIT_ONLY, ++ ITS_MITIGATION_ALIGNED_THUNKS, ++ ITS_MITIGATION_RETPOLINE_STUFF, ++}; ++ ++static const char * const its_strings[] = { ++ [ITS_MITIGATION_OFF] = "Vulnerable", ++ [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected", ++ [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks", ++ [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB", ++}; ++ ++static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS; ++ ++static enum its_mitigation_cmd its_cmd __ro_after_init = ++ IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF; ++ ++static int __init its_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { ++ pr_err("Mitigation disabled at compile time, ignoring option (%s)", str); ++ return 0; ++ } ++ ++ if (!strcmp(str, "off")) { ++ its_cmd = ITS_CMD_OFF; ++ } else if (!strcmp(str, "on")) { ++ its_cmd = ITS_CMD_ON; ++ } else if (!strcmp(str, "force")) { ++ its_cmd = ITS_CMD_ON; ++ setup_force_cpu_bug(X86_BUG_ITS); ++ } else if (!strcmp(str, "vmexit")) { ++ its_cmd = ITS_CMD_VMEXIT; ++ } else if (!strcmp(str, "stuff")) { ++ its_cmd = ITS_CMD_RSB_STUFF; ++ } else { ++ pr_err("Ignoring unknown indirect_target_selection option (%s).", str); ++ } ++ ++ return 0; ++} ++early_param("indirect_target_selection", its_parse_cmdline); ++ ++static void __init its_select_mitigation(void) ++{ ++ enum its_mitigation_cmd cmd = its_cmd; ++ ++ if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) { ++ its_mitigation = ITS_MITIGATION_OFF; ++ return; ++ } ++ ++ /* Retpoline+CDT mitigates ITS, bail out */ ++ if (boot_cpu_has(X86_FEATURE_RETPOLINE) && ++ boot_cpu_has(X86_FEATURE_CALL_DEPTH)) { ++ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; ++ goto out; ++ } ++ ++ /* Exit early to avoid irrelevant warnings */ ++ if (cmd == ITS_CMD_OFF) { ++ its_mitigation = ITS_MITIGATION_OFF; ++ goto out; ++ } ++ if (spectre_v2_enabled == SPECTRE_V2_NONE) { ++ pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); ++ its_mitigation = ITS_MITIGATION_OFF; ++ goto out; ++ } ++ if (!IS_ENABLED(CONFIG_RETPOLINE) || !IS_ENABLED(CONFIG_RETHUNK)) { ++ pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); ++ its_mitigation = ITS_MITIGATION_OFF; ++ goto out; ++ } ++ if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { ++ pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); ++ its_mitigation = ITS_MITIGATION_OFF; ++ goto out; ++ } ++ if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { ++ pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); ++ its_mitigation = ITS_MITIGATION_OFF; ++ goto out; ++ } ++ ++ if (cmd == ITS_CMD_RSB_STUFF && ++ (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING))) { ++ pr_err("RSB stuff mitigation not supported, using default\n"); ++ cmd = ITS_CMD_ON; ++ } ++ ++ switch (cmd) { ++ case ITS_CMD_OFF: ++ its_mitigation = ITS_MITIGATION_OFF; ++ break; ++ case ITS_CMD_VMEXIT: ++ if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) { ++ its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; ++ goto out; ++ } ++ fallthrough; ++ case ITS_CMD_ON: ++ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; ++ if 
(!boot_cpu_has(X86_FEATURE_RETPOLINE)) ++ setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); ++ setup_force_cpu_cap(X86_FEATURE_RETHUNK); ++ set_return_thunk(its_return_thunk); ++ break; ++ case ITS_CMD_RSB_STUFF: ++ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; ++ setup_force_cpu_cap(X86_FEATURE_RETHUNK); ++ setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); ++#ifdef CONFIG_CALL_DEPTH_TRACKING ++ set_return_thunk(&__x86_return_skl); ++#endif ++ if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) { ++ retbleed_mitigation = RETBLEED_MITIGATION_STUFF; ++ pr_info("Retbleed mitigation updated to stuffing\n"); ++ } ++ break; ++ } ++out: ++ pr_info("%s\n", its_strings[its_mitigation]); ++} ++ + #undef pr_fmt + #define pr_fmt(fmt) "Spectre V2 : " fmt + +@@ -1677,10 +1829,11 @@ static void __init bhi_select_mitigation(void) + return; + } + +- if (spec_ctrl_bhi_dis()) ++ if (!IS_ENABLED(CONFIG_X86_64)) + return; + +- if (!IS_ENABLED(CONFIG_X86_64)) ++ /* Mitigate in hardware if supported */ ++ if (spec_ctrl_bhi_dis()) + return; + + /* Mitigate KVM by default */ +@@ -2606,10 +2759,10 @@ static void __init srso_select_mitigation(void) + + if (boot_cpu_data.x86 == 0x19) { + setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); +- x86_return_thunk = srso_alias_return_thunk; ++ set_return_thunk(srso_alias_return_thunk); + } else { + setup_force_cpu_cap(X86_FEATURE_SRSO); +- x86_return_thunk = srso_return_thunk; ++ set_return_thunk(srso_return_thunk); + } + if (has_microcode) + srso_mitigation = SRSO_MITIGATION_SAFE_RET; +@@ -2793,6 +2946,11 @@ static ssize_t rfds_show_state(char *buf) + return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); + } + ++static ssize_t its_show_state(char *buf) ++{ ++ return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); ++} ++ + static char *stibp_state(void) + { + if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && +@@ -2975,6 +3133,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + case X86_BUG_RFDS: + return rfds_show_state(buf); + ++ case X86_BUG_ITS: ++ return its_show_state(buf); ++ + default: + break; + } +@@ -3054,4 +3215,9 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib + { + return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); + } ++ ++ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_ITS); ++} + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index a844110691f978..067e31fb9e165d 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1272,6 +1272,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + #define GDS BIT(6) + /* CPU is affected by Register File Data Sampling */ + #define RFDS BIT(7) ++/* CPU is affected by Indirect Target Selection */ ++#define ITS BIT(8) ++/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ ++#define ITS_NATIVE_ONLY BIT(9) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1283,22 +1287,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), ++ 
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS), + VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), +- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), +- VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), +- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), +- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), +- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), +- VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), +- VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), ++ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY), ++ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), +- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), ++ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS), + VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS), + VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS), +@@ -1362,6 +1369,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr) + return cpu_matches(cpu_vuln_blacklist, RFDS); + } + ++static bool __init vulnerable_to_its(u64 x86_arch_cap_msr) ++{ ++ /* The "immunity" bit trumps everything else: */ ++ if (x86_arch_cap_msr & ARCH_CAP_ITS_NO) ++ return false; ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ++ return false; ++ ++ /* None of the affected CPUs have BHI_CTRL */ ++ if (boot_cpu_has(X86_FEATURE_BHI_CTRL)) ++ return false; ++ ++ /* ++ * If a VMM did not expose ITS_NO, assume that a guest could ++ * be running on a vulnerable hardware or may migrate to such ++ * hardware. 
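
As a rough standalone illustration of the cacheline rule that the ITS changes above keep testing (the "addr & 0x20" check in cpu_wants_indirect_its_thunk_at() and the linker asserts further down), here is that check reduced to plain C. The instruction-length arithmetic is the only subtle part: an indirect jmp/call through a register is two bytes (FF /r), plus one REX prefix byte for r8..r15, which is what "1 + reg / 8" accounts for. Illustrative only; the constants mirror the patch, the helper name is made up.

#include <stdbool.h>
#include <stdio.h>

/* ITS can mis-predict an indirect branch whose *last* byte lies in the
 * lower half (first 32 bytes) of its 64-byte cacheline.
 */
static bool its_unsafe_branch(unsigned long addr, int reg)
{
	addr += 1 + reg / 8;	/* address of the branch's last byte */
	return !(addr & 0x20);	/* lower half of the cacheline? */
}

int main(void)
{
	printf("%d\n", its_unsafe_branch(0x1000, 0));	/* 1: needs an ITS thunk */
	printf("%d\n", its_unsafe_branch(0x1020, 0));	/* 0: upper half is safe */
	return 0;
}
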
++ */ ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ return true; ++ ++ if (cpu_matches(cpu_vuln_blacklist, ITS)) ++ return true; ++ ++ return false; ++} ++ + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + { + u64 x86_arch_cap_msr = x86_read_arch_cap_msr(); +@@ -1476,9 +1509,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + if (vulnerable_to_rfds(x86_arch_cap_msr)) + setup_force_cpu_bug(X86_BUG_RFDS); + +- /* When virtualized, eIBRS could be hidden, assume vulnerable */ +- if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) && +- !cpu_matches(cpu_vuln_whitelist, NO_BHI) && ++ /* ++ * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with ++ * BHI_NO still need to use the BHI mitigation to prevent Intra-mode ++ * attacks. When virtualized, eIBRS could be hidden, assume vulnerable. ++ */ ++ if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) && + (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) || + boot_cpu_has(X86_FEATURE_HYPERVISOR))) + setup_force_cpu_bug(X86_BUG_BHI); +@@ -1486,6 +1522,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET)) + setup_force_cpu_bug(X86_BUG_IBPB_NO_RET); + ++ if (vulnerable_to_its(x86_arch_cap_msr)) { ++ setup_force_cpu_bug(X86_BUG_ITS); ++ if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY)) ++ setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); ++ } ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c +index ce78e39004e0ea..9b0570f769eb3d 100644 +--- a/arch/x86/kernel/cpu/microcode/amd.c ++++ b/arch/x86/kernel/cpu/microcode/amd.c +@@ -1102,15 +1102,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz + + static int __init save_microcode_in_initrd(void) + { +- unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; + struct cont_desc desc = { 0 }; ++ unsigned int cpuid_1_eax; + enum ucode_state ret; + struct cpio_data cp; + +- if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) ++ if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) + return 0; + ++ cpuid_1_eax = native_cpuid_eax(1); ++ + if (!find_blobs_in_containers(&cp)) + return -EINVAL; + +diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c +index c15c7b862bec1c..5b47c320f17a6d 100644 +--- a/arch/x86/kernel/cpu/microcode/core.c ++++ b/arch/x86/kernel/cpu/microcode/core.c +@@ -43,8 +43,8 @@ + + #define DRIVER_VERSION "2.2" + +-static struct microcode_ops *microcode_ops; +-bool dis_ucode_ldr = true; ++static struct microcode_ops *microcode_ops; ++static bool dis_ucode_ldr = false; + + bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); + module_param(force_minrev, bool, S_IRUSR | S_IWUSR); +@@ -91,6 +91,9 @@ static bool amd_check_current_patch_level(void) + u32 lvl, dummy, i; + u32 *levels; + ++ if (x86_cpuid_vendor() != X86_VENDOR_AMD) ++ return false; ++ + native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); + + levels = final_levels; +@@ -102,27 +105,29 @@ static bool amd_check_current_patch_level(void) + return false; + } + +-static bool __init check_loader_disabled_bsp(void) ++bool __init microcode_loader_disabled(void) + { +- static const char *__dis_opt_str = "dis_ucode_ldr"; +- const char *cmdline = boot_command_line; +- const char *option = __dis_opt_str; ++ if (dis_ucode_ldr) ++ return true; + + /* +- * CPUID(1).ECX[31]: 
reserved for hypervisor use. This is still not +- * completely accurate as xen pv guests don't see that CPUID bit set but +- * that's good enough as they don't land on the BSP path anyway. ++ * Disable when: ++ * ++ * 1) The CPU does not support CPUID. ++ * ++ * 2) Bit 31 in CPUID[1]:ECX is clear ++ * The bit is reserved for hypervisor use. This is still not ++ * completely accurate as XEN PV guests don't see that CPUID bit ++ * set, but that's good enough as they don't land on the BSP ++ * path anyway. ++ * ++ * 3) Certain AMD patch levels are not allowed to be ++ * overwritten. + */ +- if (native_cpuid_ecx(1) & BIT(31)) +- return true; +- +- if (x86_cpuid_vendor() == X86_VENDOR_AMD) { +- if (amd_check_current_patch_level()) +- return true; +- } +- +- if (cmdline_find_option_bool(cmdline, option) <= 0) +- dis_ucode_ldr = false; ++ if (!have_cpuid_p() || ++ native_cpuid_ecx(1) & BIT(31) || ++ amd_check_current_patch_level()) ++ dis_ucode_ldr = true; + + return dis_ucode_ldr; + } +@@ -132,7 +137,10 @@ void __init load_ucode_bsp(void) + unsigned int cpuid_1_eax; + bool intel = true; + +- if (!have_cpuid_p()) ++ if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0) ++ dis_ucode_ldr = true; ++ ++ if (microcode_loader_disabled()) + return; + + cpuid_1_eax = native_cpuid_eax(1); +@@ -153,9 +161,6 @@ void __init load_ucode_bsp(void) + return; + } + +- if (check_loader_disabled_bsp()) +- return; +- + if (intel) + load_ucode_intel_bsp(&early_data); + else +@@ -166,6 +171,11 @@ void load_ucode_ap(void) + { + unsigned int cpuid_1_eax; + ++ /* ++ * Can't use microcode_loader_disabled() here - .init section ++ * hell. It doesn't have to either - the BSP variant must've ++ * parsed cmdline already anyway. ++ */ + if (dis_ucode_ldr) + return; + +@@ -817,7 +827,7 @@ static int __init microcode_init(void) + struct cpuinfo_x86 *c = &boot_cpu_data; + int error; + +- if (dis_ucode_ldr) ++ if (microcode_loader_disabled()) + return -EINVAL; + + if (c->x86_vendor == X86_VENDOR_INTEL) +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c +index 9d7baf2573bcde..4e5a71796fbdb1 100644 +--- a/arch/x86/kernel/cpu/microcode/intel.c ++++ b/arch/x86/kernel/cpu/microcode/intel.c +@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void) + if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) + return 0; + +- if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ++ if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return 0; + + uci.mc = get_microcode_blob(&uci, true); +diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h +index 21776c529fa97a..54f2b9582a7cec 100644 +--- a/arch/x86/kernel/cpu/microcode/internal.h ++++ b/arch/x86/kernel/cpu/microcode/internal.h +@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void) + return x86_family(eax); + } + +-extern bool dis_ucode_ldr; + extern bool force_minrev; + + #ifdef CONFIG_CPU_SUP_AMD +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c +index 12df54ff0e8171..50f8c8a8483be2 100644 +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -363,7 +363,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) + goto fail; + + ip = trampoline + size; +- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) ++ if (cpu_wants_rethunk_at(ip)) + __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE); + else + memcpy(ip, retq, sizeof(retq)); +diff --git 
a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c +index de001b2146abf3..375f2d7f1762d4 100644 +--- a/arch/x86/kernel/head32.c ++++ b/arch/x86/kernel/head32.c +@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) + *ptr = (unsigned long)ptep + PAGE_OFFSET; + + #ifdef CONFIG_MICROCODE_INITRD32 +- /* Running on a hypervisor? */ +- if (native_cpuid_ecx(1) & BIT(31)) +- return; +- + params = (struct boot_params *)__pa_nodebug(&boot_params); + if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) + return; +diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c +index 5f71a0cf4399a5..cfb7163cf90e95 100644 +--- a/arch/x86/kernel/module.c ++++ b/arch/x86/kernel/module.c +@@ -312,6 +312,9 @@ int module_finalize(const Elf_Ehdr *hdr, + void *pseg = (void *)para->sh_addr; + apply_paravirt(pseg, pseg + para->sh_size); + } ++ ++ its_init_mod(me); ++ + if (retpolines || cfi) { + void *rseg = NULL, *cseg = NULL; + unsigned int rsize = 0, csize = 0; +@@ -332,6 +335,9 @@ int module_finalize(const Elf_Ehdr *hdr, + void *rseg = (void *)retpolines->sh_addr; + apply_retpolines(rseg, rseg + retpolines->sh_size); + } ++ ++ its_fini_mod(me); ++ + if (returns) { + void *rseg = (void *)returns->sh_addr; + apply_returns(rseg, rseg + returns->sh_size); +@@ -379,4 +385,5 @@ int module_finalize(const Elf_Ehdr *hdr, + void module_arch_cleanup(struct module *mod) + { + alternatives_smp_module_del(mod); ++ its_free_mod(mod); + } +diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c +index 07961b362e2a0c..bcea882e022f50 100644 +--- a/arch/x86/kernel/static_call.c ++++ b/arch/x86/kernel/static_call.c +@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, + break; + + case RET: +- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) ++ if (cpu_wants_rethunk_at(insn)) + code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk); + else + code = &retinsn; +@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, + case JCC: + if (!func) { + func = __static_call_return; +- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) ++ if (cpu_wants_rethunk()) + func = x86_return_thunk; + } + +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index 60eb8baa44d7b7..c57d5df1abc603 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -541,4 +541,14 @@ INIT_PER_CPU(irq_stack_backing_store); + "SRSO function pair won't alias"); + #endif + ++#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B) ++. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline"); ++. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart"); ++. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array"); ++#endif ++ ++#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B) ++. 
= ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline"); ++#endif ++ + #endif /* CONFIG_X86_64 */ +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 1eeb01afa40ba9..55185670e0e566 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1621,7 +1621,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr) + ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ + ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ + ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \ +- ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO) ++ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO) + + static u64 kvm_get_arch_capabilities(void) + { +@@ -1655,6 +1655,8 @@ static u64 kvm_get_arch_capabilities(void) + data |= ARCH_CAP_MDS_NO; + if (!boot_cpu_has_bug(X86_BUG_RFDS)) + data |= ARCH_CAP_RFDS_NO; ++ if (!boot_cpu_has_bug(X86_BUG_ITS)) ++ data |= ARCH_CAP_ITS_NO; + + if (!boot_cpu_has(X86_FEATURE_RTM)) { + /* +diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S +index ffa51f392e17a3..91ce427d5af191 100644 +--- a/arch/x86/lib/retpoline.S ++++ b/arch/x86/lib/retpoline.S +@@ -360,6 +360,45 @@ SYM_FUNC_END(__x86_return_skl) + + #endif /* CONFIG_CALL_DEPTH_TRACKING */ + ++#ifdef CONFIG_MITIGATION_ITS ++ ++.macro ITS_THUNK reg ++ ++SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL) ++ UNWIND_HINT_UNDEFINED ++ ANNOTATE_NOENDBR ++ ANNOTATE_RETPOLINE_SAFE ++ jmp *%\reg ++ int3 ++ .align 32, 0xcc /* fill to the end of the line */ ++ .skip 32, 0xcc /* skip to the next upper half */ ++.endm ++ ++/* ITS mitigation requires thunks be aligned to upper half of cacheline */ ++.align 64, 0xcc ++.skip 32, 0xcc ++SYM_CODE_START(__x86_indirect_its_thunk_array) ++ ++#define GEN(reg) ITS_THUNK reg ++#include ++#undef GEN ++ ++ .align 64, 0xcc ++SYM_CODE_END(__x86_indirect_its_thunk_array) ++ ++.align 64, 0xcc ++.skip 32, 0xcc ++SYM_CODE_START(its_return_thunk) ++ UNWIND_HINT_FUNC ++ ANNOTATE_NOENDBR ++ ANNOTATE_UNRET_SAFE ++ ret ++ int3 ++SYM_CODE_END(its_return_thunk) ++EXPORT_SYMBOL(its_return_thunk) ++ ++#endif /* CONFIG_MITIGATION_ITS */ ++ + /* + * This function name is magical and is used by -mfunction-return=thunk-extern + * for the compiler to generate JMPs to it. +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 4872bb082b1935..20ce6a1bd6c57d 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -630,7 +630,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + +- /* Let nmi_uaccess_okay() know that we're changing CR3. */ ++ /* ++ * Indicate that CR3 is about to change. nmi_uaccess_okay() ++ * and others are sensitive to the window where mm_cpumask(), ++ * CR3 and cpu_tlbstate.loaded_mm are not all in sync. ++ */ + this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); + barrier(); + } +@@ -900,8 +904,16 @@ static void flush_tlb_func(void *info) + + static bool should_flush_tlb(int cpu, void *data) + { ++ struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu); + struct flush_tlb_info *info = data; + ++ /* ++ * Order the 'loaded_mm' and 'is_lazy' against their ++ * write ordering in switch_mm_irqs_off(). Ensure ++ * 'is_lazy' is at least as new as 'loaded_mm'. ++ */ ++ smp_rmb(); ++ + /* Lazy TLB will get flushed at the next context switch. 
*/ + if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu)) + return false; +@@ -910,8 +922,15 @@ static bool should_flush_tlb(int cpu, void *data) + if (!info->mm) + return true; + ++ /* ++ * While switching, the remote CPU could have state from ++ * either the prev or next mm. Assume the worst and flush. ++ */ ++ if (loaded_mm == LOADED_MM_SWITCHING) ++ return true; ++ + /* The target mm is loaded, and the CPU is not lazy. */ +- if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm) ++ if (loaded_mm == info->mm) + return true; + + /* In cpumask, but not the loaded mm? Periodically remove by flushing. */ +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index a50c99e9b5c01f..07592eef253c21 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -37,6 +37,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) + #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) + #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) + #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) ++#define EMIT5(b1, b2, b3, b4, b5) \ ++ do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0) + + #define EMIT1_off32(b1, off) \ + do { EMIT1(b1); EMIT(off, 4); } while (0) +@@ -470,7 +472,11 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) + { + u8 *prog = *pprog; + +- if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { ++ if (IS_ENABLED(CONFIG_MITIGATION_ITS) && ++ cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) { ++ OPTIMIZER_HIDE_VAR(reg); ++ emit_jump(&prog, its_static_thunk(reg), ip); ++ } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { + EMIT_LFENCE(); + EMIT2(0xFF, 0xE0 + reg); + } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { +@@ -492,7 +498,7 @@ static void emit_return(u8 **pprog, u8 *ip) + { + u8 *prog = *pprog; + +- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { ++ if (cpu_wants_rethunk()) { + emit_jump(&prog, x86_return_thunk, ip); + } else { + EMIT1(0xC3); /* ret */ +@@ -1072,6 +1078,48 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) + #define RESTORE_TAIL_CALL_CNT(stack) \ + EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) + ++static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip, ++ struct bpf_prog *bpf_prog) ++{ ++ u8 *prog = *pprog; ++ u8 *func; ++ ++ if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) { ++ /* The clearing sequence clobbers eax and ecx. */ ++ EMIT1(0x50); /* push rax */ ++ EMIT1(0x51); /* push rcx */ ++ ip += 2; ++ ++ func = (u8 *)clear_bhb_loop; ++ ip += x86_call_depth_emit_accounting(&prog, func); ++ ++ if (emit_call(&prog, func, ip)) ++ return -EINVAL; ++ EMIT1(0x59); /* pop rcx */ ++ EMIT1(0x58); /* pop rax */ ++ } ++ /* Insert IBHF instruction */ ++ if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) && ++ cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) || ++ cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) { ++ /* ++ * Add an Indirect Branch History Fence (IBHF). IBHF acts as a ++ * fence preventing branch history from before the fence from ++ * affecting indirect branches after the fence. This is ++ * specifically used in cBPF jitted code to prevent Intra-mode ++ * BHI attacks. The IBHF instruction is designed to be a NOP on ++ * hardware that doesn't need or support it. The REP and REX.W ++ * prefixes are required by the microcode, and they also ensure ++ * that the NOP is unlikely to be used in existing code. ++ * ++ * IBHF is not a valid instruction in 32-bit mode. 
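
For reference, the five bytes that EMIT5() writes just below decode on hardware without IBHF support as an ordinary hint NOP, exactly as the comment above describes. A minimal sketch of the emission outside the JIT's macro machinery (buffer handling simplified, helper name made up):

#include <stdint.h>
#include <string.h>

/* REP prefix (F3) + REX.W (48) + NOP-hint opcode bytes (0F 1E F8). */
static uint8_t *emit_ibhf(uint8_t *prog)
{
	static const uint8_t ibhf[] = { 0xF3, 0x48, 0x0F, 0x1E, 0xF8 };

	memcpy(prog, ibhf, sizeof(ibhf));
	return prog + sizeof(ibhf);
}
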
++ */ ++ EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */ ++ } ++ *pprog = prog; ++ return 0; ++} ++ + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, + int oldproglen, struct jit_context *ctx, bool jmp_padding) + { +@@ -1945,6 +1993,15 @@ st: if (is_imm8(insn->off)) + seen_exit = true; + /* Update cleanup_addr */ + ctx->cleanup_addr = proglen; ++ ++ if (bpf_prog_was_classic(bpf_prog) && ++ !capable(CAP_SYS_ADMIN)) { ++ u8 *ip = image + addrs[i - 1]; ++ ++ if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) ++ return -EINVAL; ++ } ++ + pop_callee_regs(&prog, callee_regs_used); + EMIT1(0xC9); /* leave */ + emit_return(&prog, image + addrs[i - 1] + (prog - temp)); +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index ef427ee787a99b..a5cfc1bfad51fb 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -566,6 +566,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed); + CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); + CPU_SHOW_VULN_FALLBACK(gds); + CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); ++CPU_SHOW_VULN_FALLBACK(indirect_target_selection); + + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +@@ -581,6 +582,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); + static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); + static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); + static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); ++static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -597,6 +599,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_spec_rstack_overflow.attr, + &dev_attr_gather_data_sampling.attr, + &dev_attr_reg_file_data_sampling.attr, ++ &dev_attr_indirect_target_selection.attr, + NULL + }; + +diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c +index 39f7c2d736d169..b603c25f3dfaac 100644 +--- a/drivers/clocksource/i8253.c ++++ b/drivers/clocksource/i8253.c +@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void) + #ifdef CONFIG_CLKEVT_I8253 + void clockevent_i8253_disable(void) + { +- raw_spin_lock(&i8253_lock); ++ guard(raw_spinlock_irqsave)(&i8253_lock); + + /* + * Writing the MODE register should stop the counter, according to +@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void) + outb_p(0, PIT_CH0); + + outb_p(0x30, PIT_MODE); +- +- raw_spin_unlock(&i8253_lock); + } + + static int pit_shutdown(struct clock_event_device *evt) +diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +index 30210613dc5c47..2f3054ed7b1b5b 100644 +--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +@@ -42,7 +42,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, + { + if (!ring || !ring->funcs->emit_wreg) { + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); ++ /* We just need to read back a register to post the write. ++ * Reading back the remapped register causes problems on ++ * some platforms so just read back the memory size register. 
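
The pattern used in these hdp_v*_flush_hdp() fixes is the classic posted-write flush: the write may sit in a bus buffer until some read on the same path forces it out, and any side-effect-free register works as the read target, which is why a plain memory-size register can stand in for the remapped one. A generic sketch with hypothetical MMIO accessors (names invented for illustration):

#include <stdint.h>

static volatile uint32_t *mmio;	/* hypothetical mapped register file */

static void flush_hdp_example(uint32_t flush_off, uint32_t memsize_off)
{
	mmio[flush_off] = 0;		/* posted write */
	(void)mmio[memsize_off];	/* harmless read flushes the posting */
}
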
++ */ ++ if (adev->nbio.funcs->get_memsize) ++ adev->nbio.funcs->get_memsize(adev); + } else { + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +index d3962d46908811..40705e13ca567b 100644 +--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +@@ -33,7 +33,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, + { + if (!ring || !ring->funcs->emit_wreg) { + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); ++ /* We just need to read back a register to post the write. ++ * Reading back the remapped register causes problems on ++ * some platforms so just read back the memory size register. ++ */ ++ if (adev->nbio.funcs->get_memsize) ++ adev->nbio.funcs->get_memsize(adev); + } else { + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +index f52552c5fa27b6..6b9f2e1d9d690d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c ++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +@@ -34,7 +34,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, + if (!ring || !ring->funcs->emit_wreg) { + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, + 0); +- RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); ++ if (amdgpu_sriov_vf(adev)) { ++ /* this is fine because SR_IOV doesn't remap the register */ ++ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); ++ } else { ++ /* We just need to read back a register to post the write. ++ * Reading back the remapped register causes problems on ++ * some platforms so just read back the memory size register. ++ */ ++ if (adev->nbio.funcs->get_memsize) ++ adev->nbio.funcs->get_memsize(adev); ++ } + } else { + amdgpu_ring_emit_wreg(ring, + (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, +diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +index b6d71ec1debf9a..0d0c568f383931 100644 +--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +@@ -33,7 +33,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, + { + if (!ring || !ring->funcs->emit_wreg) { + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); ++ /* We just need to read back a register to post the write. ++ * Reading back the remapped register causes problems on ++ * some platforms so just read back the memory size register. 
++ */ ++ if (adev->nbio.funcs->get_memsize) ++ adev->nbio.funcs->get_memsize(adev); + } else { + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index e6bc590533194d..f6017be8f9957e 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -610,15 +610,21 @@ static void dm_crtc_high_irq(void *interrupt_params) + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + + if (acrtc->dm_irq_params.stream && +- acrtc->dm_irq_params.vrr_params.supported && +- acrtc->dm_irq_params.freesync_config.state == +- VRR_STATE_ACTIVE_VARIABLE) { ++ acrtc->dm_irq_params.vrr_params.supported) { ++ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; ++ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; ++ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; ++ + mod_freesync_handle_v_update(adev->dm.freesync_module, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params); + +- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, +- &acrtc->dm_irq_params.vrr_params.adjust); ++ /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ ++ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { ++ dc_stream_adjust_vmin_vmax(adev->dm.dc, ++ acrtc->dm_irq_params.stream, ++ &acrtc->dm_irq_params.vrr_params.adjust); ++ } + } + + /* +@@ -11049,7 +11055,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( + * Transient states before tunneling is enabled could + * lead to this error. We can ignore this for now. + */ +- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { ++ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { + DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", + payload->address, payload->length, + p_notify->result); +@@ -11058,22 +11064,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( + goto out; + } + ++ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; ++ if (adev->dm.dmub_notify->aux_reply.command & 0xF0) ++ /* The reply is stored in the top nibble of the command. */ ++ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; + +- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; +- if (!payload->write && p_notify->aux_reply.length && +- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { +- +- if (payload->length != p_notify->aux_reply.length) { +- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", +- p_notify->aux_reply.length, +- payload->address, payload->length); +- *operation_result = AUX_RET_ERROR_INVALID_REPLY; +- goto out; +- } +- ++ if (!payload->write && p_notify->aux_reply.length) + memcpy(payload->data, p_notify->aux_reply.data, + p_notify->aux_reply.length); +- } + + /* success */ + ret = p_notify->aux_reply.length; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +index 5858e288b3fd66..c0cacd501c83eb 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -48,6 +48,9 @@ + + #define PEAK_FACTOR_X1000 1006 + ++/* ++ * This function handles both native AUX and I2C-Over-AUX transactions. 
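
A note on the reply handling added above in amdgpu_dm_process_dmub_aux_transfer_sync(): the DMUB reply byte appears to carry the native-AUX status in its low nibble and, when non-zero, an I2C-over-AUX status in its high nibble, with the high nibble taking precedence. Reduced to a standalone helper (a simplified model of the check in the hunk, not the firmware's documented format):

#include <stdint.h>

static uint8_t aux_reply_status(uint8_t command)
{
	if (command & 0xF0)		/* high nibble set: I2C-over-AUX reply */
		return (command >> 4) & 0xF;
	return command & 0xF;		/* plain native AUX reply */
}
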
++ */ + static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) + { +@@ -84,15 +87,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + if (adev->dm.aux_hpd_discon_quirk) { + if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && + operation_result == AUX_RET_ERROR_HPD_DISCON) { +- result = 0; ++ result = msg->size; + operation_result = AUX_RET_SUCCESS; + } + } + +- if (payload.write && result >= 0) +- result = msg->size; ++ /* ++ * result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER ++ */ ++ if (payload.write && result >= 0) { ++ if (result) { ++ /*one byte indicating partially written bytes. Force 0 to retry*/ ++ drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n"); ++ result = 0; ++ } else if (!payload.reply[0]) ++ /*I2C_ACK|AUX_ACK*/ ++ result = msg->size; ++ } + +- if (result < 0) ++ if (result < 0) { + switch (operation_result) { + case AUX_RET_SUCCESS: + break; +@@ -111,6 +124,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + break; + } + ++ drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); ++ } ++ ++ if (payload.reply[0]) ++ drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", ++ payload.reply[0]); ++ + return result; + } + +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 37fe54c34b141c..0d69098eddd90f 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -979,27 +979,28 @@ static const struct panel_desc auo_g070vvn01 = { + }, + }; + +-static const struct drm_display_mode auo_g101evn010_mode = { +- .clock = 68930, +- .hdisplay = 1280, +- .hsync_start = 1280 + 82, +- .hsync_end = 1280 + 82 + 2, +- .htotal = 1280 + 82 + 2 + 84, +- .vdisplay = 800, +- .vsync_start = 800 + 8, +- .vsync_end = 800 + 8 + 2, +- .vtotal = 800 + 8 + 2 + 6, ++static const struct display_timing auo_g101evn010_timing = { ++ .pixelclock = { 64000000, 68930000, 85000000 }, ++ .hactive = { 1280, 1280, 1280 }, ++ .hfront_porch = { 8, 64, 256 }, ++ .hback_porch = { 8, 64, 256 }, ++ .hsync_len = { 40, 168, 767 }, ++ .vactive = { 800, 800, 800 }, ++ .vfront_porch = { 4, 8, 100 }, ++ .vback_porch = { 4, 8, 100 }, ++ .vsync_len = { 8, 16, 223 }, + }; + + static const struct panel_desc auo_g101evn010 = { +- .modes = &auo_g101evn010_mode, +- .num_modes = 1, ++ .timings = &auo_g101evn010_timing, ++ .num_timings = 1, + .bpc = 6, + .size = { + .width = 216, + .height = 135, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, ++ .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, + }; + +diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c +index 5b729013fd26f5..41493cf3d03b81 100644 +--- a/drivers/gpu/drm/v3d/v3d_sched.c ++++ b/drivers/gpu/drm/v3d/v3d_sched.c +@@ -289,11 +289,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) + return DRM_GPU_SCHED_STAT_NOMINAL; + } + +-/* If the current address or return address have changed, then the GPU +- * has probably made progress and we should delay the reset. This +- * could fail if the GPU got in an infinite loop in the CL, but that +- * is pretty unlikely outside of an i-g-t testcase. 
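
The v3d timeout rework below hinges on a small progress heuristic: remember the last observed current/return address (or batch count) and only reset when it stops moving. The heuristic in isolation (generic sketch, names invented):

#include <stdbool.h>
#include <stdint.h>

/* Returns true (and re-arms) if the GPU moved since the last timeout. */
static bool gpu_made_progress(uint32_t *last_seen, uint32_t now)
{
	if (*last_seen != now) {
		*last_seen = now;	/* remember for the next timeout */
		return true;		/* skip the reset this time */
	}
	return false;			/* genuinely stuck: reset */
}
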
+- */ ++static void ++v3d_sched_skip_reset(struct drm_sched_job *sched_job) ++{ ++ struct drm_gpu_scheduler *sched = sched_job->sched; ++ ++ spin_lock(&sched->job_list_lock); ++ list_add(&sched_job->list, &sched->pending_list); ++ spin_unlock(&sched->job_list_lock); ++} ++ + static enum drm_gpu_sched_stat + v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, + u32 *timedout_ctca, u32 *timedout_ctra) +@@ -303,9 +308,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, + u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); + u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + ++ /* If the current address or return address have changed, then the GPU ++ * has probably made progress and we should delay the reset. This ++ * could fail if the GPU got in an infinite loop in the CL, but that ++ * is pretty unlikely outside of an i-g-t testcase. ++ */ + if (*timedout_ctca != ctca || *timedout_ctra != ctra) { + *timedout_ctca = ctca; + *timedout_ctra = ctra; ++ ++ v3d_sched_skip_reset(sched_job); + return DRM_GPU_SCHED_STAT_NOMINAL; + } + +@@ -345,11 +357,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) + struct v3d_dev *v3d = job->base.v3d; + u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); + +- /* If we've made progress, skip reset and let the timer get +- * rearmed. ++ /* If we've made progress, skip reset, add the job to the pending ++ * list, and let the timer get rearmed. + */ + if (job->timedout_batches != batches) { + job->timedout_batches = batches; ++ ++ v3d_sched_skip_reset(sched_job); + return DRM_GPU_SCHED_STAT_NOMINAL; + } + +diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c +index d054721859b3b5..99b05548b7bdbb 100644 +--- a/drivers/iio/accel/adis16201.c ++++ b/drivers/iio/accel/adis16201.c +@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = { + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12), + ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X, +- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ++ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), + ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, +- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ++ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), + IIO_CHAN_SOFT_TIMESTAMP(7) + }; + +diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c +index 0c9225d18fb29b..4973e8da5399d7 100644 +--- a/drivers/iio/accel/adxl355_core.c ++++ b/drivers/iio/accel/adxl355_core.c +@@ -231,7 +231,7 @@ struct adxl355_data { + u8 transf_buf[3]; + struct { + u8 buf[14]; +- s64 ts; ++ aligned_s64 ts; + } buffer; + } __aligned(IIO_DMA_MINALIGN); + }; +diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c +index 484fe2e9fb1742..3a55475691de39 100644 +--- a/drivers/iio/accel/adxl367.c ++++ b/drivers/iio/accel/adxl367.c +@@ -620,18 +620,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr) + if (ret) + return ret; + ++ st->odr = odr; ++ + /* Activity timers depend on ODR */ + ret = _adxl367_set_act_time_ms(st, st->act_time_ms); + if (ret) + return ret; + +- ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms); +- if (ret) +- return ret; +- +- st->odr = odr; +- +- return 0; ++ return _adxl367_set_inact_time_ms(st, st->inact_time_ms); + } + + static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr) +diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c +index 287a0591533b6a..67c96572cecc40 100644 
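
The adxl355 and dln2-adc hunks above both swap a plain s64 timestamp for aligned_s64. The reason: IIO pushes the scan payload and an s64 timestamp as one record, and the timestamp must be 8-byte aligned, but on some 32-bit ABIs (i386, for one) a bare 64-bit integer only gets 4-byte alignment. A standalone C11 illustration of the same guarantee:

#include <stddef.h>
#include <stdint.h>

struct scan_record {
	uint8_t data[14];			/* channel payload */
	_Alignas(8) int64_t timestamp;		/* kernel's aligned_s64 */
};

_Static_assert(offsetof(struct scan_record, timestamp) % 8 == 0,
	       "timestamp must be naturally aligned");
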
+--- a/drivers/iio/adc/ad7606_spi.c ++++ b/drivers/iio/adc/ad7606_spi.c +@@ -127,7 +127,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr) + { + .tx_buf = &st->d16[0], + .len = 2, +- .cs_change = 0, ++ .cs_change = 1, + }, { + .rx_buf = &st->d16[1], + .len = 2, +diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c +index 97d162a3cba4ea..49a2588e7431ed 100644 +--- a/drivers/iio/adc/dln2-adc.c ++++ b/drivers/iio/adc/dln2-adc.c +@@ -483,7 +483,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p) + struct iio_dev *indio_dev = pf->indio_dev; + struct { + __le16 values[DLN2_ADC_MAX_CHANNELS]; +- int64_t timestamp_space; ++ aligned_s64 timestamp_space; + } data; + struct dln2_adc_get_all_vals dev_data; + struct dln2_adc *dln2 = iio_priv(indio_dev); +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c +index 929cba215d99ab..7b4eb4a200df81 100644 +--- a/drivers/iio/adc/rockchip_saradc.c ++++ b/drivers/iio/adc/rockchip_saradc.c +@@ -485,15 +485,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev) + if (info->reset) + rockchip_saradc_reset_controller(info->reset); + +- /* +- * Use a default value for the converter clock. +- * This may become user-configurable in the future. +- */ +- ret = clk_set_rate(info->clk, info->data->clk_rate); +- if (ret < 0) +- return dev_err_probe(&pdev->dev, ret, +- "failed to set adc clk rate\n"); +- + ret = regulator_enable(info->vref); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, +@@ -520,6 +511,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev) + if (IS_ERR(info->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), + "failed to get adc clock\n"); ++ /* ++ * Use a default value for the converter clock. ++ * This may become user-configurable in the future. 
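
The rockchip_saradc change is purely about ordering: as the diff shows, clk_set_rate() was running before devm_clk_get() had populated info->clk, so probe configured a clock handle it did not yet hold. The fixed shape, sketched with the real clk API but illustrative variable and clock names:

	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, "saradc");	/* acquire first */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, rate);			/* then configure */
	if (ret < 0)
		return ret;
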
++ */ ++ ret = clk_set_rate(info->clk, info->data->clk_rate); ++ if (ret < 0) ++ return dev_err_probe(&pdev->dev, ret, ++ "failed to set adc clk rate\n"); + + platform_set_drvdata(pdev, indio_dev); + +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +index 066fe561c5e88d..b8119fa4768eb8 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +@@ -370,6 +370,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) + if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK)) + return 0; + ++ if (!pattern_len) ++ pattern_len = ST_LSM6DSX_SAMPLE_SIZE; ++ + fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) * + ST_LSM6DSX_CHAN_SIZE; + fifo_len = (fifo_len / pattern_len) * pattern_len; +@@ -601,6 +604,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw) + if (!fifo_len) + return 0; + ++ if (!pattern_len) ++ pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE; ++ + for (read_len = 0; read_len < fifo_len; read_len += pattern_len) { + err = st_lsm6dsx_read_block(hw, + ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR, +diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c +index c28a7a6dea5f12..555a61e2f3fdd1 100644 +--- a/drivers/iio/temperature/maxim_thermocouple.c ++++ b/drivers/iio/temperature/maxim_thermocouple.c +@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = { + struct maxim_thermocouple_data { + struct spi_device *spi; + const struct maxim_thermocouple_chip *chip; ++ char tc_type; + + u8 buffer[16] __aligned(IIO_DMA_MINALIGN); +- char tc_type; + }; + + static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index b91467c8e6c402..c65321964131cf 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -77,12 +77,13 @@ + * xbox d-pads should map to buttons, as is required for DDR pads + * but we map them to axes when possible to simplify things + */ +-#define MAP_DPAD_TO_BUTTONS (1 << 0) +-#define MAP_TRIGGERS_TO_BUTTONS (1 << 1) +-#define MAP_STICKS_TO_NULL (1 << 2) +-#define MAP_SELECT_BUTTON (1 << 3) +-#define MAP_PADDLES (1 << 4) +-#define MAP_PROFILE_BUTTON (1 << 5) ++#define MAP_DPAD_TO_BUTTONS BIT(0) ++#define MAP_TRIGGERS_TO_BUTTONS BIT(1) ++#define MAP_STICKS_TO_NULL BIT(2) ++#define MAP_SHARE_BUTTON BIT(3) ++#define MAP_PADDLES BIT(4) ++#define MAP_PROFILE_BUTTON BIT(5) ++#define MAP_SHARE_OFFSET BIT(6) + + #define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \ + MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL) +@@ -135,7 +136,7 @@ static const struct xpad_device { + { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */ + { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, + { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE }, +- { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */ ++ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */ + { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE }, + { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, + { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, +@@ -159,7 +160,7 @@ static const struct xpad_device { + { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, + { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE }, + { 0x045e, 0x0b0a, "Microsoft 
X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE }, +- { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, ++ { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE }, + { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 }, +@@ -205,13 +206,13 @@ static const struct xpad_device { + { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 }, + { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, + { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, +- { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, ++ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 }, + { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 }, + { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, + { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, +- { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE }, ++ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE }, + { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, + { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, +@@ -240,7 +241,7 @@ static const struct xpad_device { + { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, +- { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE }, ++ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, +@@ -386,10 +387,11 @@ static const struct xpad_device { + { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, ++ { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 }, + { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE }, +- { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, ++ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, +@@ -1025,7 +1027,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha + * The report format was gleaned from + * https://github.com/kylelemons/xbox/blob/master/xbox.go + */ +-static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data) ++static void 
xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len) + { + struct input_dev *dev = xpad->dev; + bool do_sync = false; +@@ -1066,8 +1068,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char + /* menu/view buttons */ + input_report_key(dev, BTN_START, data[4] & BIT(2)); + input_report_key(dev, BTN_SELECT, data[4] & BIT(3)); +- if (xpad->mapping & MAP_SELECT_BUTTON) +- input_report_key(dev, KEY_RECORD, data[22] & BIT(0)); ++ if (xpad->mapping & MAP_SHARE_BUTTON) { ++ if (xpad->mapping & MAP_SHARE_OFFSET) ++ input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0)); ++ else ++ input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0)); ++ } + + /* buttons A,B,X,Y */ + input_report_key(dev, BTN_A, data[4] & BIT(4)); +@@ -1215,7 +1221,7 @@ static void xpad_irq_in(struct urb *urb) + xpad360w_process_packet(xpad, 0, xpad->idata); + break; + case XTYPE_XBOXONE: +- xpadone_process_packet(xpad, 0, xpad->idata); ++ xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length); + break; + default: + xpad_process_packet(xpad, 0, xpad->idata); +@@ -1972,7 +1978,7 @@ static int xpad_init_input(struct usb_xpad *xpad) + xpad->xtype == XTYPE_XBOXONE) { + for (i = 0; xpad360_btn[i] >= 0; i++) + input_set_capability(input_dev, EV_KEY, xpad360_btn[i]); +- if (xpad->mapping & MAP_SELECT_BUTTON) ++ if (xpad->mapping & MAP_SHARE_BUTTON) + input_set_capability(input_dev, EV_KEY, KEY_RECORD); + } else { + for (i = 0; xpad_btn[i] >= 0; i++) +diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c +index 4364c3401ff1c6..486ca8ff86f830 100644 +--- a/drivers/input/keyboard/mtk-pmic-keys.c ++++ b/drivers/input/keyboard/mtk-pmic-keys.c +@@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys, + u32 value, mask; + int error; + +- kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs; +- kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs; ++ kregs_home = ®s->keys_regs[MTK_PMIC_HOMEKEY_INDEX]; ++ kregs_pwr = ®s->keys_regs[MTK_PMIC_PWRKEY_INDEX]; + + error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec", + &long_press_debounce); +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 26677432ac8361..3ca6642601c7d5 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = { + + static const char * const smbus_pnp_ids[] = { + /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ ++ "DLL060d", /* Dell Precision M3800 */ + "LEN0048", /* X1 Carbon 3 */ + "LEN0046", /* X250 */ + "LEN0049", /* Yoga 11e */ +@@ -189,11 +190,15 @@ static const char * const smbus_pnp_ids[] = { + "LEN2054", /* E480 */ + "LEN2055", /* E580 */ + "LEN2068", /* T14 Gen 1 */ ++ "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */ ++ "SYN3003", /* HP EliteBook 850 G1 */ + "SYN3015", /* HP EliteBook 840 G2 */ + "SYN3052", /* HP EliteBook 840 G4 */ + "SYN3221", /* HP 15-ay000 */ + "SYN323d", /* HP Spectre X360 13-w013dx */ + "SYN3257", /* HP Envy 13-ad105ng */ ++ "TOS01f6", /* Dynabook Portege X30L-G */ ++ "TOS0213", /* Dynabook Portege X30-D */ + NULL + }; + +diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c +index db5a885ecd7285..a74b34d8df2a22 100644 +--- a/drivers/input/touchscreen/cyttsp5.c ++++ b/drivers/input/touchscreen/cyttsp5.c +@@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on) + int rc; + + 
SET_CMD_REPORT_TYPE(cmd[0], 0); +- SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP); ++ SET_CMD_REPORT_ID(cmd[0], state); + SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER); + + rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd)); +@@ -865,13 +865,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq, + ts->input->phys = ts->phys; + input_set_drvdata(ts->input, ts); + +- /* Reset the gpio to be in a reset state */ ++ /* Assert gpio to be in a reset state */ + ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ts->reset_gpio)) { + error = PTR_ERR(ts->reset_gpio); + dev_err(dev, "Failed to request reset gpio, error %d\n", error); + return error; + } ++ ++ fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */ ++ + gpiod_set_value_cansleep(ts->reset_gpio, 0); + + /* Need a delay to have device up */ +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 319bd10548e9ad..7a33da2dd64b12 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -1242,7 +1242,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, + + t = dm_get_live_table(md, &srcu_idx); + if (!t) +- return 0; ++ goto put_live_table; + + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); +@@ -1253,6 +1253,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, + (void *)key); + } + ++put_live_table: + dm_put_live_table(md, srcu_idx); + return 0; + } +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index 2a258986eed02b..ba7f7de25c8529 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -2125,9 +2125,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register); + + void m_can_class_unregister(struct m_can_classdev *cdev) + { ++ unregister_candev(cdev->net); + if (cdev->is_peripheral) + can_rx_offload_del(&cdev->offload); +- unregister_candev(cdev->net); + } + EXPORT_SYMBOL_GPL(m_can_class_unregister); + +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +index 6fecfe4cd08041..21ae3a89924e97 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { + .brp_inc = 1, + }; + ++/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of ++ * [-64,63] for TDCO, indicating a relative TDCO. ++ * ++ * Manual tests have shown, that using a relative TDCO configuration ++ * results in bus off, while an absolute configuration works. ++ * ++ * For TDCO use the max value (63) from the data sheet, but 0 as the ++ * minimum. 
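
For a feel of the numbers involved: the driver's old automatic TDCO was computed as dbt->brp * (dbt->prop_seg + dbt->phase_seg1), i.e. the distance to the data-phase sample point in controller clock periods. A worked example with illustrative bittiming values, clamped to the [0, 63] absolute range the new tdc_const advertises:

#include <stdio.h>

static int tdco_from_bittiming(int brp, int prop_seg, int phase_seg1)
{
	int tdco = brp * (prop_seg + phase_seg1);

	if (tdco < 0)
		tdco = 0;
	if (tdco > 63)
		tdco = 63;
	return tdco;
}

int main(void)
{
	/* e.g. brp = 1, prop_seg = 7, phase_seg1 = 7 -> TDCO = 14 */
	printf("tdco = %d\n", tdco_from_bittiming(1, 7, 7));
	return 0;
}
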
++ */ ++static const struct can_tdc_const mcp251xfd_tdc_const = { ++ .tdcv_min = 0, ++ .tdcv_max = 63, ++ .tdco_min = 0, ++ .tdco_max = 63, ++ .tdcf_min = 0, ++ .tdcf_max = 0, ++}; ++ + static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) + { + switch (model) { +@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) + { + const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; +- u32 val = 0; +- s8 tdco; ++ u32 tdcmod, val = 0; + int err; + + /* CAN Control Register +@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) + return err; + + /* Transmitter Delay Compensation */ +- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), +- -64, 63); +- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, +- MCP251XFD_REG_TDC_TDCMOD_AUTO) | +- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); ++ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) ++ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO; ++ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) ++ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL; ++ else ++ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; ++ ++ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | ++ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) | ++ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco); + + return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); + } +@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi) + priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; + priv->can.bittiming_const = &mcp251xfd_bittiming_const; + priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; ++ priv->can.tdc_const = &mcp251xfd_tdc_const; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | +- CAN_CTRLMODE_CC_LEN8_DLC; ++ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO | ++ CAN_CTRLMODE_TDC_MANUAL; + set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); + priv->ndev = ndev; + priv->spi = spi; +@@ -2179,8 +2203,8 @@ static void mcp251xfd_remove(struct spi_device *spi) + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct net_device *ndev = priv->ndev; + +- can_rx_offload_del(&priv->offload); + mcp251xfd_unregister(priv); ++ can_rx_offload_del(&priv->offload); + spi->max_speed_hz = priv->spi_max_speed_hz_orig; + free_candev(ndev); + } +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index cfcda893f1a16d..d2ff2c2fcbbfc4 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); + } + ++ vc1 &= ~VC1_RX_MCST_FWD_EN; ++ + if (enable) { + vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; +- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; ++ vc1 |= VC1_RX_MCST_UNTAG_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { +- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; ++ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } + +@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, + + } else { + vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); +- vc1 &= 
~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); ++ vc1 &= ~VC1_RX_MCST_UNTAG_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc5 &= ~VC5_DROP_VTABLE_MISS; + +@@ -1519,12 +1521,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port, + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct b53_vlan *vl; ++ u16 old_pvid, new_pvid; + int err; + + err = b53_vlan_prepare(ds, port, vlan); + if (err) + return err; + ++ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid); ++ if (pvid) ++ new_pvid = vlan->vid; ++ else if (!pvid && vlan->vid == old_pvid) ++ new_pvid = b53_default_pvid(dev); ++ else ++ new_pvid = old_pvid; ++ + vl = &dev->vlans[vlan->vid]; + + b53_get_vlan_entry(dev, vlan->vid, vl); +@@ -1541,10 +1552,10 @@ int b53_vlan_add(struct dsa_switch *ds, int port, + b53_set_vlan_entry(dev, vlan->vid, vl); + b53_fast_age_vlan(dev, vlan->vid); + +- if (pvid && !dsa_is_cpu_port(ds, port)) { ++ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), +- vlan->vid); +- b53_fast_age_vlan(dev, vlan->vid); ++ new_pvid); ++ b53_fast_age_vlan(dev, old_pvid); + } + + return 0; +@@ -1956,7 +1967,7 @@ EXPORT_SYMBOL(b53_br_join); + void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) + { + struct b53_device *dev = ds->priv; +- struct b53_vlan *vl = &dev->vlans[0]; ++ struct b53_vlan *vl; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + unsigned int i; + u16 pvlan, reg, pvid; +@@ -1982,6 +1993,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) + dev->ports[port].vlan_ctl_mask = pvlan; + + pvid = b53_default_pvid(dev); ++ vl = &dev->vlans[pvid]; + + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { +@@ -1990,12 +2002,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); +- } else { +- b53_get_vlan_entry(dev, pvid, vl); +- vl->members |= BIT(port) | BIT(cpu_port); +- vl->untag |= BIT(port) | BIT(cpu_port); +- b53_set_vlan_entry(dev, pvid, vl); + } ++ ++ b53_get_vlan_entry(dev, pvid, vl); ++ vl->members |= BIT(port) | BIT(cpu_port); ++ vl->untag |= BIT(port) | BIT(cpu_port); ++ b53_set_vlan_entry(dev, pvid, vl); + } + EXPORT_SYMBOL(b53_br_leave); + +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index d2ec8f642c2fa0..c6ccfbd4226570 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3117,11 +3117,19 @@ static int mtk_dma_init(struct mtk_eth *eth) + static void mtk_dma_free(struct mtk_eth *eth) + { + const struct mtk_soc_data *soc = eth->soc; +- int i; ++ int i, j, txqs = 1; ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) ++ txqs = MTK_QDMA_NUM_QUEUES; ++ ++ for (i = 0; i < MTK_MAX_DEVS; i++) { ++ if (!eth->netdev[i]) ++ continue; ++ ++ for (j = 0; j < txqs; j++) ++ netdev_tx_reset_subqueue(eth->netdev[i], j); ++ } + +- for (i = 0; i < MTK_MAX_DEVS; i++) +- if (eth->netdev[i]) +- netdev_reset_queue(eth->netdev[i]); + if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { + dma_free_coherent(eth->dma_dev, + MTK_QDMA_RING_SIZE * soc->txrx.txd_size, +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index c6b0637e61debd..6e2d0fda3ba4aa 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ 
-4156,7 +4156,8 @@ static void nvme_fw_act_work(struct work_struct *work) + msleep(100); + } + +- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) ++ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) || ++ !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + return; + + nvme_unquiesce_io_queues(ctrl); +diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c +index 727b956aa23172..f667b3b62f1883 100644 +--- a/drivers/staging/axis-fifo/axis-fifo.c ++++ b/drivers/staging/axis-fifo/axis-fifo.c +@@ -398,16 +398,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, + + bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET); + if (!bytes_available) { +- dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n"); +- reset_ip_core(fifo); ++ dev_err(fifo->dt_device, "received a packet of length 0\n"); + ret = -EIO; + goto end_unlock; + } + + if (bytes_available > len) { +- dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n", ++ dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n", + bytes_available, len); +- reset_ip_core(fifo); + ret = -EINVAL; + goto end_unlock; + } +@@ -416,8 +414,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, + /* this probably can't happen unless IP + * registers were previously mishandled + */ +- dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n"); +- reset_ip_core(fifo); ++ dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n"); + ret = -EIO; + goto end_unlock; + } +@@ -438,7 +435,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, + + if (copy_to_user(buf + copied * sizeof(u32), tmp_buf, + copy * sizeof(u32))) { +- reset_ip_core(fifo); + ret = -EFAULT; + goto end_unlock; + } +@@ -547,7 +543,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf, + + if (copy_from_user(tmp_buf, buf + copied * sizeof(u32), + copy * sizeof(u32))) { +- reset_ip_core(fifo); + ret = -EFAULT; + goto end_unlock; + } +@@ -780,9 +775,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo) + goto end; + } + +- /* IP sets TDFV to fifo depth - 4 so we will do the same */ +- fifo->tx_fifo_depth -= 4; +- + ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo); + if (ret) { + dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n"); +diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c +index 6c14d7bcdd6750..081b17f498638b 100644 +--- a/drivers/staging/iio/adc/ad7816.c ++++ b/drivers/staging/iio/adc/ad7816.c +@@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev, + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct ad7816_chip_info *chip = iio_priv(indio_dev); + +- if (strcmp(buf, "full")) { ++ if (strcmp(buf, "full") == 0) { + gpiod_set_value(chip->rdwr_pin, 1); + chip->mode = AD7816_FULL; + } else { +diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c +index 4b67749edb9974..601a60a2802240 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.c ++++ b/drivers/usb/cdns3/cdnsp-gadget.c +@@ -138,6 +138,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, + (portsc & PORT_CHANGE_BITS), port_regs); + } + ++static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev) ++{ ++ struct cdns *cdns = dev_get_drvdata(pdev->dev); ++ __le32 __iomem *reg; ++ void __iomem 
*base; ++ u32 offset = 0; ++ u32 val; ++ ++ if (!cdns->override_apb_timeout) ++ return; ++ ++ base = &pdev->cap_regs->hc_capbase; ++ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); ++ reg = base + offset + REG_CHICKEN_BITS_3_OFFSET; ++ ++ val = le32_to_cpu(readl(reg)); ++ val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout); ++ writel(cpu_to_le32(val), reg); ++} ++ + static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) + { + __le32 __iomem *reg; +@@ -1776,6 +1796,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) + reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); + pdev->rev_cap = reg; + ++ pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision); ++ + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", + readl(&pdev->rev_cap->ctrl_revision), + readl(&pdev->rev_cap->rtl_revision), +@@ -1801,6 +1823,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev) + pdev->hci_version = HC_VERSION(pdev->hcc_params); + pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + ++ /* ++ * Override the APB timeout value to give the controller more time for ++ * enabling UTMI clock and synchronizing APB and UTMI clock domains. ++ * This fix is platform specific and is required to fixes issue with ++ * reading incorrect value from PORTSC register after resuming ++ * from L1 state. ++ */ ++ cdnsp_set_apb_timeout_value(pdev); ++ + cdnsp_get_rev_cap(pdev); + + /* Make sure the Device Controller is halted. */ +diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h +index 9a5577a772af62..ed84dbb9fd6fbc 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.h ++++ b/drivers/usb/cdns3/cdnsp-gadget.h +@@ -520,6 +520,9 @@ struct cdnsp_rev_cap { + #define REG_CHICKEN_BITS_2_OFFSET 0x48 + #define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) + ++#define REG_CHICKEN_BITS_3_OFFSET 0x4C ++#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val)) ++ + /* XBUF Extended Capability ID. */ + #define XBUF_CAP_ID 0xCB + #define XBUF_RX_TAG_MASK_0_OFFSET 0x1C +@@ -1359,6 +1362,7 @@ struct cdnsp_port { + * @rev_cap: Controller Capabilities Registers. + * @hcs_params1: Cached register copies of read-only HCSPARAMS1 + * @hcc_params: Cached register copies of read-only HCCPARAMS1 ++ * @rtl_revision: Cached controller rtl revision. + * @setup: Temporary buffer for setup packet. + * @ep0_preq: Internal allocated request used during enumeration. + * @ep0_stage: ep0 stage during enumeration process. +@@ -1413,6 +1417,8 @@ struct cdnsp_device { + __u32 hcs_params1; + __u32 hcs_params3; + __u32 hcc_params; ++ #define RTL_REVISION_NEW_LPM 0x2700 ++ __u32 rtl_revision; + /* Lock used in interrupt thread context. */ + spinlock_t lock; + struct usb_ctrlrequest setup; +diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c +index 0725668ffea4c8..159c2eae26608c 100644 +--- a/drivers/usb/cdns3/cdnsp-pci.c ++++ b/drivers/usb/cdns3/cdnsp-pci.c +@@ -33,6 +33,8 @@ + #define CDNS_DRD_ID 0x0100 + #define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80) + ++#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20 ++ + static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) + { + /* +@@ -144,6 +146,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, + cdnsp->otg_irq = pdev->irq; + } + ++ /* ++ * Cadence PCI based platform require some longer timeout for APB ++ * to fixes domain clock synchronization issue after resuming ++ * controller from L1 state. 
++ */ ++ cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE; ++ pci_set_drvdata(pdev, cdnsp); ++ + if (pci_is_enabled(func)) { + cdnsp->dev = dev; + cdnsp->gadget_init = cdnsp_gadget_init; +@@ -153,8 +163,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, + goto free_cdnsp; + } + +- pci_set_drvdata(pdev, cdnsp); +- + device_wakeup_enable(&pdev->dev); + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); +diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c +index 1d18d5002ef01d..080a3f17a35dd7 100644 +--- a/drivers/usb/cdns3/cdnsp-ring.c ++++ b/drivers/usb/cdns3/cdnsp-ring.c +@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, + + writel(db_value, reg_addr); + +- cdnsp_force_l0_go(pdev); ++ if (pdev->rtl_revision < RTL_REVISION_NEW_LPM) ++ cdnsp_force_l0_go(pdev); + + /* Doorbell was set. */ + return true; +diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h +index 57d47348dc193b..ac30ee21309d02 100644 +--- a/drivers/usb/cdns3/core.h ++++ b/drivers/usb/cdns3/core.h +@@ -79,6 +79,8 @@ struct cdns3_platform_data { + * @pdata: platform data from glue layer + * @lock: spinlock structure + * @xhci_plat_data: xhci private data structure pointer ++ * @override_apb_timeout: hold value of APB timeout. For value 0 the default ++ * value in CHICKEN_BITS_3 will be preserved. + * @gadget_init: pointer to gadget initialization function + */ + struct cdns { +@@ -117,6 +119,7 @@ struct cdns { + struct cdns3_platform_data *pdata; + spinlock_t lock; + struct xhci_plat_priv *xhci_plat_data; ++ u32 override_apb_timeout; + + int (*gadget_init)(struct cdns *cdns); + }; +diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c +index c2e666e82857c1..2f92905e05cad0 100644 +--- a/drivers/usb/class/usbtmc.c ++++ b/drivers/usb/class/usbtmc.c +@@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) + u8 *buffer; + u8 tag; + int rv; ++ long wait_rv; + + dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", + data->iin_ep_present); +@@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) + } + + if (data->iin_ep_present) { +- rv = wait_event_interruptible_timeout( ++ wait_rv = wait_event_interruptible_timeout( + data->waitq, + atomic_read(&data->iin_data_valid) != 0, + file_data->timeout); +- if (rv < 0) { +- dev_dbg(dev, "wait interrupted %d\n", rv); ++ if (wait_rv < 0) { ++ dev_dbg(dev, "wait interrupted %ld\n", wait_rv); ++ rv = wait_rv; + goto exit; + } + +- if (rv == 0) { ++ if (wait_rv == 0) { + dev_dbg(dev, "wait timed out\n"); + rv = -ETIMEDOUT; + goto exit; +@@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) + + dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv); + ++ rv = 0; ++ + exit: + /* bump interrupt bTag */ + data->iin_bTag += 1; +@@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, + { + struct usbtmc_device_data *data = file_data->data; + struct device *dev = &data->intf->dev; +- int rv; + u32 timeout; + unsigned long expire; ++ long wait_rv; + + if (!data->iin_ep_present) { + dev_dbg(dev, "no interrupt endpoint present\n"); +@@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, + + mutex_unlock(&data->io_mutex); + +- rv = wait_event_interruptible_timeout( +- data->waitq, +- atomic_read(&file_data->srq_asserted) != 0 || +- atomic_read(&file_data->closing), +- expire); ++ wait_rv = 
wait_event_interruptible_timeout( ++ data->waitq, ++ atomic_read(&file_data->srq_asserted) != 0 || ++ atomic_read(&file_data->closing), ++ expire); + + mutex_lock(&data->io_mutex); + + /* Note! disconnect or close could be called in the meantime */ + if (atomic_read(&file_data->closing) || data->zombie) +- rv = -ENODEV; ++ return -ENODEV; + +- if (rv < 0) { +- /* dev can be invalid now! */ +- pr_debug("%s - wait interrupted %d\n", __func__, rv); +- return rv; ++ if (wait_rv < 0) { ++ dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv); ++ return wait_rv; + } + +- if (rv == 0) { ++ if (wait_rv == 0) { + dev_dbg(dev, "%s - wait timed out\n", __func__); + return -ETIMEDOUT; + } +@@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, + unsigned long expire; + int bufcount = 1; + int again = 0; ++ long wait_rv; + + /* mutex already locked */ + +@@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, + if (!(flags & USBTMC_FLAG_ASYNC)) { + dev_dbg(dev, "%s: before wait time %lu\n", + __func__, expire); +- retval = wait_event_interruptible_timeout( ++ wait_rv = wait_event_interruptible_timeout( + file_data->wait_bulk_in, + usbtmc_do_transfer(file_data), + expire); + +- dev_dbg(dev, "%s: wait returned %d\n", +- __func__, retval); ++ dev_dbg(dev, "%s: wait returned %ld\n", ++ __func__, wait_rv); ++ ++ if (wait_rv < 0) { ++ retval = wait_rv; ++ goto error; ++ } + +- if (retval <= 0) { +- if (retval == 0) +- retval = -ETIMEDOUT; ++ if (wait_rv == 0) { ++ retval = -ETIMEDOUT; + goto error; + } ++ + } + + urb = usb_get_from_anchor(&file_data->in_anchor); +@@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, + if (!buffer) + return -ENOMEM; + +- mutex_lock(&data->io_mutex); ++ retval = mutex_lock_interruptible(&data->io_mutex); ++ if (retval < 0) ++ goto exit_nolock; ++ + if (data->zombie) { + retval = -ENODEV; + goto exit; +@@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, + + exit: + mutex_unlock(&data->io_mutex); ++exit_nolock: + kfree(buffer); + return retval; + } +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 69ce7d384ba8bb..4f326988be867c 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -2011,15 +2011,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + + if (f->get_status) { + status = f->get_status(f); ++ + if (status < 0) + break; +- } else { +- /* Set D0 and D1 bits based on func wakeup capability */ +- if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) { +- status |= USB_INTRF_STAT_FUNC_RW_CAP; +- if (f->func_wakeup_armed) +- status |= USB_INTRF_STAT_FUNC_RW; +- } ++ ++ /* if D5 is not set, then device is not wakeup capable */ ++ if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP)) ++ status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW); + } + + put_unaligned_le16(status & 0x0000ffff, req->buf); +diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c +index f55f60639e4251..2afc30de54ce2d 100644 +--- a/drivers/usb/gadget/function/f_ecm.c ++++ b/drivers/usb/gadget/function/f_ecm.c +@@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f) + gether_resume(&ecm->port); + } + ++static int ecm_get_status(struct usb_function *f) ++{ ++ return (f->func_wakeup_armed ? 
USB_INTRF_STAT_FUNC_RW : 0) | ++ USB_INTRF_STAT_FUNC_RW_CAP; ++} ++ + static void ecm_free(struct usb_function *f) + { + struct f_ecm *ecm; +@@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi) + ecm->port.func.disable = ecm_disable; + ecm->port.func.free_func = ecm_free; + ecm->port.func.suspend = ecm_suspend; ++ ecm->port.func.get_status = ecm_get_status; + ecm->port.func.resume = ecm_resume; + + return &ecm->port.func; +diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c +index 7aa46d426f31b2..9bb54da8a6ae15 100644 +--- a/drivers/usb/gadget/udc/tegra-xudc.c ++++ b/drivers/usb/gadget/udc/tegra-xudc.c +@@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep) + val = xudc_readl(xudc, CTRL); + val &= ~CTRL_RUN; + xudc_writel(xudc, val, CTRL); ++ ++ val = xudc_readl(xudc, ST); ++ if (val & ST_RC) ++ xudc_writel(xudc, ST_RC, ST); + } + + dev_info(xudc->dev, "ep %u disabled\n", ep->index); +diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c +index 3dec5dd3a0d5ca..712389599d468c 100644 +--- a/drivers/usb/host/uhci-platform.c ++++ b/drivers/usb/host/uhci-platform.c +@@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) + } + + /* Get and enable clock if any specified */ +- uhci->clk = devm_clk_get(&pdev->dev, NULL); ++ uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(uhci->clk)) { + ret = PTR_ERR(uhci->clk); + goto err_rmr; +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c +index 76f228e7443cb6..89b3079194d7b3 100644 +--- a/drivers/usb/host/xhci-tegra.c ++++ b/drivers/usb/host/xhci-tegra.c +@@ -1363,6 +1363,7 @@ static void tegra_xhci_id_work(struct work_struct *work) + tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, + tegra->otg_usb2_port); + ++ pm_runtime_get_sync(tegra->dev); + if (tegra->host_mode) { + /* switch to host mode */ + if (tegra->otg_usb3_port >= 0) { +@@ -1392,6 +1393,7 @@ static void tegra_xhci_id_work(struct work_struct *work) + } + + tegra_xhci_set_port_power(tegra, true, true); ++ pm_runtime_mark_last_busy(tegra->dev); + + } else { + if (tegra->otg_usb3_port >= 0) +@@ -1399,6 +1401,7 @@ static void tegra_xhci_id_work(struct work_struct *work) + + tegra_xhci_set_port_power(tegra, true, false); + } ++ pm_runtime_put_autosuspend(tegra->dev); + } + + #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index 790aadab72a31b..bfcbccb400c3a8 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -5102,7 +5102,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, + case SNK_TRY_WAIT_DEBOUNCE: + if (!tcpm_port_is_sink(port)) { + port->max_wait = 0; +- tcpm_set_state(port, SRC_TRYWAIT, 0); ++ tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE); + } + break; + case SRC_TRY_WAIT: +diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c +index 2431febc461516..8c19081c325542 100644 +--- a/drivers/usb/typec/ucsi/displayport.c ++++ b/drivers/usb/typec/ucsi/displayport.c +@@ -296,6 +296,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) + if (!dp) + return; + ++ cancel_work_sync(&dp->work); ++ + dp->data.conf = 0; + dp->data.status = 0; + dp->initialized = false; +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 0b3bd9a7575e5f..5770f3b374ece3 100644 
+--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -216,6 +216,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, + * buffering it. + */ + if (dma_capable(dev, dev_addr, size, true) && ++ !dma_kmalloc_needs_bounce(dev, size, dir) && + !range_straddles_page_boundary(phys, size) && + !xen_arch_need_swiotlb(dev, phys, dev_addr) && + !is_swiotlb_force_bounce(dev)) +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h +index 2754bdfadcb89c..4ba73320694a4c 100644 +--- a/drivers/xen/xenbus/xenbus.h ++++ b/drivers/xen/xenbus/xenbus.h +@@ -77,6 +77,7 @@ enum xb_req_state { + struct xb_req_data { + struct list_head list; + wait_queue_head_t wq; ++ struct kref kref; + struct xsd_sockmsg msg; + uint32_t caller_req_id; + enum xsd_sockmsg_type type; +@@ -103,6 +104,7 @@ int xb_init_comms(void); + void xb_deinit_comms(void); + int xs_watch_msg(struct xs_watch_event *event); + void xs_request_exit(struct xb_req_data *req); ++void xs_free_req(struct kref *kref); + + int xenbus_match(struct device *_dev, struct device_driver *_drv); + int xenbus_dev_probe(struct device *_dev); +diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c +index e5fda0256feb3d..82df2da1b880b8 100644 +--- a/drivers/xen/xenbus/xenbus_comms.c ++++ b/drivers/xen/xenbus/xenbus_comms.c +@@ -309,8 +309,8 @@ static int process_msg(void) + virt_wmb(); + req->state = xb_req_state_got_reply; + req->cb(req); +- } else +- kfree(req); ++ } ++ kref_put(&req->kref, xs_free_req); + } + + mutex_unlock(&xs_response_mutex); +@@ -386,14 +386,13 @@ static int process_writes(void) + state.req->msg.type = XS_ERROR; + state.req->err = err; + list_del(&state.req->list); +- if (state.req->state == xb_req_state_aborted) +- kfree(state.req); +- else { ++ if (state.req->state != xb_req_state_aborted) { + /* write err, then update state */ + virt_wmb(); + state.req->state = xb_req_state_got_reply; + wake_up(&state.req->wq); + } ++ kref_put(&state.req->kref, xs_free_req); + + mutex_unlock(&xb_write_mutex); + +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c +index 0792fda49a15f3..c495cff3da308b 100644 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c +@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req) + mutex_unlock(&u->reply_mutex); + + kfree(req->body); +- kfree(req); ++ kref_put(&req->kref, xs_free_req); + + kref_put(&u->kref, xenbus_file_free); + +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c +index 028a182bcc9e83..f84c87ee283958 100644 +--- a/drivers/xen/xenbus/xenbus_xs.c ++++ b/drivers/xen/xenbus/xenbus_xs.c +@@ -112,6 +112,12 @@ static void xs_suspend_exit(void) + wake_up_all(&xs_state_enter_wq); + } + ++void xs_free_req(struct kref *kref) ++{ ++ struct xb_req_data *req = container_of(kref, struct xb_req_data, kref); ++ kfree(req); ++} ++ + static uint32_t xs_request_enter(struct xb_req_data *req) + { + uint32_t rq_id; +@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) + req->caller_req_id = req->msg.req_id; + req->msg.req_id = xs_request_enter(req); + ++ /* ++ * Take 2nd ref. One for this thread, and the second for the ++ * xenbus_thread. 
++ */ ++ kref_get(&req->kref); ++ + mutex_lock(&xb_write_mutex); + list_add_tail(&req->list, &xb_write_list); + notify = list_is_singular(&xb_write_list); +@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg) + if (req->state == xb_req_state_queued || + req->state == xb_req_state_wait_reply) + req->state = xb_req_state_aborted; +- else +- kfree(req); ++ ++ kref_put(&req->kref, xs_free_req); + mutex_unlock(&xb_write_mutex); + + return ret; +@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) + req->cb = xenbus_dev_queue_reply; + req->par = par; + req->user_req = true; ++ kref_init(&req->kref); + + xs_send(req, msg); + +@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t, + req->num_vecs = num_vecs; + req->cb = xs_wake_up; + req->user_req = false; ++ kref_init(&req->kref); + + msg.req_id = 0; + msg.tx_id = t.id; +diff --git a/fs/namespace.c b/fs/namespace.c +index 5a885d35efe937..450f4198b8cdd8 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -633,7 +633,7 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) + return 0; + mnt = real_mount(bastard); + mnt_add_count(mnt, 1); +- smp_mb(); // see mntput_no_expire() ++ smp_mb(); // see mntput_no_expire() and do_umount() + if (likely(!read_seqretry(&mount_lock, seq))) + return 0; + if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { +@@ -1786,6 +1786,7 @@ static int do_umount(struct mount *mnt, int flags) + umount_tree(mnt, UMOUNT_PROPAGATE); + retval = 0; + } else { ++ smp_mb(); // paired with __legitimize_mnt() + shrink_submounts(mnt); + retval = -EBUSY; + if (!propagate_mount_busy(mnt, 2)) { +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c +index cbe3c12ff5f75c..b246e53271114d 100644 +--- a/fs/ocfs2/journal.c ++++ b/fs/ocfs2/journal.c +@@ -174,7 +174,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) + struct ocfs2_recovery_map *rm; + + mutex_init(&osb->recovery_lock); +- osb->disable_recovery = 0; ++ osb->recovery_state = OCFS2_REC_ENABLED; + osb->recovery_thread_task = NULL; + init_waitqueue_head(&osb->recovery_event); + +@@ -190,31 +190,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) + return 0; + } + +-/* we can't grab the goofy sem lock from inside wait_event, so we use +- * memory barriers to make sure that we'll see the null task before +- * being woken up */ + static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) + { +- mb(); + return osb->recovery_thread_task != NULL; + } + +-void ocfs2_recovery_exit(struct ocfs2_super *osb) ++static void ocfs2_recovery_disable(struct ocfs2_super *osb, ++ enum ocfs2_recovery_state state) + { +- struct ocfs2_recovery_map *rm; +- +- /* disable any new recovery threads and wait for any currently +- * running ones to exit. Do this before setting the vol_state. */ + mutex_lock(&osb->recovery_lock); +- osb->disable_recovery = 1; ++ /* ++ * If recovery thread is not running, we can directly transition to ++ * final state. 
++ */ ++ if (!ocfs2_recovery_thread_running(osb)) { ++ osb->recovery_state = state + 1; ++ goto out_lock; ++ } ++ osb->recovery_state = state; ++ /* Wait for recovery thread to acknowledge state transition */ ++ wait_event_cmd(osb->recovery_event, ++ !ocfs2_recovery_thread_running(osb) || ++ osb->recovery_state >= state + 1, ++ mutex_unlock(&osb->recovery_lock), ++ mutex_lock(&osb->recovery_lock)); ++out_lock: + mutex_unlock(&osb->recovery_lock); +- wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); + +- /* At this point, we know that no more recovery threads can be +- * launched, so wait for any recovery completion work to +- * complete. */ ++ /* ++ * At this point we know that no more recovery work can be queued so ++ * wait for any recovery completion work to complete. ++ */ + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); ++} ++ ++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb) ++{ ++ ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE); ++} ++ ++void ocfs2_recovery_exit(struct ocfs2_super *osb) ++{ ++ struct ocfs2_recovery_map *rm; ++ ++ /* disable any new recovery threads and wait for any currently ++ * running ones to exit. Do this before setting the vol_state. */ ++ ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE); + + /* + * Now that recovery is shut down, and the osb is about to be +@@ -1472,6 +1494,18 @@ static int __ocfs2_recovery_thread(void *arg) + } + } + restart: ++ if (quota_enabled) { ++ mutex_lock(&osb->recovery_lock); ++ /* Confirm that recovery thread will no longer recover quotas */ ++ if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) { ++ osb->recovery_state = OCFS2_REC_QUOTA_DISABLED; ++ wake_up(&osb->recovery_event); ++ } ++ if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED) ++ quota_enabled = 0; ++ mutex_unlock(&osb->recovery_lock); ++ } ++ + status = ocfs2_super_lock(osb, 1); + if (status < 0) { + mlog_errno(status); +@@ -1569,27 +1603,29 @@ static int __ocfs2_recovery_thread(void *arg) + + ocfs2_free_replay_slots(osb); + osb->recovery_thread_task = NULL; +- mb(); /* sync with ocfs2_recovery_thread_running */ ++ if (osb->recovery_state == OCFS2_REC_WANT_DISABLE) ++ osb->recovery_state = OCFS2_REC_DISABLED; + wake_up(&osb->recovery_event); + + mutex_unlock(&osb->recovery_lock); + +- if (quota_enabled) +- kfree(rm_quota); ++ kfree(rm_quota); + + return status; + } + + void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) + { ++ int was_set = -1; ++ + mutex_lock(&osb->recovery_lock); ++ if (osb->recovery_state < OCFS2_REC_WANT_DISABLE) ++ was_set = ocfs2_recovery_map_set(osb, node_num); + + trace_ocfs2_recovery_thread(node_num, osb->node_num, +- osb->disable_recovery, osb->recovery_thread_task, +- osb->disable_recovery ? 
+- -1 : ocfs2_recovery_map_set(osb, node_num)); ++ osb->recovery_state, osb->recovery_thread_task, was_set); + +- if (osb->disable_recovery) ++ if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE) + goto out; + + if (osb->recovery_thread_task) +diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h +index e3c3a35dc5e0e7..6397170f302f22 100644 +--- a/fs/ocfs2/journal.h ++++ b/fs/ocfs2/journal.h +@@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb); + + int ocfs2_recovery_init(struct ocfs2_super *osb); + void ocfs2_recovery_exit(struct ocfs2_super *osb); ++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb); + + int ocfs2_compute_replay_slots(struct ocfs2_super *osb); + void ocfs2_free_replay_slots(struct ocfs2_super *osb); +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h +index 8fe826143d7bf4..5e3ebbab698ad1 100644 +--- a/fs/ocfs2/ocfs2.h ++++ b/fs/ocfs2/ocfs2.h +@@ -308,6 +308,21 @@ enum ocfs2_journal_trigger_type { + void ocfs2_initialize_journal_triggers(struct super_block *sb, + struct ocfs2_triggers triggers[]); + ++enum ocfs2_recovery_state { ++ OCFS2_REC_ENABLED = 0, ++ OCFS2_REC_QUOTA_WANT_DISABLE, ++ /* ++ * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for ++ * ocfs2_recovery_disable_quota() to work. ++ */ ++ OCFS2_REC_QUOTA_DISABLED, ++ OCFS2_REC_WANT_DISABLE, ++ /* ++ * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work ++ */ ++ OCFS2_REC_DISABLED, ++}; ++ + struct ocfs2_journal; + struct ocfs2_slot_info; + struct ocfs2_recovery_map; +@@ -370,7 +385,7 @@ struct ocfs2_super + struct ocfs2_recovery_map *recovery_map; + struct ocfs2_replay_map *replay_map; + struct task_struct *recovery_thread_task; +- int disable_recovery; ++ enum ocfs2_recovery_state recovery_state; + wait_queue_head_t checkpoint_event; + struct ocfs2_journal *journal; + unsigned long osb_commit_interval; +diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c +index 4b4fa58cd32ff0..0ca8975a1df479 100644 +--- a/fs/ocfs2/quota_local.c ++++ b/fs/ocfs2/quota_local.c +@@ -453,8 +453,7 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( + + /* Sync changes in local quota file into global quota file and + * reinitialize local quota file. +- * The function expects local quota file to be already locked and +- * s_umount locked in shared mode. */ ++ * The function expects local quota file to be already locked. */ + static int ocfs2_recover_local_quota_file(struct inode *lqinode, + int type, + struct ocfs2_quota_recovery *rec) +@@ -585,7 +584,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + { + unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, + LOCAL_GROUP_QUOTA_SYSTEM_INODE }; +- struct super_block *sb = osb->sb; + struct ocfs2_local_disk_dqinfo *ldinfo; + struct buffer_head *bh; + handle_t *handle; +@@ -597,7 +595,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " + "slot %u\n", osb->dev_str, slot_num); + +- down_read(&sb->s_umount); + for (type = 0; type < OCFS2_MAXQUOTAS; type++) { + if (list_empty(&(rec->r_list[type]))) + continue; +@@ -674,7 +671,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + break; + } + out: +- up_read(&sb->s_umount); + kfree(rec); + return status; + } +@@ -840,8 +836,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) + ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); + + /* +- * s_umount held in exclusive mode protects us against racing with +- * recovery thread... 
++ * ocfs2_dismount_volume() has already aborted quota recovery... + */ + if (oinfo->dqi_rec) { + ocfs2_free_quota_recovery(oinfo->dqi_rec); +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c +index 84fa585c6513a5..e585e77cdc88e1 100644 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c +@@ -1870,6 +1870,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) + /* Orphan scan should be stopped as early as possible */ + ocfs2_orphan_scan_stop(osb); + ++ /* Stop quota recovery so that we can disable quotas */ ++ ocfs2_recovery_disable_quota(osb); ++ + ocfs2_disable_quotas(osb); + + /* All dquots should be freed by now */ +diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c +index 9c0ef4195b5829..74979466729535 100644 +--- a/fs/smb/client/cached_dir.c ++++ b/fs/smb/client/cached_dir.c +@@ -29,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, + { + struct cached_fid *cfid; + +- spin_lock(&cfids->cfid_list_lock); + list_for_each_entry(cfid, &cfids->entries, entry) { + if (!strcmp(cfid->path, path)) { + /* +@@ -38,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, + * being deleted due to a lease break. + */ + if (!cfid->time || !cfid->has_lease) { +- spin_unlock(&cfids->cfid_list_lock); + return NULL; + } + kref_get(&cfid->refcount); +- spin_unlock(&cfids->cfid_list_lock); + return cfid; + } + } + if (lookup_only) { +- spin_unlock(&cfids->cfid_list_lock); + return NULL; + } + if (cfids->num_entries >= max_cached_dirs) { +- spin_unlock(&cfids->cfid_list_lock); + return NULL; + } + cfid = init_cached_dir(path); + if (cfid == NULL) { +- spin_unlock(&cfids->cfid_list_lock); + return NULL; + } + cfid->cfids = cfids; +@@ -74,7 +68,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, + */ + cfid->has_lease = true; + +- spin_unlock(&cfids->cfid_list_lock); + return cfid; + } + +@@ -185,8 +178,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + if (!utf16_path) + return -ENOMEM; + ++ spin_lock(&cfids->cfid_list_lock); + cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs); + if (cfid == NULL) { ++ spin_unlock(&cfids->cfid_list_lock); + kfree(utf16_path); + return -ENOENT; + } +@@ -195,7 +190,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + * Otherwise, it is either a new entry or laundromat worker removed it + * from @cfids->entries. Caller will put last reference if the latter. 
+ */ +- spin_lock(&cfids->cfid_list_lock); + if (cfid->has_lease && cfid->time) { + spin_unlock(&cfids->cfid_list_lock); + *ret_cfid = cfid; +diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c +index 5a5277b4b53b11..72294764d4c20c 100644 +--- a/fs/smb/server/oplock.c ++++ b/fs/smb/server/oplock.c +@@ -1496,7 +1496,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req) + + if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease_v2) - 4) +- return NULL; ++ goto err_out; + + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); + lreq->req_state = lc->lcontext.LeaseState; +@@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req) + + if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease)) +- return NULL; ++ goto err_out; + + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); + lreq->req_state = lc->lcontext.LeaseState; +@@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req) + lreq->version = 1; + } + return lreq; ++err_out: ++ kfree(lreq); ++ return NULL; + } + + /** +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 13750a5e5ba02e..9bd817427a345a 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -632,6 +632,11 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls) + return name; + } + ++ if (*name == '\0') { ++ kfree(name); ++ return ERR_PTR(-EINVAL); ++ } ++ + if (*name == '\\') { + pr_err("not allow directory name included leading slash\n"); + kfree(name); +diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c +index fa5b7e63eb832e..f6616d687365a3 100644 +--- a/fs/smb/server/vfs.c ++++ b/fs/smb/server/vfs.c +@@ -443,6 +443,13 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, + goto out; + } + ++ if (v_len <= *pos) { ++ pr_err("stream write position %lld is out of bounds (stream length: %zd)\n", ++ *pos, v_len); ++ err = -EINVAL; ++ goto out; ++ } ++ + if (v_len < size) { + wbuf = kvzalloc(size, GFP_KERNEL); + if (!wbuf) { +diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c +index 271a23abc82fdd..002f0864abee62 100644 +--- a/fs/smb/server/vfs_cache.c ++++ b/fs/smb/server/vfs_cache.c +@@ -644,21 +644,40 @@ __close_file_table_ids(struct ksmbd_file_table *ft, + bool (*skip)(struct ksmbd_tree_connect *tcon, + struct ksmbd_file *fp)) + { +- unsigned int id; +- struct ksmbd_file *fp; +- int num = 0; ++ struct ksmbd_file *fp; ++ unsigned int id = 0; ++ int num = 0; ++ ++ while (1) { ++ write_lock(&ft->lock); ++ fp = idr_get_next(ft->idr, &id); ++ if (!fp) { ++ write_unlock(&ft->lock); ++ break; ++ } + +- idr_for_each_entry(ft->idr, fp, id) { +- if (skip(tcon, fp)) ++ if (skip(tcon, fp) || ++ !atomic_dec_and_test(&fp->refcount)) { ++ id++; ++ write_unlock(&ft->lock); + continue; ++ } + + set_close_state_blocked_works(fp); ++ idr_remove(ft->idr, fp->volatile_id); ++ fp->volatile_id = KSMBD_NO_FID; ++ write_unlock(&ft->lock); ++ ++ down_write(&fp->f_ci->m_lock); ++ list_del_init(&fp->node); ++ up_write(&fp->f_ci->m_lock); + +- if (!atomic_dec_and_test(&fp->refcount)) +- continue; + __ksmbd_close_fd(ft, fp); ++ + num++; ++ id++; + } ++ + return num; + } + +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index a7d91a167a8b64..20db7fc0651f3c 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -77,6 +77,8 @@ extern ssize_t cpu_show_gds(struct device *dev, + struct device_attribute *attr, char 
*buf); + extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_indirect_target_selection(struct device *dev, ++ struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, +diff --git a/include/linux/module.h b/include/linux/module.h +index f2a8624eef1eca..f58d1eb260fa9e 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -572,6 +572,11 @@ struct module { + atomic_t refcnt; + #endif + ++#ifdef CONFIG_MITIGATION_ITS ++ int its_num_pages; ++ void **its_page_array; ++#endif ++ + #ifdef CONFIG_CONSTRUCTORS + /* Constructor functions. */ + ctor_fn_t *ctors; +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 337a9d1c558f3c..0b0a172337dbac 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -3614,6 +3614,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q) + #endif + } + ++/** ++ * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue ++ * @dev: network device ++ * @qid: stack index of the queue to reset ++ */ ++static inline void netdev_tx_reset_subqueue(const struct net_device *dev, ++ u32 qid) ++{ ++ netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid)); ++} ++ + /** + * netdev_reset_queue - reset the packets and bytes count of a network device + * @dev_queue: network device +@@ -3623,7 +3634,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q) + */ + static inline void netdev_reset_queue(struct net_device *dev_queue) + { +- netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); ++ netdev_tx_reset_subqueue(dev_queue, 0); + } + + /** +diff --git a/include/linux/types.h b/include/linux/types.h +index 253168bb3fe15c..78d87c751ff58c 100644 +--- a/include/linux/types.h ++++ b/include/linux/types.h +@@ -115,8 +115,9 @@ typedef u64 u_int64_t; + typedef s64 int64_t; + #endif + +-/* this is a special 64bit data type that is 8-byte aligned */ ++/* These are the special 64-bit data types that are 8-byte aligned */ + #define aligned_u64 __aligned_u64 ++#define aligned_s64 __aligned_s64 + #define aligned_be64 __aligned_be64 + #define aligned_le64 __aligned_le64 + +diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h +index 6375a06840520d..48b933938877d9 100644 +--- a/include/uapi/linux/types.h ++++ b/include/uapi/linux/types.h +@@ -53,6 +53,7 @@ typedef __u32 __bitwise __wsum; + * No conversions are necessary between 32-bit user-space and a 64-bit kernel. 
+ */ + #define __aligned_u64 __u64 __attribute__((aligned(8))) ++#define __aligned_s64 __s64 __attribute__((aligned(8))) + #define __aligned_be64 __be64 __attribute__((aligned(8))) + #define __aligned_le64 __le64 __attribute__((aligned(8))) + +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index 3ce93418e0151d..db592fa549b738 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -422,24 +422,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) + return req->link; + } + +-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) +-{ +- if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) +- return NULL; +- return __io_prep_linked_timeout(req); +-} +- +-static noinline void __io_arm_ltimeout(struct io_kiocb *req) +-{ +- io_queue_linked_timeout(__io_prep_linked_timeout(req)); +-} +- +-static inline void io_arm_ltimeout(struct io_kiocb *req) +-{ +- if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT)) +- __io_arm_ltimeout(req); +-} +- + static void io_prep_async_work(struct io_kiocb *req) + { + const struct io_issue_def *def = &io_issue_defs[req->opcode]; +@@ -493,7 +475,6 @@ static void io_prep_async_link(struct io_kiocb *req) + + static void io_queue_iowq(struct io_kiocb *req) + { +- struct io_kiocb *link = io_prep_linked_timeout(req); + struct io_uring_task *tctx = req->task->io_uring; + + BUG_ON(!tctx); +@@ -518,8 +499,6 @@ static void io_queue_iowq(struct io_kiocb *req) + + trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work)); + io_wq_enqueue(tctx->io_wq, &req->work); +- if (link) +- io_queue_linked_timeout(link); + } + + static __cold void io_queue_deferred(struct io_ring_ctx *ctx) +@@ -940,6 +919,14 @@ static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u + { + bool filled; + ++ /* ++ * If multishot has already posted deferred completions, ensure that ++ * those are flushed first before posting this one. If not, CQEs ++ * could get reordered. 
++ */ ++ if (!wq_list_empty(&ctx->submit_state.compl_reqs)) ++ __io_submit_flush_completions(ctx); ++ + io_cq_lock(ctx); + filled = io_fill_cqe_aux(ctx, user_data, res, cflags); + if (!filled && allow_overflow) +@@ -1863,17 +1850,24 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def, + return !!req->file; + } + ++#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT) ++ + static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) + { + const struct io_issue_def *def = &io_issue_defs[req->opcode]; + const struct cred *creds = NULL; ++ struct io_kiocb *link = NULL; + int ret; + + if (unlikely(!io_assign_file(req, def, issue_flags))) + return -EBADF; + +- if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) +- creds = override_creds(req->creds); ++ if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) { ++ if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) ++ creds = override_creds(req->creds); ++ if (req->flags & REQ_F_ARM_LTIMEOUT) ++ link = __io_prep_linked_timeout(req); ++ } + + if (!def->audit_skip) + audit_uring_entry(req->opcode); +@@ -1883,8 +1877,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) + if (!def->audit_skip) + audit_uring_exit(!ret, ret); + +- if (creds) +- revert_creds(creds); ++ if (unlikely(creds || link)) { ++ if (creds) ++ revert_creds(creds); ++ if (link) ++ io_queue_linked_timeout(link); ++ } + + if (ret == IOU_OK) { + if (issue_flags & IO_URING_F_COMPLETE_DEFER) +@@ -1939,8 +1937,6 @@ void io_wq_submit_work(struct io_wq_work *work) + else + req_ref_get(req); + +- io_arm_ltimeout(req); +- + /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ + if (work->flags & IO_WQ_WORK_CANCEL) { + fail: +@@ -2036,15 +2032,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd) + static void io_queue_async(struct io_kiocb *req, int ret) + __must_hold(&req->ctx->uring_lock) + { +- struct io_kiocb *linked_timeout; +- + if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { + io_req_defer_failed(req, ret); + return; + } + +- linked_timeout = io_prep_linked_timeout(req); +- + switch (io_arm_poll_handler(req, 0)) { + case IO_APOLL_READY: + io_kbuf_recycle(req, 0); +@@ -2057,9 +2049,6 @@ static void io_queue_async(struct io_kiocb *req, int ret) + case IO_APOLL_OK: + break; + } +- +- if (linked_timeout) +- io_queue_linked_timeout(linked_timeout); + } + + static inline void io_queue_sqe(struct io_kiocb *req) +@@ -2073,9 +2062,7 @@ static inline void io_queue_sqe(struct io_kiocb *req) + * We async punt it if the file wasn't marked NOWAIT, or if the file + * doesn't support non-blocking read/write attempts + */ +- if (likely(!ret)) +- io_arm_ltimeout(req); +- else ++ if (unlikely(ret)) + io_queue_async(req, ret); + } + +diff --git a/kernel/params.c b/kernel/params.c +index c7aed3c51cd538..e39ac5420cd6dc 100644 +--- a/kernel/params.c ++++ b/kernel/params.c +@@ -945,7 +945,9 @@ struct kset *module_kset; + static void module_kobj_release(struct kobject *kobj) + { + struct module_kobject *mk = to_module_kobject(kobj); +- complete(mk->kobj_completion); ++ ++ if (mk->kobj_completion) ++ complete(mk->kobj_completion); + } + + const struct kobj_type module_ktype = { +diff --git a/net/can/gw.c b/net/can/gw.c +index 37528826935e74..e65500c52bf5c7 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -130,7 +130,7 @@ struct cgw_job { + u32 handled_frames; + u32 dropped_frames; + u32 deleted_frames; +- struct cf_mod mod; ++ struct cf_mod __rcu *cf_mod; + union { + /* CAN 
frame data source */ + struct net_device *dev; +@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + struct cgw_job *gwj = (struct cgw_job *)data; + struct canfd_frame *cf; + struct sk_buff *nskb; ++ struct cf_mod *mod; + int modidx = 0; + + /* process strictly Classic CAN or CAN FD frames */ +@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + * When there is at least one modification function activated, + * we need to copy the skb as we want to modify skb->data. + */ +- if (gwj->mod.modfunc[0]) ++ mod = rcu_dereference(gwj->cf_mod); ++ if (mod->modfunc[0]) + nskb = skb_copy(skb, GFP_ATOMIC); + else + nskb = skb_clone(skb, GFP_ATOMIC); +@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + cf = (struct canfd_frame *)nskb->data; + + /* perform preprocessed modification functions if there are any */ +- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) +- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); ++ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx]) ++ (*mod->modfunc[modidx++])(cf, mod); + + /* Has the CAN frame been modified? */ + if (modidx) { +@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + } + + /* check for checksum updates */ +- if (gwj->mod.csumfunc.crc8) +- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); ++ if (mod->csumfunc.crc8) ++ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8); + +- if (gwj->mod.csumfunc.xor) +- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); ++ if (mod->csumfunc.xor) ++ (*mod->csumfunc.xor)(cf, &mod->csum.xor); + } + + /* clear the skb timestamp if not configured the other way */ +@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head) + { + struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); + ++ /* cgw_job::cf_mod is always accessed from the same cgw_job object within ++ * the same RCU read section. Once cgw_job is scheduled for removal, ++ * cf_mod can also be removed without mandating an additional grace period. 
++ */ ++ kfree(rcu_access_pointer(gwj->cf_mod)); + kmem_cache_free(cgw_cache, gwj); + } + ++/* Return cgw_job::cf_mod with RTNL protected section */ ++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj) ++{ ++ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked()); ++} ++ + static int cgw_notifier(struct notifier_block *nb, + unsigned long msg, void *ptr) + { +@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, + { + struct rtcanmsg *rtcan; + struct nlmsghdr *nlh; ++ struct cf_mod *mod; + + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); + if (!nlh) +@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, + goto cancel; + } + ++ mod = cgw_job_cf_mod(gwj); + if (gwj->flags & CGW_FLAGS_CAN_FD) { + struct cgw_fdframe_mod mb; + +- if (gwj->mod.modtype.and) { +- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.and; ++ if (mod->modtype.and) { ++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.and; + if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.or) { +- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.or; ++ if (mod->modtype.or) { ++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.or; + if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.xor) { +- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.xor; ++ if (mod->modtype.xor) { ++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.xor; + if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.set) { +- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.set; ++ if (mod->modtype.set) { ++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.set; + if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } else { + struct cgw_frame_mod mb; + +- if (gwj->mod.modtype.and) { +- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.and; ++ if (mod->modtype.and) { ++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.and; + if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.or) { +- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.or; ++ if (mod->modtype.or) { ++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.or; + if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.xor) { +- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.xor; ++ if (mod->modtype.xor) { ++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.xor; + if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.set) { +- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.set; ++ if (mod->modtype.set) { ++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.set; + if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } + +- if (gwj->mod.uid) { +- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) ++ if (mod->uid) { ++ if (nla_put_u32(skb, CGW_MOD_UID, 
mod->uid) < 0) + goto cancel; + } + +- if (gwj->mod.csumfunc.crc8) { ++ if (mod->csumfunc.crc8) { + if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, +- &gwj->mod.csum.crc8) < 0) ++ &mod->csum.crc8) < 0) + goto cancel; + } + +- if (gwj->mod.csumfunc.xor) { ++ if (mod->csumfunc.xor) { + if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, +- &gwj->mod.csum.xor) < 0) ++ &mod->csum.xor) < 0) + goto cancel; + } + +@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + struct net *net = sock_net(skb->sk); + struct rtcanmsg *r; + struct cgw_job *gwj; +- struct cf_mod mod; ++ struct cf_mod *mod; + struct can_can_gw ccgw; + u8 limhops = 0; + int err = 0; +@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + if (r->gwtype != CGW_TYPE_CAN_CAN) + return -EINVAL; + +- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); ++ mod = kmalloc(sizeof(*mod), GFP_KERNEL); ++ if (!mod) ++ return -ENOMEM; ++ ++ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); + if (err < 0) +- return err; ++ goto out_free_cf; + +- if (mod.uid) { ++ if (mod->uid) { + ASSERT_RTNL(); + + /* check for updating an existing job with identical uid */ + hlist_for_each_entry(gwj, &net->can.cgw_list, list) { +- if (gwj->mod.uid != mod.uid) ++ struct cf_mod *old_cf; ++ ++ old_cf = cgw_job_cf_mod(gwj); ++ if (old_cf->uid != mod->uid) + continue; + + /* interfaces & filters must be identical */ +- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) +- return -EINVAL; ++ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) { ++ err = -EINVAL; ++ goto out_free_cf; ++ } + +- /* update modifications with disabled softirq & quit */ +- local_bh_disable(); +- memcpy(&gwj->mod, &mod, sizeof(mod)); +- local_bh_enable(); ++ rcu_assign_pointer(gwj->cf_mod, mod); ++ kfree_rcu_mightsleep(old_cf); + return 0; + } + } + + /* ifindex == 0 is not allowed for job creation */ +- if (!ccgw.src_idx || !ccgw.dst_idx) +- return -ENODEV; ++ if (!ccgw.src_idx || !ccgw.dst_idx) { ++ err = -ENODEV; ++ goto out_free_cf; ++ } + + gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); +- if (!gwj) +- return -ENOMEM; ++ if (!gwj) { ++ err = -ENOMEM; ++ goto out_free_cf; ++ } + + gwj->handled_frames = 0; + gwj->dropped_frames = 0; +@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + gwj->limit_hops = limhops; + + /* insert already parsed information */ +- memcpy(&gwj->mod, &mod, sizeof(mod)); ++ RCU_INIT_POINTER(gwj->cf_mod, mod); + memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); + + err = -ENODEV; +@@ -1152,9 +1178,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + if (!err) + hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); + out: +- if (err) ++ if (err) { + kmem_cache_free(cgw_cache, gwj); +- ++out_free_cf: ++ kfree(mod); ++ } + return err; + } + +@@ -1214,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, + + /* remove only the first matching entry */ + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { ++ struct cf_mod *cf_mod; ++ + if (gwj->flags != r->flags) + continue; + + if (gwj->limit_hops != limhops) + continue; + ++ cf_mod = cgw_job_cf_mod(gwj); + /* we have a match when uid is enabled and identical */ +- if (gwj->mod.uid || mod.uid) { +- if (gwj->mod.uid != mod.uid) ++ if (cf_mod->uid || mod.uid) { ++ if (cf_mod->uid != mod.uid) + continue; + } else { + /* no uid => check for identical modifications */ +- if (memcmp(&gwj->mod, &mod, sizeof(mod))) ++ if (memcmp(cf_mod, &mod, 
sizeof(mod))) + continue; + } + +diff --git a/net/core/filter.c b/net/core/filter.c +index 066277b91a1be8..5143c8a9e52cab 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -2507,6 +2507,7 @@ int skb_do_redirect(struct sk_buff *skb) + goto out_drop; + skb->dev = dev; + dev_sw_netstats_rx_add(dev, skb->len); ++ skb_scrub_packet(skb, false); + return -EAGAIN; + } + return flags & BPF_F_NEIGH ? +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index bb9add46e382a6..231fa4dc6cde4a 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -3189,16 +3189,13 @@ static void add_v4_addrs(struct inet6_dev *idev) + struct in6_addr addr; + struct net_device *dev; + struct net *net = dev_net(idev->dev); +- int scope, plen, offset = 0; ++ int scope, plen; + u32 pflags = 0; + + ASSERT_RTNL(); + + memset(&addr, 0, sizeof(struct in6_addr)); +- /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */ +- if (idev->dev->addr_len == sizeof(struct in6_addr)) +- offset = sizeof(struct in6_addr) - 4; +- memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); ++ memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4); + + if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) { + scope = IPV6_ADDR_COMPATv4; +@@ -3508,7 +3505,13 @@ static void addrconf_gre_config(struct net_device *dev) + return; + } + +- if (dev->type == ARPHRD_ETHER) { ++ /* Generate the IPv6 link-local address using addrconf_addr_gen(), ++ * unless we have an IPv4 GRE device not bound to an IP address and ++ * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this ++ * case). Such devices fall back to add_v4_addrs() instead. ++ */ ++ if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 && ++ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) { + addrconf_addr_gen(idev, true); + return; + } +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index 20aad81fcad7e6..c2d88b1b06b872 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -63,7 +63,7 @@ struct hbucket { + #define ahash_sizeof_regions(htable_bits) \ + (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region)) + #define ahash_region(n, htable_bits) \ +- ((n) % ahash_numof_locks(htable_bits)) ++ ((n) / jhash_size(HTABLE_REGION_BITS)) + #define ahash_bucket_start(h, htable_bits) \ + ((htable_bits) < HTABLE_REGION_BITS ? 0 \ + : (h) * jhash_size(HTABLE_REGION_BITS)) +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 5cd511162bc038..0103c4a4d10a55 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) + return false; + } + +-/* Get route to daddr, update *saddr, optionally bind route to saddr */ ++/* Get route to daddr, optionally bind route to saddr */ + static struct rtable *do_output_route4(struct net *net, __be32 daddr, +- int rt_mode, __be32 *saddr) ++ int rt_mode, __be32 *ret_saddr) + { + struct flowi4 fl4; + struct rtable *rt; +- bool loop = false; + + memset(&fl4, 0, sizeof(fl4)); + fl4.daddr = daddr; +@@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, + retry: + rt = ip_route_output_key(net, &fl4); + if (IS_ERR(rt)) { +- /* Invalid saddr ? 
*/ +- if (PTR_ERR(rt) == -EINVAL && *saddr && +- rt_mode & IP_VS_RT_MODE_CONNECT && !loop) { +- *saddr = 0; +- flowi4_update_output(&fl4, 0, daddr, 0); +- goto retry; +- } + IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr); + return NULL; +- } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { ++ } ++ if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { + ip_rt_put(rt); +- *saddr = fl4.saddr; + flowi4_update_output(&fl4, 0, daddr, fl4.saddr); +- loop = true; ++ rt_mode = 0; + goto retry; + } +- *saddr = fl4.saddr; ++ if (ret_saddr) ++ *ret_saddr = fl4.saddr; + return rt; + } + +@@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, + if (ret_saddr) + *ret_saddr = dest_dst->dst_saddr.ip; + } else { +- __be32 saddr = htonl(INADDR_ANY); +- + noref = 0; + + /* For such unconfigured boxes avoid many route lookups + * for performance reasons because we do not remember saddr + */ + rt_mode &= ~IP_VS_RT_MODE_CONNECT; +- rt = do_output_route4(net, daddr, rt_mode, &saddr); ++ rt = do_output_route4(net, daddr, rt_mode, ret_saddr); + if (!rt) + goto err_unreach; +- if (ret_saddr) +- *ret_saddr = saddr; + } + + local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0; +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 6c5afb4ad67bb6..10c646b32b9d08 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -959,8 +959,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, + upcall.cmd = OVS_PACKET_CMD_ACTION; + upcall.mru = OVS_CB(skb)->mru; + +- for (a = nla_data(attr), rem = nla_len(attr); rem > 0; +- a = nla_next(a, &rem)) { ++ nla_for_each_nested(a, attr, rem) { + switch (nla_type(a)) { + case OVS_USERSPACE_ATTR_USERDATA: + upcall.userdata = a; +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 9a3f7ea80b34b9..716da8c6b3def3 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q, + */ + static inline void htb_next_rb_node(struct rb_node **n) + { +- *n = rb_next(*n); ++ if (*n) ++ *n = rb_next(*n); + } + + /** +@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) + */ + static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) + { +- WARN_ON(!cl->prio_activity); +- ++ if (!cl->prio_activity) ++ return; + htb_deactivate_prios(q, cl); + cl->prio_activity = 0; + } +@@ -1485,8 +1486,6 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) + { + struct htb_class *cl = (struct htb_class *)arg; + +- if (!cl->prio_activity) +- return; + htb_deactivate(qdisc_priv(sch), cl); + } + +@@ -1740,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg, + if (cl->parent) + cl->parent->children--; + +- if (cl->prio_activity) +- htb_deactivate(q, cl); ++ htb_deactivate(q, cl); + + if (cl->cmode != HTB_CAN_SEND) + htb_safe_rb_erase(&cl->pq_node, +@@ -1949,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, + /* turn parent into inner node */ + qdisc_purge_queue(parent->leaf.q); + parent_qdisc = parent->leaf.q; +- if (parent->prio_activity) +- htb_deactivate(q, parent); ++ htb_deactivate(q, parent); + + /* remove from evt list because of level change */ + if (parent->cmode != HTB_CAN_SEND) { +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index ce622a287abc6b..6db8c9a2a7a2b8 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -2511,7 +2511,7 @@ cfg80211_defrag_mle(const struct 
element *mle, const u8 *ie, size_t ielen, + /* Required length for first defragmentation */ + buf_len = mle->datalen - 1; + for_each_element(elem, mle->data + mle->datalen, +- ielen - sizeof(*mle) + mle->datalen) { ++ ie + ielen - mle->data - mle->datalen) { + if (elem->id != WLAN_EID_FRAGMENT) + break; + diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.91-92.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.91-92.patch new file mode 100644 index 0000000000..08903ea07d --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.91-92.patch @@ -0,0 +1,4627 @@ +diff --git a/Makefile b/Makefile +index a6a1942e2d00a9..51d975b3555195 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 91 ++SUBLEVEL = 92 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c +index 75523c1be07350..d8012d1a2e152d 100644 +--- a/arch/arm64/net/bpf_jit_comp.c ++++ b/arch/arm64/net/bpf_jit_comp.c +@@ -2001,7 +2001,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, + emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); + + if (flags & BPF_TRAMP_F_CALL_ORIG) { +- emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); ++ /* for the first pass, assume the worst case */ ++ if (!ctx->image) ++ ctx->idx += 4; ++ else ++ emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); + emit_call((const u64)__bpf_tramp_enter, ctx); + } + +@@ -2045,7 +2049,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + im->ip_epilogue = ctx->image + ctx->idx; +- emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); ++ /* for the first pass, assume the worst case */ ++ if (!ctx->image) ++ ctx->idx += 4; ++ else ++ emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); + emit_call((const u64)__bpf_tramp_exit, ctx); + } + +diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile +index 81e8089c9c4f18..9c6aff9376ec0b 100644 +--- a/arch/loongarch/Makefile ++++ b/arch/loongarch/Makefile +@@ -43,7 +43,7 @@ endif + + ifdef CONFIG_64BIT + ld-emul = $(64bit-emul) +-cflags-y += -mabi=lp64s ++cflags-y += -mabi=lp64s -mcmodel=normal + endif + + cflags-y += -pipe -msoft-float +diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h +index a5b63c84f8541a..e5d21e836d993c 100644 +--- a/arch/loongarch/include/asm/ptrace.h ++++ b/arch/loongarch/include/asm/ptrace.h +@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v + + /* Query offset/name of register from its name/offset */ + extern int regs_query_register_offset(const char *name); +-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) ++#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) + + /** + * regs_get_register() - get register value from its offset +diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h +index c8f59983f702df..d01b6704e1d8e6 100644 +--- a/arch/loongarch/include/asm/uprobes.h ++++ b/arch/loongarch/include/asm/uprobes.h +@@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t; + #define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP) + + struct arch_uprobe { +- unsigned long resume_era; + u32 insn[2]; + u32 ixol[2]; + bool simulate; +diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c +index ec5b28e570c963..4c476904227f95 100644 +--- a/arch/loongarch/kernel/kfpu.c 
++++ b/arch/loongarch/kernel/kfpu.c +@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN; + static DEFINE_PER_CPU(bool, in_kernel_fpu); + static DEFINE_PER_CPU(unsigned int, euen_current); + ++static inline void fpregs_lock(void) ++{ ++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) ++ preempt_disable(); ++ else ++ local_bh_disable(); ++} ++ ++static inline void fpregs_unlock(void) ++{ ++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) ++ preempt_enable(); ++ else ++ local_bh_enable(); ++} ++ + void kernel_fpu_begin(void) + { + unsigned int *euen_curr; + +- preempt_disable(); ++ if (!irqs_disabled()) ++ fpregs_lock(); + + WARN_ON(this_cpu_read(in_kernel_fpu)); + +@@ -73,7 +90,8 @@ void kernel_fpu_end(void) + + this_cpu_write(in_kernel_fpu, false); + +- preempt_enable(); ++ if (!irqs_disabled()) ++ fpregs_unlock(); + } + EXPORT_SYMBOL_GPL(kernel_fpu_end); + +diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c +index e7015f7b70e37c..a3732f754b5d8f 100644 +--- a/arch/loongarch/kernel/time.c ++++ b/arch/loongarch/kernel/time.c +@@ -110,7 +110,7 @@ static unsigned long __init get_loops_per_jiffy(void) + return lpj; + } + +-static long init_offset __nosavedata; ++static long init_offset; + + void save_counter(void) + { +diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c +index 87abc7137b738e..6022eb0f71dbce 100644 +--- a/arch/loongarch/kernel/uprobes.c ++++ b/arch/loongarch/kernel/uprobes.c +@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) + utask->autask.saved_trap_nr = current->thread.trap_nr; + current->thread.trap_nr = UPROBE_TRAP_NR; + instruction_pointer_set(regs, utask->xol_vaddr); +- user_enable_single_step(current); + + return 0; + } +@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) + + WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); + current->thread.trap_nr = utask->autask.saved_trap_nr; +- +- if (auprobe->simulate) +- instruction_pointer_set(regs, auprobe->resume_era); +- else +- instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); +- +- user_disable_single_step(current); ++ instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); + + return 0; + } +@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) + + current->thread.trap_nr = utask->autask.saved_trap_nr; + instruction_pointer_set(regs, utask->vaddr); +- user_disable_single_step(current); + } + + bool arch_uprobe_xol_was_trapped(struct task_struct *t) +@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) + + insn.word = auprobe->insn[0]; + arch_simulate_insn(insn, regs); +- auprobe->resume_era = regs->csr_era; + + return true; + } +diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c +index 1e0590542f987c..e7b7346592cb2a 100644 +--- a/arch/loongarch/power/hibernate.c ++++ b/arch/loongarch/power/hibernate.c +@@ -2,6 +2,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -14,6 +15,7 @@ struct pt_regs saved_regs; + + void save_processor_state(void) + { ++ save_counter(); + saved_crmd = csr_read32(LOONGARCH_CSR_CRMD); + saved_prmd = csr_read32(LOONGARCH_CSR_PRMD); + saved_euen = csr_read32(LOONGARCH_CSR_EUEN); +@@ -26,6 +28,7 @@ void save_processor_state(void) + + void restore_processor_state(void) + { ++ sync_counter(); + csr_write32(saved_crmd, LOONGARCH_CSR_CRMD); + csr_write32(saved_prmd, LOONGARCH_CSR_PRMD); + csr_write32(saved_euen, 
LOONGARCH_CSR_EUEN); +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index 4817e424d69658..8e6cad42b296ee 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -730,7 +730,15 @@ static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg) + /* Lower-half of the cacheline? */ + return !(addr & 0x20); + } +-#endif ++ ++u8 *its_static_thunk(int reg) ++{ ++ u8 *thunk = __x86_indirect_its_thunk_array[reg]; ++ ++ return thunk; ++} ++ ++#endif /* CONFIG_MITIGATION_ITS */ + + /* + * Rewrite the compiler generated retpoline thunk calls. +@@ -1449,13 +1457,6 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, + static void poison_cfi(void *addr) { } + #endif + +-u8 *its_static_thunk(int reg) +-{ +- u8 *thunk = __x86_indirect_its_thunk_array[reg]; +- +- return thunk; +-} +- + #endif + + void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, +diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c +index b42111a24cc28d..8e38c51359d05a 100644 +--- a/arch/x86/kvm/smm.c ++++ b/arch/x86/kvm/smm.c +@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) + + kvm_mmu_reset_context(vcpu); + } ++EXPORT_SYMBOL_GPL(kvm_smm_changed); + + void process_smi(struct kvm_vcpu *vcpu) + { +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 29c1be65cb71a0..c84a1451f194c4 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -2211,12 +2211,6 @@ static int shutdown_interception(struct kvm_vcpu *vcpu) + struct kvm_run *kvm_run = vcpu->run; + struct vcpu_svm *svm = to_svm(vcpu); + +- /* +- * The VM save area has already been encrypted so it +- * cannot be reinitialized - just terminate. +- */ +- if (sev_es_guest(vcpu->kvm)) +- return -EINVAL; + + /* + * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put +@@ -2225,9 +2219,18 @@ static int shutdown_interception(struct kvm_vcpu *vcpu) + * userspace. At a platform view, INIT is acceptable behavior as + * there exist bare metal platforms that automatically INIT the CPU + * in response to shutdown. ++ * ++ * The VM save area for SEV-ES guests has already been encrypted so it ++ * cannot be reinitialized, i.e. synthesizing INIT is futile. 
+ */ +- clear_page(svm->vmcb); +- kvm_vcpu_reset(vcpu, true); ++ if (!sev_es_guest(vcpu->kvm)) { ++ clear_page(svm->vmcb); ++#ifdef CONFIG_KVM_SMM ++ if (is_smm(vcpu)) ++ kvm_smm_changed(vcpu, false); ++#endif ++ kvm_vcpu_reset(vcpu, true); ++ } + + kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; + return 0; +diff --git a/block/bio.c b/block/bio.c +index 4a8e7616995718..b197abbaebc464 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -600,7 +600,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) + { + struct bio *bio; + +- if (nr_vecs > UIO_MAXIOV) ++ if (nr_vecs > BIO_MAX_INLINE_VECS) + return NULL; + return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); + } +diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c +index f73ce6e13065dd..54676e3d82dd59 100644 +--- a/drivers/acpi/pptt.c ++++ b/drivers/acpi/pptt.c +@@ -231,16 +231,18 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor); + +- while ((unsigned long)entry + proc_sz < table_end) { ++ /* ignore subtable types that are smaller than a processor node */ ++ while ((unsigned long)entry + proc_sz <= table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; ++ + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + cpu_node->parent == node_entry) + return 0; + if (entry->length == 0) + return 0; ++ + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); +- + } + return 1; + } +@@ -273,15 +275,18 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he + proc_sz = sizeof(struct acpi_pptt_processor); + + /* find the processor structure associated with this cpuid */ +- while ((unsigned long)entry + proc_sz < table_end) { ++ while ((unsigned long)entry + proc_sz <= table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; + + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } ++ /* entry->length may not equal proc_sz, revalidate the processor structure length */ + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + acpi_cpu_id == cpu_node->acpi_processor_id && ++ (unsigned long)entry + entry->length <= table_end && ++ entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) && + acpi_pptt_leaf_node(table_hdr, cpu_node)) { + return (struct acpi_pptt_processor *)entry; + } +diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c +index a4274d8c7faaf3..f0d0c5a6d5127a 100644 +--- a/drivers/bluetooth/btnxpuart.c ++++ b/drivers/bluetooth/btnxpuart.c +@@ -601,8 +601,10 @@ static int nxp_download_firmware(struct hci_dev *hdev) + &nxpdev->tx_state), + msecs_to_jiffies(60000)); + +- release_firmware(nxpdev->fw); +- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); ++ if (nxpdev->fw && strlen(nxpdev->fw_name)) { ++ release_firmware(nxpdev->fw); ++ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); ++ } + + if (err == 0) { + bt_dev_err(hdev, "FW Download Timeout. 
offset: %d", +diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h +index 369496a6aebf13..27e61ddfb62298 100644 +--- a/drivers/char/tpm/tpm_tis_core.h ++++ b/drivers/char/tpm/tpm_tis_core.h +@@ -54,7 +54,7 @@ enum tis_int_flags { + enum tis_defaults { + TIS_MEM_LEN = 0x5000, + TIS_SHORT_TIMEOUT = 750, /* ms */ +- TIS_LONG_TIMEOUT = 2000, /* 2 sec */ ++ TIS_LONG_TIMEOUT = 4000, /* 4 secs */ + TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */ + TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */ + }; +diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c +index eb8b733065b24d..9093f751f1336a 100644 +--- a/drivers/dma-buf/dma-resv.c ++++ b/drivers/dma-buf/dma-resv.c +@@ -313,8 +313,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + count++; + + dma_resv_list_set(fobj, i, fence, usage); +- /* pointer update must be visible before we extend the num_fences */ +- smp_store_mb(fobj->num_fences, count); ++ /* fence update must be visible before we extend the num_fences */ ++ smp_wmb(); ++ fobj->num_fences = count; + } + EXPORT_SYMBOL(dma_resv_add_fence); + +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c +index 78b8a97b236376..ffe621695e472b 100644 +--- a/drivers/dma/dmatest.c ++++ b/drivers/dma/dmatest.c +@@ -827,9 +827,9 @@ static int dmatest_func(void *data) + } else { + dma_async_issue_pending(chan); + +- wait_event_timeout(thread->done_wait, +- done->done, +- msecs_to_jiffies(params->timeout)); ++ wait_event_freezable_timeout(thread->done_wait, ++ done->done, ++ msecs_to_jiffies(params->timeout)); + + status = dma_async_is_tx_complete(chan, cookie, NULL, + NULL); +diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c +index 786afb256b6e0d..92e86ae9db29d7 100644 +--- a/drivers/dma/idxd/init.c ++++ b/drivers/dma/idxd/init.c +@@ -145,6 +145,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd) + pci_free_irq_vectors(pdev); + } + ++static void idxd_clean_wqs(struct idxd_device *idxd) ++{ ++ struct idxd_wq *wq; ++ struct device *conf_dev; ++ int i; ++ ++ for (i = 0; i < idxd->max_wqs; i++) { ++ wq = idxd->wqs[i]; ++ if (idxd->hw.wq_cap.op_config) ++ bitmap_free(wq->opcap_bmap); ++ kfree(wq->wqcfg); ++ conf_dev = wq_confdev(wq); ++ put_device(conf_dev); ++ kfree(wq); ++ } ++ bitmap_free(idxd->wq_enable_map); ++ kfree(idxd->wqs); ++} ++ + static int idxd_setup_wqs(struct idxd_device *idxd) + { + struct device *dev = &idxd->pdev->dev; +@@ -159,8 +178,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + + idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); + if (!idxd->wq_enable_map) { +- kfree(idxd->wqs); +- return -ENOMEM; ++ rc = -ENOMEM; ++ goto err_bitmap; + } + + for (i = 0; i < idxd->max_wqs; i++) { +@@ -179,10 +198,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + conf_dev->bus = &dsa_bus_type; + conf_dev->type = &idxd_wq_device_type; + rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); +- if (rc < 0) { +- put_device(conf_dev); ++ if (rc < 0) + goto err; +- } + + mutex_init(&wq->wq_lock); + init_waitqueue_head(&wq->err_queue); +@@ -193,7 +210,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; + wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); + if (!wq->wqcfg) { +- put_device(conf_dev); + rc = -ENOMEM; + goto err; + } +@@ -201,9 +217,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + if (idxd->hw.wq_cap.op_config) { + wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, 
GFP_KERNEL); + if (!wq->opcap_bmap) { +- put_device(conf_dev); + rc = -ENOMEM; +- goto err; ++ goto err_opcap_bmap; + } + bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); + } +@@ -214,15 +229,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + + return 0; + +- err: ++err_opcap_bmap: ++ kfree(wq->wqcfg); ++ ++err: ++ put_device(conf_dev); ++ kfree(wq); ++ + while (--i >= 0) { + wq = idxd->wqs[i]; ++ if (idxd->hw.wq_cap.op_config) ++ bitmap_free(wq->opcap_bmap); ++ kfree(wq->wqcfg); + conf_dev = wq_confdev(wq); + put_device(conf_dev); ++ kfree(wq); ++ + } ++ bitmap_free(idxd->wq_enable_map); ++ ++err_bitmap: ++ kfree(idxd->wqs); ++ + return rc; + } + ++static void idxd_clean_engines(struct idxd_device *idxd) ++{ ++ struct idxd_engine *engine; ++ struct device *conf_dev; ++ int i; ++ ++ for (i = 0; i < idxd->max_engines; i++) { ++ engine = idxd->engines[i]; ++ conf_dev = engine_confdev(engine); ++ put_device(conf_dev); ++ kfree(engine); ++ } ++ kfree(idxd->engines); ++} ++ + static int idxd_setup_engines(struct idxd_device *idxd) + { + struct idxd_engine *engine; +@@ -253,6 +299,7 @@ static int idxd_setup_engines(struct idxd_device *idxd) + rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); + if (rc < 0) { + put_device(conf_dev); ++ kfree(engine); + goto err; + } + +@@ -266,10 +313,26 @@ static int idxd_setup_engines(struct idxd_device *idxd) + engine = idxd->engines[i]; + conf_dev = engine_confdev(engine); + put_device(conf_dev); ++ kfree(engine); + } ++ kfree(idxd->engines); ++ + return rc; + } + ++static void idxd_clean_groups(struct idxd_device *idxd) ++{ ++ struct idxd_group *group; ++ int i; ++ ++ for (i = 0; i < idxd->max_groups; i++) { ++ group = idxd->groups[i]; ++ put_device(group_confdev(group)); ++ kfree(group); ++ } ++ kfree(idxd->groups); ++} ++ + static int idxd_setup_groups(struct idxd_device *idxd) + { + struct device *dev = &idxd->pdev->dev; +@@ -300,6 +363,7 @@ static int idxd_setup_groups(struct idxd_device *idxd) + rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); + if (rc < 0) { + put_device(conf_dev); ++ kfree(group); + goto err; + } + +@@ -324,20 +388,18 @@ static int idxd_setup_groups(struct idxd_device *idxd) + while (--i >= 0) { + group = idxd->groups[i]; + put_device(group_confdev(group)); ++ kfree(group); + } ++ kfree(idxd->groups); ++ + return rc; + } + + static void idxd_cleanup_internals(struct idxd_device *idxd) + { +- int i; +- +- for (i = 0; i < idxd->max_groups; i++) +- put_device(group_confdev(idxd->groups[i])); +- for (i = 0; i < idxd->max_engines; i++) +- put_device(engine_confdev(idxd->engines[i])); +- for (i = 0; i < idxd->max_wqs; i++) +- put_device(wq_confdev(idxd->wqs[i])); ++ idxd_clean_groups(idxd); ++ idxd_clean_engines(idxd); ++ idxd_clean_wqs(idxd); + destroy_workqueue(idxd->wq); + } + +@@ -380,7 +442,7 @@ static int idxd_init_evl(struct idxd_device *idxd) + static int idxd_setup_internals(struct idxd_device *idxd) + { + struct device *dev = &idxd->pdev->dev; +- int rc, i; ++ int rc; + + init_waitqueue_head(&idxd->cmd_waitq); + +@@ -411,14 +473,11 @@ static int idxd_setup_internals(struct idxd_device *idxd) + err_evl: + destroy_workqueue(idxd->wq); + err_wkq_create: +- for (i = 0; i < idxd->max_groups; i++) +- put_device(group_confdev(idxd->groups[i])); ++ idxd_clean_groups(idxd); + err_group: +- for (i = 0; i < idxd->max_engines; i++) +- put_device(engine_confdev(idxd->engines[i])); ++ idxd_clean_engines(idxd); + err_engine: +- for (i = 0; i < idxd->max_wqs; i++) +- 
put_device(wq_confdev(idxd->wqs[i])); ++ idxd_clean_wqs(idxd); + err_wqs: + return rc; + } +@@ -518,6 +577,17 @@ static void idxd_read_caps(struct idxd_device *idxd) + idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); + } + ++static void idxd_free(struct idxd_device *idxd) ++{ ++ if (!idxd) ++ return; ++ ++ put_device(idxd_confdev(idxd)); ++ bitmap_free(idxd->opcap_bmap); ++ ida_free(&idxd_ida, idxd->id); ++ kfree(idxd); ++} ++ + static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) + { + struct device *dev = &pdev->dev; +@@ -535,28 +605,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d + idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); + idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); + if (idxd->id < 0) +- return NULL; ++ goto err_ida; + + idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); +- if (!idxd->opcap_bmap) { +- ida_free(&idxd_ida, idxd->id); +- return NULL; +- } ++ if (!idxd->opcap_bmap) ++ goto err_opcap; + + device_initialize(conf_dev); + conf_dev->parent = dev; + conf_dev->bus = &dsa_bus_type; + conf_dev->type = idxd->data->dev_type; + rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); +- if (rc < 0) { +- put_device(conf_dev); +- return NULL; +- } ++ if (rc < 0) ++ goto err_name; + + spin_lock_init(&idxd->dev_lock); + spin_lock_init(&idxd->cmd_lock); + + return idxd; ++ ++err_name: ++ put_device(conf_dev); ++ bitmap_free(idxd->opcap_bmap); ++err_opcap: ++ ida_free(&idxd_ida, idxd->id); ++err_ida: ++ kfree(idxd); ++ ++ return NULL; + } + + static int idxd_enable_system_pasid(struct idxd_device *idxd) +@@ -778,7 +854,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + err: + pci_iounmap(pdev, idxd->reg_base); + err_iomap: +- put_device(idxd_confdev(idxd)); ++ idxd_free(idxd); + err_idxd_alloc: + pci_disable_device(pdev); + return rc; +@@ -815,7 +891,6 @@ static void idxd_shutdown(struct pci_dev *pdev) + static void idxd_remove(struct pci_dev *pdev) + { + struct idxd_device *idxd = pci_get_drvdata(pdev); +- struct idxd_irq_entry *irq_entry; + + idxd_unregister_devices(idxd); + /* +@@ -828,20 +903,12 @@ static void idxd_remove(struct pci_dev *pdev) + get_device(idxd_confdev(idxd)); + device_unregister(idxd_confdev(idxd)); + idxd_shutdown(pdev); +- if (device_pasid_enabled(idxd)) +- idxd_disable_system_pasid(idxd); + idxd_device_remove_debugfs(idxd); +- +- irq_entry = idxd_get_ie(idxd, 0); +- free_irq(irq_entry->vector, irq_entry); +- pci_free_irq_vectors(pdev); ++ idxd_cleanup(idxd); + pci_iounmap(pdev, idxd->reg_base); +- if (device_user_pasid_enabled(idxd)) +- idxd_disable_sva(pdev); +- pci_disable_device(pdev); +- destroy_workqueue(idxd->wq); +- perfmon_pmu_remove(idxd); + put_device(idxd_confdev(idxd)); ++ idxd_free(idxd); ++ pci_disable_device(pdev); + } + + static struct pci_driver idxd_pci_driver = { +diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c +index 02a1ab04f498e5..418e1774af1e5e 100644 +--- a/drivers/dma/ti/k3-udma.c ++++ b/drivers/dma/ti/k3-udma.c +@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work) + u32 residue_diff; + ktime_t time_diff; + unsigned long delay; ++ unsigned long flags; + + while (1) { ++ spin_lock_irqsave(&uc->vc.lock, flags); ++ + if (uc->desc) { + /* Get previous residue and time stamp */ + residue_diff = uc->tx_drain.residue; +@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct 
*work) + break; + } + ++ spin_unlock_irqrestore(&uc->vc.lock, flags); ++ + usleep_range(ktime_to_us(delay), + ktime_to_us(delay) + 10); + continue; +@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work) + + break; + } ++ ++ spin_unlock_irqrestore(&uc->vc.lock, flags); + } + + static irqreturn_t udma_ring_irq_handler(int irq, void *data) +@@ -4214,7 +4221,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) + { + struct udma_dev *ud = ofdma->of_dma_data; +- dma_cap_mask_t mask = ud->ddev.cap_mask; + struct udma_filter_param filter_param; + struct dma_chan *chan; + +@@ -4246,7 +4252,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, + } + } + +- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, ++ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param, + ofdma->of_node); + if (!chan) { + dev_err(ud->dev, "get channel fail in %s.\n", __func__); +diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig +index ea0f5083ac47f1..9a41c1c91f71af 100644 +--- a/drivers/firmware/arm_scmi/Kconfig ++++ b/drivers/firmware/arm_scmi/Kconfig +@@ -55,6 +55,20 @@ config ARM_SCMI_RAW_MODE_SUPPORT_COEX + operate normally, thing which could make an SCMI test suite using the + SCMI Raw mode support unreliable. If unsure, say N. + ++config ARM_SCMI_DEBUG_COUNTERS ++ bool "Enable SCMI communication debug metrics tracking" ++ select ARM_SCMI_NEED_DEBUGFS ++ depends on DEBUG_FS ++ default n ++ help ++ Enables tracking of some key communication metrics for debug ++ purposes. It may track metrics like how many messages were sent ++ or received, were there any failures, what kind of failures, ..etc. ++ ++ Enable this option to create a new debugfs directory which contains ++ such useful debug counters. This can be helpful for debugging and ++ SCMI monitoring. 
++ + config ARM_SCMI_HAVE_TRANSPORT + bool + help +diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h +index 039f686f4580d1..6c223487121544 100644 +--- a/drivers/firmware/arm_scmi/common.h ++++ b/drivers/firmware/arm_scmi/common.h +@@ -303,6 +303,41 @@ extern const struct scmi_desc scmi_optee_desc; + + void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); + ++enum debug_counters { ++ SENT_OK, ++ SENT_FAIL, ++ SENT_FAIL_POLLING_UNSUPPORTED, ++ SENT_FAIL_CHANNEL_NOT_FOUND, ++ RESPONSE_OK, ++ NOTIFICATION_OK, ++ DELAYED_RESPONSE_OK, ++ XFERS_RESPONSE_TIMEOUT, ++ XFERS_RESPONSE_POLLED_TIMEOUT, ++ RESPONSE_POLLED_OK, ++ ERR_MSG_UNEXPECTED, ++ ERR_MSG_INVALID, ++ ERR_MSG_NOMEM, ++ ERR_PROTOCOL, ++ SCMI_DEBUG_COUNTERS_LAST ++}; ++ ++static inline void scmi_inc_count(atomic_t *arr, int stat) ++{ ++ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) ++ atomic_inc(&arr[stat]); ++} ++ ++enum scmi_bad_msg { ++ MSG_UNEXPECTED = -1, ++ MSG_INVALID = -2, ++ MSG_UNKNOWN = -3, ++ MSG_NOMEM = -4, ++ MSG_MBOX_SPURIOUS = -5, ++}; ++ ++void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, ++ enum scmi_bad_msg err); ++ + /* shmem related declarations */ + struct scmi_shared_mem; + +diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c +index efa9698c876a09..65d1e66a347d75 100644 +--- a/drivers/firmware/arm_scmi/driver.c ++++ b/drivers/firmware/arm_scmi/driver.c +@@ -108,12 +108,14 @@ struct scmi_protocol_instance { + * @name: Name of this SCMI instance + * @type: Type of this SCMI instance + * @is_atomic: Flag to state if the transport of this instance is atomic ++ * @counters: An array of atomic_c's used for tracking statistics (if enabled) + */ + struct scmi_debug_info { + struct dentry *top_dentry; + const char *name; + const char *type; + bool is_atomic; ++ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; + }; + + /** +@@ -687,6 +689,45 @@ scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id) + return xfer ?: ERR_PTR(-EINVAL); + } + ++/** ++ * scmi_bad_message_trace - A helper to trace weird messages ++ * ++ * @cinfo: A reference to the channel descriptor on which the message was ++ * received ++ * @msg_hdr: Message header to track ++ * @err: A specific error code used as a status value in traces. ++ * ++ * This helper can be used to trace any kind of weird, incomplete, unexpected, ++ * timed-out message that arrives and as such, can be traced only referring to ++ * the header content, since the payload is missing/unreliable. 
++ */ ++void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, ++ enum scmi_bad_msg err) ++{ ++ char *tag; ++ struct scmi_info *info = handle_to_scmi_info(cinfo->handle); ++ ++ switch (MSG_XTRACT_TYPE(msg_hdr)) { ++ case MSG_TYPE_COMMAND: ++ tag = "!RESP"; ++ break; ++ case MSG_TYPE_DELAYED_RESP: ++ tag = "!DLYD"; ++ break; ++ case MSG_TYPE_NOTIFICATION: ++ tag = "!NOTI"; ++ break; ++ default: ++ tag = "!UNKN"; ++ break; ++ } ++ ++ trace_scmi_msg_dump(info->id, cinfo->id, ++ MSG_XTRACT_PROT_ID(msg_hdr), ++ MSG_XTRACT_ID(msg_hdr), tag, ++ MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0); ++} ++ + /** + * scmi_msg_response_validate - Validate message type against state of related + * xfer +@@ -813,6 +854,10 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) + "Message for %d type %d is not expected!\n", + xfer_id, msg_type); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); ++ ++ scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); ++ scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); ++ + return xfer; + } + refcount_inc(&xfer->users); +@@ -837,6 +882,11 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) + dev_err(cinfo->dev, + "Invalid message type:%d for %d - HDR:0x%X state:%d\n", + msg_type, xfer_id, msg_hdr, xfer->state); ++ ++ scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); ++ scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); ++ ++ + /* On error the refcount incremented above has to be dropped */ + __scmi_xfer_put(minfo, xfer); + xfer = ERR_PTR(-EINVAL); +@@ -878,6 +928,10 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, + if (IS_ERR(xfer)) { + dev_err(dev, "failed to get free message slot (%ld)\n", + PTR_ERR(xfer)); ++ ++ scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); ++ scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); ++ + scmi_clear_channel(info, cinfo); + return; + } +@@ -892,6 +946,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, + trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, "NOTI", xfer->hdr.seq, + xfer->hdr.status, xfer->rx.buf, xfer->rx.len); ++ scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); + + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, + xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); +@@ -951,8 +1006,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { + scmi_clear_channel(info, cinfo); + complete(xfer->async_done); ++ scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); + } else { + complete(&xfer->done); ++ scmi_inc_count(info->dbg->counters, RESPONSE_OK); + } + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { +@@ -997,6 +1054,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) + break; + default: + WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); ++ scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN); + break; + } + } +@@ -1017,7 +1075,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph, + } + + static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, +- struct scmi_xfer *xfer, ktime_t stop) ++ struct scmi_xfer *xfer, ktime_t stop, ++ bool *ooo) + { + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + +@@ -1026,7 +1085,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, + * in case of out-of-order receptions of delayed responses + */ + return info->desc->ops->poll_done(cinfo, xfer) || +- try_wait_for_completion(&xfer->done) || ++ (*ooo = 
try_wait_for_completion(&xfer->done)) || + ktime_after(ktime_get(), stop); + } + +@@ -1035,6 +1094,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + struct scmi_xfer *xfer, unsigned int timeout_ms) + { + int ret = 0; ++ struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + + if (xfer->hdr.poll_completion) { + /* +@@ -1042,26 +1102,27 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + * itself to support synchronous commands replies. + */ + if (!desc->sync_cmds_completed_on_ret) { ++ bool ooo = false; ++ + /* + * Poll on xfer using transport provided .poll_done(); + * assumes no completion interrupt was available. + */ + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + +- spin_until_cond(scmi_xfer_done_no_timeout(cinfo, +- xfer, stop)); +- if (ktime_after(ktime_get(), stop)) { ++ spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, ++ stop, &ooo)); ++ if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; ++ scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); + } + } + + if (!ret) { + unsigned long flags; +- struct scmi_info *info = +- handle_to_scmi_info(cinfo->handle); + + /* + * Do not fetch_response if an out-of-order delayed +@@ -1081,6 +1142,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + "RESP" : "resp", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); ++ scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); + + if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + struct scmi_info *info = +@@ -1098,6 +1160,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, + dev_err(dev, "timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; ++ scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); + } + } + +@@ -1181,13 +1244,15 @@ static int do_xfer(const struct scmi_protocol_handle *ph, + !is_transport_polling_capable(info->desc)) { + dev_warn_once(dev, + "Polling mode is not supported by transport.\n"); ++ scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); + return -EINVAL; + } + + cinfo = idr_find(&info->tx_idr, pi->proto->id); +- if (unlikely(!cinfo)) ++ if (unlikely(!cinfo)) { ++ scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); + return -EINVAL; +- ++ } + /* True ONLY if also supported by transport. 
*/ + if (is_polling_enabled(cinfo, info->desc)) + xfer->hdr.poll_completion = true; +@@ -1219,16 +1284,20 @@ static int do_xfer(const struct scmi_protocol_handle *ph, + ret = info->desc->ops->send_message(cinfo, xfer); + if (ret < 0) { + dev_dbg(dev, "Failed to send message %d\n", ret); ++ scmi_inc_count(info->dbg->counters, SENT_FAIL); + return ret; + } + + trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, + xfer->hdr.id, "CMND", xfer->hdr.seq, + xfer->hdr.status, xfer->tx.buf, xfer->tx.len); ++ scmi_inc_count(info->dbg->counters, SENT_OK); + + ret = scmi_wait_for_message_response(cinfo, xfer); +- if (!ret && xfer->hdr.status) ++ if (!ret && xfer->hdr.status) { + ret = scmi_to_linux_errno(xfer->hdr.status); ++ scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); ++ } + + if (info->desc->ops->mark_txdone) + info->desc->ops->mark_txdone(cinfo, ret, xfer); +diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c +index 8e513f70b75d4c..f1d5e3fba35e07 100644 +--- a/drivers/firmware/arm_scmi/mailbox.c ++++ b/drivers/firmware/arm_scmi/mailbox.c +@@ -58,6 +58,9 @@ static void rx_callback(struct mbox_client *cl, void *m) + */ + if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) { + dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n"); ++ scmi_bad_message_trace(smbox->cinfo, ++ shmem_read_header(smbox->shmem), ++ MSG_MBOX_SPURIOUS); + return; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h +index d59e8536192ca9..c5d706a4c7b4a7 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h +@@ -788,6 +788,7 @@ struct amdgpu_device { + bool need_swiotlb; + bool accel_working; + struct notifier_block acpi_nb; ++ struct notifier_block pm_nb; + struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; + struct debugfs_blob_wrapper debugfs_vbios_blob; + struct debugfs_blob_wrapper debugfs_discovery_blob; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 10f5a3d0f59163..f8058dd5356a13 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -141,6 +141,10 @@ const char *amdgpu_asic_name[] = { + "LAST", + }; + ++static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev); ++static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, ++ void *data); ++ + /** + * DOC: pcie_replay_count + * +@@ -3920,6 +3924,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, + + amdgpu_device_check_iommu_direct_map(adev); + ++ adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; ++ r = register_pm_notifier(&adev->pm_nb); ++ if (r) ++ goto failed; ++ + return 0; + + release_ras_con: +@@ -3981,6 +3990,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) + flush_delayed_work(&adev->delayed_init_work); + adev->shutdown = true; + ++ unregister_pm_notifier(&adev->pm_nb); ++ + /* make sure IB test finished before entering exclusive mode + * to avoid preemption on IB test + */ +@@ -4107,6 +4118,33 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) + /* + * Suspend & resume. + */ ++/** ++ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events ++ * @nb: notifier block ++ * @mode: suspend mode ++ * @data: data ++ * ++ * This function is called when the system is about to suspend or hibernate. 
++ * It is used to set the appropriate flags so that eviction can be optimized ++ * in the pm prepare callback. ++ */ ++static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, ++ void *data) ++{ ++ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb); ++ ++ switch (mode) { ++ case PM_HIBERNATION_PREPARE: ++ adev->in_s4 = true; ++ break; ++ case PM_POST_HIBERNATION: ++ adev->in_s4 = false; ++ break; ++ } ++ ++ return NOTIFY_DONE; ++} ++ + /** + * amdgpu_device_prepare - prepare for device suspend + * +@@ -4551,6 +4589,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, + retry: + amdgpu_amdkfd_pre_reset(adev); + ++ amdgpu_device_stop_pending_resets(adev); ++ + if (from_hypervisor) + r = amdgpu_virt_request_full_gpu(adev, true); + else +@@ -5347,11 +5387,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + tmp_adev->asic_reset_res = r; + } + +- /* +- * Drop all pending non scheduler resets. Scheduler resets +- * were already dropped during drm_sched_stop +- */ +- amdgpu_device_stop_pending_resets(tmp_adev); ++ if (!amdgpu_sriov_vf(tmp_adev)) ++ /* ++ * Drop all pending non scheduler resets. Scheduler resets ++ * were already dropped during drm_sched_stop ++ */ ++ amdgpu_device_stop_pending_resets(tmp_adev); + } + + /* Actual ASIC resets if needed.*/ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index bacf2e5de2abce..940411f8e99be0 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -2463,7 +2463,6 @@ static int amdgpu_pmops_freeze(struct device *dev) + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; + +- adev->in_s4 = true; + r = amdgpu_device_suspend(drm_dev, true); + if (r) + return r; +@@ -2476,13 +2475,8 @@ static int amdgpu_pmops_freeze(struct device *dev) + static int amdgpu_pmops_thaw(struct device *dev) + { + struct drm_device *drm_dev = dev_get_drvdata(dev); +- struct amdgpu_device *adev = drm_to_adev(drm_dev); +- int r; +- +- r = amdgpu_device_resume(drm_dev, true); +- adev->in_s4 = false; + +- return r; ++ return amdgpu_device_resume(drm_dev, true); + } + + static int amdgpu_pmops_poweroff(struct device *dev) +@@ -2495,9 +2489,6 @@ static int amdgpu_pmops_poweroff(struct device *dev) + static int amdgpu_pmops_restore(struct device *dev) + { + struct drm_device *drm_dev = dev_get_drvdata(dev); +- struct amdgpu_device *adev = drm_to_adev(drm_dev); +- +- adev->in_s4 = false; + + return amdgpu_device_resume(drm_dev, true); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +index 22575422ca7ec1..7cb4b4118335a6 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +@@ -32,6 +32,7 @@ + + #include "amdgpu.h" + #include "amdgpu_ras.h" ++#include "amdgpu_reset.h" + #include "vi.h" + #include "soc15.h" + #include "nv.h" +@@ -468,7 +469,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) + return -EINVAL; + + if (pf2vf_info->size > 1024) { +- DRM_ERROR("invalid pf2vf message size\n"); ++ dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size); + return -EINVAL; + } + +@@ -479,7 +480,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) + adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, + adev->virt.fw_reserve.checksum_key, checksum); + if (checksum != checkval) { +- DRM_ERROR("invalid pf2vf message\n"); ++ dev_err(adev->dev, ++ "invalid pf2vf 
message: header checksum=0x%x calculated checksum=0x%x\n", ++ checksum, checkval); + return -EINVAL; + } + +@@ -493,7 +496,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) + adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, + 0, checksum); + if (checksum != checkval) { +- DRM_ERROR("invalid pf2vf message\n"); ++ dev_err(adev->dev, ++ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", ++ checksum, checkval); + return -EINVAL; + } + +@@ -529,7 +534,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; + break; + default: +- DRM_ERROR("invalid pf2vf version\n"); ++ dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); + return -EINVAL; + } + +@@ -628,8 +633,21 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) + int ret; + + ret = amdgpu_virt_read_pf2vf_data(adev); +- if (ret) ++ if (ret) { ++ adev->virt.vf2pf_update_retry_cnt++; ++ if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && ++ amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) { ++ if (amdgpu_reset_domain_schedule(adev->reset_domain, ++ &adev->virt.flr_work)) ++ return; ++ else ++ dev_err(adev->dev, "Failed to queue work! at %s", __func__); ++ } ++ + goto out; ++ } ++ ++ adev->virt.vf2pf_update_retry_cnt = 0; + amdgpu_virt_write_vf2pf_data(adev); + + out: +@@ -650,6 +668,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) + adev->virt.fw_reserve.p_pf2vf = NULL; + adev->virt.fw_reserve.p_vf2pf = NULL; + adev->virt.vf2pf_update_interval_ms = 0; ++ adev->virt.vf2pf_update_retry_cnt = 0; + + if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { + DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +index 23b6efa9d25df8..891713757a8f5a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +@@ -51,6 +51,8 @@ + /* tonga/fiji use this offset */ + #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 + ++#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 30 ++ + enum amdgpu_sriov_vf_mode { + SRIOV_VF_MODE_BARE_METAL = 0, + SRIOV_VF_MODE_ONE_VF, +@@ -253,6 +255,7 @@ struct amdgpu_virt { + /* vf2pf message */ + struct delayed_work vf2pf_work; + uint32_t vf2pf_update_interval_ms; ++ int vf2pf_update_retry_cnt; + + /* multimedia bandwidth config */ + bool is_mm_bw_enabled; +diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +index 63725b2ebc0373..37ac6d8ff81362 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c ++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +@@ -276,6 +276,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) + timeout -= 10; + } while (timeout > 1); + ++ dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); ++ + flr_done: + atomic_set(&adev->reset_domain->in_gpu_reset, 0); + up_write(&adev->reset_domain->sem); +diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +index 6a68ee946f1cc3..96edd5d11326dd 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c ++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +@@ -298,6 +298,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) + timeout -= 10; + } while (timeout > 1); + ++ dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); ++ + flr_done: + 
atomic_set(&adev->reset_domain->in_gpu_reset, 0); + up_write(&adev->reset_domain->sem); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index f6017be8f9957e..bcf0dc05c76765 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -11069,7 +11069,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( + /* The reply is stored in the top nibble of the command. */ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; + +- if (!payload->write && p_notify->aux_reply.length) ++ /*write req may receive a byte indicating partially written number as well*/ ++ if (p_notify->aux_reply.length) + memcpy(payload->data, p_notify->aux_reply.data, + p_notify->aux_reply.length); + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +index c0cacd501c83eb..2698e5c74ddfda 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -59,6 +59,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + enum aux_return_code_type operation_result; + struct amdgpu_device *adev; + struct ddc_service *ddc; ++ uint8_t copy[16]; + + if (WARN_ON(msg->size > 16)) + return -E2BIG; +@@ -74,6 +75,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; + payload.defer_delay = 0; + ++ if (payload.write) { ++ memcpy(copy, msg->buffer, msg->size); ++ payload.data = copy; ++ } ++ + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, + &operation_result); + +@@ -97,9 +103,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + */ + if (payload.write && result >= 0) { + if (result) { +- /*one byte indicating partially written bytes. 
Force 0 to retry*/ +- drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n"); +- result = 0; ++ /*one byte indicating partially written bytes*/ ++ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); ++ result = payload.data[0]; + } else if (!payload.reply[0]) + /*I2C_ACK|AUX_ACK*/ + result = msg->size; +@@ -124,11 +130,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + break; + } + +- drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); ++ drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + } + + if (payload.reply[0]) +- drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", ++ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + payload.reply[0]); + + return result; +diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c +index 3b81468a1df297..0bf70664c35ee1 100644 +--- a/drivers/hid/hid-thrustmaster.c ++++ b/drivers/hid/hid-thrustmaster.c +@@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev) + u8 ep_addr[2] = {b_ep, 0}; + + if (!usb_check_int_endpoints(usbif, ep_addr)) { ++ kfree(send_buf); + hid_err(hdev, "Unexpected non-int endpoint\n"); + return; + } +diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c +index ad74cbc9a0aa59..45de01dea4b1c0 100644 +--- a/drivers/hid/hid-uclogic-core.c ++++ b/drivers/hid/hid-uclogic-core.c +@@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev, + suffix = "System Control"; + break; + } +- } +- +- if (suffix) ++ } else { + hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, + "%s %s", hdev->name, suffix); ++ if (!hi->input->name) ++ return -ENOMEM; ++ } + + return 0; + } +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 47e1bd8de9fcf0..53026356475ac1 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -1113,68 +1113,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, + EXPORT_SYMBOL(vmbus_sendpacket); + + /* +- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer +- * packets using a GPADL Direct packet type. This interface allows you +- * to control notifying the host. This will be useful for sending +- * batched data. Also the sender can control the send flags +- * explicitly. 
+- */ +-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, +- struct hv_page_buffer pagebuffers[], +- u32 pagecount, void *buffer, u32 bufferlen, +- u64 requestid) +-{ +- int i; +- struct vmbus_channel_packet_page_buffer desc; +- u32 descsize; +- u32 packetlen; +- u32 packetlen_aligned; +- struct kvec bufferlist[3]; +- u64 aligned_data = 0; +- +- if (pagecount > MAX_PAGE_BUFFER_COUNT) +- return -EINVAL; +- +- /* +- * Adjust the size down since vmbus_channel_packet_page_buffer is the +- * largest size we support +- */ +- descsize = sizeof(struct vmbus_channel_packet_page_buffer) - +- ((MAX_PAGE_BUFFER_COUNT - pagecount) * +- sizeof(struct hv_page_buffer)); +- packetlen = descsize + bufferlen; +- packetlen_aligned = ALIGN(packetlen, sizeof(u64)); +- +- /* Setup the descriptor */ +- desc.type = VM_PKT_DATA_USING_GPA_DIRECT; +- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; +- desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ +- desc.length8 = (u16)(packetlen_aligned >> 3); +- desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */ +- desc.reserved = 0; +- desc.rangecount = pagecount; +- +- for (i = 0; i < pagecount; i++) { +- desc.range[i].len = pagebuffers[i].len; +- desc.range[i].offset = pagebuffers[i].offset; +- desc.range[i].pfn = pagebuffers[i].pfn; +- } +- +- bufferlist[0].iov_base = &desc; +- bufferlist[0].iov_len = descsize; +- bufferlist[1].iov_base = buffer; +- bufferlist[1].iov_len = bufferlen; +- bufferlist[2].iov_base = &aligned_data; +- bufferlist[2].iov_len = (packetlen_aligned - packetlen); +- +- return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL); +-} +-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); +- +-/* +- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet ++ * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets + * using a GPADL Direct packet type. +- * The buffer includes the vmbus descriptor. ++ * The desc argument must include space for the VMBus descriptor. The ++ * rangecount field must already be set. 
+ */ + int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, + struct vmbus_packet_mpb_array *desc, +@@ -1196,7 +1138,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, + desc->length8 = (u16)(packetlen_aligned >> 3); + desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */ + desc->reserved = 0; +- desc->rangecount = 1; + + bufferlist[0].iov_base = desc; + bufferlist[0].iov_len = desc_size; +diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c +index 98648c679a55c1..2ace3aafe49788 100644 +--- a/drivers/iio/adc/ad7266.c ++++ b/drivers/iio/adc/ad7266.c +@@ -44,7 +44,7 @@ struct ad7266_state { + */ + struct { + __be16 sample[2]; +- s64 timestamp; ++ aligned_s64 timestamp; + } data __aligned(IIO_DMA_MINALIGN); + }; + +diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c +index 74b0c85944bd61..967f06cd3f94e7 100644 +--- a/drivers/iio/adc/ad7768-1.c ++++ b/drivers/iio/adc/ad7768-1.c +@@ -169,7 +169,7 @@ struct ad7768_state { + union { + struct { + __be32 chan; +- s64 timestamp; ++ aligned_s64 timestamp; + } scan; + __be32 d32; + u8 d8[2]; +diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c +index 814ce0aad1cccd..4085a36cd1db75 100644 +--- a/drivers/iio/chemical/sps30.c ++++ b/drivers/iio/chemical/sps30.c +@@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p) + int ret; + struct { + s32 data[4]; /* PM1, PM2P5, PM4, PM10 */ +- s64 ts; ++ aligned_s64 ts; + } scan; + + mutex_lock(&state->lock); +diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c +index fec87c9030abdc..fffd144d509eb0 100644 +--- a/drivers/infiniband/sw/rxe/rxe_cq.c ++++ b/drivers/infiniband/sw/rxe/rxe_cq.c +@@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, + + err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, + cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); +- if (err) { +- vfree(cq->queue->buf); +- kfree(cq->queue); ++ if (err) + return err; +- } + + cq->is_user = uresp; + +diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c +index 1a367e64bc3b1d..843e50b5a0ec57 100644 +--- a/drivers/net/dsa/sja1105/sja1105_main.c ++++ b/drivers/net/dsa/sja1105/sja1105_main.c +@@ -2076,6 +2076,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, + switch (state) { + case BR_STATE_DISABLED: + case BR_STATE_BLOCKING: ++ case BR_STATE_LISTENING: + /* From UM10944 description of DRPDTAG (why put this there?): + * "Management traffic flows to the port regardless of the state + * of the INGRESS flag". So BPDUs are still be allowed to pass. 
+@@ -2085,11 +2086,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, + mac[port].egress = false; + mac[port].dyn_learn = false; + break; +- case BR_STATE_LISTENING: +- mac[port].ingress = true; +- mac[port].egress = false; +- mac[port].dyn_learn = false; +- break; + case BR_STATE_LEARNING: + mac[port].ingress = true; + mac[port].egress = false; +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index 4325d0ace1f268..6f45f4d9fba71f 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -1016,22 +1016,15 @@ static void macb_update_stats(struct macb *bp) + + static int macb_halt_tx(struct macb *bp) + { +- unsigned long halt_time, timeout; +- u32 status; ++ u32 status; + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); + +- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); +- do { +- halt_time = jiffies; +- status = macb_readl(bp, TSR); +- if (!(status & MACB_BIT(TGO))) +- return 0; +- +- udelay(250); +- } while (time_before(halt_time, timeout)); +- +- return -ETIMEDOUT; ++ /* Poll TSR until TGO is cleared or timeout. */ ++ return read_poll_timeout_atomic(macb_readl, status, ++ !(status & MACB_BIT(TGO)), ++ 250, MACB_HALT_TIMEOUT, false, ++ bp, TSR); + } + + static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget) +diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h +index 55e1caf193a69d..64c97eb66f6715 100644 +--- a/drivers/net/ethernet/engleder/tsnep_hw.h ++++ b/drivers/net/ethernet/engleder/tsnep_hw.h +@@ -181,6 +181,8 @@ struct tsnep_gcl_operation { + #define TSNEP_DESC_SIZE 256 + #define TSNEP_DESC_SIZE_DATA_AFTER 2048 + #define TSNEP_DESC_OFFSET 128 ++#define TSNEP_DESC_SIZE_DATA_AFTER_INLINE (64 - sizeof(struct tsnep_tx_desc) + \ ++ sizeof_field(struct tsnep_tx_desc, tx)) + #define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000 + #define TSNEP_DESC_OWNER_COUNTER_SHIFT 30 + #define TSNEP_DESC_LENGTH_MASK 0x00003FFF +diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c +index 4f36b29d66c860..215ae6745932ae 100644 +--- a/drivers/net/ethernet/engleder/tsnep_main.c ++++ b/drivers/net/ethernet/engleder/tsnep_main.c +@@ -51,12 +51,24 @@ + #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \ + ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1) + +-#define TSNEP_TX_TYPE_SKB BIT(0) +-#define TSNEP_TX_TYPE_SKB_FRAG BIT(1) +-#define TSNEP_TX_TYPE_XDP_TX BIT(2) +-#define TSNEP_TX_TYPE_XDP_NDO BIT(3) +-#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) +-#define TSNEP_TX_TYPE_XSK BIT(4) ++/* mapping type */ ++#define TSNEP_TX_TYPE_MAP BIT(0) ++#define TSNEP_TX_TYPE_MAP_PAGE BIT(1) ++#define TSNEP_TX_TYPE_INLINE BIT(2) ++/* buffer type */ ++#define TSNEP_TX_TYPE_SKB BIT(8) ++#define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP) ++#define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE) ++#define TSNEP_TX_TYPE_SKB_FRAG BIT(9) ++#define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE) ++#define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE) ++#define TSNEP_TX_TYPE_XDP_TX BIT(10) ++#define TSNEP_TX_TYPE_XDP_NDO BIT(11) ++#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE) ++#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) ++#define 
TSNEP_TX_TYPE_XSK BIT(12) ++#define TSNEP_TX_TYPE_TSTAMP BIT(13) ++#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP) + + #define TSNEP_XDP_TX BIT(0) + #define TSNEP_XDP_REDIRECT BIT(1) +@@ -375,8 +387,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, + if (entry->skb) { + entry->properties = length & TSNEP_DESC_LENGTH_MASK; + entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; +- if ((entry->type & TSNEP_TX_TYPE_SKB) && +- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) ++ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) + entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; + + /* toggle user flag to prevent false acknowledge +@@ -416,6 +427,8 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, + entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; + entry->desc->more_properties = + __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); ++ if (entry->type & TSNEP_TX_TYPE_INLINE) ++ entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; + + /* descriptor properties shall be written last, because valid data is + * signaled there +@@ -433,39 +446,83 @@ static int tsnep_tx_desc_available(struct tsnep_tx *tx) + return tx->read - tx->write - 1; + } + +-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) ++static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry, ++ struct device *dmadev, dma_addr_t *dma) ++{ ++ unsigned int len; ++ int mapped; ++ ++ len = skb_frag_size(frag); ++ if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { ++ *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE); ++ if (dma_mapping_error(dmadev, *dma)) ++ return -ENOMEM; ++ entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; ++ mapped = 1; ++ } else { ++ void *fragdata = skb_frag_address_safe(frag); ++ ++ if (likely(fragdata)) { ++ memcpy(&entry->desc->tx, fragdata, len); ++ } else { ++ struct page *page = skb_frag_page(frag); ++ ++ fragdata = kmap_local_page(page); ++ memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), ++ len); ++ kunmap_local(fragdata); ++ } ++ entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; ++ mapped = 0; ++ } ++ ++ return mapped; ++} ++ ++static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count, ++ bool do_tstamp) + { + struct device *dmadev = tx->adapter->dmadev; + struct tsnep_tx_entry *entry; + unsigned int len; +- dma_addr_t dma; + int map_len = 0; +- int i; ++ dma_addr_t dma; ++ int i, mapped; + + for (i = 0; i < count; i++) { + entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; + + if (!i) { + len = skb_headlen(skb); +- dma = dma_map_single(dmadev, skb->data, len, +- DMA_TO_DEVICE); ++ if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { ++ dma = dma_map_single(dmadev, skb->data, len, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dmadev, dma)) ++ return -ENOMEM; ++ entry->type = TSNEP_TX_TYPE_SKB_MAP; ++ mapped = 1; ++ } else { ++ memcpy(&entry->desc->tx, skb->data, len); ++ entry->type = TSNEP_TX_TYPE_SKB_INLINE; ++ mapped = 0; ++ } + +- entry->type = TSNEP_TX_TYPE_SKB; ++ if (do_tstamp) ++ entry->type |= TSNEP_TX_TYPE_TSTAMP; + } else { +- len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); +- dma = skb_frag_dma_map(dmadev, +- &skb_shinfo(skb)->frags[i - 1], +- 0, len, DMA_TO_DEVICE); ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + +- entry->type = TSNEP_TX_TYPE_SKB_FRAG; ++ len = skb_frag_size(frag); ++ mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma); ++ if (mapped < 0) ++ return mapped; + } +- 
if (dma_mapping_error(dmadev, dma)) +- return -ENOMEM; + + entry->len = len; +- dma_unmap_addr_set(entry, dma, dma); +- +- entry->desc->tx = __cpu_to_le64(dma); ++ if (likely(mapped)) { ++ dma_unmap_addr_set(entry, dma, dma); ++ entry->desc->tx = __cpu_to_le64(dma); ++ } + + map_len += len; + } +@@ -484,13 +541,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) + entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; + + if (entry->len) { +- if (entry->type & TSNEP_TX_TYPE_SKB) ++ if (entry->type & TSNEP_TX_TYPE_MAP) + dma_unmap_single(dmadev, + dma_unmap_addr(entry, dma), + dma_unmap_len(entry, len), + DMA_TO_DEVICE); +- else if (entry->type & +- (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO)) ++ else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) + dma_unmap_page(dmadev, + dma_unmap_addr(entry, dma), + dma_unmap_len(entry, len), +@@ -506,11 +562,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) + static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, + struct tsnep_tx *tx) + { +- int count = 1; + struct tsnep_tx_entry *entry; ++ bool do_tstamp = false; ++ int count = 1; + int length; +- int i; + int retval; ++ int i; + + if (skb_shinfo(skb)->nr_frags > 0) + count += skb_shinfo(skb)->nr_frags; +@@ -527,7 +584,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, + entry = &tx->entry[tx->write]; + entry->skb = skb; + +- retval = tsnep_tx_map(skb, tx, count); ++ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ++ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) { ++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++ do_tstamp = true; ++ } ++ ++ retval = tsnep_tx_map(skb, tx, count, do_tstamp); + if (retval < 0) { + tsnep_tx_unmap(tx, tx->write, count); + dev_kfree_skb_any(entry->skb); +@@ -539,9 +602,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, + } + length = retval; + +- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) +- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +- + for (i = 0; i < count; i++) + tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, + i == count - 1); +@@ -586,7 +646,7 @@ static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, + if (dma_mapping_error(dmadev, dma)) + return -ENOMEM; + +- entry->type = TSNEP_TX_TYPE_XDP_NDO; ++ entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; + } else { + page = unlikely(frag) ? 
skb_frag_page(frag) : + virt_to_page(xdpf->data); +@@ -792,8 +852,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) + + length = tsnep_tx_unmap(tx, tx->read, count); + +- if ((entry->type & TSNEP_TX_TYPE_SKB) && +- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && ++ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) && + (__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { + struct skb_shared_hwtstamps hwtstamps; +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +index 52792546fe00dd..339be6950c0395 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +@@ -707,6 +707,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) + + if (!is_lmac_valid(cgx, lmac_id)) + return -ENODEV; ++ ++ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ ++ if (idx >= CGX_RX_STAT_GLOBAL_INDEX) ++ lmac_id = 0; ++ + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); + return 0; + } +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +index 6cc7a78968fc1c..74953f67a2bf9c 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +@@ -533,7 +533,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, + if (sw_tx_sc->encrypt) + sectag_tci |= (MCS_TCI_E | MCS_TCI_C); + +- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); ++ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, ++ pfvf->netdev->mtu + OTX2_ETH_HLEN); + /* Write SecTag excluding AN bits(1..0) */ + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); + policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index c6ccfbd4226570..cb8efc952dfda9 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -4628,7 +4628,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) + } + + if (mtk_is_netsys_v3_or_greater(mac->hw) && +- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) && ++ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) && + id == MTK_GMAC1_ID) { + mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 8a892614015cd9..d9dc7280302eb7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4136,6 +4136,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev + if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n"); + ++ features &= ~NETIF_F_HW_MACSEC; ++ if (netdev->features & NETIF_F_HW_MACSEC) ++ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n"); ++ + return features; + } + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +index d15aa6b25a8884..0534b10e29c5c4 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +@@ -3013,6 +3013,9 @@ static int 
mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ .rif = rif,
+ };
+
++ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
++ return 0;
++
+ neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
+ if (rms.err)
+ goto err_arp;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 99df00c30b8c6c..b5d744d2586f72 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
+ };
+
+ static struct qed_eth_cb_ops qede_ll_ops = {
+- {
++ .common = {
+ #ifdef CONFIG_RFS_ACCEL
+ .arfs_filter_op = qede_arfs_filter_op,
+ #endif
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index 28d24d59efb84f..d57b976b904095 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
+ }
+
+ cmd_op = (cmd.rsp.arg[0] & 0xff);
+- if (cmd.rsp.arg[0] >> 25 == 2)
+- return 2;
++ if (cmd.rsp.arg[0] >> 25 == 2) {
++ ret = 2;
++ goto out;
++ }
++
+ if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index 810977952f950b..4c12067b07f05e 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
+ u8 cp_partial; /* partial copy into send buffer */
+
+ u8 rmsg_size; /* RNDIS header and PPI size */
+- u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
+ u8 page_buf_cnt;
+
+ u16 q_idx;
+@@ -893,6 +892,18 @@ struct nvsp_message {
+ sizeof(struct nvsp_message))
+ #define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+
++/* Maximum # of contiguous data ranges that can make up a transmitted packet.
++ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
++ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
++ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
++ * in a GPA direct packet sent to netvsp over VMBus.
++ */
++#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
++#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
++#else
++#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
++#endif
++
+ /* Estimated requestor size:
+ * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
+ */
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index b2f27e505f76c6..61584b40cb0386 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -945,8 +945,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ + pend_size;
+ int i;
+ u32 padding = 0;
+- u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+- packet->page_buf_cnt;
++ u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
+ u32 remain;
+
+ /* Add padding */
+@@ -1047,6 +1046,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
+ return 0;
+ }
+
++/* Build an "array" of mpb entries describing the data to be transferred
++ * over VMBus. After the desc header fields, each "array" entry is variable
++ * size, and each entry starts after the end of the previous entry. The
++ * "offset" and "len" fields for each entry imply the size of the entry. 
++ * ++ * The pfns are in HV_HYP_PAGE_SIZE, because all communication with Hyper-V ++ * uses that granularity, even if the system page size of the guest is larger. ++ * Each entry in the input "pb" array must describe a contiguous range of ++ * guest physical memory so that the pfns are sequential if the range crosses ++ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE. ++ */ ++static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb, ++ u32 page_buffer_count, ++ struct vmbus_packet_mpb_array *desc, ++ u32 *desc_size) ++{ ++ struct hv_mpb_array *mpb_entry = &desc->range; ++ int i, j; ++ ++ for (i = 0; i < page_buffer_count; i++) { ++ u32 offset = pb[i].offset; ++ u32 len = pb[i].len; ++ ++ mpb_entry->offset = offset; ++ mpb_entry->len = len; ++ ++ for (j = 0; j < HVPFN_UP(offset + len); j++) ++ mpb_entry->pfn_array[j] = pb[i].pfn + j; ++ ++ mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j]; ++ } ++ ++ desc->rangecount = page_buffer_count; ++ *desc_size = (char *)mpb_entry - (char *)desc; ++} ++ + static inline int netvsc_send_pkt( + struct hv_device *device, + struct hv_netvsc_packet *packet, +@@ -1089,8 +1124,11 @@ static inline int netvsc_send_pkt( + + packet->dma_range = NULL; + if (packet->page_buf_cnt) { ++ struct vmbus_channel_packet_page_buffer desc; ++ u32 desc_size; ++ + if (packet->cp_partial) +- pb += packet->rmsg_pgcnt; ++ pb++; + + ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb); + if (ret) { +@@ -1098,11 +1136,12 @@ static inline int netvsc_send_pkt( + goto exit; + } + +- ret = vmbus_sendpacket_pagebuffer(out_channel, +- pb, packet->page_buf_cnt, +- &nvmsg, sizeof(nvmsg), +- req_id); +- ++ netvsc_build_mpb_array(pb, packet->page_buf_cnt, ++ (struct vmbus_packet_mpb_array *)&desc, ++ &desc_size); ++ ret = vmbus_sendpacket_mpb_desc(out_channel, ++ (struct vmbus_packet_mpb_array *)&desc, ++ desc_size, &nvmsg, sizeof(nvmsg), req_id); + if (ret) + netvsc_dma_unmap(ndev_ctx->device_ctx, packet); + } else { +@@ -1251,7 +1290,7 @@ int netvsc_send(struct net_device *ndev, + packet->send_buf_index = section_index; + + if (packet->cp_partial) { +- packet->page_buf_cnt -= packet->rmsg_pgcnt; ++ packet->page_buf_cnt--; + packet->total_data_buflen = msd_len + packet->rmsg_size; + } else { + packet->page_buf_cnt = 0; +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index 8698d2db3dc8e1..ce6ac26131b347 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -325,43 +325,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + return txq; + } + +-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len, +- struct hv_page_buffer *pb) +-{ +- int j = 0; +- +- hvpfn += offset >> HV_HYP_PAGE_SHIFT; +- offset = offset & ~HV_HYP_PAGE_MASK; +- +- while (len > 0) { +- unsigned long bytes; +- +- bytes = HV_HYP_PAGE_SIZE - offset; +- if (bytes > len) +- bytes = len; +- pb[j].pfn = hvpfn; +- pb[j].offset = offset; +- pb[j].len = bytes; +- +- offset += bytes; +- len -= bytes; +- +- if (offset == HV_HYP_PAGE_SIZE && len) { +- hvpfn++; +- offset = 0; +- j++; +- } +- } +- +- return j + 1; +-} +- + static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, + struct hv_netvsc_packet *packet, + struct hv_page_buffer *pb) + { +- u32 slots_used = 0; +- char *data = skb->data; + int frags = skb_shinfo(skb)->nr_frags; + int i; + +@@ -370,28 +337,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, + * 2. skb linear data + * 3. 
skb fragment data + */ +- slots_used += fill_pg_buf(virt_to_hvpfn(hdr), +- offset_in_hvpage(hdr), +- len, +- &pb[slots_used]); + ++ pb[0].offset = offset_in_hvpage(hdr); ++ pb[0].len = len; ++ pb[0].pfn = virt_to_hvpfn(hdr); + packet->rmsg_size = len; +- packet->rmsg_pgcnt = slots_used; + +- slots_used += fill_pg_buf(virt_to_hvpfn(data), +- offset_in_hvpage(data), +- skb_headlen(skb), +- &pb[slots_used]); ++ pb[1].offset = offset_in_hvpage(skb->data); ++ pb[1].len = skb_headlen(skb); ++ pb[1].pfn = virt_to_hvpfn(skb->data); + + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; ++ struct hv_page_buffer *cur_pb = &pb[i + 2]; ++ u64 pfn = page_to_hvpfn(skb_frag_page(frag)); ++ u32 offset = skb_frag_off(frag); + +- slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)), +- skb_frag_off(frag), +- skb_frag_size(frag), +- &pb[slots_used]); ++ cur_pb->offset = offset_in_hvpage(offset); ++ cur_pb->len = skb_frag_size(frag); ++ cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT); + } +- return slots_used; ++ return frags + 2; + } + + static int count_skb_frag_slots(struct sk_buff *skb) +@@ -482,7 +448,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) + struct net_device *vf_netdev; + u32 rndis_msg_size; + u32 hash; +- struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; ++ struct hv_page_buffer pb[MAX_DATA_RANGES]; + + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c +index af95947a87c552..09144f0ec2aa4f 100644 +--- a/drivers/net/hyperv/rndis_filter.c ++++ b/drivers/net/hyperv/rndis_filter.c +@@ -226,8 +226,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, + struct rndis_request *req) + { + struct hv_netvsc_packet *packet; +- struct hv_page_buffer page_buf[2]; +- struct hv_page_buffer *pb = page_buf; ++ struct hv_page_buffer pb; + int ret; + + /* Setup the packet to send it */ +@@ -236,27 +235,14 @@ static int rndis_filter_send_request(struct rndis_device *dev, + packet->total_data_buflen = req->request_msg.msg_len; + packet->page_buf_cnt = 1; + +- pb[0].pfn = virt_to_phys(&req->request_msg) >> +- HV_HYP_PAGE_SHIFT; +- pb[0].len = req->request_msg.msg_len; +- pb[0].offset = offset_in_hvpage(&req->request_msg); +- +- /* Add one page_buf when request_msg crossing page boundary */ +- if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) { +- packet->page_buf_cnt++; +- pb[0].len = HV_HYP_PAGE_SIZE - +- pb[0].offset; +- pb[1].pfn = virt_to_phys((void *)&req->request_msg +- + pb[0].len) >> HV_HYP_PAGE_SHIFT; +- pb[1].offset = 0; +- pb[1].len = req->request_msg.msg_len - +- pb[0].len; +- } ++ pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT; ++ pb.len = req->request_msg.msg_len; ++ pb.offset = offset_in_hvpage(&req->request_msg); + + trace_rndis_send(dev->ndev, 0, &req->request_msg); + + rcu_read_lock_bh(); +- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false); ++ ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false); + rcu_read_unlock_bh(); + + return ret; +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c +index cd048659706436..b7e100710601a5 100644 +--- a/drivers/net/wireless/mediatek/mt76/dma.c ++++ b/drivers/net/wireless/mediatek/mt76/dma.c +@@ -957,6 +957,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev) + int i; + + mt76_worker_disable(&dev->tx_worker); ++ napi_disable(&dev->tx_napi); + 
netif_napi_del(&dev->tx_napi); + + for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index fdde38903ebcd5..1e5c8220e365ca 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -386,7 +386,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, + * as it only leads to a small amount of wasted memory for the lifetime of + * the I/O. + */ +-static int nvme_pci_npages_prp(void) ++static __always_inline int nvme_pci_npages_prp(void) + { + unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; + unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); +@@ -1107,7 +1107,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) + WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); + + disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); ++ spin_lock(&nvmeq->cq_poll_lock); + nvme_poll_cq(nvmeq, NULL); ++ spin_unlock(&nvmeq->cq_poll_lock); + enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + } + +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +index 6387c0d34c551c..aa578be2bcb6df 100644 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +@@ -101,7 +101,6 @@ struct rcar_gen3_phy { + struct rcar_gen3_chan *ch; + u32 int_enable_bits; + bool initialized; +- bool otg_initialized; + bool powered; + }; + +@@ -309,16 +308,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch) + return false; + } + +-static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch) ++static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch) + { +- int i; +- +- for (i = 0; i < NUM_OF_PHYS; i++) { +- if (ch->rphys[i].otg_initialized) +- return false; ++ for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI; ++ i++) { ++ if (ch->rphys[i].initialized) ++ return true; + } + +- return true; ++ return false; + } + + static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch) +@@ -340,7 +338,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, + bool is_b_device; + enum phy_mode cur_mode, new_mode; + +- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) ++ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) + return -EIO; + + if (sysfs_streq(buf, "host")) +@@ -378,7 +376,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr, + { + struct rcar_gen3_chan *ch = dev_get_drvdata(dev); + +- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) ++ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) + return -EIO; + + return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? 
"host" : +@@ -391,6 +389,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) + void __iomem *usb2_base = ch->base; + u32 val; + ++ if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch)) ++ return; ++ + /* Should not use functions of read-modify-write a register */ + val = readl(usb2_base + USB2_LINECTRL1); + val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN | +@@ -451,16 +452,16 @@ static int rcar_gen3_phy_usb2_init(struct phy *p) + val = readl(usb2_base + USB2_INT_ENABLE); + val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits; + writel(val, usb2_base + USB2_INT_ENABLE); +- writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); +- writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); +- +- /* Initialize otg part */ +- if (channel->is_otg_channel) { +- if (rcar_gen3_needs_init_otg(channel)) +- rcar_gen3_init_otg(channel); +- rphy->otg_initialized = true; ++ ++ if (!rcar_gen3_is_any_rphy_initialized(channel)) { ++ writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); ++ writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); + } + ++ /* Initialize otg part (only if we initialize a PHY with IRQs). */ ++ if (rphy->int_enable_bits) ++ rcar_gen3_init_otg(channel); ++ + rphy->initialized = true; + + return 0; +@@ -475,9 +476,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) + + rphy->initialized = false; + +- if (channel->is_otg_channel) +- rphy->otg_initialized = false; +- + val = readl(usb2_base + USB2_INT_ENABLE); + val &= ~rphy->int_enable_bits; + if (!rcar_gen3_is_any_rphy_initialized(channel)) +diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c +index fae6242aa730e0..23a23f2d64e586 100644 +--- a/drivers/phy/tegra/xusb-tegra186.c ++++ b/drivers/phy/tegra/xusb-tegra186.c +@@ -237,6 +237,8 @@ + #define DATA0_VAL_PD BIT(1) + #define USE_XUSB_AO BIT(4) + ++#define TEGRA_UTMI_PAD_MAX 4 ++ + #define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \ + { \ + .name = _name, \ +@@ -269,7 +271,7 @@ struct tegra186_xusb_padctl { + + /* UTMI bias and tracking */ + struct clk *usb2_trk_clk; +- unsigned int bias_pad_enable; ++ DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX); + + /* padctl context */ + struct tegra186_xusb_padctl_context context; +@@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) + u32 value; + int err; + +- mutex_lock(&padctl->lock); +- +- if (priv->bias_pad_enable++ > 0) { +- mutex_unlock(&padctl->lock); ++ if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX)) + return; +- } + + err = clk_prepare_enable(priv->usb2_trk_clk); + if (err < 0) +@@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) + } else { + clk_disable_unprepare(priv->usb2_trk_clk); + } +- +- mutex_unlock(&padctl->lock); + } + + static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) +@@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) + struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); + u32 value; + +- mutex_lock(&padctl->lock); +- +- if (WARN_ON(priv->bias_pad_enable == 0)) { +- mutex_unlock(&padctl->lock); +- return; +- } +- +- if (--priv->bias_pad_enable > 0) { +- mutex_unlock(&padctl->lock); ++ if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX)) + return; +- } + + value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1); + value |= USB2_PD_TRK; +@@ -690,13 +677,13 @@ static void 
tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) + clk_disable_unprepare(priv->usb2_trk_clk); + } + +- mutex_unlock(&padctl->lock); + } + + static void tegra186_utmi_pad_power_on(struct phy *phy) + { + struct tegra_xusb_lane *lane = phy_get_drvdata(phy); + struct tegra_xusb_padctl *padctl = lane->pad->padctl; ++ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); + struct tegra_xusb_usb2_port *port; + struct device *dev = padctl->dev; + unsigned int index = lane->index; +@@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy) + if (!phy) + return; + ++ mutex_lock(&padctl->lock); ++ if (test_bit(index, priv->utmi_pad_enabled)) { ++ mutex_unlock(&padctl->lock); ++ return; ++ } ++ + port = tegra_xusb_find_usb2_port(padctl, index); + if (!port) { + dev_err(dev, "no port found for USB2 lane %u\n", index); ++ mutex_unlock(&padctl->lock); + return; + } + +@@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy) + value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index)); + value &= ~USB2_OTG_PD_DR; + padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index)); ++ ++ set_bit(index, priv->utmi_pad_enabled); ++ mutex_unlock(&padctl->lock); + } + + static void tegra186_utmi_pad_power_down(struct phy *phy) + { + struct tegra_xusb_lane *lane = phy_get_drvdata(phy); + struct tegra_xusb_padctl *padctl = lane->pad->padctl; ++ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); + unsigned int index = lane->index; + u32 value; + + if (!phy) + return; + ++ mutex_lock(&padctl->lock); ++ if (!test_bit(index, priv->utmi_pad_enabled)) { ++ mutex_unlock(&padctl->lock); ++ return; ++ } ++ + dev_dbg(padctl->dev, "power down UTMI pad %u\n", index); + + value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index)); +@@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy) + + udelay(2); + ++ clear_bit(index, priv->utmi_pad_enabled); ++ + tegra186_utmi_bias_pad_power_off(padctl); ++ ++ mutex_unlock(&padctl->lock); + } + + static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, +diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c +index 983a6e6173bd21..3a04a56ca52de9 100644 +--- a/drivers/phy/tegra/xusb.c ++++ b/drivers/phy/tegra/xusb.c +@@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port, + + err = dev_set_name(&port->dev, "%s-%u", name, index); + if (err < 0) +- goto unregister; ++ goto put_device; + + err = device_add(&port->dev); + if (err < 0) +- goto unregister; ++ goto put_device; + + return 0; + +-unregister: +- device_unregister(&port->dev); ++put_device: ++ put_device(&port->dev); + return err; + } + +diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c +index b4f49720c87f62..2e3f6fc67c568d 100644 +--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c ++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c +@@ -217,6 +217,13 @@ static const struct dmi_system_id fwbug_list[] = { + DMI_MATCH(DMI_BIOS_VERSION, "03.05"), + } + }, ++ { ++ .ident = "MECHREVO Wujie 14X (GX4HRXL)", ++ .driver_data = &quirk_spurious_8042, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), ++ } ++ }, + {} + }; + +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 222e429931ef9e..2c894ea8aa8174 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -4404,7 +4404,8 @@ static int asus_wmi_add(struct platform_device 
*pdev) + goto fail_leds; + + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); +- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) ++ if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) == ++ (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + asus->driver->wlan_ctrl_by_user = 1; + + if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) { +diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c +index 32f47b896fd1e2..ebfbcadbca5295 100644 +--- a/drivers/regulator/max20086-regulator.c ++++ b/drivers/regulator/max20086-regulator.c +@@ -132,7 +132,7 @@ static int max20086_regulators_register(struct max20086 *chip) + + static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) + { +- struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { }; ++ struct of_regulator_match *matches; + struct device_node *node; + unsigned int i; + int ret; +@@ -143,6 +143,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) + return -ENODEV; + } + ++ matches = devm_kcalloc(chip->dev, chip->info->num_outputs, ++ sizeof(*matches), GFP_KERNEL); ++ if (!matches) ++ return -ENOMEM; ++ + for (i = 0; i < chip->info->num_outputs; ++i) + matches[i].name = max20086_output_names[i]; + +diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c +index 203df5e53b1a84..9bbabae253e53b 100644 +--- a/drivers/scsi/sd_zbc.c ++++ b/drivers/scsi/sd_zbc.c +@@ -202,6 +202,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, + unsigned int nr_zones, size_t *buflen) + { + struct request_queue *q = sdkp->disk->queue; ++ unsigned int max_segments; + size_t bufsize; + void *buf; + +@@ -213,12 +214,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, + * Furthermore, since the report zone command cannot be split, make + * sure that the allocated buffer can always be mapped by limiting the + * number of pages allocated to the HBA max segments limit. ++ * Since max segments can be larger than the max inline bio vectors, ++ * further limit the allocated buffer to BIO_MAX_INLINE_VECS. 
+ */ + nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); + bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); + bufsize = min_t(size_t, bufsize, + queue_max_hw_sectors(q) << SECTOR_SHIFT); +- bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); ++ max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q)); ++ bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT); + + while (bufsize >= SECTOR_SIZE) { + buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index b8186feccdf5aa..48b0ca92b44fb3 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -1819,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) + return SCSI_MLQUEUE_DEVICE_BUSY; + } + ++ payload->rangecount = 1; + payload->range.len = length; + payload->range.offset = offset_in_hvpg; + +diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c +index bbf2015d8e5cce..69b6c87c5525e0 100644 +--- a/drivers/spi/spi-loopback-test.c ++++ b/drivers/spi/spi-loopback-test.c +@@ -421,7 +421,7 @@ MODULE_LICENSE("GPL"); + static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) + { + /* limit the hex_dump */ +- if (len < 1024) { ++ if (len <= 1024) { + print_hex_dump(KERN_INFO, pre, + DUMP_PREFIX_OFFSET, 16, 1, + ptr, len, 0); +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c +index 147d7052794f77..8d7ce4c556aa1d 100644 +--- a/drivers/spi/spi-tegra114.c ++++ b/drivers/spi/spi-tegra114.c +@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) + u32 inactive_cycles; + u8 cs_state; + +- if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) || +- (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) || +- (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) { ++ if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) || ++ (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) || ++ (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) { + dev_err(&spi->dev, + "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", + SPI_DELAY_UNIT_SCK); +diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c +index b7dada064890bd..90536f47906c33 100644 +--- a/drivers/usb/gadget/function/f_midi2.c ++++ b/drivers/usb/gadget/function/f_midi2.c +@@ -475,7 +475,7 @@ static void reply_ump_stream_ep_info(struct f_midi2_ep *ep) + /* reply a UMP EP device info */ + static void reply_ump_stream_ep_device(struct f_midi2_ep *ep) + { +- struct snd_ump_stream_msg_devince_info rep = { ++ struct snd_ump_stream_msg_device_info rep = { + .type = UMP_MSG_TYPE_STREAM, + .status = UMP_STREAM_MSG_STATUS_DEVICE_INFO, + .manufacture_id = ep->info.manufacturer, +diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c +index 8c19081c325542..e3b5fa3b5f955d 100644 +--- a/drivers/usb/typec/ucsi/displayport.c ++++ b/drivers/usb/typec/ucsi/displayport.c +@@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) + u8 cur = 0; + int ret; + +- mutex_lock(&dp->con->lock); ++ if (!ucsi_con_mutex_lock(dp->con)) ++ return -ENOTCONN; + + if (!dp->override && dp->initialized) { + const struct typec_altmode *p = typec_altmode_get_partner(alt); +@@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) + schedule_work(&dp->work); + ret = 0; + err_unlock: +- mutex_unlock(&dp->con->lock); ++ ucsi_con_mutex_unlock(dp->con); + 
+ return ret; + } +@@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) + u64 command; + int ret = 0; + +- mutex_lock(&dp->con->lock); ++ if (!ucsi_con_mutex_lock(dp->con)) ++ return -ENOTCONN; + + if (!dp->override) { + const struct typec_altmode *p = typec_altmode_get_partner(alt); +@@ -144,7 +146,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) + schedule_work(&dp->work); + + out_unlock: +- mutex_unlock(&dp->con->lock); ++ ucsi_con_mutex_unlock(dp->con); + + return ret; + } +@@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, + int cmd = PD_VDO_CMD(header); + int svdm_version; + +- mutex_lock(&dp->con->lock); ++ if (!ucsi_con_mutex_lock(dp->con)) ++ return -ENOTCONN; + + if (!dp->override && dp->initialized) { + const struct typec_altmode *p = typec_altmode_get_partner(alt); + + dev_warn(&p->dev, + "firmware doesn't support alternate mode overriding\n"); +- mutex_unlock(&dp->con->lock); ++ ucsi_con_mutex_unlock(dp->con); + return -EOPNOTSUPP; + } + + svdm_version = typec_altmode_get_svdm_version(alt); + if (svdm_version < 0) { +- mutex_unlock(&dp->con->lock); ++ ucsi_con_mutex_unlock(dp->con); + return svdm_version; + } + +@@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, + break; + } + +- mutex_unlock(&dp->con->lock); ++ ucsi_con_mutex_unlock(dp->con); + + return 0; + } +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index 29a04d6795012d..ea98bc5674940d 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -1559,6 +1559,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data) + } + EXPORT_SYMBOL_GPL(ucsi_set_drvdata); + ++/** ++ * ucsi_con_mutex_lock - Acquire the connector mutex ++ * @con: The connector interface to lock ++ * ++ * Returns true on success, false if the connector is disconnected ++ */ ++bool ucsi_con_mutex_lock(struct ucsi_connector *con) ++{ ++ bool mutex_locked = false; ++ bool connected = true; ++ ++ while (connected && !mutex_locked) { ++ mutex_locked = mutex_trylock(&con->lock) != 0; ++ connected = con->status.flags & UCSI_CONSTAT_CONNECTED; ++ if (connected && !mutex_locked) ++ msleep(20); ++ } ++ ++ connected = connected && con->partner; ++ if (!connected && mutex_locked) ++ mutex_unlock(&con->lock); ++ ++ return connected; ++} ++ ++/** ++ * ucsi_con_mutex_unlock - Release the connector mutex ++ * @con: The connector interface to unlock ++ */ ++void ucsi_con_mutex_unlock(struct ucsi_connector *con) ++{ ++ mutex_unlock(&con->lock); ++} ++ + /** + * ucsi_create - Allocate UCSI instance + * @dev: Device interface to the PPM (Platform Policy Manager) +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 921ef0e115cffc..3bb23a2ea547ac 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -79,6 +79,8 @@ int ucsi_register(struct ucsi *ucsi); + void ucsi_unregister(struct ucsi *ucsi); + void *ucsi_get_drvdata(struct ucsi *ucsi); + void ucsi_set_drvdata(struct ucsi *ucsi, void *data); ++bool ucsi_con_mutex_lock(struct ucsi_connector *con); ++void ucsi_con_mutex_unlock(struct ucsi_connector *con); + + void ucsi_connector_change(struct ucsi *ucsi, u8 num); + +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index fb2c8d14327ae1..3ff7d2e47c7e90 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -110,25 +110,6 @@ static struct linux_binfmt elf_format = { + + #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE)) + +-static int set_brk(unsigned 
long start, unsigned long end, int prot) +-{ +- start = ELF_PAGEALIGN(start); +- end = ELF_PAGEALIGN(end); +- if (end > start) { +- /* +- * Map the last of the bss segment. +- * If the header is requesting these pages to be +- * executable, honour that (ppc32 needs this). +- */ +- int error = vm_brk_flags(start, end - start, +- prot & PROT_EXEC ? VM_EXEC : 0); +- if (error) +- return error; +- } +- current->mm->start_brk = current->mm->brk = end; +- return 0; +-} +- + /* We need to explicitly zero any fractional pages + after the data section (i.e. bss). This would + contain the junk from the file that should not +@@ -406,6 +387,51 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, + return(map_addr); + } + ++static unsigned long elf_load(struct file *filep, unsigned long addr, ++ const struct elf_phdr *eppnt, int prot, int type, ++ unsigned long total_size) ++{ ++ unsigned long zero_start, zero_end; ++ unsigned long map_addr; ++ ++ if (eppnt->p_filesz) { ++ map_addr = elf_map(filep, addr, eppnt, prot, type, total_size); ++ if (BAD_ADDR(map_addr)) ++ return map_addr; ++ if (eppnt->p_memsz > eppnt->p_filesz) { ++ zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) + ++ eppnt->p_filesz; ++ zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) + ++ eppnt->p_memsz; ++ ++ /* Zero the end of the last mapped page */ ++ padzero(zero_start); ++ } ++ } else { ++ map_addr = zero_start = ELF_PAGESTART(addr); ++ zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) + ++ eppnt->p_memsz; ++ } ++ if (eppnt->p_memsz > eppnt->p_filesz) { ++ /* ++ * Map the last of the segment. ++ * If the header is requesting these pages to be ++ * executable, honour that (ppc32 needs this). ++ */ ++ int error; ++ ++ zero_start = ELF_PAGEALIGN(zero_start); ++ zero_end = ELF_PAGEALIGN(zero_end); ++ ++ error = vm_brk_flags(zero_start, zero_end - zero_start, ++ prot & PROT_EXEC ? VM_EXEC : 0); ++ if (error) ++ map_addr = error; ++ } ++ return map_addr; ++} ++ ++ + static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr) + { + elf_addr_t min_addr = -1; +@@ -828,8 +854,8 @@ static int load_elf_binary(struct linux_binprm *bprm) + unsigned long error; + struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; + struct elf_phdr *elf_property_phdata = NULL; +- unsigned long elf_bss, elf_brk; +- int bss_prot = 0; ++ unsigned long elf_brk; ++ bool brk_moved = false; + int retval, i; + unsigned long elf_entry; + unsigned long e_entry; +@@ -1021,7 +1047,6 @@ static int load_elf_binary(struct linux_binprm *bprm) + if (retval < 0) + goto out_free_dentry; + +- elf_bss = 0; + elf_brk = 0; + + start_code = ~0UL; +@@ -1041,33 +1066,6 @@ static int load_elf_binary(struct linux_binprm *bprm) + if (elf_ppnt->p_type != PT_LOAD) + continue; + +- if (unlikely (elf_brk > elf_bss)) { +- unsigned long nbyte; +- +- /* There was a PT_LOAD segment with p_memsz > p_filesz +- before this one. Map anonymous pages, if needed, +- and clear the area. */ +- retval = set_brk(elf_bss + load_bias, +- elf_brk + load_bias, +- bss_prot); +- if (retval) +- goto out_free_dentry; +- nbyte = ELF_PAGEOFFSET(elf_bss); +- if (nbyte) { +- nbyte = ELF_MIN_ALIGN - nbyte; +- if (nbyte > elf_brk - elf_bss) +- nbyte = elf_brk - elf_bss; +- if (clear_user((void __user *)elf_bss + +- load_bias, nbyte)) { +- /* +- * This bss-zeroing can fail if the ELF +- * file specifies odd protections. 
So
+- * we don't check the return value
+- */
+- }
+- }
+- }
+-
+ elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
+ !!interpreter, false);
+
+@@ -1095,15 +1093,49 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ * Header for ET_DYN binaries to calculate the
+ * randomization (load_bias) for all the LOAD
+ * Program Headers.
++ */
++
++ /*
++ * Calculate the entire size of the ELF mapping
++ * (total_size), used for the initial mapping,
++ * due to load_addr_set which is set to true later
++ * once the initial mapping is performed.
++ *
++ * Note that this is only sensible when the LOAD
++ * segments are contiguous (or overlapping). If
++ * used for LOADs that are far apart, this would
++ * cause the holes between LOADs to be mapped,
++ * running the risk of having the mapping fail,
++ * as it would be larger than the ELF file itself.
++ *
++ * As a result, only ET_DYN does this, since
++ * some ET_EXEC (e.g. ia64) may have large virtual
++ * memory holes between LOADs.
++ *
++ */
++ total_size = total_mapping_size(elf_phdata,
++ elf_ex->e_phnum);
++ if (!total_size) {
++ retval = -EINVAL;
++ goto out_free_dentry;
++ }
++
++ /* Calculate any requested alignment. */
++ alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
++
++ /**
++ * DOC: PIE handling
+ *
+- * There are effectively two types of ET_DYN
+- * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+- * and loaders (ET_DYN without INTERP, since they
+- * _are_ the ELF interpreter). The loaders must
+- * be loaded away from programs since the program
+- * may otherwise collide with the loader (especially
+- * for ET_EXEC which does not have a randomized
+- * position). For example to handle invocations of
++ * There are effectively two types of ET_DYN ELF
++ * binaries: programs (i.e. PIE: ET_DYN with
++ * PT_INTERP) and loaders (i.e. static PIE: ET_DYN
++ * without PT_INTERP, usually the ELF interpreter
++ * itself). Loaders must be loaded away from programs
++ * since the program may otherwise collide with the
++ * loader (especially for ET_EXEC which does not have
++ * a randomized position).
++ *
++ * For example, to handle invocations of
+ * "./ld.so someprog" to test out a new version of
+ * the loader, the subsequent program that the
+ * loader loads must avoid the loader itself, so
+@@ -1116,17 +1148,49 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ * ELF_ET_DYN_BASE and loaders are loaded into the
+ * independently randomized mmap region (0 load_bias
+ * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
++ *
++ * See below for "brk" handling details, which is
++ * also affected by program vs loader and ASLR.
+ */
+ if (interpreter) {
++ /* On ET_DYN with PT_INTERP, we do the ASLR. */
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+- alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
++ /* Adjust alignment as requested. */
+ if (alignment)
+ load_bias &= ~(alignment - 1);
+ elf_flags |= MAP_FIXED_NOREPLACE;
+- } else
+- load_bias = 0;
++ } else {
++ /*
++ * For ET_DYN without PT_INTERP, we rely on
++ * the architecture's (potentially ASLR) mmap
++ * base address (via a load_bias of 0).
++ *
++ * When a large alignment is requested, we
++ * must do the allocation at address "0" right
++ * now to discover where things will load so
++ * that we can adjust the resulting alignment.
++ * In this case (load_bias != 0), we can use
++ * MAP_FIXED_NOREPLACE to make sure the mapping
++ * doesn't collide with anything. 
++ */ ++ if (alignment > ELF_MIN_ALIGN) { ++ load_bias = elf_load(bprm->file, 0, elf_ppnt, ++ elf_prot, elf_flags, total_size); ++ if (BAD_ADDR(load_bias)) { ++ retval = IS_ERR_VALUE(load_bias) ? ++ PTR_ERR((void*)load_bias) : -EINVAL; ++ goto out_free_dentry; ++ } ++ vm_munmap(load_bias, total_size); ++ /* Adjust alignment as requested. */ ++ if (alignment) ++ load_bias &= ~(alignment - 1); ++ elf_flags |= MAP_FIXED_NOREPLACE; ++ } else ++ load_bias = 0; ++ } + + /* + * Since load_bias is used for all subsequent loading +@@ -1136,34 +1200,9 @@ static int load_elf_binary(struct linux_binprm *bprm) + * is then page aligned. + */ + load_bias = ELF_PAGESTART(load_bias - vaddr); +- +- /* +- * Calculate the entire size of the ELF mapping +- * (total_size), used for the initial mapping, +- * due to load_addr_set which is set to true later +- * once the initial mapping is performed. +- * +- * Note that this is only sensible when the LOAD +- * segments are contiguous (or overlapping). If +- * used for LOADs that are far apart, this would +- * cause the holes between LOADs to be mapped, +- * running the risk of having the mapping fail, +- * as it would be larger than the ELF file itself. +- * +- * As a result, only ET_DYN does this, since +- * some ET_EXEC (e.g. ia64) may have large virtual +- * memory holes between LOADs. +- * +- */ +- total_size = total_mapping_size(elf_phdata, +- elf_ex->e_phnum); +- if (!total_size) { +- retval = -EINVAL; +- goto out_free_dentry; +- } + } + +- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, ++ error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt, + elf_prot, elf_flags, total_size); + if (BAD_ADDR(error)) { + retval = IS_ERR_VALUE(error) ? +@@ -1211,41 +1250,23 @@ static int load_elf_binary(struct linux_binprm *bprm) + + k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; + +- if (k > elf_bss) +- elf_bss = k; + if ((elf_ppnt->p_flags & PF_X) && end_code < k) + end_code = k; + if (end_data < k) + end_data = k; + k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; +- if (k > elf_brk) { +- bss_prot = elf_prot; ++ if (k > elf_brk) + elf_brk = k; +- } + } + + e_entry = elf_ex->e_entry + load_bias; + phdr_addr += load_bias; +- elf_bss += load_bias; + elf_brk += load_bias; + start_code += load_bias; + end_code += load_bias; + start_data += load_bias; + end_data += load_bias; + +- /* Calling set_brk effectively mmaps the pages that we need +- * for the bss and break sections. We must do this before +- * mapping in the interpreter, to make sure it doesn't wind +- * up getting placed where the bss needs to go. +- */ +- retval = set_brk(elf_bss, elf_brk, bss_prot); +- if (retval) +- goto out_free_dentry; +- if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { +- retval = -EFAULT; /* Nobody gets to see this, but.. */ +- goto out_free_dentry; +- } +- + if (interpreter) { + elf_entry = load_elf_interp(interp_elf_ex, + interpreter, +@@ -1301,24 +1322,44 @@ static int load_elf_binary(struct linux_binprm *bprm) + mm->end_data = end_data; + mm->start_stack = bprm->p; + +- if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) { ++ /** ++ * DOC: "brk" handling ++ * ++ * For architectures with ELF randomization, when executing a ++ * loader directly (i.e. static PIE: ET_DYN without PT_INTERP), ++ * move the brk area out of the mmap region and into the unused ++ * ELF_ET_DYN_BASE region. Since "brk" grows up it may collide ++ * early with the stack growing down or other regions being put ++ * into the mmap region by the kernel (e.g. vdso). 
++ * ++ * In the CONFIG_COMPAT_BRK case, though, everything is turned ++ * off because we're not allowed to move the brk at all. ++ */ ++ if (!IS_ENABLED(CONFIG_COMPAT_BRK) && ++ IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && ++ elf_ex->e_type == ET_DYN && !interpreter) { ++ elf_brk = ELF_ET_DYN_BASE; ++ /* This counts as moving the brk, so let brk(2) know. */ ++ brk_moved = true; ++ } ++ mm->start_brk = mm->brk = ELF_PAGEALIGN(elf_brk); ++ ++ if ((current->flags & PF_RANDOMIZE) && snapshot_randomize_va_space > 1) { + /* +- * For architectures with ELF randomization, when executing +- * a loader directly (i.e. no interpreter listed in ELF +- * headers), move the brk area out of the mmap region +- * (since it grows up, and may collide early with the stack +- * growing down), and into the unused ELF_ET_DYN_BASE region. ++ * If we didn't move the brk to ELF_ET_DYN_BASE (above), ++ * leave a gap between .bss and brk. + */ +- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && +- elf_ex->e_type == ET_DYN && !interpreter) { +- mm->brk = mm->start_brk = ELF_ET_DYN_BASE; +- } ++ if (!brk_moved) ++ mm->brk = mm->start_brk = mm->brk + PAGE_SIZE; + + mm->brk = mm->start_brk = arch_randomize_brk(mm); ++ brk_moved = true; ++ } ++ + #ifdef compat_brk_randomized ++ if (brk_moved) + current->brk_randomized = 1; + #endif +- } + + if (current->personality & MMAP_PAGE_ZERO) { + /* Why this, you ask??? Well SVr4 maps page 0 as read-only, +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index af03a1c6ba768c..ef77d420851040 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -164,6 +164,14 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, + ei = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_extent_item); + num_refs = btrfs_extent_refs(leaf, ei); ++ if (unlikely(num_refs == 0)) { ++ ret = -EUCLEAN; ++ btrfs_err(fs_info, ++ "unexpected zero reference count for extent item (%llu %u %llu)", ++ key.objectid, key.type, key.offset); ++ btrfs_abort_transaction(trans, ret); ++ goto out_free; ++ } + extent_flags = btrfs_extent_flags(leaf, ei); + } else { + ret = -EUCLEAN; +@@ -177,8 +185,6 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, + + goto out_free; + } +- +- BUG_ON(num_refs == 0); + } else { + num_refs = 0; + extent_flags = 0; +@@ -208,10 +214,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, + goto search_again; + } + spin_lock(&head->lock); +- if (head->extent_op && head->extent_op->update_flags) ++ if (head->extent_op && head->extent_op->update_flags) { + extent_flags |= head->extent_op->flags_to_set; +- else +- BUG_ON(num_refs == 0); ++ } else if (unlikely(num_refs == 0)) { ++ spin_unlock(&head->lock); ++ mutex_unlock(&head->mutex); ++ spin_unlock(&delayed_refs->lock); ++ ret = -EUCLEAN; ++ btrfs_err(fs_info, ++ "unexpected zero reference count for extent %llu (%s)", ++ bytenr, metadata ? 
"metadata" : "data"); ++ btrfs_abort_transaction(trans, ret); ++ goto out_free; ++ } + + num_refs += head->ref_mod; + spin_unlock(&head->lock); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 4b12e45f575394..c140427e322ced 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -6880,10 +6880,18 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, + struct nfs4_unlockdata *p; + struct nfs4_state *state = lsp->ls_state; + struct inode *inode = state->inode; ++ struct nfs_lock_context *l_ctx; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) + return NULL; ++ l_ctx = nfs_get_lock_context(ctx); ++ if (!IS_ERR(l_ctx)) { ++ p->l_ctx = l_ctx; ++ } else { ++ kfree(p); ++ return NULL; ++ } + p->arg.fh = NFS_FH(inode); + p->arg.fl = &p->fl; + p->arg.seqid = seqid; +@@ -6891,7 +6899,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, + p->lsp = lsp; + /* Ensure we don't close file until we're done freeing locks! */ + p->ctx = get_nfs_open_context(ctx); +- p->l_ctx = nfs_get_lock_context(ctx); + locks_init_lock(&p->fl); + locks_copy_lock(&p->fl, fl); + p->server = NFS_SERVER(inode); +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index fe83c681e3fe03..73aa5a63afe3fb 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -732,6 +732,14 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, + return remaining; + } + ++static void pnfs_reset_return_info(struct pnfs_layout_hdr *lo) ++{ ++ struct pnfs_layout_segment *lseg; ++ ++ list_for_each_entry(lseg, &lo->plh_return_segs, pls_list) ++ pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0); ++} ++ + static void + pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo, + struct list_head *free_me, +@@ -1180,6 +1188,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, + pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq); + pnfs_free_returned_lsegs(lo, &freeme, range, seq); + pnfs_set_layout_stateid(lo, stateid, NULL, true); ++ pnfs_reset_return_info(lo); + } else + pnfs_mark_layout_stateid_invalid(lo, &freeme); + out_unlock: +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index 4536b6fcfa0256..3e88e8b3c16ec2 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -2979,7 +2979,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, + /* Eventually save off posix specific response info and timestaps */ + + err_free_rsp_buf: +- free_rsp_buf(resp_buftype, rsp); ++ free_rsp_buf(resp_buftype, rsp_iov.iov_base); + kfree(pc_buf); + err_free_req: + cifs_small_buf_release(req); +diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c +index 4f33a4a4888613..b4071c9cf8c951 100644 +--- a/fs/udf/truncate.c ++++ b/fs/udf/truncate.c +@@ -115,7 +115,7 @@ void udf_truncate_tail_extent(struct inode *inode) + } + /* This inode entry is in-memory only and thus we don't have to mark + * the inode dirty */ +- if (ret == 0) ++ if (ret >= 0) + iinfo->i_lenExtents = inode->i_size; + brelse(epos.bh); + } +diff --git a/fs/xattr.c b/fs/xattr.c +index c20046548f218e..5fed22c22a2be8 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -1291,6 +1291,15 @@ static bool xattr_is_trusted(const char *name) + return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); + } + ++static bool xattr_is_maclabel(const char *name) ++{ ++ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; ++ ++ return !strncmp(name, XATTR_SECURITY_PREFIX, ++ XATTR_SECURITY_PREFIX_LEN) && ++ security_ismaclabel(suffix); ++} ++ + /** + * simple_xattr_list - list all 
xattr objects + * @inode: inode from which to get the xattrs +@@ -1323,6 +1332,17 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, + if (err) + return err; + ++ err = security_inode_listsecurity(inode, buffer, remaining_size); ++ if (err < 0) ++ return err; ++ ++ if (buffer) { ++ if (remaining_size < err) ++ return -ERANGE; ++ buffer += err; ++ } ++ remaining_size -= err; ++ + read_lock(&xattrs->lock); + for (rbp = rb_first(&xattrs->rb_root); rbp; rbp = rb_next(rbp)) { + xattr = rb_entry(rbp, struct simple_xattr, rb_node); +@@ -1331,6 +1351,10 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, + if (!trusted && xattr_is_trusted(xattr->name)) + continue; + ++ /* skip MAC labels; these are provided by LSM above */ ++ if (xattr_is_maclabel(xattr->name)) ++ continue; ++ + err = xattr_list_one(&buffer, &remaining_size, xattr->name); + if (err) + break; +diff --git a/include/linux/bio.h b/include/linux/bio.h +index 0286bada25ce72..b893418c3cc022 100644 +--- a/include/linux/bio.h ++++ b/include/linux/bio.h +@@ -11,6 +11,7 @@ + #include + + #define BIO_MAX_VECS 256U ++#define BIO_MAX_INLINE_VECS UIO_MAXIOV + + struct queue_limits; + +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h +index 3e7fc905478984..b5bf5315ca8c10 100644 +--- a/include/linux/hyperv.h ++++ b/include/linux/hyperv.h +@@ -1224,13 +1224,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel, + enum vmbus_packet_type type, + u32 flags); + +-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, +- struct hv_page_buffer pagebuffers[], +- u32 pagecount, +- void *buffer, +- u32 bufferlen, +- u64 requestid); +- + extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, + struct vmbus_packet_mpb_array *mpb, + u32 desc_size, +diff --git a/include/linux/tpm.h b/include/linux/tpm.h +index 5f4998626a9889..bf8a4ec8a01c1f 100644 +--- a/include/linux/tpm.h ++++ b/include/linux/tpm.h +@@ -181,7 +181,7 @@ enum tpm2_const { + + enum tpm2_timeouts { + TPM2_TIMEOUT_A = 750, +- TPM2_TIMEOUT_B = 2000, ++ TPM2_TIMEOUT_B = 4000, + TPM2_TIMEOUT_C = 200, + TPM2_TIMEOUT_D = 30, + TPM2_DURATION_SHORT = 20, +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 4ec2a948ae3dbb..3287988a6a9878 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -1029,6 +1029,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) + return skb; + } + ++static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct) ++{ ++ struct sk_buff *skb; ++ ++ skb = __skb_dequeue(&sch->gso_skb); ++ if (skb) { ++ sch->q.qlen--; ++ return skb; ++ } ++ if (direct) ++ return __qdisc_dequeue_head(&sch->q); ++ else ++ return sch->dequeue(sch); ++} ++ + static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) + { + struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); +diff --git a/include/sound/ump_msg.h b/include/sound/ump_msg.h +index 72f60ddfea7535..9556b4755a1ed8 100644 +--- a/include/sound/ump_msg.h ++++ b/include/sound/ump_msg.h +@@ -604,7 +604,7 @@ struct snd_ump_stream_msg_ep_info { + } __packed; + + /* UMP Stream Message: Device Info Notification (128bit) */ +-struct snd_ump_stream_msg_devince_info { ++struct snd_ump_stream_msg_device_info { + #ifdef __BIG_ENDIAN_BITFIELD + /* 0 */ + u32 type:4; +@@ -754,7 +754,7 @@ struct snd_ump_stream_msg_fb_name { + union snd_ump_stream_msg { + struct snd_ump_stream_msg_ep_discovery ep_discovery; + struct snd_ump_stream_msg_ep_info ep_info; 
+- struct snd_ump_stream_msg_devince_info device_info; ++ struct snd_ump_stream_msg_device_info device_info; + struct snd_ump_stream_msg_stream_cfg stream_cfg; + struct snd_ump_stream_msg_fb_discovery fb_discovery; + struct snd_ump_stream_msg_fb_info fb_info; +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index 3646426c69e253..ad8b62202bdc46 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -1229,9 +1229,11 @@ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) + + if (top_cs) { + /* +- * Percpu kthreads in top_cpuset are ignored ++ * PF_NO_SETAFFINITY tasks are ignored. ++ * All per cpu kthreads should have PF_NO_SETAFFINITY ++ * flag set, see kthread_set_per_cpu(). + */ +- if (kthread_is_per_cpu(task)) ++ if (task->flags & PF_NO_SETAFFINITY) + continue; + cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus); + } else { +diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c +index 4376887e0d8aab..c9b0533407edeb 100644 +--- a/kernel/trace/trace_dynevent.c ++++ b/kernel/trace/trace_dynevent.c +@@ -16,7 +16,7 @@ + #include "trace_output.h" /* for trace_event_sem */ + #include "trace_dynevent.h" + +-static DEFINE_MUTEX(dyn_event_ops_mutex); ++DEFINE_MUTEX(dyn_event_ops_mutex); + static LIST_HEAD(dyn_event_ops_list); + + bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call) +@@ -125,6 +125,20 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type + return ret; + } + ++/* ++ * Locked version of event creation. The event creation must be protected by ++ * dyn_event_ops_mutex because of protecting trace_probe_log. ++ */ ++int dyn_event_create(const char *raw_command, struct dyn_event_operations *type) ++{ ++ int ret; ++ ++ mutex_lock(&dyn_event_ops_mutex); ++ ret = type->create(raw_command); ++ mutex_unlock(&dyn_event_ops_mutex); ++ return ret; ++} ++ + static int create_dyn_event(const char *raw_command) + { + struct dyn_event_operations *ops; +diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h +index 936477a111d3e7..beee3f8d754444 100644 +--- a/kernel/trace/trace_dynevent.h ++++ b/kernel/trace/trace_dynevent.h +@@ -100,6 +100,7 @@ void *dyn_event_seq_next(struct seq_file *m, void *v, loff_t *pos); + void dyn_event_seq_stop(struct seq_file *m, void *v); + int dyn_events_release_all(struct dyn_event_operations *type); + int dyn_event_release(const char *raw_command, struct dyn_event_operations *type); ++int dyn_event_create(const char *raw_command, struct dyn_event_operations *type); + + /* + * for_each_dyn_event - iterate over the dyn_event list +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index 76abc9a45f971a..2c233c0d38fa95 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -1554,7 +1554,7 @@ stacktrace_trigger(struct event_trigger_data *data, + struct trace_event_file *file = data->private_data; + + if (file) +- __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP); ++ __trace_stack(file->tr, tracing_gen_ctx_dec(), STACK_SKIP); + else + trace_dump_stack(STACK_SKIP); + } +diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c +index 69e92c7359fb9a..44ae51d1dc45d8 100644 +--- a/kernel/trace/trace_functions.c ++++ b/kernel/trace/trace_functions.c +@@ -561,11 +561,7 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, + + static __always_inline void trace_stack(struct trace_array *tr) + { +- unsigned int trace_ctx; +- +- 
trace_ctx = tracing_gen_ctx(); +- +- __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP); ++ __trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP); + } + + static void +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 508c10414a9343..46491f3c1569cd 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -1004,7 +1004,7 @@ static int create_or_delete_trace_kprobe(const char *raw_command) + if (raw_command[0] == '-') + return dyn_event_release(raw_command, &trace_kprobe_ops); + +- ret = trace_kprobe_create(raw_command); ++ ret = dyn_event_create(raw_command, &trace_kprobe_ops); + return ret == -ECANCELED ? -EINVAL : ret; + } + +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c +index 606190239c8776..694f32d843d90c 100644 +--- a/kernel/trace/trace_probe.c ++++ b/kernel/trace/trace_probe.c +@@ -153,9 +153,12 @@ static const struct fetch_type *find_fetch_type(const char *type, unsigned long + } + + static struct trace_probe_log trace_probe_log; ++extern struct mutex dyn_event_ops_mutex; + + void trace_probe_log_init(const char *subsystem, int argc, const char **argv) + { ++ lockdep_assert_held(&dyn_event_ops_mutex); ++ + trace_probe_log.subsystem = subsystem; + trace_probe_log.argc = argc; + trace_probe_log.argv = argv; +@@ -164,11 +167,15 @@ void trace_probe_log_init(const char *subsystem, int argc, const char **argv) + + void trace_probe_log_clear(void) + { ++ lockdep_assert_held(&dyn_event_ops_mutex); ++ + memset(&trace_probe_log, 0, sizeof(trace_probe_log)); + } + + void trace_probe_log_set_index(int index) + { ++ lockdep_assert_held(&dyn_event_ops_mutex); ++ + trace_probe_log.index = index; + } + +@@ -177,6 +184,8 @@ void __trace_probe_log_err(int offset, int err_type) + char *command, *p; + int i, len = 0, pos = 0; + ++ lockdep_assert_held(&dyn_event_ops_mutex); ++ + if (!trace_probe_log.argv) + return; + +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c +index 79f8da7e3cd492..ecf04e81ddf705 100644 +--- a/kernel/trace/trace_uprobe.c ++++ b/kernel/trace/trace_uprobe.c +@@ -730,7 +730,7 @@ static int create_or_delete_trace_uprobe(const char *raw_command) + if (raw_command[0] == '-') + return dyn_event_release(raw_command, &trace_uprobe_ops); + +- ret = trace_uprobe_create(raw_command); ++ ret = dyn_event_create(raw_command, &trace_uprobe_ops); + return ret == -ECANCELED ? -EINVAL : ret; + } + +diff --git a/mm/memblock.c b/mm/memblock.c +index 047dce35cf6e0e..0695284232f3c6 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -460,7 +460,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type, + min(new_area_start, memblock.current_limit), + new_alloc_size, PAGE_SIZE); + +- new_array = addr ? __va(addr) : NULL; ++ if (addr) { ++ /* The memory may not have been accepted, yet. 
*/ ++ accept_memory(addr, addr + new_alloc_size); ++ ++ new_array = __va(addr); ++ } else { ++ new_array = NULL; ++ } + } + if (!addr) { + pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 9beed7c71a8e91..aab16690545207 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1735,8 +1735,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) + if (PageHWPoison(page)) { + if (WARN_ON(folio_test_lru(folio))) + folio_isolate_lru(folio); +- if (folio_mapped(folio)) ++ if (folio_mapped(folio)) { ++ folio_lock(folio); + try_to_unmap(folio, TTU_IGNORE_MLOCK); ++ folio_unlock(folio); ++ } ++ + continue; + } + +diff --git a/mm/migrate.c b/mm/migrate.c +index 1004b1def1c201..4ed47088521746 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1504,6 +1504,7 @@ struct migrate_pages_stats { + int nr_thp_succeeded; /* THP migrated successfully */ + int nr_thp_failed; /* THP failed to be migrated */ + int nr_thp_split; /* THP split before migrating */ ++ int nr_split; /* Large folio (include THP) split before migrating */ + }; + + /* +@@ -1623,6 +1624,7 @@ static int migrate_pages_batch(struct list_head *from, + int nr_retry_pages = 0; + int pass = 0; + bool is_thp = false; ++ bool is_large = false; + struct folio *folio, *folio2, *dst = NULL, *dst2; + int rc, rc_saved = 0, nr_pages; + LIST_HEAD(unmap_folios); +@@ -1638,7 +1640,8 @@ static int migrate_pages_batch(struct list_head *from, + nr_retry_pages = 0; + + list_for_each_entry_safe(folio, folio2, from, lru) { +- is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); ++ is_large = folio_test_large(folio); ++ is_thp = is_large && folio_test_pmd_mappable(folio); + nr_pages = folio_nr_pages(folio); + + cond_resched(); +@@ -1658,6 +1661,7 @@ static int migrate_pages_batch(struct list_head *from, + stats->nr_thp_failed++; + if (!try_split_folio(folio, split_folios)) { + stats->nr_thp_split++; ++ stats->nr_split++; + continue; + } + stats->nr_failed_pages += nr_pages; +@@ -1686,11 +1690,12 @@ static int migrate_pages_batch(struct list_head *from, + nr_failed++; + stats->nr_thp_failed += is_thp; + /* Large folio NUMA faulting doesn't split to retry. */ +- if (folio_test_large(folio) && !nosplit) { ++ if (is_large && !nosplit) { + int ret = try_split_folio(folio, split_folios); + + if (!ret) { + stats->nr_thp_split += is_thp; ++ stats->nr_split += is_large; + break; + } else if (reason == MR_LONGTERM_PIN && + ret == -EAGAIN) { +@@ -1836,6 +1841,7 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio, + stats->nr_succeeded += astats.nr_succeeded; + stats->nr_thp_succeeded += astats.nr_thp_succeeded; + stats->nr_thp_split += astats.nr_thp_split; ++ stats->nr_split += astats.nr_split; + if (rc < 0) { + stats->nr_failed_pages += astats.nr_failed_pages; + stats->nr_thp_failed += astats.nr_thp_failed; +@@ -1843,7 +1849,11 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio, + return rc; + } + stats->nr_thp_failed += astats.nr_thp_split; +- nr_failed += astats.nr_thp_split; ++ /* ++ * Do not count rc, as pages will be retried below. ++ * Count nr_split only, since it includes nr_thp_split. ++ */ ++ nr_failed += astats.nr_split; + /* + * Fall back to migrate all failed folios one by one synchronously. 
All + * failed folios except split THPs will be retried, so their failure +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index bc62bb2a3b132e..74737c35082b45 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -303,7 +303,6 @@ EXPORT_SYMBOL(nr_online_nodes); + static bool page_contains_unaccepted(struct page *page, unsigned int order); + static void accept_page(struct page *page, unsigned int order); + static bool cond_accept_memory(struct zone *zone, unsigned int order); +-static inline bool has_unaccepted_memory(void); + static bool __free_unaccepted(struct page *page); + + int page_group_by_mobility_disabled __read_mostly; +@@ -6586,9 +6585,6 @@ bool has_managed_dma(void) + + #ifdef CONFIG_UNACCEPTED_MEMORY + +-/* Counts number of zones with unaccepted pages. */ +-static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); +- + static bool lazy_accept = true; + + static int __init accept_memory_parse(char *p) +@@ -6624,7 +6620,6 @@ static bool try_to_accept_memory_one(struct zone *zone) + { + unsigned long flags; + struct page *page; +- bool last; + + spin_lock_irqsave(&zone->lock, flags); + page = list_first_entry_or_null(&zone->unaccepted_pages, +@@ -6635,7 +6630,6 @@ static bool try_to_accept_memory_one(struct zone *zone) + } + + list_del(&page->lru); +- last = list_empty(&zone->unaccepted_pages); + + __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); + __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); +@@ -6645,9 +6639,6 @@ static bool try_to_accept_memory_one(struct zone *zone) + + __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); + +- if (last) +- static_branch_dec(&zones_with_unaccepted_pages); +- + return true; + } + +@@ -6656,9 +6647,6 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order) + long to_accept, wmark; + bool ret = false; + +- if (!has_unaccepted_memory()) +- return false; +- + if (list_empty(&zone->unaccepted_pages)) + return false; + +@@ -6688,30 +6676,20 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order) + return ret; + } + +-static inline bool has_unaccepted_memory(void) +-{ +- return static_branch_unlikely(&zones_with_unaccepted_pages); +-} +- + static bool __free_unaccepted(struct page *page) + { + struct zone *zone = page_zone(page); + unsigned long flags; +- bool first = false; + + if (!lazy_accept) + return false; + + spin_lock_irqsave(&zone->lock, flags); +- first = list_empty(&zone->unaccepted_pages); + list_add_tail(&page->lru, &zone->unaccepted_pages); + __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); + __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); + spin_unlock_irqrestore(&zone->lock, flags); + +- if (first) +- static_branch_inc(&zones_with_unaccepted_pages); +- + return true; + } + +@@ -6731,11 +6709,6 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order) + return false; + } + +-static inline bool has_unaccepted_memory(void) +-{ +- return false; +-} +- + static bool __free_unaccepted(struct page *page) + { + BUILD_BUG(); +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 29e420e9754bb3..589c3a481e4c10 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -7605,11 +7605,16 @@ static void add_device_complete(struct hci_dev *hdev, void *data, int err) + struct mgmt_cp_add_device *cp = cmd->param; + + if (!err) { ++ struct hci_conn_params *params; ++ ++ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, ++ le_addr_type(cp->addr.type)); ++ + device_added(cmd->sk, hdev, &cp->addr.bdaddr, 
cp->addr.type, + cp->action); + device_flags_changed(NULL, hdev, &cp->addr.bdaddr, + cp->addr.type, hdev->conn_flags, +- PTR_UINT(cmd->user_data)); ++ params ? params->flags : 0); + } + + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE, +@@ -7712,8 +7717,6 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, + goto unlock; + } + +- cmd->user_data = UINT_PTR(current_flags); +- + err = hci_cmd_sync_queue(hdev, add_device_sync, cmd, + add_device_complete); + if (err < 0) { +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index d1046f495e63ff..3a6fff98748b86 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -1186,10 +1186,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + return -EINVAL; + } + +- local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + +- sizeof(void *) * channels, GFP_KERNEL); ++ local->int_scan_req = kzalloc(struct_size(local->int_scan_req, ++ channels, channels), ++ GFP_KERNEL); + if (!local->int_scan_req) + return -ENOMEM; ++ local->int_scan_req->n_channels = channels; + + eth_broadcast_addr(local->int_scan_req->bssid); + +diff --git a/net/mctp/device.c b/net/mctp/device.c +index 85cc5f31f1e7c0..8d1386601bbe06 100644 +--- a/net/mctp/device.c ++++ b/net/mctp/device.c +@@ -20,8 +20,7 @@ + #include + + struct mctp_dump_cb { +- int h; +- int idx; ++ unsigned long ifindex; + size_t a_idx; + }; + +@@ -115,43 +114,36 @@ static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb) + { + struct mctp_dump_cb *mcb = (void *)cb->ctx; + struct net *net = sock_net(skb->sk); +- struct hlist_head *head; + struct net_device *dev; + struct ifaddrmsg *hdr; + struct mctp_dev *mdev; +- int ifindex; +- int idx = 0, rc; +- +- hdr = nlmsg_data(cb->nlh); +- // filter by ifindex if requested +- ifindex = hdr->ifa_index; ++ int ifindex = 0, rc; ++ ++ /* Filter by ifindex if a header is provided */ ++ if (cb->nlh->nlmsg_len >= nlmsg_msg_size(sizeof(*hdr))) { ++ hdr = nlmsg_data(cb->nlh); ++ ifindex = hdr->ifa_index; ++ } else { ++ if (cb->strict_check) { ++ NL_SET_ERR_MSG(cb->extack, "mctp: Invalid header for addr dump request"); ++ return -EINVAL; ++ } ++ } + + rcu_read_lock(); +- for (; mcb->h < NETDEV_HASHENTRIES; mcb->h++, mcb->idx = 0) { +- idx = 0; +- head = &net->dev_index_head[mcb->h]; +- hlist_for_each_entry_rcu(dev, head, index_hlist) { +- if (idx >= mcb->idx && +- (ifindex == 0 || ifindex == dev->ifindex)) { +- mdev = __mctp_dev_get(dev); +- if (mdev) { +- rc = mctp_dump_dev_addrinfo(mdev, +- skb, cb); +- mctp_dev_put(mdev); +- // Error indicates full buffer, this +- // callback will get retried. 
+- if (rc < 0) +- goto out; +- } +- } +- idx++; +- // reset for next iteration +- mcb->a_idx = 0; +- } ++ for_each_netdev_dump(net, dev, mcb->ifindex) { ++ if (ifindex && ifindex != dev->ifindex) ++ continue; ++ mdev = __mctp_dev_get(dev); ++ if (!mdev) ++ continue; ++ rc = mctp_dump_dev_addrinfo(mdev, skb, cb); ++ mctp_dev_put(mdev); ++ if (rc < 0) ++ break; ++ mcb->a_idx = 0; + } +-out: + rcu_read_unlock(); +- mcb->idx = idx; + + return skb->len; + } +@@ -525,9 +517,12 @@ static struct notifier_block mctp_dev_nb = { + }; + + static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = { +- {THIS_MODULE, PF_MCTP, RTM_NEWADDR, mctp_rtm_newaddr, NULL, 0}, +- {THIS_MODULE, PF_MCTP, RTM_DELADDR, mctp_rtm_deladdr, NULL, 0}, +- {THIS_MODULE, PF_MCTP, RTM_GETADDR, NULL, mctp_dump_addrinfo, 0}, ++ {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_NEWADDR, ++ .doit = mctp_rtm_newaddr}, ++ {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_DELADDR, ++ .doit = mctp_rtm_deladdr}, ++ {.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_GETADDR, ++ .dumpit = mctp_dump_addrinfo}, + }; + + int __init mctp_device_init(void) +diff --git a/net/mctp/route.c b/net/mctp/route.c +index d3c1f54386efc1..009ba5edbd5254 100644 +--- a/net/mctp/route.c ++++ b/net/mctp/route.c +@@ -274,8 +274,10 @@ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) + + key = flow->key; + +- if (WARN_ON(key->dev && key->dev != dev)) ++ if (key->dev) { ++ WARN_ON(key->dev != dev); + return; ++ } + + mctp_dev_set_key(dev, key); + } +diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c +index 5f2e0681574567..63c02040b426ae 100644 +--- a/net/sched/sch_codel.c ++++ b/net/sched/sch_codel.c +@@ -168,7 +168,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt, + + qlen = sch->q.qlen; + while (sch->q.qlen > sch->limit) { +- struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); ++ struct sk_buff *skb = qdisc_dequeue_internal(sch, true); + + dropped += qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c +index f59a2cb2c803d7..91f5ef6be0f231 100644 +--- a/net/sched/sch_fq.c ++++ b/net/sched/sch_fq.c +@@ -901,7 +901,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt, + sch_tree_lock(sch); + } + while (sch->q.qlen > sch->limit) { +- struct sk_buff *skb = fq_dequeue(sch); ++ struct sk_buff *skb = qdisc_dequeue_internal(sch, false); + + if (!skb) + break; +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c +index 9330923a624c02..47b5a056165cb0 100644 +--- a/net/sched/sch_fq_codel.c ++++ b/net/sched/sch_fq_codel.c +@@ -431,7 +431,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, + + while (sch->q.qlen > sch->limit || + q->memory_usage > q->memory_limit) { +- struct sk_buff *skb = fq_codel_dequeue(sch); ++ struct sk_buff *skb = qdisc_dequeue_internal(sch, false); + + q->cstats.drop_len += qdisc_pkt_len(skb); + rtnl_kfree_skbs(skb, skb); +diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c +index 68e6acd0f130d9..607c580d75e4b6 100644 +--- a/net/sched/sch_fq_pie.c ++++ b/net/sched/sch_fq_pie.c +@@ -357,7 +357,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt, + + /* Drop excess packets if new limit is lower */ + while (sch->q.qlen > sch->limit) { +- struct sk_buff *skb = fq_pie_qdisc_dequeue(sch); ++ struct sk_buff *skb = qdisc_dequeue_internal(sch, false); + + len_dropped += qdisc_pkt_len(skb); + num_dropped += 1; +diff --git 
a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c +index d26cd436cbe31b..83fc44f20e31cb 100644 +--- a/net/sched/sch_hhf.c ++++ b/net/sched/sch_hhf.c +@@ -560,7 +560,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt, + qlen = sch->q.qlen; + prev_backlog = sch->qstats.backlog; + while (sch->q.qlen > sch->limit) { +- struct sk_buff *skb = hhf_dequeue(sch); ++ struct sk_buff *skb = qdisc_dequeue_internal(sch, false); + + rtnl_kfree_skbs(skb, skb); + } +diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c +index 2da6250ec34636..48c5ab8ec143c1 100644 +--- a/net/sched/sch_pie.c ++++ b/net/sched/sch_pie.c +@@ -190,7 +190,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, + /* Drop excess packets if new limit is lower */ + qlen = sch->q.qlen; + while (sch->q.qlen > sch->limit) { +- struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); ++ struct sk_buff *skb = qdisc_dequeue_internal(sch, true); + + dropped += qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); +diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c +index fd73be940f4607..77a76634014a5a 100644 +--- a/net/sctp/sysctl.c ++++ b/net/sctp/sysctl.c +@@ -529,6 +529,8 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write, + return ret; + } + ++static DEFINE_MUTEX(sctp_sysctl_mutex); ++ + static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) + { +@@ -553,6 +555,7 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, + if (new_value > max || new_value < min) + return -EINVAL; + ++ mutex_lock(&sctp_sysctl_mutex); + net->sctp.udp_port = new_value; + sctp_udp_sock_stop(net); + if (new_value) { +@@ -565,6 +568,7 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, + lock_sock(sk); + sctp_sk(sk)->udp_port = htons(net->sctp.udp_port); + release_sock(sk); ++ mutex_unlock(&sctp_sysctl_mutex); + } + + return ret; +diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c +index 5df08d848b5c9c..1852fac3e72b76 100644 +--- a/net/tls/tls_strp.c ++++ b/net/tls/tls_strp.c +@@ -395,7 +395,6 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) + return 0; + + shinfo = skb_shinfo(strp->anchor); +- shinfo->frag_list = NULL; + + /* If we don't know the length go max plus page for cipher overhead */ + need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE; +@@ -411,6 +410,8 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) + page, 0, 0); + } + ++ shinfo->frag_list = NULL; ++ + strp->copy_mode = 1; + strp->stm.offset = 0; + +diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c +index d0ee9001c7b376..aaa8fa92e24d52 100644 +--- a/samples/ftrace/sample-trace-array.c ++++ b/samples/ftrace/sample-trace-array.c +@@ -112,7 +112,7 @@ static int __init sample_trace_array_init(void) + /* + * If context specific per-cpu buffers havent already been allocated. 
+ */ +- trace_printk_init_buffers(); ++ trace_array_init_printk(tr); + + simple_tsk = kthread_run(simple_thread, NULL, "sample-instance"); + if (IS_ERR(simple_tsk)) { +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index 6195fe9dda1799..49f6763c3250dd 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -736,15 +736,21 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client, + */ + static int __deliver_to_subscribers(struct snd_seq_client *client, + struct snd_seq_event *event, +- struct snd_seq_client_port *src_port, +- int atomic, int hop) ++ int port, int atomic, int hop) + { ++ struct snd_seq_client_port *src_port; + struct snd_seq_subscribers *subs; + int err, result = 0, num_ev = 0; + union __snd_seq_event event_saved; + size_t saved_size; + struct snd_seq_port_subs_info *grp; + ++ if (port < 0) ++ return 0; ++ src_port = snd_seq_port_use_ptr(client, port); ++ if (!src_port) ++ return 0; ++ + /* save original event record */ + saved_size = snd_seq_event_packet_size(event); + memcpy(&event_saved, event, saved_size); +@@ -780,6 +786,7 @@ static int __deliver_to_subscribers(struct snd_seq_client *client, + read_unlock(&grp->list_lock); + else + up_read(&grp->list_mutex); ++ snd_seq_port_unlock(src_port); + memcpy(event, &event_saved, saved_size); + return (result < 0) ? result : num_ev; + } +@@ -788,25 +795,32 @@ static int deliver_to_subscribers(struct snd_seq_client *client, + struct snd_seq_event *event, + int atomic, int hop) + { +- struct snd_seq_client_port *src_port; +- int ret = 0, ret2; +- +- src_port = snd_seq_port_use_ptr(client, event->source.port); +- if (src_port) { +- ret = __deliver_to_subscribers(client, event, src_port, atomic, hop); +- snd_seq_port_unlock(src_port); +- } +- +- if (client->ump_endpoint_port < 0 || +- event->source.port == client->ump_endpoint_port) +- return ret; ++ int ret; ++#if IS_ENABLED(CONFIG_SND_SEQ_UMP) ++ int ret2; ++#endif + +- src_port = snd_seq_port_use_ptr(client, client->ump_endpoint_port); +- if (!src_port) ++ ret = __deliver_to_subscribers(client, event, ++ event->source.port, atomic, hop); ++#if IS_ENABLED(CONFIG_SND_SEQ_UMP) ++ if (!snd_seq_client_is_ump(client) || client->ump_endpoint_port < 0) + return ret; +- ret2 = __deliver_to_subscribers(client, event, src_port, atomic, hop); +- snd_seq_port_unlock(src_port); +- return ret2 < 0 ? ret2 : ret; ++ /* If it's an event from EP port (and with a UMP group), ++ * deliver to subscribers of the corresponding UMP group port, too. ++ * Or, if it's from non-EP port, deliver to subscribers of EP port, too. ++ */ ++ if (event->source.port == client->ump_endpoint_port) ++ ret2 = __deliver_to_subscribers(client, event, ++ snd_seq_ump_group_port(event), ++ atomic, hop); ++ else ++ ret2 = __deliver_to_subscribers(client, event, ++ client->ump_endpoint_port, ++ atomic, hop); ++ if (ret2 < 0) ++ return ret2; ++#endif ++ return ret; + } + + /* deliver an event to the destination port(s). 
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c +index 4dd540cbb1cbbc..83a27362b7a066 100644 +--- a/sound/core/seq/seq_ump_convert.c ++++ b/sound/core/seq/seq_ump_convert.c +@@ -1284,3 +1284,21 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source, + else + return cvt_to_ump_midi1(dest, dest_port, event, atomic, hop); + } ++ ++/* return the UMP group-port number of the event; ++ * return -1 if groupless or non-UMP event ++ */ ++int snd_seq_ump_group_port(const struct snd_seq_event *event) ++{ ++ const struct snd_seq_ump_event *ump_ev = ++ (const struct snd_seq_ump_event *)event; ++ unsigned char type; ++ ++ if (!snd_seq_ev_is_ump(event)) ++ return -1; ++ type = ump_message_type(ump_ev->ump[0]); ++ if (ump_is_groupless_msg(type)) ++ return -1; ++ /* group-port number starts from 1 */ ++ return ump_message_group(ump_ev->ump[0]) + 1; ++} +diff --git a/sound/core/seq/seq_ump_convert.h b/sound/core/seq/seq_ump_convert.h +index 6c146d8032804f..4abf0a7637d701 100644 +--- a/sound/core/seq/seq_ump_convert.h ++++ b/sound/core/seq/seq_ump_convert.h +@@ -18,5 +18,6 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source, + struct snd_seq_client_port *dest_port, + struct snd_seq_event *event, + int atomic, int hop); ++int snd_seq_ump_group_port(const struct snd_seq_event *event); + + #endif /* __SEQ_UMP_CONVERT_H */ +diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c +index 4bc0f53c223b79..bc9203da35fb6f 100644 +--- a/sound/pci/es1968.c ++++ b/sound/pci/es1968.c +@@ -1569,7 +1569,7 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream) + struct snd_pcm_runtime *runtime = substream->runtime; + struct es1968 *chip = snd_pcm_substream_chip(substream); + struct esschan *es; +- int apu1, apu2; ++ int err, apu1, apu2; + + apu1 = snd_es1968_alloc_apu_pair(chip, ESM_APU_PCM_CAPTURE); + if (apu1 < 0) +@@ -1613,7 +1613,9 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream) + runtime->hw = snd_es1968_capture; + runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = + calc_available_memory_size(chip) - 1024; /* keep MIXBUF size */ +- snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES); ++ err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES); ++ if (err < 0) ++ return err; + + spin_lock_irq(&chip->substream_lock); + list_add(&es->list, &chip->substream_list); +diff --git a/sound/sh/Kconfig b/sound/sh/Kconfig +index b75fbb3236a7b9..f5fa09d740b4c9 100644 +--- a/sound/sh/Kconfig ++++ b/sound/sh/Kconfig +@@ -14,7 +14,7 @@ if SND_SUPERH + + config SND_AICA + tristate "Dreamcast Yamaha AICA sound" +- depends on SH_DREAMCAST ++ depends on SH_DREAMCAST && SH_DMA_API + select SND_PCM + select G2_DMA + help +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index d9d4c5922a50bb..0b8b20550ab381 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -2140,6 +2140,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { + QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */ + QUIRK_FLAG_GET_SAMPLE_RATE), ++ DEVICE_FLG(0x0c45, 0x636b, /* Microdia JP001 USB Camera */ ++ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */ + QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */ +@@ -2148,6 +2150,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { + QUIRK_FLAG_FIXED_RATE), + DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */ + 
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), ++ DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */ ++ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ + QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), + DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */ +diff --git a/tools/net/ynl/ethtool.py b/tools/net/ynl/ethtool.py +index 6c9f7e31250cdb..ffd8eb6d190483 100755 +--- a/tools/net/ynl/ethtool.py ++++ b/tools/net/ynl/ethtool.py +@@ -320,20 +320,37 @@ def main(): + return + + if args.show_time_stamping: +- tsinfo = dumpit(ynl, args, 'tsinfo-get') ++ req = { ++ 'header': { ++ 'flags': 'stats', ++ }, ++ } ++ ++ tsinfo = dumpit(ynl, args, 'tsinfo-get', req) + + print(f'Time stamping parameters for {args.device}:') + + print('Capabilities:') + [print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])] + +- print(f'PTP Hardware Clock: {tsinfo["phc-index"]}') ++ print(f'PTP Hardware Clock: {tsinfo.get("phc-index", "none")}') ++ ++ if 'tx-types' in tsinfo: ++ print('Hardware Transmit Timestamp Modes:') ++ [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])] ++ else: ++ print('Hardware Transmit Timestamp Modes: none') ++ ++ if 'rx-filters' in tsinfo: ++ print('Hardware Receive Filter Modes:') ++ [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])] ++ else: ++ print('Hardware Receive Filter Modes: none') + +- print('Hardware Transmit Timestamp Modes:') +- [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])] ++ if 'stats' in tsinfo and tsinfo['stats']: ++ print('Statistics:') ++ [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()] + +- print('Hardware Receive Filter Modes:') +- [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])] + return + + print(f'Settings for {args.device}:') +diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile +index a0b8688b083694..a705493c04bb73 100644 +--- a/tools/testing/selftests/exec/Makefile ++++ b/tools/testing/selftests/exec/Makefile +@@ -3,8 +3,13 @@ CFLAGS = -Wall + CFLAGS += -Wno-nonnull + CFLAGS += -D_GNU_SOURCE + ++ALIGNS := 0x1000 0x200000 0x1000000 ++ALIGN_PIES := $(patsubst %,load_address.%,$(ALIGNS)) ++ALIGN_STATIC_PIES := $(patsubst %,load_address.static.%,$(ALIGNS)) ++ALIGNMENT_TESTS := $(ALIGN_PIES) $(ALIGN_STATIC_PIES) ++ + TEST_PROGS := binfmt_script.py +-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular ++TEST_GEN_PROGS := execveat non-regular $(ALIGNMENT_TESTS) + TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir + # Makefile is a run-time dependency, since it's accessed by the execveat test + TEST_FILES := Makefile +@@ -28,9 +33,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat + $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat + cp $< $@ + chmod -x $@ +-$(OUTPUT)/load_address_4096: load_address.c +- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@ +-$(OUTPUT)/load_address_2097152: load_address.c +- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@ +-$(OUTPUT)/load_address_16777216: load_address.c +- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@ ++$(OUTPUT)/load_address.0x%: load_address.c ++ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \ ++ -fPIE -pie $< -o $@ ++$(OUTPUT)/load_address.static.0x%: load_address.c ++ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \ ++ -fPIE -static-pie $< -o $@ +diff 
--git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c +index d487c2f6a61509..8257fddba8c8db 100644 +--- a/tools/testing/selftests/exec/load_address.c ++++ b/tools/testing/selftests/exec/load_address.c +@@ -5,10 +5,13 @@ + #include + #include + #include ++#include ++#include "../kselftest.h" + + struct Statistics { + unsigned long long load_address; + unsigned long long alignment; ++ bool interp; + }; + + int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) +@@ -25,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) + stats->alignment = 0; + + for (i = 0; i < info->dlpi_phnum; i++) { ++ unsigned long long align; ++ ++ if (info->dlpi_phdr[i].p_type == PT_INTERP) { ++ stats->interp = true; ++ continue; ++ } ++ + if (info->dlpi_phdr[i].p_type != PT_LOAD) + continue; + +- if (info->dlpi_phdr[i].p_align > stats->alignment) +- stats->alignment = info->dlpi_phdr[i].p_align; ++ align = info->dlpi_phdr[i].p_align; ++ ++ if (align > stats->alignment) ++ stats->alignment = align; + } + + return 1; // Terminate dl_iterate_phdr. +@@ -37,32 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) + + int main(int argc, char **argv) + { +- struct Statistics extracted; +- unsigned long long misalign; ++ struct Statistics extracted = { }; ++ unsigned long long misalign, pow2; ++ bool interp_needed; ++ char buf[1024]; ++ FILE *maps; + int ret; + +- ret = dl_iterate_phdr(ExtractStatistics, &extracted); +- if (ret != 1) { +- fprintf(stderr, "FAILED\n"); +- return 1; +- } ++ ksft_print_header(); ++ ksft_set_plan(4); + +- if (extracted.alignment == 0) { +- fprintf(stderr, "No alignment found\n"); +- return 1; +- } else if (extracted.alignment & (extracted.alignment - 1)) { +- fprintf(stderr, "Alignment is not a power of 2\n"); +- return 1; ++ /* Dump maps file for debugging reference. */ ++ maps = fopen("/proc/self/maps", "r"); ++ if (!maps) ++ ksft_exit_fail_msg("FAILED: /proc/self/maps: %s\n", strerror(errno)); ++ while (fgets(buf, sizeof(buf), maps)) { ++ ksft_print_msg("%s", buf); + } ++ fclose(maps); ++ ++ /* Walk the program headers. */ ++ ret = dl_iterate_phdr(ExtractStatistics, &extracted); ++ if (ret != 1) ++ ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n"); ++ ++ /* Report our findings. */ ++ ksft_print_msg("load_address=%#llx alignment=%#llx\n", ++ extracted.load_address, extracted.alignment); ++ ++ /* If we're named with ".static." we expect no INTERP. */ ++ interp_needed = strstr(argv[0], ".static.") == NULL; ++ ++ /* Were we built as expected? */ ++ ksft_test_result(interp_needed == extracted.interp, ++ "%s INTERP program header %s\n", ++ interp_needed ? "Wanted" : "Unwanted", ++ extracted.interp ? "seen" : "missing"); + ++ /* Did we find an alignment? */ ++ ksft_test_result(extracted.alignment != 0, ++ "Alignment%s found\n", extracted.alignment ? "" : " NOT"); ++ ++ /* Is the alignment sane? */ ++ pow2 = extracted.alignment & (extracted.alignment - 1); ++ ksft_test_result(pow2 == 0, ++ "Alignment is%s a power of 2: %#llx\n", ++ pow2 == 0 ? "" : " NOT", extracted.alignment); ++ ++ /* Is the load address aligned? */ + misalign = extracted.load_address & (extracted.alignment - 1); +- if (misalign) { +- printf("alignment = %llu, load_address = %llu\n", +- extracted.alignment, extracted.load_address); +- fprintf(stderr, "FAILED\n"); +- return 1; +- } ++ ksft_test_result(misalign == 0, "Load Address is %saligned (%#llx)\n", ++ misalign ? 
"MIS" : "", misalign); + +- fprintf(stderr, "PASS\n"); +- return 0; ++ ksft_finished(); + } +diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c +index 309b3750e57e13..38fec412206b9a 100644 +--- a/tools/testing/selftests/mm/compaction_test.c ++++ b/tools/testing/selftests/mm/compaction_test.c +@@ -89,6 +89,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size) + int compaction_index = 0; + char initial_nr_hugepages[20] = {0}; + char nr_hugepages[20] = {0}; ++ char target_nr_hugepages[24] = {0}; ++ int slen; + + /* We want to test with 80% of available memory. Else, OOM killer comes + in to play */ +@@ -119,11 +121,18 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size) + + lseek(fd, 0, SEEK_SET); + +- /* Request a large number of huge pages. The Kernel will allocate +- as much as it can */ +- if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) { +- ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n", +- strerror(errno)); ++ /* ++ * Request huge pages for about half of the free memory. The Kernel ++ * will allocate as much as it can, and we expect it will get at least 1/3 ++ */ ++ nr_hugepages_ul = mem_free / hugepage_size / 2; ++ snprintf(target_nr_hugepages, sizeof(target_nr_hugepages), ++ "%lu", nr_hugepages_ul); ++ ++ slen = strlen(target_nr_hugepages); ++ if (write(fd, target_nr_hugepages, slen) != slen) { ++ ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n", ++ nr_hugepages_ul, strerror(errno)); + goto close_fd; + } + diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.92-93.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.92-93.patch new file mode 100644 index 0000000000..24359d6adb --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.92-93.patch @@ -0,0 +1,20602 @@ +diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd +index 825e619250bf2e..f2ec42949a54d7 100644 +--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd ++++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd +@@ -270,6 +270,12 @@ Description: Shows the operation capability bits displayed in bitmap format + correlates to the operations allowed. It's visible only + on platforms that support the capability. + ++What: /sys/bus/dsa/devices/wq./driver_name ++Date: Sept 8, 2023 ++KernelVersion: 6.7.0 ++Contact: dmaengine@vger.kernel.org ++Description: Name of driver to be bounded to the wq. ++ + What: /sys/bus/dsa/devices/engine./group_id + Date: Oct 25, 2019 + KernelVersion: 5.6.0 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index f95734ceb82b86..315a817e338042 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -5978,6 +5978,8 @@ + + Selecting 'on' will also enable the mitigation + against user space to user space task attacks. ++ Selecting specific mitigation does not force enable ++ user mitigations. + + Selecting 'off' will disable both the kernel and + the user space protections. +diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst +index 84b43061c11be2..60434f2b028637 100644 +--- a/Documentation/driver-api/serial/driver.rst ++++ b/Documentation/driver-api/serial/driver.rst +@@ -103,4 +103,4 @@ Some helpers are provided in order to set/get modem control lines via GPIO. + .. 
kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c + :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod + mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms +- mctrl_gpio_disable_ms ++ mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync +diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst +index d8f1d6859b964b..1c12fbba440bca 100644 +--- a/Documentation/hwmon/dell-smm-hwmon.rst ++++ b/Documentation/hwmon/dell-smm-hwmon.rst +@@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard + =============================== ======= ======================================= + Name Perm Description + =============================== ======= ======================================= +-fan[1-3]_input RO Fan speed in RPM. +-fan[1-3]_label RO Fan label. +-fan[1-3]_min RO Minimal Fan speed in RPM +-fan[1-3]_max RO Maximal Fan speed in RPM +-fan[1-3]_target RO Expected Fan speed in RPM +-pwm[1-3] RW Control the fan PWM duty-cycle. ++fan[1-4]_input RO Fan speed in RPM. ++fan[1-4]_label RO Fan label. ++fan[1-4]_min RO Minimal Fan speed in RPM ++fan[1-4]_max RO Maximal Fan speed in RPM ++fan[1-4]_target RO Expected Fan speed in RPM ++pwm[1-4] RW Control the fan PWM duty-cycle. + pwm1_enable WO Enable or disable automatic BIOS fan + control (not supported on all laptops, + see below for details). +@@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches! + --------------------------- + + The driver also exports the fans as thermal cooling devices with +-``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control ++``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control + using one of the thermal governors. + + Module parameters +diff --git a/Makefile b/Makefile +index 51d975b3555195..c9a1e2286b3a79 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 92 ++SUBLEVEL = 93 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm/boot/dts/nvidia/tegra114.dtsi b/arch/arm/boot/dts/nvidia/tegra114.dtsi +index 86f14e2fd29f3a..6c057b50695140 100644 +--- a/arch/arm/boot/dts/nvidia/tegra114.dtsi ++++ b/arch/arm/boot/dts/nvidia/tegra114.dtsi +@@ -139,7 +139,7 @@ dsib: dsi@54400000 { + reg = <0x54400000 0x00040000>; + clocks = <&tegra_car TEGRA114_CLK_DSIB>, + <&tegra_car TEGRA114_CLK_DSIBLP>, +- <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>; ++ <&tegra_car TEGRA114_CLK_PLL_D_OUT0>; + clock-names = "dsi", "lp", "parent"; + resets = <&tegra_car 82>; + reset-names = "dsi"; +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c +index 22ecaf09d00f96..f635ad29511f6a 100644 +--- a/arch/arm/mach-at91/pm.c ++++ b/arch/arm/mach-at91/pm.c +@@ -538,11 +538,12 @@ extern u32 at91_pm_suspend_in_sram_sz; + + static int at91_suspend_finish(unsigned long val) + { +- unsigned char modified_gray_code[] = { +- 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d, +- 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b, +- 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13, +- 0x10, 0x11, ++ /* SYNOPSYS workaround to fix a bug in the calibration logic */ ++ unsigned char modified_fix_code[] = { ++ 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18, ++ 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13, ++ 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14, ++ 0x1e, 0x1f, + }; + unsigned int tmp, index; + int i; +@@ -553,25 +554,25 @@ static int 
at91_suspend_finish(unsigned long val) + * restore the ZQ0SR0 with the value saved here. But the + * calibration is buggy and restoring some values from ZQ0SR0 + * is forbidden and risky thus we need to provide processed +- * values for these (modified gray code values). ++ * values for these. + */ + tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0); + + /* Store pull-down output impedance select. */ + index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f; +- soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index]; ++ soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF; + + /* Store pull-up output impedance select. */ + index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f; +- soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; ++ soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF; + + /* Store pull-down on-die termination impedance select. */ + index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f; +- soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; ++ soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF; + + /* Store pull-up on-die termination impedance select. */ + index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f; +- soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; ++ soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF; + + /* + * The 1st 8 words of memory might get corrupted in the process +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +index 381d58cea092d9..c854c7e3105196 100644 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +@@ -151,28 +151,12 @@ &pio { + vcc-pg-supply = <®_aldo1>; + }; + +-&r_ir { +- linux,rc-map-name = "rc-beelink-gs1"; +- status = "okay"; +-}; +- +-&r_pio { +- /* +- * FIXME: We can't add that supply for now since it would +- * create a circular dependency between pinctrl, the regulator +- * and the RSB Bus. 
+- * +- * vcc-pl-supply = <®_aldo1>; +- */ +- vcc-pm-supply = <®_aldo1>; +-}; +- +-&r_rsb { ++&r_i2c { + status = "okay"; + +- axp805: pmic@745 { ++ axp805: pmic@36 { + compatible = "x-powers,axp805", "x-powers,axp806"; +- reg = <0x745>; ++ reg = <0x36>; + interrupt-parent = <&r_intc>; + interrupts = ; + interrupt-controller; +@@ -290,6 +274,22 @@ sw { + }; + }; + ++&r_ir { ++ linux,rc-map-name = "rc-beelink-gs1"; ++ status = "okay"; ++}; ++ ++&r_pio { ++ /* ++ * PL0 and PL1 are used for PMIC I2C ++ * don't enable the pl-supply else ++ * it will fail at boot ++ * ++ * vcc-pl-supply = <®_aldo1>; ++ */ ++ vcc-pm-supply = <®_aldo1>; ++}; ++ + &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&spdif_tx_pin>; +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts +index 6fc65e8db22068..8c476e089185b5 100644 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts +@@ -175,16 +175,12 @@ &pio { + vcc-pg-supply = <®_vcc_wifi_io>; + }; + +-&r_ir { +- status = "okay"; +-}; +- +-&r_rsb { ++&r_i2c { + status = "okay"; + +- axp805: pmic@745 { ++ axp805: pmic@36 { + compatible = "x-powers,axp805", "x-powers,axp806"; +- reg = <0x745>; ++ reg = <0x36>; + interrupt-parent = <&r_intc>; + interrupts = ; + interrupt-controller; +@@ -295,6 +291,10 @@ sw { + }; + }; + ++&r_ir { ++ status = "okay"; ++}; ++ + &rtc { + clocks = <&ext_osc32k>; + }; +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi +index 92745128fcfebd..4ec4996592befb 100644 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi +@@ -112,20 +112,12 @@ &pio { + vcc-pg-supply = <®_aldo1>; + }; + +-&r_ir { +- status = "okay"; +-}; +- +-&r_pio { +- vcc-pm-supply = <®_bldo3>; +-}; +- +-&r_rsb { ++&r_i2c { + status = "okay"; + +- axp805: pmic@745 { ++ axp805: pmic@36 { + compatible = "x-powers,axp805", "x-powers,axp806"; +- reg = <0x745>; ++ reg = <0x36>; + interrupt-parent = <&r_intc>; + interrupts = ; + interrupt-controller; +@@ -240,6 +232,14 @@ sw { + }; + }; + ++&r_ir { ++ status = "okay"; ++}; ++ ++&r_pio { ++ vcc-pm-supply = <®_bldo3>; ++}; ++ + &rtc { + clocks = <&ext_osc32k>; + }; +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi +index 3f79923376fb28..37244e8816d9e8 100644 +--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi +@@ -26,6 +26,8 @@ memory@0 { + + leds { + compatible = "gpio-leds"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&spi_quad_pins>; + + led-power1 { + label = "udpu:green:power"; +@@ -82,8 +84,6 @@ &sdhci0 { + + &spi0 { + status = "okay"; +- pinctrl-names = "default"; +- pinctrl-0 = <&spi_quad_pins>; + + flash@0 { + compatible = "jedec,spi-nor"; +@@ -108,6 +108,10 @@ partition@180000 { + }; + }; + ++&spi_quad_pins { ++ function = "gpio"; ++}; ++ + &pinctrl_nb { + i2c2_recovery_pins: i2c2-recovery-pins { + groups = "i2c2"; +diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +index b4a1108c2dd74f..0639f5ce1bd9e4 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi ++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +@@ -1635,7 +1635,7 @@ vdd_1v8_dis: regulator-vdd-1v8-dis { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + 
regulator-always-on; +- gpio = <&exp1 14 GPIO_ACTIVE_HIGH>; ++ gpio = <&exp1 9 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_1v8>; + }; +diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts +index bac611d735c589..2fa48972b2a91f 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts ++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts +@@ -102,6 +102,16 @@ pcie@14160000 { + }; + + pcie@141a0000 { ++ reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */ ++ 0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */ ++ 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */ ++ 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */ ++ 0x2e 0x20000000 0x0 0x10000000>; /* ECAM (256MB) */ ++ ++ ranges = <0x81000000 0x00 0x3a100000 0x00 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */ ++ 0x82000000 0x00 0x40000000 0x2e 0x30000000 0x0 0x08000000 /* non-prefetchable memory (128MB) */ ++ 0xc3000000 0x28 0x00000000 0x28 0x00000000 0x6 0x20000000>; /* prefetchable memory (25088MB) */ ++ + status = "okay"; + vddio-pex-ctl-supply = <&vdd_1v8_ls>; + phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, +diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi +index 8a72ad4afd0320..82e4fd5eb388ff 100644 +--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi +@@ -231,6 +231,8 @@ cryptobam: dma-controller@704000 { + interrupts = ; + #dma-cells = <1>; + qcom,ee = <1>; ++ qcom,num-ees = <4>; ++ num-channels = <16>; + qcom,controlled-remotely; + }; + +diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi +index 2a4d950ac02bfe..5376c0a00fab65 100644 +--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi +@@ -442,7 +442,7 @@ cdsp_secure_heap: memory@80c00000 { + no-map; + }; + +- pil_camera_mem: mmeory@85200000 { ++ pil_camera_mem: memory@85200000 { + reg = <0x0 0x85200000 0x0 0x500000>; + no-map; + }; +diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi +index 3b4d7882300897..c1ed39cac8c5b7 100644 +--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi +@@ -4233,6 +4233,8 @@ cryptobam: dma-controller@1dc4000 { + interrupts = ; + #dma-cells = <1>; + qcom,ee = <0>; ++ qcom,num-ees = <4>; ++ num-channels = <16>; + qcom,controlled-remotely; + iommus = <&apps_smmu 0x584 0x11>, + <&apps_smmu 0x588 0x0>, +diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi +index bc9a1fca2db3ae..c14c6f8583d548 100644 +--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi +@@ -1866,6 +1866,8 @@ cryptobam: dma-controller@1dc4000 { + interrupts = ; + #dma-cells = <1>; + qcom,ee = <0>; ++ qcom,num-ees = <4>; ++ num-channels = <20>; + qcom,controlled-remotely; + iommus = <&apps_smmu 0x480 0x0>, + <&apps_smmu 0x481 0x0>; +diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts +index 5df5946687b348..2e92f4174b3cf0 100644 +--- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts ++++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts +@@ -43,6 +43,17 @@ vusb_main: regulator-vusb-main5v0 { + regulator-boot-on; + }; + ++ vsys_5v0: regulator-vsys5v0 { ++ /* Output of LM61460 */ ++ compatible = "regulator-fixed"; ++ regulator-name = "vsys_5v0"; ++ 
regulator-min-microvolt = <5000000>; ++ regulator-max-microvolt = <5000000>; ++ vin-supply = <&vusb_main>; ++ regulator-always-on; ++ regulator-boot-on; ++ }; ++ + vsys_3v3: regulator-vsys3v3 { + /* Output of LM5141 */ + compatible = "regulator-fixed"; +@@ -75,7 +86,7 @@ vdd_sd_dv: regulator-tlv71033 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; +- vin-supply = <&vsys_3v3>; ++ vin-supply = <&vsys_5v0>; + gpios = <&main_gpio0 49 GPIO_ACTIVE_HIGH>; + states = <1800000 0x0>, + <3300000 0x1>; +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi +index ccaca29200bb93..995bd8ce9d43af 100644 +--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi ++++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi +@@ -10,39 +10,44 @@ + + #include + / { +- pss_ref_clk: pss_ref_clk { ++ pss_ref_clk: pss-ref-clk { + bootph-all; + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <33333333>; ++ clock-output-names = "pss_ref_clk"; + }; + +- video_clk: video_clk { ++ video_clk: video-clk { + bootph-all; + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <27000000>; ++ clock-output-names = "video_clk"; + }; + +- pss_alt_ref_clk: pss_alt_ref_clk { ++ pss_alt_ref_clk: pss-alt-ref-clk { + bootph-all; + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; ++ clock-output-names = "pss_alt_ref_clk"; + }; + +- gt_crx_ref_clk: gt_crx_ref_clk { ++ gt_crx_ref_clk: gt-crx-ref-clk { + bootph-all; + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <108000000>; ++ clock-output-names = "gt_crx_ref_clk"; + }; + +- aux_ref_clk: aux_ref_clk { ++ aux_ref_clk: aux-ref-clk { + bootph-all; + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <27000000>; ++ clock-output-names = "aux_ref_clk"; + }; + }; + +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 8a6b7feca3e428..d92a0203e5a93d 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -132,6 +132,7 @@ + #define FUJITSU_CPU_PART_A64FX 0x001 + + #define HISI_CPU_PART_TSV110 0xD01 ++#define HISI_CPU_PART_HIP09 0xD02 + + #define APPLE_CPU_PART_M1_ICESTORM 0x022 + #define APPLE_CPU_PART_M1_FIRESTORM 0x023 +@@ -208,6 +209,7 @@ + #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) + #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) + #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) ++#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09) + #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) + #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) + #define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO) +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 07bdf5dd8ebef5..0212129b13d074 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -679,7 +679,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) + pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) + + #define pud_none(pud) (!pud_val(pud)) +-#define pud_bad(pud) (!pud_table(pud)) ++#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \ ++ PUD_TYPE_TABLE) + #define pud_present(pud) pte_present(pud_pte(pud)) + 
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) + #define pud_valid(pud) pte_valid(pud_pte(pud)) +diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c +index 28c48bc9c09538..2c81e0efaf378e 100644 +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -904,6 +904,7 @@ static u8 spectre_bhb_loop_affected(void) + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD), ++ MIDR_ALL_VERSIONS(MIDR_HISI_HIP09), + {}, + }; + static const struct midr_range spectre_bhb_k11_list[] = { +diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h +index db497a8167da29..e3212f44446fa9 100644 +--- a/arch/mips/include/asm/ftrace.h ++++ b/arch/mips/include/asm/ftrace.h +@@ -87,4 +87,20 @@ struct dyn_arch_ftrace { + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* __ASSEMBLY__ */ + #endif /* CONFIG_FUNCTION_TRACER */ ++ ++#ifdef CONFIG_FTRACE_SYSCALLS ++#ifndef __ASSEMBLY__ ++/* ++ * Some syscall entry functions on mips start with "__sys_" (fork and clone, ++ * for instance). We should also match the sys_ variant with those. ++ */ ++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME ++static inline bool arch_syscall_match_sym_name(const char *sym, ++ const char *name) ++{ ++ return !strcmp(sym, name) || ++ (!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4)); ++} ++#endif /* __ASSEMBLY__ */ ++#endif /* CONFIG_FTRACE_SYSCALLS */ + #endif /* _ASM_MIPS_FTRACE_H */ +diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c +index 9bf60d7d44d362..a7bcf2b814c865 100644 +--- a/arch/mips/kernel/pm-cps.c ++++ b/arch/mips/kernel/pm-cps.c +@@ -56,10 +56,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); + /* Indicates online CPUs coupled with the current CPU */ + static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); + +-/* +- * Used to synchronize entry to deep idle states. Actually per-core rather +- * than per-CPU. 
+- */ ++/* Used to synchronize entry to deep idle states */ + static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); + + /* Saved CPU state across the CPS_PM_POWER_GATED state */ +@@ -118,9 +115,10 @@ int cps_pm_enter_state(enum cps_pm_state state) + cps_nc_entry_fn entry; + struct core_boot_config *core_cfg; + struct vpe_boot_config *vpe_cfg; ++ atomic_t *barrier; + + /* Check that there is an entry function for this state */ +- entry = per_cpu(nc_asm_enter, core)[state]; ++ entry = per_cpu(nc_asm_enter, cpu)[state]; + if (!entry) + return -EINVAL; + +@@ -156,7 +154,7 @@ int cps_pm_enter_state(enum cps_pm_state state) + smp_mb__after_atomic(); + + /* Create a non-coherent mapping of the core ready_count */ +- core_ready_count = per_cpu(ready_count, core); ++ core_ready_count = per_cpu(ready_count, cpu); + nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), + (unsigned long)core_ready_count); + nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); +@@ -164,7 +162,8 @@ int cps_pm_enter_state(enum cps_pm_state state) + + /* Ensure ready_count is zero-initialised before the assembly runs */ + WRITE_ONCE(*nc_core_ready_count, 0); +- coupled_barrier(&per_cpu(pm_barrier, core), online); ++ barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu])); ++ coupled_barrier(barrier, online); + + /* Run the generated entry code */ + left = entry(online, nc_core_ready_count); +@@ -635,12 +634,14 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) + + static int cps_pm_online_cpu(unsigned int cpu) + { +- enum cps_pm_state state; +- unsigned core = cpu_core(&cpu_data[cpu]); ++ unsigned int sibling, core; + void *entry_fn, *core_rc; ++ enum cps_pm_state state; ++ ++ core = cpu_core(&cpu_data[cpu]); + + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { +- if (per_cpu(nc_asm_enter, core)[state]) ++ if (per_cpu(nc_asm_enter, cpu)[state]) + continue; + if (!test_bit(state, state_support)) + continue; +@@ -652,16 +653,19 @@ static int cps_pm_online_cpu(unsigned int cpu) + clear_bit(state, state_support); + } + +- per_cpu(nc_asm_enter, core)[state] = entry_fn; ++ for_each_cpu(sibling, &cpu_sibling_map[cpu]) ++ per_cpu(nc_asm_enter, sibling)[state] = entry_fn; + } + +- if (!per_cpu(ready_count, core)) { ++ if (!per_cpu(ready_count, cpu)) { + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); + if (!core_rc) { + pr_err("Failed allocate core %u ready_count\n", core); + return -ENOMEM; + } +- per_cpu(ready_count, core) = core_rc; ++ ++ for_each_cpu(sibling, &cpu_sibling_map[cpu]) ++ per_cpu(ready_count, sibling) = core_rc; + } + + return 0; +diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h +index da827d2d08666e..f2c4457c94c397 100644 +--- a/arch/powerpc/include/asm/mmzone.h ++++ b/arch/powerpc/include/asm/mmzone.h +@@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[]; + #ifdef CONFIG_MEMORY_HOTPLUG + extern unsigned long max_pfn; + u64 memory_hotplug_max(void); ++u64 hot_add_drconf_memory_max(void); + #else + #define memory_hotplug_max() memblock_end_of_DRAM() + #endif +diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c +index a6090896f74979..ac669e58e20230 100644 +--- a/arch/powerpc/kernel/prom_init.c ++++ b/arch/powerpc/kernel/prom_init.c +@@ -2974,11 +2974,11 @@ static void __init fixup_device_tree_pmac(void) + char type[8]; + phandle node; + +- // Some pmacs are missing #size-cells on escc nodes ++ // Some pmacs are missing #size-cells on escc or i2s nodes + for (node = 0; 
prom_next_node(&node); ) { + type[0] = '\0'; + prom_getprop(node, "device_type", type, sizeof(type)); +- if (prom_strcmp(type, "escc")) ++ if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s")) + continue; + + if (prom_getproplen(node, "#size-cells") != PROM_ERROR) +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c +index 28460e33408084..aff3b37e32d64e 100644 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c +@@ -912,7 +912,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start, + return 0; + } + +- ++#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP + bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) + { + if (radix_enabled()) +@@ -920,6 +920,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) + + return false; + } ++#endif + + int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, + unsigned long addr, unsigned long next) +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c +index f6c4ace3b22197..65a9df0b9e5a09 100644 +--- a/arch/powerpc/mm/numa.c ++++ b/arch/powerpc/mm/numa.c +@@ -1342,7 +1342,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr) + return nid; + } + +-static u64 hot_add_drconf_memory_max(void) ++u64 hot_add_drconf_memory_max(void) + { + struct device_node *memory = NULL; + struct device_node *dn = NULL; +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c +index 10b946e9c6e756..4bb84dc4393fca 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -2229,6 +2229,10 @@ static struct pmu power_pmu = { + #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ + PERF_SAMPLE_PHYS_ADDR | \ + PERF_SAMPLE_DATA_PAGE_SIZE) ++ ++#define SIER_TYPE_SHIFT 15 ++#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT) ++ + /* + * A counter has overflowed; update its count and record + * things if requested. Note that interrupts are hard-disabled +@@ -2297,6 +2301,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + is_kernel_addr(mfspr(SPRN_SIAR))) + record = 0; + ++ /* ++ * SIER[46-48] presents instruction type of the sampled instruction. ++ * In ISA v3.0 and before values "0" and "7" are considered reserved. ++ * In ISA v3.1, value "7" has been used to indicate "larx/stcx". ++ * Drop the sample if "type" has reserved values for this field with a ++ * ISA version check. ++ */ ++ if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && ++ ppmu->get_mem_data_src) { ++ val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT; ++ if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) { ++ record = 0; ++ atomic64_inc(&event->lost_samples); ++ } ++ } ++ + /* + * Finally record data if requested. 
+ */ +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c +index 56301b2bc8ae87..031a2b63c171dc 100644 +--- a/arch/powerpc/perf/isa207-common.c ++++ b/arch/powerpc/perf/isa207-common.c +@@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, + + sier = mfspr(SPRN_SIER); + val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; +- if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) ++ if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) { ++ dsrc->val = 0; + return; ++ } + + idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; + sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c +index b1e6d275cda9eb..bf02f94a973dbd 100644 +--- a/arch/powerpc/platforms/pseries/iommu.c ++++ b/arch/powerpc/platforms/pseries/iommu.c +@@ -1183,17 +1183,13 @@ static LIST_HEAD(failed_ddw_pdn_list); + + static phys_addr_t ddw_memory_hotplug_max(void) + { +- resource_size_t max_addr = memory_hotplug_max(); +- struct device_node *memory; ++ resource_size_t max_addr; + +- for_each_node_by_type(memory, "memory") { +- struct resource res; +- +- if (of_address_to_resource(memory, 0, &res)) +- continue; +- +- max_addr = max_t(resource_size_t, max_addr, res.end + 1); +- } ++#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) ++ max_addr = hot_add_drconf_memory_max(); ++#else ++ max_addr = memblock_end_of_DRAM(); ++#endif + + return max_addr; + } +@@ -1471,7 +1467,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) + window->direct = true; + + /* DDW maps the whole partition, so enable direct DMA mapping */ +- ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, ++ ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT, + win64->value, tce_setrange_multi_pSeriesLP_walk); + if (ret) { + dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", +@@ -1658,11 +1654,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, + struct memory_notify *arg = data; + int ret = 0; + ++ /* This notifier can get called when onlining persistent memory as well. ++ * TCEs are not pre-mapped for persistent memory. Persistent memory will ++ * always be above ddw_memory_hotplug_max() ++ */ ++ + switch (action) { + case MEM_GOING_ONLINE: + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { +- if (window->direct) { ++ if (window->direct && (arg->start_pfn << PAGE_SHIFT) < ++ ddw_memory_hotplug_max()) { + ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, + arg->nr_pages, window->prop); + } +@@ -1674,7 +1676,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, + case MEM_OFFLINE: + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { +- if (window->direct) { ++ if (window->direct && (arg->start_pfn << PAGE_SHIFT) < ++ ddw_memory_hotplug_max()) { + ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, + arg->nr_pages, window->prop); + } +diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h +index 4d1f58848129e8..dbb9d0d0f405e1 100644 +--- a/arch/riscv/include/asm/page.h ++++ b/arch/riscv/include/asm/page.h +@@ -26,12 +26,9 @@ + * When not using MMU this corresponds to the first free page in + * physical memory (aligned on a page boundary). 
+ */ +-#ifdef CONFIG_64BIT + #ifdef CONFIG_MMU ++#ifdef CONFIG_64BIT + #define PAGE_OFFSET kernel_map.page_offset +-#else +-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +-#endif + /* + * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so + * define the PAGE_OFFSET value for SV48 and SV39. +@@ -41,6 +38,9 @@ + #else + #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) + #endif /* CONFIG_64BIT */ ++#else ++#define PAGE_OFFSET ((unsigned long)phys_ram_base) ++#endif /* CONFIG_MMU */ + + #ifndef __ASSEMBLY__ + +@@ -97,11 +97,7 @@ typedef struct page *pgtable_t; + #define MIN_MEMBLOCK_ADDR 0 + #endif + +-#ifdef CONFIG_MMU + #define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base)) +-#else +-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) +-#endif /* CONFIG_MMU */ + + struct kernel_mapping { + unsigned long page_offset; +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h +index f540b2625714d0..332a6bf72b1d54 100644 +--- a/arch/riscv/include/asm/pgtable.h ++++ b/arch/riscv/include/asm/pgtable.h +@@ -12,7 +12,7 @@ + #include + + #ifndef CONFIG_MMU +-#define KERNEL_LINK_ADDR PAGE_OFFSET ++#define KERNEL_LINK_ADDR _AC(CONFIG_PAGE_OFFSET, UL) + #define KERN_VIRT_SIZE (UL(-1)) + #else + +diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c +index 00a6d370a28032..280266a74f378d 100644 +--- a/arch/s390/hypfs/hypfs_diag_fs.c ++++ b/arch/s390/hypfs/hypfs_diag_fs.c +@@ -208,6 +208,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info) + snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(), + cpu_info)); + cpu_dir = hypfs_mkdir(cpus_dir, buffer); ++ if (IS_ERR(cpu_dir)) ++ return PTR_ERR(cpu_dir); + rc = hypfs_create_u64(cpu_dir, "mgmtime", + cpu_info__acc_time(diag204_get_info_type(), cpu_info) - + cpu_info__lp_time(diag204_get_info_type(), cpu_info)); +diff --git a/arch/um/Makefile b/arch/um/Makefile +index 34957dcb88b9c3..744c5d0bdeb8fc 100644 +--- a/arch/um/Makefile ++++ b/arch/um/Makefile +@@ -151,5 +151,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated + archclean: + @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ + -o -name '*.gcov' \) -type f -print | xargs rm -f ++ $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean + + export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH +diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c +index 38d5a71a579bcb..f6c766b2bdf5e4 100644 +--- a/arch/um/kernel/mem.c ++++ b/arch/um/kernel/mem.c +@@ -68,6 +68,7 @@ void __init mem_init(void) + map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); + memblock_free((void *)brk_end, uml_reserved - brk_end); + uml_reserved = brk_end; ++ min_low_pfn = PFN_UP(__pa(uml_reserved)); + + /* this will put all low memory onto the freelists */ + memblock_free_all(); +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index c83582b5a010de..6d593fb85a9e93 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -43,7 +43,7 @@ endif + + # How to compile the 16-bit code. Note we always compile for -march=i386; + # that way we can complain to the user if the CPU is insufficient. 
+-REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ ++REALMODE_CFLAGS := -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ + -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ + -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) +diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh +index c9299aeb7333e6..3882ead513f742 100644 +--- a/arch/x86/boot/genimage.sh ++++ b/arch/x86/boot/genimage.sh +@@ -22,6 +22,7 @@ + # This script requires: + # bash + # syslinux ++# genisoimage + # mtools (for fdimage* and hdimage) + # edk2/OVMF (for hdimage) + # +@@ -251,7 +252,9 @@ geniso() { + cp "$isolinux" "$ldlinux" "$tmp_dir" + cp "$FBZIMAGE" "$tmp_dir"/linux + echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg +- cp "${FDINITRDS[@]}" "$tmp_dir"/ ++ if [ ${#FDINITRDS[@]} -gt 0 ]; then ++ cp "${FDINITRDS[@]}" "$tmp_dir"/ ++ fi + genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \ + -quiet -o "$FIMAGE" -b isolinux.bin \ + -c boot.cat -no-emul-boot -boot-load-size 4 \ +diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S +index 78fd2442b49dcd..ad292c0d971a3f 100644 +--- a/arch/x86/entry/entry.S ++++ b/arch/x86/entry/entry.S +@@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(mds_verw_sel); + * entirely in the C code, and use an alias emitted by the linker script + * instead. + */ +-#ifdef CONFIG_STACKPROTECTOR ++#if defined(CONFIG_STACKPROTECTOR) && defined(CONFIG_SMP) + EXPORT_SYMBOL(__ref_stack_chk_guard); + #endif + #endif +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c +index f483874fa20f19..fac3d97111b098 100644 +--- a/arch/x86/events/amd/ibs.c ++++ b/arch/x86/events/amd/ibs.c +@@ -272,7 +272,7 @@ static int perf_ibs_init(struct perf_event *event) + { + struct hw_perf_event *hwc = &event->hw; + struct perf_ibs *perf_ibs; +- u64 max_cnt, config; ++ u64 config; + int ret; + + perf_ibs = get_ibs_pmu(event->attr.type); +@@ -306,10 +306,19 @@ static int perf_ibs_init(struct perf_event *event) + if (!hwc->sample_period) + hwc->sample_period = 0x10; + } else { +- max_cnt = config & perf_ibs->cnt_mask; ++ u64 period = 0; ++ ++ if (perf_ibs == &perf_ibs_op) { ++ period = (config & IBS_OP_MAX_CNT) << 4; ++ if (ibs_caps & IBS_CAPS_OPCNTEXT) ++ period |= config & IBS_OP_MAX_CNT_EXT_MASK; ++ } else { ++ period = (config & IBS_FETCH_MAX_CNT) << 4; ++ } ++ + config &= ~perf_ibs->cnt_mask; +- event->attr.sample_period = max_cnt << 4; +- hwc->sample_period = event->attr.sample_period; ++ event->attr.sample_period = period; ++ hwc->sample_period = period; + } + + if (!hwc->sample_period) +@@ -1219,7 +1228,8 @@ static __init int perf_ibs_op_init(void) + if (ibs_caps & IBS_CAPS_OPCNTEXT) { + perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK; +- perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; ++ perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK | ++ IBS_OP_CUR_CNT_EXT_MASK); + } + + if (ibs_caps & IBS_CAPS_ZEN4) +diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h +index 806649c7f23dc6..9a0f29be1a9ea6 100644 +--- a/arch/x86/include/asm/bug.h ++++ b/arch/x86/include/asm/bug.h +@@ -22,8 +22,9 @@ + #define SECOND_BYTE_OPCODE_UD2 0x0b + + #define BUG_NONE 0xffff +-#define BUG_UD1 0xfffe +-#define BUG_UD2 0xfffd ++#define BUG_UD2 0xfffe ++#define BUG_UD1 0xfffd ++#define BUG_UD1_UBSAN 0xfffc + + #ifdef CONFIG_GENERIC_BUG + +diff --git a/arch/x86/include/asm/ibt.h b/arch/x86/include/asm/ibt.h +index 
1e59581d500ca9..b778ae6e67ee8c 100644 +--- a/arch/x86/include/asm/ibt.h ++++ b/arch/x86/include/asm/ibt.h +@@ -41,7 +41,7 @@ + _ASM_PTR fname "\n\t" \ + ".popsection\n\t" + +-static inline __attribute_const__ u32 gen_endbr(void) ++static __always_inline __attribute_const__ u32 gen_endbr(void) + { + u32 endbr; + +@@ -56,7 +56,7 @@ static inline __attribute_const__ u32 gen_endbr(void) + return endbr; + } + +-static inline __attribute_const__ u32 gen_endbr_poison(void) ++static __always_inline __attribute_const__ u32 gen_endbr_poison(void) + { + /* + * 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it +diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h +index 5c5f1e56c4048d..6f3d145670a957 100644 +--- a/arch/x86/include/asm/nmi.h ++++ b/arch/x86/include/asm/nmi.h +@@ -59,6 +59,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *); + + void unregister_nmi_handler(unsigned int, const char *); + ++void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler); ++ + void stop_nmi(void); + void restart_nmi(void); + void local_touch_nmi(void); +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h +index 384e8a7db4827b..ba2a3935dc624d 100644 +--- a/arch/x86/include/asm/perf_event.h ++++ b/arch/x86/include/asm/perf_event.h +@@ -501,6 +501,7 @@ struct pebs_xmm { + */ + #define IBS_OP_CUR_CNT (0xFFF80ULL<<32) + #define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) ++#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52) + #define IBS_OP_CNT_CTL (1ULL<<19) + #define IBS_OP_VAL (1ULL<<18) + #define IBS_OP_ENABLE (1ULL<<17) +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 07b45bbf6348de..e9c4bcb38f4586 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1442,9 +1442,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; + static enum spectre_v2_user_cmd __init + spectre_v2_parse_user_cmdline(void) + { ++ enum spectre_v2_user_cmd mode; + char arg[20]; + int ret, i; + ++ mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? ++ SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; ++ + switch (spectre_v2_cmd) { + case SPECTRE_V2_CMD_NONE: + return SPECTRE_V2_USER_CMD_NONE; +@@ -1457,7 +1461,7 @@ spectre_v2_parse_user_cmdline(void) + ret = cmdline_find_option(boot_command_line, "spectre_v2_user", + arg, sizeof(arg)); + if (ret < 0) +- return SPECTRE_V2_USER_CMD_AUTO; ++ return mode; + + for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { + if (match_option(arg, ret, v2_user_options[i].option)) { +@@ -1467,8 +1471,8 @@ spectre_v2_parse_user_cmdline(void) + } + } + +- pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); +- return SPECTRE_V2_USER_CMD_AUTO; ++ pr_err("Unknown user space protection option (%s). 
Switching to default\n", arg); ++ return mode; + } + + static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 6da2cfa23c2939..35fd5f1444fdb4 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -39,8 +39,12 @@ + #define CREATE_TRACE_POINTS + #include + ++/* ++ * An emergency handler can be set in any context including NMI ++ */ + struct nmi_desc { + raw_spinlock_t lock; ++ nmi_handler_t emerg_handler; + struct list_head head; + }; + +@@ -131,9 +135,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) + static int nmi_handle(unsigned int type, struct pt_regs *regs) + { + struct nmi_desc *desc = nmi_to_desc(type); ++ nmi_handler_t ehandler; + struct nmiaction *a; + int handled=0; + ++ /* ++ * Call the emergency handler, if set ++ * ++ * In the case of crash_nmi_callback() emergency handler, it will ++ * return in the case of the crashing CPU to enable it to complete ++ * other necessary crashing actions ASAP. Other handlers in the ++ * linked list won't need to be run. ++ */ ++ ehandler = desc->emerg_handler; ++ if (ehandler) ++ return ehandler(type, regs); ++ + rcu_read_lock(); + + /* +@@ -223,6 +240,31 @@ void unregister_nmi_handler(unsigned int type, const char *name) + } + EXPORT_SYMBOL_GPL(unregister_nmi_handler); + ++/** ++ * set_emergency_nmi_handler - Set emergency handler ++ * @type: NMI type ++ * @handler: the emergency handler to be stored ++ * ++ * Set an emergency NMI handler which, if set, will preempt all the other ++ * handlers in the linked list. If a NULL handler is passed in, it will clear ++ * it. It is expected that concurrent calls to this function will not happen ++ * or the system is screwed beyond repair. ++ */ ++void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler) ++{ ++ struct nmi_desc *desc = nmi_to_desc(type); ++ ++ if (WARN_ON_ONCE(desc->emerg_handler == handler)) ++ return; ++ desc->emerg_handler = handler; ++ ++ /* ++ * Ensure the emergency handler is visible to other CPUs before ++ * function return ++ */ ++ smp_wmb(); ++} ++ + static void + pci_serr_error(unsigned char reason, struct pt_regs *regs) + { +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index 830425e6d38e2f..456e61070a730b 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -908,15 +908,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) + shootdown_callback = callback; + + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); +- /* Would it be better to replace the trap vector here? */ +- if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback, +- NMI_FLAG_FIRST, "crash")) +- return; /* Return what? */ ++ + /* +- * Ensure the new callback function is set before sending +- * out the NMI ++ * Set emergency handler to preempt other handlers. + */ +- wmb(); ++ set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback); + + apic_send_IPI_allbutself(NMI_VECTOR); + +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index d8d9bc5a9b3280..8718d58dd0fbea 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -92,10 +92,17 @@ __always_inline int is_valid_bugaddr(unsigned long addr) + + /* + * Check for UD1 or UD2, accounting for Address Size Override Prefixes. +- * If it's a UD1, get the ModRM byte to pass along to UBSan. 
++ * If it's a UD1, further decode to determine its use: ++ * ++ * UBSan{0}: 67 0f b9 00 ud1 (%eax),%eax ++ * UBSan{10}: 67 0f b9 40 10 ud1 0x10(%eax),%eax ++ * static_call: 0f b9 cc ud1 %esp,%ecx ++ * ++ * Notably UBSAN uses EAX, static_call uses ECX. + */ +-__always_inline int decode_bug(unsigned long addr, u32 *imm) ++__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len) + { ++ unsigned long start = addr; + u8 v; + + if (addr < TASK_SIZE_MAX) +@@ -108,24 +115,42 @@ __always_inline int decode_bug(unsigned long addr, u32 *imm) + return BUG_NONE; + + v = *(u8 *)(addr++); +- if (v == SECOND_BYTE_OPCODE_UD2) ++ if (v == SECOND_BYTE_OPCODE_UD2) { ++ *len = addr - start; + return BUG_UD2; ++ } + +- if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1) ++ if (v != SECOND_BYTE_OPCODE_UD1) + return BUG_NONE; + +- /* Retrieve the immediate (type value) for the UBSAN UD1 */ +- v = *(u8 *)(addr++); +- if (X86_MODRM_RM(v) == 4) +- addr++; +- + *imm = 0; +- if (X86_MODRM_MOD(v) == 1) +- *imm = *(u8 *)addr; +- else if (X86_MODRM_MOD(v) == 2) +- *imm = *(u32 *)addr; +- else +- WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v)); ++ v = *(u8 *)(addr++); /* ModRM */ ++ ++ if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4) ++ addr++; /* SIB */ ++ ++ /* Decode immediate, if present */ ++ switch (X86_MODRM_MOD(v)) { ++ case 0: if (X86_MODRM_RM(v) == 5) ++ addr += 4; /* RIP + disp32 */ ++ break; ++ ++ case 1: *imm = *(s8 *)addr; ++ addr += 1; ++ break; ++ ++ case 2: *imm = *(s32 *)addr; ++ addr += 4; ++ break; ++ ++ case 3: break; ++ } ++ ++ /* record instruction length */ ++ *len = addr - start; ++ ++ if (X86_MODRM_REG(v) == 0) /* EAX */ ++ return BUG_UD1_UBSAN; + + return BUG_UD1; + } +@@ -256,10 +281,10 @@ static inline void handle_invalid_op(struct pt_regs *regs) + static noinstr bool handle_bug(struct pt_regs *regs) + { + bool handled = false; +- int ud_type; +- u32 imm; ++ int ud_type, ud_len; ++ s32 ud_imm; + +- ud_type = decode_bug(regs->ip, &imm); ++ ud_type = decode_bug(regs->ip, &ud_imm, &ud_len); + if (ud_type == BUG_NONE) + return handled; + +@@ -279,15 +304,28 @@ static noinstr bool handle_bug(struct pt_regs *regs) + */ + if (regs->flags & X86_EFLAGS_IF) + raw_local_irq_enable(); +- if (ud_type == BUG_UD2) { ++ ++ switch (ud_type) { ++ case BUG_UD2: + if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN || + handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) { +- regs->ip += LEN_UD2; ++ regs->ip += ud_len; + handled = true; + } +- } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) { +- pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip); ++ break; ++ ++ case BUG_UD1_UBSAN: ++ if (IS_ENABLED(CONFIG_UBSAN_TRAP)) { ++ pr_crit("%s at %pS\n", ++ report_ubsan_failure(regs, ud_imm), ++ (void *)regs->ip); ++ } ++ break; ++ ++ default: ++ break; + } ++ + if (regs->flags & X86_EFLAGS_IF) + raw_local_irq_disable(); + instrumentation_end(); +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 71d29dd7ad761e..6cbb5974e4f9ea 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -644,8 +644,13 @@ static void __init memory_map_top_down(unsigned long map_start, + */ + addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, + map_end); +- memblock_phys_free(addr, PMD_SIZE); +- real_end = addr + PMD_SIZE; ++ if (!addr) { ++ pr_warn("Failed to release memory for alloc_low_pages()"); ++ real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE)); ++ } else { ++ memblock_phys_free(addr, PMD_SIZE); ++ real_end = addr + PMD_SIZE; ++ } + + /* 
step_size need to be small so pgt_buf from BRK could cover it */ + step_size = PMD_SIZE; +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index aa69353da49f24..11eb93e13ce175 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + ret = __add_pages(nid, start_pfn, nr_pages, params); + WARN_ON_ONCE(ret); + +- /* update max_pfn, max_low_pfn and high_memory */ +- update_end_of_memory_vars(start_pfn << PAGE_SHIFT, +- nr_pages << PAGE_SHIFT); ++ /* ++ * Special case: add_pages() is called by memremap_pages() for adding device ++ * private pages. Do not bump up max_pfn in the device private path, ++ * because max_pfn changes affect dma_addressing_limited(). ++ * ++ * dma_addressing_limited() returning true when max_pfn is the device's ++ * addressable memory can force device drivers to use bounce buffers ++ * and impact their performance negatively: ++ */ ++ if (!params->pgmap) ++ /* update max_pfn, max_low_pfn and high_memory */ ++ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); + + return ret; + } +diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c +index 230f1dee4f0954..e0b0ec0f824574 100644 +--- a/arch/x86/mm/kaslr.c ++++ b/arch/x86/mm/kaslr.c +@@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void) + memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; + +- /* Adapt physical memory region size based on available memory */ +- if (memory_tb < kaslr_regions[0].size_tb) ++ /* ++ * Adapt physical memory region size based on available memory, ++ * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the ++ * device BAR space assuming the direct map space is large enough ++ * for creating a ZONE_DEVICE mapping in the direct map corresponding ++ * to the physical BAR address. 
++ */ ++ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb)) + kaslr_regions[0].size_tb = memory_tb; + + /* +diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c +index 49c3744cac371b..81b9d1f9f4e68b 100644 +--- a/arch/x86/um/os-Linux/mcontext.c ++++ b/arch/x86/um/os-Linux/mcontext.c +@@ -26,7 +26,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc) + COPY(RIP); + COPY2(EFLAGS, EFL); + COPY2(CS, CSGSFS); +- regs->gp[CS / sizeof(unsigned long)] &= 0xffff; +- regs->gp[CS / sizeof(unsigned long)] |= 3; ++ regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48; + #endif + } +diff --git a/crypto/ahash.c b/crypto/ahash.c +index 709ef094079913..6168f3532f552a 100644 +--- a/crypto/ahash.c ++++ b/crypto/ahash.c +@@ -427,6 +427,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) + hash->setkey = ahash_nosetkey; + + crypto_ahash_set_statesize(hash, alg->halg.statesize); ++ crypto_ahash_set_reqsize(hash, alg->reqsize); + + if (tfm->__crt_alg->cra_type != &crypto_ahash_type) + return crypto_init_shash_ops_async(tfm); +@@ -599,6 +600,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg) + if (alg->halg.statesize == 0) + return -EINVAL; + ++ if (alg->reqsize && alg->reqsize < alg->halg.statesize) ++ return -EINVAL; ++ + err = hash_prepare_alg(&alg->halg); + if (err) + return err; +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index e24c829d7a0154..5ab7441734b8e0 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags, + goto out_free_state; + + err = crypto_ahash_import(&ctx2->req, state); +- if (err) { +- sock_orphan(sk2); +- sock_put(sk2); +- } + + out_free_state: + kfree_sensitive(state); +diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c +index 0631d975bfac11..0abc2d87f04200 100644 +--- a/crypto/lzo-rle.c ++++ b/crypto/lzo-rle.c +@@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, + size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ + int err; + +- err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); ++ err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); + + if (err != LZO_E_OK) + return -EINVAL; +diff --git a/crypto/lzo.c b/crypto/lzo.c +index ebda132dd22bf5..8338851c7406a3 100644 +--- a/crypto/lzo.c ++++ b/crypto/lzo.c +@@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen, + size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ + int err; + +- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); ++ err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); + + if (err != LZO_E_OK) + return -EINVAL; +diff --git a/crypto/skcipher.c b/crypto/skcipher.c +index 7b275716cf4e3a..acc879ed6031a9 100644 +--- a/crypto/skcipher.c ++++ b/crypto/skcipher.c +@@ -811,6 +811,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher( + + /* Only sync algorithms allowed. 
*/ + mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE; ++ type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE); + + tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask); + +diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c +index b5de82e6eb4d56..e69bfb30b44e07 100644 +--- a/drivers/accel/qaic/qaic_drv.c ++++ b/drivers/accel/qaic/qaic_drv.c +@@ -400,7 +400,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev) + int bars; + int ret; + +- bars = pci_select_bars(pdev, IORESOURCE_MEM); ++ bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f; + + /* make sure the device has the expected BARs */ + if (bars != (BIT(0) | BIT(2) | BIT(4))) { +diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig +index cee82b473dc509..648228831f5e87 100644 +--- a/drivers/acpi/Kconfig ++++ b/drivers/acpi/Kconfig +@@ -438,7 +438,7 @@ config ACPI_SBS + the modules will be called sbs and sbshc. + + config ACPI_HED +- tristate "Hardware Error Device" ++ bool "Hardware Error Device" + help + This driver supports the Hardware Error Device (PNP0C33), + which is used to report some hardware errors notified via +diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c +index 01abf26764b00c..3f5a1840f57330 100644 +--- a/drivers/acpi/acpi_pnp.c ++++ b/drivers/acpi/acpi_pnp.c +@@ -355,8 +355,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc + * device represented by it. + */ + static const struct acpi_device_id acpi_nonpnp_device_ids[] = { ++ {"INT3F0D"}, + {"INTC1080"}, + {"INTC1081"}, ++ {"INTC1099"}, + {""}, + }; + +diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c +index 46c6f8c35b4368..2e01eaa8d8cd51 100644 +--- a/drivers/acpi/hed.c ++++ b/drivers/acpi/hed.c +@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = { + .remove = acpi_hed_remove, + }, + }; +-module_acpi_driver(acpi_hed_driver); ++ ++static int __init acpi_hed_driver_init(void) ++{ ++ return acpi_bus_register_driver(&acpi_hed_driver); ++} ++subsys_initcall(acpi_hed_driver_init); + + MODULE_AUTHOR("Huang Ying"); + MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); +diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c +index 6d309e4971b617..e243291a7e77c9 100644 +--- a/drivers/auxdisplay/charlcd.c ++++ b/drivers/auxdisplay/charlcd.c +@@ -594,18 +594,19 @@ static int charlcd_init(struct charlcd *lcd) + return 0; + } + +-struct charlcd *charlcd_alloc(void) ++struct charlcd *charlcd_alloc(unsigned int drvdata_size) + { + struct charlcd_priv *priv; + struct charlcd *lcd; + +- priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL); + if (!priv) + return NULL; + + priv->esc_seq.len = -1; + + lcd = &priv->lcd; ++ lcd->drvdata = priv->drvdata; + + return lcd; + } +diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h +index eed80063a6d20d..4bbf106b2dd8a2 100644 +--- a/drivers/auxdisplay/charlcd.h ++++ b/drivers/auxdisplay/charlcd.h +@@ -49,7 +49,7 @@ struct charlcd { + unsigned long y; + } addr; + +- void *drvdata; ++ void *drvdata; /* Set by charlcd_alloc() */ + }; + + /** +@@ -93,7 +93,8 @@ struct charlcd_ops { + }; + + void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on); +-struct charlcd *charlcd_alloc(void); ++ ++struct charlcd *charlcd_alloc(unsigned int drvdata_size); + void charlcd_free(struct charlcd *lcd); + + int charlcd_register(struct charlcd *lcd); +diff --git a/drivers/auxdisplay/hd44780.c 
b/drivers/auxdisplay/hd44780.c +index 8b690f59df27d6..ebaf0ff518f4c2 100644 +--- a/drivers/auxdisplay/hd44780.c ++++ b/drivers/auxdisplay/hd44780.c +@@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev) + if (!hdc) + return -ENOMEM; + +- lcd = charlcd_alloc(); ++ lcd = charlcd_alloc(0); + if (!lcd) + goto fail1; + +diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c +index 6422be0dfe20e6..0ecf6a9469f24c 100644 +--- a/drivers/auxdisplay/lcd2s.c ++++ b/drivers/auxdisplay/lcd2s.c +@@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c) + if (err < 0) + return err; + +- lcd = charlcd_alloc(); ++ lcd = charlcd_alloc(0); + if (!lcd) + return -ENOMEM; + +diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c +index eba04c0de7eb3f..0f3999b665e70f 100644 +--- a/drivers/auxdisplay/panel.c ++++ b/drivers/auxdisplay/panel.c +@@ -835,7 +835,7 @@ static void lcd_init(void) + if (!hdc) + return; + +- charlcd = charlcd_alloc(); ++ charlcd = charlcd_alloc(0); + if (!charlcd) { + kfree(hdc); + return; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index d6195565ef7aeb..e0dd6988960883 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -3525,9 +3525,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev) + static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + { + int ret = 0; ++ unsigned int skip = 0; + u8 pkt_type; +- u8 *sk_ptr; +- unsigned int sk_len; + u16 seqno; + u32 dump_size; + +@@ -3536,18 +3535,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + struct usb_device *udev = btdata->udev; + + pkt_type = hci_skb_pkt_type(skb); +- sk_ptr = skb->data; +- sk_len = skb->len; ++ skip = sizeof(struct hci_event_hdr); ++ if (pkt_type == HCI_ACLDATA_PKT) ++ skip += sizeof(struct hci_acl_hdr); + +- if (pkt_type == HCI_ACLDATA_PKT) { +- sk_ptr += HCI_ACL_HDR_SIZE; +- sk_len -= HCI_ACL_HDR_SIZE; +- } +- +- sk_ptr += HCI_EVENT_HDR_SIZE; +- sk_len -= HCI_EVENT_HDR_SIZE; ++ skb_pull(skb, skip); ++ dump_hdr = (struct qca_dump_hdr *)skb->data; + +- dump_hdr = (struct qca_dump_hdr *)sk_ptr; + seqno = le16_to_cpu(dump_hdr->seqno); + if (seqno == 0) { + set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); +@@ -3567,16 +3561,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + + btdata->qca_dump.ram_dump_size = dump_size; + btdata->qca_dump.ram_dump_seqno = 0; +- sk_ptr += offsetof(struct qca_dump_hdr, data0); +- sk_len -= offsetof(struct qca_dump_hdr, data0); ++ ++ skb_pull(skb, offsetof(struct qca_dump_hdr, data0)); + + usb_disable_autosuspend(udev); + bt_dev_info(hdev, "%s memdump size(%u)\n", + (pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event", + dump_size); + } else { +- sk_ptr += offsetof(struct qca_dump_hdr, data); +- sk_len -= offsetof(struct qca_dump_hdr, data); ++ skb_pull(skb, offsetof(struct qca_dump_hdr, data)); + } + + if (!btdata->qca_dump.ram_dump_size) { +@@ -3596,7 +3589,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + return ret; + } + +- skb_pull(skb, skb->len - sk_len); + hci_devcd_append(hdev, skb); + btdata->qca_dump.ram_dump_seqno++; + if (seqno == QCA_LAST_SEQUENCE_NUM) { +@@ -3624,68 +3616,58 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + /* Return: true if the ACL packet is a dump packet, false otherwise. 
*/ + static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- u8 *sk_ptr; +- unsigned int sk_len; +- + struct hci_event_hdr *event_hdr; + struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; ++ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); ++ bool is_dump = false; + +- sk_ptr = skb->data; +- sk_len = skb->len; +- +- acl_hdr = hci_acl_hdr(skb); +- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) ++ if (!clone) + return false; + +- sk_ptr += HCI_ACL_HDR_SIZE; +- sk_len -= HCI_ACL_HDR_SIZE; +- event_hdr = (struct hci_event_hdr *)sk_ptr; +- +- if ((event_hdr->evt != HCI_VENDOR_PKT) || +- (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) +- return false; ++ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr)); ++ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)) ++ goto out; + +- sk_ptr += HCI_EVENT_HDR_SIZE; +- sk_len -= HCI_EVENT_HDR_SIZE; ++ event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); ++ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) ++ goto out; + +- dump_hdr = (struct qca_dump_hdr *)sk_ptr; +- if ((sk_len < offsetof(struct qca_dump_hdr, data)) || +- (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || +- (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) +- return false; ++ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); ++ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || ++ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) ++ goto out; + +- return true; ++ is_dump = true; ++out: ++ consume_skb(clone); ++ return is_dump; + } + + /* Return: true if the event packet is a dump packet, false otherwise. */ + static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- u8 *sk_ptr; +- unsigned int sk_len; +- + struct hci_event_hdr *event_hdr; + struct qca_dump_hdr *dump_hdr; ++ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); ++ bool is_dump = false; + +- sk_ptr = skb->data; +- sk_len = skb->len; +- +- event_hdr = hci_event_hdr(skb); +- +- if ((event_hdr->evt != HCI_VENDOR_PKT) +- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) ++ if (!clone) + return false; + +- sk_ptr += HCI_EVENT_HDR_SIZE; +- sk_len -= HCI_EVENT_HDR_SIZE; ++ event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); ++ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) ++ goto out; + +- dump_hdr = (struct qca_dump_hdr *)sk_ptr; +- if ((sk_len < offsetof(struct qca_dump_hdr, data)) || +- (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || +- (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) +- return false; ++ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); ++ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || ++ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) ++ goto out; + +- return true; ++ is_dump = true; ++out: ++ consume_skb(clone); ++ return is_dump; + } + + static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c +index 38c456540d1b98..337144570fafa2 100644 +--- a/drivers/clk/clk-s2mps11.c ++++ b/drivers/clk/clk-s2mps11.c +@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev) + if (!clk_data) + return -ENOMEM; + ++ clk_data->num = S2MPS11_CLKS_NUM; ++ + switch (hwid) { + case S2MPS11X: + s2mps11_reg = S2MPS11_REG_RTC_CTRL; +@@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev) + clk_data->hws[i] = &s2mps11_clks[i].hw; + } + +- clk_data->num = S2MPS11_CLKS_NUM; + of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get, + clk_data); + +diff 
--git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c +index 747f5397692e5f..2a0804dd4b8462 100644 +--- a/drivers/clk/imx/clk-imx8mp.c ++++ b/drivers/clk/imx/clk-imx8mp.c +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -406,11 +407,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_ + static struct clk_hw **hws; + static struct clk_hw_onecell_data *clk_hw_data; + ++struct imx8mp_clock_constraints { ++ unsigned int clkid; ++ u32 maxrate; ++}; ++ ++/* ++ * Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023 ++ * Table 13. Maximum frequency of modules. ++ * Probable typos fixed are marked with a comment. ++ */ ++static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = { ++ { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */ ++ { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */ ++ { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */ ++ { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_AHB, 133333333 }, ++ { IMX8MP_CLK_IPG_ROOT, 66666667 }, ++ { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_DRAM_ALT, 666666667 }, ++ { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_IPP_DO_CLKO2, 200 * 
HZ_PER_MHZ }, ++ { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */ ++ { IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ }, ++ { /* Sentinel */ } ++}; ++ ++static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = { ++ { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */ ++ { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ }, ++ { /* Sentinel */ } ++}; ++ ++static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = { ++ { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ}, ++ { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */ ++ { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ }, ++ { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ }, ++ { /* Sentinel */ } ++}; ++ ++static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[]) ++{ ++ const struct imx8mp_clock_constraints *constr; ++ ++ for (constr = constraints; constr->clkid; constr++) ++ clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate); ++} ++ + static int imx8mp_clocks_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + struct device_node *np; + void __iomem *anatop_base, *ccm_base; ++ const char *opmode; + int err; + + np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop"); +@@ -715,6 +856,16 @@ static int imx8mp_clocks_probe(struct 
platform_device *pdev) + + imx_check_clk_hws(hws, IMX8MP_CLK_END); + ++ imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints); ++ ++ err = of_property_read_string(np, "fsl,operating-mode", &opmode); ++ if (!err) { ++ if (!strcmp(opmode, "nominal")) ++ imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints); ++ else if (!strcmp(opmode, "overdrive")) ++ imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints); ++ } ++ + err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); + if (err < 0) { + dev_err(dev, "failed to register hws for i.MX8MP\n"); +diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig +index 1de1661037b1b1..95cbea8d380c37 100644 +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -148,7 +148,7 @@ config IPQ_GCC_4019 + + config IPQ_GCC_5018 + tristate "IPQ5018 Global Clock Controller" +- depends on ARM64 || COMPILE_TEST ++ depends on ARM || ARM64 || COMPILE_TEST + help + Support for global clock controller on ipq5018 devices. + Say Y if you want to use peripheral devices such as UART, SPI, +diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c +index 9b32c56a5bc5af..e29706d7828707 100644 +--- a/drivers/clk/qcom/camcc-sm8250.c ++++ b/drivers/clk/qcom/camcc-sm8250.c +@@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = 
ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .parent_data = cam_cc_parent_data_2, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = { + .parent_data = cam_cc_parent_data_4, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ 
-862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = { + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = { + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +@@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_shared_ops, + }, + }; + +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c +index 80aadafffacdb1..732ca46703ba30 100644 +--- a/drivers/clk/qcom/clk-alpha-pll.c ++++ b/drivers/clk/qcom/clk-alpha-pll.c +@@ -645,14 +645,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 alpha_width = pll_alpha_width(pll); + +- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); ++ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) ++ return 0; ++ ++ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) ++ return 0; + +- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); + if (ctl & PLL_ALPHA_EN) { +- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low); ++ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low)) ++ return 0; + if (alpha_width > 32) { +- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), +- &high); ++ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), ++ &high)) ++ return 0; + a = (u64)high << 32 | low; + } else { + a = low & GENMASK(alpha_width - 1, 0); +@@ -844,8 +849,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 l, alpha = 0, ctl, alpha_m, alpha_n; + +- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); +- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); ++ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) ++ return 0; ++ ++ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) ++ return 0; + + if (ctl & PLL_ALPHA_EN) { + regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha); +@@ -1039,8 +1047,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 l, frac, alpha_width = pll_alpha_width(pll); + +- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); +- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac); ++ if (regmap_read(pll->clkr.regmap, 
PLL_L_VAL(pll), &l)) ++ return 0; ++ ++ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac)) ++ return 0; + + return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); + } +@@ -1098,7 +1109,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) + struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); + u32 ctl; + +- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); ++ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) ++ return 0; + + ctl >>= PLL_POST_DIV_SHIFT; + ctl &= PLL_POST_DIV_MASK(pll); +@@ -1314,8 +1326,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw, + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 l, frac, alpha_width = pll_alpha_width(pll); + +- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); +- regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac); ++ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) ++ return 0; ++ ++ if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac)) ++ return 0; + + return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); + } +@@ -1465,7 +1480,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) + struct regmap *regmap = pll->clkr.regmap; + u32 i, div = 1, val; + +- regmap_read(regmap, PLL_USER_CTL(pll), &val); ++ if (regmap_read(regmap, PLL_USER_CTL(pll), &val)) ++ return 0; + + val >>= pll->post_div_shift; + val &= PLL_POST_DIV_MASK(pll); +@@ -2339,9 +2355,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw, + struct regmap *regmap = pll->clkr.regmap; + u32 l, frac; + +- regmap_read(regmap, PLL_L_VAL(pll), &l); ++ if (regmap_read(regmap, PLL_L_VAL(pll), &l)) ++ return 0; + l &= LUCID_EVO_PLL_L_VAL_MASK; +- regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac); ++ ++ if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac)) ++ return 0; + + return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll)); + } +@@ -2416,7 +2435,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw, + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 l; + +- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); ++ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) ++ return 0; + + return parent_rate * l; + } +diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +index f95c3615ca7727..98f107e96317ec 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c ++++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { + { .hw = &pll_periph0_2x_clk.common.hw }, + { .hw = &pll_audio1_div2_clk.common.hw }, + }; +-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, +- 0, 4, /* M */ +- 8, 2, /* P */ +- 24, 3, /* mux */ +- BIT(31), /* gate */ +- 0); +- +-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, +- 0, 4, /* M */ +- 8, 2, /* P */ +- 24, 3, /* mux */ +- BIT(31), /* gate */ +- 0); ++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", ++ mmc0_mmc1_parents, 0x830, ++ 0, 4, /* M */ ++ 8, 2, /* P */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 2, /* post-div */ ++ 0); ++ ++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", ++ mmc0_mmc1_parents, 0x834, ++ 0, 4, /* M */ ++ 8, 2, /* P */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 2, /* post-div */ ++ 0); + + static const struct clk_parent_data mmc2_parents[] = { + { .fw_name = "hosc" }, +@@ -433,12 +437,14 @@ static const struct clk_parent_data 
mmc2_parents[] = { + { .hw = &pll_periph0_800M_clk.common.hw }, + { .hw = &pll_audio1_div2_clk.common.hw }, + }; +-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, +- 0, 4, /* M */ +- 8, 2, /* P */ +- 24, 3, /* mux */ +- BIT(31), /* gate */ +- 0); ++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, ++ 0x838, ++ 0, 4, /* M */ ++ 8, 2, /* P */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 2, /* post-div */ ++ 0); + + static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, + 0x84c, BIT(0), 0); +diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h +index 6e50f3728fb5f1..7d836a9fb3db34 100644 +--- a/drivers/clk/sunxi-ng/ccu_mp.h ++++ b/drivers/clk/sunxi-ng/ccu_mp.h +@@ -52,6 +52,28 @@ struct ccu_mp { + } \ + } + ++#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ ++ _reg, \ ++ _mshift, _mwidth, \ ++ _pshift, _pwidth, \ ++ _muxshift, _muxwidth, \ ++ _gate, _postdiv, _flags)\ ++ struct ccu_mp _struct = { \ ++ .enable = _gate, \ ++ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ ++ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ ++ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ ++ .fixed_post_div = _postdiv, \ ++ .common = { \ ++ .reg = _reg, \ ++ .features = CCU_FEATURE_FIXED_POSTDIV, \ ++ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ ++ _parents, \ ++ &ccu_mp_ops, \ ++ _flags), \ ++ } \ ++ } ++ + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ +diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c +index b3ae38f3672052..39c70b5ac44c96 100644 +--- a/drivers/clocksource/mips-gic-timer.c ++++ b/drivers/clocksource/mips-gic-timer.c +@@ -114,6 +114,9 @@ static void gic_update_frequency(void *data) + + static int gic_starting_cpu(unsigned int cpu) + { ++ /* Ensure the GIC counter is running */ ++ clear_gic_config(GIC_CONFIG_COUNTSTOP); ++ + gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device)); + return 0; + } +@@ -248,9 +251,6 @@ static int __init gic_clocksource_of_init(struct device_node *node) + pr_warn("Unable to register clock notifier\n"); + } + +- /* And finally start the counter */ +- clear_gic_config(GIC_CONFIG_COUNTSTOP); +- + /* + * It's safe to use the MIPS GIC timer as a sched clock source only if + * its ticks are stable, which is true on either the platforms with +diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c +index 09becf14653b58..c58c1defd74588 100644 +--- a/drivers/cpufreq/cpufreq-dt-platdev.c ++++ b/drivers/cpufreq/cpufreq-dt-platdev.c +@@ -165,6 +165,7 @@ static const struct of_device_id blocklist[] __initconst = { + { .compatible = "qcom,sm8350", }, + { .compatible = "qcom,sm8450", }, + { .compatible = "qcom,sm8550", }, ++ { .compatible = "qcom,sm8650", }, + + { .compatible = "st,stih407", }, + { .compatible = "st,stih410", }, +diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c +index 7b8fcfa55038bc..4e5b6f9a56d1b2 100644 +--- a/drivers/cpufreq/tegra186-cpufreq.c ++++ b/drivers/cpufreq/tegra186-cpufreq.c +@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy) + { + struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); + unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; ++ u32 cpu; + + policy->freq_table = data->clusters[cluster].table; + policy->cpuinfo.transition_latency = 300 * 1000; + policy->driver_data = NULL; + ++ /* set same policy for 
all cpus in a cluster */ ++ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) { ++ if (data->cpus[cpu].bpmp_cluster_id == cluster) ++ cpumask_set_cpu(cpu, policy->cpus); ++ } ++ + return 0; + } + +diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c +index b96e3da0fedd01..edd9a8fb9878d6 100644 +--- a/drivers/cpuidle/governors/menu.c ++++ b/drivers/cpuidle/governors/menu.c +@@ -246,8 +246,19 @@ static unsigned int get_typical_interval(struct menu_device *data) + * This can deal with workloads that have long pauses interspersed + * with sporadic activity with a bunch of short pauses. + */ +- if ((divisor * 4) <= INTERVALS * 3) ++ if (divisor * 4 <= INTERVALS * 3) { ++ /* ++ * If there are sufficiently many data points still under ++ * consideration after the outliers have been eliminated, ++ * returning without a prediction would be a mistake because it ++ * is likely that the next interval will not exceed the current ++ * maximum, so return the latter in that case. ++ */ ++ if (divisor >= INTERVALS / 2) ++ return max; ++ + return UINT_MAX; ++ } + + thresh = max - 1; + goto again; +diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c +index 811ded72ce5fbd..798bb40fed68df 100644 +--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c ++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c +@@ -410,9 +410,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs, + break; + } + +- dev_err(&pdev->dev, +- "Request failed with software error code 0x%x\n", +- cpt_status->s.uc_compcode); ++ pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n", ++ cpt_status->s.uc_compcode, ++ info->req->areq->tfm->__crt_alg->cra_name, ++ info->req->areq->tfm->__crt_alg->cra_driver_name); + otx2_cpt_dump_sg_list(pdev, info->req); + break; + } +diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c +index cc9923ab686dcd..eccbcf67951fbe 100644 +--- a/drivers/dma/fsl-edma-main.c ++++ b/drivers/dma/fsl-edma-main.c +@@ -58,7 +58,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id) + + intr = edma_readl_chreg(fsl_chan, ch_int); + if (!intr) +- return IRQ_HANDLED; ++ return IRQ_NONE; + + edma_writel_chreg(fsl_chan, 1, ch_int); + +diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c +index c18633ad8455fa..7e3a67f9f0a654 100644 +--- a/drivers/dma/idxd/cdev.c ++++ b/drivers/dma/idxd/cdev.c +@@ -225,7 +225,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) + struct idxd_wq *wq; + struct device *dev, *fdev; + int rc = 0; +- struct iommu_sva *sva; ++ struct iommu_sva *sva = NULL; + unsigned int pasid; + struct idxd_cdev *idxd_cdev; + +@@ -322,7 +322,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) + if (device_user_pasid_enabled(idxd)) + idxd_xa_pasid_remove(ctx); + failed_get_pasid: +- if (device_user_pasid_enabled(idxd)) ++ if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva)) + iommu_sva_unbind_device(sva); + failed: + mutex_unlock(&wq->wq_lock); +@@ -412,6 +412,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) + if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) + return -EPERM; + ++ if (current->mm != ctx->mm) ++ return -EPERM; ++ + rc = check_vma(wq, vma, __func__); + if (rc < 0) + return rc; +@@ -478,6 +481,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t + ssize_t written = 0; + int i; + ++ if (current->mm != ctx->mm) ++ 
return -EPERM; ++ + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { + int rc = idxd_submit_user_descriptor(ctx, udesc + i); + +@@ -498,6 +504,9 @@ static __poll_t idxd_cdev_poll(struct file *filp, + struct idxd_device *idxd = wq->idxd; + __poll_t out = 0; + ++ if (current->mm != ctx->mm) ++ return POLLNVAL; ++ + poll_wait(filp, &wq->err_queue, wait); + spin_lock(&idxd->dev_lock); + if (idxd->sw_err.valid) +@@ -584,6 +593,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq) + + static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) + { ++ struct device *dev = &idxd_dev->conf_dev; + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); + struct idxd_device *idxd = wq->idxd; + int rc; +@@ -611,6 +621,12 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) + + mutex_lock(&wq->wq_lock); + ++ if (!idxd_wq_driver_name_match(wq, dev)) { ++ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; ++ rc = -ENODEV; ++ goto wq_err; ++ } ++ + wq->wq = create_workqueue(dev_name(wq_confdev(wq))); + if (!wq->wq) { + rc = -ENOMEM; +diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c +index 07623fb0f52fc2..47a01893cfdbf9 100644 +--- a/drivers/dma/idxd/dma.c ++++ b/drivers/dma/idxd/dma.c +@@ -306,6 +306,12 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) + return -ENXIO; + + mutex_lock(&wq->wq_lock); ++ if (!idxd_wq_driver_name_match(wq, dev)) { ++ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; ++ rc = -ENODEV; ++ goto err; ++ } ++ + wq->type = IDXD_WQT_KERNEL; + + rc = drv_enable_wq(wq); +diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h +index bea10c5cdb76bb..fcbb8caea89952 100644 +--- a/drivers/dma/idxd/idxd.h ++++ b/drivers/dma/idxd/idxd.h +@@ -159,6 +159,8 @@ struct idxd_cdev { + int minor; + }; + ++#define DRIVER_NAME_SIZE 128 ++ + #define IDXD_ALLOCATED_BATCH_SIZE 128U + #define WQ_NAME_SIZE 1024 + #define WQ_TYPE_SIZE 10 +@@ -227,6 +229,8 @@ struct idxd_wq { + /* Lock to protect upasid_xa access. 
*/ + struct mutex uc_lock; + struct xarray upasid_xa; ++ ++ char driver_name[DRIVER_NAME_SIZE + 1]; + }; + + struct idxd_engine { +@@ -648,6 +652,11 @@ static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wq + wqcfg->max_batch_shift = max_batch_shift; + } + ++static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) ++{ ++ return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0); ++} ++ + int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, + struct module *module, const char *mod_name); + #define idxd_driver_register(driver) \ +diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c +index 1fd5a93045f79e..3a5ce477a81ad6 100644 +--- a/drivers/dma/idxd/sysfs.c ++++ b/drivers/dma/idxd/sysfs.c +@@ -1282,6 +1282,39 @@ static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *a + static struct device_attribute dev_attr_wq_op_config = + __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store); + ++static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct idxd_wq *wq = confdev_to_wq(dev); ++ ++ return sysfs_emit(buf, "%s\n", wq->driver_name); ++} ++ ++static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct idxd_wq *wq = confdev_to_wq(dev); ++ char *input, *pos; ++ ++ if (wq->state != IDXD_WQ_DISABLED) ++ return -EPERM; ++ ++ if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0) ++ return -EINVAL; ++ ++ input = kstrndup(buf, count, GFP_KERNEL); ++ if (!input) ++ return -ENOMEM; ++ ++ pos = strim(input); ++ memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); ++ sprintf(wq->driver_name, "%s", pos); ++ kfree(input); ++ return count; ++} ++ ++static struct device_attribute dev_attr_wq_driver_name = ++ __ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store); ++ + static struct attribute *idxd_wq_attributes[] = { + &dev_attr_wq_clients.attr, + &dev_attr_wq_state.attr, +@@ -1301,6 +1334,7 @@ static struct attribute *idxd_wq_attributes[] = { + &dev_attr_wq_occupancy.attr, + &dev_attr_wq_enqcmds_retries.attr, + &dev_attr_wq_op_config.attr, ++ &dev_attr_wq_driver_name.attr, + NULL, + }; + +diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c +index 56be8ef40f376b..e3635fba63b493 100644 +--- a/drivers/edac/ie31200_edac.c ++++ b/drivers/edac/ie31200_edac.c +@@ -405,10 +405,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) + int i, j, ret; + struct mem_ctl_info *mci = NULL; + struct edac_mc_layer layers[2]; +- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL]; + void __iomem *window; + struct ie31200_priv *priv; +- u32 addr_decode, mad_offset; ++ u32 addr_decode[IE31200_CHANNELS], mad_offset; + + /* + * Kaby Lake, Coffee Lake seem to work like Skylake. 
Please re-visit +@@ -466,19 +465,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) + mad_offset = IE31200_MAD_DIMM_0_OFFSET; + } + +- /* populate DIMM info */ + for (i = 0; i < IE31200_CHANNELS; i++) { +- addr_decode = readl(window + mad_offset + ++ addr_decode[i] = readl(window + mad_offset + + (i * 4)); +- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode); +- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) { +- populate_dimm_info(&dimm_info[i][j], addr_decode, j, +- skl); +- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n", +- dimm_info[i][j].size, +- dimm_info[i][j].dual_rank, +- dimm_info[i][j].x16_width); +- } ++ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]); + } + + /* +@@ -489,14 +479,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) + */ + for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) { + for (j = 0; j < IE31200_CHANNELS; j++) { ++ struct dimm_data dimm_info; + struct dimm_info *dimm; + unsigned long nr_pages; + +- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl); ++ populate_dimm_info(&dimm_info, addr_decode[j], i, ++ skl); ++ edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n", ++ dimm_info.size, ++ dimm_info.dual_rank, ++ dimm_info.x16_width); ++ ++ nr_pages = IE31200_PAGES(dimm_info.size, skl); + if (nr_pages == 0) + continue; + +- if (dimm_info[j][i].dual_rank) { ++ if (dimm_info.dual_rank) { + nr_pages = nr_pages / 2; + dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0); + dimm->nr_pages = nr_pages; +diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c +index 7865438b36960d..d885e1381072ac 100644 +--- a/drivers/firmware/arm_ffa/bus.c ++++ b/drivers/firmware/arm_ffa/bus.c +@@ -191,6 +191,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, + dev = &ffa_dev->dev; + dev->bus = &ffa_bus_type; + dev->release = ffa_release_device; ++ dev->dma_mask = &dev->coherent_dma_mask; + dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); + + ffa_dev->id = id; +diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c +index 7c2db3f017651b..488f8345dd1b63 100644 +--- a/drivers/firmware/arm_ffa/driver.c ++++ b/drivers/firmware/arm_ffa/driver.c +@@ -121,6 +121,14 @@ static int ffa_version_check(u32 *version) + return -EOPNOTSUPP; + } + ++ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) { ++ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n", ++ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), ++ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION), ++ FFA_MINOR_VERSION(FFA_DRIVER_VERSION)); ++ return -EINVAL; ++ } ++ + if (ver.a0 < FFA_MIN_VERSION) { + pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n", + FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), +diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c +index 51eeaf14367dac..e1b949aedf9e08 100644 +--- a/drivers/firmware/arm_scmi/bus.c ++++ b/drivers/firmware/arm_scmi/bus.c +@@ -42,7 +42,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); + * This helper let an SCMI driver request specific devices identified by the + * @id_table to be created for each active SCMI instance. + * +- * The requested device name MUST NOT be already existent for any protocol; ++ * The requested device name MUST NOT be already existent for this protocol; + * at first the freshly requested @id_table is annotated in the IDR table + * @scmi_requested_devices and then the requested device is advertised to any + * registered party via the @scmi_requested_devices_nh notification chain. 
+@@ -52,7 +52,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); + static int scmi_protocol_device_request(const struct scmi_device_id *id_table) + { + int ret = 0; +- unsigned int id = 0; + struct list_head *head, *phead = NULL; + struct scmi_requested_dev *rdev; + +@@ -67,19 +66,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table) + } + + /* +- * Search for the matching protocol rdev list and then search +- * of any existent equally named device...fails if any duplicate found. ++ * Find the matching protocol rdev list and then search of any ++ * existent equally named device...fails if any duplicate found. + */ + mutex_lock(&scmi_requested_devices_mtx); +- idr_for_each_entry(&scmi_requested_devices, head, id) { +- if (!phead) { +- /* A list found registered in the IDR is never empty */ +- rdev = list_first_entry(head, struct scmi_requested_dev, +- node); +- if (rdev->id_table->protocol_id == +- id_table->protocol_id) +- phead = head; +- } ++ phead = idr_find(&scmi_requested_devices, id_table->protocol_id); ++ if (phead) { ++ head = phead; + list_for_each_entry(rdev, head, node) { + if (!strcmp(rdev->id_table->name, id_table->name)) { + pr_err("Ignoring duplicate request [%d] %s\n", +diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c +index 4ffb9da537d82c..5295ff90482bc6 100644 +--- a/drivers/fpga/altera-cvp.c ++++ b/drivers/fpga/altera-cvp.c +@@ -52,7 +52,7 @@ + /* V2 Defines */ + #define VSE_CVP_TX_CREDITS 0x49 /* 8bit */ + +-#define V2_CREDIT_TIMEOUT_US 20000 ++#define V2_CREDIT_TIMEOUT_US 40000 + #define V2_CHECK_CREDIT_US 10 + #define V2_POLL_TIMEOUT_US 1000000 + #define V2_USER_TIMEOUT_US 500000 +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index b882b26ab5007b..faadbe66b23e71 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -10,6 +10,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -519,12 +520,10 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) + struct pca953x_chip *chip = gpiochip_get_data(gc); + u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off); + u8 bit = BIT(off % BANK_SZ); +- int ret; + +- mutex_lock(&chip->i2c_lock); +- ret = regmap_write_bits(chip->regmap, dirreg, bit, bit); +- mutex_unlock(&chip->i2c_lock); +- return ret; ++ guard(mutex)(&chip->i2c_lock); ++ ++ return regmap_write_bits(chip->regmap, dirreg, bit, bit); + } + + static int pca953x_gpio_direction_output(struct gpio_chip *gc, +@@ -536,17 +535,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, + u8 bit = BIT(off % BANK_SZ); + int ret; + +- mutex_lock(&chip->i2c_lock); ++ guard(mutex)(&chip->i2c_lock); ++ + /* set output level */ + ret = regmap_write_bits(chip->regmap, outreg, bit, val ? 
bit : 0); + if (ret) +- goto exit; ++ return ret; + + /* then direction */ +- ret = regmap_write_bits(chip->regmap, dirreg, bit, 0); +-exit: +- mutex_unlock(&chip->i2c_lock); +- return ret; ++ return regmap_write_bits(chip->regmap, dirreg, bit, 0); + } + + static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) +@@ -557,9 +554,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) + u32 reg_val; + int ret; + +- mutex_lock(&chip->i2c_lock); +- ret = regmap_read(chip->regmap, inreg, ®_val); +- mutex_unlock(&chip->i2c_lock); ++ scoped_guard(mutex, &chip->i2c_lock) ++ ret = regmap_read(chip->regmap, inreg, ®_val); + if (ret < 0) + return ret; + +@@ -572,9 +568,9 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) + u8 outreg = chip->recalc_addr(chip, chip->regs->output, off); + u8 bit = BIT(off % BANK_SZ); + +- mutex_lock(&chip->i2c_lock); ++ guard(mutex)(&chip->i2c_lock); ++ + regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0); +- mutex_unlock(&chip->i2c_lock); + } + + static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off) +@@ -585,9 +581,8 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off) + u32 reg_val; + int ret; + +- mutex_lock(&chip->i2c_lock); +- ret = regmap_read(chip->regmap, dirreg, ®_val); +- mutex_unlock(&chip->i2c_lock); ++ scoped_guard(mutex, &chip->i2c_lock) ++ ret = regmap_read(chip->regmap, dirreg, ®_val); + if (ret < 0) + return ret; + +@@ -604,9 +599,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc, + DECLARE_BITMAP(reg_val, MAX_LINE); + int ret; + +- mutex_lock(&chip->i2c_lock); +- ret = pca953x_read_regs(chip, chip->regs->input, reg_val); +- mutex_unlock(&chip->i2c_lock); ++ scoped_guard(mutex, &chip->i2c_lock) ++ ret = pca953x_read_regs(chip, chip->regs->input, reg_val); + if (ret) + return ret; + +@@ -621,16 +615,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc, + DECLARE_BITMAP(reg_val, MAX_LINE); + int ret; + +- mutex_lock(&chip->i2c_lock); ++ guard(mutex)(&chip->i2c_lock); ++ + ret = pca953x_read_regs(chip, chip->regs->output, reg_val); + if (ret) +- goto exit; ++ return; + + bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio); + + pca953x_write_regs(chip, chip->regs->output, reg_val); +-exit: +- mutex_unlock(&chip->i2c_lock); + } + + static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, +@@ -638,7 +631,6 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, + unsigned long config) + { + enum pin_config_param param = pinconf_to_config_param(config); +- + u8 pull_en_reg = chip->recalc_addr(chip, PCAL953X_PULL_EN, offset); + u8 pull_sel_reg = chip->recalc_addr(chip, PCAL953X_PULL_SEL, offset); + u8 bit = BIT(offset % BANK_SZ); +@@ -651,7 +643,7 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, + if (!(chip->driver_data & PCA_PCAL)) + return -ENOTSUPP; + +- mutex_lock(&chip->i2c_lock); ++ guard(mutex)(&chip->i2c_lock); + + /* Configure pull-up/pull-down */ + if (param == PIN_CONFIG_BIAS_PULL_UP) +@@ -661,17 +653,13 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, + else + ret = 0; + if (ret) +- goto exit; ++ return ret; + + /* Disable/Enable pull-up/pull-down */ + if (param == PIN_CONFIG_BIAS_DISABLE) +- ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0); ++ return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0); + else +- ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit); +- +-exit: +- mutex_unlock(&chip->i2c_lock); +- 
return ret; ++ return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit); + } + + static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset, +@@ -883,10 +871,8 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) + + bitmap_zero(pending, MAX_LINE); + +- mutex_lock(&chip->i2c_lock); +- ret = pca953x_irq_pending(chip, pending); +- mutex_unlock(&chip->i2c_lock); +- ++ scoped_guard(mutex, &chip->i2c_lock) ++ ret = pca953x_irq_pending(chip, pending); + if (ret) { + ret = 0; + +@@ -1168,9 +1154,9 @@ static int pca953x_probe(struct i2c_client *client) + } + + #ifdef CONFIG_PM_SLEEP +-static int pca953x_regcache_sync(struct device *dev) ++static int pca953x_regcache_sync(struct pca953x_chip *chip) + { +- struct pca953x_chip *chip = dev_get_drvdata(dev); ++ struct device *dev = &chip->client->dev; + int ret; + u8 regaddr; + +@@ -1217,13 +1203,38 @@ static int pca953x_regcache_sync(struct device *dev) + return 0; + } + ++static int pca953x_restore_context(struct pca953x_chip *chip) ++{ ++ int ret; ++ ++ guard(mutex)(&chip->i2c_lock); ++ ++ if (chip->client->irq > 0) ++ enable_irq(chip->client->irq); ++ regcache_cache_only(chip->regmap, false); ++ regcache_mark_dirty(chip->regmap); ++ ret = pca953x_regcache_sync(chip); ++ if (ret) ++ return ret; ++ ++ return regcache_sync(chip->regmap); ++} ++ ++static void pca953x_save_context(struct pca953x_chip *chip) ++{ ++ guard(mutex)(&chip->i2c_lock); ++ ++ /* Disable IRQ to prevent early triggering while regmap "cache only" is on */ ++ if (chip->client->irq > 0) ++ disable_irq(chip->client->irq); ++ regcache_cache_only(chip->regmap, true); ++} ++ + static int pca953x_suspend(struct device *dev) + { + struct pca953x_chip *chip = dev_get_drvdata(dev); + +- mutex_lock(&chip->i2c_lock); +- regcache_cache_only(chip->regmap, true); +- mutex_unlock(&chip->i2c_lock); ++ pca953x_save_context(chip); + + if (atomic_read(&chip->wakeup_path)) + device_set_wakeup_path(dev); +@@ -1246,17 +1257,7 @@ static int pca953x_resume(struct device *dev) + } + } + +- mutex_lock(&chip->i2c_lock); +- regcache_cache_only(chip->regmap, false); +- regcache_mark_dirty(chip->regmap); +- ret = pca953x_regcache_sync(dev); +- if (ret) { +- mutex_unlock(&chip->i2c_lock); +- return ret; +- } +- +- ret = regcache_sync(chip->regmap); +- mutex_unlock(&chip->i2c_lock); ++ ret = pca953x_restore_context(chip); + if (ret) { + dev_err(dev, "Failed to restore register map: %d\n", ret); + return ret; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +index be4cc4868a748e..493e18bcea069e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +@@ -43,6 +43,29 @@ + #include + #include + ++static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops; ++ ++/** ++ * dma_buf_attach_adev - Helper to get adev of an attachment ++ * ++ * @attach: attachment ++ * ++ * Returns: ++ * A struct amdgpu_device * if the attaching device is an amdgpu device or ++ * partition, NULL otherwise. 
++ */ ++static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach) ++{ ++ if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) { ++ struct drm_gem_object *obj = attach->importer_priv; ++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); ++ ++ return amdgpu_ttm_adev(bo->tbo.bdev); ++ } ++ ++ return NULL; ++} ++ + /** + * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation + * +@@ -54,12 +77,14 @@ + static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) + { ++ struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach); + struct drm_gem_object *obj = dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int r; + +- if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) ++ if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) && ++ pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) + attach->peer2peer = false; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); +@@ -482,6 +507,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, + struct drm_gem_object *obj = &bo->tbo.base; + struct drm_gem_object *gobj; + ++ if (!adev) ++ return false; ++ + if (obj->import_attach) { + struct dma_buf *dma_buf = obj->import_attach->dmabuf; + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +index 6a24e8ceb94493..ffa5e72a84ebcb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +@@ -43,7 +43,7 @@ + #include "amdgpu_securedisplay.h" + #include "amdgpu_atomfirmware.h" + +-#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) ++#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16) + + static int psp_load_smu_fw(struct psp_context *psp); + static int psp_rap_terminate(struct psp_context *psp); +@@ -506,7 +506,6 @@ static int psp_sw_fini(void *handle) + { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; +- struct psp_gfx_cmd_resp *cmd = psp->cmd; + + psp_memory_training_fini(psp); + +@@ -516,8 +515,8 @@ static int psp_sw_fini(void *handle) + amdgpu_ucode_release(&psp->cap_fw); + amdgpu_ucode_release(&psp->toc_fw); + +- kfree(cmd); +- cmd = NULL; ++ kfree(psp->cmd); ++ psp->cmd = NULL; + + psp_free_shared_bufs(psp); + +diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +index 66c6bab75f8a58..0d3d00681edac5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) + { + uint64_t value; + +- /* Program the AGP BAR */ +- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); +- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); +- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); +- + if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { ++ /* Program the AGP BAR */ ++ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); ++ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); ++ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); ++ + /* Program the system aperture low logical page number. 
*/ + WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +index 9086f2fdfaf422..553f4f24f5adeb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +@@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) + WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + } + ++/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */ ++static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev) ++{ ++ uint32_t tmp; ++ int i; ++ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; ++ ++ for (i = 0; i < 5; i++) { /* DAGB instances */ ++ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance); ++ tmp |= (1 << 15); /* SDMA client is BIT15 */ ++ WREG32_SOC15_OFFSET(MMHUB, 0, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp); ++ ++ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance); ++ tmp |= (1 << 15); ++ WREG32_SOC15_OFFSET(MMHUB, 0, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp); ++ } ++ ++} ++ + static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev) + { + uint32_t tmp; +@@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) + mmhub_v1_7_init_system_aperture_regs(adev); + mmhub_v1_7_init_tlb_regs(adev); + mmhub_v1_7_init_cache_regs(adev); ++ mmhub_v1_7_init_snoop_override_regs(adev); + + mmhub_v1_7_enable_system_domain(adev); + mmhub_v1_7_disable_identity_aperture(adev); +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +index 3d8e579d5c4e8a..c7bdccff785b71 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +@@ -213,6 +213,32 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) + } + } + ++/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */ ++static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev) ++{ ++ uint32_t tmp, inst_mask; ++ int i, j; ++ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; ++ ++ inst_mask = adev->aid_mask; ++ for_each_inst(i, inst_mask) { ++ for (j = 0; j < 5; j++) { /* DAGB instances */ ++ tmp = RREG32_SOC15_OFFSET(MMHUB, i, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance); ++ tmp |= (1 << 15); /* SDMA client is BIT15 */ ++ WREG32_SOC15_OFFSET(MMHUB, i, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp); ++ ++ tmp = RREG32_SOC15_OFFSET(MMHUB, i, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance); ++ tmp |= (1 << 15); ++ WREG32_SOC15_OFFSET(MMHUB, i, ++ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp); ++ } ++ } ++} ++ + static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) + { + uint32_t tmp, inst_mask; +@@ -418,6 +444,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) + mmhub_v1_8_init_system_aperture_regs(adev); + mmhub_v1_8_init_tlb_regs(adev); + mmhub_v1_8_init_cache_regs(adev); ++ mmhub_v1_8_init_snoop_override_regs(adev); + + mmhub_v1_8_enable_system_domain(adev); + mmhub_v1_8_disable_identity_aperture(adev); +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +index 5718e4d40e6665..9713cb59d1c14f 100644 +--- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +@@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + } + ++/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */ ++static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid) ++{ ++ uint32_t tmp; ++ int i; ++ uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - ++ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; ++ uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET; ++ ++ for (i = 0; i < 5 - (2 * hubid); i++) { ++ /* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */ ++ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, ++ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, ++ huboffset + i * distance); ++ tmp |= (1 << 15); /* SDMA client is BIT15 */ ++ WREG32_SOC15_OFFSET(MMHUB, 0, ++ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, ++ huboffset + i * distance, tmp); ++ ++ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, ++ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, ++ huboffset + i * distance); ++ tmp |= (1 << 15); ++ WREG32_SOC15_OFFSET(MMHUB, 0, ++ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, ++ huboffset + i * distance, tmp); ++ } ++ ++} ++ + static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid) + { + uint32_t tmp; +@@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) + if (!amdgpu_sriov_vf(adev)) + mmhub_v9_4_init_cache_regs(adev, i); + ++ mmhub_v9_4_init_snoop_override_regs(adev, i); + mmhub_v9_4_enable_system_domain(adev, i); + if (!amdgpu_sriov_vf(adev)) + mmhub_v9_4_disable_identity_aperture(adev, i); +diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c +index 7910c463ae3855..82709e692f4cc0 100644 +--- a/drivers/gpu/drm/amd/amdgpu/nv.c ++++ b/drivers/gpu/drm/amd/amdgpu/nv.c +@@ -142,23 +142,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { + }; + + static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = { +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, + }; + + static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = { +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, ++ 
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + }; + +diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c +index 4712ffc0a482c8..7819f5d584f597 100644 +--- a/drivers/gpu/drm/amd/amdgpu/soc21.c ++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c +@@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = { + }; + + static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = { +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, + }; + + static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = { +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, +- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + }; + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +index 4d9a406925e189..fd4a75073364c7 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +@@ -2147,14 +2147,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, + return retval; + } + +-/* +- * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to +- * stay in user mode. +- */ +-#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL +-/* APE1 limit is inclusive and 64K aligned. 
*/ +-#define APE1_LIMIT_ALIGNMENT 0xFFFF +- + static bool set_cache_memory_policy(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, +@@ -2169,34 +2161,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, + + dqm_lock(dqm); + +- if (alternate_aperture_size == 0) { +- /* base > limit disables APE1 */ +- qpd->sh_mem_ape1_base = 1; +- qpd->sh_mem_ape1_limit = 0; +- } else { +- /* +- * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, +- * SH_MEM_APE1_BASE[31:0], 0x0000 } +- * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, +- * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } +- * Verify that the base and size parameters can be +- * represented in this format and convert them. +- * Additionally restrict APE1 to user-mode addresses. +- */ +- +- uint64_t base = (uintptr_t)alternate_aperture_base; +- uint64_t limit = base + alternate_aperture_size - 1; +- +- if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || +- (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { +- retval = false; +- goto out; +- } +- +- qpd->sh_mem_ape1_base = base >> 16; +- qpd->sh_mem_ape1_limit = limit >> 16; +- } +- + retval = dqm->asic_ops.set_cache_memory_policy( + dqm, + qpd, +@@ -2205,6 +2169,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, + alternate_aperture_base, + alternate_aperture_size); + ++ if (retval) ++ goto out; ++ + if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) + program_sh_mem_settings(dqm, qpd); + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +index d4d95c7f2e5d40..32bedef912b3b2 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +@@ -27,6 +27,14 @@ + #include "oss/oss_2_4_sh_mask.h" + #include "gca/gfx_7_2_sh_mask.h" + ++/* ++ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to ++ * stay in user mode. ++ */ ++#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL ++/* APE1 limit is inclusive and 64K aligned. */ ++#define APE1_LIMIT_ALIGNMENT 0xFFFF ++ + static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, +@@ -84,6 +92,36 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, + { + uint32_t default_mtype; + uint32_t ape1_mtype; ++ unsigned int temp; ++ bool retval = true; ++ ++ if (alternate_aperture_size == 0) { ++ /* base > limit disables APE1 */ ++ qpd->sh_mem_ape1_base = 1; ++ qpd->sh_mem_ape1_limit = 0; ++ } else { ++ /* ++ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, ++ * SH_MEM_APE1_BASE[31:0], 0x0000 } ++ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, ++ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } ++ * Verify that the base and size parameters can be ++ * represented in this format and convert them. ++ * Additionally restrict APE1 to user-mode addresses. ++ */ ++ ++ uint64_t base = (uintptr_t)alternate_aperture_base; ++ uint64_t limit = base + alternate_aperture_size - 1; ++ ++ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || ++ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { ++ retval = false; ++ goto out; ++ } ++ ++ qpd->sh_mem_ape1_base = base >> 16; ++ qpd->sh_mem_ape1_limit = limit >> 16; ++ } + + default_mtype = (default_policy == cache_policy_coherent) ? 
+ MTYPE_NONCACHED : +@@ -97,37 +135,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, + | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) + | DEFAULT_MTYPE(default_mtype) + | APE1_MTYPE(ape1_mtype); +- +- return true; +-} +- +-static int update_qpd_cik(struct device_queue_manager *dqm, +- struct qcm_process_device *qpd) +-{ +- struct kfd_process_device *pdd; +- unsigned int temp; +- +- pdd = qpd_to_pdd(qpd); +- +- /* check if sh_mem_config register already configured */ +- if (qpd->sh_mem_config == 0) { +- qpd->sh_mem_config = +- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) | +- DEFAULT_MTYPE(MTYPE_NONCACHED) | +- APE1_MTYPE(MTYPE_NONCACHED); +- qpd->sh_mem_ape1_limit = 0; +- qpd->sh_mem_ape1_base = 0; +- } +- + /* On dGPU we're always in GPUVM64 addressing mode with 64-bit + * aperture addresses. + */ +- temp = get_sh_mem_bases_nybble_64(pdd); ++ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd)); + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); + + pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", + qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); + ++out: ++ return retval; ++} ++ ++static int update_qpd_cik(struct device_queue_manager *dqm, ++ struct qcm_process_device *qpd) ++{ + return 0; + } + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +index b291ee0fab9439..320518f418903d 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +@@ -27,6 +27,14 @@ + #include "gca/gfx_8_0_sh_mask.h" + #include "oss/oss_3_0_sh_mask.h" + ++/* ++ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to ++ * stay in user mode. ++ */ ++#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL ++/* APE1 limit is inclusive and 64K aligned. */ ++#define APE1_LIMIT_ALIGNMENT 0xFFFF ++ + static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, +@@ -85,6 +93,36 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, + { + uint32_t default_mtype; + uint32_t ape1_mtype; ++ unsigned int temp; ++ bool retval = true; ++ ++ if (alternate_aperture_size == 0) { ++ /* base > limit disables APE1 */ ++ qpd->sh_mem_ape1_base = 1; ++ qpd->sh_mem_ape1_limit = 0; ++ } else { ++ /* ++ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, ++ * SH_MEM_APE1_BASE[31:0], 0x0000 } ++ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, ++ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } ++ * Verify that the base and size parameters can be ++ * represented in this format and convert them. ++ * Additionally restrict APE1 to user-mode addresses. ++ */ ++ ++ uint64_t base = (uintptr_t)alternate_aperture_base; ++ uint64_t limit = base + alternate_aperture_size - 1; ++ ++ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || ++ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { ++ retval = false; ++ goto out; ++ } ++ ++ qpd->sh_mem_ape1_base = base >> 16; ++ qpd->sh_mem_ape1_limit = limit >> 16; ++ } + + default_mtype = (default_policy == cache_policy_coherent) ? 
+ MTYPE_UC : +@@ -100,40 +138,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, + default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | + ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT; + +- return true; +-} +- +-static int update_qpd_vi(struct device_queue_manager *dqm, +- struct qcm_process_device *qpd) +-{ +- struct kfd_process_device *pdd; +- unsigned int temp; +- +- pdd = qpd_to_pdd(qpd); +- +- /* check if sh_mem_config register already configured */ +- if (qpd->sh_mem_config == 0) { +- qpd->sh_mem_config = +- SH_MEM_ALIGNMENT_MODE_UNALIGNED << +- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | +- MTYPE_UC << +- SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | +- MTYPE_UC << +- SH_MEM_CONFIG__APE1_MTYPE__SHIFT; +- +- qpd->sh_mem_ape1_limit = 0; +- qpd->sh_mem_ape1_base = 0; +- } +- + /* On dGPU we're always in GPUVM64 addressing mode with 64-bit + * aperture addresses. + */ +- temp = get_sh_mem_bases_nybble_64(pdd); ++ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd)); + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); + + pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n", + temp, qpd->sh_mem_bases); ++out: ++ return retval; ++} + ++static int update_qpd_vi(struct device_queue_manager *dqm, ++ struct qcm_process_device *qpd) ++{ + return 0; + } + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c +index a6d08dee74f6ea..93740b8fc3f44b 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c +@@ -812,6 +812,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) + return ERR_PTR(-EINVAL); + } + ++ /* If the process just called exec(3), it is possible that the ++ * cleanup of the kfd_process (following the release of the mm ++ * of the old process image) is still in the cleanup work queue. ++ * Make sure to drain any job before trying to recreate any ++ * resource for this process. ++ */ ++ flush_workqueue(kfd_process_wq); ++ + /* + * take kfd processes mutex before starting of process creation + * so there won't be a case where two threads of the same process +@@ -830,14 +838,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) + if (process) { + pr_debug("Process already found\n"); + } else { +- /* If the process just called exec(3), it is possible that the +- * cleanup of the kfd_process (following the release of the mm +- * of the old process image) is still in the cleanup work queue. +- * Make sure to drain any job before trying to recreate any +- * resource for this process. +- */ +- flush_workqueue(kfd_process_wq); +- + process = create_process(thread); + if (IS_ERR(process)) + goto out; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index bcf0dc05c76765..9189864c236a7e 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2896,11 +2896,6 @@ static int dm_resume(void *handle) + + return 0; + } +- +- /* leave display off for S4 sequence */ +- if (adev->in_s4) +- return 0; +- + /* Recreate dc_state - DC invalidates it when setting power state to S3. 
*/ + dc_release_state(dm_state->context); + dm_state->context = dc_create_state(dm->dc); +@@ -7474,7 +7469,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, + int i; + int result = -EIO; + +- if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) ++ if (!ddc_service->ddc_pin) + return result; + + cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +index d4d3f58a613f7a..327776eeb9f3ec 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +@@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; + struct dc *dc = clk_mgr_base->ctx->dc; +- int display_count; ++ int display_count = 0; + bool update_dppclk = false; + bool update_dispclk = false; + bool dpp_clock_lowered = false; +@@ -194,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, + // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. + if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; +- if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) +- new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) +@@ -204,15 +202,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, + update_dppclk = true; + } + +- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { +- /* No need to apply the w/a if we haven't taken over from bios yet */ +- if (clk_mgr_base->clks.dispclk_khz) +- dcn315_disable_otg_wa(clk_mgr_base, context, true); ++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && ++ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { ++ int requested_dispclk_khz = new_clocks->dispclk_khz; + ++ dcn315_disable_otg_wa(clk_mgr_base, context, true); ++ ++ /* Clamp the requested clock to PMFW based on their limit. 
*/ ++ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) ++ requested_dispclk_khz = dc->debug.min_disp_clk_khz; ++ ++ dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz); + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; +- dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); +- if (clk_mgr_base->clks.dispclk_khz) +- dcn315_disable_otg_wa(clk_mgr_base, context, false); ++ dcn315_disable_otg_wa(clk_mgr_base, context, false); + + update_dispclk = true; + } +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +index a13ead3d21e310..f95e5e767eb1a6 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +@@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; + struct dc *dc = clk_mgr_base->ctx->dc; +- int display_count; ++ int display_count = 0; + bool update_dppclk = false; + bool update_dispclk = false; + bool dpp_clock_lowered = false; +@@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, + // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; +- if (new_clocks->dispclk_khz < 100000) +- new_clocks->dispclk_khz = 100000; + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) +@@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, + update_dppclk = true; + } + +- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { ++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && ++ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { ++ int requested_dispclk_khz = new_clocks->dispclk_khz; ++ + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); + ++ /* Clamp the requested clock to PMFW based on their limit. 
*/
++ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
++ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
++
++ dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+- dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index c2efe18ceacd07..640d010b52bec3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -266,6 +266,7 @@ static bool create_links(
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ link->link_id.enum_id = ENUM_ID_1;
++ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+ if (!link->link_enc) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 7b5c1498941dd6..d389eeb264a79e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1066,7 +1066,8 @@ void dce110_edp_backlight_control(
+ DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
+ }
+
+- if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
++ if (!enable) {
++ /* follow OEM panel config's requirement */
+ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
+ msleep(pre_T11_delay);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+index 50dc834046446a..4ce45f1bdac0fe 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+@@ -392,11 +392,6 @@ bool dpp3_get_optimal_number_of_taps(
+ int min_taps_y, min_taps_c;
+ enum lb_memory_config lb_config;
+
+- if (scl_data->viewport.width > scl_data->h_active &&
+- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+- return false;
+-
+ /*
+ * Set default taps if none are provided
+ * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
+@@ -434,6 +429,12 @@ bool dpp3_get_optimal_number_of_taps(
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
++ // Avoid null data in the scl data with this early return; proceed with the non-adaptive calculation first
++ if (scl_data->viewport.width > scl_data->h_active &&
++ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
++ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
++ return false;
++
+ /*Ensure we can support the requested number of vtaps*/
+ min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
+ min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+index 597fa0364a3a9b..d1601d61f05adc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+@@ -1692,7 +1692,7 @@ static int dcn315_populate_dml_pipes_from_context(
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ DC_FP_START();
+ dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+- if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
++ if (pixel_rate_crb) {
+ int bpp =
source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); + /* Ceil to crb segment size */ + int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( +@@ -1749,28 +1749,26 @@ static int dcn315_populate_dml_pipes_from_context( + continue; + } + +- if (!pipe->top_pipe && !pipe->prev_odm_pipe) { +- bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) +- || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); +- +- if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) +- pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + +- (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); +- if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { +- /* Clamp to 2 pipe split max det segments */ +- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); +- pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; +- } +- if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { +- /* If we are splitting we must have an even number of segments */ +- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; +- pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; +- } +- /* Convert segments into size for DML use */ +- pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; +- +- crb_idx++; ++ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) ++ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); ++ ++ if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) ++ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + ++ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 
1 : 0); ++ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { ++ /* Clamp to 2 pipe split max det segments */ ++ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); ++ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; ++ } ++ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { ++ /* If we are splitting we must have an even number of segments */ ++ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; ++ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; + } ++ /* Convert segments into size for DML use */ ++ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; ++ ++ crb_idx++; + pipe_cnt++; + } + } +diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h +index eaad1260bfd180..4b284ce669ae52 100644 +--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h ++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h +@@ -532,7 +532,7 @@ struct dc_state { + */ + struct bw_context bw_ctx; + +- struct block_sequence block_sequence[50]; ++ struct block_sequence block_sequence[100]; + unsigned int block_sequence_steps; + struct dc_dmub_cmd dc_dmub_cmd[10]; + unsigned int dmub_cmd_count; +diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +index 4901e27f678bcf..9b470812d96a5f 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c ++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +@@ -145,6 +145,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) + void link_set_all_streams_dpms_off_for_link(struct dc_link *link) + { + struct pipe_ctx *pipes[MAX_PIPES]; ++ struct dc_stream_state *streams[MAX_PIPES]; + struct dc_state *state = link->dc->current_state; + uint8_t count; + int i; +@@ -157,10 +158,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link) + + link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + ++ /* The subsequent call to dc_commit_updates_for_stream for a full update ++ * will release the current state and swap to a new state. Releasing the ++ * current state results in the stream pointers in the pipe_ctx structs ++ * to be zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream. 
++ */
++ for (i = 0; i < count; i++)
++ streams[i] = pipes[i]->stream;
++
+ for (i = 0; i < count; i++) {
+- stream_update.stream = pipes[i]->stream;
++ stream_update.stream = streams[i];
+ dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
+- pipes[i]->stream, &stream_update,
++ streams[i], &stream_update,
+ state);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index 3d589072fe307e..adf0ef8b70e4b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -239,21 +239,21 @@ static uint32_t intersect_frl_link_bw_support(
+ {
+ uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+
+- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
+- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+- supported_bw_in_kbps = 48000000;
+- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+- supported_bw_in_kbps = 40000000;
+- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+- supported_bw_in_kbps = 32000000;
+- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+- supported_bw_in_kbps = 24000000;
+- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+- supported_bw_in_kbps = 18000000;
+- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+- supported_bw_in_kbps = 9000000;
+- }
++ /* Skip checking FRL_MODE bit, as certain PCONs will clear
++ * it despite supporting the link BW indicated in the other bits.
++ */
++ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
++ supported_bw_in_kbps = 48000000;
++ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
++ supported_bw_in_kbps = 40000000;
++ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
++ supported_bw_in_kbps = 32000000;
++ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
++ supported_bw_in_kbps = 24000000;
++ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
++ supported_bw_in_kbps = 18000000;
++ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
++ supported_bw_in_kbps = 9000000;
+
+ return supported_bw_in_kbps;
+ }
+@@ -920,6 +920,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
+ * TODO: add MST specific link training routine
+ */
+ decide_mst_link_settings(link, link_setting);
++ } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
++ link_setting->lane_count = LANE_COUNT_FOUR;
++ link_setting->link_rate = LINK_RATE_HIGH3;
+ } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* enable edp link optimization for DSC eDP case */
+ if (stream->timing.flags.DSC) {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+index 9bde0c8bf914a6..f01a3df584552f 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+@@ -74,7 +74,8 @@ void dp_disable_link_phy(struct dc_link *link,
+ struct dc *dc = link->ctx->dc;
+
+ if (!link->wa_flags.dp_keep_receiver_powered &&
+- !link->skip_implict_edp_power_control)
++ !link->skip_implict_edp_power_control &&
++ link->type != dc_connection_none)
+ dpcd_write_rx_power_ctrl(link, false);
+
+ dc->hwss.disable_link_output(link, link_res, signal);
+@@ -159,8 +160,9 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
+ } else {
+ if (link->fec_state == dc_link_fec_ready) {
+ fec_config = 0;
+- core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+- &fec_config,
sizeof(fec_config)); ++ if (link->type != dc_connection_none) ++ core_link_write_dpcd(link, DP_FEC_CONFIGURATION, ++ &fec_config, sizeof(fec_config)); + + link_enc->funcs->fec_set_ready(link_enc, false); + link->fec_state = dc_link_fec_not_ready; +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +index 2b4c15b0b40708..52261e7c11c0b4 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +@@ -36,7 +36,8 @@ + link->ctx->logger + + static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, +- const struct dc_link_settings *link_settings) ++ const struct dc_link_settings *link_settings, ++ enum lttpr_mode lttpr_mode) + { + union training_aux_rd_interval training_rd_interval; + uint32_t wait_in_micro_secs = 100; +@@ -49,6 +50,8 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); ++ if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT) ++ wait_in_micro_secs = 400; + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } +@@ -110,7 +113,6 @@ void decide_8b_10b_training_settings( + */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? + LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; +- lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); + lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); +@@ -119,6 +121,7 @@ void decide_8b_10b_training_settings( + lt_settings->disallow_per_lane_settings = true; + lt_settings->always_match_dpcd_with_hw_lane_settings = true; + lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); ++ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode); + dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + +diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +index 13104d000b9e09..d4d92da153ece2 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +@@ -662,6 +662,18 @@ bool edp_setup_psr(struct dc_link *link, + if (!link) + return false; + ++ //Clear PSR cfg ++ memset(&psr_configuration, 0, sizeof(psr_configuration)); ++ dm_helpers_dp_write_dpcd( ++ link->ctx, ++ link, ++ DP_PSR_EN_CFG, ++ &psr_configuration.raw, ++ sizeof(psr_configuration.raw)); ++ ++ if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) ++ return false; ++ + dc = link->ctx->dc; + dmcu = dc->res_pool->dmcu; + psr = dc->res_pool->psr; +@@ -672,9 +684,6 @@ bool edp_setup_psr(struct dc_link *link, + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + +- +- memset(&psr_configuration, 0, sizeof(psr_configuration)); +- + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = +@@ -938,6 +947,16 @@ bool edp_setup_replay(struct 
dc_link *link, const struct dc_stream_state *stream + if (!link) + return false; + ++ //Clear Replay config ++ dm_helpers_dp_write_dpcd(link->ctx, link, ++ DP_SINK_PR_ENABLE_AND_CONFIGURATION, ++ (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); ++ ++ if (!(link->replay_settings.config.replay_supported)) ++ return false; ++ ++ link->replay_settings.config.replay_error_status.raw = 0; ++ + dc = link->ctx->dc; + + replay = dc->res_pool->replay; +diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h +index c488d4a50cf46a..b2252deabc17a4 100644 +--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h ++++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h +@@ -203,6 +203,10 @@ + #define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB0_WR_MISC_CREDIT 0x0058 + #define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE 0x005b ++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x005c ++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB0_WRCLI_ASK_PENDING 0x005d + #define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB0_WRCLI_GO_PENDING 0x005e +@@ -455,6 +459,10 @@ + #define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB1_WR_MISC_CREDIT 0x00d8 + #define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE 0x00db ++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x00dc ++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB1_WRCLI_ASK_PENDING 0x00dd + #define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB1_WRCLI_GO_PENDING 0x00de +@@ -707,6 +715,10 @@ + #define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB2_WR_MISC_CREDIT 0x0158 + #define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE 0x015b ++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x015c ++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB2_WRCLI_ASK_PENDING 0x015d + #define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB2_WRCLI_GO_PENDING 0x015e +@@ -959,6 +971,10 @@ + #define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB3_WR_MISC_CREDIT 0x01d8 + #define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE 0x01db ++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x01dc ++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB3_WRCLI_ASK_PENDING 0x01dd + #define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB3_WRCLI_GO_PENDING 0x01de +@@ -1211,6 +1227,10 @@ + #define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB4_WR_MISC_CREDIT 0x0258 + #define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE 0x025b ++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x025c ++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB4_WRCLI_ASK_PENDING 0x025d + #define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB4_WRCLI_GO_PENDING 0x025e +@@ -4793,6 +4813,10 @@ + #define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB5_WR_MISC_CREDIT 0x3058 + #define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE 0x305b ++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 
++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x305c ++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB5_WRCLI_ASK_PENDING 0x305d + #define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB5_WRCLI_GO_PENDING 0x305e +@@ -5045,6 +5069,10 @@ + #define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB6_WR_MISC_CREDIT 0x30d8 + #define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE 0x30db ++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x30dc ++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB6_WRCLI_ASK_PENDING 0x30dd + #define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB6_WRCLI_GO_PENDING 0x30de +@@ -5297,6 +5325,10 @@ + #define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1 + #define mmDAGB7_WR_MISC_CREDIT 0x3158 + #define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1 ++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE 0x315b ++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 ++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x315c ++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 + #define mmDAGB7_WRCLI_ASK_PENDING 0x315d + #define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1 + #define mmDAGB7_WRCLI_GO_PENDING 0x315e +diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h +index 2969fbf282b7d0..5069d2fd467f2b 100644 +--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h ++++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h +@@ -1532,6 +1532,12 @@ + //DAGB0_WRCLI_DBUS_GO_PENDING + #define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB0_DAGB_DLY + #define DAGB0_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB0_DAGB_DLY__CLI__SHIFT 0x8 +@@ -3207,6 +3213,12 @@ + //DAGB1_WRCLI_DBUS_GO_PENDING + #define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB1_DAGB_DLY + #define DAGB1_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB1_DAGB_DLY__CLI__SHIFT 0x8 +@@ -4882,6 +4894,12 @@ + //DAGB2_WRCLI_DBUS_GO_PENDING + #define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB2_DAGB_DLY + #define DAGB2_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB2_DAGB_DLY__CLI__SHIFT 0x8 +@@ -6557,6 +6575,12 @@ + //DAGB3_WRCLI_DBUS_GO_PENDING + #define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define 
DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB3_DAGB_DLY + #define DAGB3_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB3_DAGB_DLY__CLI__SHIFT 0x8 +@@ -8232,6 +8256,12 @@ + //DAGB4_WRCLI_DBUS_GO_PENDING + #define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB4_DAGB_DLY + #define DAGB4_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB4_DAGB_DLY__CLI__SHIFT 0x8 +@@ -28737,6 +28767,12 @@ + //DAGB5_WRCLI_DBUS_GO_PENDING + #define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB5_DAGB_DLY + #define DAGB5_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB5_DAGB_DLY__CLI__SHIFT 0x8 +@@ -30412,6 +30448,12 @@ + //DAGB6_WRCLI_DBUS_GO_PENDING + #define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB6_DAGB_DLY + #define DAGB6_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB6_DAGB_DLY__CLI__SHIFT 0x8 +@@ -32087,6 +32129,12 @@ + //DAGB7_WRCLI_DBUS_GO_PENDING + #define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 + #define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL ++//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE ++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 ++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL ++//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE ++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 ++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL + //DAGB7_DAGB_DLY + #define DAGB7_DAGB_DLY__DLY__SHIFT 0x0 + #define DAGB7_DAGB_DLY__CLI__SHIFT 0x8 +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index 3de0f457fff6ab..5f58da6ebaadb4 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -132,7 +132,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + return false; + } + +- switch (mode->crtc_hdisplay) { ++ switch (mode->hdisplay) { + case 640: + vbios_mode->enh_table = &res_640x480[refresh_rate_index]; + break; +@@ -146,7 +146,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + vbios_mode->enh_table = 
&res_1152x864[refresh_rate_index]; + break; + case 1280: +- if (mode->crtc_vdisplay == 800) ++ if (mode->vdisplay == 800) + vbios_mode->enh_table = &res_1280x800[refresh_rate_index]; + else + vbios_mode->enh_table = &res_1280x1024[refresh_rate_index]; +@@ -158,7 +158,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; + break; + case 1600: +- if (mode->crtc_vdisplay == 900) ++ if (mode->vdisplay == 900) + vbios_mode->enh_table = &res_1600x900[refresh_rate_index]; + else + vbios_mode->enh_table = &res_1600x1200[refresh_rate_index]; +@@ -167,7 +167,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; + break; + case 1920: +- if (mode->crtc_vdisplay == 1080) ++ if (mode->vdisplay == 1080) + vbios_mode->enh_table = &res_1920x1080[refresh_rate_index]; + else + vbios_mode->enh_table = &res_1920x1200[refresh_rate_index]; +@@ -211,6 +211,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; + vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0; + ++ adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde; + adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht; + adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder; + adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder; +@@ -220,6 +221,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + vbios_mode->enh_table->hfp + + vbios_mode->enh_table->hsync); + ++ adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde; + adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt; + adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder; + adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder; +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +index 8f786592143b6c..24e1e11acf6978 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +@@ -244,7 +244,9 @@ static const struct hdmi_codec_pdata codec_data = { + .ops = &adv7511_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, ++ .no_i2s_capture = 1, + .spdif = 1, ++ .no_spdif_capture = 1, + }; + + int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c +index f3681970887cc8..1aa59586c8f81a 100644 +--- a/drivers/gpu/drm/drm_atomic_helper.c ++++ b/drivers/gpu/drm/drm_atomic_helper.c +@@ -573,6 +573,30 @@ mode_valid(struct drm_atomic_state *state) + return 0; + } + ++static int drm_atomic_check_valid_clones(struct drm_atomic_state *state, ++ struct drm_crtc *crtc) ++{ ++ struct drm_encoder *drm_enc; ++ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, ++ crtc); ++ ++ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) { ++ if (!drm_enc->possible_clones) { ++ DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id); ++ continue; ++ } ++ ++ if ((crtc_state->encoder_mask & drm_enc->possible_clones) != ++ crtc_state->encoder_mask) { ++ DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n", ++ crtc->base.id, crtc_state->encoder_mask); ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ + /** + * drm_atomic_helper_check_modeset - validate state object for modeset changes + * @dev: DRM device +@@ 
-744,6 +768,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret != 0) + return ret; ++ ++ ret = drm_atomic_check_valid_clones(state, crtc); ++ if (ret != 0) ++ return ret; + } + + /* +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index ad872c61aac0e3..c6e6e4766c8bf6 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -6471,6 +6471,7 @@ static void drm_reset_display_info(struct drm_connector *connector) + info->has_hdmi_infoframe = false; + info->rgb_quant_range_selectable = false; + memset(&info->hdmi, 0, sizeof(info->hdmi)); ++ memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata)); + + info->edid_hdmi_rgb444_dc_modes = 0; + info->edid_hdmi_ycbcr444_dc_modes = 0; +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index 44a948b80ee14e..deb93f78ce3442 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -322,7 +322,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + return -ENOENT; + + /* Don't allow imported objects to be mapped */ +- if (obj->import_attach) { ++ if (drm_gem_is_imported(obj)) { + ret = -EINVAL; + goto out; + } +@@ -1155,7 +1155,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, + drm_vma_node_start(&obj->vma_node)); + drm_printf_indent(p, indent, "size=%zu\n", obj->size); + drm_printf_indent(p, indent, "imported=%s\n", +- str_yes_no(obj->import_attach)); ++ str_yes_no(drm_gem_is_imported(obj))); + + if (obj->funcs->print_info) + obj->funcs->print_info(p, indent, obj); +diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c +index 54fc3f819577e3..6391afdf202e27 100644 +--- a/drivers/gpu/drm/mediatek/mtk_dpi.c ++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c +@@ -410,12 +410,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable) + + static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi) + { +- mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); ++ if (dpi->conf->reg_h_fre_con) ++ mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); + } + + static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi) + { +- if (dpi->conf->edge_sel_en) ++ if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con) + mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN); + } + +diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c +index 94fe2f3836a9a3..53b3b24d7d7c05 100644 +--- a/drivers/gpu/drm/panel/panel-edp.c ++++ b/drivers/gpu/drm/panel/panel-edp.c +@@ -1923,6 +1923,7 @@ static const struct edp_panel_entry edp_panels[] = { + EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"), + EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), + ++ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"), + EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"), + + { /* sentinal */ } +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +index d8f8c37c326c43..0193d10867dd2f 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +@@ -1290,10 +1290,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, + + rb_swap = vop2_win_rb_swap(fb->format->format); + vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap); +- if (!vop2_cluster_window(win)) { +- 
uv_swap = vop2_win_uv_swap(fb->format->format); +- vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); +- } ++ uv_swap = vop2_win_uv_swap(fb->format->format); ++ vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); + + if (fb->format->is_yuv) { + vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4)); +diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c +index ffbbe9d527d324..0e8ea990118844 100644 +--- a/drivers/gpu/drm/v3d/v3d_drv.c ++++ b/drivers/gpu/drm/v3d/v3d_drv.c +@@ -226,11 +226,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) + if (ret) + return ret; + ++ v3d->clk = devm_clk_get_optional(dev, NULL); ++ if (IS_ERR(v3d->clk)) ++ return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n"); ++ ++ ret = clk_prepare_enable(v3d->clk); ++ if (ret) { ++ dev_err(&pdev->dev, "Couldn't enable the V3D clock\n"); ++ return ret; ++ } ++ + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); + mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); + ret = dma_set_mask_and_coherent(dev, mask); + if (ret) +- return ret; ++ goto clk_disable; + + v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); + +@@ -245,28 +255,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) + ret = PTR_ERR(v3d->reset); + + if (ret == -EPROBE_DEFER) +- return ret; ++ goto clk_disable; + + v3d->reset = NULL; + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); + if (ret) { + dev_err(dev, + "Failed to get reset control or bridge regs\n"); +- return ret; ++ goto clk_disable; + } + } + + if (v3d->ver < 41) { + ret = map_regs(v3d, &v3d->gca_regs, "gca"); + if (ret) +- return ret; ++ goto clk_disable; + } + + v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->mmu_scratch) { + dev_err(dev, "Failed to allocate MMU scratch page\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto clk_disable; + } + + ret = v3d_gem_init(drm); +@@ -289,6 +300,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) + v3d_gem_destroy(drm); + dma_free: + dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); ++clk_disable: ++ clk_disable_unprepare(v3d->clk); + return ret; + } + +@@ -303,6 +316,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev) + + dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, + v3d->mmu_scratch_paddr); ++ ++ clk_disable_unprepare(v3d->clk); + } + + static struct platform_driver v3d_platform_driver = { +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 8e721ec3faaff3..a8665d57094b22 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -41,6 +41,10 @@ + #define USB_VENDOR_ID_ACTIONSTAR 0x2101 + #define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 + ++#define USB_VENDOR_ID_ADATA_XPG 0x125f ++#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505 ++#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506 ++ + #define USB_VENDOR_ID_ADS_TECH 0x06e1 + #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 + +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 5d7a418ccdbecf..73979643315bfd 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -27,6 +27,8 @@ + static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, 
USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS }, +diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c +index c439ed2f16dbca..af6bc76dbf6493 100644 +--- a/drivers/hid/usbhid/usbkbd.c ++++ b/drivers/hid/usbhid/usbkbd.c +@@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type, + return -1; + + spin_lock_irqsave(&kbd->leds_lock, flags); +- kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | ++ kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | + (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) | + (!!test_bit(LED_NUML, dev->led)); + +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c +index 44aaf9b9191d41..8d94ecc3cc468c 100644 +--- a/drivers/hwmon/dell-smm-hwmon.c ++++ b/drivers/hwmon/dell-smm-hwmon.c +@@ -67,7 +67,7 @@ + #define I8K_POWER_BATTERY 0x01 + + #define DELL_SMM_NO_TEMP 10 +-#define DELL_SMM_NO_FANS 3 ++#define DELL_SMM_NO_FANS 4 + + struct dell_smm_data { + struct mutex i8k_mutex; /* lock for sensors writes */ +@@ -940,11 +940,14 @@ static const struct hwmon_channel_info * const dell_smm_info[] = { + HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | + HWMON_F_TARGET, + HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | ++ HWMON_F_TARGET, ++ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | + HWMON_F_TARGET + ), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE, + HWMON_PWM_INPUT, ++ HWMON_PWM_INPUT, + HWMON_PWM_INPUT + ), + NULL +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c +index d92c536be9af78..b779240328d59f 100644 +--- a/drivers/hwmon/gpio-fan.c ++++ b/drivers/hwmon/gpio-fan.c +@@ -393,7 +393,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev, + if (state >= fan_data->num_speed) + return -EINVAL; + ++ mutex_lock(&fan_data->lock); ++ + set_fan_speed(fan_data, state); ++ ++ mutex_unlock(&fan_data->lock); ++ + return 0; + } + +@@ -489,7 +494,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match); + + static void gpio_fan_stop(void *data) + { ++ struct gpio_fan_data *fan_data = data; ++ ++ mutex_lock(&fan_data->lock); + set_fan_speed(data, 0); ++ mutex_unlock(&fan_data->lock); + } + + static int gpio_fan_probe(struct platform_device *pdev) +@@ -562,7 +571,9 @@ static int gpio_fan_suspend(struct device *dev) + + if (fan_data->gpios) { + fan_data->resume_speed = fan_data->speed_index; ++ mutex_lock(&fan_data->lock); + set_fan_speed(fan_data, 0); ++ mutex_unlock(&fan_data->lock); + } + + return 0; +@@ -572,8 +583,11 @@ static int gpio_fan_resume(struct device *dev) + { + struct gpio_fan_data *fan_data = dev_get_drvdata(dev); + +- if (fan_data->gpios) ++ if (fan_data->gpios) { ++ mutex_lock(&fan_data->lock); + set_fan_speed(fan_data, fan_data->resume_speed); ++ mutex_unlock(&fan_data->lock); ++ } + + return 0; + } +diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c +index 207084d55044a1..6768dbf3903906 100644 +--- a/drivers/hwmon/xgene-hwmon.c ++++ 
b/drivers/hwmon/xgene-hwmon.c +@@ -111,7 +111,7 @@ struct xgene_hwmon_dev { + + phys_addr_t comm_base_addr; + void *pcc_comm_addr; +- u64 usecs_lat; ++ unsigned int usecs_lat; + }; + + /* +diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig +index 4b6359326ede99..4f7d2b6d79e294 100644 +--- a/drivers/hwtracing/intel_th/Kconfig ++++ b/drivers/hwtracing/intel_th/Kconfig +@@ -60,6 +60,7 @@ config INTEL_TH_STH + + config INTEL_TH_MSU + tristate "Intel(R) Trace Hub Memory Storage Unit" ++ depends on MMU + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. It supports single +diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c +index 9621efe0e95c4d..54629458fb710c 100644 +--- a/drivers/hwtracing/intel_th/msu.c ++++ b/drivers/hwtracing/intel_th/msu.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_X86 + #include +@@ -965,7 +966,6 @@ static void msc_buffer_contig_free(struct msc *msc) + for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { + struct page *page = virt_to_page(msc->base + off); + +- page->mapping = NULL; + __free_page(page); + } + +@@ -1147,9 +1147,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) + int i; + + for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { +- struct page *page = msc_sg_page(sg); +- +- page->mapping = NULL; + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, + sg_virt(sg), sg_dma_address(sg)); + } +@@ -1584,22 +1581,10 @@ static void msc_mmap_close(struct vm_area_struct *vma) + { + struct msc_iter *iter = vma->vm_file->private_data; + struct msc *msc = iter->msc; +- unsigned long pg; + + if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) + return; + +- /* drop page _refcounts */ +- for (pg = 0; pg < msc->nr_pages; pg++) { +- struct page *page = msc_buffer_get_page(msc, pg); +- +- if (WARN_ON_ONCE(!page)) +- continue; +- +- if (page->mapping) +- page->mapping = NULL; +- } +- + /* last mapping -- drop user_count */ + atomic_dec(&msc->user_count); + mutex_unlock(&msc->buf_mutex); +@@ -1609,16 +1594,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf) + { + struct msc_iter *iter = vmf->vma->vm_file->private_data; + struct msc *msc = iter->msc; ++ struct page *page; + +- vmf->page = msc_buffer_get_page(msc, vmf->pgoff); +- if (!vmf->page) ++ page = msc_buffer_get_page(msc, vmf->pgoff); ++ if (!page) + return VM_FAULT_SIGBUS; + +- get_page(vmf->page); +- vmf->page->mapping = vmf->vma->vm_file->f_mapping; +- vmf->page->index = vmf->pgoff; +- +- return 0; ++ get_page(page); ++ return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page)); + } + + static const struct vm_operations_struct msc_mmap_ops = { +@@ -1659,7 +1642,7 @@ static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma) + atomic_dec(&msc->user_count); + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY); ++ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP); + vma->vm_ops = &msc_mmap_ops; + return ret; + } +diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c +index ced2fb4aeda8d3..79e083aab08e0c 100644 +--- a/drivers/i2c/busses/i2c-designware-common.c ++++ b/drivers/i2c/busses/i2c-designware-common.c +@@ -669,6 +669,7 @@ void i2c_dw_disable(struct dw_i2c_dev *dev) + + i2c_dw_release_lock(dev); + } ++EXPORT_SYMBOL_GPL(i2c_dw_disable); + + 
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h +index 5eb130c1d67195..e93870a0f9a459 100644 +--- a/drivers/i2c/busses/i2c-designware-core.h ++++ b/drivers/i2c/busses/i2c-designware-core.h +@@ -238,7 +238,6 @@ struct reset_control; + * @semaphore_idx: Index of table with semaphore type attached to the bus. It's + * -1 if there is no semaphore. + * @shared_with_punit: true if this bus is shared with the SoCs PUNIT +- * @disable: function to disable the controller + * @init: function to initialize the I2C hardware + * @set_sda_hold_time: callback to retrieve IP specific SDA hold timing + * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE +@@ -295,7 +294,6 @@ struct dw_i2c_dev { + void (*release_lock)(void); + int semaphore_idx; + bool shared_with_punit; +- void (*disable)(struct dw_i2c_dev *dev); + int (*init)(struct dw_i2c_dev *dev); + int (*set_sda_hold_time)(struct dw_i2c_dev *dev); + int mode; +@@ -305,6 +303,7 @@ struct dw_i2c_dev { + #define ACCESS_INTR_MASK BIT(0) + #define ACCESS_NO_IRQ_SUSPEND BIT(1) + #define ARBITRATION_SEMAPHORE BIT(2) ++#define ACCESS_POLLING BIT(3) + + #define MODEL_MSCC_OCELOT BIT(8) + #define MODEL_BAIKAL_BT1 BIT(9) +@@ -339,7 +338,6 @@ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev); + int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev); + int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev); + u32 i2c_dw_func(struct i2c_adapter *adap); +-void i2c_dw_disable(struct dw_i2c_dev *dev); + + static inline void __i2c_dw_enable(struct dw_i2c_dev *dev) + { +@@ -354,6 +352,7 @@ static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev) + } + + void __i2c_dw_disable(struct dw_i2c_dev *dev); ++void i2c_dw_disable(struct dw_i2c_dev *dev); + + extern void i2c_dw_configure_master(struct dw_i2c_dev *dev); + extern int i2c_dw_probe_master(struct dw_i2c_dev *dev); +diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c +index 579c668cb78a6d..51f5491648c077 100644 +--- a/drivers/i2c/busses/i2c-designware-master.c ++++ b/drivers/i2c/busses/i2c-designware-master.c +@@ -991,31 +991,6 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) + return 0; + } + +-static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev) +-{ +- struct i2c_adapter *adap = &dev->adapter; +- int ret; +- +- pm_runtime_get_noresume(dev->dev); +- ret = i2c_add_numbered_adapter(adap); +- if (ret) +- dev_err(dev->dev, "Failed to add adapter: %d\n", ret); +- pm_runtime_put_noidle(dev->dev); +- +- return ret; +-} +- +-static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev) +-{ +- switch (dev->flags & MODEL_MASK) { +- case MODEL_AMD_NAVI_GPU: +- case MODEL_WANGXUN_SP: +- return true; +- default: +- return false; +- } +-} +- + int i2c_dw_probe_master(struct dw_i2c_dev *dev) + { + struct i2c_adapter *adap = &dev->adapter; +@@ -1026,7 +1001,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) + init_completion(&dev->cmd_complete); + + dev->init = i2c_dw_init_master; +- dev->disable = i2c_dw_disable; + + ret = i2c_dw_init_regmap(dev); + if (ret) +@@ -1071,9 +1045,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) + adap->dev.parent = dev->dev; + i2c_set_adapdata(adap, dev); + +- if (i2c_dw_is_model_poll(dev)) +- return i2c_dw_poll_adap_quirk(dev); +- + if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { + irq_flags = IRQF_NO_SUSPEND; + } else { +@@ -1087,12 +1058,14 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) + 
regmap_write(dev->map, DW_IC_INTR_MASK, 0); + i2c_dw_release_lock(dev); + +- ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags, +- dev_name(dev->dev), dev); +- if (ret) { +- dev_err(dev->dev, "failure requesting irq %i: %d\n", +- dev->irq, ret); +- return ret; ++ if (!(dev->flags & ACCESS_POLLING)) { ++ ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, ++ irq_flags, dev_name(dev->dev), dev); ++ if (ret) { ++ dev_err(dev->dev, "failure requesting irq %i: %d\n", ++ dev->irq, ret); ++ return ret; ++ } + } + + ret = i2c_dw_init_recovery_info(dev); +diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c +index 61d7a27aa07018..b85f1e4ed13bc8 100644 +--- a/drivers/i2c/busses/i2c-designware-pcidrv.c ++++ b/drivers/i2c/busses/i2c-designware-pcidrv.c +@@ -154,7 +154,7 @@ static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c) + { + struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); + +- dev->flags |= MODEL_AMD_NAVI_GPU; ++ dev->flags |= MODEL_AMD_NAVI_GPU | ACCESS_POLLING; + dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; + return 0; + } +@@ -198,7 +198,7 @@ static int __maybe_unused i2c_dw_pci_runtime_suspend(struct device *dev) + { + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + +- i_dev->disable(i_dev); ++ i2c_dw_disable(i_dev); + return 0; + } + +@@ -248,6 +248,7 @@ static const struct software_node dgpu_node = { + static int i2c_dw_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) + { ++ struct device *device = &pdev->dev; + struct dw_i2c_dev *dev; + struct i2c_adapter *adap; + int r; +@@ -256,25 +257,22 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, + struct i2c_timings *t; + + if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) +- return dev_err_probe(&pdev->dev, -EINVAL, +- "Invalid driver data %ld\n", ++ return dev_err_probe(device, -EINVAL, "Invalid driver data %ld\n", + id->driver_data); + + controller = &dw_pci_controllers[id->driver_data]; + + r = pcim_enable_device(pdev); + if (r) +- return dev_err_probe(&pdev->dev, r, +- "Failed to enable I2C PCI device\n"); ++ return dev_err_probe(device, r, "Failed to enable I2C PCI device\n"); + + pci_set_master(pdev); + + r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); + if (r) +- return dev_err_probe(&pdev->dev, r, +- "I/O memory remapping failed\n"); ++ return dev_err_probe(device, r, "I/O memory remapping failed\n"); + +- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); ++ dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + +@@ -284,7 +282,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, + + dev->get_clk_rate_khz = controller->get_clk_rate_khz; + dev->base = pcim_iomap_table(pdev)[0]; +- dev->dev = &pdev->dev; ++ dev->dev = device; + dev->irq = pci_irq_vector(pdev, 0); + dev->flags |= controller->flags; + +@@ -337,15 +335,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, + + if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { + dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); +- if (IS_ERR(dev->slave)) +- return dev_err_probe(dev->dev, PTR_ERR(dev->slave), ++ if (IS_ERR(dev->slave)) { ++ i2c_del_adapter(&dev->adapter); ++ return dev_err_probe(device, PTR_ERR(dev->slave), + "register UCSI failed\n"); ++ } + } + +- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); +- pm_runtime_use_autosuspend(&pdev->dev); +- pm_runtime_put_autosuspend(&pdev->dev); +- pm_runtime_allow(&pdev->dev); ++ pm_runtime_set_autosuspend_delay(device, 1000); ++ 
pm_runtime_use_autosuspend(device); ++ pm_runtime_put_autosuspend(device); ++ pm_runtime_allow(device); + + return 0; + } +@@ -353,10 +353,12 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, + static void i2c_dw_pci_remove(struct pci_dev *pdev) + { + struct dw_i2c_dev *dev = pci_get_drvdata(pdev); ++ struct device *device = &pdev->dev; ++ ++ i2c_dw_disable(dev); + +- dev->disable(dev); +- pm_runtime_forbid(&pdev->dev); +- pm_runtime_get_noresume(&pdev->dev); ++ pm_runtime_forbid(device); ++ pm_runtime_get_noresume(device); + + i2c_del_adapter(&dev->adapter); + devm_free_irq(&pdev->dev, dev->irq, dev); +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index 855b698e99c080..f3245a68563095 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -275,6 +275,7 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) + + static int dw_i2c_plat_probe(struct platform_device *pdev) + { ++ struct device *device = &pdev->dev; + struct i2c_adapter *adap; + struct dw_i2c_dev *dev; + struct i2c_timings *t; +@@ -284,15 +285,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) + if (irq < 0) + return irq; + +- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL); ++ dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + +- dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); +- if (device_property_present(&pdev->dev, "wx,i2c-snps-model")) +- dev->flags = MODEL_WANGXUN_SP; ++ dev->flags = (uintptr_t)device_get_match_data(device); ++ if (device_property_present(device, "wx,i2c-snps-model")) ++ dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING; + +- dev->dev = &pdev->dev; ++ dev->dev = device; + dev->irq = irq; + platform_set_drvdata(pdev, dev); + +@@ -300,7 +301,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) + if (ret) + return ret; + +- dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); ++ dev->rst = devm_reset_control_get_optional_exclusive(device, NULL); + if (IS_ERR(dev->rst)) + return PTR_ERR(dev->rst); + +@@ -328,13 +329,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) + i2c_dw_configure(dev); + + /* Optional interface clock */ +- dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); ++ dev->pclk = devm_clk_get_optional(device, "pclk"); + if (IS_ERR(dev->pclk)) { + ret = PTR_ERR(dev->pclk); + goto exit_reset; + } + +- dev->clk = devm_clk_get_optional(&pdev->dev, NULL); ++ dev->clk = devm_clk_get_optional(device, NULL); + if (IS_ERR(dev->clk)) { + ret = PTR_ERR(dev->clk); + goto exit_reset; +@@ -363,28 +364,24 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) + adap->dev.of_node = pdev->dev.of_node; + adap->nr = -1; + +- if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { +- dev_pm_set_driver_flags(&pdev->dev, +- DPM_FLAG_SMART_PREPARE); +- } else { +- dev_pm_set_driver_flags(&pdev->dev, +- DPM_FLAG_SMART_PREPARE | +- DPM_FLAG_SMART_SUSPEND); +- } ++ if (dev->flags & ACCESS_NO_IRQ_SUSPEND) ++ dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE); ++ else ++ dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND); + +- device_enable_async_suspend(&pdev->dev); ++ device_enable_async_suspend(device); + + /* The code below assumes runtime PM to be disabled. 
*/
+- WARN_ON(pm_runtime_enabled(&pdev->dev));
++ WARN_ON(pm_runtime_enabled(device));
+
+- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+- pm_runtime_use_autosuspend(&pdev->dev);
+- pm_runtime_set_active(&pdev->dev);
++ pm_runtime_set_autosuspend_delay(device, 1000);
++ pm_runtime_use_autosuspend(device);
++ pm_runtime_set_active(device);
+
+ if (dev->shared_with_punit)
+- pm_runtime_get_noresume(&pdev->dev);
++ pm_runtime_get_noresume(device);
+
+- pm_runtime_enable(&pdev->dev);
++ pm_runtime_enable(device);
+
+ ret = i2c_dw_probe(dev);
+ if (ret)
+@@ -402,15 +399,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ static void dw_i2c_plat_remove(struct platform_device *pdev)
+ {
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
++ struct device *device = &pdev->dev;
+
+- pm_runtime_get_sync(&pdev->dev);
++ pm_runtime_get_sync(device);
+
+ i2c_del_adapter(&dev->adapter);
+
+- dev->disable(dev);
++ i2c_dw_disable(dev);
+
+- pm_runtime_dont_use_autosuspend(&pdev->dev);
+- pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_dont_use_autosuspend(device);
++ pm_runtime_put_sync(device);
+ dw_i2c_plat_pm_cleanup(dev);
+
+ i2c_dw_remove_lock_support(dev);
+@@ -436,7 +434,7 @@ static int dw_i2c_plat_runtime_suspend(struct device *dev)
+ if (i_dev->shared_with_punit)
+ return 0;
+
+- i_dev->disable(i_dev);
++ i2c_dw_disable(i_dev);
+ i2c_dw_prepare_clk(i_dev, false);
+
+ return 0;
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index 78e2c47e3d7da7..345b532a2b455d 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -88,7 +88,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
+ struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter);
+
+ regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+- dev->disable(dev);
++ i2c_dw_disable(dev);
+ synchronize_irq(dev->irq);
+ dev->slave = NULL;
+ pm_runtime_put(dev->dev);
+@@ -235,7 +235,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
+ int ret;
+
+ dev->init = i2c_dw_init_slave;
+- dev->disable = i2c_dw_disable;
+
+ ret = i2c_dw_init_regmap(dev);
+ if (ret)
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 3bd406470940fb..affdd94f06aaf0 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -1504,7 +1504,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
+ i2c->adap.name);
+ }
+
+- clk_prepare_enable(i2c->clk);
++ ret = clk_prepare_enable(i2c->clk);
++ if (ret)
++ return dev_err_probe(&dev->dev, ret,
++ "failed to enable clock\n");
+
+ if (i2c->use_pio) {
+ i2c->adap.algo = &i2c_pxa_pio_algorithm;
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index 598102d16677a1..ee92a315f074fe 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -14,6 +14,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/i2c.h>
++#include <linux/interconnect.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
+@@ -150,6 +151,8 @@
+ /* TAG length for DATA READ in RX FIFO */
+ #define READ_RX_TAGS_LEN 2
+
++#define QUP_BUS_WIDTH 8
++
+ static unsigned int scl_freq;
+ module_param_named(scl_freq, scl_freq, uint, 0444);
+ MODULE_PARM_DESC(scl_freq, "SCL frequency override");
+@@ -227,6 +230,7 @@ struct qup_i2c_dev {
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
++ struct icc_path *icc_path;
+ struct i2c_adapter adap;
+
+ int clk_ctl;
+@@ -255,6 +259,10 @@ struct qup_i2c_dev {
+ /* To configure when bus is in run state */
+ u32 config_run;
+
++ /* bandwidth votes */
++ u32 src_clk_freq;
++ u32 cur_bw_clk_freq; ++ + /* dma parameters */ + bool is_dma; + /* To check if the current transfer is using DMA */ +@@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) + return ret; + } + ++static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq) ++{ ++ u32 needed_peak_bw; ++ int ret; ++ ++ if (qup->cur_bw_clk_freq == clk_freq) ++ return 0; ++ ++ needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH); ++ ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw); ++ if (ret) ++ return ret; ++ ++ qup->cur_bw_clk_freq = clk_freq; ++ return 0; ++} ++ + static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup) + { + struct qup_i2c_block *blk = &qup->blk; +@@ -840,6 +865,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, + int ret = 0; + int idx = 0; + ++ ret = qup_i2c_vote_bw(qup, qup->src_clk_freq); ++ if (ret) ++ return ret; ++ + enable_irq(qup->irq); + ret = qup_i2c_req_dma(qup); + +@@ -1645,6 +1674,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup) + config = readl(qup->base + QUP_CONFIG); + config |= QUP_CLOCK_AUTO_GATE; + writel(config, qup->base + QUP_CONFIG); ++ qup_i2c_vote_bw(qup, 0); + clk_disable_unprepare(qup->pclk); + } + +@@ -1745,6 +1775,11 @@ static int qup_i2c_probe(struct platform_device *pdev) + goto fail_dma; + } + qup->is_dma = true; ++ ++ qup->icc_path = devm_of_icc_get(&pdev->dev, NULL); ++ if (IS_ERR(qup->icc_path)) ++ return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path), ++ "failed to get interconnect path\n"); + } + + nodma: +@@ -1793,6 +1828,7 @@ static int qup_i2c_probe(struct platform_device *pdev) + qup_i2c_enable_clocks(qup); + src_clk_freq = clk_get_rate(qup->clk); + } ++ qup->src_clk_freq = src_clk_freq; + + /* + * Bootloaders might leave a pending interrupt on certain QUP's, +diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c +index fa1f12a89158cf..d1630d47ef6fcf 100644 +--- a/drivers/i3c/master/svc-i3c-master.c ++++ b/drivers/i3c/master/svc-i3c-master.c +@@ -503,6 +503,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) + queue_work(master->base.wq, &master->hj_work); + break; + case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST: ++ svc_i3c_master_emit_stop(master); ++ break; + default: + break; + } +@@ -840,6 +842,8 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, + u32 reg; + int ret, i; + ++ svc_i3c_master_flush_fifo(master); ++ + while (true) { + /* Enter/proceed with DAA */ + writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA | +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c +index 07c571c7b69992..c5b68639476058 100644 +--- a/drivers/infiniband/core/umem.c ++++ b/drivers/infiniband/core/umem.c +@@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt) + { +- struct scatterlist *sg; ++ unsigned long curr_len = 0; ++ dma_addr_t curr_base = ~0; + unsigned long va, pgoff; ++ struct scatterlist *sg; + dma_addr_t mask; ++ dma_addr_t end; + int i; + + umem->iova = va = virt; +@@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, + pgoff = umem->address & ~PAGE_MASK; + + for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { +- /* Walk SGL and reduce max page size if VA/PA bits differ +- * for any address. ++ /* If the current entry is physically contiguous with the previous ++ * one, no need to take its start addresses into consideration. 
+ */ +- mask |= (sg_dma_address(sg) + pgoff) ^ va; ++ if (check_add_overflow(curr_base, curr_len, &end) || ++ end != sg_dma_address(sg)) { ++ ++ curr_base = sg_dma_address(sg); ++ curr_len = 0; ++ ++ /* Reduce max page size if VA/PA bits differ */ ++ mask |= (curr_base + pgoff) ^ va; ++ ++ /* The alignment of any VA matching a discontinuity point ++ * in the physical memory sets the maximum possible page ++ * size as this must be a starting point of a new page that ++ * needs to be aligned. ++ */ ++ if (i != 0) ++ mask |= va; ++ } ++ ++ curr_len += sg_dma_len(sg); + va += sg_dma_len(sg) - pgoff; +- /* Except for the last entry, the ending iova alignment sets +- * the maximum possible page size as the low bits of the iova +- * must be zero when starting the next chunk. +- */ +- if (i != (umem->sgt_append.sgt.nents - 1)) +- mask |= va; ++ + pgoff = 0; + } + +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index c6053e82ecf6f3..33e2fe0facd529 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -718,8 +718,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) + goto err_free; + + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); +- if (!pd) { +- ret = -EINVAL; ++ if (IS_ERR(pd)) { ++ ret = PTR_ERR(pd); + goto err_free; + } + +@@ -809,8 +809,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) + if (cmd.flags & IB_MR_REREG_PD) { + new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, + attrs); +- if (!new_pd) { +- ret = -EINVAL; ++ if (IS_ERR(new_pd)) { ++ ret = PTR_ERR(new_pd); + goto put_uobjs; + } + } else { +@@ -919,8 +919,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs) + return PTR_ERR(uobj); + + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); +- if (!pd) { +- ret = -EINVAL; ++ if (IS_ERR(pd)) { ++ ret = PTR_ERR(pd); + goto err_free; + } + +@@ -1127,8 +1127,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs) + return ret; + + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); +- if (!cq) +- return -EINVAL; ++ if (IS_ERR(cq)) ++ return PTR_ERR(cq); + + ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata); + if (ret) +@@ -1189,8 +1189,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) + return ret; + + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); +- if (!cq) +- return -EINVAL; ++ if (IS_ERR(cq)) ++ return PTR_ERR(cq); + + /* we copy a struct ib_uverbs_poll_cq_resp to user space */ + header_ptr = attrs->ucore.outbuf; +@@ -1238,8 +1238,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs) + return ret; + + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); +- if (!cq) +- return -EINVAL; ++ if (IS_ERR(cq)) ++ return PTR_ERR(cq); + + ib_req_notify_cq(cq, cmd.solicited_only ? 
+ IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); +@@ -1321,8 +1321,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs, + ind_tbl = uobj_get_obj_read(rwq_ind_table, + UVERBS_OBJECT_RWQ_IND_TBL, + cmd->rwq_ind_tbl_handle, attrs); +- if (!ind_tbl) { +- ret = -EINVAL; ++ if (IS_ERR(ind_tbl)) { ++ ret = PTR_ERR(ind_tbl); + goto err_put; + } + +@@ -1360,8 +1360,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs, + if (cmd->is_srq) { + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, + cmd->srq_handle, attrs); +- if (!srq || srq->srq_type == IB_SRQT_XRC) { +- ret = -EINVAL; ++ if (IS_ERR(srq) || ++ srq->srq_type == IB_SRQT_XRC) { ++ ret = IS_ERR(srq) ? PTR_ERR(srq) : ++ -EINVAL; + goto err_put; + } + } +@@ -1371,23 +1373,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs, + rcq = uobj_get_obj_read( + cq, UVERBS_OBJECT_CQ, + cmd->recv_cq_handle, attrs); +- if (!rcq) { +- ret = -EINVAL; ++ if (IS_ERR(rcq)) { ++ ret = PTR_ERR(rcq); + goto err_put; + } + } + } + } + +- if (has_sq) ++ if (has_sq) { + scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, + cmd->send_cq_handle, attrs); ++ if (IS_ERR(scq)) { ++ ret = PTR_ERR(scq); ++ goto err_put; ++ } ++ } ++ + if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI) + rcq = rcq ?: scq; + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, + attrs); +- if (!pd || (!scq && has_sq)) { +- ret = -EINVAL; ++ if (IS_ERR(pd)) { ++ ret = PTR_ERR(pd); + goto err_put; + } + +@@ -1482,18 +1490,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs, + err_put: + if (!IS_ERR(xrcd_uobj)) + uobj_put_read(xrcd_uobj); +- if (pd) ++ if (!IS_ERR_OR_NULL(pd)) + uobj_put_obj_read(pd); +- if (scq) ++ if (!IS_ERR_OR_NULL(scq)) + rdma_lookup_put_uobject(&scq->uobject->uevent.uobject, + UVERBS_LOOKUP_READ); +- if (rcq && rcq != scq) ++ if (!IS_ERR_OR_NULL(rcq) && rcq != scq) + rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject, + UVERBS_LOOKUP_READ); +- if (srq) ++ if (!IS_ERR_OR_NULL(srq)) + rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, + UVERBS_LOOKUP_READ); +- if (ind_tbl) ++ if (!IS_ERR_OR_NULL(ind_tbl)) + uobj_put_obj_read(ind_tbl); + + uobj_alloc_abort(&obj->uevent.uobject, attrs); +@@ -1655,8 +1663,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs) + } + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); +- if (!qp) { +- ret = -EINVAL; ++ if (IS_ERR(qp)) { ++ ret = PTR_ERR(qp); + goto out; + } + +@@ -1761,8 +1769,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs, + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, + attrs); +- if (!qp) { +- ret = -EINVAL; ++ if (IS_ERR(qp)) { ++ ret = PTR_ERR(qp); + goto out; + } + +@@ -2027,8 +2035,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) + return -ENOMEM; + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); +- if (!qp) { +- ret = -EINVAL; ++ if (IS_ERR(qp)) { ++ ret = PTR_ERR(qp); + goto out; + } + +@@ -2065,9 +2073,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) + + ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, + user_wr->wr.ud.ah, attrs); +- if (!ud->ah) { ++ if (IS_ERR(ud->ah)) { ++ ret = PTR_ERR(ud->ah); + kfree(ud); +- ret = -EINVAL; + goto out_put; + } + ud->remote_qpn = user_wr->wr.ud.remote_qpn; +@@ -2304,8 +2312,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs) + return PTR_ERR(wr); + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); +- if (!qp) { +- ret = -EINVAL; ++ if (IS_ERR(qp)) { ++ ret = PTR_ERR(qp); + goto out; + } + +@@ 
-2355,8 +2363,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs) + return PTR_ERR(wr); + + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); +- if (!srq) { +- ret = -EINVAL; ++ if (IS_ERR(srq)) { ++ ret = PTR_ERR(srq); + goto out; + } + +@@ -2412,8 +2420,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs) + } + + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); +- if (!pd) { +- ret = -EINVAL; ++ if (IS_ERR(pd)) { ++ ret = PTR_ERR(pd); + goto err; + } + +@@ -2482,8 +2490,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs) + return ret; + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); +- if (!qp) +- return -EINVAL; ++ if (IS_ERR(qp)) ++ return PTR_ERR(qp); + + obj = qp->uobject; + +@@ -2532,8 +2540,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs) + return ret; + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); +- if (!qp) +- return -EINVAL; ++ if (IS_ERR(qp)) ++ return PTR_ERR(qp); + + obj = qp->uobject; + mutex_lock(&obj->mcast_lock); +@@ -2667,8 +2675,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, + UVERBS_OBJECT_FLOW_ACTION, + kern_spec->action.handle, + attrs); +- if (!ib_spec->action.act) +- return -EINVAL; ++ if (IS_ERR(ib_spec->action.act)) ++ return PTR_ERR(ib_spec->action.act); + ib_spec->action.size = + sizeof(struct ib_flow_spec_action_handle); + flow_resources_add(uflow_res, +@@ -2685,8 +2693,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, + UVERBS_OBJECT_COUNTERS, + kern_spec->flow_count.handle, + attrs); +- if (!ib_spec->flow_count.counters) +- return -EINVAL; ++ if (IS_ERR(ib_spec->flow_count.counters)) ++ return PTR_ERR(ib_spec->flow_count.counters); + ib_spec->flow_count.size = + sizeof(struct ib_flow_spec_action_count); + flow_resources_add(uflow_res, +@@ -2904,14 +2912,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs) + return PTR_ERR(obj); + + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); +- if (!pd) { +- err = -EINVAL; ++ if (IS_ERR(pd)) { ++ err = PTR_ERR(pd); + goto err_uobj; + } + + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); +- if (!cq) { +- err = -EINVAL; ++ if (IS_ERR(cq)) { ++ err = PTR_ERR(cq); + goto err_put_pd; + } + +@@ -3012,8 +3020,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs) + return -EINVAL; + + wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); +- if (!wq) +- return -EINVAL; ++ if (IS_ERR(wq)) ++ return PTR_ERR(wq); + + if (cmd.attr_mask & IB_WQ_FLAGS) { + wq_attr.flags = cmd.flags; +@@ -3096,8 +3104,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs) + num_read_wqs++) { + wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, + wqs_handles[num_read_wqs], attrs); +- if (!wq) { +- err = -EINVAL; ++ if (IS_ERR(wq)) { ++ err = PTR_ERR(wq); + goto put_wqs; + } + +@@ -3252,8 +3260,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs) + } + + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); +- if (!qp) { +- err = -EINVAL; ++ if (IS_ERR(qp)) { ++ err = PTR_ERR(qp); + goto err_uobj; + } + +@@ -3399,15 +3407,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, + if (ib_srq_has_cq(cmd->srq_type)) { + attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, + cmd->cq_handle, attrs); +- if (!attr.ext.cq) { +- ret = -EINVAL; ++ if 
(IS_ERR(attr.ext.cq)) { ++ ret = PTR_ERR(attr.ext.cq); + goto err_put_xrcd; + } + } + + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); +- if (!pd) { +- ret = -EINVAL; ++ if (IS_ERR(pd)) { ++ ret = PTR_ERR(pd); + goto err_put_cq; + } + +@@ -3514,8 +3522,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs) + return ret; + + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); +- if (!srq) +- return -EINVAL; ++ if (IS_ERR(srq)) ++ return PTR_ERR(srq); + + attr.max_wr = cmd.max_wr; + attr.srq_limit = cmd.srq_limit; +@@ -3542,8 +3550,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs) + return ret; + + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); +- if (!srq) +- return -EINVAL; ++ if (IS_ERR(srq)) ++ return PTR_ERR(srq); + + ret = ib_query_srq(srq, &attr); + +@@ -3668,8 +3676,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs) + return -EOPNOTSUPP; + + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); +- if (!cq) +- return -EINVAL; ++ if (IS_ERR(cq)) ++ return PTR_ERR(cq); + + ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period); + +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index ba05de0380e96e..6567d437512808 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -3029,22 +3029,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start); + bool __rdma_block_iter_next(struct ib_block_iter *biter) + { + unsigned int block_offset; +- unsigned int sg_delta; ++ unsigned int delta; + + if (!biter->__sg_nents || !biter->__sg) + return false; + + biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; + block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); +- sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; ++ delta = BIT_ULL(biter->__pg_bit) - block_offset; + +- if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { +- biter->__sg_advance += sg_delta; +- } else { ++ while (biter->__sg_nents && biter->__sg && ++ sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) { ++ delta -= sg_dma_len(biter->__sg) - biter->__sg_advance; + biter->__sg_advance = 0; + biter->__sg = sg_next(biter->__sg); + biter->__sg_nents--; + } ++ biter->__sg_advance += delta; + + return true; + } +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index c65321964131cf..e6fed973ea7411 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -289,6 +289,8 @@ static const struct xpad_device { + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, ++ { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, ++ { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, + { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, +@@ -353,6 +355,7 @@ static const struct xpad_device { + { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, + { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, ++ { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + 
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, +diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c +index cbf0c46015125a..6c0777a3c57b79 100644 +--- a/drivers/iommu/amd/io_pgtable_v2.c ++++ b/drivers/iommu/amd/io_pgtable_v2.c +@@ -259,7 +259,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd, + iova, map_size, gfp, &updated); + if (!pte) { +- ret = -EINVAL; ++ ret = -ENOMEM; + goto out; + } + +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c +index 2da969fc899004..3f7fcf1801a97a 100644 +--- a/drivers/iommu/dma-iommu.c ++++ b/drivers/iommu/dma-iommu.c +@@ -1716,7 +1716,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) + static DEFINE_MUTEX(msi_prepare_lock); /* see below */ + + if (!domain || !domain->iova_cookie) { +- desc->iommu_cookie = NULL; ++ msi_desc_set_iommu_msi_iova(desc, 0, 0); + return 0; + } + +@@ -1728,11 +1728,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) + mutex_lock(&msi_prepare_lock); + msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); + mutex_unlock(&msi_prepare_lock); +- +- msi_desc_set_iommu_cookie(desc, msi_page); +- + if (!msi_page) + return -ENOMEM; ++ ++ msi_desc_set_iommu_msi_iova( ++ desc, msi_page->iova, ++ ilog2(cookie_msi_granule(domain->iova_cookie))); + return 0; + } + +@@ -1743,18 +1744,15 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) + */ + void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) + { +- struct device *dev = msi_desc_to_dev(desc); +- const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); +- const struct iommu_dma_msi_page *msi_page; ++#ifdef CONFIG_IRQ_MSI_IOMMU ++ if (desc->iommu_msi_shift) { ++ u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift; + +- msi_page = msi_desc_get_iommu_cookie(desc); +- +- if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) +- return; +- +- msg->address_hi = upper_32_bits(msi_page->iova); +- msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; +- msg->address_lo += lower_32_bits(msi_page->iova); ++ msg->address_hi = upper_32_bits(msi_iova); ++ msg->address_lo = lower_32_bits(msi_iova) | ++ (msg->address_lo & ((1 << desc->iommu_msi_shift) - 1)); ++ } ++#endif + } + + static int iommu_dma_init(void) +diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c +index e1a81e0109e8a5..c0aa34b1d0e2dc 100644 +--- a/drivers/leds/rgb/leds-pwm-multicolor.c ++++ b/drivers/leds/rgb/leds-pwm-multicolor.c +@@ -135,8 +135,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev) + + /* init the multicolor's LED class device */ + cdev = &priv->mc_cdev.led_cdev; +- fwnode_property_read_u32(mcnode, "max-brightness", ++ ret = fwnode_property_read_u32(mcnode, "max-brightness", + &cdev->max_brightness); ++ if (ret) ++ goto release_mcnode; ++ + cdev->flags = LED_CORE_SUSPENDRESUME; + cdev->brightness_set_blocking = led_pwm_mc_set; + +diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c +index 79719fc8a08fb4..f8912fa60c4988 100644 +--- a/drivers/leds/trigger/ledtrig-netdev.c ++++ b/drivers/leds/trigger/ledtrig-netdev.c +@@ -54,6 +54,7 @@ struct led_netdev_data { + unsigned int last_activity; + + 
unsigned long mode; ++ unsigned long blink_delay; + int link_speed; + u8 duplex; + +@@ -69,6 +70,10 @@ static void set_baseline_state(struct led_netdev_data *trigger_data) + /* Already validated, hw control is possible with the requested mode */ + if (trigger_data->hw_control) { + led_cdev->hw_control_set(led_cdev, trigger_data->mode); ++ if (led_cdev->blink_set) { ++ led_cdev->blink_set(led_cdev, &trigger_data->blink_delay, ++ &trigger_data->blink_delay); ++ } + + return; + } +@@ -386,10 +391,11 @@ static ssize_t interval_store(struct device *dev, + size_t size) + { + struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); ++ struct led_classdev *led_cdev = trigger_data->led_cdev; + unsigned long value; + int ret; + +- if (trigger_data->hw_control) ++ if (trigger_data->hw_control && !led_cdev->blink_set) + return -EINVAL; + + ret = kstrtoul(buf, 0, &value); +@@ -398,9 +404,13 @@ static ssize_t interval_store(struct device *dev, + + /* impose some basic bounds on the timer interval */ + if (value >= 5 && value <= 10000) { +- cancel_delayed_work_sync(&trigger_data->work); ++ if (trigger_data->hw_control) { ++ trigger_data->blink_delay = value; ++ } else { ++ cancel_delayed_work_sync(&trigger_data->work); + +- atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); ++ atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); ++ } + set_baseline_state(trigger_data); /* resets timer */ + } + +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c +index ebff3baf304515..f13d705f7861af 100644 +--- a/drivers/mailbox/mailbox.c ++++ b/drivers/mailbox/mailbox.c +@@ -415,11 +415,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) + + mutex_lock(&con_mutex); + +- if (of_parse_phandle_with_args(dev->of_node, "mboxes", +- "#mbox-cells", index, &spec)) { ++ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", ++ index, &spec); ++ if (ret) { + dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); + mutex_unlock(&con_mutex); +- return ERR_PTR(-ENODEV); ++ return ERR_PTR(ret); + } + + chan = ERR_PTR(-EPROBE_DEFER); +diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c +index f8215a8f656a46..49254d99a8ad68 100644 +--- a/drivers/mailbox/pcc.c ++++ b/drivers/mailbox/pcc.c +@@ -419,8 +419,12 @@ int pcc_mbox_ioremap(struct mbox_chan *chan) + return -1; + pchan_info = chan->con_priv; + pcc_mbox_chan = &pchan_info->chan; +- pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr, +- pcc_mbox_chan->shmem_size); ++ ++ pcc_mbox_chan->shmem = acpi_os_ioremap(pcc_mbox_chan->shmem_base_addr, ++ pcc_mbox_chan->shmem_size); ++ if (!pcc_mbox_chan->shmem) ++ return -ENXIO; ++ + return 0; + } + EXPORT_SYMBOL_GPL(pcc_mbox_ioremap); +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index c5851c9f7ec041..0d002d50329da9 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -2903,6 +2903,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache) + return to_cblock(size); + } + ++static bool can_resume(struct cache *cache) ++{ ++ /* ++ * Disallow retrying the resume operation for devices that failed the ++ * first resume attempt, as the failure leaves the policy object partially ++ * initialized. Retrying could trigger BUG_ON when loading cache mappings ++ * into the incomplete policy object. 
++ */ ++ if (cache->sized && !cache->loaded_mappings) { ++ if (get_cache_mode(cache) != CM_WRITE) ++ DMERR("%s: unable to resume a failed-loaded cache, please check metadata.", ++ cache_device_name(cache)); ++ else ++ DMERR("%s: unable to resume cache due to missing proper cache table reload", ++ cache_device_name(cache)); ++ return false; ++ } ++ ++ return true; ++} ++ + static bool can_resize(struct cache *cache, dm_cblock_t new_size) + { + if (from_cblock(new_size) > from_cblock(cache->cache_size)) { +@@ -2951,6 +2972,9 @@ static int cache_preresume(struct dm_target *ti) + struct cache *cache = ti->private; + dm_cblock_t csize = get_cache_dev_size(cache); + ++ if (!can_resume(cache)) ++ return -EINVAL; ++ + /* + * Check to see if the cache has resized. + */ +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 7a33da2dd64b12..bf2ade89c8c2dc 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -669,6 +669,10 @@ int dm_table_add_target(struct dm_table *t, const char *type, + DMERR("%s: zero-length target", dm_device_name(t->md)); + return -EINVAL; + } ++ if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) { ++ DMERR("%s: too large device", dm_device_name(t->md)); ++ return -EINVAL; ++ } + + ti->type = dm_get_target_type(type); + if (!ti->type) { +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 5dd0a42463a2b8..9ea868bd0d1293 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1536,14 +1536,18 @@ static void __send_empty_flush(struct clone_info *ci) + { + struct dm_table *t = ci->map; + struct bio flush_bio; ++ blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; ++ ++ if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) == ++ (REQ_IDLE | REQ_SYNC)) ++ opf |= REQ_IDLE; + + /* + * Use an on-stack bio for this, it's safe since we don't + * need to reference it after submit. It's just used as + * the basis for the clone(s). 
+ */ +- bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, +- REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); ++ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf); + + ci->bio = &flush_bio; + ci->sector_count = 0; +diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c +index 99ba925e8ec8e6..114ac0c263fb2b 100644 +--- a/drivers/media/i2c/adv7180.c ++++ b/drivers/media/i2c/adv7180.c +@@ -194,6 +194,7 @@ struct adv7180_state; + #define ADV7180_FLAG_V2 BIT(1) + #define ADV7180_FLAG_MIPI_CSI2 BIT(2) + #define ADV7180_FLAG_I2P BIT(3) ++#define ADV7180_FLAG_TEST_PATTERN BIT(4) + + struct adv7180_chip_info { + unsigned int flags; +@@ -673,11 +674,15 @@ static int adv7180_init_controls(struct adv7180_state *state) + ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF); + v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL); + +- v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops, +- V4L2_CID_TEST_PATTERN, +- ARRAY_SIZE(test_pattern_menu) - 1, +- 0, ARRAY_SIZE(test_pattern_menu) - 1, +- test_pattern_menu); ++ if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) { ++ v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, ++ &adv7180_ctrl_ops, ++ V4L2_CID_TEST_PATTERN, ++ ARRAY_SIZE(test_pattern_menu) - 1, ++ 0, ++ ARRAY_SIZE(test_pattern_menu) - 1, ++ test_pattern_menu); ++ } + + state->sd.ctrl_handler = &state->ctrl_hdl; + if (state->ctrl_hdl.error) { +@@ -1209,7 +1214,7 @@ static const struct adv7180_chip_info adv7182_info = { + }; + + static const struct adv7180_chip_info adv7280_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN3) | +@@ -1223,7 +1228,8 @@ static const struct adv7180_chip_info adv7280_info = { + }; + + static const struct adv7180_chip_info adv7280_m_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P | ++ ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN3) | +@@ -1244,7 +1250,8 @@ static const struct adv7180_chip_info adv7280_m_info = { + }; + + static const struct adv7180_chip_info adv7281_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ++ ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN7) | +@@ -1259,7 +1266,8 @@ static const struct adv7180_chip_info adv7281_info = { + }; + + static const struct adv7180_chip_info adv7281_m_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ++ ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN3) | +@@ -1279,7 +1287,8 @@ static const struct adv7180_chip_info adv7281_m_info = { + }; + + static const struct adv7180_chip_info adv7281_ma_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ++ ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN3) | +@@ -1304,7 +1313,7 @@ static const struct adv7180_chip_info adv7281_ma_info = { + }; + + static const struct 
adv7180_chip_info adv7282_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN7) | +@@ -1319,7 +1328,8 @@ static const struct adv7180_chip_info adv7282_info = { + }; + + static const struct adv7180_chip_info adv7282_m_info = { +- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P, ++ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P | ++ ADV7180_FLAG_TEST_PATTERN, + .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | + BIT(ADV7182_INPUT_CVBS_AIN2) | + BIT(ADV7182_INPUT_CVBS_AIN3) | +diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c +index a14e571dc62bc5..a3d5a8a7c660b0 100644 +--- a/drivers/media/i2c/imx219.c ++++ b/drivers/media/i2c/imx219.c +@@ -77,7 +77,7 @@ + #define IMX219_VTS_30FPS_640x480 0x06e3 + #define IMX219_VTS_MAX 0xffff + +-#define IMX219_VBLANK_MIN 4 ++#define IMX219_VBLANK_MIN 32 + + /*Frame Length Line*/ + #define IMX219_FLL_MIN 0x08a6 +diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c +index 566f5eaddd572e..b12a6bd42102e9 100644 +--- a/drivers/media/i2c/tc358746.c ++++ b/drivers/media/i2c/tc358746.c +@@ -460,24 +460,20 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746) + return err; + } + +-/* Use MHz as base so the div needs no u64 */ +-static u32 tc358746_cfg_to_cnt(unsigned int cfg_val, +- unsigned int clk_mhz, +- unsigned int time_base) ++static u32 tc358746_cfg_to_cnt(unsigned long cfg_val, unsigned long clk_hz, ++ unsigned long long time_base) + { +- return DIV_ROUND_UP(cfg_val * clk_mhz, time_base); ++ return div64_u64((u64)cfg_val * clk_hz + time_base - 1, time_base); + } + +-static u32 tc358746_ps_to_cnt(unsigned int cfg_val, +- unsigned int clk_mhz) ++static u32 tc358746_ps_to_cnt(unsigned long cfg_val, unsigned long clk_hz) + { +- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, USEC_PER_SEC); ++ return tc358746_cfg_to_cnt(cfg_val, clk_hz, PSEC_PER_SEC); + } + +-static u32 tc358746_us_to_cnt(unsigned int cfg_val, +- unsigned int clk_mhz) ++static u32 tc358746_us_to_cnt(unsigned long cfg_val, unsigned long clk_hz) + { +- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, 1); ++ return tc358746_cfg_to_cnt(cfg_val, clk_hz, USEC_PER_SEC); + } + + static int tc358746_apply_dphy_config(struct tc358746 *tc358746) +@@ -492,7 +488,6 @@ static int tc358746_apply_dphy_config(struct tc358746 *tc358746) + + /* The hs_byte_clk is also called SYSCLK in the excel sheet */ + hs_byte_clk = cfg->hs_clk_rate / 8; +- hs_byte_clk /= HZ_PER_MHZ; + hf_clk = hs_byte_clk / 2; + + val = tc358746_us_to_cnt(cfg->init, hf_clk) - 1; +diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c +index 6360314f04a636..b90e2e690f3aa1 100644 +--- a/drivers/media/platform/qcom/camss/camss-csid.c ++++ b/drivers/media/platform/qcom/camss/camss-csid.c +@@ -239,11 +239,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) + int ret; + + if (enable) { +- ret = v4l2_ctrl_handler_setup(&csid->ctrls); +- if (ret < 0) { +- dev_err(csid->camss->dev, +- "could not sync v4l2 controls: %d\n", ret); +- return ret; ++ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) { ++ ret = v4l2_ctrl_handler_setup(&csid->ctrls); ++ if (ret < 0) { ++ dev_err(csid->camss->dev, ++ "could not sync v4l2 controls: %d\n", ret); ++ return ret; ++ } + } + + if (!csid->testgen.enabled && +@@ 
-318,7 +320,8 @@ static void csid_try_format(struct csid_device *csid, + break; + + case MSM_CSID_PAD_SRC: +- if (csid->testgen_mode->cur.val == 0) { ++ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED || ++ csid->testgen_mode->cur.val == 0) { + /* Test generator is disabled, */ + /* keep pad formats in sync */ + u32 code = fmt->code; +@@ -368,7 +371,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd, + + code->code = csid->formats[code->index].code; + } else { +- if (csid->testgen_mode->cur.val == 0) { ++ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED || ++ csid->testgen_mode->cur.val == 0) { + struct v4l2_mbus_framefmt *sink_fmt; + + sink_fmt = __csid_get_format(csid, sd_state, +@@ -750,7 +754,8 @@ static int csid_link_setup(struct media_entity *entity, + + /* If test generator is enabled */ + /* do not allow a link from CSIPHY to CSID */ +- if (csid->testgen_mode->cur.val != 0) ++ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED && ++ csid->testgen_mode->cur.val != 0) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(remote->entity); +@@ -843,24 +848,27 @@ int msm_csid_register_entity(struct csid_device *csid, + MSM_CSID_NAME, csid->id); + v4l2_set_subdevdata(sd, csid); + +- ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); +- if (ret < 0) { +- dev_err(dev, "Failed to init ctrl handler: %d\n", ret); +- return ret; +- } ++ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) { ++ ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); ++ if (ret < 0) { ++ dev_err(dev, "Failed to init ctrl handler: %d\n", ret); ++ return ret; ++ } + +- csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls, +- &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, +- csid->testgen.nmodes, 0, 0, +- csid->testgen.modes); ++ csid->testgen_mode = ++ v4l2_ctrl_new_std_menu_items(&csid->ctrls, ++ &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, ++ csid->testgen.nmodes, 0, 0, ++ csid->testgen.modes); + +- if (csid->ctrls.error) { +- dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); +- ret = csid->ctrls.error; +- goto free_ctrl; +- } ++ if (csid->ctrls.error) { ++ dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); ++ ret = csid->ctrls.error; ++ goto free_ctrl; ++ } + +- csid->subdev.ctrl_handler = &csid->ctrls; ++ csid->subdev.ctrl_handler = &csid->ctrls; ++ } + + ret = csid_init_formats(sd, NULL); + if (ret < 0) { +@@ -891,7 +899,8 @@ int msm_csid_register_entity(struct csid_device *csid, + media_cleanup: + media_entity_cleanup(&sd->entity); + free_ctrl: +- v4l2_ctrl_handler_free(&csid->ctrls); ++ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) ++ v4l2_ctrl_handler_free(&csid->ctrls); + + return ret; + } +@@ -904,5 +913,6 @@ void msm_csid_unregister_entity(struct csid_device *csid) + { + v4l2_device_unregister_subdev(&csid->subdev); + media_entity_cleanup(&csid->subdev.entity); +- v4l2_ctrl_handler_free(&csid->ctrls); ++ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) ++ v4l2_ctrl_handler_free(&csid->ctrls); + } +diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c +index 5dc1f908b49bd6..9aa484126a0dd6 100644 +--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c ++++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c +@@ -806,13 +806,12 @@ static int c8sectpfe_probe(struct platform_device *pdev) + } + tsin->i2c_adapter = + of_find_i2c_adapter_by_node(i2c_bus); ++ of_node_put(i2c_bus); + if (!tsin->i2c_adapter) { + dev_err(&pdev->dev, "No i2c adapter found\n"); +- 
of_node_put(i2c_bus); + ret = -ENODEV; + goto err_node_put; + } +- of_node_put(i2c_bus); + + /* Acquire reset GPIO and activate it */ + tsin->rst_gpio = devm_fwnode_gpiod_get(dev, +diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c +index 42048727d7ff35..b8cdffc9a1e9e9 100644 +--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c ++++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c +@@ -765,9 +765,14 @@ static int vivid_thread_vid_cap(void *data) + next_jiffies_since_start = jiffies_since_start; + + wait_jiffies = next_jiffies_since_start - jiffies_since_start; +- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && +- !kthread_should_stop()) +- schedule(); ++ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) ++ continue; ++ ++ wait_queue_head_t wait; ++ ++ init_waitqueue_head(&wait); ++ wait_event_interruptible_timeout(wait, kthread_should_stop(), ++ cur_jiffies + wait_jiffies - jiffies); + } + dprintk(dev, 1, "Video Capture Thread End\n"); + return 0; +diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c +index fac6208b51da84..015a7b166a1e61 100644 +--- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c ++++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c +@@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data) + next_jiffies_since_start = jiffies_since_start; + + wait_jiffies = next_jiffies_since_start - jiffies_since_start; +- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && +- !kthread_should_stop()) +- schedule(); ++ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) ++ continue; ++ ++ wait_queue_head_t wait; ++ ++ init_waitqueue_head(&wait); ++ wait_event_interruptible_timeout(wait, kthread_should_stop(), ++ cur_jiffies + wait_jiffies - jiffies); + } + dprintk(dev, 1, "Video Output Thread End\n"); + return 0; +diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c +index fa711ee36a3fbc..c862689786b69c 100644 +--- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c ++++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c +@@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data) + next_jiffies_since_start = jiffies_since_start; + + wait_jiffies = next_jiffies_since_start - jiffies_since_start; +- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && +- !kthread_should_stop()) +- schedule(); ++ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) ++ continue; ++ ++ wait_queue_head_t wait; ++ ++ init_waitqueue_head(&wait); ++ wait_event_interruptible_timeout(wait, kthread_should_stop(), ++ cur_jiffies + wait_jiffies - jiffies); + } + dprintk(dev, 1, "Touch Capture Thread End\n"); + return 0; +diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c +index a81f26b769883f..1dd59c710dae74 100644 +--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c ++++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c +@@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data) + next_jiffies_since_start = jiffies_since_start; + + wait_jiffies = next_jiffies_since_start - jiffies_since_start; +- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && +- !kthread_should_stop()) +- schedule(); ++ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) ++ continue; ++ ++ wait_queue_head_t wait; ++ ++ init_waitqueue_head(&wait); ++ 
wait_event_interruptible_timeout(wait, kthread_should_stop(), ++ cur_jiffies + wait_jiffies - jiffies); + } + dprintk(dev, 1, "SDR Capture Thread End\n"); + return 0; +diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c +index c5e21785fafe28..02343e88cc618c 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-417.c ++++ b/drivers/media/usb/cx231xx/cx231xx-417.c +@@ -1722,6 +1722,8 @@ static void cx231xx_video_dev_init( + vfd->lock = &dev->lock; + vfd->release = video_device_release_empty; + vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl; ++ vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | ++ V4L2_CAP_VIDEO_CAPTURE; + video_set_drvdata(vfd, dev); + if (dev->tuner_type == TUNER_ABSENT) { + v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY); +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c +index 028c4a5049af97..5926a9dfb0b1f8 100644 +--- a/drivers/media/usb/uvc/uvc_ctrl.c ++++ b/drivers/media/usb/uvc/uvc_ctrl.c +@@ -815,6 +815,25 @@ static inline void uvc_clear_bit(u8 *data, int bit) + data[bit >> 3] &= ~(1 << (bit & 7)); + } + ++static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val) ++{ ++ unsigned int i; ++ ++ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { ++ u32 menu_value; ++ ++ if (!test_bit(i, &mapping->menu_mask)) ++ continue; ++ ++ menu_value = uvc_mapping_get_menu_value(mapping, i); ++ ++ if (menu_value == val) ++ return i; ++ } ++ ++ return val; ++} ++ + /* + * Extract the bit string specified by mapping->offset and mapping->size + * from the little-endian data stored at 'data' and return the result as +@@ -849,6 +868,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, + if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) + value |= -(value & (1 << (mapping->size - 1))); + ++ /* If it is a menu, convert from uvc to v4l2. 
*/ ++ if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) ++ return value; ++ ++ switch (query) { ++ case UVC_GET_CUR: ++ case UVC_GET_DEF: ++ return uvc_menu_to_v4l2_menu(mapping, value); ++ } ++ + return value; + } + +@@ -1013,32 +1042,6 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain, + return 0; + } + +-static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping, +- const u8 *data) +-{ +- s32 value = mapping->get(mapping, UVC_GET_CUR, data); +- +- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { +- unsigned int i; +- +- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { +- u32 menu_value; +- +- if (!test_bit(i, &mapping->menu_mask)) +- continue; +- +- menu_value = uvc_mapping_get_menu_value(mapping, i); +- +- if (menu_value == value) { +- value = i; +- break; +- } +- } +- } +- +- return value; +-} +- + static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain, + struct uvc_control *ctrl) + { +@@ -1089,8 +1092,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain, + if (ret < 0) + return ret; + +- *value = __uvc_ctrl_get_value(mapping, +- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); ++ *value = mapping->get(mapping, UVC_GET_CUR, ++ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); + + return 0; + } +@@ -1240,7 +1243,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, + { + struct uvc_control_mapping *master_map = NULL; + struct uvc_control *master_ctrl = NULL; +- unsigned int i; + + memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl)); + v4l2_ctrl->id = mapping->id; +@@ -1283,21 +1285,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, + v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1; + v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1; + v4l2_ctrl->step = 1; +- +- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { +- u32 menu_value; +- +- if (!test_bit(i, &mapping->menu_mask)) +- continue; +- +- menu_value = uvc_mapping_get_menu_value(mapping, i); +- +- if (menu_value == v4l2_ctrl->default_value) { +- v4l2_ctrl->default_value = i; +- break; +- } +- } +- + return 0; + + case V4L2_CTRL_TYPE_BOOLEAN: +@@ -1580,7 +1567,7 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain, + uvc_ctrl_set_handle(handle, ctrl, NULL); + + list_for_each_entry(mapping, &ctrl->info.mappings, list) { +- s32 value = __uvc_ctrl_get_value(mapping, data); ++ s32 value = mapping->get(mapping, UVC_GET_CUR, data); + + /* + * handle may be NULL here if the device sends auto-update +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c +index 7bcd706281daf3..cb7d9fb589fca9 100644 +--- a/drivers/media/usb/uvc/uvc_v4l2.c ++++ b/drivers/media/usb/uvc/uvc_v4l2.c +@@ -106,6 +106,12 @@ static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain, + struct uvc_control_mapping *map; + int ret; + ++ if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) { ++ uvc_dbg(chain->dev, CONTROL, ++ "Unsupported UVC data type %u\n", xmap->data_type); ++ return -EINVAL; ++ } ++ + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; +diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c +index 5f115438d07228..cb3ad72a3e54a6 100644 +--- a/drivers/media/v4l2-core/v4l2-subdev.c ++++ b/drivers/media/v4l2-core/v4l2-subdev.c +@@ -351,6 +351,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd, + static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_config *config) + { ++ memset(config, 0, sizeof(*config)); ++ + return check_pad(sd, pad) ? 
: + sd->ops->pad->get_mbus_config(sd, pad, config); + } +diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c +index 0e0c42e4fdfc75..72a5f51fe32a52 100644 +--- a/drivers/mfd/tps65219.c ++++ b/drivers/mfd/tps65219.c +@@ -228,7 +228,6 @@ static struct regmap_irq_chip tps65219_irq_chip = { + static int tps65219_probe(struct i2c_client *client) + { + struct tps65219 *tps; +- unsigned int chipid; + bool pwr_button; + int ret; + +@@ -253,12 +252,6 @@ static int tps65219_probe(struct i2c_client *client) + if (ret) + return ret; + +- ret = regmap_read(tps->regmap, TPS65219_REG_TI_DEV_ID, &chipid); +- if (ret) { +- dev_err(tps->dev, "Failed to read device ID: %d\n", ret); +- return ret; +- } +- + ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, + tps65219_cells, ARRAY_SIZE(tps65219_cells), + NULL, 0, regmap_irq_get_domain(tps->irq_data)); +diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c +index 698408e8bad035..381a61adf06eee 100644 +--- a/drivers/mmc/host/dw_mmc-exynos.c ++++ b/drivers/mmc/host/dw_mmc-exynos.c +@@ -28,6 +28,8 @@ enum dw_mci_exynos_type { + DW_MCI_TYPE_EXYNOS5420_SMU, + DW_MCI_TYPE_EXYNOS7, + DW_MCI_TYPE_EXYNOS7_SMU, ++ DW_MCI_TYPE_EXYNOS7870, ++ DW_MCI_TYPE_EXYNOS7870_SMU, + DW_MCI_TYPE_ARTPEC8, + }; + +@@ -70,6 +72,12 @@ static struct dw_mci_exynos_compatible { + }, { + .compatible = "samsung,exynos7-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU, ++ }, { ++ .compatible = "samsung,exynos7870-dw-mshc", ++ .ctrl_type = DW_MCI_TYPE_EXYNOS7870, ++ }, { ++ .compatible = "samsung,exynos7870-dw-mshc-smu", ++ .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU, + }, { + .compatible = "axis,artpec8-dw-mshc", + .ctrl_type = DW_MCI_TYPE_ARTPEC8, +@@ -86,6 +94,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host) + return EXYNOS4210_FIXED_CIU_CLK_DIV; + else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1; + else +@@ -101,7 +111,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host) + * set for non-ecryption mode at this time. 
+ */ + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU || +- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) { ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) { + mci_writel(host, MPSBEGIN0, 0); + mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX); + mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT | +@@ -127,6 +138,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host) + DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl); + } + ++ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) { ++ /* Quirk needed for certain Exynos SoCs */ ++ host->quirks |= DW_MMC_QUIRK_FIFO64_32; ++ } ++ + if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) { + /* Quirk needed for the ARTPEC-8 SoC */ + host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT; +@@ -144,6 +161,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + clksel = mci_readl(host, CLKSEL64); + else +@@ -153,6 +172,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + mci_writel(host, CLKSEL64, clksel); + else +@@ -223,6 +244,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + clksel = mci_readl(host, CLKSEL64); + else +@@ -231,6 +254,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev) + if (clksel & SDMMC_CLKSEL_WAKEUP_INT) { + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + mci_writel(host, CLKSEL64, clksel); + else +@@ -410,6 +435,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64)); + else +@@ -423,6 +450,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + clksel = mci_readl(host, CLKSEL64); + else +@@ -430,6 +459,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) + clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample); + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + mci_writel(host, 
CLKSEL64, clksel); + else +@@ -444,6 +475,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + clksel = mci_readl(host, CLKSEL64); + else +@@ -454,6 +487,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || ++ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || + priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) + mci_writel(host, CLKSEL64, clksel); + else +@@ -633,6 +668,10 @@ static const struct of_device_id dw_mci_exynos_match[] = { + .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos7-dw-mshc-smu", + .data = &exynos_drv_data, }, ++ { .compatible = "samsung,exynos7870-dw-mshc", ++ .data = &exynos_drv_data, }, ++ { .compatible = "samsung,exynos7870-dw-mshc-smu", ++ .data = &exynos_drv_data, }, + { .compatible = "axis,artpec8-dw-mshc", + .data = &artpec_drv_data, }, + {}, +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c +index 6b351810a301c9..dbfe0a5324eaf0 100644 +--- a/drivers/mmc/host/sdhci-pci-core.c ++++ b/drivers/mmc/host/sdhci-pci-core.c +@@ -608,8 +608,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, + + sdhci_set_power(host, mode, vdd); + +- if (mode == MMC_POWER_OFF) ++ if (mode == MMC_POWER_OFF) { ++ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || ++ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD) ++ usleep_range(15000, 17500); + return; ++ } + + /* + * Bus power might not enable after D3 -> D0 transition due to the +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 9796a3cb3ca62c..f32429ff905ff6 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -2035,10 +2035,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) + + host->mmc->actual_clock = 0; + +- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ if (clk & SDHCI_CLOCK_CARD_EN) ++ sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, ++ SDHCI_CLOCK_CONTROL); + +- if (clock == 0) ++ if (clock == 0) { ++ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + return; ++ } + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_enable_clk(host, clk); +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 7eb62fe55947fe..56c241246d1af4 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -2469,7 +2469,7 @@ static int __bond_release_one(struct net_device *bond_dev, + + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + +- if (!all && (!bond->params.fail_over_mac || ++ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE || + BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { + if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && + bond_has_slaves(bond)) +diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c +index 7f405bcf11c23e..603680792f1ff6 100644 +--- a/drivers/net/can/c_can/c_can_platform.c ++++ b/drivers/net/can/c_can/c_can_platform.c +@@ -333,7 +333,7 @@ static int c_can_plat_probe(struct platform_device *pdev) + /* Check if we need custom RAMINIT via syscon. 
Mostly for TI + * platforms. Only supported with DT boot. + */ +- if (np && of_property_read_bool(np, "syscon-raminit")) { ++ if (np && of_property_present(np, "syscon-raminit")) { + u32 id; + struct c_can_raminit *raminit = &priv->raminit_sys; + +diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c +index c490b4ba065ba4..73b448cd00f29f 100644 +--- a/drivers/net/can/kvaser_pciefd.c ++++ b/drivers/net/can/kvaser_pciefd.c +@@ -1137,7 +1137,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + skb = alloc_canfd_skb(priv->dev, &cf); + if (!skb) { + priv->dev->stats.rx_dropped++; +- return -ENOMEM; ++ return 0; + } + + cf->len = can_fd_dlc2len(dlc); +@@ -1149,7 +1149,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); + if (!skb) { + priv->dev->stats.rx_dropped++; +- return -ENOMEM; ++ return 0; + } + can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); + } +@@ -1167,7 +1167,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + priv->dev->stats.rx_packets++; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + +- return netif_rx(skb); ++ netif_rx(skb); ++ ++ return 0; + } + + static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, +@@ -1580,24 +1582,28 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) + return res; + } + +-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) ++static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) + { ++ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; + u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + +- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) ++ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); ++ ++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + kvaser_pciefd_read_buffer(pcie, 0); ++ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ ++ } + +- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) ++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + kvaser_pciefd_read_buffer(pcie, 1); ++ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ ++ } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF1) + dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); +- +- iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); +- return irq; + } + + static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) +@@ -1625,29 +1631,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) + struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; + const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; + u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); +- u32 srb_irq = 0; +- u32 srb_release = 0; + int i; + + if (!(pci_irq & irq_mask->all)) + return IRQ_NONE; + ++ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); ++ + if (pci_irq & irq_mask->kcan_rx0) +- srb_irq = kvaser_pciefd_receive_irq(pcie); ++ kvaser_pciefd_receive_irq(pcie); + + for (i = 0; i < pcie->nr_channels; i++) { + if (pci_irq & irq_mask->kcan_tx[i]) + kvaser_pciefd_transmit_irq(pcie->can[i]); + } + +- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) +- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0; +- +- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) +- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1; +- +- if 
(srb_release) +- iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); ++ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + + return IRQ_HANDLED; + } +@@ -1667,13 +1666,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) + } + } + ++static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) ++{ ++ unsigned int i; ++ ++ /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ ++ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); ++ for (i = 0; i < pcie->nr_channels; ++i) ++ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); ++} ++ + static int kvaser_pciefd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) + { + int err; + struct kvaser_pciefd *pcie; + const struct kvaser_pciefd_irq_mask *irq_mask; +- void __iomem *irq_en_base; + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) +@@ -1726,8 +1734,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + + /* Enable PCI interrupts */ +- irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); +- iowrite32(irq_mask->all, irq_en_base); ++ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + /* Ready the DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); +@@ -1741,8 +1748,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, + return 0; + + err_free_irq: +- /* Disable PCI interrupts */ +- iowrite32(0, irq_en_base); ++ kvaser_pciefd_disable_irq_srcs(pcie); + free_irq(pcie->pci->irq, pcie); + + err_teardown_can_ctrls: +@@ -1762,35 +1768,25 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, + return err; + } + +-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) +-{ +- int i; +- +- for (i = 0; i < pcie->nr_channels; i++) { +- struct kvaser_pciefd_can *can = pcie->can[i]; +- +- if (can) { +- iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +- unregister_candev(can->can.dev); +- del_timer(&can->bec_poll_timer); +- kvaser_pciefd_pwm_stop(can); +- free_candev(can->can.dev); +- } +- } +-} +- + static void kvaser_pciefd_remove(struct pci_dev *pdev) + { + struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); ++ unsigned int i; + +- kvaser_pciefd_remove_all_ctrls(pcie); ++ for (i = 0; i < pcie->nr_channels; ++i) { ++ struct kvaser_pciefd_can *can = pcie->can[i]; + +- /* Disable interrupts */ +- iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); +- iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); ++ unregister_candev(can->can.dev); ++ del_timer(&can->bec_poll_timer); ++ kvaser_pciefd_pwm_stop(can); ++ } + ++ kvaser_pciefd_disable_irq_srcs(pcie); + free_irq(pcie->pci->irq, pcie); + ++ for (i = 0; i < pcie->nr_channels; ++i) ++ free_candev(pcie->can[i]->can.dev); ++ + pci_iounmap(pdev, pcie->reg_base); + pci_release_regions(pdev); + pci_disable_device(pdev); +diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c +index 24c6622d36bd85..58ff2ec1d9757e 100644 +--- a/drivers/net/can/slcan/slcan-core.c ++++ b/drivers/net/can/slcan/slcan-core.c +@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi "); + #define SLCAN_CMD_LEN 1 + #define SLCAN_SFF_ID_LEN 3 + #define SLCAN_EFF_ID_LEN 8 ++#define SLCAN_DATA_LENGTH_LEN 1 ++#define SLCAN_ERROR_LEN 1 + #define SLCAN_STATE_LEN 1 + #define SLCAN_STATE_BE_RXCNT_LEN 3 + #define SLCAN_STATE_BE_TXCNT_LEN 3 +-#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN 
+ \ +- SLCAN_STATE_BE_RXCNT_LEN + \ +- SLCAN_STATE_BE_TXCNT_LEN) ++#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ ++ SLCAN_STATE_LEN + \ ++ SLCAN_STATE_BE_RXCNT_LEN + \ ++ SLCAN_STATE_BE_TXCNT_LEN) ++#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ ++ SLCAN_ERROR_LEN + \ ++ SLCAN_DATA_LENGTH_LEN) ++#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ ++ SLCAN_SFF_ID_LEN + \ ++ SLCAN_DATA_LENGTH_LEN) + struct slcan { + struct can_priv can; + +@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl) + u32 tmpid; + char *cmd = sl->rbuff; + ++ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) ++ return; ++ + skb = alloc_can_skb(sl->dev, &cf); + if (unlikely(!skb)) { + sl->dev->stats.rx_dropped++; +@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl) + return; + } + +- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) ++ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) + return; + + cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; +@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl) + bool rx_errors = false, tx_errors = false, rx_over_errors = false; + int i, len; + ++ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) ++ return; ++ + /* get len from sanitized ASCII value */ + len = cmd[1]; + if (len >= '0' && len < '9') +@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl) + static void slcan_unesc(struct slcan *sl, unsigned char s) + { + if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ +- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && +- sl->rcount > 4) ++ if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) + slcan_bump(sl); + + sl->rcount = 0; +diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c +index b3fa867c8ccd91..c2ef55cff6b3ec 100644 +--- a/drivers/net/ethernet/amd/pds_core/core.c ++++ b/drivers/net/ethernet/amd/pds_core/core.c +@@ -413,10 +413,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init) + if (err) + return err; + +- /* Scale the descriptor ring length based on number of CPUs and VFs */ +- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus()); +- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev); +- numdescs = roundup_pow_of_two(numdescs); ++ numdescs = PDSC_ADMINQ_MAX_LENGTH; + err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq", + PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR, + numdescs, +diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h +index 61ee607ee48ace..42137140850314 100644 +--- a/drivers/net/ethernet/amd/pds_core/core.h ++++ b/drivers/net/ethernet/amd/pds_core/core.h +@@ -16,7 +16,7 @@ + + #define PDSC_WATCHDOG_SECS 5 + #define PDSC_QUEUE_NAME_MAX_SZ 16 +-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */ ++#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */ + #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */ + #define PDSC_TEARDOWN_RECOVERY false + #define PDSC_TEARDOWN_REMOVING true +diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c +index 379d19d18dbed0..5808e3c73a8f43 100644 +--- a/drivers/net/ethernet/apm/xgene-v2/main.c ++++ b/drivers/net/ethernet/apm/xgene-v2/main.c +@@ -9,8 +9,6 @@ + + #include "main.h" + +-static const struct acpi_device_id xge_acpi_match[]; +- + static int xge_get_resources(struct xge_pdata *pdata) + { + struct platform_device *pdev; +@@ -733,7 +731,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match); + static struct platform_driver xge_driver = { + .driver = { + .name = 
"xgene-enet-v2", +- .acpi_match_table = ACPI_PTR(xge_acpi_match), ++ .acpi_match_table = xge_acpi_match, + }, + .probe = xge_probe, + .remove = xge_remove, +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c +index 8feb7d4226bb58..0c09d82dbf00d4 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c +@@ -1572,6 +1572,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, + } + } + ++static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first, ++ int rx_ring_last) ++{ ++ while (rx_ring_first != rx_ring_last) { ++ enetc_flip_rx_buff(rx_ring, ++ &rx_ring->rx_swbd[rx_ring_first]); ++ enetc_bdr_idx_inc(rx_ring, &rx_ring_first); ++ } ++} ++ + static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit, + struct bpf_prog *prog) +@@ -1687,11 +1697,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_failures++; + } else { +- while (orig_i != i) { +- enetc_flip_rx_buff(rx_ring, +- &rx_ring->rx_swbd[orig_i]); +- enetc_bdr_idx_inc(rx_ring, &orig_i); +- } ++ enetc_bulk_flip_buff(rx_ring, orig_i, i); + xdp_redirect_frm_cnt++; + rx_ring->stats.xdp_redirect++; + } +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 7261838a09db63..291c88a76a27f4 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1079,6 +1079,29 @@ static void fec_enet_enable_ring(struct net_device *ndev) + } + } + ++/* Whack a reset. We should wait for this. ++ * For i.MX6SX SOC, enet use AXI bus, we use disable MAC ++ * instead of reset MAC itself. ++ */ ++static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) ++{ ++ u32 val; ++ ++ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { ++ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || ++ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { ++ writel(0, fep->hwp + FEC_ECNTRL); ++ } else { ++ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); ++ udelay(10); ++ } ++ } else { ++ val = readl(fep->hwp + FEC_ECNTRL); ++ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); ++ writel(val, fep->hwp + FEC_ECNTRL); ++ } ++} ++ + /* + * This function is called to start or restart the FEC during a link + * change, transmit timeout, or to reconfigure the FEC. The network +@@ -1095,17 +1118,7 @@ fec_restart(struct net_device *ndev) + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); + +- /* Whack a reset. We should wait for this. +- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC +- * instead of reset MAC itself. +- */ +- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || +- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { +- writel(0, fep->hwp + FEC_ECNTRL); +- } else { +- writel(1, fep->hwp + FEC_ECNTRL); +- udelay(10); +- } ++ fec_ctrl_reset(fep, false); + + /* + * enet-mac reset will reset mac address registers too, +@@ -1359,22 +1372,7 @@ fec_stop(struct net_device *ndev) + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); + +- /* Whack a reset. We should wait for this. +- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC +- * instead of reset MAC itself. 
+- */ +- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { +- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { +- writel(0, fep->hwp + FEC_ECNTRL); +- } else { +- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); +- udelay(10); +- } +- } else { +- val = readl(fep->hwp + FEC_ECNTRL); +- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); +- writel(val, fep->hwp + FEC_ECNTRL); +- } ++ fec_ctrl_reset(fep, true); + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c +index 39b5f24be7e4fc..dd58b2372dc0c7 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c +@@ -3329,8 +3329,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) + ice_for_each_q_vector(vsi, q_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + +- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring) +- combined++; ++ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx); + } + + return combined; +diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c +index ad82ff7d199570..09f9c7ba52795b 100644 +--- a/drivers/net/ethernet/intel/ice/ice_irq.c ++++ b/drivers/net/ethernet/intel/ice/ice_irq.c +@@ -45,7 +45,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) + /** + * ice_get_irq_res - get an interrupt resource + * @pf: board private structure +- * @dyn_only: force entry to be dynamically allocated ++ * @dyn_allowed: allow entry to be dynamically allocated + * + * Allocate new irq entry in the free slot of the tracker. Since xarray + * is used, always allocate new entry at the lowest possible index. Set +@@ -53,11 +53,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) + * + * Returns allocated irq entry or NULL on failure. + */ +-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) ++static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, ++ bool dyn_allowed) + { +- struct xa_limit limit = { .max = pf->irq_tracker.num_entries, ++ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1, + .min = 0 }; +- unsigned int num_static = pf->irq_tracker.num_static; ++ unsigned int num_static = pf->irq_tracker.num_static - 1; + struct ice_irq_entry *entry; + unsigned int index; + int ret; +@@ -66,9 +67,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) + if (!entry) + return NULL; + +- /* skip preallocated entries if the caller says so */ +- if (dyn_only) +- limit.min = num_static; ++ /* only already allocated if the caller says so */ ++ if (!dyn_allowed) ++ limit.max = num_static; + + ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, + GFP_KERNEL); +@@ -78,7 +79,7 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) + entry = NULL; + } else { + entry->index = index; +- entry->dynamic = index >= num_static; ++ entry->dynamic = index > num_static; + } + + return entry; +@@ -272,7 +273,7 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) + /** + * ice_alloc_irq - Allocate new interrupt vector + * @pf: board private structure +- * @dyn_only: force dynamic allocation of the interrupt ++ * @dyn_allowed: allow dynamic allocation of the interrupt + * + * Allocate new interrupt vector for a given owner id. 
+ * return struct msi_map with interrupt details and track +@@ -285,20 +286,20 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) + * interrupt will be allocated with pci_msix_alloc_irq_at. + * + * Some callers may only support dynamically allocated interrupts. +- * This is indicated with dyn_only flag. ++ * This is indicated with dyn_allowed flag. + * + * On failure, return map with negative .index. The caller + * is expected to check returned map index. + * + */ +-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) ++struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed) + { + int sriov_base_vector = pf->sriov_base_vector; + struct msi_map map = { .index = -ENOENT }; + struct device *dev = ice_pf_to_dev(pf); + struct ice_irq_entry *entry; + +- entry = ice_get_irq_res(pf, dyn_only); ++ entry = ice_get_irq_res(pf, dyn_allowed); + if (!entry) + return map; + +diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c +index 4e675c7c199fa1..4db0b770420e65 100644 +--- a/drivers/net/ethernet/intel/ice/ice_lag.c ++++ b/drivers/net/ethernet/intel/ice/ice_lag.c +@@ -1229,12 +1229,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) + */ + if (!primary_lag) { + lag->primary = true; ++ if (!ice_is_switchdev_running(lag->pf)) ++ return; ++ + /* Configure primary's SWID to be shared */ + ice_lag_primary_swid(lag, true); + primary_lag = lag; + } else { + u16 swid; + ++ if (!ice_is_switchdev_running(primary_lag->pf)) ++ return; ++ + swid = primary_lag->pf->hw.port_info->sw_id; + ice_lag_set_swid(swid, lag, true); + ice_lag_add_prune_list(primary_lag, lag->pf); +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c +index 1fc4805353eb58..a6a290514e5484 100644 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c +@@ -587,6 +587,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) + return -ENOMEM; + } + ++ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); ++ + switch (vsi->type) { + case ICE_VSI_SWITCHDEV_CTRL: + /* Setup eswitch MSIX irq handler for VSI */ +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c +index e709b10a29761b..1edcf930318315 100644 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c +@@ -3769,7 +3769,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) + } + + ice_vfhw_mac_add(vf, &al->list[i]); +- vf->num_mac++; + break; + } + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +index 339be6950c0395..6302990e9a5ff8 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); + /* Supported devices */ + static const struct pci_device_id cgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, +- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, +- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) }, ++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, ++ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) }, ++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, ++ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) }, ++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, ++ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) }, ++ { 
PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, ++ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) }, ++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, ++ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) }, ++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, ++ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) }, + { 0, } /* end of table */ + }; + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +index a607c7294b0c59..9fbc071ef29b0c 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +@@ -30,6 +30,8 @@ + #define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00 + #define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 + #define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 ++#define PCI_SUBSYS_DEVID_CN20KA 0xC220 ++#define PCI_SUBSYS_DEVID_CNF20KA 0xC320 + + /* PCI BAR nos */ + #define PCI_AF_REG_BAR_NUM 0 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +index 0e74c5a2231e63..1e4cd4f7d0cfd4 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +@@ -13,19 +13,26 @@ + /* RVU LMTST */ + #define LMT_TBL_OP_READ 0 + #define LMT_TBL_OP_WRITE 1 +-#define LMT_MAP_TABLE_SIZE (128 * 1024) + #define LMT_MAPTBL_ENTRY_SIZE 16 ++#define LMT_MAX_VFS 256 ++ ++#define LMT_MAP_ENTRY_ENA BIT_ULL(20) ++#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) + + /* Function to perform operations (read/write) on lmtst map table */ + static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) + { + void __iomem *lmt_map_base; +- u64 tbl_base; ++ u64 tbl_base, cfg; ++ int pfs, vfs; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); ++ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); ++ vfs = 1 << (cfg & 0xF); ++ pfs = 1 << ((cfg >> 4) & 0x7); + +- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); ++ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + return -ENOMEM; +@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); ++ ++ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); ++ /* 2048 LMTLINES */ ++ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); ++ ++ writeq(cfg, (lmt_map_base + (index + 8))); ++ + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Write 1 for flush and read is being used as a + * barrier and sets up a data dependency. 
Write to 0 after a write +@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + #define LMT_MAP_TBL_W1_OFF 8 + static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) + { +- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + ++ return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; + } + +@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + + mutex_lock(&rvu->rsrc_lock); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); +- pf = rvu_get_pf(pcifunc) & 0x1F; ++ pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +index feca86e429df20..56dab11833b533 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +@@ -580,6 +580,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + u64 lmt_addr, val, tbl_base; + int pf, vf, num_vfs, hw_vfs; + void __iomem *lmt_map_base; ++ int apr_pfs, apr_vfs; + int buf_size = 10240; + size_t off = 0; + int index = 0; +@@ -595,8 +596,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + return -ENOMEM; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); ++ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); ++ apr_vfs = 1 << (val & 0xF); ++ apr_pfs = 1 << ((val >> 4) & 0x7); + +- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); ++ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * ++ LMT_MAPTBL_ENTRY_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + kfree(buf); +@@ -618,7 +623,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", + pf); + +- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; ++ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; + off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", + (tbl_base + index)); + lmt_addr = readq(lmt_map_base + index); +@@ -631,7 +636,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + /* Reading num of VFs per PF */ + rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); + for (vf = 0; vf < num_vfs; vf++) { +- index = (pf * rvu->hw->total_vfs * 16) + ++ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); + off += scnprintf(&buf[off], buf_size - 1 - off, + "PF%d:VF%d \t\t", pf, vf); +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +index 47adccf7a77765..1999918ca500fa 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +@@ -988,6 +988,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) + int err, pool_id, non_xdp_queues; + struct nix_aq_enq_req *aq; + struct otx2_cq_queue *cq; ++ struct otx2_pool *pool; + + cq = &qset->cq[qidx]; + cq->cq_idx = qidx; +@@ -996,8 +997,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) + cq->cq_type = CQ_RX; + cq->cint_idx = qidx; + cq->cqe_cnt = qset->rqe_cnt; +- if (pfvf->xdp_prog) ++ if (pfvf->xdp_prog) { ++ pool = &qset->pool[qidx]; + xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, 
qidx, 0); ++ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, ++ MEM_TYPE_PAGE_POOL, ++ pool->page_pool); ++ } + } else if (qidx < non_xdp_queues) { + cq->cq_type = CQ_TX; + cq->cint_idx = qidx - pfvf->hw.rx_queues; +diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +index a4efbeb1620841..889fd26843e608 100644 +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -34,8 +34,10 @@ struct mtk_flow_data { + u16 vlan_in; + + struct { +- u16 id; +- __be16 proto; ++ struct { ++ u16 id; ++ __be16 proto; ++ } vlans[2]; + u8 num; + } vlan; + struct { +@@ -330,18 +332,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, + case FLOW_ACTION_CSUM: + break; + case FLOW_ACTION_VLAN_PUSH: +- if (data.vlan.num == 1 || ++ if (data.vlan.num + data.pppoe.num == 2 || + act->vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + +- data.vlan.id = act->vlan.vid; +- data.vlan.proto = act->vlan.proto; ++ data.vlan.vlans[data.vlan.num].id = act->vlan.vid; ++ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto; + data.vlan.num++; + break; + case FLOW_ACTION_VLAN_POP: + break; + case FLOW_ACTION_PPPOE_PUSH: +- if (data.pppoe.num == 1) ++ if (data.pppoe.num == 1 || ++ data.vlan.num == 2) + return -EOPNOTSUPP; + + data.pppoe.sid = act->pppoe.sid; +@@ -431,12 +434,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + foe.bridge.vlan = data.vlan_in; + +- if (data.vlan.num == 1) { +- if (data.vlan.proto != htons(ETH_P_8021Q)) +- return -EOPNOTSUPP; ++ for (i = 0; i < data.vlan.num; i++) ++ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id); + +- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id); +- } + if (data.pppoe.num == 1) + mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid); + +diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c +index b330020dc0d674..f2bded847e61d1 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c ++++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c +@@ -682,9 +682,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) + } + + static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, +- struct mlx4_db *db, int order) ++ struct mlx4_db *db, unsigned int order) + { +- int o; ++ unsigned int o; + int i; + + for (o = order; o <= 1; ++o) { +@@ -712,7 +712,7 @@ static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, + return 0; + } + +-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) ++int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order) + { + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_db_pgdir *pgdir; +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +index 65cb63f6c46587..61a0fd8424a2c5 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +@@ -450,6 +450,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev, + + if (unlikely(!priv->port_up)) + return 0; ++ if (unlikely(!napi_budget) && cq->type == TX_XDP) ++ return 0; + + netdev_txq_bql_complete_prefetchw(ring->tx_queue); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 20a6bc1a234f4e..9cf33ae48c216f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ 
-93,8 +93,6 @@ struct page_pool; + #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ + MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD)) + +-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 +- + /* Keep in sync with mlx5e_mpwrq_log_wqe_sz. + * These are theoretical maximums, which can be further restricted by + * capabilities. These values are used for static resource allocations and +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +index 775010e94cb7c6..dcd5db907f1028 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +@@ -9,6 +9,9 @@ + #include + #include + ++#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 ++#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17 ++ + static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev) + { + u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size); +@@ -102,18 +105,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, + enum mlx5e_mpwrq_umr_mode umr_mode) + { + u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); +- u8 max_pages_per_wqe, max_log_mpwqe_size; ++ u8 max_pages_per_wqe, max_log_wqe_size_calc; ++ u8 max_log_wqe_size_cap; + u16 max_wqe_size; + + /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */ + max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB; + max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe), + MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size; +- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift; ++ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift; ++ ++ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU); + +- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU); ++ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ? ++ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ; + +- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ); ++ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap); + } + + u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index d9dc7280302eb7..5c6f01abdcb91d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -3627,8 +3627,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, + /* MQPRIO is another toplevel qdisc that can't be attached + * simultaneously with the offloaded HTB. 
+ */ +- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq))) +- return -EINVAL; ++ if (mlx5e_selq_is_htb_enabled(&priv->selq)) { ++ NL_SET_ERR_MSG_MOD(mqprio->extack, ++ "MQPRIO cannot be configured when HTB offload is enabled."); ++ return -EOPNOTSUPP; ++ } + + switch (mqprio->mode) { + case TC_MQPRIO_MODE_DCB: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 751d3ffcd2f6ce..851c499faa7954 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -63,6 +63,7 @@ + #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ + max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) + #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 ++#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8 + + static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; + +@@ -798,6 +799,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev) + + /* RQ */ + mlx5e_build_rq_params(mdev, params); ++ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev)) ++ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE; + + /* If netdev is already registered (e.g. move from nic profile to uplink, + * RTNL lock must be held before triggering netdev notifiers. +@@ -829,6 +832,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; + + netdev->watchdog_timeo = 15 * HZ; ++ if (mlx5_core_is_ecpf(mdev)) ++ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE; + + #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + netdev->hw_features |= NETIF_F_HW_TC; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +index 08a75654f5f188..c170503b3aace1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +@@ -165,6 +165,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, + struct udphdr *udph; + struct iphdr *iph; + ++ if (skb_linearize(skb)) ++ goto out; ++ + /* We are only going to peek, no need to clone the SKB */ + if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) + goto out; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +index 8587cd572da536..bdb825aa872688 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) + if (!flow_group_in) + return -ENOMEM; + +- ft_attr.max_fte = POOL_NEXT_SIZE; ++ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE; + ft_attr.prio = LEGACY_FDB_PRIO; + fdb = mlx5_create_flow_table(root_ns, &ft_attr); + if (IS_ERR(fdb)) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c +index 3ec892d51f57d2..e7143d32b22119 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c +@@ -163,11 +163,16 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data) + u64 value_msb; + + value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); ++ /* bit 1-63 are not supported for NICs, ++ * hence read only bit 0 (asic) from lsb. 
++ */ ++ value_lsb &= 0x1; + value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); + +- mlx5_core_warn(events->dev, +- "High temperature on sensors with bit set %llx %llx", +- value_msb, value_lsb); ++ if (net_ratelimit()) ++ mlx5_core_warn(events->dev, ++ "High temperature on sensors with bit set %llx %llx", ++ value_msb, value_lsb); + + return NOTIFY_OK; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c +index c14590acc77260..f6abfd00d7e68c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c +@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab + int i, found_i = -1; + + for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) { +- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size && ++ if (dev->priv.ft_pool->ft_left[i] && ++ (FT_POOLS[i] >= desired_size || ++ desired_size == MLX5_FS_MAX_POOL_SIZE) && + FT_POOLS[i] <= max_ft_size) { + found_i = i; +- if (desired_size != POOL_NEXT_SIZE) ++ if (desired_size != MLX5_FS_MAX_POOL_SIZE) + break; + } + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h +index 25f4274b372b56..173e312db7204f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h +@@ -7,8 +7,6 @@ + #include + #include "fs_core.h" + +-#define POOL_NEXT_SIZE 0 +- + int mlx5_ft_pool_init(struct mlx5_core_dev *dev); + void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c +index d798834c4e755d..3ac8043f76dacc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c +@@ -833,6 +833,7 @@ static void poll_health(struct timer_list *t) + health->prev = count; + if (health->miss_counter == MAX_MISSES) { + mlx5_core_err(dev, "device's health compromised - reached miss count\n"); ++ health->synd = ioread8(&h->synd); + print_health_info(dev); + queue_work(health->wq, &health->report_work); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +index 711d14dea2485f..d313cb7f0ed88c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, + ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); + +- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE; ++ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? 
++ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE; + ft_attr.max_fte = sz; + + /* We use chains_default_ft(chains) as the table's next_ft till +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c +index 5d2ceff72784f2..f971d60484f065 100644 +--- a/drivers/net/ethernet/microchip/lan743x_main.c ++++ b/drivers/net/ethernet/microchip/lan743x_main.c +@@ -3259,6 +3259,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) + { + struct lan743x_tx *tx; ++ u32 sgmii_ctl; + int index; + int ret; + +@@ -3271,6 +3272,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, + spin_lock_init(&adapter->eth_syslock_spinlock); + mutex_init(&adapter->sgmii_rw_lock); + pci11x1x_set_rfe_rd_fifo_threshold(adapter); ++ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); ++ if (adapter->is_sgmii_en) { ++ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; ++ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; ++ } else { ++ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; ++ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; ++ } ++ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + } else { + adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; + adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; +@@ -3319,7 +3329,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, + + static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) + { +- u32 sgmii_ctl; + int ret; + + adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); +@@ -3331,10 +3340,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) + adapter->mdiobus->priv = (void *)adapter; + if (adapter->is_pci11x1x) { + if (adapter->is_sgmii_en) { +- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); +- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; +- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; +- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "SGMII operation\n"); + adapter->mdiobus->read = lan743x_mdiobus_read_c22; +@@ -3345,10 +3350,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) + netif_dbg(adapter, drv, adapter->netdev, + "lan743x-mdiobus-c45\n"); + } else { +- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); +- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; +- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; +- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "RGMII operation\n"); + // Only C22 support when RGMII I/F +diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c +index ae014e21eb6056..9ed965d61e3554 100644 +--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c ++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c +@@ -1036,7 +1036,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req, + header->inline_oob_size_div4 = client_oob_size / sizeof(u32); + + if (oob_in_sgl) { +- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2); ++ WARN_ON_ONCE(wqe_req->num_sge < 2); + + header->client_oob_in_sgl = 1; + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 7e5258b2c4290b..5af932a5e70c44 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -5125,6 +5125,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp) + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; ++ new_bus->phy_mask = GENMASK(31, 1); + snprintf(new_bus->id, MII_BUS_ID_SIZE, 
"r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +index 63998d65fef8eb..9377b05bfc71e1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +@@ -966,7 +966,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, + /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY + * address. No need to mask it again. + */ +- reg |= 1 << H3_EPHY_ADDR_SHIFT; ++ reg |= ret << H3_EPHY_ADDR_SHIFT; + } else { + /* For SoCs without internal PHY the PHY selection bit should be + * set to 0 (external PHY). +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c +index 9c8376b2718916..c379a958380ce0 100644 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c +@@ -2095,7 +2095,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) + port->slave.mac_addr); + if (!is_valid_ether_addr(port->slave.mac_addr)) { + eth_random_addr(port->slave.mac_addr); +- dev_err(dev, "Use random MAC address\n"); ++ dev_info(dev, "Use random MAC address\n"); + } + } + } +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c +index 9061dca97fcbfd..1c1d4806c119b8 100644 +--- a/drivers/net/ethernet/ti/cpsw_new.c ++++ b/drivers/net/ethernet/ti/cpsw_new.c +@@ -1416,6 +1416,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) + ndev->netdev_ops = &cpsw_netdev_ops; + ndev->ethtool_ops = &cpsw_ethtool_ops; + SET_NETDEV_DEV(ndev, dev); ++ ndev->dev.of_node = slave_data->slave_node; + + if (!napi_ndev) { + /* CPSW Host port CPDMA interface is shared between +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c +index 0a0ad3d77557f9..587643a371de3c 100644 +--- a/drivers/net/ieee802154/ca8210.c ++++ b/drivers/net/ieee802154/ca8210.c +@@ -1446,8 +1446,7 @@ static u8 mcps_data_request( + command.pdata.data_req.src_addr_mode = src_addr_mode; + command.pdata.data_req.dst.mode = dst_address_mode; + if (dst_address_mode != MAC_MODE_NO_ADDR) { +- command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); +- command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); ++ put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id); + if (dst_address_mode == MAC_MODE_SHORT_ADDR) { + command.pdata.data_req.dst.address[0] = LS_BYTE( + dst_addr->short_address +@@ -1795,12 +1794,12 @@ static int ca8210_skb_rx( + } + hdr.source.mode = data_ind[0]; + dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); +- hdr.source.pan_id = *(u16 *)&data_ind[1]; ++ hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1])); + dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); + memcpy(&hdr.source.extended_addr, &data_ind[3], 8); + hdr.dest.mode = data_ind[11]; + dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); +- hdr.dest.pan_id = *(u16 *)&data_ind[12]; ++ hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12])); + dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); + memcpy(&hdr.dest.extended_addr, &data_ind[14], 8); + +@@ -1927,7 +1926,7 @@ static int ca8210_skb_tx( + status = mcps_data_request( + header.source.mode, + header.dest.mode, +- header.dest.pan_id, ++ le16_to_cpu(header.dest.pan_id), + (union macaddr *)&header.dest.extended_addr, + skb->len - mac_len, + &skb->data[mac_len], +diff --git a/drivers/net/phy/phylink.c 
b/drivers/net/phy/phylink.c +index b5f012619e42da..800e8b9eb4532b 100644 +--- a/drivers/net/phy/phylink.c ++++ b/drivers/net/phy/phylink.c +@@ -1718,7 +1718,7 @@ bool phylink_expects_phy(struct phylink *pl) + { + if (pl->cfg_link_an_mode == MLO_AN_FIXED || + (pl->cfg_link_an_mode == MLO_AN_INBAND && +- phy_interface_mode_is_8023z(pl->link_config.interface))) ++ phy_interface_mode_is_8023z(pl->link_interface))) + return false; + return true; + } +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index bbcefcc7ef8f06..1e85cfe524e875 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -10032,6 +10032,7 @@ static const struct usb_device_id rtl8152_table[] = { + { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, + { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, + { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, ++ { USB_DEVICE(VENDOR_ID_DELL, 0xb097) }, + { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, + {} + }; +diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c +index 64db3e98a1b664..2ed879a0abc6ce 100644 +--- a/drivers/net/vxlan/vxlan_core.c ++++ b/drivers/net/vxlan/vxlan_core.c +@@ -227,9 +227,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, + be32_to_cpu(fdb->vni))) + goto nla_put_failure; + +- ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ++ ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used)); + ci.ndm_confirmed = 0; +- ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); ++ ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated)); + ci.ndm_refcnt = 0; + + if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) +@@ -435,8 +435,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, + struct vxlan_fdb *f; + + f = __vxlan_find_mac(vxlan, mac, vni); +- if (f && f->used != jiffies) +- f->used = jiffies; ++ if (f && READ_ONCE(f->used) != jiffies) ++ WRITE_ONCE(f->used, jiffies); + + return f; + } +@@ -1010,12 +1010,12 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, + !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { + if (f->state != state) { + f->state = state; +- f->updated = jiffies; ++ WRITE_ONCE(f->updated, jiffies); + notify = 1; + } + if (f->flags != fdb_flags) { + f->flags = fdb_flags; +- f->updated = jiffies; ++ WRITE_ONCE(f->updated, jiffies); + notify = 1; + } + } +@@ -1049,7 +1049,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, + } + + if (ndm_flags & NTF_USE) +- f->used = jiffies; ++ WRITE_ONCE(f->used, jiffies); + + if (notify) { + if (rd == NULL) +@@ -1478,7 +1478,7 @@ static bool vxlan_snoop(struct net_device *dev, + src_mac, &rdst->remote_ip.sa, &src_ip->sa); + + rdst->remote_ip = *src_ip; +- f->updated = jiffies; ++ WRITE_ONCE(f->updated, jiffies); + vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); + } else { + u32 hash_index = fdb_head_index(vxlan, src_mac, vni); +@@ -2920,7 +2920,7 @@ static void vxlan_cleanup(struct timer_list *t) + if (f->flags & NTF_EXT_LEARNED) + continue; + +- timeout = f->used + vxlan->cfg.age_interval * HZ; ++ timeout = READ_ONCE(f->used) + vxlan->cfg.age_interval * HZ; + if (time_before_eq(timeout, jiffies)) { + netdev_dbg(vxlan->dev, + "garbage collect %pM\n", +@@ -4240,6 +4240,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], + struct netlink_ext_ack *extack) + { + struct vxlan_dev *vxlan = netdev_priv(dev); ++ bool rem_ip_changed, change_igmp; + struct net_device *lowerdev; + struct vxlan_config conf; + struct vxlan_rdst *dst; +@@ -4263,8 +4264,13 @@ static int vxlan_changelink(struct 
net_device *dev, struct nlattr *tb[], + if (err) + return err; + ++ rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip); ++ change_igmp = vxlan->dev->flags & IFF_UP && ++ (rem_ip_changed || ++ dst->remote_ifindex != conf.remote_ifindex); ++ + /* handle default dst entry */ +- if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) { ++ if (rem_ip_changed) { + u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); + + spin_lock_bh(&vxlan->hash_lock[hash_index]); +@@ -4308,6 +4314,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], + } + } + ++ if (change_igmp && vxlan_addr_multicast(&dst->remote_ip)) ++ err = vxlan_multicast_leave(vxlan); ++ + if (conf.age_interval != vxlan->cfg.age_interval) + mod_timer(&vxlan->age_timer, jiffies); + +@@ -4315,7 +4324,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], + if (lowerdev && lowerdev != dst->remote_dev) + dst->remote_dev = lowerdev; + vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); +- return 0; ++ ++ if (!err && change_igmp && ++ vxlan_addr_multicast(&dst->remote_ip)) ++ err = vxlan_multicast_join(vxlan); ++ ++ return err; + } + + static void vxlan_dellink(struct net_device *dev, struct list_head *head) +diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h +index 33f4706af880d1..18dfd7aab610c9 100644 +--- a/drivers/net/wireless/ath/ath12k/core.h ++++ b/drivers/net/wireless/ath/ath12k/core.h +@@ -125,6 +125,7 @@ struct ath12k_ext_irq_grp { + u32 num_irq; + u32 grp_id; + u64 timestamp; ++ bool napi_enabled; + struct napi_struct napi; + struct net_device napi_ndev; + }; +diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c +index e025e4d0e7678f..474e0d4d406ea1 100644 +--- a/drivers/net/wireless/ath/ath12k/dp_tx.c ++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c +@@ -118,7 +118,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd, + le32_encode_bits(ti->data_len, + HAL_TX_MSDU_EXT_INFO1_BUF_LEN); + +- tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | ++ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | + le32_encode_bits(ti->encap_type, + HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) | + le32_encode_bits(ti->encrypt_type, +@@ -422,13 +422,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, + + switch (wbm_status) { + case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: +- case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: +- case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: + ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK); + ts.ack_rssi = le32_get_bits(status_desc->info2, + HTT_TX_WBM_COMP_INFO2_ACK_RSSI); + ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts); + break; ++ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: ++ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: + ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); +diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h +index 6c17adc6d60b56..1bb840c2bef577 100644 +--- a/drivers/net/wireless/ath/ath12k/hal_desc.h ++++ b/drivers/net/wireless/ath/ath12k/hal_desc.h +@@ -2918,7 +2918,7 @@ struct hal_mon_buf_ring { + + #define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0) + +-#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0) ++#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0) + #define HAL_MON_DEST_INFO0_FLUSH_DETECTED 
BIT(16) + #define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17) + #define HAL_MON_DEST_INFO0_INITIATOR BIT(18) +diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c +index 041a9602f0e15f..5fd80f90ecafed 100644 +--- a/drivers/net/wireless/ath/ath12k/pci.c ++++ b/drivers/net/wireless/ath/ath12k/pci.c +@@ -442,8 +442,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) + + ath12k_pci_ext_grp_disable(irq_grp); + +- napi_synchronize(&irq_grp->napi); +- napi_disable(&irq_grp->napi); ++ if (irq_grp->napi_enabled) { ++ napi_synchronize(&irq_grp->napi); ++ napi_disable(&irq_grp->napi); ++ irq_grp->napi_enabled = false; ++ } + } + } + +@@ -976,7 +979,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + +- napi_enable(&irq_grp->napi); ++ if (!irq_grp->napi_enabled) { ++ napi_enable(&irq_grp->napi); ++ irq_grp->napi_enabled = true; ++ } ++ + ath12k_pci_ext_grp_enable(irq_grp); + } + } +diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c +index c977dfbae0a464..d87d5980325e8f 100644 +--- a/drivers/net/wireless/ath/ath12k/wmi.c ++++ b/drivers/net/wireless/ath/ath12k/wmi.c +@@ -2115,8 +2115,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar, + arg->dwell_time_active = 50; + arg->dwell_time_active_2g = 0; + arg->dwell_time_passive = 150; +- arg->dwell_time_active_6g = 40; +- arg->dwell_time_passive_6g = 30; ++ arg->dwell_time_active_6g = 70; ++ arg->dwell_time_passive_6g = 70; + arg->min_rest_time = 50; + arg->max_rest_time = 500; + arg->repeat_probe_time = 0; +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c +index 4f00400c7ffb83..58386906598a73 100644 +--- a/drivers/net/wireless/ath/ath9k/init.c ++++ b/drivers/net/wireless/ath/ath9k/init.c +@@ -691,7 +691,9 @@ static int ath9k_of_init(struct ath_softc *sc) + ah->ah_flags |= AH_NO_EEP_SWAP; + } + +- of_get_mac_address(np, common->macaddr); ++ ret = of_get_mac_address(np, common->macaddr); ++ if (ret == -EPROBE_DEFER) ++ return ret; + + return 0; + } +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +index a97ed7cbe4d140..d588e4cd808d8e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + /* +- * Copyright (C) 2018-2024 Intel Corporation ++ * Copyright (C) 2018-2025 Intel Corporation + */ + #include + #include "iwl-drv.h" +@@ -1382,15 +1382,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + switch (tp_id) { + case IWL_FW_INI_TIME_POINT_EARLY: + iwl_dbg_tlv_init_cfg(fwrt); +- iwl_dbg_tlv_apply_config(fwrt, conf_list); + iwl_dbg_tlv_update_drams(fwrt); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); ++ iwl_dbg_tlv_apply_config(fwrt, conf_list); + break; + case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: + iwl_dbg_tlv_apply_buffers(fwrt); + iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); +- iwl_dbg_tlv_apply_config(fwrt, conf_list); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); ++ iwl_dbg_tlv_apply_config(fwrt, conf_list); + break; + case IWL_FW_INI_TIME_POINT_PERIODIC: + iwl_dbg_tlv_set_periodic_trigs(fwrt); +@@ -1400,14 +1400,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: + case 
IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: + iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); +- iwl_dbg_tlv_apply_config(fwrt, conf_list); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, + iwl_dbg_tlv_check_fw_pkt); ++ iwl_dbg_tlv_apply_config(fwrt, conf_list); + break; + default: + iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); +- iwl_dbg_tlv_apply_config(fwrt, conf_list); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); ++ iwl_dbg_tlv_apply_config(fwrt, conf_list); + break; + } + } +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +index 4a2de79f2e864b..c01a9a6f06a4d0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +@@ -580,6 +580,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { + IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), ++ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), ++ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + + IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), + IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name), +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h +index 8b620d4fed4390..df0ea638370b56 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h +@@ -439,6 +439,7 @@ struct mt76_hw_cap { + #define MT_DRV_RX_DMA_HDR BIT(3) + #define MT_DRV_HW_MGMT_TXQ BIT(4) + #define MT_DRV_AMSDU_OFFLOAD BIT(5) ++#define MT_DRV_IGNORE_TXS_FAILED BIT(6) + + struct mt76_driver_ops { + u32 drv_flags; +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +index 87bfa441a93743..4979012d5abfa0 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +@@ -280,6 +280,9 @@ enum tx_mgnt_type { + #define MT_TXFREE_INFO_COUNT GENMASK(27, 24) + #define MT_TXFREE_INFO_STAT GENMASK(29, 28) + ++#define MT_TXS_HDR_SIZE 4 /* Unit: DW */ ++#define MT_TXS_SIZE 12 /* Unit: DW */ ++ + #define MT_TXS0_BW GENMASK(31, 29) + #define MT_TXS0_TID GENMASK(28, 26) + #define MT_TXS0_AMPDU BIT(25) +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +index 9277ff38b7a228..57ae362dad50b7 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +@@ -152,7 +152,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) + static const struct mt76_driver_ops drv_ops = { + .txwi_size = sizeof(struct mt76x02_txwi), + .drv_flags = MT_DRV_TX_ALIGNED4_SKBS | +- MT_DRV_SW_RX_AIRTIME, ++ MT_DRV_SW_RX_AIRTIME | ++ MT_DRV_IGNORE_TXS_FAILED, + .survey_flags = SURVEY_INFO_TIME_TX, + .update_survey = mt76x02_update_channel, + .tx_prepare_skb = mt76x02_tx_prepare_skb, +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +index 0422c332354a13..520fd46227a7b8 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +@@ -210,7 +210,8 @@ static int mt76x0u_probe(struct 
usb_interface *usb_intf, + const struct usb_device_id *id) + { + static const struct mt76_driver_ops drv_ops = { +- .drv_flags = MT_DRV_SW_RX_AIRTIME, ++ .drv_flags = MT_DRV_SW_RX_AIRTIME | ++ MT_DRV_IGNORE_TXS_FAILED, + .survey_flags = SURVEY_INFO_TIME_TX, + .update_survey = mt76x02_update_channel, + .tx_prepare_skb = mt76x02u_tx_prepare_skb, +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +index df85ebc6e1df07..7e2475b3c278e0 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +@@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) + static const struct mt76_driver_ops drv_ops = { + .txwi_size = sizeof(struct mt76x02_txwi), + .drv_flags = MT_DRV_TX_ALIGNED4_SKBS | +- MT_DRV_SW_RX_AIRTIME, ++ MT_DRV_SW_RX_AIRTIME | ++ MT_DRV_IGNORE_TXS_FAILED, + .survey_flags = SURVEY_INFO_TIME_TX, + .update_survey = mt76x02_update_channel, + .tx_prepare_skb = mt76x02_tx_prepare_skb, +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +index d8043099921966..70d3895762b4cd 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +@@ -29,7 +29,8 @@ static int mt76x2u_probe(struct usb_interface *intf, + const struct usb_device_id *id) + { + static const struct mt76_driver_ops drv_ops = { +- .drv_flags = MT_DRV_SW_RX_AIRTIME, ++ .drv_flags = MT_DRV_SW_RX_AIRTIME | ++ MT_DRV_IGNORE_TXS_FAILED, + .survey_flags = SURVEY_INFO_TIME_TX, + .update_survey = mt76x02_update_channel, + .tx_prepare_skb = mt76x02u_tx_prepare_skb, +diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +index 73d46ec1181ae8..35d9673ec0d8fc 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +@@ -1354,7 +1354,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len) + mt7996_mac_tx_free(dev, data, len); + return false; + case PKT_TYPE_TXS: +- for (rxd += 4; rxd + 8 <= end; rxd += 8) ++ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE) + mt7996_mac_add_txs(dev, rxd); + return false; + case PKT_TYPE_RX_FW_MONITOR: +@@ -1391,7 +1391,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + mt7996_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_TXS: +- for (rxd += 4; rxd + 8 <= end; rxd += 8) ++ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE) + mt7996_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; +diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c +index 1809b03292c3d9..47cdccdbed6aaf 100644 +--- a/drivers/net/wireless/mediatek/mt76/tx.c ++++ b/drivers/net/wireless/mediatek/mt76/tx.c +@@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, + return; + + /* Tx status can be unreliable. 
if it fails, mark the frame as ACKed */ +- if (flags & MT_TX_CB_TXS_FAILED) { ++ if (flags & MT_TX_CB_TXS_FAILED && ++ (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) { + info->status.rates[0].count = 0; + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +index 6e47dde9389092..05e77d2bda3738 100644 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +@@ -900,9 +900,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len) + return len; + + write_error: +- dev_info(&udev->dev, +- "%s: Failed to write block at addr: %04x size: %04x\n", +- __func__, addr, blocksize); ++ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) ++ dev_info(&udev->dev, ++ "%s: Failed to write block at addr: %04x size: %04x\n", ++ __func__, addr, blocksize); + return -EAGAIN; + } + +@@ -4073,8 +4074,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) + */ + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary); + +- ret = rtl8xxxu_download_firmware(priv); +- dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); ++ for (int retry = 5; retry >= 0 ; retry--) { ++ ret = rtl8xxxu_download_firmware(priv); ++ dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); ++ if (ret != -EAGAIN) ++ break; ++ if (retry) ++ dev_dbg(dev, "%s: retry firmware download\n", __func__); ++ } + if (ret) + goto exit; + ret = rtl8xxxu_start_firmware(priv); +diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c +index 0c1c1ff31085cf..929182424b8b87 100644 +--- a/drivers/net/wireless/realtek/rtw88/mac.c ++++ b/drivers/net/wireless/realtek/rtw88/mac.c +@@ -783,7 +783,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev, + if (!check_firmware_size(data, size)) + return -EINVAL; + +- if (!ltecoex_read_reg(rtwdev, 0x38, <ecoex_bckp)) ++ if (rtwdev->chip->ltecoex_addr && ++ !ltecoex_read_reg(rtwdev, 0x38, <ecoex_bckp)) + return -EBUSY; + + wlan_cpu_enable(rtwdev, false); +@@ -801,7 +802,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev, + + wlan_cpu_enable(rtwdev, true); + +- if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) { ++ if (rtwdev->chip->ltecoex_addr && ++ !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) { + ret = -EBUSY; + goto dlfw_fail; + } +diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c +index b90ea6c88b15d9..0d0b5123b5fe28 100644 +--- a/drivers/net/wireless/realtek/rtw88/main.c ++++ b/drivers/net/wireless/realtek/rtw88/main.c +@@ -1544,6 +1544,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, + { + const struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; ++ int i; + + ht_cap->ht_supported = true; + ht_cap->cap = 0; +@@ -1563,25 +1564,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = chip->ampdu_density; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; +- if (efuse->hw_cap.nss > 1) { +- ht_cap->mcs.rx_mask[0] = 0xFF; +- ht_cap->mcs.rx_mask[1] = 0xFF; +- ht_cap->mcs.rx_mask[4] = 0x01; +- ht_cap->mcs.rx_highest = cpu_to_le16(300); +- } else { +- ht_cap->mcs.rx_mask[0] = 0xFF; +- ht_cap->mcs.rx_mask[1] = 0x00; +- ht_cap->mcs.rx_mask[4] = 0x01; +- ht_cap->mcs.rx_highest = cpu_to_le16(150); +- } ++ ++ for (i = 0; i < 
efuse->hw_cap.nss; i++) ++ ht_cap->mcs.rx_mask[i] = 0xFF; ++ ht_cap->mcs.rx_mask[4] = 0x01; ++ ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss); + } + + static void rtw_init_vht_cap(struct rtw_dev *rtwdev, + struct ieee80211_sta_vht_cap *vht_cap) + { + struct rtw_efuse *efuse = &rtwdev->efuse; +- u16 mcs_map; ++ u16 mcs_map = 0; + __le16 highest; ++ int i; + + if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && + efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) +@@ -1604,21 +1600,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev, + if (rtw_chip_has_rx_ldpc(rtwdev)) + vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + +- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | +- IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | +- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | +- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | +- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | +- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | +- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; +- if (efuse->hw_cap.nss > 1) { +- highest = cpu_to_le16(780); +- mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; +- } else { +- highest = cpu_to_le16(390); +- mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; ++ for (i = 0; i < 8; i++) { ++ if (i < efuse->hw_cap.nss) ++ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); ++ else ++ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); + } + ++ highest = cpu_to_le16(390 * efuse->hw_cap.nss); ++ + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.rx_highest = highest; +diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h +index 7c6c11d50ff30f..0e76bc07bddef3 100644 +--- a/drivers/net/wireless/realtek/rtw88/reg.h ++++ b/drivers/net/wireless/realtek/rtw88/reg.h +@@ -108,6 +108,7 @@ + #define BIT_SHIFT_ROM_PGE 16 + #define BIT_FW_INIT_RDY BIT(15) + #define BIT_FW_DW_RDY BIT(14) ++#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13)) + #define BIT_RPWM_TOGGLE BIT(7) + #define BIT_RAM_DL_SEL BIT(7) /* legacy only */ + #define BIT_DMEM_CHKSUM_OK BIT(6) +@@ -125,7 +126,7 @@ + BIT_CHECK_SUM_OK) + #define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \ + BIT_WINTINI_RDY | BIT_RAM_DL_SEL) +-#define FW_READY_MASK 0xffff ++#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL) + + #define REG_MCU_TST_CFG 0x84 + #define VAL_FW_TRIGGER 0x1 +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c +index 3017a9760da8dc..99318a82b43f4b 100644 +--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c ++++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c +@@ -975,11 +975,11 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, + } + + static void +-rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) ++rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, ++ u8 rs, u32 *phy_pwr_idx) + { + struct rtw_hal *hal = &rtwdev->hal; + static const u32 offset_txagc[2] = {0x1d00, 0x1d80}; +- static u32 phy_pwr_idx; + u8 rate, rate_idx, pwr_index, shift; + int j; + +@@ -987,12 +987,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) + rate = rtw_rate_section[rs][j]; + pwr_index = hal->tx_pwr_tbl[path][rate]; + shift = rate & 0x3; +- phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); ++ *phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + if (shift == 0x3) { + rate_idx = rate & 0xfc; + rtw_write32(rtwdev, offset_txagc[path] + rate_idx, +- phy_pwr_idx); +- phy_pwr_idx = 0; ++ *phy_pwr_idx); ++ *phy_pwr_idx = 0; + } + } 
+ } +@@ -1000,11 +1000,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) + static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev) + { + struct rtw_hal *hal = &rtwdev->hal; ++ u32 phy_pwr_idx = 0; + int rs, path; + + for (path = 0; path < hal->rf_path_num; path++) { + for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) +- rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs); ++ rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs, ++ &phy_pwr_idx); + } + } + +diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c +index e222d3c01a77ec..66819f69440550 100644 +--- a/drivers/net/wireless/realtek/rtw88/util.c ++++ b/drivers/net/wireless/realtek/rtw88/util.c +@@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) + *nss = 4; + *mcs = rate - DESC_RATEVHT4SS_MCS0; + } else if (rate >= DESC_RATEMCS0 && +- rate <= DESC_RATEMCS15) { ++ rate <= DESC_RATEMCS31) { ++ *nss = 0; + *mcs = rate - DESC_RATEMCS0; + } + } +diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c +index a8e2efae6e5260..89b0a7970508e2 100644 +--- a/drivers/net/wireless/realtek/rtw89/fw.c ++++ b/drivers/net/wireless/realtek/rtw89/fw.c +@@ -755,7 +755,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); +- ret = -1; + goto fail; + } + +@@ -816,7 +815,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, + ret = rtw89_h2c_tx(rtwdev, skb, true); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); +- ret = -1; + goto fail; + } + +diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c +index 9e2328db186560..91f0895d9f5404 100644 +--- a/drivers/net/wireless/realtek/rtw89/regd.c ++++ b/drivers/net/wireless/realtek/rtw89/regd.c +@@ -451,6 +451,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct rtw89_dev *rtwdev = hw->priv; + ++ wiphy_lock(wiphy); + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + +@@ -468,6 +469,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request + + exit: + mutex_unlock(&rtwdev->mutex); ++ wiphy_unlock(wiphy); + } + + static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) +diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c +index 01b17b8f4ff9dc..45165cf3e824e6 100644 +--- a/drivers/net/wireless/realtek/rtw89/ser.c ++++ b/drivers/net/wireless/realtek/rtw89/ser.c +@@ -156,9 +156,11 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt) + rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n", + ser_st_name(ser), ser_ev_name(ser, evt)); + ++ wiphy_lock(rtwdev->hw->wiphy); + mutex_lock(&rtwdev->mutex); + rtw89_leave_lps(rtwdev); + mutex_unlock(&rtwdev->mutex); ++ wiphy_unlock(rtwdev->hw->wiphy); + + ser->st_tbl[ser->state].st_func(ser, evt); + } +@@ -676,9 +678,11 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) + + switch (evt) { + case SER_EV_STATE_IN: ++ wiphy_lock(rtwdev->hw->wiphy); + mutex_lock(&rtwdev->mutex); + ser_l2_reset_st_pre_hdl(ser); + mutex_unlock(&rtwdev->mutex); ++ wiphy_unlock(rtwdev->hw->wiphy); + + ieee80211_restart_hw(rtwdev->hw); + ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT); +diff --git a/drivers/nvdimm/label.c 
b/drivers/nvdimm/label.c +index 082253a3a95607..04f4a049599a1a 100644 +--- a/drivers/nvdimm/label.c ++++ b/drivers/nvdimm/label.c +@@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd) + if (ndd->data) + return 0; + +- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) { ++ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 || ++ ndd->nsarea.config_size == 0) { + dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n", + ndd->nsarea.max_xfer, ndd->nsarea.config_size); + return -ENXIO; +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 1e5c8220e365ca..97ab91a479d112 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3429,6 +3429,9 @@ static const struct pci_device_id nvme_id_table[] = { + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ + .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, ++ { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | ++ NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_BOGUS_NID, }, +@@ -3452,6 +3455,9 @@ static const struct pci_device_id nvme_id_table[] = { + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ + .driver_data = NVME_QUIRK_BROKEN_MSI }, ++ { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ ++ .driver_data = NVME_QUIRK_BROKEN_MSI | ++ NVME_QUIRK_NO_DEEPEST_PS }, + { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ +@@ -3535,6 +3541,8 @@ static const struct pci_device_id nvme_id_table[] = { + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, ++ { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */ ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index a0af659a4c4a21..6a539c3b8b530e 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -1456,6 +1456,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) + { + struct socket *sock = queue->sock; + ++ if (!queue->state_change) ++ return; ++ + write_lock_bh(&sock->sk->sk_callback_lock); + sock->sk->sk_data_ready = queue->data_ready; + sock->sk->sk_state_change = queue->state_change; +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index 3ea94bc26e8003..dd00cc09ae5ec5 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -456,9 +456,11 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, + cell->nbits = info->nbits; + cell->np = info->np; + +- if (cell->nbits) ++ if (cell->nbits) { + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); ++ cell->raw_len = ALIGN(cell->bytes, nvmem->word_size); ++ } + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, +@@ -467,6 +469,18 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, + return -EINVAL; + } + ++ if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) { ++ dev_err(&nvmem->dev, ++ "cell %s raw len %zd unaligned to nvmem word size %d\n", ++ cell->name ?: "", cell->raw_len, ++ 
nvmem->word_size); ++ ++ if (info->raw_len) ++ return -EINVAL; ++ ++ cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size); ++ } ++ + return 0; + } + +diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c +index 6c554040c6e67d..7b0621fdbc82e2 100644 +--- a/drivers/nvmem/qfprom.c ++++ b/drivers/nvmem/qfprom.c +@@ -321,19 +321,32 @@ static int qfprom_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) + { + struct qfprom_priv *priv = context; +- u8 *val = _val; +- int i = 0, words = bytes; ++ u32 *val = _val; + void __iomem *base = priv->qfpcorrected; ++ int words = DIV_ROUND_UP(bytes, sizeof(u32)); ++ int i; + + if (read_raw_data && priv->qfpraw) + base = priv->qfpraw; + +- while (words--) +- *val++ = readb(base + reg + i++); ++ for (i = 0; i < words; i++) ++ *val++ = readl(base + reg + i * sizeof(u32)); + + return 0; + } + ++/* Align reads to word boundary */ ++static void qfprom_fixup_dt_cell_info(struct nvmem_device *nvmem, ++ struct nvmem_cell_info *cell) ++{ ++ unsigned int byte_offset = cell->offset % sizeof(u32); ++ ++ cell->bit_offset += byte_offset * BITS_PER_BYTE; ++ cell->offset -= byte_offset; ++ if (byte_offset && !cell->nbits) ++ cell->nbits = cell->bytes * BITS_PER_BYTE; ++} ++ + static void qfprom_runtime_disable(void *data) + { + pm_runtime_disable(data); +@@ -358,10 +371,11 @@ static int qfprom_probe(struct platform_device *pdev) + struct nvmem_config econfig = { + .name = "qfprom", + .add_legacy_fixed_of_cells = true, +- .stride = 1, +- .word_size = 1, ++ .stride = 4, ++ .word_size = 4, + .id = NVMEM_DEVID_AUTO, + .reg_read = qfprom_reg_read, ++ .fixup_dt_cell_info = qfprom_fixup_dt_cell_info, + }; + struct device *dev = &pdev->dev; + struct resource *res; +diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c +index 7107d68a2f8c75..c6684ab14e742e 100644 +--- a/drivers/nvmem/rockchip-otp.c ++++ b/drivers/nvmem/rockchip-otp.c +@@ -59,7 +59,6 @@ + #define RK3588_OTPC_AUTO_EN 0x08 + #define RK3588_OTPC_INT_ST 0x84 + #define RK3588_OTPC_DOUT0 0x20 +-#define RK3588_NO_SECURE_OFFSET 0x300 + #define RK3588_NBYTES 4 + #define RK3588_BURST_NUM 1 + #define RK3588_BURST_SHIFT 8 +@@ -69,6 +68,7 @@ + + struct rockchip_data { + int size; ++ int read_offset; + const char * const *clks; + int num_clks; + nvmem_reg_read_t reg_read; +@@ -196,7 +196,7 @@ static int rk3588_otp_read(void *context, unsigned int offset, + addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES; + addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES; + addr_len = addr_end - addr_start; +- addr_start += RK3588_NO_SECURE_OFFSET; ++ addr_start += otp->data->read_offset / RK3588_NBYTES; + + buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL); + if (!buf) +@@ -273,12 +273,21 @@ static const struct rockchip_data px30_data = { + .reg_read = px30_otp_read, + }; + ++static const struct rockchip_data rk3576_data = { ++ .size = 0x100, ++ .read_offset = 0x700, ++ .clks = px30_otp_clocks, ++ .num_clks = ARRAY_SIZE(px30_otp_clocks), ++ .reg_read = rk3588_otp_read, ++}; ++ + static const char * const rk3588_otp_clocks[] = { + "otp", "apb_pclk", "phy", "arb", + }; + + static const struct rockchip_data rk3588_data = { + .size = 0x400, ++ .read_offset = 0xc00, + .clks = rk3588_otp_clocks, + .num_clks = ARRAY_SIZE(rk3588_otp_clocks), + .reg_read = rk3588_otp_read, +@@ -293,6 +302,10 @@ static const struct of_device_id rockchip_otp_match[] = { + .compatible = "rockchip,rk3308-otp", + .data = &px30_data, + }, ++ { ++ .compatible = "rockchip,rk3576-otp", ++ 
.data = &rk3576_data, ++ }, + { + .compatible = "rockchip,rk3588-otp", + .data = &rk3588_data, +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index e9ae66cc4189b1..a3927daebeb024 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -180,6 +180,12 @@ config PCI_P2PDMA + P2P DMA transactions must be between devices behind the same root + port. + ++ Enabling this option will reduce the entropy of x86 KASLR memory ++ regions. For example - on a 46 bit system, the entropy goes down ++ from 16 bits to 15 bits. The actual reduction in entropy depends ++ on the physical address bits, on processor features, kernel config ++ (5 level page table) and physical memory present on the system. ++ + If unsure, say N. + + config PCI_LABEL +diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c +index f2e5feba552678..26ad643fb42484 100644 +--- a/drivers/pci/controller/dwc/pcie-designware-ep.c ++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c +@@ -281,7 +281,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, + u32 index; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + +- for (index = 0; index < pci->num_ob_windows; index++) { ++ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) { + if (ep->outbound_addr[index] != addr) + continue; + *atu_index = index; +diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c +index 940af934ce1bb8..9bcf4c68058eb3 100644 +--- a/drivers/pci/controller/pcie-brcmstb.c ++++ b/drivers/pci/controller/pcie-brcmstb.c +@@ -284,8 +284,8 @@ static int brcm_pcie_encode_ibar_size(u64 size) + if (log2_in >= 12 && log2_in <= 15) + /* Covers 4KB to 32KB (inclusive) */ + return (log2_in - 12) + 0x1c; +- else if (log2_in >= 16 && log2_in <= 35) +- /* Covers 64KB to 32GB, (inclusive) */ ++ else if (log2_in >= 16 && log2_in <= 36) ++ /* Covers 64KB to 64GB, (inclusive) */ + return log2_in - 15; + /* Something is awry so disable */ + return 0; +@@ -1632,3 +1632,4 @@ module_platform_driver(brcm_pcie_driver); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Broadcom STB PCIe RC driver"); + MODULE_AUTHOR("Broadcom"); ++MODULE_SOFTDEP("pre: irq_bcm2712_mip"); +diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c +index dfa222e02c4da9..ad82feff0405ee 100644 +--- a/drivers/pci/controller/vmd.c ++++ b/drivers/pci/controller/vmd.c +@@ -17,6 +17,8 @@ + #include + #include + ++#include ++ + #include + + #define VMD_CFGBAR 0 +@@ -981,6 +983,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) + struct vmd_dev *vmd; + int err; + ++ if (xen_domain()) { ++ /* ++ * Xen doesn't have knowledge about devices in the VMD bus ++ * because the config space of devices behind the VMD bridge is ++ * not known to Xen, and hence Xen cannot discover or configure ++ * them in any way. ++ * ++ * Bypass of MSI remapping won't work in that case as direct ++ * write by Linux to the MSI entries won't result in functional ++ * interrupts, as Xen is the entity that manages the host ++ * interrupt controller and must configure interrupts. However ++ * multiplexing of interrupts by the VMD bridge will work under ++ * Xen, so force the usage of that mode which must always be ++ * supported by VMD bridges. 
++ */ ++ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP; ++ } ++ + if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) + return -ENOMEM; + +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c +index fba402f4f6330d..3f40be417856e0 100644 +--- a/drivers/pci/setup-bus.c ++++ b/drivers/pci/setup-bus.c +@@ -802,11 +802,9 @@ static resource_size_t calculate_iosize(resource_size_t size, + size = (size & 0xff) + ((size & ~0xffUL) << 2); + #endif + size = size + size1; +- if (size < old_size) +- size = old_size; + +- size = ALIGN(max(size, add_size) + children_add_size, align); +- return size; ++ size = max(size, add_size) + children_add_size; ++ return ALIGN(max(size, old_size), align); + } + + static resource_size_t calculate_memsize(resource_size_t size, +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 7bd1733d797703..77aa37de59880f 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -684,8 +684,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + + if ((chan == 5 && cmn->rsp_vc_num < 2) || + (chan == 6 && cmn->dat_vc_num < 2) || +- (chan == 7 && cmn->snp_vc_num < 2) || +- (chan == 8 && cmn->req_vc_num < 2)) ++ (chan == 7 && cmn->req_vc_num < 2) || ++ (chan == 8 && cmn->snp_vc_num < 2)) + return 0; + } + +@@ -841,8 +841,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \ + _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \ + _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \ +- _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \ +- _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5)) ++ _CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)), \ ++ _CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5)) + + #define CMN_EVENT_XP_DAT(_name, _event) \ + _CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \ +@@ -2443,6 +2443,7 @@ static int arm_cmn_probe(struct platform_device *pdev) + + cmn->dev = &pdev->dev; + cmn->part = (unsigned long)device_get_match_data(cmn->dev); ++ cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); + platform_set_drvdata(pdev, cmn); + + if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) { +@@ -2470,7 +2471,6 @@ static int arm_cmn_probe(struct platform_device *pdev) + if (err) + return err; + +- cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); + cmn->pmu = (struct pmu) { + .module = THIS_MODULE, + .attr_groups = arm_cmn_attr_groups, +diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c +index 0e8f54168cb641..0858e6096453ef 100644 +--- a/drivers/perf/arm_pmuv3.c ++++ b/drivers/perf/arm_pmuv3.c +@@ -751,10 +751,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu) + else + armv8pmu_disable_user_access(); + ++ kvm_vcpu_pmu_resync_el0(); ++ + /* Enable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); +- +- kvm_vcpu_pmu_resync_el0(); + } + + static void armv8pmu_stop(struct arm_pmu *cpu_pmu) +diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c +index a892e1d7e2d024..9417372a015547 100644 +--- a/drivers/phy/phy-core.c ++++ b/drivers/phy/phy-core.c +@@ -400,13 +400,14 @@ EXPORT_SYMBOL_GPL(phy_power_off); + + int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode) + { +- int ret; ++ int ret = 0; + +- if (!phy || !phy->ops->set_mode) ++ if (!phy) + return 0; + + mutex_lock(&phy->mutex); +- ret = phy->ops->set_mode(phy, mode, submode); ++ if (phy->ops->set_mode) ++ ret = phy->ops->set_mode(phy, mode, submode); + if (!ret) + phy->attrs.mode = mode; + 
mutex_unlock(&phy->mutex); +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +index aa578be2bcb6df..9a6391361a0bed 100644 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +@@ -9,6 +9,7 @@ + * Copyright (C) 2014 Cogent Embedded, Inc. + */ + ++#include + #include + #include + #include +@@ -19,12 +20,14 @@ + #include + #include + #include ++#include + #include + #include + #include + + /******* USB2.0 Host registers (original offset is +0x200) *******/ + #define USB2_INT_ENABLE 0x000 ++#define USB2_AHB_BUS_CTR 0x008 + #define USB2_USBCTR 0x00c + #define USB2_SPD_RSM_TIMSET 0x10c + #define USB2_OC_TIMSET 0x110 +@@ -40,6 +43,10 @@ + #define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */ + #define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */ + ++/* AHB_BUS_CTR */ ++#define USB2_AHB_BUS_CTR_MBL_MASK GENMASK(1, 0) ++#define USB2_AHB_BUS_CTR_MBL_INCR4 2 ++ + /* USBCTR */ + #define USB2_USBCTR_DIRPD BIT(2) + #define USB2_USBCTR_PLL_RST BIT(1) +@@ -110,10 +117,10 @@ struct rcar_gen3_chan { + struct extcon_dev *extcon; + struct rcar_gen3_phy rphys[NUM_OF_PHYS]; + struct regulator *vbus; ++ struct reset_control *rstc; + struct work_struct work; +- struct mutex lock; /* protects rphys[...].powered */ ++ spinlock_t lock; /* protects access to hardware and driver data structure. */ + enum usb_dr_mode dr_mode; +- int irq; + u32 obint_enable_bits; + bool extcon_host; + bool is_otg_channel; +@@ -124,6 +131,7 @@ struct rcar_gen3_chan { + struct rcar_gen3_phy_drv_data { + const struct phy_ops *phy_usb2_ops; + bool no_adp_ctrl; ++ bool init_bus; + }; + + /* +@@ -338,6 +346,8 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, + bool is_b_device; + enum phy_mode cur_mode, new_mode; + ++ guard(spinlock_irqsave)(&ch->lock); ++ + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) + return -EIO; + +@@ -405,7 +415,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) + val = readl(usb2_base + USB2_ADPCTRL); + writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); + } +- msleep(20); ++ mdelay(20); + + writel(0xffffffff, usb2_base + USB2_OBINTSTA); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN); +@@ -417,16 +427,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) + { + struct rcar_gen3_chan *ch = _ch; + void __iomem *usb2_base = ch->base; +- u32 status = readl(usb2_base + USB2_OBINTSTA); ++ struct device *dev = ch->dev; + irqreturn_t ret = IRQ_NONE; ++ u32 status; ++ ++ pm_runtime_get_noresume(dev); ++ ++ if (pm_runtime_suspended(dev)) ++ goto rpm_put; + +- if (status & ch->obint_enable_bits) { +- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); +- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); +- rcar_gen3_device_recognition(ch); +- ret = IRQ_HANDLED; ++ scoped_guard(spinlock, &ch->lock) { ++ status = readl(usb2_base + USB2_OBINTSTA); ++ if (status & ch->obint_enable_bits) { ++ dev_vdbg(dev, "%s: %08x\n", __func__, status); ++ writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); ++ rcar_gen3_device_recognition(ch); ++ ret = IRQ_HANDLED; ++ } + } + ++rpm_put: ++ pm_runtime_put_noidle(dev); + return ret; + } + +@@ -436,17 +457,8 @@ static int rcar_gen3_phy_usb2_init(struct phy *p) + struct rcar_gen3_chan *channel = rphy->ch; + void __iomem *usb2_base = channel->base; + u32 val; +- int ret; + +- if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { +- INIT_WORK(&channel->work, 
rcar_gen3_phy_usb2_work); +- ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq, +- IRQF_SHARED, dev_name(channel->dev), channel); +- if (ret < 0) { +- dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); +- return ret; +- } +- } ++ guard(spinlock_irqsave)(&channel->lock); + + /* Initialize USB2 part */ + val = readl(usb2_base + USB2_INT_ENABLE); +@@ -474,6 +486,8 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) + void __iomem *usb2_base = channel->base; + u32 val; + ++ guard(spinlock_irqsave)(&channel->lock); ++ + rphy->initialized = false; + + val = readl(usb2_base + USB2_INT_ENABLE); +@@ -482,9 +496,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) + val &= ~USB2_INT_ENABLE_UCOM_INTEN; + writel(val, usb2_base + USB2_INT_ENABLE); + +- if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) +- free_irq(channel->irq, channel); +- + return 0; + } + +@@ -496,16 +507,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) + u32 val; + int ret = 0; + +- mutex_lock(&channel->lock); +- if (!rcar_gen3_are_all_rphys_power_off(channel)) +- goto out; +- + if (channel->vbus) { + ret = regulator_enable(channel->vbus); + if (ret) +- goto out; ++ return ret; + } + ++ guard(spinlock_irqsave)(&channel->lock); ++ ++ if (!rcar_gen3_are_all_rphys_power_off(channel)) ++ goto out; ++ + val = readl(usb2_base + USB2_USBCTR); + val |= USB2_USBCTR_PLL_RST; + writel(val, usb2_base + USB2_USBCTR); +@@ -515,7 +527,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) + out: + /* The powered flag should be set for any other phys anyway */ + rphy->powered = true; +- mutex_unlock(&channel->lock); + + return 0; + } +@@ -526,18 +537,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p) + struct rcar_gen3_chan *channel = rphy->ch; + int ret = 0; + +- mutex_lock(&channel->lock); +- rphy->powered = false; ++ scoped_guard(spinlock_irqsave, &channel->lock) { ++ rphy->powered = false; + +- if (!rcar_gen3_are_all_rphys_power_off(channel)) +- goto out; ++ if (rcar_gen3_are_all_rphys_power_off(channel)) { ++ u32 val = readl(channel->base + USB2_USBCTR); ++ ++ val |= USB2_USBCTR_PLL_RST; ++ writel(val, channel->base + USB2_USBCTR); ++ } ++ } + + if (channel->vbus) + ret = regulator_disable(channel->vbus); + +-out: +- mutex_unlock(&channel->lock); +- + return ret; + } + +@@ -645,13 +658,42 @@ static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np) + return candidate; + } + ++static int rcar_gen3_phy_usb2_init_bus(struct rcar_gen3_chan *channel) ++{ ++ struct device *dev = channel->dev; ++ int ret; ++ u32 val; ++ ++ channel->rstc = devm_reset_control_array_get_shared(dev); ++ if (IS_ERR(channel->rstc)) ++ return PTR_ERR(channel->rstc); ++ ++ ret = pm_runtime_resume_and_get(dev); ++ if (ret) ++ return ret; ++ ++ ret = reset_control_deassert(channel->rstc); ++ if (ret) ++ goto rpm_put; ++ ++ val = readl(channel->base + USB2_AHB_BUS_CTR); ++ val &= ~USB2_AHB_BUS_CTR_MBL_MASK; ++ val |= USB2_AHB_BUS_CTR_MBL_INCR4; ++ writel(val, channel->base + USB2_AHB_BUS_CTR); ++ ++rpm_put: ++ pm_runtime_put(dev); ++ ++ return ret; ++} ++ + static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) + { + const struct rcar_gen3_phy_drv_data *phy_data; + struct device *dev = &pdev->dev; + struct rcar_gen3_chan *channel; + struct phy_provider *provider; +- int ret = 0, i; ++ int ret = 0, i, irq; + + if (!dev->of_node) { + dev_err(dev, "This driver needs device tree\n"); +@@ -667,8 +709,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) + return 
PTR_ERR(channel->base); + + channel->obint_enable_bits = USB2_OBINT_BITS; +- /* get irq number here and request_irq for OTG in phy_init */ +- channel->irq = platform_get_irq_optional(pdev, 0); + channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); + if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { + channel->is_otg_channel = true; +@@ -698,11 +738,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) + goto error; + } + ++ platform_set_drvdata(pdev, channel); ++ channel->dev = dev; ++ ++ if (phy_data->init_bus) { ++ ret = rcar_gen3_phy_usb2_init_bus(channel); ++ if (ret) ++ goto error; ++ } ++ + channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl; + if (phy_data->no_adp_ctrl) + channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; + +- mutex_init(&channel->lock); ++ spin_lock_init(&channel->lock); + for (i = 0; i < NUM_OF_PHYS; i++) { + channel->rphys[i].phy = devm_phy_create(dev, NULL, + phy_data->phy_usb2_ops); +@@ -725,8 +774,19 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) + channel->vbus = NULL; + } + +- platform_set_drvdata(pdev, channel); +- channel->dev = dev; ++ irq = platform_get_irq_optional(pdev, 0); ++ if (irq < 0 && irq != -ENXIO) { ++ ret = irq; ++ goto error; ++ } else if (irq > 0) { ++ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); ++ ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, ++ IRQF_SHARED, dev_name(dev), channel); ++ if (ret < 0) { ++ dev_err(dev, "Failed to request irq (%d)\n", irq); ++ goto error; ++ } ++ } + + provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); + if (IS_ERR(provider)) { +@@ -754,6 +814,7 @@ static void rcar_gen3_phy_usb2_remove(struct platform_device *pdev) + if (channel->is_otg_channel) + device_remove_file(&pdev->dev, &dev_attr_role); + ++ reset_control_assert(channel->rstc); + pm_runtime_disable(&pdev->dev); + }; + +diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c +index 633912f8a05d04..bf52b41110db8e 100644 +--- a/drivers/phy/starfive/phy-jh7110-usb.c ++++ b/drivers/phy/starfive/phy-jh7110-usb.c +@@ -16,6 +16,8 @@ + #include + + #define USB_125M_CLK_RATE 125000000 ++#define USB_CLK_MODE_OFF 0x0 ++#define USB_CLK_MODE_RX_NORMAL_PWR BIT(1) + #define USB_LS_KEEPALIVE_OFF 0x4 + #define USB_LS_KEEPALIVE_ENABLE BIT(4) + +@@ -68,6 +70,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy) + { + struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy); + int ret; ++ unsigned int val; + + ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE); + if (ret) +@@ -77,6 +80,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy) + if (ret) + return ret; + ++ val = readl(phy->regs + USB_CLK_MODE_OFF); ++ val |= USB_CLK_MODE_RX_NORMAL_PWR; ++ writel(val, phy->regs + USB_CLK_MODE_OFF); ++ + return 0; + } + +diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +index cf6efa9c0364a1..a039b490cdb8e6 100644 +--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c ++++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +@@ -72,7 +72,7 @@ static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI; + struct bcm281xx_pin_function { + const char *name; + const char * const *groups; +- const unsigned ngroups; ++ const unsigned int ngroups; + }; + + /* +@@ -84,10 +84,10 @@ struct bcm281xx_pinctrl_data { + + /* List of all pins */ + const struct pinctrl_pin_desc *pins; +- const unsigned npins; ++ const unsigned int npins; + + const struct bcm281xx_pin_function *functions; +- const unsigned nfunctions; ++ const unsigned int nfunctions; + + 
struct regmap *regmap; + }; +@@ -941,7 +941,7 @@ static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = { + }; + + static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev, +- unsigned pin) ++ unsigned int pin) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + +@@ -985,7 +985,7 @@ static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) + } + + static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev, +- unsigned group) ++ unsigned int group) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + +@@ -993,9 +993,9 @@ static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + } + + static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, +- unsigned group, ++ unsigned int group, + const unsigned **pins, +- unsigned *num_pins) ++ unsigned int *num_pins) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + +@@ -1007,7 +1007,7 @@ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + + static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, +- unsigned offset) ++ unsigned int offset) + { + seq_printf(s, " %s", dev_name(pctldev->dev)); + } +@@ -1029,7 +1029,7 @@ static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev) + } + + static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev, +- unsigned function) ++ unsigned int function) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + +@@ -1037,9 +1037,9 @@ static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev, + } + + static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, +- unsigned function, ++ unsigned int function, + const char * const **groups, +- unsigned * const num_groups) ++ unsigned int * const num_groups) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + +@@ -1050,8 +1050,8 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, + } + + static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev, +- unsigned function, +- unsigned group) ++ unsigned int function, ++ unsigned int group) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + const struct bcm281xx_pin_function *f = &pdata->functions[function]; +@@ -1082,7 +1082,7 @@ static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = { + }; + + static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev, +- unsigned pin, ++ unsigned int pin, + unsigned long *config) + { + return -ENOTSUPP; +@@ -1091,9 +1091,9 @@ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev, + + /* Goes through the configs and update register val/mask */ + static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev, +- unsigned pin, ++ unsigned int pin, + unsigned long *configs, +- unsigned num_configs, ++ unsigned int num_configs, + u32 *val, + u32 *mask) + { +@@ -1207,9 +1207,9 @@ static const u16 bcm281xx_pullup_map[] = { + + /* Goes through the configs and update register val/mask */ + static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev, +- unsigned pin, ++ unsigned int pin, + unsigned long *configs, +- unsigned num_configs, ++ unsigned int num_configs, + u32 *val, + u32 *mask) + { +@@ -1277,9 +1277,9 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev, + + /* Goes through the configs and update register val/mask */ + static int 
bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev, +- unsigned pin, ++ unsigned int pin, + unsigned long *configs, +- unsigned num_configs, ++ unsigned int num_configs, + u32 *val, + u32 *mask) + { +@@ -1321,9 +1321,9 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev, + } + + static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev, +- unsigned pin, ++ unsigned int pin, + unsigned long *configs, +- unsigned num_configs) ++ unsigned int num_configs) + { + struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); + enum bcm281xx_pin_type pin_type; +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c +index 5ee746cb81f591..6520b88db1105b 100644 +--- a/drivers/pinctrl/devicetree.c ++++ b/drivers/pinctrl/devicetree.c +@@ -143,10 +143,14 @@ static int dt_to_map_one_config(struct pinctrl *p, + pctldev = get_pinctrl_dev_from_of_node(np_pctldev); + if (pctldev) + break; +- /* Do not defer probing of hogs (circular loop) */ ++ /* ++ * Do not defer probing of hogs (circular loop) ++ * ++ * Return 1 to let the caller catch the case. ++ */ + if (np_pctldev == p->dev->of_node) { + of_node_put(np_pctldev); +- return -ENODEV; ++ return 1; + } + } + of_node_put(np_pctldev); +@@ -265,6 +269,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) + ret = dt_to_map_one_config(p, pctldev, statename, + np_config); + of_node_put(np_config); ++ if (ret == 1) ++ continue; + if (ret < 0) + goto err; + } +diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c +index 524424ee6c4e71..5cc00fdc48d840 100644 +--- a/drivers/pinctrl/meson/pinctrl-meson.c ++++ b/drivers/pinctrl/meson/pinctrl-meson.c +@@ -486,7 +486,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin, + case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_BIAS_PULL_UP: + if (meson_pinconf_get_pull(pc, pin) == param) +- arg = 1; ++ arg = 60000; + else + return -EINVAL; + break; +diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c +index 20c3b902504451..a18df416229993 100644 +--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c ++++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c +@@ -629,7 +629,7 @@ static struct platform_driver apq8064_pinctrl_driver = { + .of_match_table = apq8064_pinctrl_of_match, + }, + .probe = apq8064_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init apq8064_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c +index 3fc0a40762b631..afada80e52a235 100644 +--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c ++++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c +@@ -1207,7 +1207,7 @@ static struct platform_driver apq8084_pinctrl_driver = { + .of_match_table = apq8084_pinctrl_of_match, + }, + .probe = apq8084_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init apq8084_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c +index 1f7944dd829d1b..cb13576ad6cfb4 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c +@@ -710,7 +710,7 @@ static struct platform_driver ipq4019_pinctrl_driver = { + .of_match_table = ipq4019_pinctrl_of_match, + }, + .probe = ipq4019_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq4019_pinctrl_init(void) +diff --git 
a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c +index e2951f81c3eeb3..68f65b57003e91 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c +@@ -754,7 +754,7 @@ static struct platform_driver ipq5018_pinctrl_driver = { + .of_match_table = ipq5018_pinctrl_of_match, + }, + .probe = ipq5018_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq5018_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c +index 625f8014051f6a..88217511897088 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c +@@ -834,7 +834,7 @@ static struct platform_driver ipq5332_pinctrl_driver = { + .of_match_table = ipq5332_pinctrl_of_match, + }, + .probe = ipq5332_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq5332_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c +index 0ad08647dbcdf0..ac330d8712b5cb 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c +@@ -1080,7 +1080,7 @@ static struct platform_driver ipq6018_pinctrl_driver = { + .of_match_table = ipq6018_pinctrl_of_match, + }, + .probe = ipq6018_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq6018_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c +index e2bb94e86aef6e..e10e1bc4c91131 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c +@@ -631,7 +631,7 @@ static struct platform_driver ipq8064_pinctrl_driver = { + .of_match_table = ipq8064_pinctrl_of_match, + }, + .probe = ipq8064_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq8064_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c +index 337f3a1c92c192..fee32c1d1d3e9a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c +@@ -1041,7 +1041,7 @@ static struct platform_driver ipq8074_pinctrl_driver = { + .of_match_table = ipq8074_pinctrl_of_match, + }, + .probe = ipq8074_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq8074_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c +index e2491617b2364a..20ab59cb621bc3 100644 +--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c +@@ -799,7 +799,7 @@ static struct platform_driver ipq9574_pinctrl_driver = { + .of_match_table = ipq9574_pinctrl_of_match, + }, + .probe = ipq9574_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init ipq9574_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c +index e7cd3ef1cf3e81..415d24e16267d0 100644 +--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c ++++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c +@@ -1059,7 +1059,7 @@ static struct platform_driver mdm9607_pinctrl_driver = { + .of_match_table = mdm9607_pinctrl_of_match, + }, + .probe = mdm9607_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = 
msm_pinctrl_remove, + }; + + static int __init mdm9607_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c +index 0a2ae383d3d57b..3f2eafea0b2467 100644 +--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c ++++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c +@@ -446,7 +446,7 @@ static struct platform_driver mdm9615_pinctrl_driver = { + .of_match_table = mdm9615_pinctrl_of_match, + }, + .probe = mdm9615_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init mdm9615_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index b252fc22f64e6b..ed70767ca0f0c9 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -43,7 +43,6 @@ + * @pctrl: pinctrl handle. + * @chip: gpiochip handle. + * @desc: pin controller descriptor +- * @restart_nb: restart notifier block. + * @irq: parent irq for the TLMM irq_chip. + * @intr_target_use_scm: route irq to application cpu using scm calls + * @lock: Spinlock to protect register resources as well +@@ -63,7 +62,6 @@ struct msm_pinctrl { + struct pinctrl_dev *pctrl; + struct gpio_chip chip; + struct pinctrl_desc desc; +- struct notifier_block restart_nb; + + int irq; + +@@ -1424,10 +1422,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) + return 0; + } + +-static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, +- void *data) ++static int msm_ps_hold_restart(struct sys_off_data *data) + { +- struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); ++ struct msm_pinctrl *pctrl = data->cb_data; + + writel(0, pctrl->regs[0] + PS_HOLD_OFFSET); + mdelay(1000); +@@ -1438,7 +1435,11 @@ static struct msm_pinctrl *poweroff_pctrl; + + static void msm_ps_hold_poweroff(void) + { +- msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL); ++ struct sys_off_data data = { ++ .cb_data = poweroff_pctrl, ++ }; ++ ++ msm_ps_hold_restart(&data); + } + + static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) +@@ -1448,9 +1449,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) + + for (i = 0; i < pctrl->soc->nfunctions; i++) + if (!strcmp(func[i].name, "ps_hold")) { +- pctrl->restart_nb.notifier_call = msm_ps_hold_restart; +- pctrl->restart_nb.priority = 128; +- if (register_restart_handler(&pctrl->restart_nb)) ++ if (devm_register_sys_off_handler(pctrl->dev, ++ SYS_OFF_MODE_RESTART, ++ 128, ++ msm_ps_hold_restart, ++ pctrl)) + dev_err(pctrl->dev, + "failed to setup restart handler.\n"); + poweroff_pctrl = pctrl; +@@ -1547,15 +1550,11 @@ int msm_pinctrl_probe(struct platform_device *pdev, + } + EXPORT_SYMBOL(msm_pinctrl_probe); + +-int msm_pinctrl_remove(struct platform_device *pdev) ++void msm_pinctrl_remove(struct platform_device *pdev) + { + struct msm_pinctrl *pctrl = platform_get_drvdata(pdev); + + gpiochip_remove(&pctrl->chip); +- +- unregister_restart_handler(&pctrl->restart_nb); +- +- return 0; + } + EXPORT_SYMBOL(msm_pinctrl_remove); + +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h +index 1d2f2e904da190..4968d08a384da9 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.h ++++ b/drivers/pinctrl/qcom/pinctrl-msm.h +@@ -166,6 +166,6 @@ extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops; + + int msm_pinctrl_probe(struct platform_device *pdev, + const struct msm_pinctrl_soc_data *soc_data); +-int msm_pinctrl_remove(struct platform_device *pdev); ++void 
msm_pinctrl_remove(struct platform_device *pdev); + + #endif +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c +index 994619840a706c..90b4004e7faf18 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c +@@ -638,7 +638,7 @@ static struct platform_driver msm8226_pinctrl_driver = { + .of_match_table = msm8226_pinctrl_of_match, + }, + .probe = msm8226_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8226_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c +index 999a5f867eb508..dba6d531b4a146 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c +@@ -981,7 +981,7 @@ static struct platform_driver msm8660_pinctrl_driver = { + .of_match_table = msm8660_pinctrl_of_match, + }, + .probe = msm8660_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8660_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c +index 756856d20d6b5f..14b17ba9f9061a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c +@@ -929,7 +929,7 @@ static struct platform_driver msm8909_pinctrl_driver = { + .of_match_table = msm8909_pinctrl_of_match, + }, + .probe = msm8909_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8909_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c +index cea5c54f92fec1..184dcf8422735b 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c +@@ -969,7 +969,7 @@ static struct platform_driver msm8916_pinctrl_driver = { + .of_match_table = msm8916_pinctrl_of_match, + }, + .probe = msm8916_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8916_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c +index 998351bdfee136..c2253821ae8d36 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c +@@ -1816,7 +1816,7 @@ static struct platform_driver msm8953_pinctrl_driver = { + .of_match_table = msm8953_pinctrl_of_match, + }, + .probe = msm8953_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8953_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c +index ebe230b3b437cc..6b9148d226e9b8 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c +@@ -1246,7 +1246,7 @@ static struct platform_driver msm8960_pinctrl_driver = { + .of_match_table = msm8960_pinctrl_of_match, + }, + .probe = msm8960_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8960_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c +index c30d80e4e98ca6..9a951888e8a1b1 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c +@@ -1096,7 +1096,7 @@ static struct platform_driver msm8976_pinctrl_driver = { + .of_match_table = msm8976_pinctrl_of_match, + }, + .probe = 
msm8976_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8976_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c +index b1a6759ab4a5e7..1ed1dd32d6c795 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c +@@ -1343,7 +1343,7 @@ static struct platform_driver msm8994_pinctrl_driver = { + .of_match_table = msm8994_pinctrl_of_match, + }, + .probe = msm8994_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8994_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c +index 46cc0b49dbab52..5f0e7f78fd5178 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c +@@ -1906,7 +1906,7 @@ static struct platform_driver msm8996_pinctrl_driver = { + .of_match_table = msm8996_pinctrl_of_match, + }, + .probe = msm8996_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8996_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c +index b7cbf32b3125a9..4aaf45e54f3a79 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c +@@ -1535,7 +1535,7 @@ static struct platform_driver msm8998_pinctrl_driver = { + .of_match_table = msm8998_pinctrl_of_match, + }, + .probe = msm8998_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8998_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c +index d5fe62992849c9..58b4f6f31ae6ae 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c +@@ -1071,7 +1071,7 @@ static struct platform_driver msm8x74_pinctrl_driver = { + .of_match_table = msm8x74_pinctrl_of_match, + }, + .probe = msm8x74_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init msm8x74_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c +index ba699eac9ee8b2..f5c1c427b44e91 100644 +--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c ++++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c +@@ -1113,7 +1113,7 @@ static struct platform_driver qcm2290_pinctrl_driver = { + .of_match_table = qcm2290_pinctrl_of_match, + }, + .probe = qcm2290_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init qcm2290_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c +index ae7224012f8aa0..9a875b7dc9989c 100644 +--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c ++++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c +@@ -1644,7 +1644,7 @@ static struct platform_driver qcs404_pinctrl_driver = { + .of_match_table = qcs404_pinctrl_of_match, + }, + .probe = qcs404_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init qcs404_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c +index b5808fcfb13cde..4d2f6f495163bc 100644 +--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c ++++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c +@@ -145,7 +145,7 @@ static struct platform_driver 
qdf2xxx_pinctrl_driver = { + .acpi_match_table = qdf2xxx_acpi_ids, + }, + .probe = qdf2xxx_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init qdf2xxx_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c +index 47bc529ef550d2..da4f940bc8d4e8 100644 +--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c ++++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c +@@ -1248,7 +1248,7 @@ static struct platform_driver qdu1000_tlmm_driver = { + .of_match_table = qdu1000_tlmm_of_match, + }, + .probe = qdu1000_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init qdu1000_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c +index 8fdea25d8d67e1..5459c0c681a23f 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c ++++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c +@@ -1530,7 +1530,7 @@ static struct platform_driver sa8775p_pinctrl_driver = { + .of_match_table = sa8775p_pinctrl_of_match, + }, + .probe = sa8775p_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sa8775p_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c +index 6eb0c73791c0bc..c27aaa599b917a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c ++++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c +@@ -1159,7 +1159,7 @@ static struct platform_driver sc7180_pinctrl_driver = { + .of_match_table = sc7180_pinctrl_of_match, + }, + .probe = sc7180_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sc7180_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c +index 0c10eeb60b55e7..c2db663e396eb4 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c ++++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c +@@ -1505,7 +1505,7 @@ static struct platform_driver sc7280_pinctrl_driver = { + .of_match_table = sc7280_pinctrl_of_match, + }, + .probe = sc7280_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sc7280_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c +index d6a79ad41a40a8..cfa7c8be9770c9 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c ++++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c +@@ -1720,7 +1720,7 @@ static struct platform_driver sc8180x_pinctrl_driver = { + .acpi_match_table = sc8180x_pinctrl_acpi_match, + }, + .probe = sc8180x_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sc8180x_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c +index 96f4fb5a5d297f..4b1c49697698de 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c ++++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c +@@ -1926,7 +1926,7 @@ static struct platform_driver sc8280xp_pinctrl_driver = { + .of_match_table = sc8280xp_pinctrl_of_match, + }, + .probe = sc8280xp_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sc8280xp_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c +index c2e0d5c034acf6..b0c29a24b09b5a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c ++++ 
b/drivers/pinctrl/qcom/pinctrl-sdm660.c +@@ -1428,7 +1428,7 @@ static struct platform_driver sdm660_pinctrl_driver = { + .of_match_table = sdm660_pinctrl_of_match, + }, + .probe = sdm660_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sdm660_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c +index cc3cce077de4e6..1e694a966953ac 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c ++++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c +@@ -1318,7 +1318,7 @@ static struct platform_driver sdm670_pinctrl_driver = { + .of_match_table = sdm670_pinctrl_of_match, + }, + .probe = sdm670_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sdm670_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c +index cc05c415ed1551..3f3265e0018d66 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c ++++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c +@@ -1351,7 +1351,7 @@ static struct platform_driver sdm845_pinctrl_driver = { + .acpi_match_table = ACPI_PTR(sdm845_pinctrl_acpi_match), + }, + .probe = sdm845_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sdm845_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c +index 8826db9d21d04c..c88b8bfcacb6a7 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c ++++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c +@@ -990,7 +990,7 @@ static struct platform_driver sdx55_pinctrl_driver = { + .of_match_table = sdx55_pinctrl_of_match, + }, + .probe = sdx55_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sdx55_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c +index f6f319c997fc7a..bd44ec0fcab43c 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c ++++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c +@@ -939,7 +939,7 @@ static struct platform_driver sdx65_pinctrl_driver = { + .of_match_table = sdx65_pinctrl_of_match, + }, + .probe = sdx65_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sdx65_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c +index 3cfe8c7f04df81..396f6fc779a2e5 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c ++++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c +@@ -1124,7 +1124,7 @@ static struct platform_driver sdx75_pinctrl_driver = { + .of_match_table = sdx75_pinctrl_of_match, + }, + .probe = sdx75_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sdx75_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c +index 2a06025f488584..87057089b2b649 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c +@@ -895,7 +895,7 @@ static struct platform_driver sm6115_tlmm_driver = { + .of_match_table = sm6115_tlmm_of_match, + }, + .probe = sm6115_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm6115_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c +index d5e2b896954c22..e07339ba72bcac 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c 
++++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c +@@ -1249,7 +1249,7 @@ static struct platform_driver sm6125_tlmm_driver = { + .of_match_table = sm6125_tlmm_of_match, + }, + .probe = sm6125_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm6125_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c +index f3828c07b13450..4aeb1ba43ee3d4 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c +@@ -1373,7 +1373,7 @@ static struct platform_driver sm6350_tlmm_driver = { + .of_match_table = sm6350_tlmm_of_match, + }, + .probe = sm6350_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm6350_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c +index c82c8516932ea2..d86630d7125c2a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c +@@ -1516,7 +1516,7 @@ static struct platform_driver sm6375_tlmm_driver = { + .of_match_table = sm6375_tlmm_of_match, + }, + .probe = sm6375_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm6375_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c +index edb5984cd35190..b9f067de8ef0e4 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c +@@ -1254,7 +1254,7 @@ static struct platform_driver sm7150_tlmm_driver = { + .of_match_table = sm7150_tlmm_of_match, + }, + .probe = sm7150_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm7150_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c +index 01aea9c70b7a78..f8f5bee74f1dc0 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c +@@ -1542,7 +1542,7 @@ static struct platform_driver sm8150_pinctrl_driver = { + .of_match_table = sm8150_pinctrl_of_match, + }, + .probe = sm8150_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm8150_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c +index e9961a49ff9811..54fda77bf2968c 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c +@@ -1351,7 +1351,7 @@ static struct platform_driver sm8250_pinctrl_driver = { + .of_match_table = sm8250_pinctrl_of_match, + }, + .probe = sm8250_pinctrl_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm8250_pinctrl_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c +index 9c69458bd91091..ac7f2820f2cbfb 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c +@@ -1642,7 +1642,7 @@ static struct platform_driver sm8350_tlmm_driver = { + .of_match_table = sm8350_tlmm_of_match, + }, + .probe = sm8350_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm8350_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c +index d11bb1ee9e3d8d..61728671169527 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c ++++ 
b/drivers/pinctrl/qcom/pinctrl-sm8450.c +@@ -1677,7 +1677,7 @@ static struct platform_driver sm8450_tlmm_driver = { + .of_match_table = sm8450_tlmm_of_match, + }, + .probe = sm8450_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm8450_tlmm_init(void) +diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c +index 3c847d9cb5d93b..9184e0183755da 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c ++++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c +@@ -1762,7 +1762,7 @@ static struct platform_driver sm8550_tlmm_driver = { + .of_match_table = sm8550_tlmm_of_match, + }, + .probe = sm8550_tlmm_probe, +- .remove = msm_pinctrl_remove, ++ .remove_new = msm_pinctrl_remove, + }; + + static int __init sm8550_tlmm_init(void) +diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c +index 7c12a3470642c3..637b89ebe0e455 100644 +--- a/drivers/pinctrl/tegra/pinctrl-tegra.c ++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c +@@ -280,8 +280,8 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, + return 0; + } + +-static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, +- unsigned int offset) ++static int tegra_pinctrl_get_group_index(struct pinctrl_dev *pctldev, ++ unsigned int offset) + { + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + unsigned int group, num_pins, j; +@@ -294,12 +294,35 @@ static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev * + continue; + for (j = 0; j < num_pins; j++) { + if (offset == pins[j]) +- return &pmx->soc->groups[group]; ++ return group; + } + } + +- dev_err(pctldev->dev, "Pingroup not found for pin %u\n", offset); +- return NULL; ++ return -EINVAL; ++} ++ ++static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, ++ unsigned int offset, ++ int group_index) ++{ ++ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); ++ ++ if (group_index < 0 || group_index >= pmx->soc->ngroups) ++ return NULL; ++ ++ return &pmx->soc->groups[group_index]; ++} ++ ++static struct tegra_pingroup_config *tegra_pinctrl_get_group_config(struct pinctrl_dev *pctldev, ++ unsigned int offset, ++ int group_index) ++{ ++ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); ++ ++ if (group_index < 0) ++ return NULL; ++ ++ return &pmx->pingroup_configs[group_index]; + } + + static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, +@@ -308,12 +331,15 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, + { + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + const struct tegra_pingroup *group; ++ struct tegra_pingroup_config *config; ++ int group_index; + u32 value; + + if (!pmx->soc->sfsel_in_mux) + return 0; + +- group = tegra_pinctrl_get_group(pctldev, offset); ++ group_index = tegra_pinctrl_get_group_index(pctldev, offset); ++ group = tegra_pinctrl_get_group(pctldev, offset, group_index); + + if (!group) + return -EINVAL; +@@ -321,7 +347,11 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, + if (group->mux_reg < 0 || group->sfsel_bit < 0) + return -EINVAL; + ++ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index); ++ if (!config) ++ return -EINVAL; + value = pmx_readl(pmx, group->mux_bank, group->mux_reg); ++ config->is_sfsel = (value & BIT(group->sfsel_bit)) != 0; + value &= ~BIT(group->sfsel_bit); + pmx_writel(pmx, value, group->mux_bank, group->mux_reg); + +@@ -334,12 
+364,15 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev, + { + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + const struct tegra_pingroup *group; ++ struct tegra_pingroup_config *config; ++ int group_index; + u32 value; + + if (!pmx->soc->sfsel_in_mux) + return; + +- group = tegra_pinctrl_get_group(pctldev, offset); ++ group_index = tegra_pinctrl_get_group_index(pctldev, offset); ++ group = tegra_pinctrl_get_group(pctldev, offset, group_index); + + if (!group) + return; +@@ -347,8 +380,12 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev, + if (group->mux_reg < 0 || group->sfsel_bit < 0) + return; + ++ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index); ++ if (!config) ++ return; + value = pmx_readl(pmx, group->mux_bank, group->mux_reg); +- value |= BIT(group->sfsel_bit); ++ if (config->is_sfsel) ++ value |= BIT(group->sfsel_bit); + pmx_writel(pmx, value, group->mux_bank, group->mux_reg); + } + +@@ -785,6 +822,12 @@ int tegra_pinctrl_probe(struct platform_device *pdev, + pmx->dev = &pdev->dev; + pmx->soc = soc_data; + ++ pmx->pingroup_configs = devm_kcalloc(&pdev->dev, ++ pmx->soc->ngroups, sizeof(*pmx->pingroup_configs), ++ GFP_KERNEL); ++ if (!pmx->pingroup_configs) ++ return -ENOMEM; ++ + /* + * Each mux group will appear in 4 functions' list of groups. + * This over-allocates slightly, since not all groups are mux groups. +diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h +index b3289bdf727d82..b97136685f7a88 100644 +--- a/drivers/pinctrl/tegra/pinctrl-tegra.h ++++ b/drivers/pinctrl/tegra/pinctrl-tegra.h +@@ -8,6 +8,10 @@ + #ifndef __PINMUX_TEGRA_H__ + #define __PINMUX_TEGRA_H__ + ++struct tegra_pingroup_config { ++ bool is_sfsel; ++}; ++ + struct tegra_pmx { + struct device *dev; + struct pinctrl_dev *pctl; +@@ -21,6 +25,8 @@ struct tegra_pmx { + int nbanks; + void __iomem **regs; + u32 *backup_regs; ++ /* Array of size soc->ngroups */ ++ struct tegra_pingroup_config *pingroup_configs; + }; + + enum tegra_pinconf_param { +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +index 230e6ee966366a..d8f1bf5e58a0f4 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +@@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj, + int length; + + length = strlen(buf); +- if (buf[length-1] == '\n') ++ if (length && buf[length - 1] == '\n') + length--; + + /* firmware does verifiation of min/max password length, +diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c +index 085e044e888e4e..d7ef4f046d99b5 100644 +--- a/drivers/platform/x86/fujitsu-laptop.c ++++ b/drivers/platform/x86/fujitsu-laptop.c +@@ -17,13 +17,13 @@ + /* + * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional + * features made available on a range of Fujitsu laptops including the +- * P2xxx/P5xxx/S6xxx/S7xxx series. ++ * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series. + * + * This driver implements a vendor-specific backlight control interface for + * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu + * laptops. + * +- * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and ++ * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and + * P8010. 
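
[Illustrative aside, not part of the patch: the Tegra hunks above allocate one tegra_pingroup_config per pin group so that gpio_request_enable() can record whether the pin was in special-function mode, and gpio_disable_free() can restore that mode instead of forcing SFSEL back on unconditionally. The bare save/restore pattern, stripped of the driver plumbing:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/types.h>

	struct pin_cfg {
		bool was_sfsel;		/* mirrors tegra_pingroup_config */
	};

	static void pin_to_gpio(struct pin_cfg *cfg, void __iomem *mux, unsigned int bit)
	{
		u32 val = readl(mux);

		cfg->was_sfsel = val & BIT(bit);	/* remember prior mode */
		writel(val & ~BIT(bit), mux);		/* hand the pin to GPIO */
	}

	static void pin_from_gpio(struct pin_cfg *cfg, void __iomem *mux, unsigned int bit)
	{
		u32 val = readl(mux);

		if (cfg->was_sfsel)	/* restore SFIO only if it was SFIO */
			val |= BIT(bit);
		writel(val, mux);
	}
]
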
It should work on most P-series and S-series Lifebooks, but + * YMMV. + * +@@ -102,7 +102,11 @@ + #define KEY2_CODE 0x411 + #define KEY3_CODE 0x412 + #define KEY4_CODE 0x413 +-#define KEY5_CODE 0x420 ++#define KEY5_CODE 0x414 ++#define KEY6_CODE 0x415 ++#define KEY7_CODE 0x416 ++#define KEY8_CODE 0x417 ++#define KEY9_CODE 0x420 + + /* Hotkey ringbuffer limits */ + #define MAX_HOTKEY_RINGBUFFER_SIZE 100 +@@ -450,7 +454,7 @@ static const struct key_entry keymap_default[] = { + { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, + { KE_KEY, KEY3_CODE, { KEY_PROG3 } }, + { KE_KEY, KEY4_CODE, { KEY_PROG4 } }, +- { KE_KEY, KEY5_CODE, { KEY_RFKILL } }, ++ { KE_KEY, KEY9_CODE, { KEY_RFKILL } }, + /* Soft keys read from status flags */ + { KE_KEY, FLAG_RFKILL, { KEY_RFKILL } }, + { KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } }, +@@ -474,6 +478,18 @@ static const struct key_entry keymap_p8010[] = { + { KE_END, 0 } + }; + ++static const struct key_entry keymap_s2110[] = { ++ { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */ ++ { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */ ++ { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */ ++ { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */ ++ { KE_KEY, KEY5_CODE, { KEY_STOPCD } }, ++ { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } }, ++ { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } }, ++ { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } }, ++ { KE_END, 0 } ++}; ++ + static const struct key_entry *keymap = keymap_default; + + static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id) +@@ -511,6 +527,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = { + }, + .driver_data = (void *)keymap_p8010 + }, ++ { ++ .callback = fujitsu_laptop_dmi_keymap_override, ++ .ident = "Fujitsu LifeBook S2110", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"), ++ }, ++ .driver_data = (void *)keymap_s2110 ++ }, + {} + }; + +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index cde5f845cf2557..8de0d3232e48c5 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -212,6 +212,7 @@ enum tpacpi_hkey_event_t { + /* Thermal events */ + TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ + TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */ ++ TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed*/ + TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */ + TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */ + TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */ +@@ -3942,6 +3943,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, + pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n"); + /* recommended action: immediate sleep/hibernate */ + break; ++ case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE: ++ pr_debug("Battery Info: battery charge threshold changed\n"); ++ /* User changed charging threshold. 
No action needed */ ++ return true; + case TP_HKEY_EV_ALARM_SENSOR_HOT: + pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n"); + /* recommended action: warn user through gui, that */ +@@ -11315,6 +11320,8 @@ static int __must_check __init get_thinkpad_model_data( + tp->vendor = PCI_VENDOR_ID_IBM; + else if (dmi_name_in_vendors("LENOVO")) + tp->vendor = PCI_VENDOR_ID_LENOVO; ++ else if (dmi_name_in_vendors("NEC")) ++ tp->vendor = PCI_VENDOR_ID_LENOVO; + else + return 0; + +diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c +index 13fce2b134f60a..84d68c805cac85 100644 +--- a/drivers/pmdomain/imx/gpcv2.c ++++ b/drivers/pmdomain/imx/gpcv2.c +@@ -1350,7 +1350,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) + } + + if (IS_ENABLED(CONFIG_LOCKDEP) && +- of_property_read_bool(domain->dev->of_node, "power-domains")) ++ of_property_present(domain->dev->of_node, "power-domains")) + lockdep_set_subclass(&domain->genpd.mlock, 1); + + ret = of_genpd_add_provider_simple(domain->dev->of_node, +diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c +index 40f7dba42b5ad7..404cbe32711e73 100644 +--- a/drivers/regulator/ad5398.c ++++ b/drivers/regulator/ad5398.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + #define AD5398_CURRENT_EN_MASK 0x8000 + +@@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client) + const struct ad5398_current_data_format *df = + (struct ad5398_current_data_format *)id->driver_data; + +- if (!init_data) +- return -EINVAL; +- + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + config.dev = &client->dev; ++ if (client->dev.of_node) ++ init_data = of_get_regulator_init_data(&client->dev, ++ client->dev.of_node, ++ &ad5398_reg); ++ if (!init_data) ++ return -EINVAL; ++ + config.init_data = init_data; ++ config.of_node = client->dev.of_node; + config.driver_data = chip; + + chip->client = client; +diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c +index 90de22c81da976..37174b544113f8 100644 +--- a/drivers/remoteproc/qcom_wcnss.c ++++ b/drivers/remoteproc/qcom_wcnss.c +@@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = { + .pmu_offset = 0x1004, + .spare_offset = 0x1088, + +- .pd_names = { "mx", "cx" }, ++ .pd_names = { "cx", "mx" }, + .vregs = (struct wcnss_vreg_info[]) { +- { "vddmx", 950000, 1150000, 0 }, + { "vddcx", .super_turbo = true}, ++ { "vddmx", 950000, 1150000, 0 }, + { "vddpx", 1800000, 1800000, 0 }, + }, + .num_pd_vregs = 2, +@@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = { + .pmu_offset = 0x1004, + .spare_offset = 0x1088, + +- .pd_names = { "mx", "cx" }, ++ .pd_names = { "cx", "mx" }, + .vregs = (struct wcnss_vreg_info[]) { +- { "vddmx", 1287500, 1287500, 0 }, + { "vddcx", .super_turbo = true }, ++ { "vddmx", 1287500, 1287500, 0 }, + { "vddpx", 1800000, 1800000, 0 }, + }, + .num_pd_vregs = 2, +@@ -397,8 +397,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev) + static int wcnss_init_pds(struct qcom_wcnss *wcnss, + const char * const pd_names[WCNSS_MAX_PDS]) + { ++ struct device *dev = wcnss->dev; + int i, ret; + ++ /* Handle single power domain */ ++ if (dev->pm_domain) { ++ wcnss->pds[0] = dev; ++ wcnss->num_pds = 1; ++ pm_runtime_enable(dev); ++ return 0; ++ } ++ + for (i = 0; i < WCNSS_MAX_PDS; i++) { + if (!pd_names[i]) + break; +@@ -418,8 +427,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss, + + static void 
wcnss_release_pds(struct qcom_wcnss *wcnss) + { ++ struct device *dev = wcnss->dev; + int i; + ++ /* Handle single power domain */ ++ if (wcnss->num_pds == 1 && dev->pm_domain) { ++ pm_runtime_disable(dev); ++ return; ++ } ++ + for (i = 0; i < wcnss->num_pds; i++) + dev_pm_domain_detach(wcnss->pds[i], false); + } +@@ -437,10 +453,14 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss, + * the regulators for the power domains. For old device trees we need to + * reserve extra space to manage them through the regulator interface. + */ +- if (wcnss->num_pds) +- info += num_pd_vregs; +- else ++ if (wcnss->num_pds) { ++ info += wcnss->num_pds; ++ /* Handle single power domain case */ ++ if (wcnss->num_pds < num_pd_vregs) ++ num_vregs += num_pd_vregs - wcnss->num_pds; ++ } else { + num_vregs += num_pd_vregs; ++ } + + bulk = devm_kcalloc(wcnss->dev, + num_vregs, sizeof(struct regulator_bulk_data), +diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c +index 506b7d1c239701..0c78451960926d 100644 +--- a/drivers/rtc/rtc-ds1307.c ++++ b/drivers/rtc/rtc-ds1307.c +@@ -1802,10 +1802,8 @@ static int ds1307_probe(struct i2c_client *client) + * For some variants, be sure alarms can trigger when we're + * running on Vbackup (BBSQI/BBSQW) + */ +- if (want_irq || ds1307_can_wakeup_device) { ++ if (want_irq || ds1307_can_wakeup_device) + regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit; +- regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); +- } + + regmap_write(ds1307->regmap, DS1337_REG_CONTROL, + regs[0]); +diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c +index 35b2e36b426a0d..cb01038a2e27fe 100644 +--- a/drivers/rtc/rtc-rv3032.c ++++ b/drivers/rtc/rtc-rv3032.c +@@ -69,7 +69,7 @@ + #define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5) + #define RV3032_CLKOUT2_OS BIT(7) + +-#define RV3032_CTRL1_EERD BIT(3) ++#define RV3032_CTRL1_EERD BIT(2) + #define RV3032_CTRL1_WADA BIT(5) + + #define RV3032_CTRL2_STOP BIT(0) +diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c +index d6ea2fd4c2a02b..d4151f519e8b22 100644 +--- a/drivers/s390/crypto/vfio_ap_ops.c ++++ b/drivers/s390/crypto/vfio_ap_ops.c +@@ -834,48 +834,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev) + vfio_put_device(&matrix_mdev->vdev); + } + +-#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ +- "already assigned to %s" ++#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s" + +-static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, +- unsigned long *apm, +- unsigned long *aqm) ++#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev" ++ ++static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee, ++ struct ap_matrix_mdev *assigned_to, ++ unsigned long *apm, unsigned long *aqm) + { + unsigned long apid, apqi; +- const struct device *dev = mdev_dev(matrix_mdev->mdev); +- const char *mdev_name = dev_name(dev); + +- for_each_set_bit_inv(apid, apm, AP_DEVICES) ++ for_each_set_bit_inv(apid, apm, AP_DEVICES) { ++ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { ++ dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR, ++ apid, apqi, dev_name(mdev_dev(assigned_to->mdev))); ++ } ++ } ++} ++ ++static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee, ++ unsigned long *apm, unsigned long *aqm) ++{ ++ unsigned long apid, apqi; ++ ++ for_each_set_bit_inv(apid, apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) +- dev_warn(dev, 
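
[Illustrative aside, not part of the patch: the qcom_wcnss hunks above teach the power-domain setup and teardown about the single-domain case, where the genpd core attaches the only domain directly to the device and the device itself stands in for it. A rough sketch of that fallback, with hypothetical names:

	#include <linux/err.h>
	#include <linux/pm_domain.h>
	#include <linux/pm_runtime.h>

	static int attach_pds(struct device *dev, struct device **pds,
			      const char * const *names, int max)
	{
		int i;

		if (dev->pm_domain) {	/* single domain, already attached */
			pds[0] = dev;
			pm_runtime_enable(dev);
			return 1;
		}

		for (i = 0; i < max && names[i]; i++) {
			pds[i] = dev_pm_domain_attach_by_name(dev, names[i]);
			if (IS_ERR_OR_NULL(pds[i]))
				return pds[i] ? PTR_ERR(pds[i]) : -ENODATA;
		}

		return i;	/* number of domains attached */
	}

The matching release path must then skip dev_pm_domain_detach() and call pm_runtime_disable() instead, exactly as the wcnss_release_pds() hunk above does.]
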
MDEV_SHARING_ERR, apid, apqi, mdev_name); ++ dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi); ++ } + } + + /** + * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs + * ++ * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being ++ * assigned; or, NULL if this function was called by the AP bus ++ * driver in_use callback to verify none of the APQNs being reserved ++ * for the host device driver are in use by a vfio_ap mediated device + * @mdev_apm: mask indicating the APIDs of the APQNs to be verified + * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified + * +- * Verifies that each APQN derived from the Cartesian product of a bitmap of +- * AP adapter IDs and AP queue indexes is not configured for any matrix +- * mediated device. AP queue sharing is not allowed. ++ * Verifies that each APQN derived from the Cartesian product of APIDs ++ * represented by the bits set in @mdev_apm and the APQIs of the bits set in ++ * @mdev_aqm is not assigned to a mediated device other than the mdev to which ++ * the APQN is being assigned (@assignee). AP queue sharing is not allowed. + * + * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. + */ +-static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, ++static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee, ++ unsigned long *mdev_apm, + unsigned long *mdev_aqm) + { +- struct ap_matrix_mdev *matrix_mdev; ++ struct ap_matrix_mdev *assigned_to; + DECLARE_BITMAP(apm, AP_DEVICES); + DECLARE_BITMAP(aqm, AP_DOMAINS); + +- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { ++ list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) { + /* +- * If the input apm and aqm are fields of the matrix_mdev +- * object, then move on to the next matrix_mdev. ++ * If the mdev to which the mdev_apm and mdev_aqm is being ++ * assigned is the same as the mdev being verified + */ +- if (mdev_apm == matrix_mdev->matrix.apm && +- mdev_aqm == matrix_mdev->matrix.aqm) ++ if (assignee == assigned_to) + continue; + + memset(apm, 0, sizeof(apm)); +@@ -885,15 +903,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, + * We work on full longs, as we can only exclude the leftover + * bits in non-inverse order. The leftover is all zeros. 
+ */ +- if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, +- AP_DEVICES)) ++ if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES)) + continue; + +- if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, +- AP_DOMAINS)) ++ if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS)) + continue; + +- vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); ++ if (assignee) ++ vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm); ++ else ++ vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm); + + return -EADDRINUSE; + } +@@ -922,7 +941,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) + matrix_mdev->matrix.aqm)) + return -EADDRNOTAVAIL; + +- return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, ++ return vfio_ap_mdev_verify_no_sharing(matrix_mdev, ++ matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm); + } + +@@ -2271,7 +2291,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm) + + mutex_lock(&matrix_dev->guests_lock); + mutex_lock(&matrix_dev->mdevs_lock); +- ret = vfio_ap_mdev_verify_no_sharing(apm, aqm); ++ ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm); + mutex_unlock(&matrix_dev->mdevs_lock); + mutex_unlock(&matrix_dev->guests_lock); + +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c +index 0ad8a10002ce36..5c9bc8af3c2df8 100644 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c +@@ -5646,6 +5646,7 @@ static struct lpfc_nodelist * + __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) + { + struct lpfc_nodelist *ndlp; ++ struct lpfc_nodelist *np = NULL; + uint32_t data1; + + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { +@@ -5660,14 +5661,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag, data1, ndlp->nlp_rpi, + ndlp->active_rrqs_xri_bitmap); +- return ndlp; ++ ++ /* Check for new or potentially stale node */ ++ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) ++ return ndlp; ++ np = ndlp; + } + } + +- /* FIND node did NOT FOUND */ +- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, +- "0932 FIND node did x%x NOT FOUND.\n", did); +- return NULL; ++ if (!np) ++ /* FIND node did NOT FOUND */ ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, ++ "0932 FIND node did x%x NOT FOUND.\n", did); ++ ++ return np; + } + + struct lpfc_nodelist * +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 424b39a8155cb9..7c8e0e1d36da9b 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -13180,6 +13180,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) + eqhdl = lpfc_get_eq_hdl(0); + rc = pci_irq_vector(phba->pcidev, 0); + if (rc < 0) { ++ free_irq(phba->pcidev->irq, phba); + pci_free_irq_vectors(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0496 MSI pci_irq_vec failed (%d)\n", rc); +@@ -13260,6 +13261,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) + eqhdl = lpfc_get_eq_hdl(0); + retval = pci_irq_vector(phba->pcidev, 0); + if (retval < 0) { ++ free_irq(phba->pcidev->irq, phba); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0502 INTR pci_irq_vec failed (%d)\n", + retval); +diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c +index 0d148c39ebcc98..60714a6c26375e 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c +@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, + char *desc = NULL; + u16 
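
[Illustrative aside, not part of the patch: the vfio_ap hunks above decide whether two matrix mdevs share an AP queue by intersecting bitmaps; a queue (APQN) is shared only if both the adapter-ID masks and the domain-index masks overlap. The core test in isolation, with sizes as defined in asm/ap.h:

	#include <linux/bitmap.h>

	#define AP_DEVICES 256	/* adapter IDs (APID) */
	#define AP_DOMAINS 256	/* queue indexes (APQI) */

	static bool apqns_overlap(const unsigned long *apm1, const unsigned long *aqm1,
				  const unsigned long *apm2, const unsigned long *aqm2)
	{
		DECLARE_BITMAP(apm, AP_DEVICES);
		DECLARE_BITMAP(aqm, AP_DOMAINS);

		/* both intersections must be non-empty for any APQN to clash */
		return bitmap_and(apm, apm1, apm2, AP_DEVICES) &&
		       bitmap_and(aqm, aqm1, aqm2, AP_DOMAINS);
	}
]
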
event; + ++ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT)) ++ return; ++ + event = event_reply->event; + + switch (event) { +diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c +index e289f18fc76437..daef90ee431f52 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c +@@ -679,6 +679,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, + size_t data_in_sz = 0; + long ret; + u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; ++ int tm_ret; + + issue_reset = 0; + +@@ -1120,18 +1121,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + pcie_device->device_info)))) +- mpt3sas_scsih_issue_locked_tm(ioc, ++ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), + 0, 0, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, pcie_device->reset_timeout, + MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); + else +- mpt3sas_scsih_issue_locked_tm(ioc, ++ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), + 0, 0, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); ++ ++ if (tm_ret != SUCCESS) { ++ ioc_info(ioc, ++ "target reset failed, issue hard reset: handle (0x%04x)\n", ++ le16_to_cpu(mpi_request->FunctionDependent1)); ++ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); ++ } + } else + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } +diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c +index 900322bad4f3be..f9ab45c4bb40d5 100644 +--- a/drivers/scsi/st.c ++++ b/drivers/scsi/st.c +@@ -953,7 +953,6 @@ static void reset_state(struct scsi_tape *STp) + STp->partition = find_partition(STp); + if (STp->partition < 0) + STp->partition = 0; +- STp->new_partition = STp->partition; + } + } + +@@ -2895,7 +2894,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon + timeout = STp->long_timeout * 8; + + DEBC_printk(STp, "Erasing tape.\n"); +- fileno = blkno = at_sm = 0; + break; + case MTSETBLK: /* Set block length */ + case MTSETDENSITY: /* Set tape density */ +@@ -2928,14 +2926,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon + if (cmd_in == MTSETDENSITY) { + (STp->buffer)->b_data[4] = arg; + STp->density_changed = 1; /* At least we tried ;-) */ ++ STp->changed_density = arg; + } else if (cmd_in == SET_DENS_AND_BLK) + (STp->buffer)->b_data[4] = arg >> 24; + else + (STp->buffer)->b_data[4] = STp->density; + if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { + ltmp = arg & MT_ST_BLKSIZE_MASK; +- if (cmd_in == MTSETBLK) ++ if (cmd_in == MTSETBLK) { + STp->blksize_changed = 1; /* At least we tried ;-) */ ++ STp->changed_blksize = arg; ++ } + } else + ltmp = STp->block_size; + (STp->buffer)->b_data[9] = (ltmp >> 16); +@@ -3082,7 +3083,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon + cmd_in == MTSETDRVBUFFER || + cmd_in == SET_DENS_AND_BLK) { + if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && +- !(STp->use_pf & PF_TESTED)) { ++ cmdstatp->sense_hdr.asc == 0x24 && ++ (STp->device)->scsi_level <= SCSI_2 && ++ !(STp->use_pf & PF_TESTED)) { + /* Try the other possible state of Page Format if not + already tried */ + STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; +@@ -3634,9 +3637,25 @@ static long st_ioctl(struct file *file, unsigned 
int cmd_in, unsigned long arg) + retval = (-EIO); + goto out; + } +- reset_state(STp); ++ reset_state(STp); /* Clears pos_unknown */ + /* remove this when the midlevel properly clears was_reset */ + STp->device->was_reset = 0; ++ ++ /* Fix the device settings after reset, ignore errors */ ++ if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK || ++ mtc.mt_op == MTEOM) { ++ if (STp->can_partitions) { ++ /* STp->new_partition contains the ++ * latest partition set ++ */ ++ STp->partition = 0; ++ switch_partition(STp); ++ } ++ if (STp->density_changed) ++ st_int_ioctl(STp, MTSETDENSITY, STp->changed_density); ++ if (STp->blksize_changed) ++ st_int_ioctl(STp, MTSETBLK, STp->changed_blksize); ++ } + } + + if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK && +diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h +index 1aaaf5369a40fc..6d31b894ee84cc 100644 +--- a/drivers/scsi/st.h ++++ b/drivers/scsi/st.h +@@ -165,6 +165,7 @@ struct scsi_tape { + unsigned char compression_changed; + unsigned char drv_buffer; + unsigned char density; ++ unsigned char changed_density; + unsigned char door_locked; + unsigned char autorew_dev; /* auto-rewind device */ + unsigned char rew_at_close; /* rewind necessary at close */ +@@ -172,6 +173,7 @@ struct scsi_tape { + unsigned char cleaning_req; /* cleaning requested? */ + unsigned char first_tur; /* first TEST UNIT READY */ + int block_size; ++ int changed_blksize; + int min_block; + int max_block; + int recover_count; /* From tape opening */ +diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h +index 24bd619ec5e487..1da1dfd9cb199c 100644 +--- a/drivers/soc/apple/rtkit-internal.h ++++ b/drivers/soc/apple/rtkit-internal.h +@@ -48,6 +48,7 @@ struct apple_rtkit { + + struct apple_rtkit_shmem ioreport_buffer; + struct apple_rtkit_shmem crashlog_buffer; ++ struct apple_rtkit_shmem oslog_buffer; + + struct apple_rtkit_shmem syslog_buffer; + char *syslog_msg_buffer; +diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c +index d9f19dc99da5e8..2c37216f423d20 100644 +--- a/drivers/soc/apple/rtkit.c ++++ b/drivers/soc/apple/rtkit.c +@@ -66,8 +66,9 @@ enum { + #define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24) + + #define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56) +-#define APPLE_RTKIT_OSLOG_INIT 1 +-#define APPLE_RTKIT_OSLOG_ACK 3 ++#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1 ++#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36) ++#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0) + + #define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11 + #define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12 +@@ -256,15 +257,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, + struct apple_rtkit_shmem *buffer, + u8 ep, u64 msg) + { +- size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg); + u64 reply; + int err; + ++ /* The different size vs. 
IOVA shifts look odd but are indeed correct this way */ ++ if (ep == APPLE_RTKIT_EP_OSLOG) { ++ buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg); ++ buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12; ++ } else { ++ buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12; ++ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); ++ } ++ + buffer->buffer = NULL; + buffer->iomem = NULL; + buffer->is_mapped = false; +- buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); +- buffer->size = n_4kpages << 12; + + dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n", + buffer->size, &buffer->iova); +@@ -289,11 +296,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, + } + + if (!buffer->is_mapped) { +- reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, +- APPLE_RTKIT_BUFFER_REQUEST); +- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages); +- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, +- buffer->iova); ++ /* oslog uses different fields and needs a shifted IOVA instead of size */ ++ if (ep == APPLE_RTKIT_EP_OSLOG) { ++ reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, ++ APPLE_RTKIT_OSLOG_BUFFER_REQUEST); ++ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size); ++ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA, ++ buffer->iova >> 12); ++ } else { ++ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, ++ APPLE_RTKIT_BUFFER_REQUEST); ++ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, ++ buffer->size >> 12); ++ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, ++ buffer->iova); ++ } + apple_rtkit_send_message(rtk, ep, reply, NULL, false); + } + +@@ -487,25 +504,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg) + } + } + +-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg) +-{ +- u64 ack; +- +- dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg); +- ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK); +- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false); +-} +- + static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg) + { + u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg); + + switch (type) { +- case APPLE_RTKIT_OSLOG_INIT: +- apple_rtkit_oslog_rx_init(rtk, msg); ++ case APPLE_RTKIT_OSLOG_BUFFER_REQUEST: ++ apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer, ++ APPLE_RTKIT_EP_OSLOG, msg); + break; + default: +- dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg); ++ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", ++ msg); + } + } + +@@ -744,7 +754,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, + rtk->mbox_cl.rx_callback = &apple_rtkit_rx; + rtk->mbox_cl.tx_done = &apple_rtkit_tx_done; + +- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM, ++ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM, + dev_name(rtk->dev)); + if (!rtk->wq) { + ret = -ENOMEM; +@@ -787,6 +797,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk) + + apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); + apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); ++ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); + + kfree(rtk->syslog_msg_buffer); +@@ -967,6 +978,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk) + + apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); + apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); ++ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer); + apple_rtkit_free_buffer(rtk, 
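
[Illustrative aside, not part of the patch: the RTKit hunks above parse and build the oslog buffer-request message with FIELD_GET()/FIELD_PREP() over GENMASK_ULL() masks; the IOVA travels page-shifted (>> 12) while the size field does not, which is the asymmetry the comment above calls out. The round trip in isolation:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define OSLOG_TYPE GENMASK_ULL(63, 56)
	#define OSLOG_SIZE GENMASK_ULL(55, 36)
	#define OSLOG_IOVA GENMASK_ULL(35, 0)

	static u64 oslog_pack(u8 type, size_t size, u64 iova)
	{
		return FIELD_PREP(OSLOG_TYPE, type) |
		       FIELD_PREP(OSLOG_SIZE, size) |		/* bytes, unshifted */
		       FIELD_PREP(OSLOG_IOVA, iova >> 12);	/* page-shifted */
	}

	static void oslog_unpack(u64 msg, size_t *size, u64 *iova)
	{
		*size = FIELD_GET(OSLOG_SIZE, msg);
		*iova = FIELD_GET(OSLOG_IOVA, msg) << 12;
	}
]
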
&rtk->syslog_buffer); + + kfree(rtk->syslog_msg_buffer); +diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c +index 6ea9b8c7d335c0..7a3bdef5a7c0da 100644 +--- a/drivers/soc/ti/k3-socinfo.c ++++ b/drivers/soc/ti/k3-socinfo.c +@@ -63,6 +63,12 @@ k3_chipinfo_partno_to_names(unsigned int partno, + return -EINVAL; + } + ++static const struct regmap_config k3_chipinfo_regmap_cfg = { ++ .reg_bits = 32, ++ .val_bits = 32, ++ .reg_stride = 4, ++}; ++ + static int k3_chipinfo_probe(struct platform_device *pdev) + { + struct device_node *node = pdev->dev.of_node; +@@ -70,13 +76,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct soc_device *soc_dev; + struct regmap *regmap; ++ void __iomem *base; + u32 partno_id; + u32 variant; + u32 jtag_id; + u32 mfg; + int ret; + +- regmap = device_node_to_regmap(node); ++ base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + +diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c +index 79173ab540a6bf..31b203ebbae0ca 100644 +--- a/drivers/soundwire/amd_manager.c ++++ b/drivers/soundwire/amd_manager.c +@@ -1138,6 +1138,7 @@ static int __maybe_unused amd_suspend(struct device *dev) + amd_sdw_wake_enable(amd_manager, false); + return amd_sdw_clock_stop(amd_manager); + } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { ++ amd_sdw_wake_enable(amd_manager, false); + /* + * As per hardware programming sequence on AMD platforms, + * clock stop should be invoked first before powering-off +@@ -1165,6 +1166,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev) + amd_sdw_wake_enable(amd_manager, true); + return amd_sdw_clock_stop(amd_manager); + } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { ++ amd_sdw_wake_enable(amd_manager, true); + ret = amd_sdw_clock_stop(amd_manager); + if (ret) + return ret; +diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c +index e7553c38be59d6..767942f19adb6a 100644 +--- a/drivers/soundwire/bus.c ++++ b/drivers/soundwire/bus.c +@@ -121,6 +121,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + set_bit(SDW_GROUP13_DEV_NUM, bus->assigned); + set_bit(SDW_MASTER_DEV_NUM, bus->assigned); + ++ ret = sdw_irq_create(bus, fwnode); ++ if (ret) ++ return ret; ++ + /* + * SDW is an enumerable bus, but devices can be powered off. So, + * they won't be able to report as present. +@@ -137,6 +141,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + + if (ret < 0) { + dev_err(bus->dev, "Finding slaves failed:%d\n", ret); ++ sdw_irq_delete(bus); + return ret; + } + +@@ -155,10 +160,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + bus->params.curr_bank = SDW_BANK0; + bus->params.next_bank = SDW_BANK1; + +- ret = sdw_irq_create(bus, fwnode); +- if (ret) +- return ret; +- + return 0; + } + EXPORT_SYMBOL(sdw_bus_master_add); +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index bcb0de864d34db..7dd94369abb47c 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -1,7 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0+ + // + // Copyright 2013 Freescale Semiconductor, Inc. 
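
[Illustrative aside, not part of the patch: the k3-socinfo hunk above switches from a device-tree syscon lookup to mapping the register window itself and wrapping it in an MMIO regmap. The basic pattern — shown here with the devm_ variant so no explicit regmap_exit() is needed:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static const struct regmap_config chipid_regmap_cfg = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
	};

	static int chipid_probe(struct platform_device *pdev)
	{
		void __iomem *base;
		struct regmap *regmap;

		base = devm_platform_ioremap_resource(pdev, 0);	/* map reg window */
		if (IS_ERR(base))
			return PTR_ERR(base);

		regmap = devm_regmap_init_mmio(&pdev->dev, base, &chipid_regmap_cfg);
		return PTR_ERR_OR_ZERO(regmap);
	}
]
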
+-// Copyright 2020 NXP ++// Copyright 2020-2025 NXP + // + // Freescale DSPI driver + // This file contains a driver for the Freescale DSPI +@@ -62,6 +62,7 @@ + #define SPI_SR_TFIWF BIT(18) + #define SPI_SR_RFDF BIT(17) + #define SPI_SR_CMDFFF BIT(16) ++#define SPI_SR_TXRXS BIT(30) + #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ + SPI_SR_TFUF | SPI_SR_TFFF | \ + SPI_SR_CMDTCF | SPI_SR_SPEF | \ +@@ -926,9 +927,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + struct spi_transfer *transfer; + bool cs = false; + int status = 0; ++ u32 val = 0; ++ bool cs_change = false; + + message->actual_length = 0; + ++ /* Put DSPI in running mode if halted. */ ++ regmap_read(dspi->regmap, SPI_MCR, &val); ++ if (val & SPI_MCR_HALT) { ++ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); ++ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && ++ !(val & SPI_SR_TXRXS)) ++ ; ++ } ++ + list_for_each_entry(transfer, &message->transfers, transfer_list) { + dspi->cur_transfer = transfer; + dspi->cur_msg = message; +@@ -958,6 +970,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; + } + ++ cs_change = transfer->cs_change; + dspi->tx = transfer->tx_buf; + dspi->rx = transfer->rx_buf; + dspi->len = transfer->len; +@@ -967,6 +980,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, + SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); + ++ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); ++ + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, + dspi->progress, !dspi->irq); + +@@ -993,6 +1008,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + dspi_deassert_cs(spi, &cs); + } + ++ if (status || !cs_change) { ++ /* Put DSPI in stop mode */ ++ regmap_update_bits(dspi->regmap, SPI_MCR, ++ SPI_MCR_HALT, SPI_MCR_HALT); ++ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && ++ val & SPI_SR_TXRXS) ++ ; ++ } ++ + message->status = status; + spi_finalize_current_message(ctlr); + +@@ -1163,6 +1187,20 @@ static int dspi_resume(struct device *dev) + + static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); + ++static const struct regmap_range dspi_yes_ranges[] = { ++ regmap_reg_range(SPI_MCR, SPI_MCR), ++ regmap_reg_range(SPI_TCR, SPI_CTAR(3)), ++ regmap_reg_range(SPI_SR, SPI_TXFR3), ++ regmap_reg_range(SPI_RXFR0, SPI_RXFR3), ++ regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), ++ regmap_reg_range(SPI_SREX, SPI_SREX), ++}; ++ ++static const struct regmap_access_table dspi_access_table = { ++ .yes_ranges = dspi_yes_ranges, ++ .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), ++}; ++ + static const struct regmap_range dspi_volatile_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_TCR), + regmap_reg_range(SPI_SR, SPI_SR), +@@ -1180,6 +1218,8 @@ static const struct regmap_config dspi_regmap_config = { + .reg_stride = 4, + .max_register = 0x88, + .volatile_table = &dspi_volatile_table, ++ .rd_table = &dspi_access_table, ++ .wr_table = &dspi_access_table, + }; + + static const struct regmap_range dspi_xspi_volatile_ranges[] = { +@@ -1201,6 +1241,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { + .reg_stride = 4, + .max_register = 0x13c, + .volatile_table = &dspi_xspi_volatile_table, ++ .rd_table = &dspi_access_table, ++ .wr_table = &dspi_access_table, + }, + { + .name = "pushr", +@@ -1223,6 +1265,8 @@ static int dspi_init(struct fsl_dspi *dspi) + if (!spi_controller_is_target(dspi->ctlr)) + mcr |= SPI_MCR_HOST; + ++ mcr |= SPI_MCR_HALT; ++ + regmap_write(dspi->regmap, 
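
[Illustrative aside, not part of the patch: the dspi hunks above add rd_table/wr_table entries so regmap refuses accesses outside the documented register windows; a regmap_access_table built from yes_ranges whitelists the valid offsets. Minimal shape, with hypothetical REG_* offsets:

	#include <linux/regmap.h>

	#define REG_CTRL 0x00
	#define REG_STAT 0x04
	#define REG_FIFO 0x40

	static const struct regmap_range mydev_yes_ranges[] = {
		regmap_reg_range(REG_CTRL, REG_STAT),	/* control/status block */
		regmap_reg_range(REG_FIFO, REG_FIFO),	/* data FIFO */
	};

	static const struct regmap_access_table mydev_access_table = {
		.yes_ranges = mydev_yes_ranges,
		.n_yes_ranges = ARRAY_SIZE(mydev_yes_ranges),
	};

	static const struct regmap_config mydev_regmap_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = REG_FIFO,
		.rd_table = &mydev_access_table,	/* rejects reads of gaps */
		.wr_table = &mydev_access_table,	/* rejects writes too */
	};
]
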
SPI_MCR, mcr); + regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c +index 1f374cf4d6f65c..1615f935c8f03f 100644 +--- a/drivers/spi/spi-rockchip.c ++++ b/drivers/spi/spi-rockchip.c +@@ -542,7 +542,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs, + cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET; + if (spi->mode & SPI_LSB_FIRST) + cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET; +- if (spi->mode & SPI_CS_HIGH) ++ if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0))) + cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET; + + if (xfer->rx_buf && xfer->tx_buf) +diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c +index b8947265d329e4..5b2cb225a41983 100644 +--- a/drivers/spi/spi-sun4i.c ++++ b/drivers/spi/spi-sun4i.c +@@ -263,6 +263,9 @@ static int sun4i_spi_transfer_one(struct spi_master *master, + else + reg |= SUN4I_CTL_DHB; + ++ /* Now that the settings are correct, enable the interface */ ++ reg |= SUN4I_CTL_ENABLE; ++ + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); + + /* Ensure that we have a parent clock fast enough */ +@@ -403,7 +406,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) + } + + sun4i_spi_write(sspi, SUN4I_CTL_REG, +- SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); ++ SUN4I_CTL_MASTER | SUN4I_CTL_TP); + + return 0; + +diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c +index 3503e6c0a5c983..b5deb4fe3b8324 100644 +--- a/drivers/spi/spi-zynqmp-gqspi.c ++++ b/drivers/spi/spi-zynqmp-gqspi.c +@@ -799,7 +799,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) + static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) + { + struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id; +- irqreturn_t ret = IRQ_NONE; + u32 status, mask, dma_status = 0; + + status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); +@@ -814,27 +813,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) + dma_status); + } + +- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) { ++ if (!mask && !dma_status) ++ return IRQ_NONE; ++ ++ if (mask & GQSPI_ISR_TXNOT_FULL_MASK) + zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); +- ret = IRQ_HANDLED; +- } + +- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) { ++ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) + zynqmp_process_dma_irq(xqspi); +- ret = IRQ_HANDLED; +- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && +- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) { ++ else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && ++ (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) + zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL); +- ret = IRQ_HANDLED; +- } + + if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 && + ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) { + zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK); + complete(&xqspi->data_completion); +- ret = IRQ_HANDLED; + } +- return ret; ++ return IRQ_HANDLED; + } + + /** +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index b516c2893420bc..b756d4cfecfe93 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4323,8 +4323,8 @@ int iscsit_close_connection( + spin_unlock(&iscsit_global->ts_bitmap_lock); + + iscsit_stop_timers_for_cmds(conn); +- iscsit_stop_nopin_response_timer(conn); + iscsit_stop_nopin_timer(conn); ++ iscsit_stop_nopin_response_timer(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); +diff --git 
a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c +index f110f932ba0543..675f774be1d30e 100644 +--- a/drivers/target/target_core_spc.c ++++ b/drivers/target/target_core_spc.c +@@ -2151,8 +2151,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) + if (descr->serv_action_valid) + return TCM_INVALID_CDB_FIELD; + +- if (!descr->enabled || descr->enabled(descr, cmd)) ++ if (!descr->enabled || descr->enabled(descr, cmd)) { + *opcode = descr; ++ return TCM_NO_SENSE; ++ } + break; + case 0x2: + /* +@@ -2166,8 +2168,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) + if (descr->serv_action_valid && + descr->service_action == requested_sa) { + if (!descr->enabled || descr->enabled(descr, +- cmd)) ++ cmd)) { + *opcode = descr; ++ return TCM_NO_SENSE; ++ } + } else if (!descr->serv_action_valid) + return TCM_INVALID_CDB_FIELD; + break; +@@ -2180,13 +2184,15 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) + */ + if (descr->service_action == requested_sa) + if (!descr->enabled || descr->enabled(descr, +- cmd)) ++ cmd)) { + *opcode = descr; ++ return TCM_NO_SENSE; ++ } + break; + } + } + +- return 0; ++ return TCM_NO_SENSE; + } + + static sense_reason_t +diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c +index 61c3d450ee605a..2e06b26be4ef69 100644 +--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c +@@ -331,6 +331,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) + tj_max = intel_tcc_get_tjmax(cpu); + if (tj_max < 0) + return tj_max; ++ tj_max *= 1000; + + zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL); + if (!zonedev) +diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c +index 404f01cca4dab5..ff8657afb31d3c 100644 +--- a/drivers/thermal/qoriq_thermal.c ++++ b/drivers/thermal/qoriq_thermal.c +@@ -18,6 +18,7 @@ + #define SITES_MAX 16 + #define TMR_DISABLE 0x0 + #define TMR_ME 0x80000000 ++#define TMR_CMD BIT(29) + #define TMR_ALPF 0x0c000000 + #define TMR_ALPF_V2 0x03000000 + #define TMTMIR_DEFAULT 0x0000000f +@@ -356,6 +357,12 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev) + if (ret) + return ret; + ++ if (data->ver > TMU_VER1) { ++ ret = regmap_set_bits(data->regmap, REGS_TMR, TMR_CMD); ++ if (ret) ++ return ret; ++ } ++ + clk_disable_unprepare(data->clk); + + return 0; +@@ -370,6 +377,12 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev) + if (ret) + return ret; + ++ if (data->ver > TMU_VER1) { ++ ret = regmap_clear_bits(data->regmap, REGS_TMR, TMR_CMD); ++ if (ret) ++ return ret; ++ } ++ + /* Enable monitoring */ + return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME); + } +diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c +index 2ee8c5ebca7c3c..43146c0685dfa7 100644 +--- a/drivers/thunderbolt/retimer.c ++++ b/drivers/thunderbolt/retimer.c +@@ -89,9 +89,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt) + if (ret) + goto err_nvm; + +- ret = tb_nvm_add_non_active(nvm, nvm_write); +- if (ret) +- goto err_nvm; ++ if (!rt->no_nvm_upgrade) { ++ ret = tb_nvm_add_non_active(nvm, nvm_write); ++ if (ret) ++ goto err_nvm; ++ } + + rt->nvm = nvm; + return 0; +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index c2778300e15100..d5ad6cae6b652b 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ 
b/drivers/tty/serial/8250/8250_port.c +@@ -1676,7 +1676,7 @@ static void serial8250_disable_ms(struct uart_port *port) + if (up->bugs & UART_BUG_NOMSR) + return; + +- mctrl_gpio_disable_ms(up->gpios); ++ mctrl_gpio_disable_ms_no_sync(up->gpios); + + up->ier &= ~UART_IER_MSI; + serial_port_out(port, UART_IER, up->ier); +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index bcca5627afaca8..85559d9b35d830 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -698,7 +698,7 @@ static void atmel_disable_ms(struct uart_port *port) + + atmel_port->ms_irq_enabled = false; + +- mctrl_gpio_disable_ms(atmel_port->gpios); ++ mctrl_gpio_disable_ms_no_sync(atmel_port->gpios); + + if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) + idr |= ATMEL_US_CTSIC; +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 349d4849ba5e3b..04809b781f45be 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -1597,7 +1597,7 @@ static void imx_uart_shutdown(struct uart_port *port) + imx_uart_dma_exit(sport); + } + +- mctrl_gpio_disable_ms(sport->gpios); ++ mctrl_gpio_disable_ms_sync(sport->gpios); + + spin_lock_irqsave(&sport->port.lock, flags); + ucr2 = imx_uart_readl(sport, UCR2); +diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c +index 7d5aaa8d422b19..d5fb293dd5a93c 100644 +--- a/drivers/tty/serial/serial_mctrl_gpio.c ++++ b/drivers/tty/serial/serial_mctrl_gpio.c +@@ -322,11 +322,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) + } + EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms); + +-/** +- * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines +- * @gpios: gpios to disable +- */ +-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) ++static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync) + { + enum mctrl_gpio_idx i; + +@@ -342,10 +338,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) + if (!gpios->irq[i]) + continue; + +- disable_irq(gpios->irq[i]); ++ if (sync) ++ disable_irq(gpios->irq[i]); ++ else ++ disable_irq_nosync(gpios->irq[i]); + } + } +-EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms); ++ ++/** ++ * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms ++ * lines, and wait for any pending IRQ to be processed ++ * @gpios: gpios to disable ++ */ ++void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios) ++{ ++ mctrl_gpio_disable_ms(gpios, true); ++} ++EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync); ++ ++/** ++ * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the ++ * ms lines, and return immediately ++ * @gpios: gpios to disable ++ */ ++void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios) ++{ ++ mctrl_gpio_disable_ms(gpios, false); ++} ++EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync); + + void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) + { +diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h +index fc76910fb105a3..79e97838ebe567 100644 +--- a/drivers/tty/serial/serial_mctrl_gpio.h ++++ b/drivers/tty/serial/serial_mctrl_gpio.h +@@ -87,9 +87,16 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios); + void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios); + + /* +- * Disable gpio interrupts to report status line changes. 
++ * Disable gpio interrupts to report status line changes, and block until ++ * any corresponding IRQ is processed + */ +-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios); ++void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios); ++ ++/* ++ * Disable gpio interrupts to report status line changes, and return ++ * immediately ++ */ ++void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios); + + /* + * Enable gpio wakeup interrupts to enable wake up source. +@@ -148,7 +155,11 @@ static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) + { + } + +-static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) ++static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios) ++{ ++} ++ ++static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios) + { + } + +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index 4350a69d97d7ac..61d8f50676b1bd 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -104,6 +104,20 @@ struct plat_sci_reg { + u8 offset, size; + }; + ++struct sci_suspend_regs { ++ u16 scdl; ++ u16 sccks; ++ u16 scsmr; ++ u16 scscr; ++ u16 scfcr; ++ u16 scsptr; ++ u16 hssrr; ++ u16 scpcr; ++ u16 scpdr; ++ u8 scbrr; ++ u8 semr; ++}; ++ + struct sci_port_params { + const struct plat_sci_reg regs[SCIx_NR_REGS]; + unsigned int fifosize; +@@ -134,6 +148,8 @@ struct sci_port { + struct dma_chan *chan_tx; + struct dma_chan *chan_rx; + ++ struct reset_control *rstc; ++ + #ifdef CONFIG_SERIAL_SH_SCI_DMA + struct dma_chan *chan_tx_saved; + struct dma_chan *chan_rx_saved; +@@ -153,6 +169,7 @@ struct sci_port { + int rx_trigger; + struct timer_list rx_fifo_timer; + int rx_fifo_timeout; ++ struct sci_suspend_regs suspend_regs; + u16 hscif_tot; + + bool has_rtscts; +@@ -2237,7 +2254,7 @@ static void sci_shutdown(struct uart_port *port) + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); + + s->autorts = false; +- mctrl_gpio_disable_ms(to_sci_port(port)->gpios); ++ mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios); + + spin_lock_irqsave(&port->lock, flags); + sci_stop_rx(port); +@@ -3325,6 +3342,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, + } + + sp = &sci_ports[id]; ++ sp->rstc = rstc; + *dev_id = id; + + p->type = SCI_OF_TYPE(data); +@@ -3473,13 +3491,77 @@ static int sci_probe(struct platform_device *dev) + return 0; + } + ++static void sci_console_save(struct sci_port *s) ++{ ++ struct sci_suspend_regs *regs = &s->suspend_regs; ++ struct uart_port *port = &s->port; ++ ++ if (sci_getreg(port, SCDL)->size) ++ regs->scdl = sci_serial_in(port, SCDL); ++ if (sci_getreg(port, SCCKS)->size) ++ regs->sccks = sci_serial_in(port, SCCKS); ++ if (sci_getreg(port, SCSMR)->size) ++ regs->scsmr = sci_serial_in(port, SCSMR); ++ if (sci_getreg(port, SCSCR)->size) ++ regs->scscr = sci_serial_in(port, SCSCR); ++ if (sci_getreg(port, SCFCR)->size) ++ regs->scfcr = sci_serial_in(port, SCFCR); ++ if (sci_getreg(port, SCSPTR)->size) ++ regs->scsptr = sci_serial_in(port, SCSPTR); ++ if (sci_getreg(port, SCBRR)->size) ++ regs->scbrr = sci_serial_in(port, SCBRR); ++ if (sci_getreg(port, HSSRR)->size) ++ regs->hssrr = sci_serial_in(port, HSSRR); ++ if (sci_getreg(port, SCPCR)->size) ++ regs->scpcr = sci_serial_in(port, SCPCR); ++ if (sci_getreg(port, SCPDR)->size) ++ regs->scpdr = sci_serial_in(port, SCPDR); ++ if (sci_getreg(port, SEMR)->size) ++ regs->semr = sci_serial_in(port, SEMR); ++} ++ ++static void sci_console_restore(struct sci_port *s) ++{ ++ struct sci_suspend_regs 
*regs = &s->suspend_regs; ++ struct uart_port *port = &s->port; ++ ++ if (sci_getreg(port, SCDL)->size) ++ sci_serial_out(port, SCDL, regs->scdl); ++ if (sci_getreg(port, SCCKS)->size) ++ sci_serial_out(port, SCCKS, regs->sccks); ++ if (sci_getreg(port, SCSMR)->size) ++ sci_serial_out(port, SCSMR, regs->scsmr); ++ if (sci_getreg(port, SCSCR)->size) ++ sci_serial_out(port, SCSCR, regs->scscr); ++ if (sci_getreg(port, SCFCR)->size) ++ sci_serial_out(port, SCFCR, regs->scfcr); ++ if (sci_getreg(port, SCSPTR)->size) ++ sci_serial_out(port, SCSPTR, regs->scsptr); ++ if (sci_getreg(port, SCBRR)->size) ++ sci_serial_out(port, SCBRR, regs->scbrr); ++ if (sci_getreg(port, HSSRR)->size) ++ sci_serial_out(port, HSSRR, regs->hssrr); ++ if (sci_getreg(port, SCPCR)->size) ++ sci_serial_out(port, SCPCR, regs->scpcr); ++ if (sci_getreg(port, SCPDR)->size) ++ sci_serial_out(port, SCPDR, regs->scpdr); ++ if (sci_getreg(port, SEMR)->size) ++ sci_serial_out(port, SEMR, regs->semr); ++} ++ + static __maybe_unused int sci_suspend(struct device *dev) + { + struct sci_port *sport = dev_get_drvdata(dev); + +- if (sport) ++ if (sport) { + uart_suspend_port(&sci_uart_driver, &sport->port); + ++ if (!console_suspend_enabled && uart_console(&sport->port)) ++ sci_console_save(sport); ++ else ++ return reset_control_assert(sport->rstc); ++ } ++ + return 0; + } + +@@ -3487,8 +3569,18 @@ static __maybe_unused int sci_resume(struct device *dev) + { + struct sci_port *sport = dev_get_drvdata(dev); + +- if (sport) ++ if (sport) { ++ if (!console_suspend_enabled && uart_console(&sport->port)) { ++ sci_console_restore(sport); ++ } else { ++ int ret = reset_control_deassert(sport->rstc); ++ ++ if (ret) ++ return ret; ++ } ++ + uart_resume_port(&sci_uart_driver, &sport->port); ++ } + + return 0; + } +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c +index 9ef90bb30a47eb..b58422ae156c93 100644 +--- a/drivers/tty/serial/stm32-usart.c ++++ b/drivers/tty/serial/stm32-usart.c +@@ -952,7 +952,7 @@ static void stm32_usart_enable_ms(struct uart_port *port) + + static void stm32_usart_disable_ms(struct uart_port *port) + { +- mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); ++ mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios); + } + + /* Transmit stop */ +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index cb5611cbf45474..2346a1fc72b56e 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -257,6 +257,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = { + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | ++ UFS_DEVICE_QUIRK_PA_HIBER8TIME | + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, +@@ -8459,6 +8460,31 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) + return ret; + } + ++/** ++ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME. ++ * @hba: per-adapter instance ++ * ++ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter ++ * to ensure proper hibernation timing. This function retrieves the current ++ * PA_HIBERN8TIME value and increments it by 100us. 
++ */ ++static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba) ++{ ++ u32 pa_h8time; ++ int ret; ++ ++ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time); ++ if (ret) { ++ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret); ++ return; ++ } ++ ++ /* Increment by 1 to increase hibernation time by 100 µs */ ++ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1); ++ if (ret) ++ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret); ++} ++ + static void ufshcd_tune_unipro_params(struct ufs_hba *hba) + { + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { +@@ -8474,6 +8500,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) + ufshcd_quirk_tune_host_pa_tactivate(hba); ++ ++ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME) ++ ufshcd_quirk_override_pa_h8time(hba); + } + + static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 5a53280fa2edfd..44352df58c9e4e 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1180,7 +1180,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, + */ + switch (GET_EP_CTX_STATE(ep_ctx)) { + case EP_STATE_HALTED: +- xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); ++ xhci_dbg(xhci, "Stop ep completion raced with stall\n"); ++ /* ++ * If the halt happened before Stop Endpoint failed, its transfer event ++ * should have already been handled and Reset Endpoint should be pending. ++ */ ++ if (ep->ep_state & EP_HALTED) ++ goto reset_done; ++ + if (ep->ep_state & EP_HAS_STREAMS) { + reset_type = EP_SOFT_RESET; + } else { +@@ -1191,8 +1198,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, + } + /* reset ep, reset handler cleans up cancelled tds */ + err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); ++ xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err); + if (err) + break; ++reset_done: ++ /* Reset EP handler will clean up cancelled TDs */ + ep->ep_state &= ~EP_STOP_CMD_PENDING; + return; + case EP_STATE_STOPPED: +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c +index b56aae3f7be378..9b8b70ffde5a0a 100644 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c +@@ -3420,6 +3420,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, + ndev->mvdev.max_vqs = max_vqs; + mvdev = &ndev->mvdev; + mvdev->mdev = mdev; ++ /* cpu_to_mlx5vdpa16() below depends on this flag */ ++ mvdev->actual_features = ++ (device_features & BIT_ULL(VIRTIO_F_VERSION_1)); + + ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); + ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index a2ad4f7c716bf3..d9eb8733a324b7 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -1813,7 +1813,8 @@ int vfio_config_init(struct vfio_pci_core_device *vdev) + cpu_to_le16(PCI_COMMAND_MEMORY); + } + +- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) ++ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx || ++ vdev->pdev->irq == IRQ_NOTCONNECTED) + vconfig[PCI_INTERRUPT_PIN] = 0; + + ret = vfio_cap_init(vdev); +diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c +index 
a8f259bc2f4d0c..fa168b43423954 100644 +--- a/drivers/vfio/pci/vfio_pci_core.c ++++ b/drivers/vfio/pci/vfio_pci_core.c +@@ -731,15 +731,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); + static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type) + { + if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { +- u8 pin; +- +- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || +- vdev->nointx || vdev->pdev->is_virtfn) +- return 0; +- +- pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); +- +- return pin ? 1 : 0; ++ return vdev->vconfig[PCI_INTERRUPT_PIN] ? 1 : 0; + } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { + u8 pos; + u16 flags; +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 620134041b4881..c4322faca2bd5b 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -269,7 +269,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev, + if (!is_irq_none(vdev)) + return -EINVAL; + +- if (!pdev->irq) ++ if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED) + return -ENODEV; + + name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index 8d8a22504d71fc..66235151115740 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -560,6 +560,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) + int ret; + + llnode = llist_del_all(&svq->completion_list); ++ ++ mutex_lock(&svq->vq.mutex); ++ + llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) { + se_cmd = &cmd->tvc_se_cmd; + +@@ -593,6 +596,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) + vhost_scsi_release_cmd_res(se_cmd); + } + ++ mutex_unlock(&svq->vq.mutex); ++ + if (signal) + vhost_signal(&svq->vs->dev, &svq->vq); + } +@@ -746,7 +751,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + size_t len = iov_iter_count(iter); + unsigned int nbytes = 0; + struct page *page; +- int i; ++ int i, ret; + + if (cmd->tvc_data_direction == DMA_FROM_DEVICE) { + cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter, +@@ -759,6 +764,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + page = alloc_page(GFP_KERNEL); + if (!page) { + i--; ++ ret = -ENOMEM; + goto err; + } + +@@ -766,8 +772,10 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + sg_set_page(&sg[i], page, nbytes, 0); + + if (cmd->tvc_data_direction == DMA_TO_DEVICE && +- copy_page_from_iter(page, 0, nbytes, iter) != nbytes) ++ copy_page_from_iter(page, 0, nbytes, iter) != nbytes) { ++ ret = -EFAULT; + goto err; ++ } + + len -= nbytes; + } +@@ -782,7 +790,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + for (; i >= 0; i--) + __free_page(sg_page(&sg[i])); + kfree(cmd->saved_iter_addr); +- return -ENOMEM; ++ return ret; + } + + static int +@@ -1221,9 +1229,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) + " %d\n", cmd, exp_data_len, prot_bytes, data_direction); + + if (data_direction != DMA_NONE) { +- if (unlikely(vhost_scsi_mapal(cmd, prot_bytes, +- &prot_iter, exp_data_len, +- &data_iter))) { ++ ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter, ++ exp_data_len, &data_iter); ++ if (unlikely(ret)) { + vq_err(vq, "Failed to map iov to sgl\n"); + vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); + goto err; +@@ -1301,8 +1309,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work) + resp_code = 
VIRTIO_SCSI_S_FUNCTION_REJECTED; + } + ++ mutex_lock(&tmf->svq->vq.mutex); + vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, + tmf->vq_desc, &tmf->resp_iov, resp_code); ++ mutex_unlock(&tmf->svq->vq.mutex); ++ + vhost_scsi_release_tmf_res(tmf); + } + +diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c +index 8587c9da067003..42e681a78136ab 100644 +--- a/drivers/video/fbdev/core/bitblit.c ++++ b/drivers/video/fbdev/core/bitblit.c +@@ -59,12 +59,11 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, + } + + static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, +- int sx, int height, int width) ++ int sx, int height, int width, int fg, int bg) + { +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + struct fb_fillrect region; + +- region.color = attr_bgcol_ec(bgshift, vc, info); ++ region.color = bg; + region.dx = sx * vc->vc_font.width; + region.dy = sy * vc->vc_font.height; + region.width = width * vc->vc_font.width; +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index 405d587450ef84..7a6f9a3cb3ba34 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -1240,7 +1240,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, + { + struct fb_info *info = fbcon_info_from_console(vc->vc_num); + struct fbcon_ops *ops = info->fbcon_par; +- ++ int fg, bg; + struct fbcon_display *p = &fb_display[vc->vc_num]; + u_int y_break; + +@@ -1261,16 +1261,18 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, + fbcon_clear_margins(vc, 0); + } + ++ fg = get_color(vc, info, vc->vc_video_erase_char, 1); ++ bg = get_color(vc, info, vc->vc_video_erase_char, 0); + /* Split blits that cross physical y_wrap boundary */ + + y_break = p->vrows - p->yscroll; + if (sy < y_break && sy + height - 1 >= y_break) { + u_int b = y_break - sy; +- ops->clear(vc, info, real_y(p, sy), sx, b, width); ++ ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); + ops->clear(vc, info, real_y(p, sy + b), sx, height - b, +- width); ++ width, fg, bg); + } else +- ops->clear(vc, info, real_y(p, sy), sx, height, width); ++ ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); + } + + static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, +diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h +index 0eaf54a2115167..25691d4b027bfc 100644 +--- a/drivers/video/fbdev/core/fbcon.h ++++ b/drivers/video/fbdev/core/fbcon.h +@@ -55,7 +55,7 @@ struct fbcon_ops { + void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width); + void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, +- int sx, int height, int width); ++ int sx, int height, int width, int fb, int bg); + void (*putcs)(struct vc_data *vc, struct fb_info *info, + const unsigned short *s, int count, int yy, int xx, + int fg, int bg); +@@ -116,42 +116,6 @@ static inline int mono_col(const struct fb_info *info) + return (~(0xfff << max_len)) & 0xff; + } + +-static inline int attr_col_ec(int shift, struct vc_data *vc, +- struct fb_info *info, int is_fg) +-{ +- int is_mono01; +- int col; +- int fg; +- int bg; +- +- if (!vc) +- return 0; +- +- if (vc->vc_can_do_color) +- return is_fg ? 
attr_fgcol(shift,vc->vc_video_erase_char) +- : attr_bgcol(shift,vc->vc_video_erase_char); +- +- if (!info) +- return 0; +- +- col = mono_col(info); +- is_mono01 = info->fix.visual == FB_VISUAL_MONO01; +- +- if (attr_reverse(vc->vc_video_erase_char)) { +- fg = is_mono01 ? col : 0; +- bg = is_mono01 ? 0 : col; +- } +- else { +- fg = is_mono01 ? 0 : col; +- bg = is_mono01 ? col : 0; +- } +- +- return is_fg ? fg : bg; +-} +- +-#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) +-#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) +- + /* + * Scroll Method + */ +diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c +index 2789ace7963427..9f4d65478554ad 100644 +--- a/drivers/video/fbdev/core/fbcon_ccw.c ++++ b/drivers/video/fbdev/core/fbcon_ccw.c +@@ -78,14 +78,13 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + } + + static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, +- int sx, int height, int width) ++ int sx, int height, int width, int fg, int bg) + { + struct fbcon_ops *ops = info->fbcon_par; + struct fb_fillrect region; +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + u32 vyres = GETVYRES(ops->p, info); + +- region.color = attr_bgcol_ec(bgshift,vc,info); ++ region.color = bg; + region.dx = sy * vc->vc_font.height; + region.dy = vyres - ((sx + width) * vc->vc_font.width); + region.height = width * vc->vc_font.width; +diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c +index 86a254c1b2b7b6..b18e31886da102 100644 +--- a/drivers/video/fbdev/core/fbcon_cw.c ++++ b/drivers/video/fbdev/core/fbcon_cw.c +@@ -63,14 +63,13 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + } + + static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, +- int sx, int height, int width) ++ int sx, int height, int width, int fg, int bg) + { + struct fbcon_ops *ops = info->fbcon_par; + struct fb_fillrect region; +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + u32 vxres = GETVXRES(ops->p, info); + +- region.color = attr_bgcol_ec(bgshift,vc,info); ++ region.color = bg; + region.dx = vxres - ((sy + height) * vc->vc_font.height); + region.dy = sx * vc->vc_font.width; + region.height = width * vc->vc_font.width; +diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c +index 23bc045769d088..b6b074cfd9dc08 100644 +--- a/drivers/video/fbdev/core/fbcon_ud.c ++++ b/drivers/video/fbdev/core/fbcon_ud.c +@@ -64,15 +64,14 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, + } + + static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, +- int sx, int height, int width) ++ int sx, int height, int width, int fg, int bg) + { + struct fbcon_ops *ops = info->fbcon_par; + struct fb_fillrect region; +- int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); + +- region.color = attr_bgcol_ec(bgshift,vc,info); ++ region.color = bg; + region.dy = vyres - ((sy + height) * vc->vc_font.height); + region.dx = vxres - ((sx + width) * vc->vc_font.width); + region.width = width * vc->vc_font.width; +diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c +index 2768eff247ba46..b3aa0c6620c7d1 100644 +--- a/drivers/video/fbdev/core/tileblit.c ++++ b/drivers/video/fbdev/core/tileblit.c +@@ -32,16 +32,14 @@ static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, + } + + static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, +- int sx, int height, int width) ++ int sx, int height, int width, int fg, int bg) + { + struct fb_tilerect rect; +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; +- int fgshift = (vc->vc_hi_font_mask) ? 9 : 8; + + rect.index = vc->vc_video_erase_char & + ((vc->vc_hi_font_mask) ? 0x1ff : 0xff); +- rect.fg = attr_fgcol_ec(fgshift, vc, info); +- rect.bg = attr_bgcol_ec(bgshift, vc, info); ++ rect.fg = fg; ++ rect.bg = bg; + rect.sx = sx; + rect.sy = sy; + rect.width = width; +@@ -76,7 +74,42 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info, + static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, + int color, int bottom_only) + { +- return; ++ unsigned int cw = vc->vc_font.width; ++ unsigned int ch = vc->vc_font.height; ++ unsigned int rw = info->var.xres - (vc->vc_cols*cw); ++ unsigned int bh = info->var.yres - (vc->vc_rows*ch); ++ unsigned int rs = info->var.xres - rw; ++ unsigned int bs = info->var.yres - bh; ++ unsigned int vwt = info->var.xres_virtual / cw; ++ unsigned int vht = info->var.yres_virtual / ch; ++ struct fb_tilerect rect; ++ ++ rect.index = vc->vc_video_erase_char & ++ ((vc->vc_hi_font_mask) ? 0x1ff : 0xff); ++ rect.fg = color; ++ rect.bg = color; ++ ++ if ((int) rw > 0 && !bottom_only) { ++ rect.sx = (info->var.xoffset + rs + cw - 1) / cw; ++ rect.sy = 0; ++ rect.width = (rw + cw - 1) / cw; ++ rect.height = vht; ++ if (rect.width + rect.sx > vwt) ++ rect.width = vwt - rect.sx; ++ if (rect.sx < vwt) ++ info->tileops->fb_tilefill(info, &rect); ++ } ++ ++ if ((int) bh > 0) { ++ rect.sx = info->var.xoffset / cw; ++ rect.sy = (info->var.yoffset + bs) / ch; ++ rect.width = rs / cw; ++ rect.height = (bh + ch - 1) / ch; ++ if (rect.height + rect.sy > vht) ++ rect.height = vht - rect.sy; ++ if (rect.sy < vht) ++ info->tileops->fb_tilefill(info, &rect); ++ } + } + + static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, +diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c +index 0bced82fa4940d..8cf1268a4e5545 100644 +--- a/drivers/video/fbdev/fsl-diu-fb.c ++++ b/drivers/video/fbdev/fsl-diu-fb.c +@@ -1827,6 +1827,7 @@ static void fsl_diu_remove(struct platform_device *pdev) + int i; + + data = dev_get_drvdata(&pdev->dev); ++ device_remove_file(&pdev->dev, &data->dev_attr); + disable_lcdc(&data->fsl_diu_info[0]); + + free_irq(data->irq, data->diu_reg); +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index 80669e05bf0ee4..c5f04234d9511a 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -2530,7 +2530,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) + struct vring_virtqueue *vq = to_vvq(_vq); + + if (vq->event_triggered) +- vq->event_triggered = false; ++ data_race(vq->event_triggered = false); + + return vq->packed_ring ? 
virtqueue_enable_cb_delayed_packed(_vq) : + virtqueue_enable_cb_delayed_split(_vq); +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c +index b72a858bbac702..001b2c9311254c 100644 +--- a/drivers/watchdog/aspeed_wdt.c ++++ b/drivers/watchdog/aspeed_wdt.c +@@ -11,21 +11,30 @@ + #include + #include + #include ++#include + #include + #include + #include + #include ++#include + #include + + static bool nowayout = WATCHDOG_NOWAYOUT; + module_param(nowayout, bool, 0); + MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); ++struct aspeed_wdt_scu { ++ const char *compatible; ++ u32 reset_status_reg; ++ u32 wdt_reset_mask; ++ u32 wdt_reset_mask_shift; ++}; + + struct aspeed_wdt_config { + u32 ext_pulse_width_mask; + u32 irq_shift; + u32 irq_mask; ++ struct aspeed_wdt_scu scu; + }; + + struct aspeed_wdt { +@@ -39,18 +48,36 @@ static const struct aspeed_wdt_config ast2400_config = { + .ext_pulse_width_mask = 0xff, + .irq_shift = 0, + .irq_mask = 0, ++ .scu = { ++ .compatible = "aspeed,ast2400-scu", ++ .reset_status_reg = 0x3c, ++ .wdt_reset_mask = 0x1, ++ .wdt_reset_mask_shift = 1, ++ }, + }; + + static const struct aspeed_wdt_config ast2500_config = { + .ext_pulse_width_mask = 0xfffff, + .irq_shift = 12, + .irq_mask = GENMASK(31, 12), ++ .scu = { ++ .compatible = "aspeed,ast2500-scu", ++ .reset_status_reg = 0x3c, ++ .wdt_reset_mask = 0x1, ++ .wdt_reset_mask_shift = 2, ++ }, + }; + + static const struct aspeed_wdt_config ast2600_config = { + .ext_pulse_width_mask = 0xfffff, + .irq_shift = 0, + .irq_mask = GENMASK(31, 10), ++ .scu = { ++ .compatible = "aspeed,ast2600-scu", ++ .reset_status_reg = 0x74, ++ .wdt_reset_mask = 0xf, ++ .wdt_reset_mask_shift = 16, ++ }, + }; + + static const struct of_device_id aspeed_wdt_of_table[] = { +@@ -211,6 +238,56 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd, + return 0; + } + ++static void aspeed_wdt_update_bootstatus(struct platform_device *pdev, ++ struct aspeed_wdt *wdt) ++{ ++ const struct resource *res; ++ struct aspeed_wdt_scu scu = wdt->cfg->scu; ++ struct regmap *scu_base; ++ u32 reset_mask_width; ++ u32 reset_mask_shift; ++ u32 idx = 0; ++ u32 status; ++ int ret; ++ ++ if (!of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt")) { ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ idx = ((intptr_t)wdt->base & 0x00000fff) / (uintptr_t)resource_size(res); ++ } ++ ++ scu_base = syscon_regmap_lookup_by_compatible(scu.compatible); ++ if (IS_ERR(scu_base)) { ++ wdt->wdd.bootstatus = WDIOS_UNKNOWN; ++ return; ++ } ++ ++ ret = regmap_read(scu_base, scu.reset_status_reg, &status); ++ if (ret) { ++ wdt->wdd.bootstatus = WDIOS_UNKNOWN; ++ return; ++ } ++ ++ reset_mask_width = hweight32(scu.wdt_reset_mask); ++ reset_mask_shift = scu.wdt_reset_mask_shift + ++ reset_mask_width * idx; ++ ++ if (status & (scu.wdt_reset_mask << reset_mask_shift)) ++ wdt->wdd.bootstatus = WDIOF_CARDRESET; ++ ++ /* clear wdt reset event flag */ ++ if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt") || ++ of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-wdt")) { ++ ret = regmap_read(scu_base, scu.reset_status_reg, &status); ++ if (!ret) { ++ status &= ~(scu.wdt_reset_mask << reset_mask_shift); ++ regmap_write(scu_base, scu.reset_status_reg, status); ++ } ++ } else { ++ regmap_write(scu_base, scu.reset_status_reg, ++ scu.wdt_reset_mask << reset_mask_shift); ++ } ++} ++ + /* access_cs0 shows if cs0 is accessible, hence the reverted bit */ + 
static ssize_t access_cs0_show(struct device *dev, + struct device_attribute *attr, char *buf) +@@ -447,10 +524,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) + writel(duration - 1, wdt->base + WDT_RESET_WIDTH); + } + ++ aspeed_wdt_update_bootstatus(pdev, wdt); ++ + status = readl(wdt->base + WDT_TIMEOUT_STATUS); + if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) { +- wdt->wdd.bootstatus = WDIOF_CARDRESET; +- + if (of_device_is_compatible(np, "aspeed,ast2400-wdt") || + of_device_is_compatible(np, "aspeed,ast2500-wdt")) + wdt->wdd.groups = bswitch_groups; +diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c +index 544d3f9010b92a..1db82da56db62b 100644 +--- a/drivers/xen/platform-pci.c ++++ b/drivers/xen/platform-pci.c +@@ -26,6 +26,8 @@ + + #define DRV_NAME "xen-platform-pci" + ++#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002 ++ + static unsigned long platform_mmio; + static unsigned long platform_mmio_alloc; + static unsigned long platform_mmiolen; +@@ -174,6 +176,8 @@ static int platform_pci_probe(struct pci_dev *pdev, + static const struct pci_device_id platform_pci_tbl[] = { + {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, ++ {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} + }; + +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c +index 25164d56c9d995..d3b6908110c6f0 100644 +--- a/drivers/xen/xenbus/xenbus_probe.c ++++ b/drivers/xen/xenbus/xenbus_probe.c +@@ -966,9 +966,15 @@ static int __init xenbus_init(void) + if (xen_pv_domain()) + xen_store_domain_type = XS_PV; + if (xen_hvm_domain()) ++ { + xen_store_domain_type = XS_HVM; +- if (xen_hvm_domain() && xen_initial_domain()) +- xen_store_domain_type = XS_LOCAL; ++ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); ++ if (err) ++ goto out_error; ++ xen_store_evtchn = (int)v; ++ if (!v && xen_initial_domain()) ++ xen_store_domain_type = XS_LOCAL; ++ } + if (xen_pv_domain() && !xen_start_info->store_evtchn) + xen_store_domain_type = XS_LOCAL; + if (xen_pv_domain() && xen_start_info->store_evtchn) +@@ -987,10 +993,6 @@ static int __init xenbus_init(void) + xen_store_interface = gfn_to_virt(xen_store_gfn); + break; + case XS_HVM: +- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); +- if (err) +- goto out_error; +- xen_store_evtchn = (int)v; + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); + if (err) + goto out_error; +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 434cf3d5f4cf18..226e6434a58a94 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -1885,6 +1885,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) + up_write(&space_info->groups_sem); + goto next; + } ++ ++ /* ++ * Cache the zone_unusable value before turning the block group ++ * to read only. As soon as the block group is read only it's ++ * zone_unusable value gets moved to the block group's read-only ++ * bytes and isn't available for calculations anymore. We also ++ * cache it before unlocking the block group, to prevent races ++ * (reports from KCSAN and such tools) with tasks updating it. ++ */ ++ zone_unusable = bg->zone_unusable; ++ + spin_unlock(&bg->lock); + + /* +@@ -1900,13 +1911,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) + goto next; + } + +- /* +- * Cache the zone_unusable value before turning the block group +- * to read only. 
As soon as the blog group is read only it's +- * zone_unusable value gets moved to the block group's read-only +- * bytes and isn't available for calculations anymore. +- */ +- zone_unusable = bg->zone_unusable; + ret = inc_block_group_ro(bg, 0); + up_write(&space_info->groups_sem); + if (ret < 0) +diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c +index 944a7340f6a449..3981c941f5b556 100644 +--- a/fs/btrfs/discard.c ++++ b/fs/btrfs/discard.c +@@ -167,13 +167,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl, + block_group->discard_eligible_time = 0; + queued = !list_empty(&block_group->discard_list); + list_del_init(&block_group->discard_list); +- /* +- * If the block group is currently running in the discard workfn, we +- * don't want to deref it, since it's still being used by the workfn. +- * The workfn will notice this case and deref the block group when it is +- * finished. +- */ +- if (queued && !running) ++ if (queued) + btrfs_put_block_group(block_group); + + spin_unlock(&discard_ctl->lock); +@@ -260,9 +254,10 @@ static struct btrfs_block_group *peek_discard_list( + block_group->discard_cursor = block_group->start; + block_group->discard_state = BTRFS_DISCARD_EXTENTS; + } +- discard_ctl->block_group = block_group; + } + if (block_group) { ++ btrfs_get_block_group(block_group); ++ discard_ctl->block_group = block_group; + *discard_state = block_group->discard_state; + *discard_index = block_group->discard_index; + } +@@ -493,9 +488,20 @@ static void btrfs_discard_workfn(struct work_struct *work) + + block_group = peek_discard_list(discard_ctl, &discard_state, + &discard_index, now); +- if (!block_group || !btrfs_run_discard_work(discard_ctl)) ++ if (!block_group) + return; ++ if (!btrfs_run_discard_work(discard_ctl)) { ++ spin_lock(&discard_ctl->lock); ++ btrfs_put_block_group(block_group); ++ discard_ctl->block_group = NULL; ++ spin_unlock(&discard_ctl->lock); ++ return; ++ } + if (now < block_group->discard_eligible_time) { ++ spin_lock(&discard_ctl->lock); ++ btrfs_put_block_group(block_group); ++ discard_ctl->block_group = NULL; ++ spin_unlock(&discard_ctl->lock); + btrfs_discard_schedule_work(discard_ctl, false); + return; + } +@@ -547,15 +553,7 @@ static void btrfs_discard_workfn(struct work_struct *work) + spin_lock(&discard_ctl->lock); + discard_ctl->prev_discard = trimmed; + discard_ctl->prev_discard_time = now; +- /* +- * If the block group was removed from the discard list while it was +- * running in this workfn, then we didn't deref it, since this function +- * still owned that reference. But we set the discard_ctl->block_group +- * back to NULL, so we can use that condition to know that now we need +- * to deref the block_group. +- */ +- if (discard_ctl->block_group == NULL) +- btrfs_put_block_group(block_group); ++ btrfs_put_block_group(block_group); + discard_ctl->block_group = NULL; + __btrfs_discard_schedule_work(discard_ctl, now, false); + spin_unlock(&discard_ctl->lock); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 2387210231f236..34a30d61b470c3 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -4313,6 +4313,14 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) + /* clear out the rbtree of defraggable inodes */ + btrfs_cleanup_defrag_inodes(fs_info); + ++ /* ++ * Handle the error fs first, as it will flush and wait for all ordered ++ * extents. This will generate delayed iputs, thus we want to handle ++ * it first. 
++ */ ++ if (unlikely(BTRFS_FS_ERROR(fs_info))) ++ btrfs_error_commit_super(fs_info); ++ + /* + * Wait for any fixup workers to complete. + * If we don't wait for them here and they are still running by the time +@@ -4333,6 +4341,19 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) + */ + btrfs_flush_workqueue(fs_info->delalloc_workers); + ++ /* ++ * We can have ordered extents getting their last reference dropped from ++ * the fs_info->workers queue because for async writes for data bios we ++ * queue a work for that queue, at btrfs_wq_submit_bio(), that runs ++ * run_one_async_done() which calls btrfs_bio_end_io() in case the bio ++ * has an error, and that later function can do the final ++ * btrfs_put_ordered_extent() on the ordered extent attached to the bio, ++ * which adds a delayed iput for the inode. So we must flush the queue ++ * so that we don't have delayed iputs after committing the current ++ * transaction below and stopping the cleaner and transaction kthreads. ++ */ ++ btrfs_flush_workqueue(fs_info->workers); ++ + /* + * When finishing a compressed write bio we schedule a work queue item + * to finish an ordered extent - btrfs_finish_compressed_write_work() +@@ -4402,9 +4423,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) + btrfs_err(fs_info, "commit super ret %d", ret); + } + +- if (BTRFS_FS_ERROR(fs_info)) +- btrfs_error_commit_super(fs_info); +- + kthread_stop(fs_info->transaction_kthread); + kthread_stop(fs_info->cleaner_kthread); + +@@ -4541,10 +4559,6 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) + /* cleanup FS via transaction */ + btrfs_cleanup_transaction(fs_info); + +- mutex_lock(&fs_info->cleaner_mutex); +- btrfs_run_delayed_iputs(fs_info); +- mutex_unlock(&fs_info->cleaner_mutex); +- + down_write(&fs_info->cleanup_work_sem); + up_write(&fs_info->cleanup_work_sem); + } +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index b2ae50dcca0fe0..ed08d8e5639f59 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -3565,10 +3565,10 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, + return eb; + } + +-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS + struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, + u64 start) + { ++#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS + struct extent_buffer *eb, *exists = NULL; + int ret; + +@@ -3604,8 +3604,11 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, + free_eb: + btrfs_release_extent_buffer(eb); + return exists; +-} ++#else ++ /* Stub to avoid linker error when compiled with optimizations turned off. 
*/ ++ return NULL; + #endif ++} + + static struct extent_buffer *grab_extent_buffer( + struct btrfs_fs_info *fs_info, struct page *page) +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index 537e184b4b1dfc..474758c878fcab 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -2931,6 +2931,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, + int ret; + + ASSERT(page_index <= last_index); ++again: + page = find_lock_page(inode->i_mapping, page_index); + if (!page) { + page_cache_sync_readahead(inode->i_mapping, ra, NULL, +@@ -2952,6 +2953,11 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, + ret = -EIO; + goto release_page; + } ++ if (page->mapping != inode->i_mapping) { ++ unlock_page(page); ++ put_page(page); ++ goto again; ++ } + } + + /* +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c +index 6be092bb814fdc..da49bdb70375b6 100644 +--- a/fs/btrfs/scrub.c ++++ b/fs/btrfs/scrub.c +@@ -1538,8 +1538,8 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg, + u64 extent_gen; + int ret; + +- if (unlikely(!extent_root)) { +- btrfs_err(fs_info, "no valid extent root for scrub"); ++ if (unlikely(!extent_root || !csum_root)) { ++ btrfs_err(fs_info, "no valid extent or csum root for scrub"); + return -EUCLEAN; + } + memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index aa1e6d88a72c7c..e2ead36e5be422 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -487,10 +487,8 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) + if (p->buf_len >= len) + return 0; + +- if (len > PATH_MAX) { +- WARN_ON(1); +- return -ENOMEM; +- } ++ if (WARN_ON(len > PATH_MAX)) ++ return -ENAMETOOLONG; + + path_len = p->end - p->start; + old_buf_len = p->buf_len; +diff --git a/fs/coredump.c b/fs/coredump.c +index 9d235fa14ab98f..d3a4f5dc2e362a 100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -56,6 +57,13 @@ + static bool dump_vma_snapshot(struct coredump_params *cprm); + static void free_vma_snapshot(struct coredump_params *cprm); + ++/* ++ * File descriptor number for the pidfd for the thread-group leader of ++ * the coredumping task installed into the usermode helper's file ++ * descriptor table. ++ */ ++#define COREDUMP_PIDFD_NUMBER 3 ++ + static int core_uses_pid; + static unsigned int core_pipe_limit; + static char core_pattern[CORENAME_MAX_SIZE] = "core"; +@@ -332,6 +340,27 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, + case 'C': + err = cn_printf(cn, "%d", cprm->cpu); + break; ++ /* pidfd number */ ++ case 'F': { ++ /* ++ * Installing a pidfd only makes sense if ++ * we actually spawn a usermode helper. ++ */ ++ if (!ispipe) ++ break; ++ ++ /* ++ * Note that we'll install a pidfd for the ++ * thread-group leader. We know that task ++ * linkage hasn't been removed yet and even if ++ * this @current isn't the actual thread-group ++ * leader we know that the thread-group leader ++ * cannot be reaped until @current has exited. ++ */ ++ cprm->pid = task_tgid(current); ++ err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER); ++ break; ++ } + default: + break; + } +@@ -488,7 +517,7 @@ static void wait_for_dump_helpers(struct file *file) + } + + /* +- * umh_pipe_setup ++ * umh_coredump_setup + * helper function to customize the process used + * to collect the core in userspace. 
Specifically + * it sets up a pipe and installs it as fd 0 (stdin) +@@ -498,21 +527,61 @@ static void wait_for_dump_helpers(struct file *file) + * is a special value that we use to trap recursive + * core dumps + */ +-static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) ++static int umh_coredump_setup(struct subprocess_info *info, struct cred *new) + { + struct file *files[2]; ++ struct file *pidfs_file = NULL; + struct coredump_params *cp = (struct coredump_params *)info->data; +- int err = create_pipe_files(files, 0); ++ int err; ++ ++ if (cp->pid) { ++ int fd; ++ ++ fd = pidfd_prepare(cp->pid, 0, &pidfs_file); ++ if (fd < 0) ++ return fd; ++ ++ /* ++ * We don't care about the fd. We also cannot simply ++ * replace it below because dup2() will refuse to close ++ * this file descriptor if its in a larval state. So ++ * close it! ++ */ ++ put_unused_fd(fd); ++ ++ /* ++ * Usermode helpers are childen of either ++ * system_unbound_wq or of kthreadd. So we know that ++ * we're starting off with a clean file descriptor ++ * table. So we should always be able to use ++ * COREDUMP_PIDFD_NUMBER as our file descriptor value. ++ */ ++ err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0); ++ if (err < 0) ++ goto out_fail; ++ ++ pidfs_file = NULL; ++ } ++ ++ err = create_pipe_files(files, 0); + if (err) +- return err; ++ goto out_fail; + + cp->file = files[1]; + + err = replace_fd(0, files[0], 0); + fput(files[0]); ++ if (err < 0) ++ goto out_fail; ++ + /* and disallow core files too */ + current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; + ++ err = 0; ++ ++out_fail: ++ if (pidfs_file) ++ fput(pidfs_file); + return err; + } + +@@ -589,7 +658,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) + } + + if (cprm.limit == 1) { +- /* See umh_pipe_setup() which sets RLIMIT_CORE = 1. ++ /* See umh_coredump_setup() which sets RLIMIT_CORE = 1. + * + * Normally core limits are irrelevant to pipes, since + * we're not writing to the file system, but we use +@@ -634,7 +703,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) + retval = -ENOMEM; + sub_info = call_usermodehelper_setup(helper_argv[0], + helper_argv, NULL, GFP_KERNEL, +- umh_pipe_setup, NULL, &cprm); ++ umh_coredump_setup, NULL, &cprm); + if (sub_info) + retval = call_usermodehelper_exec(sub_info, + UMH_WAIT_EXEC); +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c +index 0618af36f5506b..3c9ab6461579c9 100644 +--- a/fs/dlm/lowcomms.c ++++ b/fs/dlm/lowcomms.c +@@ -1826,8 +1826,8 @@ static int dlm_tcp_listen_validate(void) + { + /* We don't support multi-homed hosts */ + if (dlm_local_count > 1) { +- log_print("TCP protocol can't handle multi-homed hosts, try SCTP"); +- return -EINVAL; ++ log_print("Detect multi-homed hosts but use only the first IP address."); ++ log_print("Try SCTP, if you want to enable multi-link."); + } + + return 0; +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index 396474e9e2bffe..3a2dfc59fb40fc 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -641,8 +641,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, + /* Hm, nope. Are (enough) root reserved clusters available? 
*/ + if (uid_eq(sbi->s_resuid, current_fsuid()) || + (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || +- capable(CAP_SYS_RESOURCE) || +- (flags & EXT4_MB_USE_ROOT_BLOCKS)) { ++ (flags & EXT4_MB_USE_ROOT_BLOCKS) || ++ capable(CAP_SYS_RESOURCE)) { + + if (free_clusters >= (nclusters + dirty_clusters + + resv_clusters)) +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 60455c84a93742..81fe87fcbfa068 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -273,7 +273,8 @@ struct ext4_system_blocks { + /* + * Flags for ext4_io_end->flags + */ +-#define EXT4_IO_END_UNWRITTEN 0x0001 ++#define EXT4_IO_END_UNWRITTEN 0x0001 ++#define EXT4_IO_END_FAILED 0x0002 + + struct ext4_io_end_vec { + struct list_head list; /* list of io_end_vec */ +@@ -2994,6 +2995,8 @@ extern int ext4_inode_attach_jinode(struct inode *inode); + extern int ext4_can_truncate(struct inode *inode); + extern int ext4_truncate(struct inode *); + extern int ext4_break_layouts(struct inode *); ++extern int ext4_truncate_page_cache_block_range(struct inode *inode, ++ loff_t start, loff_t end); + extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length); + extern void ext4_set_inode_flags(struct inode *, bool init); + extern int ext4_alloc_da_blocks(struct inode *inode); +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 32218ac7f50fe2..39e3661a80c433 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -4660,22 +4660,13 @@ static long ext4_zero_range(struct file *file, loff_t offset, + goto out_mutex; + } + +- /* +- * For journalled data we need to write (and checkpoint) pages +- * before discarding page cache to avoid inconsitent data on +- * disk in case of crash before zeroing trans is committed. +- */ +- if (ext4_should_journal_data(inode)) { +- ret = filemap_write_and_wait_range(mapping, start, +- end - 1); +- if (ret) { +- filemap_invalidate_unlock(mapping); +- goto out_mutex; +- } ++ /* Now release the pages and zero block aligned part of pages */ ++ ret = ext4_truncate_page_cache_block_range(inode, start, end); ++ if (ret) { ++ filemap_invalidate_unlock(mapping); ++ goto out_mutex; + } + +- /* Now release the pages and zero block aligned part of pages */ +- truncate_pagecache_range(inode, start, end - 1); + inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); + + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index d3d28e65872027..86245e27be18db 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -3892,6 +3893,68 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, + return ret; + } + ++static inline void ext4_truncate_folio(struct inode *inode, ++ loff_t start, loff_t end) ++{ ++ unsigned long blocksize = i_blocksize(inode); ++ struct folio *folio; ++ ++ /* Nothing to be done if no complete block needs to be truncated. 
*/
++ if (round_up(start, blocksize) >= round_down(end, blocksize))
++ return;
++
++ folio = filemap_lock_folio(inode->i_mapping, start >> PAGE_SHIFT);
++ if (IS_ERR(folio))
++ return;
++
++ if (folio_mkclean(folio))
++ folio_mark_dirty(folio);
++ folio_unlock(folio);
++ folio_put(folio);
++}
++
++int ext4_truncate_page_cache_block_range(struct inode *inode,
++ loff_t start, loff_t end)
++{
++ unsigned long blocksize = i_blocksize(inode);
++ int ret;
++
++ /*
++ * For journalled data we need to write (and checkpoint) pages
++ * before discarding page cache to avoid inconsistent data on disk
++ * in case of crash before freeing or unwritten converting trans
++ * is committed.
++ */
++ if (ext4_should_journal_data(inode)) {
++ ret = filemap_write_and_wait_range(inode->i_mapping, start,
++ end - 1);
++ if (ret)
++ return ret;
++ goto truncate_pagecache;
++ }
++
++ /*
++ * If the block size is less than the page size, the file's mapped
++ * blocks within one page could be freed or converted to unwritten.
++ * So it's necessary to remove writable userspace mappings, and then
++ * ext4_page_mkwrite() can be called during subsequent write access
++ * to these partial folios.
++ */
++ if (!IS_ALIGNED(start | end, PAGE_SIZE) &&
++ blocksize < PAGE_SIZE && start < inode->i_size) {
++ loff_t page_boundary = round_up(start, PAGE_SIZE);
++
++ ext4_truncate_folio(inode, start, min(page_boundary, end));
++ if (end > page_boundary)
++ ext4_truncate_folio(inode,
++ round_down(end, PAGE_SIZE), end);
++ }
++
++truncate_pagecache:
++ truncate_pagecache_range(inode, start, end - 1);
++ return 0;
++}
++
+ static void ext4_wait_dax_page(struct inode *inode)
+ {
+ filemap_invalidate_unlock(inode->i_mapping);
+@@ -3946,17 +4009,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+
+- /*
+- * Write out all dirty pages to avoid race conditions
+- * Then release them.
+- */
+- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+- ret = filemap_write_and_wait_range(mapping, offset,
+- offset + length - 1);
+- if (ret)
+- return ret;
+- }
+-
+ inode_lock(inode);
+
+ /* No need to punch hole beyond i_size */
+@@ -4018,8 +4070,11 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ ret = ext4_update_disksize_before_punch(inode, offset, length);
+ if (ret)
+ goto out_dio;
+- truncate_pagecache_range(inode, first_block_offset,
+- last_block_offset);
++
++ ret = ext4_truncate_page_cache_block_range(inode,
++ first_block_offset, last_block_offset + 1);
++ if (ret)
++ goto out_dio;
+ }
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 7ab4f5a9bf5b87..7287dbfe13f120 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -181,14 +181,25 @@ static int ext4_end_io_end(ext4_io_end_t *io_end)
+ "list->prev 0x%p\n",
+ io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
+
+- io_end->handle = NULL; /* Following call will use up the handle */
+- ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
++ /*
++ * Do not convert the unwritten extents if data writeback fails,
++ * or stale data may be exposed.
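++ * The reserved handle is then freed rather than consumed, so the
++ * journal credits reserved for the conversion are returned.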
++ */ ++ io_end->handle = NULL; /* Following call will use up the handle */ ++ if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) { ++ ret = -EIO; ++ if (handle) ++ jbd2_journal_free_reserved(handle); ++ } else { ++ ret = ext4_convert_unwritten_io_end_vec(handle, io_end); ++ } + if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) { + ext4_msg(inode->i_sb, KERN_EMERG, + "failed to convert unwritten extents to written " + "extents -- potential data loss! " + "(inode %lu, error %d)", inode->i_ino, ret); + } ++ + ext4_clear_io_unwritten_flag(io_end); + ext4_release_io_end(io_end); + return ret; +@@ -344,6 +355,7 @@ static void ext4_end_bio(struct bio *bio) + bio->bi_status, inode->i_ino, + (unsigned long long) + bi_sector >> (inode->i_blkbits - 9)); ++ io_end->flag |= EXT4_IO_END_FAILED; + mapping_set_error(inode->i_mapping, + blk_status_to_errno(bio->bi_status)); + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 751c879271e05e..d2b58f940aab5e 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2821,6 +2821,13 @@ static int ext4_check_opt_consistency(struct fs_context *fc, + } + + if (is_remount) { ++ if (!sbi->s_journal && ++ ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) { ++ ext4_msg(NULL, KERN_WARNING, ++ "Remounting fs w/o journal so ignoring data_err option"); ++ ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT); ++ } ++ + if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && + (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { + ext4_msg(NULL, KERN_ERR, "can't mount with " +@@ -5421,6 +5428,11 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) + "data=, fs mounted w/o journal"); + goto failed_mount3a; + } ++ if (test_opt(sb, DATA_ERR_ABORT)) { ++ ext4_msg(sb, KERN_ERR, ++ "can't mount with data_err=abort, fs mounted w/o journal"); ++ goto failed_mount3a; ++ } + sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; + clear_opt(sb, JOURNAL_CHECKSUM); + clear_opt(sb, DATA_FLAGS); +@@ -6771,6 +6783,7 @@ static int ext4_reconfigure(struct fs_context *fc) + { + struct super_block *sb = fc->root->d_sb; + int ret; ++ bool old_ro = sb_rdonly(sb); + + fc->s_fs_info = EXT4_SB(sb); + +@@ -6782,9 +6795,9 @@ static int ext4_reconfigure(struct fs_context *fc) + if (ret < 0) + return ret; + +- ext4_msg(sb, KERN_INFO, "re-mounted %pU %s. Quota mode: %s.", +- &sb->s_uuid, sb_rdonly(sb) ? "ro" : "r/w", +- ext4_quota_mode(sb)); ++ ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.", ++ &sb->s_uuid, ++ (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : ""); + + return 0; + } +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c +index 180feefc4a9ceb..c4b0661888a159 100644 +--- a/fs/f2fs/sysfs.c ++++ b/fs/f2fs/sysfs.c +@@ -61,6 +61,12 @@ struct f2fs_attr { + int id; + }; + ++struct f2fs_base_attr { ++ struct attribute attr; ++ ssize_t (*show)(struct f2fs_base_attr *a, char *buf); ++ ssize_t (*store)(struct f2fs_base_attr *a, const char *buf, size_t len); ++}; ++ + static ssize_t f2fs_sbi_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf); + +@@ -791,6 +797,25 @@ static void f2fs_sb_release(struct kobject *kobj) + complete(&sbi->s_kobj_unregister); + } + ++static ssize_t f2fs_base_attr_show(struct kobject *kobj, ++ struct attribute *attr, char *buf) ++{ ++ struct f2fs_base_attr *a = container_of(attr, ++ struct f2fs_base_attr, attr); ++ ++ return a->show ? 
a->show(a, buf) : 0; ++} ++ ++static ssize_t f2fs_base_attr_store(struct kobject *kobj, ++ struct attribute *attr, ++ const char *buf, size_t len) ++{ ++ struct f2fs_base_attr *a = container_of(attr, ++ struct f2fs_base_attr, attr); ++ ++ return a->store ? a->store(a, buf, len) : 0; ++} ++ + /* + * Note that there are three feature list entries: + * 1) /sys/fs/f2fs/features +@@ -809,14 +834,13 @@ static void f2fs_sb_release(struct kobject *kobj) + * please add new on-disk feature in this list only. + * - ref. F2FS_SB_FEATURE_RO_ATTR() + */ +-static ssize_t f2fs_feature_show(struct f2fs_attr *a, +- struct f2fs_sb_info *sbi, char *buf) ++static ssize_t f2fs_feature_show(struct f2fs_base_attr *a, char *buf) + { + return sysfs_emit(buf, "supported\n"); + } + + #define F2FS_FEATURE_RO_ATTR(_name) \ +-static struct f2fs_attr f2fs_attr_##_name = { \ ++static struct f2fs_base_attr f2fs_base_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = 0444 }, \ + .show = f2fs_feature_show, \ + } +@@ -1166,37 +1190,38 @@ static struct attribute *f2fs_attrs[] = { + }; + ATTRIBUTE_GROUPS(f2fs); + ++#define BASE_ATTR_LIST(name) (&f2fs_base_attr_##name.attr) + static struct attribute *f2fs_feat_attrs[] = { + #ifdef CONFIG_FS_ENCRYPTION +- ATTR_LIST(encryption), +- ATTR_LIST(test_dummy_encryption_v2), ++ BASE_ATTR_LIST(encryption), ++ BASE_ATTR_LIST(test_dummy_encryption_v2), + #if IS_ENABLED(CONFIG_UNICODE) +- ATTR_LIST(encrypted_casefold), ++ BASE_ATTR_LIST(encrypted_casefold), + #endif + #endif /* CONFIG_FS_ENCRYPTION */ + #ifdef CONFIG_BLK_DEV_ZONED +- ATTR_LIST(block_zoned), ++ BASE_ATTR_LIST(block_zoned), + #endif +- ATTR_LIST(atomic_write), +- ATTR_LIST(extra_attr), +- ATTR_LIST(project_quota), +- ATTR_LIST(inode_checksum), +- ATTR_LIST(flexible_inline_xattr), +- ATTR_LIST(quota_ino), +- ATTR_LIST(inode_crtime), +- ATTR_LIST(lost_found), ++ BASE_ATTR_LIST(atomic_write), ++ BASE_ATTR_LIST(extra_attr), ++ BASE_ATTR_LIST(project_quota), ++ BASE_ATTR_LIST(inode_checksum), ++ BASE_ATTR_LIST(flexible_inline_xattr), ++ BASE_ATTR_LIST(quota_ino), ++ BASE_ATTR_LIST(inode_crtime), ++ BASE_ATTR_LIST(lost_found), + #ifdef CONFIG_FS_VERITY +- ATTR_LIST(verity), ++ BASE_ATTR_LIST(verity), + #endif +- ATTR_LIST(sb_checksum), ++ BASE_ATTR_LIST(sb_checksum), + #if IS_ENABLED(CONFIG_UNICODE) +- ATTR_LIST(casefold), ++ BASE_ATTR_LIST(casefold), + #endif +- ATTR_LIST(readonly), ++ BASE_ATTR_LIST(readonly), + #ifdef CONFIG_F2FS_FS_COMPRESSION +- ATTR_LIST(compression), ++ BASE_ATTR_LIST(compression), + #endif +- ATTR_LIST(pin_file), ++ BASE_ATTR_LIST(pin_file), + NULL, + }; + ATTRIBUTE_GROUPS(f2fs_feat); +@@ -1263,9 +1288,14 @@ static struct kset f2fs_kset = { + .kobj = {.ktype = &f2fs_ktype}, + }; + ++static const struct sysfs_ops f2fs_feat_attr_ops = { ++ .show = f2fs_base_attr_show, ++ .store = f2fs_base_attr_store, ++}; ++ + static const struct kobj_type f2fs_feat_ktype = { + .default_groups = f2fs_feat_groups, +- .sysfs_ops = &f2fs_attr_ops, ++ .sysfs_ops = &f2fs_feat_attr_ops, + }; + + static struct kobject f2fs_feat = { +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index e4d6cc0d2332a4..82951a535d2d4d 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1121,6 +1121,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, + else if (err == -EINTR) + fuse_invalidate_attr(inode); + ++ if (err == -ENOSYS) ++ err = -EPERM; + return err; + } + +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index 2c0908a3021026..687670075d2256 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -853,11 
+853,12 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock) + __releases(&gl->gl_lockref.lock) + __acquires(&gl->gl_lockref.lock) + { +- struct gfs2_holder *gh = NULL; ++ struct gfs2_holder *gh; + + if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) + return; + ++ /* While a demote is in progress, the GLF_LOCK flag must be set. */ + GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); + + if (test_bit(GLF_DEMOTE, &gl->gl_flags) && +@@ -869,18 +870,22 @@ __acquires(&gl->gl_lockref.lock) + set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); + GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); + gl->gl_target = gl->gl_demote_state; ++ do_xmote(gl, NULL, gl->gl_target); ++ return; + } else { + if (test_bit(GLF_DEMOTE, &gl->gl_flags)) + gfs2_demote_wake(gl); + if (do_promote(gl)) + goto out_unlock; + gh = find_first_waiter(gl); ++ if (!gh) ++ goto out_unlock; + gl->gl_target = gh->gh_state; + if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) + do_error(gl, 0); /* Fail queued try locks */ ++ do_xmote(gl, gh, gl->gl_target); ++ return; + } +- do_xmote(gl, gh, gl->gl_target); +- return; + + out_sched: + clear_bit(GLF_LOCK, &gl->gl_flags); +diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c +index 421c0d360836e0..19ec3253748331 100644 +--- a/fs/jbd2/recovery.c ++++ b/fs/jbd2/recovery.c +@@ -286,21 +286,22 @@ static int fc_do_one_pass(journal_t *journal, + int jbd2_journal_recover(journal_t *journal) + { + int err, err2; +- journal_superblock_t * sb; +- + struct recovery_info info; + errseq_t wb_err; + struct address_space *mapping; + + memset(&info, 0, sizeof(info)); +- sb = journal->j_superblock; + + /* + * The journal superblock's s_start field (the current log head) + * is always zero if, and only if, the journal was cleanly +- * unmounted. ++ * unmounted. We use its in-memory version j_tail here because ++ * jbd2_journal_wipe() could have updated it without updating journal ++ * superblock. 
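++ * A zero j_tail therefore reliably means there is nothing to replay,
++ * even if the on-disk superblock still carries a stale s_start.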
+ */ +- if (!sb->s_start) { ++ if (!journal->j_tail) { ++ journal_superblock_t *sb = journal->j_superblock; ++ + jbd2_debug(1, "No recovery required, last transaction %d, head block %u\n", + be32_to_cpu(sb->s_sequence), be32_to_cpu(sb->s_head)); + journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1; +diff --git a/fs/namespace.c b/fs/namespace.c +index 450f4198b8cdd8..ef3b2ae2957ec3 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -636,12 +636,8 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) + smp_mb(); // see mntput_no_expire() and do_umount() + if (likely(!read_seqretry(&mount_lock, seq))) + return 0; +- if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { +- mnt_add_count(mnt, -1); +- return 1; +- } + lock_mount_hash(); +- if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { ++ if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) { + mnt_add_count(mnt, -1); + unlock_mount_hash(); + return 1; +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index 62607d52bfa5e7..aa09f930eeaf7e 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -1080,6 +1080,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc) + if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) + server->namelen = NFS2_MAXNAMLEN; + } ++ /* Linux 'subtree_check' borkenness mandates this setting */ ++ server->fh_expire_type = NFS_FH_VOL_RENAME; + + if (!(fattr->valid & NFS_ATTR_FATTR)) { + error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh, +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index 55cfa1c4e0a65d..bbd582d8a7dc93 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -297,7 +297,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi) + if (delegation == NULL) + goto out; + spin_lock(&delegation->lock); +- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { ++ if (delegation->inode && ++ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { + clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); + /* Refcount matched in nfs_end_delegation_return() */ + ret = nfs_get_delegation(delegation); +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 39f7549afcf5bd..38918638423596 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -2642,6 +2642,18 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data) + unblock_revalidate(new_dentry); + } + ++static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry, ++ struct dentry *new_dentry) ++{ ++ struct nfs_server *server = NFS_SB(old_dentry->d_sb); ++ ++ if (old_dentry->d_parent != new_dentry->d_parent) ++ return false; ++ if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE) ++ return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN); ++ return true; ++} ++ + /* + * RENAME + * FIXME: Some nfsds, like the Linux user space nfsd, may generate a +@@ -2729,7 +2741,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, + + } + +- if (S_ISREG(old_inode->i_mode)) ++ if (S_ISREG(old_inode->i_mode) && ++ nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry)) + nfs_sync_inode(old_inode); + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, + must_unblock ? 
nfs_unblock_rename : NULL); +diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c +index acf4b88889dc38..d5f1fbfd9a0c7f 100644 +--- a/fs/nfs/filelayout/filelayoutdev.c ++++ b/fs/nfs/filelayout/filelayoutdev.c +@@ -75,6 +75,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + struct page *scratch; + struct list_head dsaddrs; + struct nfs4_pnfs_ds_addr *da; ++ struct net *net = server->nfs_client->cl_net; + + /* set up xdr stream */ + scratch = alloc_page(gfp_flags); +@@ -158,8 +159,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + + mp_count = be32_to_cpup(p); /* multipath count */ + for (j = 0; j < mp_count; j++) { +- da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, +- &stream, gfp_flags); ++ da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); + if (da) + list_add_tail(&da->da_node, &dsaddrs); + } +@@ -169,7 +169,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + goto out_err_free_deviceid; + } + +- dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); ++ dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); + if (!dsaddr->ds_list[i]) + goto out_err_drain_dsaddrs; + +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 2b3c5eea1f1345..0bc537de1b2958 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -1255,6 +1255,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, + case -ECONNRESET: + case -EHOSTDOWN: + case -EHOSTUNREACH: ++ case -ENETDOWN: + case -ENETUNREACH: + case -EADDRINUSE: + case -ENOBUFS: +diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c +index e028f5a0ef5f65..d21c5ecfbf1cc3 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c ++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c +@@ -49,6 +49,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + struct nfs4_pnfs_ds_addr *da; + struct nfs4_ff_layout_ds *new_ds = NULL; + struct nfs4_ff_ds_version *ds_versions = NULL; ++ struct net *net = server->nfs_client->cl_net; + u32 mp_count; + u32 version_count; + __be32 *p; +@@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + + for (i = 0; i < mp_count; i++) { + /* multipath ds */ +- da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, +- &stream, gfp_flags); ++ da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); + if (da) + list_add_tail(&da->da_node, &dsaddrs); + } +@@ -149,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + new_ds->ds_versions = ds_versions; + new_ds->ds_versions_cnt = version_count; + +- new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); ++ new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); + if (!new_ds->ds) + goto out_err_drain_dsaddrs; + +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 56bbf59bda3cf6..06230baaa554e7 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr) + + int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) + { ++ if (unlikely(nfs_current_task_exiting())) ++ return -EINTR; + schedule(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h +index ca49d999159eb1..c29ad2e1d41635 100644 +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -865,6 
+865,11 @@ static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid) + NFS4_STATEID_OTHER_SIZE); + } + ++static inline bool nfs_current_task_exiting(void) ++{ ++ return (current->flags & PF_EXITING) != 0; ++} ++ + static inline bool nfs_error_is_fatal(int err) + { + switch (err) { +diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c +index 4bf208a0a8e994..715753f41fb072 100644 +--- a/fs/nfs/nfs3proc.c ++++ b/fs/nfs/nfs3proc.c +@@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) + __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); + schedule_timeout(NFS_JUKEBOX_RETRY_TIME); + res = -ERESTARTSYS; +- } while (!fatal_signal_pending(current)); ++ } while (!fatal_signal_pending(current) && !nfs_current_task_exiting()); + return res; + } + +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index c140427e322ced..1b94a55215e7de 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -422,6 +422,8 @@ static int nfs4_delay_killable(long *timeout) + { + might_sleep(); + ++ if (unlikely(nfs_current_task_exiting())) ++ return -EINTR; + __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); + schedule_timeout(nfs4_update_delay(timeout)); + if (!__fatal_signal_pending(current)) +@@ -433,6 +435,8 @@ static int nfs4_delay_interruptible(long *timeout) + { + might_sleep(); + ++ if (unlikely(nfs_current_task_exiting())) ++ return -EINTR; + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); + schedule_timeout(nfs4_update_delay(timeout)); + if (!signal_pending(current)) +@@ -1712,7 +1716,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, + rcu_read_unlock(); + trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); + +- if (!fatal_signal_pending(current)) { ++ if (!fatal_signal_pending(current) && ++ !nfs_current_task_exiting()) { + if (schedule_timeout(5*HZ) == 0) + status = -EAGAIN; + else +@@ -3494,7 +3499,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, + write_sequnlock(&state->seqlock); + trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); + +- if (fatal_signal_pending(current)) ++ if (fatal_signal_pending(current) || nfs_current_task_exiting()) + status = -EINTR; + else + if (schedule_timeout(5*HZ) != 0) +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 794bb4aa588d39..9fc71dc090c254 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -2741,7 +2741,15 @@ static void nfs4_state_manager(struct nfs_client *clp) + pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" + " with error %d\n", section_sep, section, + clp->cl_hostname, -status); +- ssleep(1); ++ switch (status) { ++ case -ENETDOWN: ++ case -ENETUNREACH: ++ nfs_mark_client_ready(clp, -EIO); ++ break; ++ default: ++ ssleep(1); ++ break; ++ } + out_drain: + memalloc_nofs_restore(memflags); + nfs4_end_drain_session(clp); +diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h +index d886c8226d8fec..79996d7dad0f76 100644 +--- a/fs/nfs/pnfs.h ++++ b/fs/nfs/pnfs.h +@@ -59,6 +59,7 @@ struct nfs4_pnfs_ds { + struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ + char *ds_remotestr; /* comma sep list of addrs */ + struct list_head ds_addrs; ++ const struct net *ds_net; + struct nfs_client *ds_clp; + refcount_t ds_count; + unsigned long ds_state; +@@ -405,7 +406,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode, + int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); + void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); + void 
nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); +-struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, ++struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net, ++ struct list_head *dsaddrs, + gfp_t gfp_flags); + void nfs4_pnfs_v3_ds_connect_unload(void); + int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c +index 88e061bd711b74..1b317c44da126b 100644 +--- a/fs/nfs/pnfs_nfs.c ++++ b/fs/nfs/pnfs_nfs.c +@@ -651,12 +651,12 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1, + * Lookup DS by addresses. nfs4_ds_cache_lock is held + */ + static struct nfs4_pnfs_ds * +-_data_server_lookup_locked(const struct list_head *dsaddrs) ++_data_server_lookup_locked(const struct net *net, const struct list_head *dsaddrs) + { + struct nfs4_pnfs_ds *ds; + + list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) +- if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) ++ if (ds->ds_net == net && _same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) + return ds; + return NULL; + } +@@ -763,7 +763,7 @@ nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) + * uncached and return cached struct nfs4_pnfs_ds. + */ + struct nfs4_pnfs_ds * +-nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) ++nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags) + { + struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; + char *remotestr; +@@ -781,13 +781,14 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) + remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); + + spin_lock(&nfs4_ds_cache_lock); +- tmp_ds = _data_server_lookup_locked(dsaddrs); ++ tmp_ds = _data_server_lookup_locked(net, dsaddrs); + if (tmp_ds == NULL) { + INIT_LIST_HEAD(&ds->ds_addrs); + list_splice_init(dsaddrs, &ds->ds_addrs); + ds->ds_remotestr = remotestr; + refcount_set(&ds->ds_count, 1); + INIT_LIST_HEAD(&ds->ds_node); ++ ds->ds_net = net; + ds->ds_clp = NULL; + list_add(&ds->ds_node, &nfs4_data_server_cache); + dprintk("%s add new data server %s\n", __func__, +diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c +index 0859122684425f..dd4dc70e4aaab7 100644 +--- a/fs/orangefs/inode.c ++++ b/fs/orangefs/inode.c +@@ -23,9 +23,9 @@ static int orangefs_writepage_locked(struct page *page, + struct orangefs_write_range *wr = NULL; + struct iov_iter iter; + struct bio_vec bv; +- size_t len, wlen; ++ size_t wlen; + ssize_t ret; +- loff_t off; ++ loff_t len, off; + + set_page_writeback(page); + +@@ -92,8 +92,7 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow, + struct orangefs_write_range *wrp, wr; + struct iov_iter iter; + ssize_t ret; +- size_t len; +- loff_t off; ++ loff_t len, off; + int i; + + len = i_size_read(inode); +diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c +index 7dbbf3b6d98d3c..a1c97cd2720a68 100644 +--- a/fs/pstore/inode.c ++++ b/fs/pstore/inode.c +@@ -264,7 +264,7 @@ static void parse_options(char *options) + static int pstore_show_options(struct seq_file *m, struct dentry *root) + { + if (kmsg_bytes != CONFIG_PSTORE_DEFAULT_KMSG_BYTES) +- seq_printf(m, ",kmsg_bytes=%lu", kmsg_bytes); ++ seq_printf(m, ",kmsg_bytes=%u", kmsg_bytes); + return 0; + } + +diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h +index 801d6c0b170c3a..a0fc511969100c 100644 +--- a/fs/pstore/internal.h ++++ b/fs/pstore/internal.h +@@ -6,7 +6,7 @@ + #include + #include + +-extern unsigned long kmsg_bytes; ++extern unsigned int kmsg_bytes; + + #ifdef CONFIG_PSTORE_FTRACE + extern 
void pstore_register_ftrace(void); +@@ -35,7 +35,7 @@ static inline void pstore_unregister_pmsg(void) {} + + extern struct pstore_info *psinfo; + +-extern void pstore_set_kmsg_bytes(int); ++extern void pstore_set_kmsg_bytes(unsigned int bytes); + extern void pstore_get_records(int); + extern void pstore_get_backend_records(struct pstore_info *psi, + struct dentry *root, int quiet); +diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c +index 03425928d2fb3c..ef62389212b608 100644 +--- a/fs/pstore/platform.c ++++ b/fs/pstore/platform.c +@@ -92,8 +92,8 @@ module_param(compress, charp, 0444); + MODULE_PARM_DESC(compress, "compression to use"); + + /* How much of the kernel log to snapshot */ +-unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES; +-module_param(kmsg_bytes, ulong, 0444); ++unsigned int kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES; ++module_param(kmsg_bytes, uint, 0444); + MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)"); + + static void *compress_workspace; +@@ -107,9 +107,9 @@ static void *compress_workspace; + static char *big_oops_buf; + static size_t max_compressed_size; + +-void pstore_set_kmsg_bytes(int bytes) ++void pstore_set_kmsg_bytes(unsigned int bytes) + { +- kmsg_bytes = bytes; ++ WRITE_ONCE(kmsg_bytes, bytes); + } + + /* Tag each group of saved records with a sequence number */ +@@ -278,6 +278,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason) + { + struct kmsg_dump_iter iter; ++ unsigned int remaining = READ_ONCE(kmsg_bytes); + unsigned long total = 0; + const char *why; + unsigned int part = 1; +@@ -300,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, + kmsg_dump_rewind(&iter); + + oopscount++; +- while (total < kmsg_bytes) { ++ while (total < remaining) { + char *dst; + size_t dst_size; + int header_size; +diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c +index db9076da2182ad..bf32bc22ebd69f 100644 +--- a/fs/smb/client/cifsacl.c ++++ b/fs/smb/client/cifsacl.c +@@ -811,7 +811,23 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, + return; + + for (i = 0; i < num_aces; ++i) { ++ if (end_of_acl - acl_base < acl_size) ++ break; ++ + ppace[i] = (struct smb_ace *) (acl_base + acl_size); ++ acl_base = (char *)ppace[i]; ++ acl_size = offsetof(struct smb_ace, sid) + ++ offsetof(struct smb_sid, sub_auth); ++ ++ if (end_of_acl - acl_base < acl_size || ++ ppace[i]->sid.num_subauth == 0 || ++ ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || ++ (end_of_acl - acl_base < ++ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || ++ (le16_to_cpu(ppace[i]->size) < ++ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth)) ++ break; ++ + #ifdef CONFIG_CIFS_DEBUG2 + dump_ace(ppace[i], end_of_acl); + #endif +@@ -855,7 +871,6 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, + (void *)ppace[i], + sizeof(struct smb_ace)); */ + +- acl_base = (char *)ppace[i]; + acl_size = le16_to_cpu(ppace[i]->size); + } + +diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h +index c46d418c1c0c3e..ca33f6cd6a8004 100644 +--- a/fs/smb/client/cifspdu.h ++++ b/fs/smb/client/cifspdu.h +@@ -1226,10 +1226,9 @@ typedef struct smb_com_query_information_rsp { + typedef struct smb_com_setattr_req { + struct smb_hdr hdr; /* wct = 8 */ + __le16 attr; +- __le16 time_low; +- __le16 time_high; ++ __le32 last_write_time; + __le16 reserved[5]; /* must be zero */ +- __u16 ByteCount; ++ __le16 ByteCount; + __u8 BufferFormat; /* 4 = ASCII */ + unsigned char 
fileName[]; + } __attribute__((packed)) SETATTR_REQ; +diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h +index 7f97e54686524b..c6d325666b5cd8 100644 +--- a/fs/smb/client/cifsproto.h ++++ b/fs/smb/client/cifsproto.h +@@ -31,6 +31,9 @@ extern void cifs_small_buf_release(void *); + extern void free_rsp_buf(int, void *); + extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, + unsigned int /* length */); ++extern int smb_send_kvec(struct TCP_Server_Info *server, ++ struct msghdr *msg, ++ size_t *sent); + extern unsigned int _get_xid(void); + extern void _free_xid(unsigned int); + #define get_xid() \ +@@ -395,6 +398,10 @@ extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon); + extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, + struct kstatfs *FSData); + ++extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, ++ const char *fileName, __le32 attributes, __le64 write_time, ++ const struct nls_table *nls_codepage, ++ struct cifs_sb_info *cifs_sb); + extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, + const char *fileName, const FILE_BASIC_INFO *data, + const struct nls_table *nls_codepage, +diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c +index 769950adb7763b..b91184ebce02c5 100644 +--- a/fs/smb/client/cifssmb.c ++++ b/fs/smb/client/cifssmb.c +@@ -5157,6 +5157,63 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, + return rc; + } + ++int ++SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, ++ const char *fileName, __le32 attributes, __le64 write_time, ++ const struct nls_table *nls_codepage, ++ struct cifs_sb_info *cifs_sb) ++{ ++ SETATTR_REQ *pSMB; ++ SETATTR_RSP *pSMBr; ++ struct timespec64 ts; ++ int bytes_returned; ++ int name_len; ++ int rc; ++ ++ cifs_dbg(FYI, "In %s path %s\n", __func__, fileName); ++ ++retry: ++ rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, ++ (void **) &pSMBr); ++ if (rc) ++ return rc; ++ ++ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { ++ name_len = ++ cifsConvertToUTF16((__le16 *) pSMB->fileName, ++ fileName, PATH_MAX, nls_codepage, ++ cifs_remap(cifs_sb)); ++ name_len++; /* trailing null */ ++ name_len *= 2; ++ } else { ++ name_len = copy_path_name(pSMB->fileName, fileName); ++ } ++ /* Only few attributes can be set by this command, others are not accepted by Win9x. */ ++ pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) & ++ (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE)); ++ /* Zero write time value (in both NT and SETATTR formats) means to not change it. 
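++ * When write_time is zero, last_write_time is simply left zeroed and
++ * the server keeps the file's current timestamp.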
*/ ++ if (le64_to_cpu(write_time) != 0) { ++ ts = cifs_NTtimeToUnix(write_time); ++ pSMB->last_write_time = cpu_to_le32(ts.tv_sec); ++ } ++ pSMB->BufferFormat = 0x04; ++ name_len++; /* account for buffer type byte */ ++ inc_rfc1001_len(pSMB, (__u16)name_len); ++ pSMB->ByteCount = cpu_to_le16(name_len); ++ ++ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, ++ (struct smb_hdr *) pSMBr, &bytes_returned, 0); ++ if (rc) ++ cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc); ++ ++ cifs_buf_release(pSMB); ++ ++ if (rc == -EAGAIN) ++ goto retry; ++ ++ return rc; ++} ++ + /* Some legacy servers such as NT4 require that the file times be set on + an open handle, rather than by pathname - this is awkward due to + potential access conflicts on the open, but it is unavoidable for these +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 54aba8d642ee75..3faaee33ad4558 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -3051,8 +3051,10 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) + * sessinit is sent but no second negprot + */ + struct rfc1002_session_packet req = {}; +- struct smb_hdr *smb_buf = (struct smb_hdr *)&req; ++ struct msghdr msg = {}; ++ struct kvec iov = {}; + unsigned int len; ++ size_t sent; + + req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name); + +@@ -3081,10 +3083,18 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) + * As per rfc1002, @len must be the number of bytes that follows the + * length field of a rfc1002 session request payload. + */ +- len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req); ++ len = sizeof(req.trailer.session_req); ++ req.type = RFC1002_SESSION_REQUEST; ++ req.flags = 0; ++ req.length = cpu_to_be16(len); ++ len += offsetof(typeof(req), trailer.session_req); ++ iov.iov_base = &req; ++ iov.iov_len = len; ++ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len); ++ rc = smb_send_kvec(server, &msg, &sent); ++ if (rc < 0 || len != sent) ++ return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED; + +- smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len); +- rc = smb_send(server, smb_buf, len); + /* + * RFC1001 layer in at least one server requires very short break before + * negprot presumably because not expecting negprot to follow so fast. 
+@@ -3093,7 +3103,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) + */ + usleep_range(1000, 2000); + +- return rc; ++ return 0; + } + + static int +@@ -3946,11 +3956,13 @@ int + cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, + struct TCP_Server_Info *server) + { ++ bool in_retry = false; + int rc = 0; + + if (!server->ops->need_neg || !server->ops->negotiate) + return -ENOSYS; + ++retry: + /* only send once per connect */ + spin_lock(&server->srv_lock); + if (server->tcpStatus != CifsGood && +@@ -3970,6 +3982,14 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, + spin_unlock(&server->srv_lock); + + rc = server->ops->negotiate(xid, ses, server); ++ if (rc == -EAGAIN) { ++ /* Allow one retry attempt */ ++ if (!in_retry) { ++ in_retry = true; ++ goto retry; ++ } ++ rc = -EHOSTDOWN; ++ } + if (rc == 0) { + spin_lock(&server->srv_lock); + if (server->tcpStatus == CifsInNegotiate) +diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c +index d2e291ef104ec0..137d03781d5268 100644 +--- a/fs/smb/client/fs_context.c ++++ b/fs/smb/client/fs_context.c +@@ -1249,6 +1249,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, + case Opt_rsize: + ctx->rsize = result.uint_32; + ctx->got_rsize = true; ++ ctx->vol_rsize = ctx->rsize; + break; + case Opt_wsize: + ctx->wsize = result.uint_32; +@@ -1264,6 +1265,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, + ctx->wsize, PAGE_SIZE); + } + } ++ ctx->vol_wsize = ctx->wsize; + break; + case Opt_acregmax: + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { +diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h +index bbd2063ab838d3..d0a2043ea44682 100644 +--- a/fs/smb/client/fs_context.h ++++ b/fs/smb/client/fs_context.h +@@ -253,6 +253,9 @@ struct smb3_fs_context { + bool use_client_guid:1; + /* reuse existing guid for multichannel */ + u8 client_guid[SMB2_CLIENT_GUID_SIZE]; ++ /* User-specified original r/wsize value */ ++ unsigned int vol_rsize; ++ unsigned int vol_wsize; + unsigned int bsize; + unsigned int rasize; + unsigned int rsize; +diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c +index d86da949a91905..007da0a699cb0a 100644 +--- a/fs/smb/client/link.c ++++ b/fs/smb/client/link.c +@@ -257,7 +257,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_open_parms oparms; + struct cifs_io_parms io_parms = {0}; + int buf_type = CIFS_NO_BUFFER; +- FILE_ALL_INFO file_info; ++ struct cifs_open_info_data query_data; + + oparms = (struct cifs_open_parms) { + .tcon = tcon, +@@ -269,11 +269,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + .fid = &fid, + }; + +- rc = CIFS_open(xid, &oparms, &oplock, &file_info); ++ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &query_data); + if (rc) + return rc; + +- if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { ++ if (query_data.fi.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { + rc = -ENOENT; + /* it's not a symlink */ + goto out; +@@ -312,7 +312,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + .fid = &fid, + }; + +- rc = CIFS_open(xid, &oparms, &oplock, NULL); ++ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL); + if (rc) + return rc; + +diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c +index 75929a0a56f969..e616be8196deda 100644 +--- a/fs/smb/client/readdir.c ++++ b/fs/smb/client/readdir.c +@@ -733,7 +733,10 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon 
*tcon, loff_t pos, + else + cifs_buf_release(cfile->srch_inf. + ntwrk_buf_start); ++ /* Reset all pointers to the network buffer to prevent stale references */ + cfile->srch_inf.ntwrk_buf_start = NULL; ++ cfile->srch_inf.srch_entries_start = NULL; ++ cfile->srch_inf.last_entry = NULL; + } + rc = initiate_cifs_search(xid, file, full_path); + if (rc) { +@@ -756,11 +759,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, + rc = server->ops->query_dir_next(xid, tcon, &cfile->fid, + search_flags, + &cfile->srch_inf); ++ if (rc) ++ return -ENOENT; + /* FindFirst/Next set last_entry to NULL on malformed reply */ + if (cfile->srch_inf.last_entry) + cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); +- if (rc) +- return -ENOENT; + } + if (index_to_find < cfile->srch_inf.index_of_last_entry) { + /* we found the buffer that contains the entry */ +diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c +index caa1d852ece49c..e62d9cc592e0c8 100644 +--- a/fs/smb/client/smb1ops.c ++++ b/fs/smb/client/smb1ops.c +@@ -426,13 +426,6 @@ cifs_negotiate(const unsigned int xid, + { + int rc; + rc = CIFSSMBNegotiate(xid, ses, server); +- if (rc == -EAGAIN) { +- /* retry only once on 1st time connection */ +- set_credits(server, 1); +- rc = CIFSSMBNegotiate(xid, ses, server); +- if (rc == -EAGAIN) +- rc = -EHOSTDOWN; +- } + return rc; + } + +@@ -444,8 +437,8 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + unsigned int wsize; + + /* start with specified wsize, or default */ +- if (ctx->wsize) +- wsize = ctx->wsize; ++ if (ctx->got_wsize) ++ wsize = ctx->vol_wsize; + else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) + wsize = CIFS_DEFAULT_IOSIZE; + else +@@ -497,7 +490,7 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + else + defsize = server->maxBuf - sizeof(READ_RSP); + +- rsize = ctx->rsize ? ctx->rsize : defsize; ++ rsize = ctx->got_rsize ? ctx->vol_rsize : defsize; + + /* + * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to +@@ -548,24 +541,104 @@ static int cifs_query_path_info(const unsigned int xid, + const char *full_path, + struct cifs_open_info_data *data) + { +- int rc; ++ int rc = -EOPNOTSUPP; + FILE_ALL_INFO fi = {}; ++ struct cifs_search_info search_info = {}; ++ bool non_unicode_wildcard = false; + + data->symlink = false; + data->adjust_tz = false; + +- /* could do find first instead but this returns more info */ +- rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls, +- cifs_remap(cifs_sb)); + /* +- * BB optimize code so we do not make the above call when server claims +- * no NT SMB support and the above call failed at least once - set flag +- * in tcon or mount. ++ * First try CIFSSMBQPathInfo() function which returns more info ++ * (NumberOfLinks) than CIFSFindFirst() fallback function. ++ * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over ++ * TRANS2_QUERY_PATH_INFORMATION, but supports it with filehandle over ++ * TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo(). But SMB ++ * Open command on non-NT servers works only for files, does not work ++ * for directories. And moreover Win9x SMB server returns bogus data in ++ * SMB_QUERY_FILE_ALL_INFO Attributes field. So for non-NT servers, ++ * do not even use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() function. 
++ */
++ if (tcon->ses->capabilities & CAP_NT_SMBS)
++ rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */,
++ cifs_sb->local_nls, cifs_remap(cifs_sb));
++
++ /*
++ * Non-UNICODE variant of fallback functions below expands wildcards,
++ * so they cannot be used for querying paths with wildcard characters.
+ */
+- if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
++ if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?\"><"))
++ non_unicode_wildcard = true;
++
++ /*
++ * Then fall back to CIFSFindFirst() which works also with non-NT
++ * servers but does not provide NumberOfLinks.
++ */
++ if ((rc == -EOPNOTSUPP || rc == -EINVAL) &&
++ !non_unicode_wildcard) {
++ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find))
++ search_info.info_level = SMB_FIND_FILE_INFO_STANDARD;
++ else
++ search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
++ rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL,
++ CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END,
++ &search_info, false);
++ if (rc == 0) {
++ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) {
++ FIND_FILE_STANDARD_INFO *di;
++ int offset = tcon->ses->server->timeAdj;
++
++ di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start;
++ fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
++ di->CreationDate, di->CreationTime, offset)));
++ fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
++ di->LastAccessDate, di->LastAccessTime, offset)));
++ fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
++ di->LastWriteDate, di->LastWriteTime, offset)));
++ fi.ChangeTime = fi.LastWriteTime;
++ fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes));
++ fi.AllocationSize = cpu_to_le64(le32_to_cpu(di->AllocationSize));
++ fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize));
++ } else {
++ FILE_FULL_DIRECTORY_INFO *di;
++
++ di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start;
++ fi.CreationTime = di->CreationTime;
++ fi.LastAccessTime = di->LastAccessTime;
++ fi.LastWriteTime = di->LastWriteTime;
++ fi.ChangeTime = di->ChangeTime;
++ fi.Attributes = di->ExtFileAttributes;
++ fi.AllocationSize = di->AllocationSize;
++ fi.EndOfFile = di->EndOfFile;
++ fi.EASize = di->EaSize;
++ }
++ fi.NumberOfLinks = cpu_to_le32(1);
++ fi.DeletePending = 0;
++ fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY);
++ cifs_buf_release(search_info.ntwrk_buf_start);
++ } else if (!full_path[0]) {
++ /*
++ * CIFSFindFirst() does not work on root path if the
++ * root path was exported on the server from the top
++ * level path (drive letter).
++ */
++ rc = -EOPNOTSUPP;
++ }
++ }
++
++ /*
++ * If everything failed then fall back to the legacy SMB command
++ * SMB_COM_QUERY_INFORMATION, which works with all servers but
++ * provides only limited information.
++ */
++ if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) {
+ rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ data->adjust_tz = true;
++ } else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) {
++ /* Path with non-UNICODE wildcard character cannot exist. */
++ rc = -ENOENT;
+ }
+
+ if (!rc) {
+@@ -662,6 +735,13 @@ static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc;
+ FILE_ALL_INFO fi = {};
+
++ /*
++ * CIFSSMBQFileInfo() for non-NT servers returns bogus data in
++ * Attributes fields. So do not use this command for non-NT servers.
++ */
++ if (!(tcon->ses->capabilities & CAP_NT_SMBS))
++ return -EOPNOTSUPP;
++
+ if (cfile->symlink_target) {
+ data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ if (!data->symlink_target)
+@@ -832,6 +912,9 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ struct cifs_fid fid;
+ struct cifs_open_parms oparms;
+ struct cifsFileInfo *open_file;
++ FILE_BASIC_INFO new_buf;
++ struct cifs_open_info_data query_data;
++ __le64 write_time = buf->LastWriteTime;
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink = NULL;
+@@ -839,20 +922,58 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+
+ /* if the file is already open for write, just use that fileid */
+ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
++
+ if (open_file) {
+ fid.netfid = open_file->fid.netfid;
+ netpid = open_file->pid;
+ tcon = tlink_tcon(open_file->tlink);
+- goto set_via_filehandle;
++ } else {
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink)) {
++ rc = PTR_ERR(tlink);
++ tlink = NULL;
++ goto out;
++ }
++ tcon = tlink_tcon(tlink);
+ }
+
+- tlink = cifs_sb_tlink(cifs_sb);
+- if (IS_ERR(tlink)) {
+- rc = PTR_ERR(tlink);
+- tlink = NULL;
+- goto out;
++ /*
++ * Non-NT servers interpret a zero time value in SMB_SET_FILE_BASIC_INFO
++ * over TRANS2_SET_FILE_INFORMATION as a valid time value. NT servers
++ * interpret a zero time value as "do not change the existing value on
++ * the server". The API of the ->set_file_info() callback expects that
++ * a zero time value has the NT meaning - do not change. Therefore, if
++ * the server is non-NT and some time values in "buf" are zero, then
++ * fetch the missing time values.
++ */
++ if (!(tcon->ses->capabilities & CAP_NT_SMBS) &&
++ (!buf->CreationTime || !buf->LastAccessTime ||
++ !buf->LastWriteTime || !buf->ChangeTime)) {
++ rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data);
++ if (rc) {
++ if (open_file) {
++ cifsFileInfo_put(open_file);
++ open_file = NULL;
++ }
++ goto out;
++ }
++ /*
++ * Original write_time from buf->LastWriteTime is preserved
++ * as SMBSetInformation() interprets zero as do not change.
++ */
++ new_buf = *buf;
++ buf = &new_buf;
++ if (!buf->CreationTime)
++ buf->CreationTime = query_data.fi.CreationTime;
++ if (!buf->LastAccessTime)
++ buf->LastAccessTime = query_data.fi.LastAccessTime;
++ if (!buf->LastWriteTime)
++ buf->LastWriteTime = query_data.fi.LastWriteTime;
++ if (!buf->ChangeTime)
++ buf->ChangeTime = query_data.fi.ChangeTime;
+ }
+- tcon = tlink_tcon(tlink);
++
++ if (open_file)
++ goto set_via_filehandle;
+
+ rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls,
+ cifs_sb);
+@@ -873,8 +994,45 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ .fid = &fid,
+ };
+
+- cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
+- rc = CIFS_open(xid, &oparms, &oplock, NULL);
++ if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) {
++ /* Opening a directory path is not possible on non-NT servers. */
++ rc = -EOPNOTSUPP;
++ } else {
++ /*
++ * Use cifs_open_file() instead of CIFS_open() as
++ * cifs_open_file() selects the correct function, which
++ * works also on non-NT servers.
++ */
++ rc = cifs_open_file(xid, &oparms, &oplock, NULL);
++ /*
++ * Opening a path for writing on non-NT servers is not
++ * possible when the read-only attribute is already set.
++ * A non-NT server in this case returns -EACCES. For those
++ * servers the only possible way to clear the read-only
++ * bit is via the SMB_COM_SETATTR command.
++ */
++ if (rc == -EACCES &&
++ (cinode->cifsAttrs & ATTR_READONLY) &&
++ le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */
++ !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) &&
++ !(tcon->ses->capabilities & CAP_NT_SMBS))
++ rc = -EOPNOTSUPP;
++ }
++
++ /* Fall back to the SMB_COM_SETATTR command when absolutely needed. */
++ if (rc == -EOPNOTSUPP) {
++ cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n");
++ rc = SMBSetInformation(xid, tcon, full_path,
++ buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs),
++ write_time,
++ cifs_sb->local_nls, cifs_sb);
++ if (rc == 0)
++ cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
++ else
++ rc = -EACCES;
++ goto out;
++ }
++
+ if (rc != 0) {
+ if (rc == -EIO)
+ rc = -EINVAL;
+@@ -882,6 +1040,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ }
+
+ netpid = current->tgid;
++ cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n");
+
+ set_via_filehandle:
+ rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
+@@ -892,6 +1051,21 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ CIFSSMBClose(xid, tcon, fid.netfid);
+ else
+ cifsFileInfo_put(open_file);
++
++ /*
++ * Setting the read-only bit is not honored on non-NT servers when done
++ * via open semantics. So for setting it, use the SMB_COM_SETATTR
++ * command. This command works only after the file is closed, so use it
++ * only when the operation was called without a filehandle.
++ */
++ if (open_file == NULL &&
++ !(tcon->ses->capabilities & CAP_NT_SMBS) &&
++ le32_to_cpu(buf->Attributes) & ATTR_READONLY) {
++ SMBSetInformation(xid, tcon, full_path,
++ buf->Attributes,
++ 0 /* do not change write time */,
++ cifs_sb->local_nls, cifs_sb);
++ }
+ out:
+ if (tlink != NULL)
+ cifs_put_tlink(tlink);
+diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
+index db9c807115c605..d7f2835e0b1cc1 100644
+--- a/fs/smb/client/smb2file.c
++++ b/fs/smb/client/smb2file.c
+@@ -107,16 +107,25 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
+ int err_buftype = CIFS_NO_BUFFER;
+ struct cifs_fid *fid = oparms->fid;
+ struct network_resiliency_req nr_ioctl_req;
++ bool retry_without_read_attributes = false;
+
+ smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb);
+ if (smb2_path == NULL)
+ return -ENOMEM;
+
+- oparms->desired_access |= FILE_READ_ATTRIBUTES;
++ if (!(oparms->desired_access & FILE_READ_ATTRIBUTES)) {
++ oparms->desired_access |= FILE_READ_ATTRIBUTES;
++ retry_without_read_attributes = true;
++ }
+ smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+
+ rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
+ &err_buftype);
++ if (rc == -EACCES && retry_without_read_attributes) {
++ oparms->desired_access &= ~FILE_READ_ATTRIBUTES;
++ rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
++ &err_buftype);
++ }
+ if (rc && data) {
+ struct smb2_hdr *hdr = err_iov.iov_base;
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index b809a616728f27..4e3eacbec96d14 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -428,12 +428,20 @@ smb2_negotiate(const unsigned int xid,
+ server->CurrentMid = 0;
+ spin_unlock(&server->mid_lock);
+ rc = SMB2_negotiate(xid, ses, server);
+- /* BB we probably
don't need to retry with modern servers */ +- if (rc == -EAGAIN) +- rc = -EHOSTDOWN; + return rc; + } + ++static inline unsigned int ++prevent_zero_iosize(unsigned int size, const char *type) ++{ ++ if (size == 0) { ++ cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n", ++ type, CIFS_MIN_DEFAULT_IOSIZE); ++ return CIFS_MIN_DEFAULT_IOSIZE; ++ } ++ return size; ++} ++ + static unsigned int + smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + { +@@ -441,12 +449,12 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + unsigned int wsize; + + /* start with specified wsize, or default */ +- wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE; ++ wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE; + wsize = min_t(unsigned int, wsize, server->max_write); + if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); + +- return wsize; ++ return prevent_zero_iosize(wsize, "w"); + } + + static unsigned int +@@ -456,7 +464,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + unsigned int wsize; + + /* start with specified wsize, or default */ +- wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE; ++ wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE; + wsize = min_t(unsigned int, wsize, server->max_write); + #ifdef CONFIG_CIFS_SMB_DIRECT + if (server->rdma) { +@@ -478,7 +486,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); + +- return wsize; ++ return prevent_zero_iosize(wsize, "w"); + } + + static unsigned int +@@ -488,13 +496,13 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + unsigned int rsize; + + /* start with specified rsize, or default */ +- rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE; ++ rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE; + rsize = min_t(unsigned int, rsize, server->max_read); + + if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); + +- return rsize; ++ return prevent_zero_iosize(rsize, "r"); + } + + static unsigned int +@@ -504,7 +512,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + unsigned int rsize; + + /* start with specified rsize, or default */ +- rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE; ++ rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE; + rsize = min_t(unsigned int, rsize, server->max_read); + #ifdef CONFIG_CIFS_SMB_DIRECT + if (server->rdma) { +@@ -527,7 +535,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) + if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); + +- return rsize; ++ return prevent_zero_iosize(rsize, "r"); + } + + /* +diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c +index ddf1a3aafee5c6..2269963e500819 100644 +--- a/fs/smb/client/transport.c ++++ b/fs/smb/client/transport.c +@@ -178,7 +178,7 @@ delete_mid(struct mid_q_entry *mid) + * Our basic "send data to server" function. Should be called with srv_mutex + * held. The caller is responsible for handling the results. 
+ */ +-static int ++int + smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, + size_t *sent) + { +diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h +index c3ee42188d252e..1af827ae757e0d 100644 +--- a/fs/smb/common/smb2pdu.h ++++ b/fs/smb/common/smb2pdu.h +@@ -95,6 +95,9 @@ + */ + #define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024) + ++/* According to MS-SMB2 specification The minimum recommended value is 65536.*/ ++#define CIFS_MIN_DEFAULT_IOSIZE (65536) ++ + /* + * SMB2 Header Definition + * +diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c +index 72294764d4c20c..e564432643ea30 100644 +--- a/fs/smb/server/oplock.c ++++ b/fs/smb/server/oplock.c +@@ -146,12 +146,9 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) + { + struct oplock_info *opinfo; + +- if (list_empty(&ci->m_op_list)) +- return NULL; +- + down_read(&ci->m_lock); +- opinfo = list_first_entry(&ci->m_op_list, struct oplock_info, +- op_entry); ++ opinfo = list_first_entry_or_null(&ci->m_op_list, struct oplock_info, ++ op_entry); + if (opinfo) { + if (opinfo->conn == NULL || + !atomic_inc_not_zero(&opinfo->refcount)) +diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c +index f6616d687365a3..3bbf2382706056 100644 +--- a/fs/smb/server/vfs.c ++++ b/fs/smb/server/vfs.c +@@ -426,10 +426,15 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, + ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n", + *pos, count); + ++ if (*pos >= XATTR_SIZE_MAX) { ++ pr_err("stream write position %lld is out of bounds\n", *pos); ++ return -EINVAL; ++ } ++ + size = *pos + count; + if (size > XATTR_SIZE_MAX) { + size = XATTR_SIZE_MAX; +- count = (*pos + count) - XATTR_SIZE_MAX; ++ count = XATTR_SIZE_MAX - *pos; + } + + v_len = ksmbd_vfs_getcasexattr(idmap, +@@ -443,13 +448,6 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, + goto out; + } + +- if (v_len <= *pos) { +- pr_err("stream write position %lld is out of bounds (stream length: %zd)\n", +- *pos, v_len); +- err = -EINVAL; +- goto out; +- } +- + if (v_len < size) { + wbuf = kvzalloc(size, GFP_KERNEL); + if (!wbuf) { +diff --git a/include/crypto/hash.h b/include/crypto/hash.h +index f7c2a22cd776da..c0d472fdc82e6c 100644 +--- a/include/crypto/hash.h ++++ b/include/crypto/hash.h +@@ -153,6 +153,7 @@ struct ahash_request { + * This is a counterpart to @init_tfm, used to remove + * various changes set in @init_tfm. + * @clone_tfm: Copy transform into new object, may allocate memory. ++ * @reqsize: Size of the request context. + * @halg: see struct hash_alg_common + */ + struct ahash_alg { +@@ -169,6 +170,8 @@ struct ahash_alg { + void (*exit_tfm)(struct crypto_ahash *tfm); + int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); + ++ unsigned int reqsize; ++ + struct hash_alg_common halg; + }; + +diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h +index 9a022caacf9361..f3e7e3e5078dbc 100644 +--- a/include/drm/drm_atomic.h ++++ b/include/drm/drm_atomic.h +@@ -372,8 +372,27 @@ struct drm_atomic_state { + * + * Allow full modeset. This is used by the ATOMIC IOCTL handler to + * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should +- * never consult this flag, instead looking at the output of +- * drm_atomic_crtc_needs_modeset(). ++ * generally not consult this flag, but instead look at the output of ++ * drm_atomic_crtc_needs_modeset(). 
The detailed rules are: ++ * ++ * - Drivers must not consult @allow_modeset in the atomic commit path. ++ * Use drm_atomic_crtc_needs_modeset() instead. ++ * ++ * - Drivers must consult @allow_modeset before adding unrelated struct ++ * drm_crtc_state to this commit by calling ++ * drm_atomic_get_crtc_state(). See also the warning in the ++ * documentation for that function. ++ * ++ * - Drivers must never change this flag, it is under the exclusive ++ * control of userspace. ++ * ++ * - Drivers may consult @allow_modeset in the atomic check path, if ++ * they have the choice between an optimal hardware configuration ++ * which requires a modeset, and a less optimal configuration which ++ * can be committed without a modeset. An example would be suboptimal ++ * scanout FIFO allocation resulting in increased idle power ++ * consumption. This allows userspace to avoid flickering and delays ++ * for the normal composition loop at reasonable cost. + */ + bool allow_modeset : 1; + /** +diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h +index 7c2ec139c464ad..a578068169f19a 100644 +--- a/include/drm/drm_gem.h ++++ b/include/drm/drm_gem.h +@@ -35,6 +35,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -557,6 +558,18 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje + return (obj->handle_count > 1) || obj->dma_buf; + } + ++/** ++ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported ++ * @obj: the GEM object ++ * ++ * Returns: ++ * True if the GEM object's buffer has been imported, false otherwise ++ */ ++static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) ++{ ++ return !!obj->import_attach; ++} ++ + #ifdef CONFIG_LOCKDEP + /** + * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. 
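The ksmbd_vfs_stream_write() hunk above replaces a late range check with an up-front clamp, and fixes the clamp arithmetic: the old code set count to (*pos + count) - XATTR_SIZE_MAX, which is the number of bytes that do NOT fit, not the number that still do. A minimal standalone sketch of the corrected logic (the XATTR_SIZE_MAX value and the -1/-EINVAL mapping are used here only for illustration):

#include <stdio.h>

#define XATTR_SIZE_MAX 65536	/* illustrative stand-in for the kernel constant */

/* Returns how many bytes may be written at pos, or -1 (-EINVAL in the
 * kernel) when pos itself is already past the xattr size limit. */
static long clamp_stream_write(long long pos, long count)
{
	if (pos >= XATTR_SIZE_MAX)
		return -1;
	if (pos + count > XATTR_SIZE_MAX)
		count = XATTR_SIZE_MAX - pos;	/* bytes that still fit */
	return count;
}

int main(void)
{
	printf("%ld\n", clamp_stream_write(65530, 100));	/* 6 -- the old formula gave 94 */
	printf("%ld\n", clamp_stream_write(70000, 100));	/* -1 */
	return 0;
}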
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h +index d4f2c8706042cd..2331cd8174fe3f 100644 +--- a/include/linux/bpf-cgroup.h ++++ b/include/linux/bpf-cgroup.h +@@ -106,6 +106,7 @@ struct bpf_prog_list { + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; ++ u32 flags; + }; + + int cgroup_bpf_inherit(struct cgroup *cgrp); +diff --git a/include/linux/coredump.h b/include/linux/coredump.h +index d3eba436015087..c1b4c8c70caebd 100644 +--- a/include/linux/coredump.h ++++ b/include/linux/coredump.h +@@ -28,6 +28,7 @@ struct coredump_params { + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; ++ struct pid *pid; + }; + + /* +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h +index f0ccca16a0aca1..608e8296ba206a 100644 +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -600,10 +600,14 @@ static inline int dma_mmap_wc(struct device *dev, + #else + #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) + #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) +-#define dma_unmap_addr(PTR, ADDR_NAME) (0) +-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +-#define dma_unmap_len(PTR, LEN_NAME) (0) +-#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) ++#define dma_unmap_addr(PTR, ADDR_NAME) \ ++ ({ typeof(PTR) __p __maybe_unused = PTR; 0; }) ++#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \ ++ do { typeof(PTR) __p __maybe_unused = PTR; } while (0) ++#define dma_unmap_len(PTR, LEN_NAME) \ ++ ({ typeof(PTR) __p __maybe_unused = PTR; 0; }) ++#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \ ++ do { typeof(PTR) __p __maybe_unused = PTR; } while (0) + #endif + + #endif /* _LINUX_DMA_MAPPING_H */ +diff --git a/include/linux/highmem.h b/include/linux/highmem.h +index 75607d4ba26cb7..714966d5701e00 100644 +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -448,7 +448,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio, + const char *from = kmap_local_folio(folio, offset); + size_t chunk = len; + +- if (folio_test_highmem(folio) && ++ if (folio_test_partial_kmap(folio) && + chunk > PAGE_SIZE - offset_in_page(offset)) + chunk = PAGE_SIZE - offset_in_page(offset); + memcpy(to, from, chunk); +@@ -469,7 +469,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset, + char *to = kmap_local_folio(folio, offset); + size_t chunk = len; + +- if (folio_test_highmem(folio) && ++ if (folio_test_partial_kmap(folio) && + chunk > PAGE_SIZE - offset_in_page(offset)) + chunk = PAGE_SIZE - offset_in_page(offset); + memcpy(to, from, chunk); +@@ -501,7 +501,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio, + size_t offset = offset_in_folio(folio, pos); + char *from = kmap_local_folio(folio, offset); + +- if (folio_test_highmem(folio)) { ++ if (folio_test_partial_kmap(folio)) { + offset = offset_in_page(offset); + len = min_t(size_t, len, PAGE_SIZE - offset); + } else +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h +index 8f77bb0f4ae0ca..05f8b7d7d1e968 100644 +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -237,6 +237,7 @@ struct hrtimer_cpu_base { + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; ++ call_single_data_t csd; + } ____cacheline_aligned; + + static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) +diff --git a/include/linux/ipv6.h 
b/include/linux/ipv6.h +index af8a771a053c51..d79851c5fabd86 100644 +--- a/include/linux/ipv6.h ++++ b/include/linux/ipv6.h +@@ -199,6 +199,7 @@ struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; ++ u8 dontfrag:1; + }; + + /* struct ipv6_pinfo - ipv6 private area */ +diff --git a/include/linux/lzo.h b/include/linux/lzo.h +index e95c7d1092b286..4d30e3624acd23 100644 +--- a/include/linux/lzo.h ++++ b/include/linux/lzo.h +@@ -24,10 +24,18 @@ + int lzo1x_1_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + ++/* Same as above but does not write more than dst_len to dst. */ ++int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len, ++ unsigned char *dst, size_t *dst_len, void *wrkmem); ++ + /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ + int lzorle1x_1_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + ++/* Same as above but does not write more than dst_len to dst. */ ++int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len, ++ unsigned char *dst, size_t *dst_len, void *wrkmem); ++ + /* safe decompression with overrun testing */ + int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len); +diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h +index 27f42f713c891c..86f0f2a25a3d63 100644 +--- a/include/linux/mlx4/device.h ++++ b/include/linux/mlx4/device.h +@@ -1135,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf); + +-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); ++int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order); + void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); + + int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index 3fb428ce7d1c7c..b6b9a4dfa4fb92 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -40,6 +40,8 @@ + + #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + ++#define MLX5_FS_MAX_POOL_SIZE BIT(30) ++ + enum mlx5_flow_destination_type { + MLX5_FLOW_DESTINATION_TYPE_NONE, + MLX5_FLOW_DESTINATION_TYPE_VPORT, +diff --git a/include/linux/msi.h b/include/linux/msi.h +index ddace8c34dcf95..2cf15cf5d060ad 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -171,6 +171,10 @@ struct msi_desc_data { + * @dev: Pointer to the device which uses this descriptor + * @msg: The last set MSI message cached for reuse + * @affinity: Optional pointer to a cpu affinity mask for this descriptor ++ * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr. ++ * Only used if iommu_msi_shift != 0 ++ * @iommu_msi_shift: Indicates how many bits of the original address should be ++ * preserved when using iommu_msi_iova. 
+ * @sysfs_attr: Pointer to sysfs device attribute + * + * @write_msi_msg: Callback that may be called when the MSI message +@@ -189,7 +193,8 @@ struct msi_desc { + struct msi_msg msg; + struct irq_affinity_desc *affinity; + #ifdef CONFIG_IRQ_MSI_IOMMU +- const void *iommu_cookie; ++ u64 iommu_msi_iova : 58; ++ u64 iommu_msi_shift : 6; + #endif + #ifdef CONFIG_SYSFS + struct device_attribute *sysfs_attrs; +@@ -306,28 +311,14 @@ struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid, + + #define msi_desc_to_dev(desc) ((desc)->dev) + +-#ifdef CONFIG_IRQ_MSI_IOMMU +-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) +-{ +- return desc->iommu_cookie; +-} +- +-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, +- const void *iommu_cookie) +-{ +- desc->iommu_cookie = iommu_cookie; +-} +-#else +-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) ++static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova, ++ unsigned int msi_shift) + { +- return NULL; +-} +- +-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, +- const void *iommu_cookie) +-{ +-} ++#ifdef CONFIG_IRQ_MSI_IOMMU ++ desc->iommu_msi_iova = msi_iova >> msi_shift; ++ desc->iommu_msi_shift = msi_shift; + #endif ++} + + int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid, + struct msi_desc *init_desc); +diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h +index 86d96e00c2e3de..374b1b208bd89e 100644 +--- a/include/linux/nfs_fs_sb.h ++++ b/include/linux/nfs_fs_sb.h +@@ -199,6 +199,15 @@ struct nfs_server { + char *fscache_uniq; /* Uniquifier (or NULL) */ + #endif + ++ /* The following #defines numerically match the NFSv4 equivalents */ ++#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1) ++#define NFS_FH_VOLATILE_ANY (0x2) ++#define NFS_FH_VOL_MIGRATION (0x4) ++#define NFS_FH_VOL_RENAME (0x8) ++#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME) ++ u32 fh_expire_type; /* V4 bitmask representing file ++ handle volatility type for ++ this filesystem */ + u32 pnfs_blksize; /* layout_blksize attr */ + #if IS_ENABLED(CONFIG_NFS_V4) + u32 attr_bitmask[3];/* V4 bitmask representing the set +@@ -222,9 +231,6 @@ struct nfs_server { + u32 acl_bitmask; /* V4 bitmask representing the ACEs + that are supported on this + filesystem */ +- u32 fh_expire_type; /* V4 bitmask representing file +- handle volatility type for +- this filesystem */ + struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ + struct rpc_wait_queue roc_rpcwaitq; + void *pnfs_ld_data; /* per mount point data */ +diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h +index a77f3a7d21d12f..36d0961f1672fd 100644 +--- a/include/linux/page-flags.h ++++ b/include/linux/page-flags.h +@@ -551,6 +551,13 @@ PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND) + PAGEFLAG_FALSE(HighMem, highmem) + #endif + ++/* Does kmap_local_folio() only allow access to one page of the folio? 
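The highmem.h hunks above switch the chunking condition from folio_test_highmem() to folio_test_partial_kmap(), defined in the page-flags.h hunk here: with CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP every folio must be copied one page at a time, because kmap_local_folio() maps only a single page. A standalone model of the chunked copy loop (plain memcpy stands in for the kmap/kunmap pair; names are illustrative):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define offset_in_page(o) ((o) % PAGE_SIZE)

/* Model of memcpy_from_folio(): when only one page of the folio can be
 * mapped at a time, never let a single memcpy() cross a page boundary. */
static void copy_from_folio(char *to, const char *folio, size_t offset,
			    size_t len, int partial_kmap)
{
	do {
		size_t chunk = len;

		if (partial_kmap && chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, folio + offset, chunk);	/* kmap_local_folio() in the kernel */
		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}

int main(void)
{
	char src[3 * PAGE_SIZE], dst[5000];

	memset(src, 'x', sizeof(src));
	copy_from_folio(dst, src, 4090, sizeof(dst), 1);	/* crosses two page boundaries */
	return 0;
}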
*/ ++#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP ++#define folio_test_partial_kmap(f) true ++#else ++#define folio_test_partial_kmap(f) folio_test_highmem(f) ++#endif ++ + #ifdef CONFIG_SWAP + static __always_inline bool folio_test_swapcache(struct folio *folio) + { +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index fcb834dd75c240..90c782749b0558 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -1016,7 +1016,13 @@ struct perf_output_handle { + struct perf_buffer *rb; + unsigned long wakeup; + unsigned long size; +- u64 aux_flags; ++ union { ++ u64 flags; /* perf_output*() */ ++ u64 aux_flags; /* perf_aux_output*() */ ++ struct { ++ u64 skip_read : 1; ++ }; ++ }; + union { + void *addr; + unsigned long head; +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h +index 72da69cc5764f3..27531a0b3a6e72 100644 +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -97,9 +97,9 @@ static inline void __rcu_read_lock(void) + + static inline void __rcu_read_unlock(void) + { +- preempt_enable(); + if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) + rcu_read_unlock_strict(); ++ preempt_enable(); + } + + static inline int rcu_preempt_depth(void) +diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h +index 126f6b418f6afc..559f758bf2eaa8 100644 +--- a/include/linux/rcutree.h ++++ b/include/linux/rcutree.h +@@ -104,7 +104,7 @@ extern int rcu_scheduler_active; + void rcu_end_inkernel_boot(void); + bool rcu_inkernel_boot_has_ended(void); + bool rcu_is_watching(void); +-#ifndef CONFIG_PREEMPTION ++#ifndef CONFIG_PREEMPT_RCU + void rcu_all_qs(void); + #endif + +diff --git a/include/linux/trace.h b/include/linux/trace.h +index fdcd76b7be83d7..7eaad857dee04f 100644 +--- a/include/linux/trace.h ++++ b/include/linux/trace.h +@@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export) + static inline void trace_printk_init_buffers(void) + { + } +-static inline int trace_array_printk(struct trace_array *tr, unsigned long ip, +- const char *fmt, ...) ++static inline __printf(3, 4) ++int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) + { + return 0; + } +diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h +index 3691e0e76a1a20..62147eecf931da 100644 +--- a/include/linux/trace_seq.h ++++ b/include/linux/trace_seq.h +@@ -79,8 +79,8 @@ extern __printf(2, 3) + void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); + extern __printf(2, 0) + void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); +-extern void +-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); ++extern __printf(2, 0) ++void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); + extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); + extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + int cnt); +@@ -104,8 +104,8 @@ static inline __printf(2, 3) + void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
+ { + } +-static inline void +-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) ++static inline __printf(2, 0) ++void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) + { + } + +diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h +index 33a4c146dc19c4..2ca60828f28bb6 100644 +--- a/include/linux/usb/r8152.h ++++ b/include/linux/usb/r8152.h +@@ -30,6 +30,7 @@ + #define VENDOR_ID_NVIDIA 0x0955 + #define VENDOR_ID_TPLINK 0x2357 + #define VENDOR_ID_DLINK 0x2001 ++#define VENDOR_ID_DELL 0x413c + #define VENDOR_ID_ASUS 0x0b05 + + #if IS_REACHABLE(CONFIG_USB_RTL8152) +diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h +index b4fcd0164048ed..0740dfc6c04881 100644 +--- a/include/media/v4l2-subdev.h ++++ b/include/media/v4l2-subdev.h +@@ -822,7 +822,9 @@ struct v4l2_subdev_state { + * possible configuration from the remote end, likely calling + * this operation as close as possible to stream on time. The + * operation shall fail if the pad index it has been called on +- * is not valid or in case of unrecoverable failures. ++ * is not valid or in case of unrecoverable failures. The ++ * config argument has been memset to 0 just before calling ++ * the op. + * + * @set_routing: enable or disable data connection routes described in the + * subdevice routing table. +diff --git a/include/net/af_unix.h b/include/net/af_unix.h +index 77bf30203d3cf6..b6eedf7650da59 100644 +--- a/include/net/af_unix.h ++++ b/include/net/af_unix.h +@@ -8,21 +8,46 @@ + #include + #include + +-void unix_inflight(struct user_struct *user, struct file *fp); +-void unix_notinflight(struct user_struct *user, struct file *fp); +-void unix_destruct_scm(struct sk_buff *skb); +-void io_uring_destruct_scm(struct sk_buff *skb); ++#if IS_ENABLED(CONFIG_UNIX) ++struct unix_sock *unix_get_socket(struct file *filp); ++#else ++static inline struct unix_sock *unix_get_socket(struct file *filp) ++{ ++ return NULL; ++} ++#endif ++ ++extern unsigned int unix_tot_inflight; ++void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver); ++void unix_del_edges(struct scm_fp_list *fpl); ++void unix_update_edges(struct unix_sock *receiver); ++int unix_prepare_fpl(struct scm_fp_list *fpl); ++void unix_destroy_fpl(struct scm_fp_list *fpl); + void unix_gc(void); +-void wait_for_unix_gc(void); +-struct sock *unix_get_socket(struct file *filp); ++void wait_for_unix_gc(struct scm_fp_list *fpl); ++ ++struct unix_vertex { ++ struct list_head edges; ++ struct list_head entry; ++ struct list_head scc_entry; ++ unsigned long out_degree; ++ unsigned long index; ++ unsigned long scc_index; ++}; ++ ++struct unix_edge { ++ struct unix_sock *predecessor; ++ struct unix_sock *successor; ++ struct list_head vertex_entry; ++ struct list_head stack_entry; ++}; ++ + struct sock *unix_peer_get(struct sock *sk); + + #define UNIX_HASH_MOD (256 - 1) + #define UNIX_HASH_SIZE (256 * 2) + #define UNIX_HASH_BITS 8 + +-extern unsigned int unix_tot_inflight; +- + struct unix_address { + refcount_t refcnt; + int len; +@@ -42,6 +67,7 @@ struct unix_skb_parms { + + struct scm_stat { + atomic_t nr_fds; ++ unsigned long nr_unix_fds; + }; + + #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) +@@ -54,12 +80,9 @@ struct unix_sock { + struct path path; + struct mutex iolock, bindlock; + struct sock *peer; +- struct list_head link; +- unsigned long inflight; ++ struct sock *listener; ++ struct unix_vertex *vertex; + spinlock_t lock; +- unsigned long gc_flags; +-#define 
UNIX_GC_CANDIDATE 0 +-#define UNIX_GC_MAYBE_CYCLE 1 + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; +diff --git a/include/net/scm.h b/include/net/scm.h +index e8c76b4be2fe71..059e287745dc39 100644 +--- a/include/net/scm.h ++++ b/include/net/scm.h +@@ -22,9 +22,20 @@ struct scm_creds { + kgid_t gid; + }; + ++#ifdef CONFIG_UNIX ++struct unix_edge; ++#endif ++ + struct scm_fp_list { + short count; ++ short count_unix; + short max; ++#ifdef CONFIG_UNIX ++ bool inflight; ++ bool dead; ++ struct list_head vertices; ++ struct unix_edge *edges; ++#endif + struct user_struct *user; + struct file *fp[SCM_MAX_FD]; + }; +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index b33d27e42cff38..fd550c0b563450 100644 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -228,7 +228,6 @@ struct xfrm_state { + + /* Data for encapsulator */ + struct xfrm_encap_tmpl *encap; +- struct sock __rcu *encap_sk; + + /* Data for care-of address */ + xfrm_address_t *coaddr; +diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h +index fe05121169589f..555ea3d142a46b 100644 +--- a/include/rdma/uverbs_std_types.h ++++ b/include/rdma/uverbs_std_types.h +@@ -34,7 +34,7 @@ + static inline void *_uobj_get_obj_read(struct ib_uobject *uobj) + { + if (IS_ERR(uobj)) +- return NULL; ++ return ERR_CAST(uobj); + return uobj->object; + } + #define uobj_get_obj_read(_object, _type, _id, _attrs) \ +diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h +index 5497dc9c396a5a..b58dc869cf77eb 100644 +--- a/include/sound/hda_codec.h ++++ b/include/sound/hda_codec.h +@@ -196,6 +196,7 @@ struct hda_codec { + /* beep device */ + struct hda_beep *beep; + unsigned int beep_mode; ++ bool beep_just_power_on; + + /* widget capabilities cache */ + u32 *wcaps; +diff --git a/include/sound/pcm.h b/include/sound/pcm.h +index 2a815373dac1d9..ed4449cbdf8033 100644 +--- a/include/sound/pcm.h ++++ b/include/sound/pcm.h +@@ -1427,6 +1427,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s + #define snd_pcm_lib_mmap_iomem NULL + #endif + ++void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); ++ + /** + * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer + * @dma: DMA number +diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h +index 3c4d5ef6d44636..8ea1674069fe81 100644 +--- a/include/trace/events/btrfs.h ++++ b/include/trace/events/btrfs.h +@@ -1956,7 +1956,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref, + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct prelim_ref *oldref, + const struct prelim_ref *newref, u64 tree_size), +- TP_ARGS(fs_info, newref, oldref, tree_size), ++ TP_ARGS(fs_info, oldref, newref, tree_size), + + TP_STRUCT__entry_btrfs( + __field( u64, root_id ) +diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h +index 431bc700bcfb93..c7f904a72af217 100644 +--- a/include/uapi/linux/bpf.h ++++ b/include/uapi/linux/bpf.h +@@ -1140,6 +1140,7 @@ enum bpf_perf_event_type { + #define BPF_F_BEFORE (1U << 3) + #define BPF_F_AFTER (1U << 4) + #define BPF_F_ID (1U << 5) ++#define BPF_F_PREORDER (1U << 6) + #define BPF_F_LINK BPF_F_LINK /* 1 << 13 */ + + /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the +diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h +index 606b52e88ce335..3d1987e1bb2dd6 100644 +--- a/include/uapi/linux/idxd.h ++++ b/include/uapi/linux/idxd.h +@@ -31,6 +31,7 @@ enum idxd_scmd_stat { + 
IDXD_SCMD_WQ_IRQ_ERR = 0x80100000, + IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000, + IDXD_SCMD_DEV_EVL_ERR = 0x80120000, ++ IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000, + }; + + #define IDXD_SCMD_SOFTERR_MASK 0x80000000 +diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h +index 41ff44dfa1db3f..f52de5ed1b3b6e 100644 +--- a/include/ufs/ufs_quirks.h ++++ b/include/ufs/ufs_quirks.h +@@ -107,4 +107,10 @@ struct ufs_dev_quirk { + */ + #define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11) + ++/* ++ * Some ufs devices may need more time to be in hibern8 before exiting. ++ * Enable this quirk to give it an additional 100us. ++ */ ++#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12) ++ + #endif /* UFS_QUIRKS_H_ */ +diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c +index 976e9500f6518c..a26cf840e623d6 100644 +--- a/io_uring/fdinfo.c ++++ b/io_uring/fdinfo.c +@@ -81,11 +81,11 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f) + seq_printf(m, "SqMask:\t0x%x\n", sq_mask); + seq_printf(m, "SqHead:\t%u\n", sq_head); + seq_printf(m, "SqTail:\t%u\n", sq_tail); +- seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); ++ seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head)); + seq_printf(m, "CqMask:\t0x%x\n", cq_mask); + seq_printf(m, "CqHead:\t%u\n", cq_head); + seq_printf(m, "CqTail:\t%u\n", cq_tail); +- seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); ++ seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail)); + seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head); + sq_entries = min(sq_tail - sq_head, ctx->sq_entries); + for (i = 0; i < sq_entries; i++) { +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index db592fa549b738..43b46098279a16 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -701,6 +701,7 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx) + * to care for a non-real case. + */ + if (need_resched()) { ++ ctx->cqe_sentinel = ctx->cqe_cached; + io_cq_unlock_post(ctx); + mutex_unlock(&ctx->uring_lock); + cond_resched(); +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c +index cf2eb0895d403c..684fb450ad086f 100644 +--- a/kernel/bpf/cgroup.c ++++ b/kernel/bpf/cgroup.c +@@ -369,7 +369,7 @@ static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl) + /* count number of elements in the list. 
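Several hunks above (trace.h, trace_seq.h) and the kernel/trace changes further down move __printf() annotations onto the declarations, including the inline stubs, so format-string checking happens no matter which definition gets compiled in. A minimal illustration of what the annotation buys; the #define mirrors what the kernel's __printf() expands to:

#include <stdio.h>
#include <stdarg.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

/* __printf(2, 0): argument 2 is a printf format, checked against a va_list */
__printf(2, 0)
static void trace_vprint(int level, const char *fmt, va_list args)
{
	printf("<%d> ", level);
	vprintf(fmt, args);
}

/* __printf(2, 3): argument 2 is the format, variadic args start at 3 */
__printf(2, 3)
static void trace_print(int level, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	trace_vprint(level, fmt, args);
	va_end(args);
}

int main(void)
{
	trace_print(1, "%s took %d ns\n", "timer", 42);
	/* trace_print(1, "%s\n", 42); would now warn at compile time */
	return 0;
}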
+ * it's slow but the list cannot be long + */ +-static u32 prog_list_length(struct hlist_head *head) ++static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt) + { + struct bpf_prog_list *pl; + u32 cnt = 0; +@@ -377,6 +377,8 @@ static u32 prog_list_length(struct hlist_head *head) + hlist_for_each_entry(pl, head, node) { + if (!prog_list_prog(pl)) + continue; ++ if (preorder_cnt && (pl->flags & BPF_F_PREORDER)) ++ (*preorder_cnt)++; + cnt++; + } + return cnt; +@@ -400,7 +402,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp, + + if (flags & BPF_F_ALLOW_MULTI) + return true; +- cnt = prog_list_length(&p->bpf.progs[atype]); ++ cnt = prog_list_length(&p->bpf.progs[atype], NULL); + WARN_ON_ONCE(cnt > 1); + if (cnt == 1) + return !!(flags & BPF_F_ALLOW_OVERRIDE); +@@ -423,12 +425,12 @@ static int compute_effective_progs(struct cgroup *cgrp, + struct bpf_prog_array *progs; + struct bpf_prog_list *pl; + struct cgroup *p = cgrp; +- int cnt = 0; ++ int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart; + + /* count number of effective programs by walking parents */ + do { + if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) +- cnt += prog_list_length(&p->bpf.progs[atype]); ++ cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt); + p = cgroup_parent(p); + } while (p); + +@@ -439,20 +441,34 @@ static int compute_effective_progs(struct cgroup *cgrp, + /* populate the array with effective progs */ + cnt = 0; + p = cgrp; ++ fstart = preorder_cnt; ++ bstart = preorder_cnt - 1; + do { + if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) + continue; + ++ init_bstart = bstart; + hlist_for_each_entry(pl, &p->bpf.progs[atype], node) { + if (!prog_list_prog(pl)) + continue; + +- item = &progs->items[cnt]; ++ if (pl->flags & BPF_F_PREORDER) { ++ item = &progs->items[bstart]; ++ bstart--; ++ } else { ++ item = &progs->items[fstart]; ++ fstart++; ++ } + item->prog = prog_list_prog(pl); + bpf_cgroup_storages_assign(item->cgroup_storage, + pl->storage); + cnt++; + } ++ ++ /* reverse pre-ordering progs at this cgroup level */ ++ for (i = bstart + 1, j = init_bstart; i < j; i++, j--) ++ swap(progs->items[i], progs->items[j]); ++ + } while ((p = cgroup_parent(p))); + + *array = progs; +@@ -663,7 +679,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp, + */ + return -EPERM; + +- if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) ++ if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS) + return -E2BIG; + + pl = find_attach_entry(progs, prog, link, replace_prog, +@@ -698,6 +714,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp, + + pl->prog = prog; + pl->link = link; ++ pl->flags = flags; + bpf_cgroup_storages_assign(pl->storage, storage); + cgrp->bpf.flags[atype] = saved_flags; + +@@ -1073,7 +1090,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + lockdep_is_held(&cgroup_mutex)); + total_cnt += bpf_prog_array_length(effective); + } else { +- total_cnt += prog_list_length(&cgrp->bpf.progs[atype]); ++ total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL); + } + } + +@@ -1105,7 +1122,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + u32 id; + + progs = &cgrp->bpf.progs[atype]; +- cnt = min_t(int, prog_list_length(progs), total_cnt); ++ cnt = min_t(int, prog_list_length(progs, NULL), total_cnt); + i = 0; + hlist_for_each_entry(pl, progs, node) { + prog = prog_list_prog(pl); +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c +index fc34f72702cc40..8a3eadf17f785d 
100644 +--- a/kernel/bpf/hashtab.c ++++ b/kernel/bpf/hashtab.c +@@ -2212,7 +2212,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_ + b = &htab->buckets[i]; + rcu_read_lock(); + head = &b->head; +- hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { ++ hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) { + key = elem->key; + if (is_percpu) { + /* current cpu value for percpu map */ +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index f089a616301119..b66349f892f25e 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -3900,7 +3900,8 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, + #define BPF_F_ATTACH_MASK_BASE \ + (BPF_F_ALLOW_OVERRIDE | \ + BPF_F_ALLOW_MULTI | \ +- BPF_F_REPLACE) ++ BPF_F_REPLACE | \ ++ BPF_F_PREORDER) + + #define BPF_F_ATTACH_MASK_MPROG \ + (BPF_F_REPLACE | \ +@@ -4442,6 +4443,8 @@ static int bpf_prog_get_info_by_fd(struct file *file, + info.recursion_misses = stats.misses; + + info.verified_insns = prog->aux->verified_insns; ++ if (prog->aux->btf) ++ info.btf_id = btf_obj_id(prog->aux->btf); + + if (!bpf_capable()) { + info.jited_prog_len = 0; +@@ -4588,8 +4591,6 @@ static int bpf_prog_get_info_by_fd(struct file *file, + } + } + +- if (prog->aux->btf) +- info.btf_id = btf_obj_id(prog->aux->btf); + info.attach_btf_id = prog->aux->attach_btf_id; + if (attach_btf) + info.attach_btf_obj_id = btf_obj_id(attach_btf); +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 756e179a1efe3e..1f9ae600e4455c 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -16014,12 +16014,16 @@ static void clean_verifier_state(struct bpf_verifier_env *env, + static void clean_live_states(struct bpf_verifier_env *env, int insn, + struct bpf_verifier_state *cur) + { ++ struct bpf_verifier_state *loop_entry; + struct bpf_verifier_state_list *sl; + + sl = *explored_state(env, insn); + while (sl) { + if (sl->state.branches) + goto next; ++ loop_entry = get_loop_entry(&sl->state); ++ if (loop_entry && loop_entry->branches) ++ goto next; + if (sl->state.insn_idx != insn || + !same_callsites(&sl->state, cur)) + goto next; +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 3ccf80dfa587a3..e8ef062f6ca058 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -90,7 +90,7 @@ + DEFINE_MUTEX(cgroup_mutex); + DEFINE_SPINLOCK(css_set_lock); + +-#ifdef CONFIG_PROVE_RCU ++#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP) + EXPORT_SYMBOL_GPL(cgroup_mutex); + EXPORT_SYMBOL_GPL(css_set_lock); + #endif +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 987807b1040ae0..5dd6424e62fa89 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -1163,6 +1163,12 @@ static void perf_assert_pmu_disabled(struct pmu *pmu) + WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0); + } + ++static inline void perf_pmu_read(struct perf_event *event) ++{ ++ if (event->state == PERF_EVENT_STATE_ACTIVE) ++ event->pmu->read(event); ++} ++ + static void get_ctx(struct perf_event_context *ctx) + { + refcount_inc(&ctx->refcount); +@@ -3397,8 +3403,7 @@ static void __perf_event_sync_stat(struct perf_event *event, + * we know the event must be on the current CPU, therefore we + * don't need to use it. 
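The compute_effective_progs() change above fills the effective array from two ends: BPF_F_PREORDER programs grow downward from index preorder_cnt - 1 while the rest grow upward from preorder_cnt, and each cgroup level's preorder slice is then reversed in place to restore attachment order. A standalone model of that fill (the cgroup levels and program names are made up for illustration; the two-cursor logic mirrors the hunk):

#include <stdio.h>

#define BPF_F_PREORDER (1U << 6)

struct prog { const char *name; unsigned int flags; };

int main(void)
{
	/* levels[0] is the leaf cgroup; the loop walks toward the root,
	 * mirroring the cgroup_parent() walk in compute_effective_progs() */
	struct prog leaf[] = { {"leaf_a", BPF_F_PREORDER}, {"leaf_b", 0} };
	struct prog mid[]  = { {"mid_a", 0} };
	struct prog root[] = { {"root_a", BPF_F_PREORDER}, {"root_b", 0} };
	struct prog *levels[] = { leaf, mid, root };
	int counts[] = { 2, 1, 2 };
	const char *items[5];
	int preorder_cnt = 2;	/* number of BPF_F_PREORDER progs overall */
	int fstart = preorder_cnt, bstart = preorder_cnt - 1;

	for (int l = 0; l < 3; l++) {
		int init_bstart = bstart;

		for (int i = 0; i < counts[l]; i++) {
			if (levels[l][i].flags & BPF_F_PREORDER)
				items[bstart--] = levels[l][i].name;
			else
				items[fstart++] = levels[l][i].name;
		}
		/* reverse this level's preorder slice to keep list order */
		for (int i = bstart + 1, j = init_bstart; i < j; i++, j--) {
			const char *tmp = items[i];

			items[i] = items[j];
			items[j] = tmp;
		}
	}
	/* prints: root_a leaf_a leaf_b mid_a root_b -- preorder progs end up
	 * root-to-leaf, ahead of the default leaf-to-root ordering */
	for (int i = 0; i < 5; i++)
		printf("%d: %s\n", i, items[i]);
	return 0;
}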
+ */ +- if (event->state == PERF_EVENT_STATE_ACTIVE) +- event->pmu->read(event); ++ perf_pmu_read(event); + + perf_event_update_time(event); + +@@ -4524,15 +4529,8 @@ static void __perf_event_read(void *info) + + pmu->read(event); + +- for_each_sibling_event(sub, event) { +- if (sub->state == PERF_EVENT_STATE_ACTIVE) { +- /* +- * Use sibling's PMU rather than @event's since +- * sibling could be on different (eg: software) PMU. +- */ +- sub->pmu->read(sub); +- } +- } ++ for_each_sibling_event(sub, event) ++ perf_pmu_read(sub); + + data->ret = pmu->commit_txn(pmu); + +@@ -7297,9 +7295,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; + +- if ((leader != event) && +- (leader->state == PERF_EVENT_STATE_ACTIVE)) +- leader->pmu->read(leader); ++ if ((leader != event) && !handle->skip_read) ++ perf_pmu_read(leader); + + values[n++] = perf_event_count(leader); + if (read_format & PERF_FORMAT_ID) +@@ -7312,9 +7309,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, + for_each_sibling_event(sub, leader) { + n = 0; + +- if ((sub != event) && +- (sub->state == PERF_EVENT_STATE_ACTIVE)) +- sub->pmu->read(sub); ++ if ((sub != event) && !handle->skip_read) ++ perf_pmu_read(sub); + + values[n++] = perf_event_count(sub); + if (read_format & PERF_FORMAT_ID) +@@ -7369,6 +7365,9 @@ void perf_output_sample(struct perf_output_handle *handle, + { + u64 sample_type = data->type; + ++ if (data->sample_flags & PERF_SAMPLE_READ) ++ handle->skip_read = 1; ++ + perf_output_put(handle, *header); + + if (sample_type & PERF_SAMPLE_IDENTIFIER) +diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c +index 6c2cb4e4f48dab..8f3f624419aa92 100644 +--- a/kernel/events/hw_breakpoint.c ++++ b/kernel/events/hw_breakpoint.c +@@ -950,9 +950,10 @@ static int hw_breakpoint_event_init(struct perf_event *bp) + return -ENOENT; + + /* +- * no branch sampling for breakpoint events ++ * Check if breakpoint type is supported before proceeding. ++ * Also, no branch sampling for breakpoint events. + */ +- if (has_branch_stack(bp)) ++ if (!hw_breakpoint_slots_cached(find_slot_idx(bp->attr.bp_type)) || has_branch_stack(bp)) + return -EOPNOTSUPP; + + err = register_perf_hw_breakpoint(bp); +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c +index 52de76ef8723b8..dc1193b779c080 100644 +--- a/kernel/events/ring_buffer.c ++++ b/kernel/events/ring_buffer.c +@@ -181,6 +181,7 @@ __perf_output_begin(struct perf_output_handle *handle, + + handle->rb = rb; + handle->event = event; ++ handle->flags = 0; + + have_lost = local_read(&rb->lost); + if (unlikely(have_lost)) { +diff --git a/kernel/fork.c b/kernel/fork.c +index 97f433fb4b5ef4..7966c9a1c163d1 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -518,10 +518,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) + vma_numab_state_init(new); + dup_anon_vma_name(orig, new); + +- /* track_pfn_copy() will later take care of copying internal state. */ +- if (unlikely(new->vm_flags & VM_PFNMAP)) +- untrack_pfn_clear(new); +- + return new; + } + +@@ -715,6 +711,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, + tmp = vm_area_dup(mpnt); + if (!tmp) + goto fail_nomem; ++ ++ /* track_pfn_copy() will later take care of copying internal state. 
*/ ++ if (unlikely(tmp->vm_flags & VM_PFNMAP)) ++ untrack_pfn_clear(tmp); ++ + retval = vma_dup_policy(mpnt, tmp); + if (retval) + goto fail_nomem_policy; +diff --git a/kernel/padata.c b/kernel/padata.c +index 071d8cad807871..93cd7704ab63e6 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -358,7 +358,8 @@ static void padata_reorder(struct parallel_data *pd) + * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish. + */ + padata_get_pd(pd); +- queue_work(pinst->serial_wq, &pd->reorder_work); ++ if (!queue_work(pinst->serial_wq, &pd->reorder_work)) ++ padata_put_pd(pd); + } + } + +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index dcdf449615bdac..51c43e0f9b29b5 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -3119,7 +3119,12 @@ void console_unblank(void) + */ + cookie = console_srcu_read_lock(); + for_each_console_srcu(c) { +- if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) { ++ short flags = console_srcu_read_flags(c); ++ ++ if (flags & CON_SUSPENDED) ++ continue; ++ ++ if ((flags & CON_ENABLED) && c->unblank) { + found_unblank = true; + break; + } +@@ -3156,7 +3161,12 @@ void console_unblank(void) + + cookie = console_srcu_read_lock(); + for_each_console_srcu(c) { +- if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) ++ short flags = console_srcu_read_flags(c); ++ ++ if (flags & CON_SUSPENDED) ++ continue; ++ ++ if ((flags & CON_ENABLED) && c->unblank) + c->unblank(); + } + console_srcu_read_unlock(cookie); +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h +index 41021080ad258d..94b715139f52d9 100644 +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -821,8 +821,17 @@ void rcu_read_unlock_strict(void) + { + struct rcu_data *rdp; + +- if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) ++ if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread) + return; ++ ++ /* ++ * rcu_report_qs_rdp() can only be invoked with a stable rdp and ++ * from the local CPU. ++ * ++ * The in_atomic_preempt_off() check ensures that we come here holding ++ * the last preempt_count (which will get dropped once we return to ++ * __rcu_read_unlock(). ++ */ + rdp = this_cpu_ptr(&rcu_data); + rdp->cpu_no_qs.b.norm = false; + rcu_report_qs_rdp(rdp); +@@ -963,13 +972,16 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) + */ + static void rcu_flavor_sched_clock_irq(int user) + { +- if (user || rcu_is_cpu_rrupt_from_idle()) { ++ if (user || rcu_is_cpu_rrupt_from_idle() || ++ (IS_ENABLED(CONFIG_PREEMPT_COUNT) && ++ (preempt_count() == HARDIRQ_OFFSET))) { + + /* + * Get here if this CPU took its interrupt from user +- * mode or from the idle loop, and if this is not a +- * nested interrupt. In this case, the CPU is in +- * a quiescent state, so note it. ++ * mode, from the idle loop without this being a nested ++ * interrupt, or while not holding the task preempt count ++ * (with PREEMPT_COUNT=y). In this case, the CPU is in a ++ * quiescent state, so note it. 
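The padata_reorder() fix above pairs padata_get_pd() with a conditional put: queue_work() returns false when the work item is already pending, and in that case the extra reference must be dropped or pd leaks. A hedged sketch of the pattern with C11 atomics (the stub and the struct are stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pd { atomic_int refcnt; };

static void pd_get(struct pd *pd)
{
	atomic_fetch_add(&pd->refcnt, 1);
}

static void pd_put(struct pd *pd)
{
	if (atomic_fetch_sub(&pd->refcnt, 1) == 1)
		printf("last reference dropped, freeing pd\n");
}

/* queue_work() returns false if the work was already queued */
static bool queue_work_stub(bool already_pending)
{
	return !already_pending;
}

static void schedule_reorder(struct pd *pd, bool already_pending)
{
	pd_get(pd);		/* reference owned by the pending work */
	if (!queue_work_stub(already_pending))
		pd_put(pd);	/* already queued: that work holds its own ref */
}

int main(void)
{
	struct pd pd = { .refcnt = 1 };

	schedule_reorder(&pd, true);	/* no stray reference is left behind */
	pd_put(&pd);
	return 0;
}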
+ * + * No memory barrier is required here because rcu_qs() + * references only CPU-local variables that other CPUs +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 268e2a49b964e0..6ce3028e6e852f 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -73,10 +73,10 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; + /* + * Minimal preemption granularity for CPU-bound tasks: + * +- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (default: 0.70 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ +-unsigned int sysctl_sched_base_slice = 750000ULL; +-static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; ++unsigned int sysctl_sched_base_slice = 700000ULL; ++static unsigned int normalized_sysctl_sched_base_slice = 700000ULL; + + /* + * After fork, child runs first. If set to 0 (default) then +diff --git a/kernel/softirq.c b/kernel/softirq.c +index f24d80cf20bd35..d9e37f3fa13039 100644 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -125,6 +125,18 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = { + .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock), + }; + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++static struct lock_class_key bh_lock_key; ++struct lockdep_map bh_lock_map = { ++ .name = "local_bh", ++ .key = &bh_lock_key, ++ .wait_type_outer = LD_WAIT_FREE, ++ .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */ ++ .lock_type = LD_LOCK_PERCPU, ++}; ++EXPORT_SYMBOL_GPL(bh_lock_map); ++#endif ++ + /** + * local_bh_blocked() - Check for idle whether BH processing is blocked + * +@@ -147,6 +159,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) + + WARN_ON_ONCE(in_hardirq()); + ++ lock_map_acquire_read(&bh_lock_map); ++ + /* First entry of a task into a BH disabled section? 
*/ + if (!current->softirq_disable_cnt) { + if (preemptible()) { +@@ -210,6 +224,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) + WARN_ON_ONCE(in_hardirq()); + lockdep_assert_irqs_enabled(); + ++ lock_map_release(&bh_lock_map); ++ + local_irq_save(flags); + curcnt = __this_cpu_read(softirq_ctrl.cnt); + +@@ -260,6 +276,8 @@ static inline void ksoftirqd_run_begin(void) + /* Counterpart to ksoftirqd_run_begin() */ + static inline void ksoftirqd_run_end(void) + { ++ /* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */ ++ lock_map_release(&bh_lock_map); + __local_bh_enable(SOFTIRQ_OFFSET, true); + WARN_ON_ONCE(in_interrupt()); + local_irq_enable(); +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 877535b06e73aa..6d9da768604d68 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -58,6 +58,8 @@ + #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT) + #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD) + ++static void retrigger_next_event(void *arg); ++ + /* + * The timer bases: + * +@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = + .clockid = CLOCK_TAI, + .get_time = &ktime_get_clocktai, + }, +- } ++ }, ++ .csd = CSD_INIT(retrigger_next_event, NULL) + }; + + static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { +@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { + [CLOCK_TAI] = HRTIMER_BASE_TAI, + }; + ++static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base) ++{ ++ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) ++ return true; ++ else ++ return likely(base->online); ++} ++ + /* + * Functions and macros which are different for UP/SMP systems are kept in a + * single place +@@ -178,27 +189,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, + } + + /* +- * We do not migrate the timer when it is expiring before the next +- * event on the target cpu. When high resolution is enabled, we cannot +- * reprogram the target cpu hardware and we would cause it to fire +- * late. To keep it simple, we handle the high resolution enabled and +- * disabled case similar. ++ * Check if the elected target is suitable considering its next ++ * event and the hotplug state of the current CPU. ++ * ++ * If the elected target is remote and its next event is after the timer ++ * to queue, then a remote reprogram is necessary. However there is no ++ * guarantee the IPI handling the operation would arrive in time to meet ++ * the high resolution deadline. In this case the local CPU becomes a ++ * preferred target, unless it is offline. ++ * ++ * High and low resolution modes are handled the same way for simplicity. + * + * Called with cpu_base->lock of target cpu held. + */ +-static int +-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) ++static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base, ++ struct hrtimer_cpu_base *new_cpu_base, ++ struct hrtimer_cpu_base *this_cpu_base) + { + ktime_t expires; + ++ /* ++ * The local CPU clockevent can be reprogrammed. Also get_target_base() ++ * guarantees it is online. ++ */ ++ if (new_cpu_base == this_cpu_base) ++ return true; ++ ++ /* ++ * The offline local CPU can't be the default target if the ++ * next remote target event is after this timer. Keep the ++ * elected new base. An IPI will we issued to reprogram ++ * it as a last resort. 
++ */ ++ if (!hrtimer_base_is_online(this_cpu_base)) ++ return true; ++ + expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); +- return expires < new_base->cpu_base->expires_next; ++ ++ return expires >= new_base->cpu_base->expires_next; + } + +-static inline +-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, +- int pinned) ++static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned) + { ++ if (!hrtimer_base_is_online(base)) { ++ int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER)); ++ ++ return &per_cpu(hrtimer_bases, cpu); ++ } ++ + #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) + if (static_branch_likely(&timers_migration_enabled) && !pinned) + return &per_cpu(hrtimer_bases, get_nohz_timer_target()); +@@ -249,8 +287,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, + raw_spin_unlock(&base->cpu_base->lock); + raw_spin_lock(&new_base->cpu_base->lock); + +- if (new_cpu_base != this_cpu_base && +- hrtimer_check_target(timer, new_base)) { ++ if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, ++ this_cpu_base)) { + raw_spin_unlock(&new_base->cpu_base->lock); + raw_spin_lock(&base->cpu_base->lock); + new_cpu_base = this_cpu_base; +@@ -259,8 +297,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, + } + WRITE_ONCE(timer->base, new_base); + } else { +- if (new_cpu_base != this_cpu_base && +- hrtimer_check_target(timer, new_base)) { ++ if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) { + new_cpu_base = this_cpu_base; + goto again; + } +@@ -720,8 +757,6 @@ static inline int hrtimer_is_hres_enabled(void) + return hrtimer_hres_enabled; + } + +-static void retrigger_next_event(void *arg); +- + /* + * Switch to high resolution mode + */ +@@ -1208,6 +1243,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + u64 delta_ns, const enum hrtimer_mode mode, + struct hrtimer_clock_base *base) + { ++ struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases); + struct hrtimer_clock_base *new_base; + bool force_local, first; + +@@ -1219,9 +1255,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + * and enforce reprogramming after it is queued no matter whether + * it is the new first expiring timer again or not. + */ +- force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); ++ force_local = base->cpu_base == this_cpu_base; + force_local &= base->cpu_base->next_timer == timer; + ++ /* ++ * Don't force local queuing if this enqueue happens on a unplugged ++ * CPU after hrtimer_cpu_dying() has been invoked. ++ */ ++ force_local &= this_cpu_base->online; ++ + /* + * Remove an active timer from the queue. In case it is not queued + * on the current CPU, make sure that remove_hrtimer() updates the +@@ -1251,8 +1293,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + } + + first = enqueue_hrtimer(timer, new_base, mode); +- if (!force_local) +- return first; ++ if (!force_local) { ++ /* ++ * If the current CPU base is online, then the timer is ++ * never queued on a remote CPU if it would be the first ++ * expiring timer there. ++ */ ++ if (hrtimer_base_is_online(this_cpu_base)) ++ return first; ++ ++ /* ++ * Timer was enqueued remote because the current base is ++ * already offline. If the timer is the first to expire, ++ * kick the remote CPU to reprogram the clock event. 
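hrtimer_suitable_target() above inverts the old hrtimer_check_target() logic: a remote base is acceptable only if the timer would not become its first expiring event, except that an offline local CPU must accept the remote base anyway and rely on the IPI kick added just below. A standalone model of the decision, with the kernel types reduced to plain values:

#include <stdbool.h>
#include <stdio.h>

/* Model of hrtimer_suitable_target(): may the timer stay on the chosen base? */
static bool suitable_target(long long expires, long long remote_next_event,
			    bool target_is_local, bool local_cpu_online)
{
	if (target_is_local)
		return true;	/* the local clockevent can always be reprogrammed */
	if (!local_cpu_online)
		return true;	/* offline local CPU: keep remote base, IPI as last resort */
	return expires >= remote_next_event;	/* must not need a remote reprogram */
}

int main(void)
{
	printf("%d\n", suitable_target(100, 200, false, true));	/* 0: fall back to local */
	printf("%d\n", suitable_target(100, 200, false, false));	/* 1: remote base + IPI kick */
	return 0;
}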
++ */ ++ if (first) { ++ struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base; ++ ++ smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd); ++ } ++ return 0; ++ } + + /* + * Timer was forced to stay on the current CPU to avoid +diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c +index b924f0f096fa44..7534069f603334 100644 +--- a/kernel/time/posix-timers.c ++++ b/kernel/time/posix-timers.c +@@ -118,6 +118,7 @@ static int posix_timer_add(struct k_itimer *timer) + return id; + } + spin_unlock(&hash_lock); ++ cond_resched(); + } + /* POSIX return code when no timer ID could be allocated */ + return -EAGAIN; +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c +index ed7d6ad694fba6..20a5e6962b6967 100644 +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -46,7 +46,7 @@ static void + print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, + int idx, u64 now) + { +- SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function); ++ SEQ_printf(m, " #%d: <%p>, %ps", idx, taddr, timer->function); + SEQ_printf(m, ", S:%02x", timer->state); + SEQ_printf(m, "\n"); + SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n", +@@ -98,7 +98,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, + static void + print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) + { +- SEQ_printf(m, " .base: %pK\n", base); ++ SEQ_printf(m, " .base: %p\n", base); + SEQ_printf(m, " .index: %d\n", base->index); + + SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 95868c31573007..43d19b69c635b2 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3487,10 +3487,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) + } + EXPORT_SYMBOL_GPL(trace_vbprintk); + +-__printf(3, 0) +-static int +-__trace_array_vprintk(struct trace_buffer *buffer, +- unsigned long ip, const char *fmt, va_list args) ++static __printf(3, 0) ++int __trace_array_vprintk(struct trace_buffer *buffer, ++ unsigned long ip, const char *fmt, va_list args) + { + struct trace_event_call *call = &event_print; + struct ring_buffer_event *event; +@@ -3543,7 +3542,6 @@ __trace_array_vprintk(struct trace_buffer *buffer, + return len; + } + +-__printf(3, 0) + int trace_array_vprintk(struct trace_array *tr, + unsigned long ip, const char *fmt, va_list args) + { +@@ -3573,7 +3571,6 @@ int trace_array_vprintk(struct trace_array *tr, + * Note, trace_array_init_printk() must be called on @tr before this + * can be used. + */ +-__printf(3, 0) + int trace_array_printk(struct trace_array *tr, + unsigned long ip, const char *fmt, ...) + { +@@ -3618,7 +3615,6 @@ int trace_array_init_printk(struct trace_array *tr) + } + EXPORT_SYMBOL_GPL(trace_array_init_printk); + +-__printf(3, 4) + int trace_array_printk_buf(struct trace_buffer *buffer, + unsigned long ip, const char *fmt, ...) 
+ { +@@ -3634,7 +3630,6 @@ int trace_array_printk_buf(struct trace_buffer *buffer, + return ret; + } + +-__printf(2, 0) + int trace_vprintk(unsigned long ip, const char *fmt, va_list args) + { + return trace_array_vprintk(&global_trace, ip, fmt, args); +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index db0d2641125e7e..faf892aecdf490 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -800,13 +800,15 @@ static inline void __init disable_tracing_selftest(const char *reason) + + extern void *head_page(struct trace_array_cpu *data); + extern unsigned long long ns2usecs(u64 nsec); +-extern int +-trace_vbprintk(unsigned long ip, const char *fmt, va_list args); +-extern int +-trace_vprintk(unsigned long ip, const char *fmt, va_list args); +-extern int +-trace_array_vprintk(struct trace_array *tr, +- unsigned long ip, const char *fmt, va_list args); ++ ++__printf(2, 0) ++int trace_vbprintk(unsigned long ip, const char *fmt, va_list args); ++__printf(2, 0) ++int trace_vprintk(unsigned long ip, const char *fmt, va_list args); ++__printf(3, 0) ++int trace_array_vprintk(struct trace_array *tr, ++ unsigned long ip, const char *fmt, va_list args); ++__printf(3, 4) + int trace_array_printk_buf(struct trace_buffer *buffer, + unsigned long ip, const char *fmt, ...); + void trace_printk_seq(struct trace_seq *s); +diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c +index fde0aa24414800..a75a9ca46b5940 100644 +--- a/lib/dynamic_queue_limits.c ++++ b/lib/dynamic_queue_limits.c +@@ -116,7 +116,7 @@ EXPORT_SYMBOL(dql_completed); + void dql_reset(struct dql *dql) + { + /* Reset all dynamic values */ +- dql->limit = 0; ++ dql->limit = dql->min_limit; + dql->num_queued = 0; + dql->num_completed = 0; + dql->last_obj_cnt = 0; +diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile +index 2f58fafbbdddc0..fc7b2b7ef4b20e 100644 +--- a/lib/lzo/Makefile ++++ b/lib/lzo/Makefile +@@ -1,5 +1,5 @@ + # SPDX-License-Identifier: GPL-2.0-only +-lzo_compress-objs := lzo1x_compress.o ++lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o + lzo_decompress-objs := lzo1x_decompress_safe.o + + obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o +diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c +index 9d31e7126606ac..f00dff9b9d4e1b 100644 +--- a/lib/lzo/lzo1x_compress.c ++++ b/lib/lzo/lzo1x_compress.c +@@ -18,11 +18,22 @@ + #include + #include "lzodefs.h" + +-static noinline size_t +-lzo1x_1_do_compress(const unsigned char *in, size_t in_len, +- unsigned char *out, size_t *out_len, +- size_t ti, void *wrkmem, signed char *state_offset, +- const unsigned char bitstream_version) ++#undef LZO_UNSAFE ++ ++#ifndef LZO_SAFE ++#define LZO_UNSAFE 1 ++#define LZO_SAFE(name) name ++#define HAVE_OP(x) 1 ++#endif ++ ++#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun ++ ++static noinline int ++LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len, ++ unsigned char **out, unsigned char *op_end, ++ size_t *tp, void *wrkmem, ++ signed char *state_offset, ++ const unsigned char bitstream_version) + { + const unsigned char *ip; + unsigned char *op; +@@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + const unsigned char * const ip_end = in + in_len - 20; + const unsigned char *ii; + lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; ++ size_t ti = *tp; + +- op = out; ++ op = *out; + ip = in; + ii = ip; + ip += ti < 4 ? 
4 - ti : 0; +@@ -116,25 +128,32 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + if (t != 0) { + if (t <= 3) { + op[*state_offset] |= t; ++ NEED_OP(4); + COPY4(op, ii); + op += t; + } else if (t <= 16) { ++ NEED_OP(17); + *op++ = (t - 3); + COPY8(op, ii); + COPY8(op + 8, ii + 8); + op += t; + } else { + if (t <= 18) { ++ NEED_OP(1); + *op++ = (t - 3); + } else { + size_t tt = t - 18; ++ NEED_OP(1); + *op++ = 0; + while (unlikely(tt > 255)) { + tt -= 255; ++ NEED_OP(1); + *op++ = 0; + } ++ NEED_OP(1); + *op++ = tt; + } ++ NEED_OP(t); + do { + COPY8(op, ii); + COPY8(op + 8, ii + 8); +@@ -151,6 +170,7 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + if (unlikely(run_length)) { + ip += run_length; + run_length -= MIN_ZERO_RUN_LENGTH; ++ NEED_OP(4); + put_unaligned_le32((run_length << 21) | 0xfffc18 + | (run_length & 0x7), op); + op += 4; +@@ -243,10 +263,12 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + ip += m_len; + if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { + m_off -= 1; ++ NEED_OP(2); + *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); + *op++ = (m_off >> 3); + } else if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; ++ NEED_OP(1); + if (m_len <= M3_MAX_LEN) + *op++ = (M3_MARKER | (m_len - 2)); + else { +@@ -254,14 +276,18 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + *op++ = M3_MARKER | 0; + while (unlikely(m_len > 255)) { + m_len -= 255; ++ NEED_OP(1); + *op++ = 0; + } ++ NEED_OP(1); + *op++ = (m_len); + } ++ NEED_OP(2); + *op++ = (m_off << 2); + *op++ = (m_off >> 6); + } else { + m_off -= 0x4000; ++ NEED_OP(1); + if (m_len <= M4_MAX_LEN) + *op++ = (M4_MARKER | ((m_off >> 11) & 8) + | (m_len - 2)); +@@ -282,11 +308,14 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + m_len -= M4_MAX_LEN; + *op++ = (M4_MARKER | ((m_off >> 11) & 8)); + while (unlikely(m_len > 255)) { ++ NEED_OP(1); + m_len -= 255; + *op++ = 0; + } ++ NEED_OP(1); + *op++ = (m_len); + } ++ NEED_OP(2); + *op++ = (m_off << 2); + *op++ = (m_off >> 6); + } +@@ -295,14 +324,20 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + ii = ip; + goto next; + } +- *out_len = op - out; +- return in_end - (ii - ti); ++ *out = op; ++ *tp = in_end - (ii - ti); ++ return LZO_E_OK; ++ ++output_overrun: ++ return LZO_E_OUTPUT_OVERRUN; + } + +-static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, +- unsigned char *out, size_t *out_len, +- void *wrkmem, const unsigned char bitstream_version) ++static int LZO_SAFE(lzogeneric1x_1_compress)( ++ const unsigned char *in, size_t in_len, ++ unsigned char *out, size_t *out_len, ++ void *wrkmem, const unsigned char bitstream_version) + { ++ unsigned char * const op_end = out + *out_len; + const unsigned char *ip = in; + unsigned char *op = out; + unsigned char *data_start; +@@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, + while (l > 20) { + size_t ll = min_t(size_t, l, m4_max_offset + 1); + uintptr_t ll_end = (uintptr_t) ip + ll; ++ int err; ++ + if ((ll_end + ((t + ll) >> 5)) <= ll_end) + break; + BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); + memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); +- t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, +- &state_offset, bitstream_version); ++ err = LZO_SAFE(lzo1x_1_do_compress)( ++ ip, ll, &op, op_end, &t, wrkmem, ++ &state_offset, bitstream_version); ++ if (err != LZO_E_OK) ++ return err; + ip += ll; +- op += *out_len; + l -= ll; + } + t += l; +@@ -342,20 +381,26 @@ 
static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, + const unsigned char *ii = in + in_len - t; + + if (op == data_start && t <= 238) { ++ NEED_OP(1); + *op++ = (17 + t); + } else if (t <= 3) { + op[state_offset] |= t; + } else if (t <= 18) { ++ NEED_OP(1); + *op++ = (t - 3); + } else { + size_t tt = t - 18; ++ NEED_OP(1); + *op++ = 0; + while (tt > 255) { + tt -= 255; ++ NEED_OP(1); + *op++ = 0; + } ++ NEED_OP(1); + *op++ = tt; + } ++ NEED_OP(t); + if (t >= 16) do { + COPY8(op, ii); + COPY8(op + 8, ii + 8); +@@ -368,31 +413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, + } while (--t > 0); + } + ++ NEED_OP(3); + *op++ = M4_MARKER | 1; + *op++ = 0; + *op++ = 0; + + *out_len = op - out; + return LZO_E_OK; ++ ++output_overrun: ++ return LZO_E_OUTPUT_OVERRUN; + } + +-int lzo1x_1_compress(const unsigned char *in, size_t in_len, +- unsigned char *out, size_t *out_len, +- void *wrkmem) ++int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len, ++ unsigned char *out, size_t *out_len, ++ void *wrkmem) + { +- return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); ++ return LZO_SAFE(lzogeneric1x_1_compress)( ++ in, in_len, out, out_len, wrkmem, 0); + } + +-int lzorle1x_1_compress(const unsigned char *in, size_t in_len, +- unsigned char *out, size_t *out_len, +- void *wrkmem) ++int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len, ++ unsigned char *out, size_t *out_len, ++ void *wrkmem) + { +- return lzogeneric1x_1_compress(in, in_len, out, out_len, +- wrkmem, LZO_VERSION); ++ return LZO_SAFE(lzogeneric1x_1_compress)( ++ in, in_len, out, out_len, wrkmem, LZO_VERSION); + } + +-EXPORT_SYMBOL_GPL(lzo1x_1_compress); +-EXPORT_SYMBOL_GPL(lzorle1x_1_compress); ++EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress)); ++EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress)); + ++#ifndef LZO_UNSAFE + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("LZO1X-1 Compressor"); ++#endif +diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c +new file mode 100644 +index 00000000000000..371c9f84949281 +--- /dev/null ++++ b/lib/lzo/lzo1x_compress_safe.c +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * LZO1X Compressor from LZO ++ * ++ * Copyright (C) 1996-2012 Markus F.X.J. 
Oberhumer ++ * ++ * The full LZO package can be found at: ++ * http://www.oberhumer.com/opensource/lzo/ ++ * ++ * Changed for Linux kernel use by: ++ * Nitin Gupta ++ * Richard Purdie ++ */ ++ ++#define LZO_SAFE(name) name##_safe ++#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) ++ ++#include "lzo1x_compress.c" +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 9bf5a69e20d87a..2d2cada8a8a4c8 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -1266,7 +1266,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + { + struct mem_cgroup *iter; + int ret = 0; +- int i = 0; + + BUG_ON(mem_cgroup_is_root(memcg)); + +@@ -1276,10 +1275,9 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + + css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); + while (!ret && (task = css_task_iter_next(&it))) { +- /* Avoid potential softlockup warning */ +- if ((++i & 1023) == 0) +- cond_resched(); + ret = fn(task, arg); ++ /* Avoid potential softlockup warning */ ++ cond_resched(); + } + css_task_iter_end(&it); + if (ret) { +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 74737c35082b45..44011ebecddf01 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -4038,6 +4038,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, + } + + retry: ++ /* ++ * Deal with possible cpuset update races or zonelist updates to avoid ++ * infinite retries. ++ */ ++ if (check_retry_cpuset(cpuset_mems_cookie, ac) || ++ check_retry_zonelist(zonelist_iter_cookie)) ++ goto restart; ++ + /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ + if (alloc_flags & ALLOC_KSWAPD) + wake_all_kswapds(order, gfp_mask, ac); +diff --git a/net/Makefile b/net/Makefile +index 4c4dc535453dff..45f3fbaae644e1 100644 +--- a/net/Makefile ++++ b/net/Makefile +@@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/ + obj-$(CONFIG_INET) += ipv4/ + obj-$(CONFIG_TLS) += tls/ + obj-$(CONFIG_XFRM) += xfrm/ +-obj-$(CONFIG_UNIX_SCM) += unix/ ++obj-$(CONFIG_UNIX) += unix/ + obj-y += ipv6/ + obj-$(CONFIG_BPFILTER) += bpfilter/ + obj-$(CONFIG_PACKET) += packet/ +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 72ee41b894a520..1c54e812ef1f78 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) + sizeof(req), &req); + } + +-static bool l2cap_check_enc_key_size(struct hci_conn *hcon) ++static bool l2cap_check_enc_key_size(struct hci_conn *hcon, ++ struct l2cap_chan *chan) + { + /* The minimum encryption key size needs to be enforced by the + * host stack before establishing any L2CAP connections. 
The +@@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) + int min_key_size = hcon->hdev->min_enc_key_size; + + /* On FIPS security level, key size must be 16 bytes */ +- if (hcon->sec_level == BT_SECURITY_FIPS) ++ if (chan->sec_level == BT_SECURITY_FIPS) + min_key_size = 16; + + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || +@@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) + !__l2cap_no_conn_pending(chan)) + return; + +- if (l2cap_check_enc_key_size(conn->hcon)) ++ if (l2cap_check_enc_key_size(conn->hcon, chan)) + l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); +@@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) + continue; + } + +- if (l2cap_check_enc_key_size(conn->hcon)) ++ if (l2cap_check_enc_key_size(conn->hcon, chan)) + l2cap_start_connection(chan); + else + l2cap_chan_close(chan, ECONNREFUSED); +@@ -3955,7 +3956,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, + /* Check if the ACL is secure enough (if not SDP) */ + if (psm != cpu_to_le16(L2CAP_PSM_SDP) && + (!hci_conn_check_link_mode(conn->hcon) || +- !l2cap_check_enc_key_size(conn->hcon))) { ++ !l2cap_check_enc_key_size(conn->hcon, pchan))) { + conn->disc_reason = HCI_ERROR_AUTH_FAILURE; + result = L2CAP_CR_SEC_BLOCK; + goto response; +@@ -7323,7 +7324,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) + } + + if (chan->state == BT_CONNECT) { +- if (!status && l2cap_check_enc_key_size(hcon)) ++ if (!status && l2cap_check_enc_key_size(hcon, chan)) + l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); +@@ -7333,7 +7334,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) + struct l2cap_conn_rsp rsp; + __u16 res, stat; + +- if (!status && l2cap_check_enc_key_size(hcon)) { ++ if (!status && l2cap_check_enc_key_size(hcon, chan)) { + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + res = L2CAP_CR_PEND; + stat = L2CAP_CS_AUTHOR_PEND; +diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c +index 7305f5f8215cac..96bea0c8408fec 100644 +--- a/net/bridge/br_mdb.c ++++ b/net/bridge/br_mdb.c +@@ -1030,7 +1030,7 @@ static int br_mdb_add_group(const struct br_mdb_config *cfg, + + /* host join */ + if (!port) { +- if (mp->host_joined) { ++ if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) { + NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host"); + return -EEXIST; + } +diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c +index 98aea5485aaef4..a8c67035e23c00 100644 +--- a/net/bridge/br_nf_core.c ++++ b/net/bridge/br_nf_core.c +@@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { + * ipt_REJECT needs it. Future netfilter modules might + * require us to fill additional fields. 
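+ *
+ * For orientation on the change below: until now the fake rtable
+ * shared one const metrics array that pinned RTAX_MTU at 1500 for
+ * every bridge; giving each bridge its own writable array lets
+ * dst_metric_set() keep RTAX_MTU in step with br->dev->mtu.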
+ */ +-static const u32 br_dst_default_metrics[RTAX_MAX] = { +- [RTAX_MTU - 1] = 1500, +-}; +- + void br_netfilter_rtable_init(struct net_bridge *br) + { + struct rtable *rt = &br->fake_rtable; + + rcuref_init(&rt->dst.__rcuref, 1); + rt->dst.dev = br->dev; +- dst_init_metrics(&rt->dst, br_dst_default_metrics, true); ++ dst_init_metrics(&rt->dst, br->metrics, false); ++ dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu); + rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; + rt->dst.ops = &fake_dst_ops; + } +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index 72d80fd943a8a2..9197b511e45972 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -502,6 +502,7 @@ struct net_bridge { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; ++ u32 metrics[RTAX_MAX]; + #endif + u16 group_fwd_mask; + u16 group_fwd_mask_required; +diff --git a/net/can/bcm.c b/net/can/bcm.c +index a1f5db0fd5d4fd..75653584f31b94 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -58,6 +58,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -120,6 +121,7 @@ struct bcm_op { + struct canfd_frame last_sframe; + struct sock *sk; + struct net_device *rx_reg_dev; ++ spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */ + }; + + struct bcm_sock { +@@ -205,7 +207,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) + seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); + seq_printf(m, " <<<\n"); + +- list_for_each_entry(op, &bo->rx_ops, list) { ++ rcu_read_lock(); ++ ++ list_for_each_entry_rcu(op, &bo->rx_ops, list) { + + unsigned long reduction; + +@@ -261,6 +265,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) + seq_printf(m, "# sent %ld\n", op->frames_abs); + } + seq_putc(m, '\n'); ++ ++ rcu_read_unlock(); ++ + return 0; + } + #endif /* CONFIG_PROC_FS */ +@@ -273,13 +280,18 @@ static void bcm_can_tx(struct bcm_op *op) + { + struct sk_buff *skb; + struct net_device *dev; +- struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; ++ struct canfd_frame *cf; + int err; + + /* no target device? => exit */ + if (!op->ifindex) + return; + ++ /* read currframe under lock protection */ ++ spin_lock_bh(&op->bcm_tx_lock); ++ cf = op->frames + op->cfsiz * op->currframe; ++ spin_unlock_bh(&op->bcm_tx_lock); ++ + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); + if (!dev) { + /* RFC: should this bcm_op remove itself here? */ +@@ -300,6 +312,10 @@ static void bcm_can_tx(struct bcm_op *op) + skb->dev = dev; + can_skb_set_owner(skb, op->sk); + err = can_send(skb, 1); ++ ++ /* update currframe and count under lock protection */ ++ spin_lock_bh(&op->bcm_tx_lock); ++ + if (!err) + op->frames_abs++; + +@@ -308,6 +324,11 @@ static void bcm_can_tx(struct bcm_op *op) + /* reached last frame? 
*/ + if (op->currframe >= op->nframes) + op->currframe = 0; ++ ++ if (op->count > 0) ++ op->count--; ++ ++ spin_unlock_bh(&op->bcm_tx_lock); + out: + dev_put(dev); + } +@@ -404,7 +425,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) + struct bcm_msg_head msg_head; + + if (op->kt_ival1 && (op->count > 0)) { +- op->count--; ++ bcm_can_tx(op); + if (!op->count && (op->flags & TX_COUNTEVT)) { + + /* create notification to user */ +@@ -419,7 +440,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) + + bcm_send_to_user(op, &msg_head, NULL, 0); + } +- bcm_can_tx(op); + + } else if (op->kt_ival2) { + bcm_can_tx(op); +@@ -801,7 +821,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, + REGMASK(op->can_id), + bcm_rx_handler, op); + +- list_del(&op->list); ++ list_del_rcu(&op->list); + bcm_remove_op(op); + return 1; /* done */ + } +@@ -821,7 +841,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, + list_for_each_entry_safe(op, n, ops, list) { + if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && + (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { +- list_del(&op->list); ++ list_del_rcu(&op->list); + bcm_remove_op(op); + return 1; /* done */ + } +@@ -914,6 +934,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + } + op->flags = msg_head->flags; + ++ /* only lock for unlikely count/nframes/currframe changes */ ++ if (op->nframes != msg_head->nframes || ++ op->flags & TX_RESET_MULTI_IDX || ++ op->flags & SETTIMER) { ++ ++ spin_lock_bh(&op->bcm_tx_lock); ++ ++ if (op->nframes != msg_head->nframes || ++ op->flags & TX_RESET_MULTI_IDX) { ++ /* potentially update changed nframes */ ++ op->nframes = msg_head->nframes; ++ /* restart multiple frame transmission */ ++ op->currframe = 0; ++ } ++ ++ if (op->flags & SETTIMER) ++ op->count = msg_head->count; ++ ++ spin_unlock_bh(&op->bcm_tx_lock); ++ } ++ + } else { + /* insert new BCM operation for the given can_id */ + +@@ -921,9 +962,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + if (!op) + return -ENOMEM; + ++ spin_lock_init(&op->bcm_tx_lock); + op->can_id = msg_head->can_id; + op->cfsiz = CFSIZ(msg_head->flags); + op->flags = msg_head->flags; ++ op->nframes = msg_head->nframes; ++ ++ if (op->flags & SETTIMER) ++ op->count = msg_head->count; + + /* create array for CAN frames and copy the data */ + if (msg_head->nframes > 1) { +@@ -982,22 +1028,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + + } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ + +- if (op->nframes != msg_head->nframes) { +- op->nframes = msg_head->nframes; +- /* start multiple frame transmission with index 0 */ +- op->currframe = 0; +- } +- +- /* check flags */ +- +- if (op->flags & TX_RESET_MULTI_IDX) { +- /* start multiple frame transmission with index 0 */ +- op->currframe = 0; +- } +- + if (op->flags & SETTIMER) { + /* set timer values */ +- op->count = msg_head->count; + op->ival1 = msg_head->ival1; + op->ival2 = msg_head->ival2; + op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); +@@ -1014,11 +1046,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + op->flags |= TX_ANNOUNCE; + } + +- if (op->flags & TX_ANNOUNCE) { ++ if (op->flags & TX_ANNOUNCE) + bcm_can_tx(op); +- if (op->count) +- op->count--; +- } + + if (op->flags & STARTTIMER) + bcm_tx_start_timer(op); +@@ -1234,7 +1263,7 @@ static int 
bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + bcm_rx_handler, op, "bcm", sk); + if (err) { + /* this bcm rx op is broken -> remove it */ +- list_del(&op->list); ++ list_del_rcu(&op->list); + bcm_remove_op(op); + return err; + } +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 359e24c3f22cab..6afea369ca2132 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -897,6 +897,10 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) + pkt_dev->nr_labels = 0; + do { + __u32 tmp; ++ ++ if (n >= MAX_MPLS_LABELS) ++ return -E2BIG; ++ + len = hex32_arg(&buffer[i], 8, &tmp); + if (len <= 0) + return len; +@@ -908,8 +912,6 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) + return -EFAULT; + i++; + n++; +- if (n >= MAX_MPLS_LABELS) +- return -E2BIG; + } while (c == ','); + + pkt_dev->nr_labels = n; +@@ -1875,8 +1877,8 @@ static ssize_t pktgen_thread_write(struct file *file, + i = len; + + /* Read variable name */ +- +- len = strn_len(&user_buffer[i], sizeof(name) - 1); ++ max = min(sizeof(name) - 1, count - i); ++ len = strn_len(&user_buffer[i], max); + if (len < 0) + return len; + +@@ -1906,7 +1908,8 @@ static ssize_t pktgen_thread_write(struct file *file, + if (!strcmp(name, "add_device")) { + char f[32]; + memset(f, 0, 32); +- len = strn_len(&user_buffer[i], sizeof(f) - 1); ++ max = min(sizeof(f) - 1, count - i); ++ len = strn_len(&user_buffer[i], max); + if (len < 0) { + ret = len; + goto out; +diff --git a/net/core/scm.c b/net/core/scm.c +index 737917c7ac6276..431bfb3ea39290 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + + + /* +@@ -85,8 +86,15 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + return -ENOMEM; + *fplp = fpl; + fpl->count = 0; ++ fpl->count_unix = 0; + fpl->max = SCM_MAX_FD; + fpl->user = NULL; ++#if IS_ENABLED(CONFIG_UNIX) ++ fpl->inflight = false; ++ fpl->dead = false; ++ fpl->edges = NULL; ++ INIT_LIST_HEAD(&fpl->vertices); ++#endif + } + fpp = &fpl->fp[fpl->count]; + +@@ -109,6 +117,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + fput(file); + return -EINVAL; + } ++ if (unix_get_socket(file)) ++ fpl->count_unix++; ++ + *fpp++ = file; + fpl->count++; + } +@@ -371,8 +382,14 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) + if (new_fpl) { + for (i = 0; i < fpl->count; i++) + get_file(fpl->fp[i]); ++ + new_fpl->max = new_fpl->count; + new_fpl->user = get_uid(fpl->user); ++#if IS_ENABLED(CONFIG_UNIX) ++ new_fpl->inflight = false; ++ new_fpl->edges = NULL; ++ INIT_LIST_HEAD(&new_fpl->vertices); ++#endif + } + return new_fpl; + } +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c +index eeace9b509cec7..49fd664f50fc01 100644 +--- a/net/ipv4/esp4.c ++++ b/net/ipv4/esp4.c +@@ -118,47 +118,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) + } + + #ifdef CONFIG_INET_ESPINTCP +-struct esp_tcp_sk { +- struct sock *sk; +- struct rcu_head rcu; +-}; +- +-static void esp_free_tcp_sk(struct rcu_head *head) +-{ +- struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); +- +- sock_put(esk->sk); +- kfree(esk); +-} +- + static struct sock *esp_find_tcp_sk(struct xfrm_state *x) + { + struct xfrm_encap_tmpl *encap = x->encap; + struct net *net = xs_net(x); +- struct esp_tcp_sk *esk; + __be16 sport, dport; +- struct sock *nsk; + struct sock *sk; + +- sk = rcu_dereference(x->encap_sk); +- if (sk && sk->sk_state == 
TCP_ESTABLISHED) +- return sk; +- + spin_lock_bh(&x->lock); + sport = encap->encap_sport; + dport = encap->encap_dport; +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (sk && sk == nsk) { +- esk = kmalloc(sizeof(*esk), GFP_ATOMIC); +- if (!esk) { +- spin_unlock_bh(&x->lock); +- return ERR_PTR(-ENOMEM); +- } +- RCU_INIT_POINTER(x->encap_sk, NULL); +- esk->sk = sk; +- call_rcu(&esk->rcu, esp_free_tcp_sk); +- } + spin_unlock_bh(&x->lock); + + sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, +@@ -171,20 +140,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x) + return ERR_PTR(-EINVAL); + } + +- spin_lock_bh(&x->lock); +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (encap->encap_sport != sport || +- encap->encap_dport != dport) { +- sock_put(sk); +- sk = nsk ?: ERR_PTR(-EREMCHG); +- } else if (sk == nsk) { +- sock_put(sk); +- } else { +- rcu_assign_pointer(x->encap_sk, sk); +- } +- spin_unlock_bh(&x->lock); +- + return sk; + } + +@@ -207,6 +162,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + err = espintcp_push_skb(sk, skb); + bh_unlock_sock(sk); + ++ sock_put(sk); ++ + out: + rcu_read_unlock(); + return err; +@@ -391,6 +348,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, + if (IS_ERR(sk)) + return ERR_CAST(sk); + ++ sock_put(sk); ++ + *lenp = htons(len); + esph = (struct ip_esp_hdr *)(lenp + 1); + +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 90ce87ffed4617..7993ff46de23ca 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -829,19 +829,33 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, + } + } + ++ if (cfg->fc_dst_len > 32) { ++ NL_SET_ERR_MSG(extack, "Invalid prefix length"); ++ err = -EINVAL; ++ goto errout; ++ } ++ ++ if (cfg->fc_dst_len < 32 && (ntohl(cfg->fc_dst) << cfg->fc_dst_len)) { ++ NL_SET_ERR_MSG(extack, "Invalid prefix for given prefix length"); ++ err = -EINVAL; ++ goto errout; ++ } ++ + if (cfg->fc_nh_id) { + if (cfg->fc_oif || cfg->fc_gw_family || + cfg->fc_encap || cfg->fc_mp) { + NL_SET_ERR_MSG(extack, + "Nexthop specification and nexthop id are mutually exclusive"); +- return -EINVAL; ++ err = -EINVAL; ++ goto errout; + } + } + + if (has_gw && has_via) { + NL_SET_ERR_MSG(extack, + "Nexthop configuration can not contain both GATEWAY and VIA"); +- return -EINVAL; ++ err = -EINVAL; ++ goto errout; + } + + if (!cfg->fc_table) +diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c +index 513f475c6a534e..298a9944a3d1e8 100644 +--- a/net/ipv4/fib_rules.c ++++ b/net/ipv4/fib_rules.c +@@ -222,9 +222,9 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, + struct nlattr **tb, + struct netlink_ext_ack *extack) + { +- struct net *net = sock_net(skb->sk); ++ struct fib4_rule *rule4 = (struct fib4_rule *)rule; ++ struct net *net = rule->fr_net; + int err = -EINVAL; +- struct fib4_rule *rule4 = (struct fib4_rule *) rule; + + if (!inet_validate_dscp(frh->tos)) { + NL_SET_ERR_MSG(extack, +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index 77b97c48da5ea8..fa54b36b241ac0 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1192,22 +1192,6 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, + return 0; + } + +-static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack) +-{ +- if (plen > KEYLENGTH) { +- NL_SET_ERR_MSG(extack, "Invalid prefix length"); +- return 
false; +- } +- +- if ((plen < KEYLENGTH) && (key << plen)) { +- NL_SET_ERR_MSG(extack, +- "Invalid prefix for given prefix length"); +- return false; +- } +- +- return true; +-} +- + static void fib_remove_alias(struct trie *t, struct key_vector *tp, + struct key_vector *l, struct fib_alias *old); + +@@ -1228,9 +1212,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb, + + key = ntohl(cfg->fc_dst); + +- if (!fib_valid_key_len(key, plen, extack)) +- return -EINVAL; +- + pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); + + fi = fib_create_info(cfg, extack); +@@ -1723,9 +1704,6 @@ int fib_table_delete(struct net *net, struct fib_table *tb, + + key = ntohl(cfg->fc_dst); + +- if (!fib_valid_key_len(key, plen, extack)) +- return -EINVAL; +- + l = fib_find_node(t, &tp, key); + if (!l) + return -ESRCH; +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c +index 7967ff7e02f794..60e81f6b1c6d4e 100644 +--- a/net/ipv4/inet_hashtables.c ++++ b/net/ipv4/inet_hashtables.c +@@ -1231,22 +1231,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) + { + unsigned int locksz = sizeof(spinlock_t); + unsigned int i, nblocks = 1; ++ spinlock_t *ptr = NULL; + +- if (locksz != 0) { +- /* allocate 2 cache lines or at least one spinlock per cpu */ +- nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); +- nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); ++ if (locksz == 0) ++ goto set_mask; + +- /* no more locks than number of hash buckets */ +- nblocks = min(nblocks, hashinfo->ehash_mask + 1); ++ /* Allocate 2 cache lines or at least one spinlock per cpu. */ ++ nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus(); + +- hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); +- if (!hashinfo->ehash_locks) +- return -ENOMEM; ++ /* At least one page per NUMA node. */ ++ nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz); ++ ++ nblocks = roundup_pow_of_two(nblocks); ++ ++ /* No more locks than number of hash buckets. */ ++ nblocks = min(nblocks, hashinfo->ehash_mask + 1); + +- for (i = 0; i < nblocks; i++) +- spin_lock_init(&hashinfo->ehash_locks[i]); ++ if (num_online_nodes() > 1) { ++ /* Use vmalloc() to allow NUMA policy to spread pages ++ * on all available nodes if desired. 
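++ *
++ * Worked sizing example (illustrative numbers, not from this
++ * patch): 64-byte cache lines and 4-byte spinlocks give
++ * 2 * 64 / 4 = 32 locks per CPU, so 16 possible CPUs yield 512;
++ * two online nodes with 4096-byte pages raise the floor to
++ * 2 * 4096 / 4 = 2048; roundup_pow_of_two() keeps 2048, which is
++ * then capped at ehash_mask + 1.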
++ */ ++ ptr = vmalloc_array(nblocks, locksz); ++ } ++ if (!ptr) { ++ ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL); ++ if (!ptr) ++ return -ENOMEM; + } ++ for (i = 0; i < nblocks; i++) ++ spin_lock_init(&ptr[i]); ++ hashinfo->ehash_locks = ptr; ++set_mask: + hashinfo->ehash_locks_mask = nblocks - 1; + return 0; + } +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 890c15510b4210..f261e29adc7c2e 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -140,7 +140,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info, + const struct iphdr *iph; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; +- unsigned int data_len = 0; + struct ip_tunnel *t; + + if (tpi->proto == htons(ETH_P_TEB)) +@@ -181,7 +180,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info, + case ICMP_TIME_EXCEEDED: + if (code != ICMP_EXC_TTL) + return 0; +- data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ + break; + + case ICMP_REDIRECT: +@@ -189,10 +187,16 @@ static int ipgre_err(struct sk_buff *skb, u32 info, + } + + #if IS_ENABLED(CONFIG_IPV6) +- if (tpi->proto == htons(ETH_P_IPV6) && +- !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, +- type, data_len)) +- return 0; ++ if (tpi->proto == htons(ETH_P_IPV6)) { ++ unsigned int data_len = 0; ++ ++ if (type == ICMP_TIME_EXCEEDED) ++ data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ ++ ++ if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, ++ type, data_len)) ++ return 0; ++ } + #endif + + if (t->parms.iph.daddr == 0 || +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 10d38ec0ff5acd..a172248b667837 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -425,6 +425,20 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr + return false; + } + ++static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count) ++{ ++ tp->delivered_ce += ecn_count; ++} ++ ++/* Updates the delivered and delivered_ce counts */ ++static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, ++ bool ece_ack) ++{ ++ tp->delivered += delivered; ++ if (ece_ack) ++ tcp_count_delivered_ce(tp, delivered); ++} ++ + /* Buffer size and advertised window tuning. + * + * 1. Tuning sk->sk_sndbuf, when connection enters established state. +@@ -1137,15 +1151,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb) + } + } + +-/* Updates the delivered and delivered_ce counts */ +-static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, +- bool ece_ack) +-{ +- tp->delivered += delivered; +- if (ece_ack) +- tp->delivered_ce += delivered; +-} +- + /* This procedure tags the retransmission queue when SACKs arrive. + * + * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). 
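A stand-alone sketch of the accounting pattern that tcp_count_delivered() and tcp_count_delivered_ce() above implement; struct tcp_counters here is a stripped-down stand-in for the kernel's tcp_sock, not the real layout:

#include <stdbool.h>

struct tcp_counters {
	unsigned int delivered;		/* all newly acked/sacked segments */
	unsigned int delivered_ce;	/* subset delivered while ECE was set */
};

/* Both counters move together, so an ECE-marked ACK can never bump
 * delivered without also bumping delivered_ce. */
static void count_delivered(struct tcp_counters *tp, unsigned int pkts,
			    bool ece_ack)
{
	tp->delivered += pkts;
	if (ece_ack)
		tp->delivered_ce += pkts;
}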
+@@ -3816,12 +3821,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) + } + } + +-static inline void tcp_in_ack_event(struct sock *sk, u32 flags) ++static void tcp_in_ack_event(struct sock *sk, int flag) + { + const struct inet_connection_sock *icsk = inet_csk(sk); + +- if (icsk->icsk_ca_ops->in_ack_event) +- icsk->icsk_ca_ops->in_ack_event(sk, flags); ++ if (icsk->icsk_ca_ops->in_ack_event) { ++ u32 ack_ev_flags = 0; ++ ++ if (flag & FLAG_WIN_UPDATE) ++ ack_ev_flags |= CA_ACK_WIN_UPDATE; ++ if (flag & FLAG_SLOWPATH) { ++ ack_ev_flags |= CA_ACK_SLOWPATH; ++ if (flag & FLAG_ECE) ++ ack_ev_flags |= CA_ACK_ECE; ++ } ++ ++ icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags); ++ } + } + + /* Congestion control has updated the cwnd already. So if we're in +@@ -3938,12 +3954,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + tcp_snd_una_update(tp, ack); + flag |= FLAG_WIN_UPDATE; + +- tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); +- + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); + } else { +- u32 ack_ev_flags = CA_ACK_SLOWPATH; +- + if (ack_seq != TCP_SKB_CB(skb)->end_seq) + flag |= FLAG_DATA; + else +@@ -3955,19 +3967,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, + &sack_state); + +- if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { ++ if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) + flag |= FLAG_ECE; +- ack_ev_flags |= CA_ACK_ECE; +- } + + if (sack_state.sack_delivered) + tcp_count_delivered(tp, sack_state.sack_delivered, + flag & FLAG_ECE); +- +- if (flag & FLAG_WIN_UPDATE) +- ack_ev_flags |= CA_ACK_WIN_UPDATE; +- +- tcp_in_ack_event(sk, ack_ev_flags); + } + + /* This is a deviation from RFC3168 since it states that: +@@ -3994,6 +3999,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + + tcp_rack_update_reo_wnd(sk, &rs); + ++ tcp_in_ack_event(sk, flag); ++ + if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); + +@@ -4025,6 +4032,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + return 1; + + no_queue: ++ tcp_in_ack_event(sk, flag); + /* If data was DSACKed, see if we can undo a cwnd reduction. 
*/ + if (flag & FLAG_DSACKING_ACK) { + tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 62bb9651133c4d..7e4c8628cf9835 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -135,47 +135,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) + } + + #ifdef CONFIG_INET6_ESPINTCP +-struct esp_tcp_sk { +- struct sock *sk; +- struct rcu_head rcu; +-}; +- +-static void esp_free_tcp_sk(struct rcu_head *head) +-{ +- struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); +- +- sock_put(esk->sk); +- kfree(esk); +-} +- + static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) + { + struct xfrm_encap_tmpl *encap = x->encap; + struct net *net = xs_net(x); +- struct esp_tcp_sk *esk; + __be16 sport, dport; +- struct sock *nsk; + struct sock *sk; + +- sk = rcu_dereference(x->encap_sk); +- if (sk && sk->sk_state == TCP_ESTABLISHED) +- return sk; +- + spin_lock_bh(&x->lock); + sport = encap->encap_sport; + dport = encap->encap_dport; +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (sk && sk == nsk) { +- esk = kmalloc(sizeof(*esk), GFP_ATOMIC); +- if (!esk) { +- spin_unlock_bh(&x->lock); +- return ERR_PTR(-ENOMEM); +- } +- RCU_INIT_POINTER(x->encap_sk, NULL); +- esk->sk = sk; +- call_rcu(&esk->rcu, esp_free_tcp_sk); +- } + spin_unlock_bh(&x->lock); + + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6, +@@ -188,20 +157,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) + return ERR_PTR(-EINVAL); + } + +- spin_lock_bh(&x->lock); +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (encap->encap_sport != sport || +- encap->encap_dport != dport) { +- sock_put(sk); +- sk = nsk ?: ERR_PTR(-EREMCHG); +- } else if (sk == nsk) { +- sock_put(sk); +- } else { +- rcu_assign_pointer(x->encap_sk, sk); +- } +- spin_unlock_bh(&x->lock); +- + return sk; + } + +@@ -224,6 +179,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + err = espintcp_push_skb(sk, skb); + bh_unlock_sock(sk); + ++ sock_put(sk); ++ + out: + rcu_read_unlock(); + return err; +@@ -427,6 +384,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x, + if (IS_ERR(sk)) + return ERR_CAST(sk); + ++ sock_put(sk); ++ + *lenp = htons(len); + esph = (struct ip_esp_hdr *)(lenp + 1); + +diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c +index 6eeab21512ba98..e0f0c5f8cccdaa 100644 +--- a/net/ipv6/fib6_rules.c ++++ b/net/ipv6/fib6_rules.c +@@ -350,9 +350,9 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, + struct nlattr **tb, + struct netlink_ext_ack *extack) + { ++ struct fib6_rule *rule6 = (struct fib6_rule *)rule; ++ struct net *net = rule->fr_net; + int err = -EINVAL; +- struct net *net = sock_net(skb->sk); +- struct fib6_rule *rule6 = (struct fib6_rule *) rule; + + if (!inet_validate_dscp(frh->tos)) { + NL_SET_ERR_MSG(extack, +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index c86d5dca29df01..28777b14224048 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1452,6 +1452,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, + } + v6_cork->hop_limit = ipc6->hlimit; + v6_cork->tclass = ipc6->tclass; ++ v6_cork->dontfrag = ipc6->dontfrag; + if (rt->dst.flags & DST_XFRM_TUNNEL) + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 
+ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); +@@ -1485,7 +1486,7 @@ static int __ip6_append_data(struct sock *sk, + int getfrag(void *from, char *to, int offset, + int len, int odd, struct sk_buff *skb), + void *from, size_t length, int transhdrlen, +- unsigned int flags, struct ipcm6_cookie *ipc6) ++ unsigned int flags) + { + struct sk_buff *skb, *skb_prev = NULL; + struct inet_cork *cork = &cork_full->base; +@@ -1541,7 +1542,7 @@ static int __ip6_append_data(struct sock *sk, + if (headersize + transhdrlen > mtu) + goto emsgsize; + +- if (cork->length + length > mtu - headersize && ipc6->dontfrag && ++ if (cork->length + length > mtu - headersize && v6_cork->dontfrag && + (sk->sk_protocol == IPPROTO_UDP || + sk->sk_protocol == IPPROTO_ICMPV6 || + sk->sk_protocol == IPPROTO_RAW)) { +@@ -1913,7 +1914,7 @@ int ip6_append_data(struct sock *sk, + + return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork, + &np->cork, sk_page_frag(sk), getfrag, +- from, length, transhdrlen, flags, ipc6); ++ from, length, transhdrlen, flags); + } + EXPORT_SYMBOL_GPL(ip6_append_data); + +@@ -2118,7 +2119,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, + err = __ip6_append_data(sk, &queue, cork, &v6_cork, + ¤t->task_frag, getfrag, from, + length + exthdrlen, transhdrlen + exthdrlen, +- flags, ipc6); ++ flags); + if (err) { + __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); + return ERR_PTR(err); +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c +index cc25fec44f8502..19f3de3c24ef1c 100644 +--- a/net/llc/af_llc.c ++++ b/net/llc/af_llc.c +@@ -888,15 +888,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + if (sk->sk_type != SOCK_STREAM) + goto copy_uaddr; + ++ /* Partial read */ ++ if (used + offset < skb_len) ++ continue; ++ + if (!(flags & MSG_PEEK)) { + skb_unlink(skb, &sk->sk_receive_queue); + kfree_skb(skb); + *seq = 0; + } +- +- /* Partial read */ +- if (used + offset < skb_len) +- continue; + } while (len > 0); + + out: +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 42e2c84ed2484a..2c7e139efd532f 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -2959,7 +2959,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, + if (tx) + ieee80211_flush_queues(local, sdata, false); + +- drv_mgd_complete_tx(sdata->local, sdata, &info); ++ if (tx || frame_buf) ++ drv_mgd_complete_tx(sdata->local, sdata, &info); + + /* clear AP addr only after building the needed mgmt frames */ + eth_zero_addr(sdata->deflink.u.mgd.bssid); +@@ -7821,7 +7822,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, + ieee80211_report_disconnect(sdata, frame_buf, + sizeof(frame_buf), true, + req->reason_code, false); +- drv_mgd_complete_tx(sdata->local, sdata, &info); + return 0; + } + +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c +index 559665467b04dd..1d5de1d9f008d8 100644 +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -621,7 +621,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { + .data = &nf_conntrack_max, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_INT_MAX, + }, + [NF_SYSCTL_CT_COUNT] = { + .procname = "nf_conntrack_count", +@@ -657,7 +659,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { + .data = &nf_ct_expect_max, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ 
.proc_handler = proc_dointvec_minmax, ++ .extra1 = SYSCTL_ONE, ++ .extra2 = SYSCTL_INT_MAX, + }, + [NF_SYSCTL_CT_ACCT] = { + .procname = "nf_conntrack_acct", +@@ -951,7 +955,9 @@ static struct ctl_table nf_ct_netfilter_table[] = { + .data = &nf_conntrack_max, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_INT_MAX, + }, + { } + }; +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c +index 5d9cccfac4a155..afcb83d469ff60 100644 +--- a/net/sched/sch_hfsc.c ++++ b/net/sched/sch_hfsc.c +@@ -175,6 +175,11 @@ struct hfsc_sched { + + #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ + ++static bool cl_in_el_or_vttree(struct hfsc_class *cl) ++{ ++ return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) || ++ ((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node)); ++} + + /* + * eligible tree holds backlogged classes being sorted by their eligible times. +@@ -1040,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + if (cl == NULL) + return -ENOBUFS; + ++ RB_CLEAR_NODE(&cl->el_node); ++ + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); + if (err) { + kfree(cl); +@@ -1570,7 +1577,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + return err; + } + +- if (first && !cl->cl_nactive) { ++ sch->qstats.backlog += len; ++ sch->q.qlen++; ++ ++ if (first && !cl_in_el_or_vttree(cl)) { + if (cl->cl_flags & HFSC_RSC) + init_ed(cl, len); + if (cl->cl_flags & HFSC_FSC) +@@ -1585,9 +1595,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + + } + +- sch->qstats.backlog += len; +- sch->q.qlen++; +- + return NET_XMIT_SUCCESS; + } + +diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c +index dbcc72b43d0c08..d44d7f427fc948 100644 +--- a/net/smc/smc_pnet.c ++++ b/net/smc/smc_pnet.c +@@ -1084,14 +1084,16 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, + struct smc_init_info *ini) + { + u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; ++ struct net_device *base_ndev; + struct net *net; + +- ndev = pnet_find_base_ndev(ndev); ++ base_ndev = pnet_find_base_ndev(ndev); + net = dev_net(ndev); +- if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, ++ if (smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, + ndev_pnetid) && ++ smc_pnet_find_ndev_pnetid_by_table(base_ndev, ndev_pnetid) && + smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { +- smc_pnet_find_rdma_dev(ndev, ini); ++ smc_pnet_find_rdma_dev(base_ndev, ini); + return; /* pnetid could not be determined */ + } + _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index 142ee6554848a6..4ffb2bcaf3648e 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -275,9 +275,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, + old = rcu_dereference_protected(clnt->cl_xprt, + lockdep_is_held(&clnt->cl_lock)); + +- if (!xprt_bound(xprt)) +- clnt->cl_autobind = 1; +- + clnt->cl_timeout = timeout; + rcu_assign_pointer(clnt->cl_xprt, xprt); + spin_unlock(&clnt->cl_lock); +diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c +index 102c3818bc54d4..53bcca365fb1cd 100644 +--- a/net/sunrpc/rpcb_clnt.c ++++ b/net/sunrpc/rpcb_clnt.c +@@ -820,9 +820,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data) + } + + trace_rpcb_setport(child, map->r_status, map->r_port); +- xprt->ops->set_port(xprt, 
map->r_port); +- if (map->r_port) ++ if (map->r_port) { ++ xprt->ops->set_port(xprt, map->r_port); + xprt_set_bound(xprt); ++ } + } + + /* +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 9b45fbdc90cabe..73bc39281ef5f5 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); + + static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) + { ++ if (unlikely(current->flags & PF_EXITING)) ++ return -EINTR; + schedule(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; +diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c +index c524421ec65252..8584893b478510 100644 +--- a/net/tipc/crypto.c ++++ b/net/tipc/crypto.c +@@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, + goto exit; + } + ++ /* Get net to avoid freed tipc_crypto when delete namespace */ ++ get_net(aead->crypto->net); ++ + /* Now, do encrypt */ + rc = crypto_aead_encrypt(req); + if (rc == -EINPROGRESS || rc == -EBUSY) + return rc; + + tipc_bearer_put(b); ++ put_net(aead->crypto->net); + + exit: + kfree(ctx); +@@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err) + kfree(tx_ctx); + tipc_bearer_put(b); + tipc_aead_put(aead); ++ put_net(net); + } + + /** +diff --git a/net/unix/Kconfig b/net/unix/Kconfig +index 28b232f281ab16..8b5d04210d7cf1 100644 +--- a/net/unix/Kconfig ++++ b/net/unix/Kconfig +@@ -16,11 +16,6 @@ config UNIX + + Say Y unless you know what you are doing. + +-config UNIX_SCM +- bool +- depends on UNIX +- default y +- + config AF_UNIX_OOB + bool + depends on UNIX +diff --git a/net/unix/Makefile b/net/unix/Makefile +index 20491825b4d0da..4ddd125c4642c7 100644 +--- a/net/unix/Makefile ++++ b/net/unix/Makefile +@@ -11,5 +11,3 @@ unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o + + obj-$(CONFIG_UNIX_DIAG) += unix_diag.o + unix_diag-y := diag.o +- +-obj-$(CONFIG_UNIX_SCM) += scm.o +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index ab23c8d72122b2..236a2cd2bc93d2 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -117,8 +117,6 @@ + #include + #include + +-#include "scm.h" +- + static atomic_long_t unix_nr_socks; + static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; + static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; +@@ -980,11 +978,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, + sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen); + sk->sk_destruct = unix_sock_destructor; + u = unix_sk(sk); +- u->inflight = 0; ++ u->listener = NULL; ++ u->vertex = NULL; + u->path.dentry = NULL; + u->path.mnt = NULL; + spin_lock_init(&u->lock); +- INIT_LIST_HEAD(&u->link); + mutex_init(&u->iolock); /* single task reading lock */ + mutex_init(&u->bindlock); /* single task binding lock */ + init_waitqueue_head(&u->peer_wait); +@@ -1583,6 +1581,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, + newsk->sk_type = sk->sk_type; + init_peercred(newsk); + newu = unix_sk(newsk); ++ newu->listener = other; + RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); + otheru = unix_sk(other); + +@@ -1678,8 +1677,8 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) + { + struct sock *sk = sock->sk; +- struct sock *tsk; + struct sk_buff *skb; ++ struct sock *tsk; + int err; + + err = -EOPNOTSUPP; +@@ -1709,6 +1708,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags, + + /* attach accepted sock to socket */ + 
unix_state_lock(tsk); ++ unix_update_edges(unix_sk(tsk)); + newsock->state = SS_CONNECTED; + unix_sock_inherit_flags(sock, newsock); + sock_graft(tsk, newsock); +@@ -1752,51 +1752,65 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) + return err; + } + ++/* The "user->unix_inflight" variable is protected by the garbage ++ * collection lock, and we just read it locklessly here. If you go ++ * over the limit, there might be a tiny race in actually noticing ++ * it across threads. Tough. ++ */ ++static inline bool too_many_unix_fds(struct task_struct *p) ++{ ++ struct user_struct *user = current_user(); ++ ++ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) ++ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); ++ return false; ++} ++ ++static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) ++{ ++ if (too_many_unix_fds(current)) ++ return -ETOOMANYREFS; ++ ++ /* Need to duplicate file references for the sake of garbage ++ * collection. Otherwise a socket in the fps might become a ++ * candidate for GC while the skb is not yet queued. ++ */ ++ UNIXCB(skb).fp = scm_fp_dup(scm->fp); ++ if (!UNIXCB(skb).fp) ++ return -ENOMEM; ++ ++ if (unix_prepare_fpl(UNIXCB(skb).fp)) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) ++{ ++ scm->fp = UNIXCB(skb).fp; ++ UNIXCB(skb).fp = NULL; ++ ++ unix_destroy_fpl(scm->fp); ++} ++ + static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) + { + scm->fp = scm_fp_dup(UNIXCB(skb).fp); ++} + +- /* +- * Garbage collection of unix sockets starts by selecting a set of +- * candidate sockets which have reference only from being in flight +- * (total_refs == inflight_refs). This condition is checked once during +- * the candidate collection phase, and candidates are marked as such, so +- * that non-candidates can later be ignored. While inflight_refs is +- * protected by unix_gc_lock, total_refs (file count) is not, hence this +- * is an instantaneous decision. +- * +- * Once a candidate, however, the socket must not be reinstalled into a +- * file descriptor while the garbage collection is in progress. +- * +- * If the above conditions are met, then the directed graph of +- * candidates (*) does not change while unix_gc_lock is held. +- * +- * Any operations that changes the file count through file descriptors +- * (dup, close, sendmsg) does not change the graph since candidates are +- * not installed in fds. +- * +- * Dequeing a candidate via recvmsg would install it into an fd, but +- * that takes unix_gc_lock to decrement the inflight count, so it's +- * serialized with garbage collection. +- * +- * MSG_PEEK is special in that it does not change the inflight count, +- * yet does install the socket into an fd. The following lock/unlock +- * pair is to ensure serialization with garbage collection. It must be +- * done between incrementing the file count and installing the file into +- * an fd. +- * +- * If garbage collection starts after the barrier provided by the +- * lock/unlock, then it will see the elevated refcount and not mark this +- * as a candidate. If a garbage collection is already in progress +- * before the file count was incremented, then the lock/unlock pair will +- * ensure that garbage collection is finished before progressing to +- * installing the fd. +- * +- * (*) A -> B where B is on the queue of A or B is on the queue of C +- * which is on the queue of listening socket A. 
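The comment above describes an empty lock/unlock pair used purely as a barrier against a concurrent GC pass; a self-contained userspace sketch of that idiom (the pthread names here are illustrative, not from the patch):

#include <pthread.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* The critical section is intentionally empty: taking and dropping
 * the lock does no work of its own, but it cannot complete while a
 * GC pass holds gc_lock, so the caller is ordered after any
 * in-flight pass. */
static void gc_barrier(void)
{
	pthread_mutex_lock(&gc_lock);
	pthread_mutex_unlock(&gc_lock);
}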
+- */ +- spin_lock(&unix_gc_lock); +- spin_unlock(&unix_gc_lock); ++static void unix_destruct_scm(struct sk_buff *skb) ++{ ++ struct scm_cookie scm; ++ ++ memset(&scm, 0, sizeof(scm)); ++ scm.pid = UNIXCB(skb).pid; ++ if (UNIXCB(skb).fp) ++ unix_detach_fds(&scm, skb); ++ ++ /* Alas, it calls VFS */ ++ /* So fscking what? fput() had been SMP-safe since the last Summer */ ++ scm_destroy(&scm); ++ sock_wfree(skb); + } + + static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) +@@ -1855,8 +1869,10 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb) + struct scm_fp_list *fp = UNIXCB(skb).fp; + struct unix_sock *u = unix_sk(sk); + +- if (unlikely(fp && fp->count)) ++ if (unlikely(fp && fp->count)) { + atomic_add(fp->count, &u->scm_stat.nr_fds); ++ unix_add_edges(fp, u); ++ } + } + + static void scm_stat_del(struct sock *sk, struct sk_buff *skb) +@@ -1864,8 +1880,10 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb) + struct scm_fp_list *fp = UNIXCB(skb).fp; + struct unix_sock *u = unix_sk(sk); + +- if (unlikely(fp && fp->count)) ++ if (unlikely(fp && fp->count)) { + atomic_sub(fp->count, &u->scm_stat.nr_fds); ++ unix_del_edges(fp); ++ } + } + + /* +@@ -1885,11 +1903,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, + long timeo; + int err; + +- wait_for_unix_gc(); + err = scm_send(sock, msg, &scm, false); + if (err < 0) + return err; + ++ wait_for_unix_gc(scm.fp); ++ + err = -EOPNOTSUPP; + if (msg->msg_flags&MSG_OOB) + goto out; +@@ -2157,11 +2176,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, + bool fds_sent = false; + int data_len; + +- wait_for_unix_gc(); + err = scm_send(sock, msg, &scm, false); + if (err < 0) + return err; + ++ wait_for_unix_gc(scm.fp); ++ + err = -EOPNOTSUPP; + if (msg->msg_flags & MSG_OOB) { + #if IS_ENABLED(CONFIG_AF_UNIX_OOB) +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index 2a758531e10271..23efb78fe9ef4b 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -81,278 +81,551 @@ + #include + #include + +-#include "scm.h" ++struct unix_sock *unix_get_socket(struct file *filp) ++{ ++ struct inode *inode = file_inode(filp); ++ ++ /* Socket ? */ ++ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { ++ struct socket *sock = SOCKET_I(inode); ++ const struct proto_ops *ops; ++ struct sock *sk = sock->sk; + +-/* Internal data structures and random procedures: */ ++ ops = READ_ONCE(sock->ops); + +-static LIST_HEAD(gc_candidates); +-static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); ++ /* PF_UNIX ? */ ++ if (sk && ops && ops->family == PF_UNIX) ++ return unix_sk(sk); ++ } + +-static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), +- struct sk_buff_head *hitlist) ++ return NULL; ++} ++ ++static struct unix_vertex *unix_edge_successor(struct unix_edge *edge) + { +- struct sk_buff *skb; +- struct sk_buff *next; +- +- spin_lock(&x->sk_receive_queue.lock); +- skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { +- /* Do we have file descriptors ? 
*/ +- if (UNIXCB(skb).fp) { +- bool hit = false; +- /* Process the descriptors of this socket */ +- int nfd = UNIXCB(skb).fp->count; +- struct file **fp = UNIXCB(skb).fp->fp; +- +- while (nfd--) { +- /* Get the socket the fd matches if it indeed does so */ +- struct sock *sk = unix_get_socket(*fp++); +- +- if (sk) { +- struct unix_sock *u = unix_sk(sk); +- +- /* Ignore non-candidates, they could +- * have been added to the queues after +- * starting the garbage collection +- */ +- if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { +- hit = true; +- +- func(u); +- } +- } +- } +- if (hit && hitlist != NULL) { +- __skb_unlink(skb, &x->sk_receive_queue); +- __skb_queue_tail(hitlist, skb); +- } +- } ++ /* If an embryo socket has a fd, ++ * the listener indirectly holds the fd's refcnt. ++ */ ++ if (edge->successor->listener) ++ return unix_sk(edge->successor->listener)->vertex; ++ ++ return edge->successor->vertex; ++} ++ ++static bool unix_graph_maybe_cyclic; ++static bool unix_graph_grouped; ++ ++static void unix_update_graph(struct unix_vertex *vertex) ++{ ++ /* If the receiver socket is not inflight, no cyclic ++ * reference could be formed. ++ */ ++ if (!vertex) ++ return; ++ ++ unix_graph_maybe_cyclic = true; ++ unix_graph_grouped = false; ++} ++ ++static LIST_HEAD(unix_unvisited_vertices); ++ ++enum unix_vertex_index { ++ UNIX_VERTEX_INDEX_MARK1, ++ UNIX_VERTEX_INDEX_MARK2, ++ UNIX_VERTEX_INDEX_START, ++}; ++ ++static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1; ++ ++static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge) ++{ ++ struct unix_vertex *vertex = edge->predecessor->vertex; ++ ++ if (!vertex) { ++ vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry); ++ vertex->index = unix_vertex_unvisited_index; ++ vertex->out_degree = 0; ++ INIT_LIST_HEAD(&vertex->edges); ++ INIT_LIST_HEAD(&vertex->scc_entry); ++ ++ list_move_tail(&vertex->entry, &unix_unvisited_vertices); ++ edge->predecessor->vertex = vertex; + } +- spin_unlock(&x->sk_receive_queue.lock); ++ ++ vertex->out_degree++; ++ list_add_tail(&edge->vertex_entry, &vertex->edges); ++ ++ unix_update_graph(unix_edge_successor(edge)); + } + +-static void scan_children(struct sock *x, void (*func)(struct unix_sock *), +- struct sk_buff_head *hitlist) ++static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge) + { +- if (x->sk_state != TCP_LISTEN) { +- scan_inflight(x, func, hitlist); +- } else { +- struct sk_buff *skb; +- struct sk_buff *next; +- struct unix_sock *u; +- LIST_HEAD(embryos); ++ struct unix_vertex *vertex = edge->predecessor->vertex; + +- /* For a listening socket collect the queued embryos +- * and perform a scan on them as well. +- */ +- spin_lock(&x->sk_receive_queue.lock); +- skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { +- u = unix_sk(skb->sk); ++ if (!fpl->dead) ++ unix_update_graph(unix_edge_successor(edge)); + +- /* An embryo cannot be in-flight, so it's safe +- * to use the list link. 
+- */ +- BUG_ON(!list_empty(&u->link)); +- list_add_tail(&u->link, &embryos); +- } +- spin_unlock(&x->sk_receive_queue.lock); ++ list_del(&edge->vertex_entry); ++ vertex->out_degree--; + +- while (!list_empty(&embryos)) { +- u = list_entry(embryos.next, struct unix_sock, link); +- scan_inflight(&u->sk, func, hitlist); +- list_del_init(&u->link); +- } ++ if (!vertex->out_degree) { ++ edge->predecessor->vertex = NULL; ++ list_move_tail(&vertex->entry, &fpl->vertices); + } + } + +-static void dec_inflight(struct unix_sock *usk) ++static void unix_free_vertices(struct scm_fp_list *fpl) + { +- usk->inflight--; ++ struct unix_vertex *vertex, *next_vertex; ++ ++ list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) { ++ list_del(&vertex->entry); ++ kfree(vertex); ++ } + } + +-static void inc_inflight(struct unix_sock *usk) ++static DEFINE_SPINLOCK(unix_gc_lock); ++unsigned int unix_tot_inflight; ++ ++void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver) + { +- usk->inflight++; ++ int i = 0, j = 0; ++ ++ spin_lock(&unix_gc_lock); ++ ++ if (!fpl->count_unix) ++ goto out; ++ ++ do { ++ struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); ++ struct unix_edge *edge; ++ ++ if (!inflight) ++ continue; ++ ++ edge = fpl->edges + i++; ++ edge->predecessor = inflight; ++ edge->successor = receiver; ++ ++ unix_add_edge(fpl, edge); ++ } while (i < fpl->count_unix); ++ ++ receiver->scm_stat.nr_unix_fds += fpl->count_unix; ++ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix); ++out: ++ WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count); ++ ++ spin_unlock(&unix_gc_lock); ++ ++ fpl->inflight = true; ++ ++ unix_free_vertices(fpl); + } + +-static void inc_inflight_move_tail(struct unix_sock *u) ++void unix_del_edges(struct scm_fp_list *fpl) + { +- u->inflight++; ++ struct unix_sock *receiver; ++ int i = 0; ++ ++ spin_lock(&unix_gc_lock); + +- /* If this still might be part of a cycle, move it to the end +- * of the list, so that it's checked even if it was already +- * passed over ++ if (!fpl->count_unix) ++ goto out; ++ ++ do { ++ struct unix_edge *edge = fpl->edges + i++; ++ ++ unix_del_edge(fpl, edge); ++ } while (i < fpl->count_unix); ++ ++ if (!fpl->dead) { ++ receiver = fpl->edges[0].successor; ++ receiver->scm_stat.nr_unix_fds -= fpl->count_unix; ++ } ++ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix); ++out: ++ WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count); ++ ++ spin_unlock(&unix_gc_lock); ++ ++ fpl->inflight = false; ++} ++ ++void unix_update_edges(struct unix_sock *receiver) ++{ ++ /* nr_unix_fds is only updated under unix_state_lock(). ++ * If it's 0 here, the embryo socket is not part of the ++ * inflight graph, and GC will not see it, so no lock needed. 
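++ * Otherwise the embryo carries inflight fds: the listener's vertex
++ * must be flagged for a fresh cyclic-reference check under
++ * unix_gc_lock before the listener back-pointer is cleared.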
+ */ +- if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) +- list_move_tail(&u->link, &gc_candidates); ++ if (!receiver->scm_stat.nr_unix_fds) { ++ receiver->listener = NULL; ++ } else { ++ spin_lock(&unix_gc_lock); ++ unix_update_graph(unix_sk(receiver->listener)->vertex); ++ receiver->listener = NULL; ++ spin_unlock(&unix_gc_lock); ++ } + } + +-static bool gc_in_progress; +-#define UNIX_INFLIGHT_TRIGGER_GC 16000 ++int unix_prepare_fpl(struct scm_fp_list *fpl) ++{ ++ struct unix_vertex *vertex; ++ int i; ++ ++ if (!fpl->count_unix) ++ return 0; ++ ++ for (i = 0; i < fpl->count_unix; i++) { ++ vertex = kmalloc(sizeof(*vertex), GFP_KERNEL); ++ if (!vertex) ++ goto err; ++ ++ list_add(&vertex->entry, &fpl->vertices); ++ } ++ ++ fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges), ++ GFP_KERNEL_ACCOUNT); ++ if (!fpl->edges) ++ goto err; ++ ++ return 0; + +-void wait_for_unix_gc(void) ++err: ++ unix_free_vertices(fpl); ++ return -ENOMEM; ++} ++ ++void unix_destroy_fpl(struct scm_fp_list *fpl) + { +- /* If number of inflight sockets is insane, +- * force a garbage collect right now. +- * Paired with the WRITE_ONCE() in unix_inflight(), +- * unix_notinflight() and gc_in_progress(). +- */ +- if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && +- !READ_ONCE(gc_in_progress)) +- unix_gc(); +- wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress)); ++ if (fpl->inflight) ++ unix_del_edges(fpl); ++ ++ kvfree(fpl->edges); ++ unix_free_vertices(fpl); + } + +-/* The external entry point: unix_gc() */ +-void unix_gc(void) ++static bool unix_vertex_dead(struct unix_vertex *vertex) + { +- struct sk_buff *next_skb, *skb; ++ struct unix_edge *edge; + struct unix_sock *u; +- struct unix_sock *next; +- struct sk_buff_head hitlist; +- struct list_head cursor; +- LIST_HEAD(not_cycle_list); ++ long total_ref; + +- spin_lock(&unix_gc_lock); ++ list_for_each_entry(edge, &vertex->edges, vertex_entry) { ++ struct unix_vertex *next_vertex = unix_edge_successor(edge); + +- /* Avoid a recursive GC. */ +- if (gc_in_progress) +- goto out; ++ /* The vertex's fd can be received by a non-inflight socket. */ ++ if (!next_vertex) ++ return false; + +- /* Paired with READ_ONCE() in wait_for_unix_gc(). */ +- WRITE_ONCE(gc_in_progress, true); ++ /* The vertex's fd can be received by an inflight socket in ++ * another SCC. ++ */ ++ if (next_vertex->scc_index != vertex->scc_index) ++ return false; ++ } + +- /* First, select candidates for garbage collection. Only +- * in-flight sockets are considered, and from those only ones +- * which don't have any external reference. +- * +- * Holding unix_gc_lock will protect these candidates from +- * being detached, and hence from gaining an external +- * reference. Since there are no possible receivers, all +- * buffers currently on the candidates' queues stay there +- * during the garbage collection. +- * +- * We also know that no new candidate can be added onto the +- * receive queues. Other, non candidate sockets _can_ be +- * added to queue, so we must make sure only to touch +- * candidates. +- * +- * Embryos, though never candidates themselves, affect which +- * candidates are reachable by the garbage collector. Before +- * being added to a listener's queue, an embryo may already +- * receive data carrying SCM_RIGHTS, potentially making the +- * passed socket a candidate that is not yet reachable by the +- * collector. It becomes reachable once the embryo is +- * enqueued. 
Therefore, we must ensure that no SCM-laden +- * embryo appears in a (candidate) listener's queue between +- * consecutive scan_children() calls. +- */ +- list_for_each_entry_safe(u, next, &gc_inflight_list, link) { +- struct sock *sk = &u->sk; +- long total_refs; +- +- total_refs = file_count(sk->sk_socket->file); +- +- BUG_ON(!u->inflight); +- BUG_ON(total_refs < u->inflight); +- if (total_refs == u->inflight) { +- list_move_tail(&u->link, &gc_candidates); +- __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); +- __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); +- +- if (sk->sk_state == TCP_LISTEN) { +- unix_state_lock_nested(sk, U_LOCK_GC_LISTENER); +- unix_state_unlock(sk); ++ /* No receiver exists out of the same SCC. */ ++ ++ edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); ++ u = edge->predecessor; ++ total_ref = file_count(u->sk.sk_socket->file); ++ ++ /* If not close()d, total_ref > out_degree. */ ++ if (total_ref != vertex->out_degree) ++ return false; ++ ++ return true; ++} ++ ++enum unix_recv_queue_lock_class { ++ U_RECVQ_LOCK_NORMAL, ++ U_RECVQ_LOCK_EMBRYO, ++}; ++ ++static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist) ++{ ++ skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist); ++ ++#if IS_ENABLED(CONFIG_AF_UNIX_OOB) ++ if (u->oob_skb) { ++ WARN_ON_ONCE(skb_unref(u->oob_skb)); ++ u->oob_skb = NULL; ++ } ++#endif ++} ++ ++static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist) ++{ ++ struct unix_vertex *vertex; ++ ++ list_for_each_entry_reverse(vertex, scc, scc_entry) { ++ struct sk_buff_head *queue; ++ struct unix_edge *edge; ++ struct unix_sock *u; ++ ++ edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); ++ u = edge->predecessor; ++ queue = &u->sk.sk_receive_queue; ++ ++ spin_lock(&queue->lock); ++ ++ if (u->sk.sk_state == TCP_LISTEN) { ++ struct sk_buff *skb; ++ ++ skb_queue_walk(queue, skb) { ++ struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue; ++ ++ /* listener -> embryo order, the inversion never happens. */ ++ spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO); ++ unix_collect_queue(unix_sk(skb->sk), hitlist); ++ spin_unlock(&embryo_queue->lock); + } ++ } else { ++ unix_collect_queue(u, hitlist); + } ++ ++ spin_unlock(&queue->lock); + } ++} + +- /* Now remove all internal in-flight reference to children of +- * the candidates. +- */ +- list_for_each_entry(u, &gc_candidates, link) +- scan_children(&u->sk, dec_inflight, NULL); ++static bool unix_scc_cyclic(struct list_head *scc) ++{ ++ struct unix_vertex *vertex; ++ struct unix_edge *edge; + +- /* Restore the references for children of all candidates, +- * which have remaining references. Do this recursively, so +- * only those remain, which form cyclic references. +- * +- * Use a "cursor" link, to make the list traversal safe, even +- * though elements might be moved about. ++ /* SCC containing multiple vertices ? */ ++ if (!list_is_singular(scc)) ++ return true; ++ ++ vertex = list_first_entry(scc, typeof(*vertex), scc_entry); ++ ++ /* Self-reference or a embryo-listener circle ? 
*/ ++ list_for_each_entry(edge, &vertex->edges, vertex_entry) { ++ if (unix_edge_successor(edge) == vertex) ++ return true; ++ } ++ ++ return false; ++} ++ ++static LIST_HEAD(unix_visited_vertices); ++static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2; ++ ++static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index, ++ struct sk_buff_head *hitlist) ++{ ++ LIST_HEAD(vertex_stack); ++ struct unix_edge *edge; ++ LIST_HEAD(edge_stack); ++ ++next_vertex: ++ /* Push vertex to vertex_stack and mark it as on-stack ++ * (index >= UNIX_VERTEX_INDEX_START). ++ * The vertex will be popped when finalising SCC later. + */ +- list_add(&cursor, &gc_candidates); +- while (cursor.next != &gc_candidates) { +- u = list_entry(cursor.next, struct unix_sock, link); ++ list_add(&vertex->scc_entry, &vertex_stack); ++ ++ vertex->index = *last_index; ++ vertex->scc_index = *last_index; ++ (*last_index)++; ++ ++ /* Explore neighbour vertices (receivers of the current vertex's fd). */ ++ list_for_each_entry(edge, &vertex->edges, vertex_entry) { ++ struct unix_vertex *next_vertex = unix_edge_successor(edge); ++ ++ if (!next_vertex) ++ continue; ++ ++ if (next_vertex->index == unix_vertex_unvisited_index) { ++ /* Iterative deepening depth first search ++ * ++ * 1. Push a forward edge to edge_stack and set ++ * the successor to vertex for the next iteration. ++ */ ++ list_add(&edge->stack_entry, &edge_stack); + +- /* Move cursor to after the current position. */ +- list_move(&cursor, &u->link); ++ vertex = next_vertex; ++ goto next_vertex; + +- if (u->inflight) { +- list_move_tail(&u->link, ¬_cycle_list); +- __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); +- scan_children(&u->sk, inc_inflight_move_tail, NULL); ++ /* 2. Pop the edge directed to the current vertex ++ * and restore the ancestor for backtracking. ++ */ ++prev_vertex: ++ edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry); ++ list_del_init(&edge->stack_entry); ++ ++ next_vertex = vertex; ++ vertex = edge->predecessor->vertex; ++ ++ /* If the successor has a smaller scc_index, two vertices ++ * are in the same SCC, so propagate the smaller scc_index ++ * to skip SCC finalisation. ++ */ ++ vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); ++ } else if (next_vertex->index != unix_vertex_grouped_index) { ++ /* Loop detected by a back/cross edge. ++ * ++ * The successor is on vertex_stack, so two vertices are in ++ * the same SCC. If the successor has a smaller *scc_index*, ++ * propagate it to skip SCC finalisation. ++ */ ++ vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); ++ } else { ++ /* The successor was already grouped as another SCC */ + } + } +- list_del(&cursor); + +- /* Now gc_candidates contains only garbage. Restore original +- * inflight counters for these as well, and remove the skbuffs +- * which are creating the cycle(s). +- */ +- skb_queue_head_init(&hitlist); +- list_for_each_entry(u, &gc_candidates, link) { +- scan_children(&u->sk, inc_inflight, &hitlist); ++ if (vertex->index == vertex->scc_index) { ++ struct unix_vertex *v; ++ struct list_head scc; ++ bool scc_dead = true; + +-#if IS_ENABLED(CONFIG_AF_UNIX_OOB) +- if (u->oob_skb) { +- kfree_skb(u->oob_skb); +- u->oob_skb = NULL; ++ /* SCC finalised. ++ * ++ * If the scc_index was not updated, all the vertices above on ++ * vertex_stack are in the same SCC. Group them using scc_entry. 
++ */ ++ __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry); ++ ++ list_for_each_entry_reverse(v, &scc, scc_entry) { ++ /* Don't restart DFS from this vertex in unix_walk_scc(). */ ++ list_move_tail(&v->entry, &unix_visited_vertices); ++ ++ /* Mark vertex as off-stack. */ ++ v->index = unix_vertex_grouped_index; ++ ++ if (scc_dead) ++ scc_dead = unix_vertex_dead(v); + } +-#endif ++ ++ if (scc_dead) ++ unix_collect_skb(&scc, hitlist); ++ else if (!unix_graph_maybe_cyclic) ++ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc); ++ ++ list_del(&scc); + } + +- /* not_cycle_list contains those sockets which do not make up a +- * cycle. Restore these to the inflight list. ++ /* Need backtracking ? */ ++ if (!list_empty(&edge_stack)) ++ goto prev_vertex; ++} ++ ++static void unix_walk_scc(struct sk_buff_head *hitlist) ++{ ++ unsigned long last_index = UNIX_VERTEX_INDEX_START; ++ ++ unix_graph_maybe_cyclic = false; ++ ++ /* Visit every vertex exactly once. ++ * __unix_walk_scc() moves visited vertices to unix_visited_vertices. + */ +- while (!list_empty(¬_cycle_list)) { +- u = list_entry(not_cycle_list.next, struct unix_sock, link); +- __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); +- list_move_tail(&u->link, &gc_inflight_list); ++ while (!list_empty(&unix_unvisited_vertices)) { ++ struct unix_vertex *vertex; ++ ++ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); ++ __unix_walk_scc(vertex, &last_index, hitlist); + } + +- spin_unlock(&unix_gc_lock); ++ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices); ++ swap(unix_vertex_unvisited_index, unix_vertex_grouped_index); + +- /* We need io_uring to clean its registered files, ignore all io_uring +- * originated skbs. It's fine as io_uring doesn't keep references to +- * other io_uring instances and so killing all other files in the cycle +- * will put all io_uring references forcing it to go through normal +- * release.path eventually putting registered files. +- */ +- skb_queue_walk_safe(&hitlist, skb, next_skb) { +- if (skb->destructor == io_uring_destruct_scm) { +- __skb_unlink(skb, &hitlist); +- skb_queue_tail(&skb->sk->sk_receive_queue, skb); ++ unix_graph_grouped = true; ++} ++ ++static void unix_walk_scc_fast(struct sk_buff_head *hitlist) ++{ ++ unix_graph_maybe_cyclic = false; ++ ++ while (!list_empty(&unix_unvisited_vertices)) { ++ struct unix_vertex *vertex; ++ struct list_head scc; ++ bool scc_dead = true; ++ ++ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); ++ list_add(&scc, &vertex->scc_entry); ++ ++ list_for_each_entry_reverse(vertex, &scc, scc_entry) { ++ list_move_tail(&vertex->entry, &unix_visited_vertices); ++ ++ if (scc_dead) ++ scc_dead = unix_vertex_dead(vertex); + } ++ ++ if (scc_dead) ++ unix_collect_skb(&scc, hitlist); ++ else if (!unix_graph_maybe_cyclic) ++ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc); ++ ++ list_del(&scc); + } + +- /* Here we are. Hitlist is filled. Die. 
*/ +- __skb_queue_purge(&hitlist); ++ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices); ++} ++ ++static bool gc_in_progress; ++ ++static void __unix_gc(struct work_struct *work) ++{ ++ struct sk_buff_head hitlist; ++ struct sk_buff *skb; + + spin_lock(&unix_gc_lock); + +- /* There could be io_uring registered files, just push them back to +- * the inflight list +- */ +- list_for_each_entry_safe(u, next, &gc_candidates, link) +- list_move_tail(&u->link, &gc_inflight_list); ++ if (!unix_graph_maybe_cyclic) { ++ spin_unlock(&unix_gc_lock); ++ goto skip_gc; ++ } ++ ++ __skb_queue_head_init(&hitlist); ++ ++ if (unix_graph_grouped) ++ unix_walk_scc_fast(&hitlist); ++ else ++ unix_walk_scc(&hitlist); + +- /* All candidates should have been detached by now. */ +- BUG_ON(!list_empty(&gc_candidates)); ++ spin_unlock(&unix_gc_lock); ++ ++ skb_queue_walk(&hitlist, skb) { ++ if (UNIXCB(skb).fp) ++ UNIXCB(skb).fp->dead = true; ++ } + +- /* Paired with READ_ONCE() in wait_for_unix_gc(). */ ++ __skb_queue_purge(&hitlist); ++skip_gc: + WRITE_ONCE(gc_in_progress, false); ++} + +- wake_up(&unix_gc_wait); ++static DECLARE_WORK(unix_gc_work, __unix_gc); + +- out: +- spin_unlock(&unix_gc_lock); ++void unix_gc(void) ++{ ++ WRITE_ONCE(gc_in_progress, true); ++ queue_work(system_unbound_wq, &unix_gc_work); ++} ++ ++#define UNIX_INFLIGHT_TRIGGER_GC 16000 ++#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8) ++ ++void wait_for_unix_gc(struct scm_fp_list *fpl) ++{ ++ /* If number of inflight sockets is insane, ++ * force a garbage collect right now. ++ * ++ * Paired with the WRITE_ONCE() in unix_inflight(), ++ * unix_notinflight(), and __unix_gc(). ++ */ ++ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && ++ !READ_ONCE(gc_in_progress)) ++ unix_gc(); ++ ++ /* Penalise users who want to send AF_UNIX sockets ++ * but whose sockets have not been received yet. ++ */ ++ if (!fpl || !fpl->count_unix || ++ READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER) ++ return; ++ ++ if (READ_ONCE(gc_in_progress)) ++ flush_work(&unix_gc_work); + } +diff --git a/net/unix/scm.c b/net/unix/scm.c +deleted file mode 100644 +index e92f2fad64105d..00000000000000 +--- a/net/unix/scm.c ++++ /dev/null +@@ -1,161 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "scm.h" +- +-unsigned int unix_tot_inflight; +-EXPORT_SYMBOL(unix_tot_inflight); +- +-LIST_HEAD(gc_inflight_list); +-EXPORT_SYMBOL(gc_inflight_list); +- +-DEFINE_SPINLOCK(unix_gc_lock); +-EXPORT_SYMBOL(unix_gc_lock); +- +-struct sock *unix_get_socket(struct file *filp) +-{ +- struct sock *u_sock = NULL; +- struct inode *inode = file_inode(filp); +- +- /* Socket ? */ +- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { +- struct socket *sock = SOCKET_I(inode); +- const struct proto_ops *ops = READ_ONCE(sock->ops); +- struct sock *s = sock->sk; +- +- /* PF_UNIX ? */ +- if (s && ops && ops->family == PF_UNIX) +- u_sock = s; +- } +- +- return u_sock; +-} +-EXPORT_SYMBOL(unix_get_socket); +- +-/* Keep the number of times in flight count for the file +- * descriptor if it is for an AF_UNIX socket. 
+- */ +-void unix_inflight(struct user_struct *user, struct file *fp) +-{ +- struct sock *s = unix_get_socket(fp); +- +- spin_lock(&unix_gc_lock); +- +- if (s) { +- struct unix_sock *u = unix_sk(s); +- +- if (!u->inflight) { +- BUG_ON(!list_empty(&u->link)); +- list_add_tail(&u->link, &gc_inflight_list); +- } else { +- BUG_ON(list_empty(&u->link)); +- } +- u->inflight++; +- /* Paired with READ_ONCE() in wait_for_unix_gc() */ +- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); +- } +- WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1); +- spin_unlock(&unix_gc_lock); +-} +- +-void unix_notinflight(struct user_struct *user, struct file *fp) +-{ +- struct sock *s = unix_get_socket(fp); +- +- spin_lock(&unix_gc_lock); +- +- if (s) { +- struct unix_sock *u = unix_sk(s); +- +- BUG_ON(!u->inflight); +- BUG_ON(list_empty(&u->link)); +- +- u->inflight--; +- if (!u->inflight) +- list_del_init(&u->link); +- /* Paired with READ_ONCE() in wait_for_unix_gc() */ +- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); +- } +- WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1); +- spin_unlock(&unix_gc_lock); +-} +- +-/* +- * The "user->unix_inflight" variable is protected by the garbage +- * collection lock, and we just read it locklessly here. If you go +- * over the limit, there might be a tiny race in actually noticing +- * it across threads. Tough. +- */ +-static inline bool too_many_unix_fds(struct task_struct *p) +-{ +- struct user_struct *user = current_user(); +- +- if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) +- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); +- return false; +-} +- +-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +-{ +- int i; +- +- if (too_many_unix_fds(current)) +- return -ETOOMANYREFS; +- +- /* +- * Need to duplicate file references for the sake of garbage +- * collection. Otherwise a socket in the fps might become a +- * candidate for GC while the skb is not yet queued. +- */ +- UNIXCB(skb).fp = scm_fp_dup(scm->fp); +- if (!UNIXCB(skb).fp) +- return -ENOMEM; +- +- for (i = scm->fp->count - 1; i >= 0; i--) +- unix_inflight(scm->fp->user, scm->fp->fp[i]); +- return 0; +-} +-EXPORT_SYMBOL(unix_attach_fds); +- +-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) +-{ +- int i; +- +- scm->fp = UNIXCB(skb).fp; +- UNIXCB(skb).fp = NULL; +- +- for (i = scm->fp->count-1; i >= 0; i--) +- unix_notinflight(scm->fp->user, scm->fp->fp[i]); +-} +-EXPORT_SYMBOL(unix_detach_fds); +- +-void unix_destruct_scm(struct sk_buff *skb) +-{ +- struct scm_cookie scm; +- +- memset(&scm, 0, sizeof(scm)); +- scm.pid = UNIXCB(skb).pid; +- if (UNIXCB(skb).fp) +- unix_detach_fds(&scm, skb); +- +- /* Alas, it calls VFS */ +- /* So fscking what? 
fput() had been SMP-safe since the last Summer */ +- scm_destroy(&scm); +- sock_wfree(skb); +-} +-EXPORT_SYMBOL(unix_destruct_scm); +- +-void io_uring_destruct_scm(struct sk_buff *skb) +-{ +- unix_destruct_scm(skb); +-} +-EXPORT_SYMBOL(io_uring_destruct_scm); +diff --git a/net/unix/scm.h b/net/unix/scm.h +deleted file mode 100644 +index 5a255a477f1609..00000000000000 +--- a/net/unix/scm.h ++++ /dev/null +@@ -1,10 +0,0 @@ +-#ifndef NET_UNIX_SCM_H +-#define NET_UNIX_SCM_H +- +-extern struct list_head gc_inflight_list; +-extern spinlock_t unix_gc_lock; +- +-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb); +-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb); +- +-#endif +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 68b3f9e7edffd4..2edb0f868c5738 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1603,6 +1603,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) + struct xfrm_policy *delpol; + struct hlist_head *chain; + ++ /* Sanitize mark before store */ ++ policy->mark.v &= policy->mark.m; ++ + spin_lock_bh(&net->xfrm.xfrm_policy_lock); + chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); + if (chain) +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index 8a6e8656d014f2..86029cf5358c7a 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -754,9 +754,6 @@ int __xfrm_state_delete(struct xfrm_state *x) + net->xfrm.state_num--; + spin_unlock(&net->xfrm.xfrm_state_lock); + +- if (x->encap_sk) +- sock_put(rcu_dereference_raw(x->encap_sk)); +- + xfrm_dev_state_delete(x); + + /* All xfrm_state objects are created by xfrm_state_alloc. +@@ -1478,6 +1475,9 @@ static void __xfrm_state_insert(struct xfrm_state *x) + + list_add(&x->km.all, &net->xfrm.state_all); + ++ /* Sanitize mark before store */ ++ x->mark.v &= x->mark.m; ++ + h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, + x->props.reqid, x->props.family); + XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, +diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile +index 3fa16412db15ca..927d72659173e0 100644 +--- a/samples/bpf/Makefile ++++ b/samples/bpf/Makefile +@@ -392,7 +392,7 @@ $(obj)/%.o: $(src)/%.c + @echo " CLANG-bpf " $@ + $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \ + -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \ +- -I$(LIBBPF_INCLUDE) \ ++ -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \ + -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \ + -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \ + -Wno-gnu-variable-sized-type-not-at-end \ +diff --git a/scripts/config b/scripts/config +index ff88e2faefd35c..ea475c07de283e 100755 +--- a/scripts/config ++++ b/scripts/config +@@ -32,6 +32,7 @@ commands: + Disable option directly after other option + --module-after|-M beforeopt option + Turn option into module directly after other option ++ --refresh Refresh the config using old settings + + commands can be repeated multiple times + +@@ -124,16 +125,22 @@ undef_var() { + txt_delete "^# $name is not set" "$FN" + } + +-if [ "$1" = "--file" ]; then +- FN="$2" +- if [ "$FN" = "" ] ; then +- usage ++FN=.config ++CMDS=() ++while [[ $# -gt 0 ]]; do ++ if [ "$1" = "--file" ]; then ++ if [ "$2" = "" ]; then ++ usage ++ fi ++ FN="$2" ++ shift 2 ++ else ++ CMDS+=("$1") ++ shift + fi +- shift 2 +-else +- FN=.config +-fi ++done + ++set -- "${CMDS[@]}" + if [ "$1" = "" ] ; then + usage + fi +@@ -217,9 +224,8 @@ while [ "$1" != "" ] ; do + 
set_var "${CONFIG_}$B" "${CONFIG_}$B=m" "${CONFIG_}$A" + ;; + +- # undocumented because it ignores --file (fixme) + --refresh) +- yes "" | make oldconfig ++ yes "" | make oldconfig KCONFIG_CONFIG=$FN + ;; + + *) +diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh +index 0b7952471c18f6..79c09b378be816 100755 +--- a/scripts/kconfig/merge_config.sh ++++ b/scripts/kconfig/merge_config.sh +@@ -112,8 +112,8 @@ INITFILE=$1 + shift; + + if [ ! -r "$INITFILE" ]; then +- echo "The base file '$INITFILE' does not exist. Exit." >&2 +- exit 1 ++ echo "The base file '$INITFILE' does not exist. Creating one..." >&2 ++ touch "$INITFILE" + fi + + MERGE_LIST=$* +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 98308a2bdef6e0..068edb0d79f736 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -235,7 +235,9 @@ static int process_measurement(struct file *file, const struct cred *cred, + &allowed_algos); + violation_check = ((func == FILE_CHECK || func == MMAP_CHECK || + func == MMAP_CHECK_REQPROT) && +- (ima_policy_flag & IMA_MEASURE)); ++ (ima_policy_flag & IMA_MEASURE) && ++ ((action & IMA_MEASURE) || ++ (file->f_mode & FMODE_WRITE))); + if (!action && !violation_check) + return 0; + +diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c +index 5dd1e164f9b13d..1e35c9f807b2b6 100644 +--- a/security/smack/smackfs.c ++++ b/security/smack/smackfs.c +@@ -830,7 +830,7 @@ static int smk_open_cipso(struct inode *inode, struct file *file) + static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, int format) + { +- struct netlbl_lsm_catmap *old_cat, *new_cat = NULL; ++ struct netlbl_lsm_catmap *old_cat; + struct smack_known *skp; + struct netlbl_lsm_secattr ncats; + char mapcatset[SMK_CIPSOLEN]; +@@ -917,22 +917,15 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + + smack_catset_bit(cat, mapcatset); + } +- ncats.flags = 0; +- if (catlen == 0) { +- ncats.attr.mls.cat = NULL; +- ncats.attr.mls.lvl = maplevel; +- new_cat = netlbl_catmap_alloc(GFP_ATOMIC); +- if (new_cat) +- new_cat->next = ncats.attr.mls.cat; +- ncats.attr.mls.cat = new_cat; +- skp->smk_netlabel.flags &= ~(1U << 3); +- rc = 0; +- } else { +- rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); +- } ++ ++ rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); + if (rc >= 0) { + old_cat = skp->smk_netlabel.attr.mls.cat; + rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); ++ if (ncats.attr.mls.cat) ++ skp->smk_netlabel.flags |= NETLBL_SECATTR_MLS_CAT; ++ else ++ skp->smk_netlabel.flags &= ~(u32)NETLBL_SECATTR_MLS_CAT; + skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; + synchronize_rcu(); + netlbl_catmap_free(old_cat); +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index 728c211142d145..471de2d1b37ad1 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1085,8 +1085,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) + runtime->oss.params = 0; + runtime->oss.prepare = 1; + runtime->oss.buffer_used = 0; +- if (runtime->dma_area) +- snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); ++ snd_pcm_runtime_buffer_set_silence(runtime); + + runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size); + +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 
e40de64ec85cb5..31fc20350fd96e 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -703,6 +703,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) + atomic_inc(&runtime->buffer_accessing); + } + ++/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ ++void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) ++{ ++ snd_pcm_buffer_access_lock(runtime); ++ if (runtime->dma_area) ++ snd_pcm_format_set_silence(runtime->format, runtime->dma_area, ++ bytes_to_samples(runtime, runtime->dma_bytes)); ++ snd_pcm_buffer_access_unlock(runtime); ++} ++EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); ++ + #if IS_ENABLED(CONFIG_SND_PCM_OSS) + #define is_oss_stream(substream) ((substream)->oss.oss) + #else +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index 49f6763c3250dd..31428cdc0f63d7 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -1169,8 +1169,7 @@ static __poll_t snd_seq_poll(struct file *file, poll_table * wait) + if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) { + + /* check if data is available in the pool */ +- if (!snd_seq_write_pool_allocated(client) || +- snd_seq_pool_poll_wait(client->pool, file, wait)) ++ if (snd_seq_pool_poll_wait(client->pool, file, wait)) + mask |= EPOLLOUT | EPOLLWRNORM; + } + +@@ -2584,8 +2583,6 @@ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table + if (client == NULL) + return -ENXIO; + +- if (! snd_seq_write_pool_allocated(client)) +- return 1; + if (snd_seq_pool_poll_wait(client->pool, file, wait)) + return 1; + return 0; +diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c +index b603bb93f89603..692860deec0c3d 100644 +--- a/sound/core/seq/seq_memory.c ++++ b/sound/core/seq/seq_memory.c +@@ -429,6 +429,7 @@ int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, + poll_table *wait) + { + poll_wait(file, &pool->output_sleep, wait); ++ guard(spinlock_irq)(&pool->lock); + return snd_seq_output_ok(pool); + } + +diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c +index e63621bcb21427..1a684e47d4d189 100644 +--- a/sound/pci/hda/hda_beep.c ++++ b/sound/pci/hda/hda_beep.c +@@ -31,8 +31,9 @@ static void generate_tone(struct hda_beep *beep, int tone) + beep->power_hook(beep, true); + beep->playing = 1; + } +- snd_hda_codec_write(codec, beep->nid, 0, +- AC_VERB_SET_BEEP_CONTROL, tone); ++ if (!codec->beep_just_power_on) ++ snd_hda_codec_write(codec, beep->nid, 0, ++ AC_VERB_SET_BEEP_CONTROL, tone); + if (!tone && beep->playing) { + beep->playing = 0; + if (beep->power_hook) +@@ -212,10 +213,12 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid) + struct hda_beep *beep; + int err; + +- if (!snd_hda_get_bool_hint(codec, "beep")) +- return 0; /* disabled explicitly by hints */ +- if (codec->beep_mode == HDA_BEEP_MODE_OFF) +- return 0; /* disabled by module option */ ++ if (!codec->beep_just_power_on) { ++ if (!snd_hda_get_bool_hint(codec, "beep")) ++ return 0; /* disabled explicitly by hints */ ++ if (codec->beep_mode == HDA_BEEP_MODE_OFF) ++ return 0; /* disabled by module option */ ++ } + + beep = kzalloc(sizeof(*beep), GFP_KERNEL); + if (beep == NULL) +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 2f3f295f2b0cb5..440b934cdc284a 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -24,6 +24,7 @@ + #include + #include "hda_local.h" + #include 
"hda_auto_parser.h" ++#include "hda_beep.h" + #include "hda_jack.h" + #include "hda_generic.h" + #include "hda_component.h" +@@ -6789,6 +6790,41 @@ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec, + } + } + ++/* GPIO1 = amplifier on/off */ ++static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec, ++ const struct hda_fixup *fix, ++ int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ static const hda_nid_t conn[] = { 0x02 }; ++ static const struct hda_pintbl pincfgs[] = { ++ { 0x14, 0x90170110 }, /* front/high speakers */ ++ { 0x17, 0x90170130 }, /* back/bass speakers */ ++ { } ++ }; ++ ++ // enable mute led ++ alc285_fixup_hp_mute_led_coefbit(codec, fix, action); ++ ++ switch (action) { ++ case HDA_FIXUP_ACT_PRE_PROBE: ++ /* needed for amp of back speakers */ ++ spec->gpio_mask |= 0x01; ++ spec->gpio_dir |= 0x01; ++ snd_hda_apply_pincfgs(codec, pincfgs); ++ /* share DAC to have unified volume control */ ++ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); ++ break; ++ case HDA_FIXUP_ACT_INIT: ++ /* need to toggle GPIO to enable the amp of back speakers */ ++ alc_update_gpio_data(codec, 0x01, true); ++ msleep(100); ++ alc_update_gpio_data(codec, 0x01, false); ++ break; ++ } ++} ++ + static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +@@ -6861,6 +6897,30 @@ static void alc285_fixup_hp_envy_x360(struct hda_codec *codec, + } + } + ++static void alc285_fixup_hp_beep(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ codec->beep_just_power_on = true; ++ } else if (action == HDA_FIXUP_ACT_INIT) { ++#ifdef CONFIG_SND_HDA_INPUT_BEEP ++ /* ++ * Just enable loopback to internal speaker and headphone jack. ++ * Disable amplification to get about the same beep volume as ++ * was on pure BIOS setup before loading the driver. 
++ */ ++ alc_update_coef_idx(codec, 0x36, 0x7070, BIT(13)); ++ ++ snd_hda_enable_beep_device(codec, 1); ++ ++#if !IS_ENABLED(CONFIG_INPUT_PCSPKR) ++ dev_warn_once(hda_codec_dev(codec), ++ "enable CONFIG_INPUT_PCSPKR to get PC beeps\n"); ++#endif ++#endif ++ } ++} ++ + /* for hda_fixup_thinkpad_acpi() */ + #include "thinkpad_helper.c" + +@@ -7376,6 +7436,7 @@ enum { + ALC280_FIXUP_HP_9480M, + ALC245_FIXUP_HP_X360_AMP, + ALC285_FIXUP_HP_SPECTRE_X360_EB1, ++ ALC285_FIXUP_HP_SPECTRE_X360_DF1, + ALC285_FIXUP_HP_ENVY_X360, + ALC288_FIXUP_DELL_HEADSET_MODE, + ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, +@@ -7477,6 +7538,7 @@ enum { + ALC285_FIXUP_HP_GPIO_LED, + ALC285_FIXUP_HP_MUTE_LED, + ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, ++ ALC285_FIXUP_HP_BEEP_MICMUTE_LED, + ALC236_FIXUP_HP_MUTE_LED_COEFBIT2, + ALC236_FIXUP_HP_GPIO_LED, + ALC236_FIXUP_HP_MUTE_LED, +@@ -9064,6 +9126,12 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_spectre_x360_mute_led, + }, ++ [ALC285_FIXUP_HP_BEEP_MICMUTE_LED] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_hp_beep, ++ .chained = true, ++ .chain_id = ALC285_FIXUP_HP_MUTE_LED, ++ }, + [ALC236_FIXUP_HP_MUTE_LED_COEFBIT2] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc236_fixup_hp_mute_led_coefbit2, +@@ -9407,6 +9475,10 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_spectre_x360_eb1 + }, ++ [ALC285_FIXUP_HP_SPECTRE_X360_DF1] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_hp_spectre_x360_df1 ++ }, + [ALC285_FIXUP_HP_ENVY_X360] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_envy_x360, +@@ -10006,6 +10078,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), + SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), ++ SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1), + SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), +@@ -10016,7 +10089,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), +- SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), ++ SND_PCI_QUIRK(0x103c, 0x8760, "HP EliteBook 8{4,5}5 G7", ALC285_FIXUP_HP_BEEP_MICMUTE_LED), + SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), +@@ -10501,6 +10574,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), ++ SND_PCI_QUIRK(0x17aa, 0x390d, "Lenovo Yoga Pro 7 14ASP10", 
ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), + SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), +@@ -10754,6 +10828,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, + {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, + {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, ++ {.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"}, + {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, + {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, + {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"}, +diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c +index 0b8e88b19888ec..6d8455c1bee6d8 100644 +--- a/sound/soc/codecs/cs42l43-jack.c ++++ b/sound/soc/codecs/cs42l43-jack.c +@@ -642,6 +642,10 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv) + + reinit_completion(&priv->type_detect); + ++ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, ++ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, ++ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK); ++ + cs42l43_start_hs_bias(priv, true); + regmap_update_bits(cs42l43->regmap, CS42L43_HS2, + CS42L43_HSDET_MODE_MASK, 0x3 << CS42L43_HSDET_MODE_SHIFT); +@@ -653,6 +657,9 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv) + CS42L43_HSDET_MODE_MASK, 0x2 << CS42L43_HSDET_MODE_SHIFT); + cs42l43_stop_hs_bias(priv); + ++ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, ++ CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, 0); ++ + if (!time_left) + return -ETIMEDOUT; + +diff --git a/sound/soc/codecs/mt6359-accdet.h b/sound/soc/codecs/mt6359-accdet.h +index c234f2f4276a12..78ada3a5bfae55 100644 +--- a/sound/soc/codecs/mt6359-accdet.h ++++ b/sound/soc/codecs/mt6359-accdet.h +@@ -123,6 +123,15 @@ struct mt6359_accdet { + struct workqueue_struct *jd_workqueue; + }; + ++#if IS_ENABLED(CONFIG_SND_SOC_MT6359_ACCDET) + int mt6359_accdet_enable_jack_detect(struct snd_soc_component *component, + struct snd_soc_jack *jack); ++#else ++static inline int ++mt6359_accdet_enable_jack_detect(struct snd_soc_component *component, ++ struct snd_soc_jack *jack) ++{ ++ return -EOPNOTSUPP; ++} ++#endif + #endif +diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c +index 9d6431338fb715..329549936bd5c3 100644 +--- a/sound/soc/codecs/pcm3168a.c ++++ b/sound/soc/codecs/pcm3168a.c +@@ -494,9 +494,9 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream, + } + break; + case 24: +- if (provider_mode || (format == SND_SOC_DAIFMT_DSP_A) || +- (format == SND_SOC_DAIFMT_DSP_B)) { +- dev_err(component->dev, "24-bit slots not supported in provider mode, or consumer mode using DSP\n"); ++ if (!provider_mode && ((format == SND_SOC_DAIFMT_DSP_A) || ++ (format == SND_SOC_DAIFMT_DSP_B))) { ++ dev_err(component->dev, "24-bit slots not supported in consumer mode using DSP\n"); + return -EINVAL; + } + break; +diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c +index c382cb6be60256..c0bb1b4b2dcb59 100644 +--- a/sound/soc/codecs/rt722-sdca-sdw.c ++++ b/sound/soc/codecs/rt722-sdca-sdw.c +@@ -28,9 +28,50 @@ static bool rt722_sdca_readable_register(struct device *dev, unsigned int reg) + 0): 
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_GE49, RT722_SDCA_CTL_DETECTED_MODE, + 0): +- case SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, RT722_SDCA_CTL_HIDTX_CURRENT_OWNER, +- 0) ... SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, +- RT722_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_XU03, RT722_SDCA_CTL_SELECTED_MODE, ++ 0): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU05, ++ RT722_SDCA_CTL_FU_MUTE, CH_L) ... ++ SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU05, ++ RT722_SDCA_CTL_FU_MUTE, CH_R): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_XU0D, ++ RT722_SDCA_CTL_SELECTED_MODE, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU0F, ++ RT722_SDCA_CTL_FU_MUTE, CH_L) ... ++ SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU0F, ++ RT722_SDCA_CTL_FU_MUTE, CH_R): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_PDE40, ++ RT722_SDCA_CTL_REQ_POWER_STATE, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_PDE12, ++ RT722_SDCA_CTL_REQ_POWER_STATE, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_CS01, ++ RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_CS11, ++ RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, ++ RT722_SDCA_CTL_FU_MUTE, CH_01) ... ++ SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, ++ RT722_SDCA_CTL_FU_MUTE, CH_04): ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_IT26, ++ RT722_SDCA_CTL_VENDOR_DEF, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_PDE2A, ++ RT722_SDCA_CTL_REQ_POWER_STATE, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_CS1F, ++ RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, ++ RT722_SDCA_CTL_HIDTX_CURRENT_OWNER, 0) ... ++ SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, ++ RT722_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_USER_FU06, ++ RT722_SDCA_CTL_FU_MUTE, CH_L) ... ++ SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_USER_FU06, ++ RT722_SDCA_CTL_FU_MUTE, CH_R): ++ case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_OT23, ++ RT722_SDCA_CTL_VENDOR_DEF, CH_08): ++ case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_PDE23, ++ RT722_SDCA_CTL_REQ_POWER_STATE, 0): ++ case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_CS31, ++ RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): + case RT722_BUF_ADDR_HID1 ... RT722_BUF_ADDR_HID2: + return true; + default: +@@ -74,6 +115,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re + case 0x5600000 ... 0x5600007: + case 0x5700000 ... 0x5700004: + case 0x5800000 ... 0x5800004: ++ case 0x5810000: + case 0x5b00003: + case 0x5c00011: + case 0x5d00006: +@@ -81,6 +123,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re + case 0x5f00030: + case 0x6100000 ... 0x6100051: + case 0x6100055 ... 0x6100057: ++ case 0x6100060: + case 0x6100062: + case 0x6100064 ... 
0x6100065: + case 0x6100067: +diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c +index e87a07eee97377..72d6356b898148 100644 +--- a/sound/soc/codecs/tas2764.c ++++ b/sound/soc/codecs/tas2764.c +@@ -182,33 +182,6 @@ static SOC_ENUM_SINGLE_DECL( + static const struct snd_kcontrol_new tas2764_asi1_mux = + SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum); + +-static int tas2764_dac_event(struct snd_soc_dapm_widget *w, +- struct snd_kcontrol *kcontrol, int event) +-{ +- struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); +- struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); +- int ret; +- +- switch (event) { +- case SND_SOC_DAPM_POST_PMU: +- tas2764->dac_powered = true; +- ret = tas2764_update_pwr_ctrl(tas2764); +- break; +- case SND_SOC_DAPM_PRE_PMD: +- tas2764->dac_powered = false; +- ret = tas2764_update_pwr_ctrl(tas2764); +- break; +- default: +- dev_err(tas2764->dev, "Unsupported event\n"); +- return -EINVAL; +- } +- +- if (ret < 0) +- return ret; +- +- return 0; +-} +- + static const struct snd_kcontrol_new isense_switch = + SOC_DAPM_SINGLE("Switch", TAS2764_PWR_CTRL, TAS2764_ISENSE_POWER_EN, 1, 1); + static const struct snd_kcontrol_new vsense_switch = +@@ -221,8 +194,7 @@ static const struct snd_soc_dapm_widget tas2764_dapm_widgets[] = { + 1, &isense_switch), + SND_SOC_DAPM_SWITCH("VSENSE", TAS2764_PWR_CTRL, TAS2764_VSENSE_POWER_EN, + 1, &vsense_switch), +- SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2764_dac_event, +- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), ++ SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_OUTPUT("OUT"), + SND_SOC_DAPM_SIGGEN("VMON"), + SND_SOC_DAPM_SIGGEN("IMON") +@@ -243,9 +215,28 @@ static int tas2764_mute(struct snd_soc_dai *dai, int mute, int direction) + { + struct tas2764_priv *tas2764 = + snd_soc_component_get_drvdata(dai->component); ++ int ret; ++ ++ if (!mute) { ++ tas2764->dac_powered = true; ++ ret = tas2764_update_pwr_ctrl(tas2764); ++ if (ret) ++ return ret; ++ } + + tas2764->unmuted = !mute; +- return tas2764_update_pwr_ctrl(tas2764); ++ ret = tas2764_update_pwr_ctrl(tas2764); ++ if (ret) ++ return ret; ++ ++ if (mute) { ++ tas2764->dac_powered = false; ++ ret = tas2764_update_pwr_ctrl(tas2764); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; + } + + static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth) +@@ -636,6 +627,7 @@ static const struct reg_default tas2764_reg_defaults[] = { + { TAS2764_TDM_CFG2, 0x0a }, + { TAS2764_TDM_CFG3, 0x10 }, + { TAS2764_TDM_CFG5, 0x42 }, ++ { TAS2764_INT_CLK_CFG, 0x19 }, + }; + + static const struct regmap_range_cfg tas2764_regmap_ranges[] = { +@@ -653,6 +645,7 @@ static const struct regmap_range_cfg tas2764_regmap_ranges[] = { + static bool tas2764_volatile_register(struct device *dev, unsigned int reg) + { + switch (reg) { ++ case TAS2764_SW_RST: + case TAS2764_INT_LTCH0 ... 
TAS2764_INT_LTCH4: + case TAS2764_INT_CLK_CFG: + return true; +diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c +index 7128bcf3a743e1..bb304de5cc38a3 100644 +--- a/sound/soc/fsl/imx-card.c ++++ b/sound/soc/fsl/imx-card.c +@@ -517,7 +517,7 @@ static int imx_card_parse_of(struct imx_card_data *data) + if (!card->dai_link) + return -ENOMEM; + +- data->link_data = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL); ++ data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL); + if (!data->link_data) + return -ENOMEM; + +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c +index ce80adc30fe946..6a85e8fdcae646 100644 +--- a/sound/soc/intel/boards/bytcr_rt5640.c ++++ b/sound/soc/intel/boards/bytcr_rt5640.c +@@ -576,6 +576,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { + BYT_RT5640_SSP0_AIF2 | + BYT_RT5640_MCLK_EN), + }, ++ { /* Acer Aspire SW3-013 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"), ++ }, ++ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | ++ BYT_RT5640_JD_SRC_JD2_IN4N | ++ BYT_RT5640_OVCD_TH_2000UA | ++ BYT_RT5640_OVCD_SF_0P75 | ++ BYT_RT5640_DIFF_MIC | ++ BYT_RT5640_SSP0_AIF1 | ++ BYT_RT5640_MCLK_EN), ++ }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), +diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-clk.c b/sound/soc/mediatek/mt8188/mt8188-afe-clk.c +index e69c1bb2cb2395..7f411b85778237 100644 +--- a/sound/soc/mediatek/mt8188/mt8188-afe-clk.c ++++ b/sound/soc/mediatek/mt8188/mt8188-afe-clk.c +@@ -58,7 +58,15 @@ static const char *aud_clks[MT8188_CLK_NUM] = { + [MT8188_CLK_AUD_ADC] = "aud_adc", + [MT8188_CLK_AUD_DAC_HIRES] = "aud_dac_hires", + [MT8188_CLK_AUD_A1SYS_HP] = "aud_a1sys_hp", ++ [MT8188_CLK_AUD_AFE_DMIC1] = "aud_afe_dmic1", ++ [MT8188_CLK_AUD_AFE_DMIC2] = "aud_afe_dmic2", ++ [MT8188_CLK_AUD_AFE_DMIC3] = "aud_afe_dmic3", ++ [MT8188_CLK_AUD_AFE_DMIC4] = "aud_afe_dmic4", + [MT8188_CLK_AUD_ADC_HIRES] = "aud_adc_hires", ++ [MT8188_CLK_AUD_DMIC_HIRES1] = "aud_dmic_hires1", ++ [MT8188_CLK_AUD_DMIC_HIRES2] = "aud_dmic_hires2", ++ [MT8188_CLK_AUD_DMIC_HIRES3] = "aud_dmic_hires3", ++ [MT8188_CLK_AUD_DMIC_HIRES4] = "aud_dmic_hires4", + [MT8188_CLK_AUD_I2SIN] = "aud_i2sin", + [MT8188_CLK_AUD_TDM_IN] = "aud_tdm_in", + [MT8188_CLK_AUD_I2S_OUT] = "aud_i2s_out", +diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-clk.h b/sound/soc/mediatek/mt8188/mt8188-afe-clk.h +index ec53c171c170a8..c6c78d684f3ee1 100644 +--- a/sound/soc/mediatek/mt8188/mt8188-afe-clk.h ++++ b/sound/soc/mediatek/mt8188/mt8188-afe-clk.h +@@ -54,7 +54,15 @@ enum { + MT8188_CLK_AUD_ADC, + MT8188_CLK_AUD_DAC_HIRES, + MT8188_CLK_AUD_A1SYS_HP, ++ MT8188_CLK_AUD_AFE_DMIC1, ++ MT8188_CLK_AUD_AFE_DMIC2, ++ MT8188_CLK_AUD_AFE_DMIC3, ++ MT8188_CLK_AUD_AFE_DMIC4, + MT8188_CLK_AUD_ADC_HIRES, ++ MT8188_CLK_AUD_DMIC_HIRES1, ++ MT8188_CLK_AUD_DMIC_HIRES2, ++ MT8188_CLK_AUD_DMIC_HIRES3, ++ MT8188_CLK_AUD_DMIC_HIRES4, + MT8188_CLK_AUD_I2SIN, + MT8188_CLK_AUD_TDM_IN, + MT8188_CLK_AUD_I2S_OUT, +diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c +index 11f30b183520ff..4a304bffef8bab 100644 +--- a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c ++++ b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c +@@ -2855,10 +2855,6 @@ static bool mt8188_is_volatile_reg(struct device *dev, unsigned int reg) + case AFE_DMIC3_SRC_DEBUG_MON0: + case AFE_DMIC3_UL_SRC_MON0: + case AFE_DMIC3_UL_SRC_MON1: +- case DMIC_GAIN1_CUR: +- case DMIC_GAIN2_CUR: 
+- case DMIC_GAIN3_CUR: +- case DMIC_GAIN4_CUR: + case ETDM_IN1_MONITOR: + case ETDM_IN2_MONITOR: + case ETDM_OUT1_MONITOR: +diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c +index 88a7169336d61f..580eb20b0771a8 100644 +--- a/sound/soc/qcom/sm8250.c ++++ b/sound/soc/qcom/sm8250.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -39,9 +40,11 @@ static int sm8250_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, + SNDRV_PCM_HW_PARAM_RATE); + struct snd_interval *channels = hw_param_interval(params, + SNDRV_PCM_HW_PARAM_CHANNELS); ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + rate->min = rate->max = 48000; + channels->min = channels->max = 2; ++ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); + + return 0; + } +diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c +index 9a828e55c4f9e7..507743c87e402d 100644 +--- a/sound/soc/soc-dai.c ++++ b/sound/soc/soc-dai.c +@@ -275,10 +275,11 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, + + if (dai->driver->ops && + dai->driver->ops->xlate_tdm_slot_mask) +- dai->driver->ops->xlate_tdm_slot_mask(slots, +- &tx_mask, &rx_mask); ++ ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); + else +- snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); ++ ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); ++ if (ret) ++ goto err; + + for_each_pcm_streams(stream) + snd_soc_dai_tdm_mask_set(dai, stream, *tdm_mask[stream]); +@@ -287,6 +288,7 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, + dai->driver->ops->set_tdm_slot) + ret = dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask, + slots, slot_width); ++err: + return soc_dai_ret(dai, ret); + } + EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot); +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c +index b4cfc34d00ee63..eff1355cc3df00 100644 +--- a/sound/soc/soc-ops.c ++++ b/sound/soc/soc-ops.c +@@ -638,6 +638,33 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, + } + EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range); + ++static int snd_soc_clip_to_platform_max(struct snd_kcontrol *kctl) ++{ ++ struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; ++ struct snd_ctl_elem_value uctl; ++ int ret; ++ ++ if (!mc->platform_max) ++ return 0; ++ ++ ret = kctl->get(kctl, &uctl); ++ if (ret < 0) ++ return ret; ++ ++ if (uctl.value.integer.value[0] > mc->platform_max) ++ uctl.value.integer.value[0] = mc->platform_max; ++ ++ if (snd_soc_volsw_is_stereo(mc) && ++ uctl.value.integer.value[1] > mc->platform_max) ++ uctl.value.integer.value[1] = mc->platform_max; ++ ++ ret = kctl->put(kctl, &uctl); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ + /** + * snd_soc_limit_volume - Set new limit to an existing volume control. 
+ * +@@ -662,7 +689,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card, + struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; + if (max <= mc->max - mc->min) { + mc->platform_max = max; +- ret = 0; ++ ret = snd_soc_clip_to_platform_max(kctl); + } + } + return ret; +diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c +index b4cdcec33e1209..84145209dec493 100644 +--- a/sound/soc/sof/ipc4-control.c ++++ b/sound/soc/sof/ipc4-control.c +@@ -483,6 +483,14 @@ static int sof_ipc4_bytes_ext_put(struct snd_sof_control *scontrol, + return -EINVAL; + } + ++ /* Check header id */ ++ if (header.numid != SOF_CTRL_CMD_BINARY) { ++ dev_err_ratelimited(scomp->dev, ++ "Incorrect numid for bytes put %d\n", ++ header.numid); ++ return -EINVAL; ++ } ++ + /* Verify the ABI header first */ + if (copy_from_user(&abi_hdr, tlvd->tlv, sizeof(abi_hdr))) + return -EFAULT; +@@ -565,7 +573,8 @@ static int _sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol, + if (data_size > size) + return -ENOSPC; + +- header.numid = scontrol->comp_id; ++ /* Set header id and length */ ++ header.numid = SOF_CTRL_CMD_BINARY; + header.length = data_size; + + if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv))) +diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c +index e8acf60c27a743..bb5df0d214e367 100644 +--- a/sound/soc/sof/ipc4-pcm.c ++++ b/sound/soc/sof/ipc4-pcm.c +@@ -621,7 +621,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm + return -ENOMEM; + } + +- if (!support_info) ++ /* Delay reporting is only supported on playback */ ++ if (!support_info || stream == SNDRV_PCM_STREAM_CAPTURE) + continue; + + stream_info = kzalloc(sizeof(*stream_info), GFP_KERNEL); +diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c +index 7afded323150c8..c18a1fdd40ee38 100644 +--- a/sound/soc/sof/topology.c ++++ b/sound/soc/sof/topology.c +@@ -1057,7 +1057,7 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, + struct snd_sof_dai *dai) + { + struct snd_soc_card *card = scomp->card; +- struct snd_soc_pcm_runtime *rtd; ++ struct snd_soc_pcm_runtime *rtd, *full, *partial; + struct snd_soc_dai *cpu_dai; + int stream; + int i; +@@ -1074,12 +1074,22 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, + else + goto end; + ++ full = NULL; ++ partial = NULL; + list_for_each_entry(rtd, &card->rtd_list, list) { + /* does stream match DAI link ? */ +- if (!rtd->dai_link->stream_name || +- !strstr(rtd->dai_link->stream_name, w->sname)) +- continue; ++ if (rtd->dai_link->stream_name) { ++ if (!strcmp(rtd->dai_link->stream_name, w->sname)) { ++ full = rtd; ++ break; ++ } else if (strstr(rtd->dai_link->stream_name, w->sname)) { ++ partial = rtd; ++ } ++ } ++ } + ++ rtd = full ? 
full : partial; ++ if (rtd) { + for_each_rtd_cpu_dais(rtd, i, cpu_dai) { + /* + * Please create DAI widget in the right order +diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c +index f0a5fd90110182..0d7758cc84c638 100644 +--- a/sound/soc/sunxi/sun4i-codec.c ++++ b/sound/soc/sunxi/sun4i-codec.c +@@ -25,6 +25,7 @@ + #include + + #include ++#include + #include + #include + #include +@@ -239,6 +240,7 @@ struct sun4i_codec { + struct clk *clk_module; + struct reset_control *rst; + struct gpio_desc *gpio_pa; ++ struct gpio_desc *gpio_hp; + + /* ADC_FIFOC register is at different offset on different SoCs */ + struct regmap_field *reg_adc_fifoc; +@@ -1277,6 +1279,49 @@ static struct snd_soc_dai_driver dummy_cpu_dai = { + .ops = &dummy_dai_ops, + }; + ++static struct snd_soc_jack sun4i_headphone_jack; ++ ++static struct snd_soc_jack_pin sun4i_headphone_jack_pins[] = { ++ { .pin = "Headphone", .mask = SND_JACK_HEADPHONE }, ++}; ++ ++static struct snd_soc_jack_gpio sun4i_headphone_jack_gpio = { ++ .name = "hp-det", ++ .report = SND_JACK_HEADPHONE, ++ .debounce_time = 150, ++}; ++ ++static int sun4i_codec_machine_init(struct snd_soc_pcm_runtime *rtd) ++{ ++ struct snd_soc_card *card = rtd->card; ++ struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card); ++ int ret; ++ ++ if (scodec->gpio_hp) { ++ ret = snd_soc_card_jack_new_pins(card, "Headphone Jack", ++ SND_JACK_HEADPHONE, ++ &sun4i_headphone_jack, ++ sun4i_headphone_jack_pins, ++ ARRAY_SIZE(sun4i_headphone_jack_pins)); ++ if (ret) { ++ dev_err(rtd->dev, ++ "Headphone jack creation failed: %d\n", ret); ++ return ret; ++ } ++ ++ sun4i_headphone_jack_gpio.desc = scodec->gpio_hp; ++ ret = snd_soc_jack_add_gpios(&sun4i_headphone_jack, 1, ++ &sun4i_headphone_jack_gpio); ++ ++ if (ret) { ++ dev_err(rtd->dev, "Headphone GPIO not added: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ + static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev, + int *num_links) + { +@@ -1302,6 +1347,7 @@ static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev, + link->codecs->name = dev_name(dev); + link->platforms->name = dev_name(dev); + link->dai_fmt = SND_SOC_DAIFMT_I2S; ++ link->init = sun4i_codec_machine_init; + + *num_links = 1; + +@@ -1742,6 +1788,13 @@ static int sun4i_codec_probe(struct platform_device *pdev) + return ret; + } + ++ scodec->gpio_hp = devm_gpiod_get_optional(&pdev->dev, "hp-det", GPIOD_IN); ++ if (IS_ERR(scodec->gpio_hp)) { ++ ret = PTR_ERR(scodec->gpio_hp); ++ dev_err_probe(&pdev->dev, ret, "Failed to get hp-det gpio\n"); ++ return ret; ++ } ++ + /* reg_field setup */ + scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev, + scodec->regmap, +diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c +index 9b75639434b815..0a764426d93586 100644 +--- a/tools/bpf/bpftool/common.c ++++ b/tools/bpf/bpftool/common.c +@@ -461,10 +461,11 @@ int get_fd_type(int fd) + p_err("can't read link type: %s", strerror(errno)); + return -1; + } +- if (n == sizeof(path)) { ++ if (n == sizeof(buf)) { + p_err("can't read link type: path too long!"); + return -1; + } ++ buf[n] = '\0'; + + if (strstr(buf, "bpf-map")) + return BPF_OBJ_MAP; +diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build +index fac42486a8cf0b..27f4ee9cb4db4a 100644 +--- a/tools/build/Makefile.build ++++ b/tools/build/Makefile.build +@@ -141,6 +141,10 @@ objprefix := $(subst ./,,$(OUTPUT)$(dir)/) + obj-y := $(addprefix $(objprefix),$(obj-y)) + subdir-obj-y := $(addprefix 
$(objprefix),$(subdir-obj-y)) + ++# Separate out test log files from real build objects. ++test-y := $(filter %_log, $(obj-y)) ++obj-y := $(filter-out %_log, $(obj-y)) ++ + # Final '$(obj)-in.o' object + in-target := $(objprefix)$(obj)-in.o + +@@ -151,7 +155,7 @@ $(subdir-y): + + $(sort $(subdir-obj-y)): $(subdir-y) ; + +-$(in-target): $(obj-y) FORCE ++$(in-target): $(obj-y) $(test-y) FORCE + $(call rule_mkdir) + $(call if_changed,$(host)ld_multi) + +diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h +index 977ec094bc2a6c..2a90f04a4160db 100644 +--- a/tools/include/uapi/linux/bpf.h ++++ b/tools/include/uapi/linux/bpf.h +@@ -1140,6 +1140,7 @@ enum bpf_perf_event_type { + #define BPF_F_BEFORE (1U << 3) + #define BPF_F_AFTER (1U << 4) + #define BPF_F_ID (1U << 5) ++#define BPF_F_PREORDER (1U << 6) + #define BPF_F_LINK BPF_F_LINK /* 1 << 13 */ + + /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index 2fad178949efe9..fa2abe56e845d9 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -1802,7 +1802,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, + } + + len = strlen(value); +- if (value[len - 1] != '"') { ++ if (len < 2 || value[len - 1] != '"') { + pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", + ext->name, value); + return -EINVAL; +diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c +index ae61ae5b02bf88..0871f86c6b6666 100644 +--- a/tools/net/ynl/lib/ynl.c ++++ b/tools/net/ynl/lib/ynl.c +@@ -368,7 +368,7 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr) + "Invalid attribute (binary %s)", policy->name); + return -1; + case YNL_PT_NUL_STR: +- if ((!policy->len || len <= policy->len) && !data[len - 1]) ++ if (len && (!policy->len || len <= policy->len) && !data[len - 1]) + break; + yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, + "Invalid attribute (string %s)", policy->name); +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index a1b14378bab045..f8e676a6e6f8e9 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -3287,7 +3287,7 @@ static int handle_insn_ops(struct instruction *insn, + if (update_cfi_state(insn, next_insn, &state->cfi, op)) + return 1; + +- if (!insn->alt_group) ++ if (!opts.uaccess || !insn->alt_group) + continue; + + if (op->dest.type == OP_DEST_PUSHF) { +@@ -3754,6 +3754,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, + return 0; + + case INSN_STAC: ++ if (!opts.uaccess) ++ break; ++ + if (state.uaccess) { + WARN_INSN(insn, "recursive UACCESS enable"); + return 1; +@@ -3763,6 +3766,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, + break; + + case INSN_CLAC: ++ if (!opts.uaccess) ++ break; ++ + if (!state.uaccess && func) { + WARN_INSN(insn, "redundant UACCESS disable"); + return 1; +@@ -4238,7 +4244,8 @@ static int validate_symbol(struct objtool_file *file, struct section *sec, + if (!insn || insn->ignore || insn->visited) + return 0; + +- state->uaccess = sym->uaccess_safe; ++ if (opts.uaccess) ++ state->uaccess = sym->uaccess_safe; + + ret = validate_branch(file, insn_func(insn), insn, *state); + if (ret) +@@ -4685,8 +4692,10 @@ int check(struct objtool_file *file) + init_cfi_state(&force_undefined_cfi); + force_undefined_cfi.force_undefined = true; + +- if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) ++ if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) { ++ ret = -1; + 
goto out; ++ } + + cfi_hash_add(&init_cfi); + cfi_hash_add(&func_cfi); +@@ -4703,7 +4712,7 @@ int check(struct objtool_file *file) + if (opts.retpoline) { + ret = validate_retpoline(file); + if (ret < 0) +- return ret; ++ goto out; + warnings += ret; + } + +@@ -4739,7 +4748,7 @@ int check(struct objtool_file *file) + */ + ret = validate_unrets(file); + if (ret < 0) +- return ret; ++ goto out; + warnings += ret; + } + +@@ -4802,7 +4811,7 @@ int check(struct objtool_file *file) + if (opts.prefix) { + ret = add_prefix_symbols(file); + if (ret < 0) +- return ret; ++ goto out; + warnings += ret; + } + +diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py +index dc794907686304..4a6bf4e048f5b0 100644 +--- a/tools/testing/kunit/qemu_configs/x86_64.py ++++ b/tools/testing/kunit/qemu_configs/x86_64.py +@@ -7,4 +7,6 @@ CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='x86_64', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', +- extra_qemu_params=[]) ++ # qboot is faster than SeaBIOS and doesn't mess up ++ # the terminal. ++ extra_qemu_params=['-bios', 'qboot.rom']) +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +index 2d0796314862ac..0a99fd404f6dc0 100644 +--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +@@ -68,7 +68,6 @@ static void test_sockmap_ktls_disconnect_after_delete(int family, int map) + goto close_cli; + + err = disconnect(cli); +- ASSERT_OK(err, "disconnect"); + + close_cli: + close(cli); +diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh +index a3678dfe5848a2..c151374ddf0402 100755 +--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh ++++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh +@@ -149,7 +149,7 @@ cfg_test_host_common() + check_err $? "Failed to add $name host entry" + + bridge mdb replace dev br0 port br0 grp $grp $state vid 10 &> /dev/null +- check_fail $? "Managed to replace $name host entry" ++ check_err $? "Failed to replace $name host entry" + + bridge mdb del dev br0 port br0 grp $grp $state vid 10 + bridge mdb get dev br0 grp $grp vid 10 &> /dev/null +diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh +index 342ad27f631b15..e771f5f7faa26a 100755 +--- a/tools/testing/selftests/net/gro.sh ++++ b/tools/testing/selftests/net/gro.sh +@@ -95,5 +95,6 @@ trap cleanup EXIT + if [[ "${test}" == "all" ]]; then + run_all_tests + else +- run_test "${proto}" "${test}" ++ exit_code=$(run_test "${proto}" "${test}") ++ exit $exit_code + fi;
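
The rewritten net/unix/garbage.c in the first patch above replaces the old inflight candidate lists with a strongly-connected-component (SCC) walk: __unix_walk_scc() gives each vertex an index and an scc_index (a Tarjan-style lowlink), finalises an SCC whenever a vertex's scc_index did not shrink below its own index, and unix_vertex_dead() then treats an SCC as garbage when every fd in it can only be received inside the same SCC and file_count() equals the vertex's out-degree. As background for that hunk, below is a minimal user-space sketch of Tarjan's SCC detection. It is illustrative only: the toy graph, NV, strongconnect() and every other name here are hypothetical and not part of the kernel patch, and the kernel variant runs iteratively with explicit vertex/edge stacks to bound stack depth, whereas this sketch recurses for brevity.

/* Standalone sketch of Tarjan's SCC algorithm (build: cc -o scc scc.c).
 * All names are invented for illustration; this is not kernel code.
 */
#include <stdio.h>

#define NV 5			/* vertices in the toy graph */

static int adj[NV][NV];		/* adj[u][v] = 1 for a directed edge u -> v */
static int index_of[NV];	/* visit order; -1 while unvisited */
static int lowlink[NV];		/* smallest index reachable (the patch's scc_index) */
static int on_stack[NV];
static int stack[NV], sp, next_index;

static void strongconnect(int v)
{
	index_of[v] = lowlink[v] = next_index++;
	stack[sp++] = v;
	on_stack[v] = 1;

	for (int w = 0; w < NV; w++) {
		if (!adj[v][w])
			continue;
		if (index_of[w] < 0) {
			/* Tree edge: recurse, then propagate the lowlink up. */
			strongconnect(w);
			if (lowlink[w] < lowlink[v])
				lowlink[v] = lowlink[w];
		} else if (on_stack[w] && index_of[w] < lowlink[v]) {
			/* Back/cross edge into the stack: same SCC. */
			lowlink[v] = index_of[w];
		}
	}

	/* Unchanged lowlink means v is the root of a finished SCC. */
	if (lowlink[v] == index_of[v]) {
		int w;

		printf("SCC:");
		do {
			w = stack[--sp];
			on_stack[w] = 0;	/* mark off-stack, i.e. grouped */
			printf(" %d", w);
		} while (w != v);
		printf("\n");
	}
}

int main(void)
{
	/* 0 -> 1 -> 2 -> 0 is a cycle (one SCC); 3 -> 4 are singletons. */
	adj[0][1] = adj[1][2] = adj[2][0] = adj[3][4] = 1;

	for (int v = 0; v < NV; v++)
		index_of[v] = -1;
	for (int v = 0; v < NV; v++)
		if (index_of[v] < 0)
			strongconnect(v);
	return 0;
}

In the patch's terms, each "SCC:" group printed here corresponds to one list built via scc_entry, and a single-vertex SCC with no self-edge is exactly the case unix_scc_cyclic() uses to leave unix_graph_maybe_cyclic cleared, letting later sends skip garbage collection entirely.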