diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.102-103.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.102-103.patch new file mode 100644 index 0000000000..a8cafa9509 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.102-103.patch @@ -0,0 +1,22717 @@ +diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml +index 4ebea60b8c5ba5..8c52fa0ea5f8ee 100644 +--- a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml ++++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml +@@ -25,7 +25,7 @@ properties: + maxItems: 1 + + clocks: +- minItems: 2 ++ maxItems: 2 + + clock-names: + items: +diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml +index bc5594d1864301..300bf2252c3e8e 100644 +--- a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml ++++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml +@@ -20,7 +20,7 @@ properties: + maxItems: 2 + + clocks: +- minItems: 1 ++ maxItems: 1 + + clock-names: + items: +diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst +index a624e92f2687f0..6ba11dfb4bf360 100644 +--- a/Documentation/filesystems/fscrypt.rst ++++ b/Documentation/filesystems/fscrypt.rst +@@ -141,9 +141,8 @@ However, these ioctls have some limitations: + CONFIG_PAGE_POISONING=y in your kernel config and add page_poison=1 + to your kernel command line. However, this has a performance cost. + +-- Secret keys might still exist in CPU registers, in crypto +- accelerator hardware (if used by the crypto API to implement any of +- the algorithms), or in other places not explicitly considered here. ++- Secret keys might still exist in CPU registers or in other places ++ not explicitly considered here. + + Limitations of v1 policies + ~~~~~~~~~~~~~~~~~~~~~~~~~~ +@@ -375,9 +374,12 @@ the work is done by XChaCha12, which is much faster than AES when AES + acceleration is unavailable. For more information about Adiantum, see + `the Adiantum paper `_. + +-The (AES-128-CBC-ESSIV, AES-128-CTS-CBC) pair exists only to support +-systems whose only form of AES acceleration is an off-CPU crypto +-accelerator such as CAAM or CESA that does not support XTS. ++The (AES-128-CBC-ESSIV, AES-128-CTS-CBC) pair was added to try to ++provide a more efficient option for systems that lack AES instructions ++in the CPU but do have a non-inline crypto engine such as CAAM or CESA ++that supports AES-CBC (and not AES-XTS). This is deprecated. It has ++been shown that just doing AES on the CPU is actually faster. ++Moreover, Adiantum is faster still and is recommended on such systems. + + The remaining mode pairs are the "national pride ciphers": + +@@ -1231,22 +1233,13 @@ this by validating all top-level encryption policies prior to access. + Inline encryption support + ========================= + +-By default, fscrypt uses the kernel crypto API for all cryptographic +-operations (other than HKDF, which fscrypt partially implements +-itself). The kernel crypto API supports hardware crypto accelerators, +-but only ones that work in the traditional way where all inputs and +-outputs (e.g. plaintexts and ciphertexts) are in memory. 
fscrypt can +-take advantage of such hardware, but the traditional acceleration +-model isn't particularly efficient and fscrypt hasn't been optimized +-for it. +- +-Instead, many newer systems (especially mobile SoCs) have *inline +-encryption hardware* that can encrypt/decrypt data while it is on its +-way to/from the storage device. Linux supports inline encryption +-through a set of extensions to the block layer called *blk-crypto*. +-blk-crypto allows filesystems to attach encryption contexts to bios +-(I/O requests) to specify how the data will be encrypted or decrypted +-in-line. For more information about blk-crypto, see ++Many newer systems (especially mobile SoCs) have *inline encryption ++hardware* that can encrypt/decrypt data while it is on its way to/from ++the storage device. Linux supports inline encryption through a set of ++extensions to the block layer called *blk-crypto*. blk-crypto allows ++filesystems to attach encryption contexts to bios (I/O requests) to ++specify how the data will be encrypted or decrypted in-line. For more ++information about blk-crypto, see + :ref:`Documentation/block/inline-encryption.rst `. + + On supported filesystems (currently ext4 and f2fs), fscrypt can use +diff --git a/Documentation/firmware-guide/acpi/i2c-muxes.rst b/Documentation/firmware-guide/acpi/i2c-muxes.rst +index 3a8997ccd7c4b6..f366539acd792a 100644 +--- a/Documentation/firmware-guide/acpi/i2c-muxes.rst ++++ b/Documentation/firmware-guide/acpi/i2c-muxes.rst +@@ -14,7 +14,7 @@ Consider this topology:: + | | | 0x70 |--CH01--> i2c client B (0x50) + +------+ +------+ + +-which corresponds to the following ASL:: ++which corresponds to the following ASL (in the scope of \_SB):: + + Device (SMB1) + { +@@ -24,7 +24,7 @@ which corresponds to the following ASL:: + Name (_HID, ...) + Name (_CRS, ResourceTemplate () { + I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED, +- AddressingMode7Bit, "^SMB1", 0x00, ++ AddressingMode7Bit, "\\_SB.SMB1", 0x00, + ResourceConsumer,,) + } + +@@ -37,7 +37,7 @@ which corresponds to the following ASL:: + Name (_HID, ...) + Name (_CRS, ResourceTemplate () { + I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED, +- AddressingMode7Bit, "^CH00", 0x00, ++ AddressingMode7Bit, "\\_SB.SMB1.CH00", 0x00, + ResourceConsumer,,) + } + } +@@ -52,7 +52,7 @@ which corresponds to the following ASL:: + Name (_HID, ...) + Name (_CRS, ResourceTemplate () { + I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED, +- AddressingMode7Bit, "^CH01", 0x00, ++ AddressingMode7Bit, "\\_SB.SMB1.CH01", 0x00, + ResourceConsumer,,) + } + } +diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst +index f7a73421eb76a1..e774b48de9f511 100644 +--- a/Documentation/networking/bonding.rst ++++ b/Documentation/networking/bonding.rst +@@ -444,6 +444,18 @@ arp_missed_max + + The default value is 2, and the allowable range is 1 - 255. + ++coupled_control ++ ++ Specifies whether the LACP state machine's MUX in the 802.3ad mode ++ should have separate Collecting and Distributing states. ++ ++ This is by implementing the independent control state machine per ++ IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled control ++ state machine. ++ ++ The default value is 1. This setting does not separate the Collecting ++ and Distributing states, maintaining the bond in coupled control. 
++ + downdelay + + Specifies the time, in milliseconds, to wait before disabling +diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst +index 15f1919d640c09..b797f3dd4b6922 100644 +--- a/Documentation/networking/mptcp-sysctl.rst ++++ b/Documentation/networking/mptcp-sysctl.rst +@@ -20,6 +20,8 @@ add_addr_timeout - INTEGER (seconds) + resent to an MPTCP peer that has not acknowledged a previous + ADD_ADDR message. + ++ Do not retransmit if set to 0. ++ + The default value matches TCP_RTO_MAX. This is a per-namespace + sysctl. + +diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst +index b6d5a3a8febc10..f6a7cffdc12921 100644 +--- a/Documentation/power/runtime_pm.rst ++++ b/Documentation/power/runtime_pm.rst +@@ -398,10 +398,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: + nonzero, increment the counter and return 1; otherwise return 0 without + changing the counter + +- `int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);` ++ `int pm_runtime_get_if_active(struct device *dev);` + - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the +- runtime PM status is RPM_ACTIVE, and either ign_usage_count is true +- or the device's usage_count is non-zero, increment the counter and ++ runtime PM status is RPM_ACTIVE, increment the counter and + return 1; otherwise return 0 without changing the counter + + `void pm_runtime_put_noidle(struct device *dev);` +diff --git a/Makefile b/Makefile +index 685a65992449d1..9b288ccccd6495 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 102 ++SUBLEVEL = 103 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +@@ -1061,7 +1061,7 @@ KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD + KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) + + # userspace programs are linked via the compiler, use the correct linker +-ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy) ++ifdef CONFIG_CC_IS_CLANG + KBUILD_USERLDFLAGS += $(call cc-option, --ld-path=$(LD)) + endif + +diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h +index c7d2510e5a786f..853c4f81ba4a57 100644 +--- a/arch/arm/include/asm/topology.h ++++ b/arch/arm/include/asm/topology.h +@@ -13,6 +13,7 @@ + #define arch_set_freq_scale topology_set_freq_scale + #define arch_scale_freq_capacity topology_get_freq_scale + #define arch_scale_freq_invariant topology_scale_freq_invariant ++#define arch_scale_freq_ref topology_get_freq_ref + #endif + + /* Replace task scheduler's default cpu-invariant accounting */ +diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c +index 36915a073c2340..f432d22bfed844 100644 +--- a/arch/arm/mach-rockchip/platsmp.c ++++ b/arch/arm/mach-rockchip/platsmp.c +@@ -279,11 +279,6 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus) + } + + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { +- if (rockchip_smp_prepare_sram(node)) { +- of_node_put(node); +- return; +- } +- + /* enable the SCU power domain */ + pmu_set_power_domain(PMU_PWRDN_SCU, true); + +@@ -316,11 +311,19 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus) + asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); + ncores = ((l2ctlr >> 24) & 0x3) + 1; + } +- of_node_put(node); + + /* Make sure that all cores except the first are really off */ + for (i = 1; i < ncores; 
i++) + pmu_set_power_domain(0 + i, false); ++ ++ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { ++ if (rockchip_smp_prepare_sram(node)) { ++ of_node_put(node); ++ return; ++ } ++ } ++ ++ of_node_put(node); + } + + static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus) +diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c +index d5c805adf7a82b..ea706fac63587a 100644 +--- a/arch/arm/mach-tegra/reset.c ++++ b/arch/arm/mach-tegra/reset.c +@@ -63,7 +63,7 @@ static void __init tegra_cpu_reset_handler_enable(void) + BUG_ON(is_enabled); + BUG_ON(tegra_cpu_reset_handler_size > TEGRA_IRAM_RESET_HANDLER_SIZE); + +- memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start, ++ memcpy_toio(iram_base, (void *)__tegra_cpu_reset_handler_start, + tegra_cpu_reset_handler_size); + + err = call_firmware_op(set_cpu_boot_addr, 0, reset_address); +diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +index f156167b4e8a71..3f74767d63ab71 100644 +--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi ++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +@@ -531,7 +531,6 @@ sdhci0: mmc@fa10000 { + clock-names = "clk_ahb", "clk_xin"; + assigned-clocks = <&k3_clks 57 6>; + assigned-clock-parents = <&k3_clks 57 8>; +- mmc-ddr-1_8v; + mmc-hs200-1_8v; + ti,trm-icp = <0x2>; + bus-width = <8>; +diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi +index e931c966b7f22b..e98d043e574600 100644 +--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi ++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi +@@ -448,16 +448,16 @@ AM62X_IOPAD(0x01ec, PIN_INPUT_PULLUP, 0) /* (A17) I2C1_SDA */ /* SODIMM 12 */ + /* Verdin I2C_2_DSI */ + pinctrl_i2c2: main-i2c2-default-pins { + pinctrl-single,pins = < +- AM62X_IOPAD(0x00b0, PIN_INPUT, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */ /* SODIMM 55 */ +- AM62X_IOPAD(0x00b4, PIN_INPUT, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */ /* SODIMM 53 */ ++ AM62X_IOPAD(0x00b0, PIN_INPUT_PULLUP, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */ /* SODIMM 55 */ ++ AM62X_IOPAD(0x00b4, PIN_INPUT_PULLUP, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */ /* SODIMM 53 */ + >; + }; + + /* Verdin I2C_4_CSI */ + pinctrl_i2c3: main-i2c3-default-pins { + pinctrl-single,pins = < +- AM62X_IOPAD(0x01d0, PIN_INPUT, 2) /* (A15) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */ +- AM62X_IOPAD(0x01d4, PIN_INPUT, 2) /* (B15) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */ ++ AM62X_IOPAD(0x01d0, PIN_INPUT_PULLUP, 2) /* (A15) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */ ++ AM62X_IOPAD(0x01d4, PIN_INPUT_PULLUP, 2) /* (B15) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */ + >; + }; + +@@ -729,8 +729,8 @@ AM62X_MCU_IOPAD(0x0010, PIN_INPUT, 7) /* (C9) MCU_SPI0_D1.MCU_GPIO0_4 */ /* SODI + /* Verdin I2C_3_HDMI */ + pinctrl_mcu_i2c0: mcu-i2c0-default-pins { + pinctrl-single,pins = < +- AM62X_MCU_IOPAD(0x0044, PIN_INPUT, 0) /* (A8) MCU_I2C0_SCL */ /* SODIMM 59 */ +- AM62X_MCU_IOPAD(0x0048, PIN_INPUT, 0) /* (D10) MCU_I2C0_SDA */ /* SODIMM 57 */ ++ AM62X_MCU_IOPAD(0x0044, PIN_INPUT_PULLUP, 0) /* (A8) MCU_I2C0_SCL */ /* SODIMM 59 */ ++ AM62X_MCU_IOPAD(0x0048, PIN_INPUT_PULLUP, 0) /* (D10) MCU_I2C0_SDA */ /* SODIMM 57 */ + >; + }; + +diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts +index 99f2878de4c677..12cbf253a8910b 100644 +--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts ++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts +@@ -144,8 +144,8 @@ AM62AX_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (D15) UART0_TXD */ + + main_uart1_pins_default: main-uart1-default-pins { + pinctrl-single,pins = < 
+- AM62AX_IOPAD(0x01e8, PIN_INPUT, 1) /* (C17) I2C1_SCL.UART1_RXD */ +- AM62AX_IOPAD(0x01ec, PIN_OUTPUT, 1) /* (E17) I2C1_SDA.UART1_TXD */ ++ AM62AX_IOPAD(0x01ac, PIN_INPUT, 2) /* (B21) MCASP0_AFSR.UART1_RXD */ ++ AM62AX_IOPAD(0x01b0, PIN_OUTPUT, 2) /* (A21) MCASP0_ACLKR.UART1_TXD */ + AM62AX_IOPAD(0x0194, PIN_INPUT, 2) /* (C19) MCASP0_AXR3.UART1_CTSn */ + AM62AX_IOPAD(0x0198, PIN_OUTPUT, 2) /* (B19) MCASP0_AXR2.UART1_RTSn */ + >; +diff --git a/arch/arm64/boot/dts/ti/k3-pinctrl.h b/arch/arm64/boot/dts/ti/k3-pinctrl.h +index 2a4e0e084d695d..8e0869d9b7a0e5 100644 +--- a/arch/arm64/boot/dts/ti/k3-pinctrl.h ++++ b/arch/arm64/boot/dts/ti/k3-pinctrl.h +@@ -8,11 +8,16 @@ + #ifndef DTS_ARM64_TI_K3_PINCTRL_H + #define DTS_ARM64_TI_K3_PINCTRL_H + ++#define ST_EN_SHIFT (14) + #define PULLUDEN_SHIFT (16) + #define PULLTYPESEL_SHIFT (17) + #define RXACTIVE_SHIFT (18) + #define DEBOUNCE_SHIFT (11) + ++/* Schmitt trigger configuration */ ++#define ST_DISABLE (0 << ST_EN_SHIFT) ++#define ST_ENABLE (1 << ST_EN_SHIFT) ++ + #define PULL_DISABLE (1 << PULLUDEN_SHIFT) + #define PULL_ENABLE (0 << PULLUDEN_SHIFT) + +@@ -26,9 +31,13 @@ + #define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE) + #define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP) + #define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN) +-#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +-#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +-#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN) ++#define PIN_INPUT (INPUT_EN | ST_ENABLE | PULL_DISABLE) ++#define PIN_INPUT_PULLUP (INPUT_EN | ST_ENABLE | PULL_UP) ++#define PIN_INPUT_PULLDOWN (INPUT_EN | ST_ENABLE | PULL_DOWN) ++/* Input configurations with Schmitt Trigger disabled */ ++#define PIN_INPUT_NOST (INPUT_EN | PULL_DISABLE) ++#define PIN_INPUT_PULLUP_NOST (INPUT_EN | PULL_UP) ++#define PIN_INPUT_PULLDOWN_NOST (INPUT_EN | PULL_DOWN) + + #define PIN_DEBOUNCE_DISABLE (0 << DEBOUNCE_SHIFT) + #define PIN_DEBOUNCE_CONF1 (1 << DEBOUNCE_SHIFT) +diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h +index a407f9cd549edc..c07a58b96329d8 100644 +--- a/arch/arm64/include/asm/acpi.h ++++ b/arch/arm64/include/asm/acpi.h +@@ -150,7 +150,7 @@ acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor) + {} + #endif + +-static inline const char *acpi_get_enable_method(int cpu) ++static __always_inline const char *acpi_get_enable_method(int cpu) + { + if (acpi_psci_present()) + return "psci"; +diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h +index 9fab663dd2de96..a323b109b9c44b 100644 +--- a/arch/arm64/include/asm/topology.h ++++ b/arch/arm64/include/asm/topology.h +@@ -23,6 +23,7 @@ void update_freq_counters_refs(void); + #define arch_set_freq_scale topology_set_freq_scale + #define arch_scale_freq_capacity topology_get_freq_scale + #define arch_scale_freq_invariant topology_scale_freq_invariant ++#define arch_scale_freq_ref topology_get_freq_ref + + #ifdef CONFIG_ACPI_CPPC_LIB + #define arch_init_invariance_cppc topology_init_cpu_capacity_cppc +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c +index a1e0cc5353fb12..d0d836448a76b2 100644 +--- a/arch/arm64/kernel/fpsimd.c ++++ b/arch/arm64/kernel/fpsimd.c +@@ -1876,10 +1876,10 @@ void fpsimd_save_and_flush_cpu_state(void) + if (!system_supports_fpsimd()) + return; + WARN_ON(preemptible()); +- __get_cpu_fpsimd_context(); ++ get_cpu_fpsimd_context(); + fpsimd_save(); + fpsimd_flush_cpu_state(); +- __put_cpu_fpsimd_context(); ++ put_cpu_fpsimd_context(); + } + + #ifdef 
CONFIG_KERNEL_MODE_NEON +diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c +index 817d788cd86669..1a2c72f3e7f80e 100644 +--- a/arch/arm64/kernel/topology.c ++++ b/arch/arm64/kernel/topology.c +@@ -82,7 +82,12 @@ int __init parse_acpi_topology(void) + #undef pr_fmt + #define pr_fmt(fmt) "AMU: " fmt + +-static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale); ++/* ++ * Ensure that amu_scale_freq_tick() will return SCHED_CAPACITY_SCALE until ++ * the CPU capacity and its associated frequency have been correctly ++ * initialized. ++ */ ++static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT); + static DEFINE_PER_CPU(u64, arch_const_cycles_prev); + static DEFINE_PER_CPU(u64, arch_core_cycles_prev); + static cpumask_var_t amu_fie_cpus; +@@ -112,14 +117,14 @@ static inline bool freq_counters_valid(int cpu) + return true; + } + +-static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) ++void freq_inv_set_max_ratio(int cpu, u64 max_rate) + { +- u64 ratio; ++ u64 ratio, ref_rate = arch_timer_get_rate(); + + if (unlikely(!max_rate || !ref_rate)) { +- pr_debug("CPU%d: invalid maximum or reference frequency.\n", ++ WARN_ONCE(1, "CPU%d: invalid maximum or reference frequency.\n", + cpu); +- return -EINVAL; ++ return; + } + + /* +@@ -139,12 +144,10 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) + ratio = div64_u64(ratio, max_rate); + if (!ratio) { + WARN_ONCE(1, "Reference frequency too low.\n"); +- return -EINVAL; ++ return; + } + +- per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio; +- +- return 0; ++ WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio); + } + + static void amu_scale_freq_tick(void) +@@ -195,10 +198,7 @@ static void amu_fie_setup(const struct cpumask *cpus) + return; + + for_each_cpu(cpu, cpus) { +- if (!freq_counters_valid(cpu) || +- freq_inv_set_max_ratio(cpu, +- cpufreq_get_hw_max_freq(cpu) * 1000ULL, +- arch_timer_get_rate())) ++ if (!freq_counters_valid(cpu)) + return; + } + +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index 8b70759cdbb90c..610f8a1099f50b 100644 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -953,6 +953,7 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne + + void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) + { ++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); + console_verbose(); + + pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index 2e5d1e238af958..893b9485b840a4 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -753,6 +753,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) + */ + siaddr = untagged_addr(far); + } ++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + return 0; +diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c +index 68bf1a125502da..1e308328c07966 100644 +--- a/arch/arm64/mm/ptdump_debugfs.c ++++ b/arch/arm64/mm/ptdump_debugfs.c +@@ -1,6 +1,5 @@ + // SPDX-License-Identifier: GPL-2.0 + #include +-#include + #include + + #include +@@ -9,9 +8,7 @@ static int ptdump_show(struct seq_file *m, void *v) + { + struct ptdump_info *info = m->private; + +- get_online_mems(); + ptdump_walk(m, info); +- put_online_mems(); + return 0; + } + DEFINE_SHOW_ATTRIBUTE(ptdump); +diff 
--git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c +index e2f30ff9afde82..a43ba7f9f9872a 100644 +--- a/arch/loongarch/kernel/module-sections.c ++++ b/arch/loongarch/kernel/module-sections.c +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val) + { +@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v + return (Elf_Addr)&plt[nr]; + } + +-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) +-{ +- return x->r_info == y->r_info && x->r_addend == y->r_addend; +-} ++#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b)) + +-static bool duplicate_rela(const Elf_Rela *rela, int idx) ++static int compare_rela(const void *x, const void *y) + { +- int i; ++ int ret; ++ const Elf_Rela *rela_x = x, *rela_y = y; + +- for (i = 0; i < idx; i++) { +- if (is_rela_equal(&rela[i], &rela[idx])) +- return true; +- } ++ ret = cmp_3way(rela_x->r_info, rela_y->r_info); ++ if (ret == 0) ++ ret = cmp_3way(rela_x->r_addend, rela_y->r_addend); + +- return false; ++ return ret; + } + + static void count_max_entries(Elf_Rela *relas, int num, + unsigned int *plts, unsigned int *gots) + { +- unsigned int i, type; ++ unsigned int i; ++ ++ sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL); + + for (i = 0; i < num; i++) { +- type = ELF_R_TYPE(relas[i].r_info); +- switch (type) { ++ if (i && !compare_rela(&relas[i-1], &relas[i])) ++ continue; ++ ++ switch (ELF_R_TYPE(relas[i].r_info)) { + case R_LARCH_SOP_PUSH_PLT_PCREL: + case R_LARCH_B26: +- if (!duplicate_rela(relas, i)) +- (*plts)++; ++ (*plts)++; + break; + case R_LARCH_GOT_PC_HI20: +- if (!duplicate_rela(relas, i)) +- (*gots)++; ++ (*gots)++; + break; + default: + break; /* Do nothing. */ +diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c +index dcb1428b458c8f..869003f1c7036e 100644 +--- a/arch/loongarch/net/bpf_jit.c ++++ b/arch/loongarch/net/bpf_jit.c +@@ -203,11 +203,9 @@ bool bpf_jit_supports_kfunc_call(void) + return true; + } + +-/* initialized on the first pass of build_body() */ +-static int out_offset = -1; +-static int emit_bpf_tail_call(struct jit_ctx *ctx) ++static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn) + { +- int off; ++ int off, tc_ninsn = 0; + u8 tcc = tail_call_reg(ctx); + u8 a1 = LOONGARCH_GPR_A1; + u8 a2 = LOONGARCH_GPR_A2; +@@ -217,7 +215,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) + const int idx0 = ctx->idx; + + #define cur_offset (ctx->idx - idx0) +-#define jmp_offset (out_offset - (cur_offset)) ++#define jmp_offset (tc_ninsn - (cur_offset)) + + /* + * a0: &ctx +@@ -227,6 +225,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) + * if (index >= array->map.max_entries) + * goto out; + */ ++ tc_ninsn = insn ? 
ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0]; + off = offsetof(struct bpf_array, map.max_entries); + emit_insn(ctx, ldwu, t1, a1, off); + /* bgeu $a2, $t1, jmp_offset */ +@@ -258,15 +257,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) + emit_insn(ctx, ldd, t3, t2, off); + __build_epilogue(ctx, true); + +- /* out: */ +- if (out_offset == -1) +- out_offset = cur_offset; +- if (cur_offset != out_offset) { +- pr_err_once("tail_call out_offset = %d, expected %d!\n", +- cur_offset, out_offset); +- return -1; +- } +- + return 0; + + toofar: +@@ -853,7 +843,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + mark_tail_call(ctx); +- if (emit_bpf_tail_call(ctx) < 0) ++ if (emit_bpf_tail_call(ctx, i) < 0) + return -EINVAL; + break; + +@@ -1251,7 +1241,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); + +- out_offset = -1; + + return prog; + } +diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S +index 397114962a1427..10f6aa4d8f0571 100644 +--- a/arch/m68k/kernel/head.S ++++ b/arch/m68k/kernel/head.S +@@ -3404,6 +3404,7 @@ L(console_clear_loop): + + movel %d4,%d1 /* screen height in pixels */ + divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */ ++ subql #1,%d1 /* row range is 0 to num - 1 */ + + movel %d0,%a2@(Lconsole_struct_num_columns) + movel %d1,%a2@(Lconsole_struct_num_rows) +@@ -3550,15 +3551,14 @@ func_start console_putc,%a0/%a1/%d0-%d7 + cmpib #10,%d7 + jne L(console_not_lf) + movel %a0@(Lconsole_struct_cur_row),%d0 +- addil #1,%d0 +- movel %d0,%a0@(Lconsole_struct_cur_row) + movel %a0@(Lconsole_struct_num_rows),%d1 + cmpl %d1,%d0 + jcs 1f +- subil #1,%d0 +- movel %d0,%a0@(Lconsole_struct_cur_row) + console_scroll ++ jra L(console_exit) + 1: ++ addql #1,%d0 ++ movel %d0,%a0@(Lconsole_struct_cur_row) + jra L(console_exit) + + L(console_not_lf): +@@ -3585,12 +3585,6 @@ L(console_not_cr): + */ + L(console_not_home): + movel %a0@(Lconsole_struct_cur_column),%d0 +- addql #1,%a0@(Lconsole_struct_cur_column) +- movel %a0@(Lconsole_struct_num_columns),%d1 +- cmpl %d1,%d0 +- jcs 1f +- console_putc #'\n' /* recursion is OK! 
*/ +-1: + movel %a0@(Lconsole_struct_cur_row),%d1 + + /* +@@ -3637,6 +3631,23 @@ L(console_do_font_scanline): + addq #1,%d1 + dbra %d7,L(console_read_char_scanline) + ++ /* ++ * Register usage in the code below: ++ * a0 = pointer to console globals ++ * d0 = cursor column ++ * d1 = cursor column limit ++ */ ++ ++ lea %pc@(L(console_globals)),%a0 ++ ++ movel %a0@(Lconsole_struct_cur_column),%d0 ++ addql #1,%d0 ++ movel %d0,%a0@(Lconsole_struct_cur_column) /* Update cursor pos */ ++ movel %a0@(Lconsole_struct_num_columns),%d1 ++ cmpl %d1,%d0 ++ jcs L(console_exit) ++ console_putc #'\n' /* Line wrap using tail recursion */ ++ + L(console_exit): + func_return console_putc + +diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/crypto/chacha-core.S +index 5755f69cfe0074..706aeb850fb0d6 100644 +--- a/arch/mips/crypto/chacha-core.S ++++ b/arch/mips/crypto/chacha-core.S +@@ -55,17 +55,13 @@ + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #define MSB 0 + #define LSB 3 +-#define ROTx rotl +-#define ROTR(n) rotr n, 24 + #define CPU_TO_LE32(n) \ +- wsbh n; \ ++ wsbh n, n; \ + rotr n, 16; + #else + #define MSB 3 + #define LSB 0 +-#define ROTx rotr + #define CPU_TO_LE32(n) +-#define ROTR(n) + #endif + + #define FOR_EACH_WORD(x) \ +@@ -192,10 +188,10 @@ CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ + xor X(W), X(B); \ + xor X(Y), X(C); \ + xor X(Z), X(D); \ +- rotl X(V), S; \ +- rotl X(W), S; \ +- rotl X(Y), S; \ +- rotl X(Z), S; ++ rotr X(V), 32 - S; \ ++ rotr X(W), 32 - S; \ ++ rotr X(Y), 32 - S; \ ++ rotr X(Z), 32 - S; + + .text + .set reorder +@@ -372,21 +368,19 @@ chacha_crypt_arch: + /* First byte */ + lbu T1, 0(IN) + addiu $at, BYTES, 1 +- CPU_TO_LE32(SAVED_X) +- ROTR(SAVED_X) + xor T1, SAVED_X + sb T1, 0(OUT) + beqz $at, .Lchacha_mips_xor_done + /* Second byte */ + lbu T1, 1(IN) + addiu $at, BYTES, 2 +- ROTx SAVED_X, 8 ++ rotr SAVED_X, 8 + xor T1, SAVED_X + sb T1, 1(OUT) + beqz $at, .Lchacha_mips_xor_done + /* Third byte */ + lbu T1, 2(IN) +- ROTx SAVED_X, 8 ++ rotr SAVED_X, 8 + xor T1, SAVED_X + sb T1, 2(OUT) + b .Lchacha_mips_xor_done +diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h +index 61fd4d0aeda41f..c0769dc4b85321 100644 +--- a/arch/mips/include/asm/vpe.h ++++ b/arch/mips/include/asm/vpe.h +@@ -119,4 +119,12 @@ void cleanup_tc(struct tc *tc); + + int __init vpe_module_init(void); + void __exit vpe_module_exit(void); ++ ++#ifdef CONFIG_MIPS_VPE_LOADER_MT ++void *vpe_alloc(void); ++int vpe_start(void *vpe, unsigned long start); ++int vpe_stop(void *vpe); ++int vpe_free(void *vpe); ++#endif /* CONFIG_MIPS_VPE_LOADER_MT */ ++ + #endif /* _ASM_VPE_H */ +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index b630604c577f9f..02aa6a04a21da4 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -690,18 +690,20 @@ unsigned long mips_stack_top(void) + } + + /* Space for the VDSO, data page & GIC user page */ +- top -= PAGE_ALIGN(current->thread.abi->vdso->size); +- top -= PAGE_SIZE; +- top -= mips_gic_present() ? PAGE_SIZE : 0; ++ if (current->thread.abi) { ++ top -= PAGE_ALIGN(current->thread.abi->vdso->size); ++ top -= PAGE_SIZE; ++ top -= mips_gic_present() ? 
PAGE_SIZE : 0; ++ ++ /* Space to randomize the VDSO base */ ++ if (current->flags & PF_RANDOMIZE) ++ top -= VDSO_RANDOMIZE_SIZE; ++ } + + /* Space for cache colour alignment */ + if (cpu_has_dc_aliases) + top -= shm_align_mask + 1; + +- /* Space to randomize the VDSO base */ +- if (current->flags & PF_RANDOMIZE) +- top -= VDSO_RANDOMIZE_SIZE; +- + return top; + } + +diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c +index 1187729d8cbb1b..357543996ee661 100644 +--- a/arch/mips/lantiq/falcon/sysctrl.c ++++ b/arch/mips/lantiq/falcon/sysctrl.c +@@ -214,19 +214,16 @@ void __init ltq_soc_init(void) + of_node_put(np_syseth); + of_node_put(np_sysgpe); + +- if ((request_mem_region(res_status.start, resource_size(&res_status), +- res_status.name) < 0) || +- (request_mem_region(res_ebu.start, resource_size(&res_ebu), +- res_ebu.name) < 0) || +- (request_mem_region(res_sys[0].start, +- resource_size(&res_sys[0]), +- res_sys[0].name) < 0) || +- (request_mem_region(res_sys[1].start, +- resource_size(&res_sys[1]), +- res_sys[1].name) < 0) || +- (request_mem_region(res_sys[2].start, +- resource_size(&res_sys[2]), +- res_sys[2].name) < 0)) ++ if ((!request_mem_region(res_status.start, resource_size(&res_status), ++ res_status.name)) || ++ (!request_mem_region(res_ebu.start, resource_size(&res_ebu), ++ res_ebu.name)) || ++ (!request_mem_region(res_sys[0].start, resource_size(&res_sys[0]), ++ res_sys[0].name)) || ++ (!request_mem_region(res_sys[1].start, resource_size(&res_sys[1]), ++ res_sys[1].name)) || ++ (!request_mem_region(res_sys[2].start, resource_size(&res_sys[2]), ++ res_sys[2].name))) + pr_err("Failed to request core resources"); + + status_membase = ioremap(res_status.start, +diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile +index 920db57b6b4cc8..5f0a1f1b00a78b 100644 +--- a/arch/parisc/Makefile ++++ b/arch/parisc/Makefile +@@ -39,7 +39,9 @@ endif + + export LD_BFD + +-# Set default 32 bits cross compilers for vdso ++# Set default 32 bits cross compilers for vdso. ++# This means that for 64BIT, both the 64-bit tools and the 32-bit tools ++# need to be in the path. + CC_ARCHES_32 = hppa hppa2.0 hppa1.1 + CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux + CROSS32_COMPILE := $(call cc-cross-prefix, \ +@@ -139,7 +141,7 @@ palo lifimage: vmlinuz + fi + @if test ! 
-f "$(PALOCONF)"; then \ + cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \ +- echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \ ++ echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \ + echo 'You should check it and re-run "make palo".'; \ + echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \ + false; \ +diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h +index babf65751e8180..3446a5e2520b22 100644 +--- a/arch/parisc/include/asm/pgtable.h ++++ b/arch/parisc/include/asm/pgtable.h +@@ -276,7 +276,7 @@ extern unsigned long *empty_zero_page; + #define pte_none(x) (pte_val(x) == 0) + #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) + #define pte_user(x) (pte_val(x) & _PAGE_USER) +-#define pte_clear(mm, addr, xp) set_pte(xp, __pte(0)) ++#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0)) + + #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) + #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) +@@ -398,6 +398,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + } + } + #define set_ptes set_ptes ++#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) + + /* Used for deferring calls to flush_dcache_page() */ + +@@ -462,7 +463,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned + if (!pte_young(pte)) { + return 0; + } +- set_pte(ptep, pte_mkold(pte)); ++ set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); + return 1; + } + +@@ -472,7 +473,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *pt + struct mm_struct; + static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) + { +- set_pte(ptep, pte_wrprotect(*ptep)); ++ set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); + } + + #define pte_same(A,B) (pte_val(A) == pte_val(B)) +diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h +index 51f40eaf778065..1013eeba31e5bb 100644 +--- a/arch/parisc/include/asm/special_insns.h ++++ b/arch/parisc/include/asm/special_insns.h +@@ -32,6 +32,34 @@ + pa; \ + }) + ++/** ++ * prober_user() - Probe user read access ++ * @sr: Space regster. ++ * @va: Virtual address. ++ * ++ * Return: Non-zero if address is accessible. ++ * ++ * Due to the way _PAGE_READ is handled in TLB entries, we need ++ * a special check to determine whether a user address is accessible. ++ * The ldb instruction does the initial access check. If it is ++ * successful, the probe instruction checks user access rights. 
++ */ ++#define prober_user(sr, va) ({ \ ++ unsigned long read_allowed; \ ++ __asm__ __volatile__( \ ++ "copy %%r0,%0\n" \ ++ "8:\tldb 0(%%sr%1,%2),%%r0\n" \ ++ "\tproberi (%%sr%1,%2),%3,%0\n" \ ++ "9:\n" \ ++ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ ++ "or %%r0,%%r0,%%r0") \ ++ : "=&r" (read_allowed) \ ++ : "i" (sr), "r" (va), "i" (PRIV_USER) \ ++ : "memory" \ ++ ); \ ++ read_allowed; \ ++}) ++ + #define CR_EIEM 15 /* External Interrupt Enable Mask */ + #define CR_CR16 16 /* CR16 Interval Timer */ + #define CR_EIRR 23 /* External Interrupt Request Register */ +diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h +index 88d0ae5769dde5..6c531d2c847eb1 100644 +--- a/arch/parisc/include/asm/uaccess.h ++++ b/arch/parisc/include/asm/uaccess.h +@@ -42,9 +42,24 @@ + __gu_err; \ + }) + +-#define __get_user(val, ptr) \ +-({ \ +- __get_user_internal(SR_USER, val, ptr); \ ++#define __probe_user_internal(sr, error, ptr) \ ++({ \ ++ __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \ ++ "\tcmpiclr,= 1,%0,%0\n" \ ++ "\tldi %4,%0\n" \ ++ : "=r"(error) \ ++ : "i"(sr), "r"(ptr), "i"(PRIV_USER), \ ++ "i"(-EFAULT)); \ ++}) ++ ++#define __get_user(val, ptr) \ ++({ \ ++ register long __gu_err; \ ++ \ ++ __gu_err = __get_user_internal(SR_USER, val, ptr); \ ++ if (likely(!__gu_err)) \ ++ __probe_user_internal(SR_USER, __gu_err, ptr); \ ++ __gu_err; \ + }) + + #define __get_user_asm(sr, val, ldx, ptr) \ +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c +index f7953b0391cf60..1898956a70f25c 100644 +--- a/arch/parisc/kernel/cache.c ++++ b/arch/parisc/kernel/cache.c +@@ -425,7 +425,7 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) + return ptep; + } + +-static inline bool pte_needs_flush(pte_t pte) ++static inline bool pte_needs_cache_flush(pte_t pte) + { + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) + == (_PAGE_PRESENT | _PAGE_ACCESSED); +@@ -630,7 +630,7 @@ static void flush_cache_page_if_present(struct vm_area_struct *vma, + ptep = get_ptep(vma->vm_mm, vmaddr); + if (ptep) { + pte = ptep_get(ptep); +- needs_flush = pte_needs_flush(pte); ++ needs_flush = pte_needs_cache_flush(pte); + pte_unmap(ptep); + } + if (needs_flush) +@@ -841,7 +841,7 @@ void flush_cache_vmap(unsigned long start, unsigned long end) + } + + vm = find_vm_area((void *)start); +- if (WARN_ON_ONCE(!vm)) { ++ if (!vm) { + flush_cache_all(); + return; + } +diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S +index ea57bcc21dc5fe..f4bf61a34701e5 100644 +--- a/arch/parisc/kernel/entry.S ++++ b/arch/parisc/kernel/entry.S +@@ -499,6 +499,12 @@ + * this happens is quite subtle, read below */ + .macro make_insert_tlb spc,pte,prot,tmp + space_to_prot \spc \prot /* create prot id from space */ ++ ++#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT ++ /* need to drop DMB bit, as it's used as SPECIAL flag */ ++ depi 0,_PAGE_SPECIAL_BIT,1,\pte ++#endif ++ + /* The following is the real subtlety. 
This is depositing + * T <-> _PAGE_REFTRAP + * D <-> _PAGE_DIRTY +@@ -511,17 +517,18 @@ + * Finally, _PAGE_READ goes in the top bit of PL1 (so we + * trigger an access rights trap in user space if the user + * tries to read an unreadable page */ +-#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT +- /* need to drop DMB bit, as it's used as SPECIAL flag */ +- depi 0,_PAGE_SPECIAL_BIT,1,\pte +-#endif + depd \pte,8,7,\prot + + /* PAGE_USER indicates the page can be read with user privileges, + * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 +- * contains _PAGE_READ) */ ++ * contains _PAGE_READ). While the kernel can't directly write ++ * user pages which have _PAGE_WRITE zero, it can read pages ++ * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel ++ * exception fault handler doesn't trigger when reading pages ++ * that aren't user read accessible */ + extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 + depdi 7,11,3,\prot ++ + /* If we're a gateway page, drop PL2 back to zero for promotion + * to kernel privilege (so we can execute the page as kernel). + * Any privilege promotion page always denys read and write */ +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index 0fa81bf1466b15..f58c4bccfbce0e 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -613,6 +613,9 @@ lws_compare_and_swap32: + lws_compare_and_swap: + /* Trigger memory reference interruptions without writing to memory */ + 1: ldw 0(%r26), %r28 ++ proberi (%r26), PRIV_USER, %r28 ++ comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */ ++ nop + 2: stbys,e %r0, 0(%r26) + + /* Calculate 8-bit hash index from virtual address */ +@@ -767,6 +770,9 @@ cas2_lock_start: + copy %r26, %r28 + depi_safe 0, 31, 2, %r28 + 10: ldw 0(%r28), %r1 ++ proberi (%r28), PRIV_USER, %r1 ++ comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */ ++ nop + 11: stbys,e %r0, 0(%r28) + + /* Calculate 8-bit hash index from virtual address */ +@@ -951,41 +957,47 @@ atomic_xchg_begin: + + /* 8-bit exchange */ + 1: ldb 0(%r24), %r20 ++ proberi (%r24), PRIV_USER, %r20 ++ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ ++ nop + copy %r23, %r20 + depi_safe 0, 31, 2, %r20 + b atomic_xchg_start + 2: stbys,e %r0, 0(%r20) +- nop +- nop +- nop + + /* 16-bit exchange */ + 3: ldh 0(%r24), %r20 ++ proberi (%r24), PRIV_USER, %r20 ++ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ ++ nop + copy %r23, %r20 + depi_safe 0, 31, 2, %r20 + b atomic_xchg_start + 4: stbys,e %r0, 0(%r20) +- nop +- nop +- nop + + /* 32-bit exchange */ + 5: ldw 0(%r24), %r20 ++ proberi (%r24), PRIV_USER, %r20 ++ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ ++ nop + b atomic_xchg_start + 6: stbys,e %r0, 0(%r23) + nop + nop +- nop +- nop +- nop + + /* 64-bit exchange */ + #ifdef CONFIG_64BIT + 7: ldd 0(%r24), %r20 ++ proberi (%r24), PRIV_USER, %r20 ++ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ ++ nop + 8: stdby,e %r0, 0(%r23) + #else + 7: ldw 0(%r24), %r20 + 8: ldw 4(%r24), %r20 ++ proberi (%r24), PRIV_USER, %r20 ++ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ ++ nop + copy %r23, %r20 + depi_safe 0, 31, 2, %r20 + 9: stbys,e %r0, 0(%r20) +diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c +index 5fc0c852c84c8d..69d65ffab31263 100644 +--- a/arch/parisc/lib/memcpy.c ++++ b/arch/parisc/lib/memcpy.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + #define get_user_space() mfsp(SR_USER) + #define get_kernel_space() 
SR_KERNEL +@@ -32,9 +33,25 @@ EXPORT_SYMBOL(raw_copy_to_user); + unsigned long raw_copy_from_user(void *dst, const void __user *src, + unsigned long len) + { ++ unsigned long start = (unsigned long) src; ++ unsigned long end = start + len; ++ unsigned long newlen = len; ++ + mtsp(get_user_space(), SR_TEMP1); + mtsp(get_kernel_space(), SR_TEMP2); +- return pa_memcpy(dst, (void __force *)src, len); ++ ++ /* Check region is user accessible */ ++ if (start) ++ while (start < end) { ++ if (!prober_user(SR_TEMP1, start)) { ++ newlen = (start - (unsigned long) src); ++ break; ++ } ++ start += PAGE_SIZE; ++ /* align to page boundry which may have different permission */ ++ start = PAGE_ALIGN_DOWN(start); ++ } ++ return len - newlen + pa_memcpy(dst, (void __force *)src, newlen); + } + EXPORT_SYMBOL(raw_copy_from_user); + +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index c39de84e98b051..f1785640b049b5 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -363,6 +363,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, + mmap_read_unlock(mm); + + bad_area_nosemaphore: ++ if (!user_mode(regs) && fixup_exception(regs)) { ++ return; ++ } ++ + if (user_mode(regs)) { + int signo, si_code; + +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile +index 968aee2025b819..99c39c9b2a71f6 100644 +--- a/arch/powerpc/boot/Makefile ++++ b/arch/powerpc/boot/Makefile +@@ -72,6 +72,7 @@ BOOTCPPFLAGS := -nostdinc $(LINUXINCLUDE) + BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) + + BOOTCFLAGS := $(BOOTTARGETFLAGS) \ ++ -std=gnu11 \ + -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ + -fno-strict-aliasing -O2 \ + -msoft-float -mno-altivec -mno-vsx \ +diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h +index f8ce178b43b783..34abf8bea2ccd6 100644 +--- a/arch/powerpc/include/asm/floppy.h ++++ b/arch/powerpc/include/asm/floppy.h +@@ -144,9 +144,12 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) + bus_addr = 0; + } + +- if (!bus_addr) /* need to map it */ ++ if (!bus_addr) { /* need to map it */ + bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size, + dir); ++ if (dma_mapping_error(&isa_bridge_pcidev->dev, bus_addr)) ++ return -ENOMEM; ++ } + + /* remember this one as prev */ + prev_addr = addr; +diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c +index 4a25b6b4861582..f1e353fc6594e9 100644 +--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c ++++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c +@@ -240,10 +240,8 @@ static int mpc512x_lpbfifo_kick(void) + dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + + /* Make DMA channel work with LPB FIFO data register */ +- if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) { +- ret = -EINVAL; +- goto err_dma_prep; +- } ++ if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) ++ return -EINVAL; + + sg_init_table(&sg, 1); + +diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h +index e316ab3b77f341..61183688bdd54e 100644 +--- a/arch/riscv/include/asm/topology.h ++++ b/arch/riscv/include/asm/topology.h +@@ -9,6 +9,7 @@ + #define arch_set_freq_scale topology_set_freq_scale + #define arch_scale_freq_capacity topology_get_freq_scale + #define arch_scale_freq_invariant topology_scale_freq_invariant ++#define arch_scale_freq_ref topology_get_freq_ref + + /* Replace task scheduler's default cpu-invariant accounting */ + #define 
arch_scale_cpu_capacity topology_get_cpu_scale +diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c +index 4024599eb448ea..3612af9b4890b5 100644 +--- a/arch/s390/hypfs/hypfs_dbfs.c ++++ b/arch/s390/hypfs/hypfs_dbfs.c +@@ -6,6 +6,7 @@ + * Author(s): Michael Holzheu + */ + ++#include + #include + #include "hypfs.h" + +@@ -64,24 +65,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + long rc; + + mutex_lock(&df->lock); +- if (df->unlocked_ioctl) +- rc = df->unlocked_ioctl(file, cmd, arg); +- else +- rc = -ENOTTY; ++ rc = df->unlocked_ioctl(file, cmd, arg); + mutex_unlock(&df->lock); + return rc; + } + +-static const struct file_operations dbfs_ops = { ++static const struct file_operations dbfs_ops_ioctl = { + .read = dbfs_read, + .llseek = no_llseek, + .unlocked_ioctl = dbfs_ioctl, + }; + ++static const struct file_operations dbfs_ops = { ++ .read = dbfs_read, ++}; ++ + void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) + { +- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, +- &dbfs_ops); ++ const struct file_operations *fops = &dbfs_ops; ++ ++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS)) ++ fops = &dbfs_ops_ioctl; ++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops); + mutex_init(&df->lock); + } + +diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h +index 4d646659a5f582..f7a44af12c2f4c 100644 +--- a/arch/s390/include/asm/timex.h ++++ b/arch/s390/include/asm/timex.h +@@ -192,13 +192,6 @@ static inline unsigned long get_tod_clock_fast(void) + asm volatile("stckf %0" : "=Q" (clk) : : "cc"); + return clk; + } +- +-static inline cycles_t get_cycles(void) +-{ +- return (cycles_t) get_tod_clock() >> 2; +-} +-#define get_cycles get_cycles +- + int get_phys_clock(unsigned long *clock); + void init_cpu_timer(void); + +@@ -226,6 +219,12 @@ static inline unsigned long get_tod_clock_monotonic(void) + return tod; + } + ++static inline cycles_t get_cycles(void) ++{ ++ return (cycles_t)get_tod_clock_monotonic() >> 2; ++} ++#define get_cycles get_cycles ++ + /** + * tod_to_ns - convert a TOD format value to nanoseconds + * @todval: to be converted TOD format value +diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c +index d34d3548c046c2..086d3e3ffdea55 100644 +--- a/arch/s390/kernel/time.c ++++ b/arch/s390/kernel/time.c +@@ -579,7 +579,7 @@ static int stp_sync_clock(void *data) + atomic_dec(&sync->cpus); + /* Wait for in_sync to be set. */ + while (READ_ONCE(sync->in_sync) == 0) +- __udelay(1); ++ ; + } + if (sync->in_sync != 1) + /* Didn't work. Clear per-cpu in sync bit again. 
*/ +diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c +index b51666967aa1fd..4721ada81a02da 100644 +--- a/arch/s390/mm/dump_pagetables.c ++++ b/arch/s390/mm/dump_pagetables.c +@@ -249,11 +249,9 @@ static int ptdump_show(struct seq_file *m, void *v) + .marker = address_markers, + }; + +- get_online_mems(); + mutex_lock(&cpa_mutex); + ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); + mutex_unlock(&cpa_mutex); +- put_online_mems(); + return 0; + } + DEFINE_SHOW_ATTRIBUTE(ptdump); +diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c +index 9355fbe5f51e94..2f534b26fda6c5 100644 +--- a/arch/s390/mm/pgalloc.c ++++ b/arch/s390/mm/pgalloc.c +@@ -456,11 +456,6 @@ void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) + page = virt_to_page(pgtable); + SetPageActive(page); + page_table_free(mm, (unsigned long *)pgtable); +- /* +- * page_table_free() does not do the pgste gmap_unlink() which +- * page_table_free_rcu() does: warn us if pgste ever reaches here. +- */ +- WARN_ON_ONCE(mm_has_pgste(mm)); + } + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h +index c7b4b49826a2aa..40d823f36c0941 100644 +--- a/arch/um/include/asm/thread_info.h ++++ b/arch/um/include/asm/thread_info.h +@@ -68,7 +68,11 @@ static inline struct thread_info *current_thread_info(void) + #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) + #define _TIF_MEMDIE (1 << TIF_MEMDIE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) ++#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) + #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) + ++#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ ++ _TIF_NOTIFY_RESUME) ++ + #endif +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c +index afe67d81614676..e7fbf610bda868 100644 +--- a/arch/um/kernel/process.c ++++ b/arch/um/kernel/process.c +@@ -98,14 +98,18 @@ void *__switch_to(struct task_struct *from, struct task_struct *to) + void interrupt_end(void) + { + struct pt_regs *regs = ¤t->thread.regs; +- +- if (need_resched()) +- schedule(); +- if (test_thread_flag(TIF_SIGPENDING) || +- test_thread_flag(TIF_NOTIFY_SIGNAL)) +- do_signal(regs); +- if (test_thread_flag(TIF_NOTIFY_RESUME)) +- resume_user_mode_work(regs); ++ unsigned long thread_flags; ++ ++ thread_flags = read_thread_flags(); ++ while (thread_flags & _TIF_WORK_MASK) { ++ if (thread_flags & _TIF_NEED_RESCHED) ++ schedule(); ++ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) ++ do_signal(regs); ++ if (thread_flags & _TIF_NOTIFY_RESUME) ++ resume_user_mode_work(regs); ++ thread_flags = read_thread_flags(); ++ } + } + + int get_current_pid(void) +diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h +index e59ded9761663e..a0a4fc684e63b3 100644 +--- a/arch/x86/include/asm/kvm-x86-ops.h ++++ b/arch/x86/include/asm/kvm-x86-ops.h +@@ -48,7 +48,6 @@ KVM_X86_OP(set_idt) + KVM_X86_OP(get_gdt) + KVM_X86_OP(set_gdt) + KVM_X86_OP(sync_dirty_debug_regs) +-KVM_X86_OP(set_dr6) + KVM_X86_OP(set_dr7) + KVM_X86_OP(cache_reg) + KVM_X86_OP(get_rflags) +@@ -102,7 +101,6 @@ KVM_X86_OP(write_tsc_multiplier) + KVM_X86_OP(get_exit_info) + KVM_X86_OP(check_intercept) + KVM_X86_OP(handle_exit_irqoff) +-KVM_X86_OP(request_immediate_exit) + KVM_X86_OP(sched_in) + KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging) + KVM_X86_OP_OPTIONAL(vcpu_blocking) +diff --git a/arch/x86/include/asm/kvm_host.h 
b/arch/x86/include/asm/kvm_host.h +index 5dfb8cc9616e55..813887324d52da 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -733,6 +733,7 @@ struct kvm_vcpu_arch { + u32 pkru; + u32 hflags; + u64 efer; ++ u64 host_debugctl; + u64 apic_base; + struct kvm_lapic *apic; /* kernel irqchip context */ + bool load_eoi_exitmap_pending; +@@ -1549,6 +1550,12 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical) + return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL; + } + ++enum kvm_x86_run_flags { ++ KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0), ++ KVM_RUN_LOAD_GUEST_DR6 = BIT(1), ++ KVM_RUN_LOAD_DEBUGCTL = BIT(2), ++}; ++ + struct kvm_x86_ops { + const char *name; + +@@ -1574,6 +1581,12 @@ struct kvm_x86_ops { + void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); + void (*vcpu_put)(struct kvm_vcpu *vcpu); + ++ /* ++ * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to ++ * match the host's value even while the guest is active. ++ */ ++ const u64 HOST_OWNED_DEBUGCTL; ++ + void (*update_exception_bitmap)(struct kvm_vcpu *vcpu); + int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); + int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); +@@ -1595,7 +1608,6 @@ struct kvm_x86_ops { + void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); + void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); + void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu); +- void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value); + void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); + void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); + unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); +@@ -1623,7 +1635,8 @@ struct kvm_x86_ops { + void (*flush_tlb_guest)(struct kvm_vcpu *vcpu); + + int (*vcpu_pre_run)(struct kvm_vcpu *vcpu); +- enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu); ++ enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu, ++ u64 run_flags); + int (*handle_exit)(struct kvm_vcpu *vcpu, + enum exit_fastpath_completion exit_fastpath); + int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); +@@ -1657,7 +1670,7 @@ struct kvm_x86_ops { + bool allow_apicv_in_x2apic_without_x2apic_virtualization; + void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); + void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); +- void (*hwapic_isr_update)(int isr); ++ void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); + bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); + void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); + void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); +@@ -1693,8 +1706,6 @@ struct kvm_x86_ops { + struct x86_exception *exception); + void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu); + +- void (*request_immediate_exit)(struct kvm_vcpu *vcpu); +- + void (*sched_in)(struct kvm_vcpu *kvm, int cpu); + + /* +@@ -2180,7 +2191,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); + + int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); + int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); +-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu); + + void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, + u32 size); +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 03385545758159..723e48b57bd0f8 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -380,6 +380,7 @@ + #define 
DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12) + #define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14 + #define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT) ++#define DEBUGCTLMSR_RTM_DEBUG BIT(15) + + #define MSR_PEBS_FRONTEND 0x000003f7 + +diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h +index 97771b9d33af30..2759524b8ffc3a 100644 +--- a/arch/x86/include/asm/xen/hypercall.h ++++ b/arch/x86/include/asm/xen/hypercall.h +@@ -94,12 +94,13 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func); + #ifdef MODULE + #define __ADDRESSABLE_xen_hypercall + #else +-#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall) ++#define __ADDRESSABLE_xen_hypercall \ ++ __stringify(.global STATIC_CALL_KEY(xen_hypercall);) + #endif + + #define __HYPERCALL \ + __ADDRESSABLE_xen_hypercall \ +- "call __SCT__xen_hypercall" ++ __stringify(call STATIC_CALL_TRAMP(xen_hypercall)) + + #define __HYPERCALL_ENTRY(x) "a" (x) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index c4d5ac99c6af84..332c6f24280dde 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -71,10 +71,9 @@ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk; + + static void __init set_return_thunk(void *thunk) + { +- if (x86_return_thunk != __x86_return_thunk) +- pr_warn("x86/bugs: return thunk changed\n"); +- + x86_return_thunk = thunk; ++ ++ pr_info("active return thunk: %ps\n", thunk); + } + + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ +diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c +index 6e738759779e81..5c594781b463db 100644 +--- a/arch/x86/kernel/cpu/hygon.c ++++ b/arch/x86/kernel/cpu/hygon.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include "cpu.h" + +@@ -240,6 +241,8 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) + x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; + } + } ++ ++ resctrl_cpu_detect(c); + } + + static void early_init_hygon(struct cpuinfo_x86 *c) +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c +index bd3fbd5be5da6e..223f4fa6a849b8 100644 +--- a/arch/x86/kvm/hyperv.c ++++ b/arch/x86/kvm/hyperv.c +@@ -1929,6 +1929,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) + if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) + goto out_flush_all; + ++ if (is_noncanonical_address(entries[i], vcpu)) ++ continue; ++ + /* + * Lower 12 bits of 'address' encode the number of additional + * pages to flush. +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 66c7f2367bb34e..ba1c2a7f74f766 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -750,7 +750,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) + * just set SVI. + */ + if (unlikely(apic->apicv_active)) +- static_call_cond(kvm_x86_hwapic_isr_update)(vec); ++ static_call_cond(kvm_x86_hwapic_isr_update)(apic->vcpu, vec); + else { + ++apic->isr_count; + BUG_ON(apic->isr_count > MAX_APIC_VECTOR); +@@ -795,7 +795,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) + * and must be left alone. 
+ */ + if (unlikely(apic->apicv_active)) +- static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic)); ++ static_call_cond(kvm_x86_hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic)); + else { + --apic->isr_count; + BUG_ON(apic->isr_count < 0); +@@ -803,6 +803,17 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) + } + } + ++void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_lapic *apic = vcpu->arch.apic; ++ ++ if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active) ++ return; ++ ++ static_call(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); ++} ++EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr); ++ + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) + { + /* This may race with setting of irr in __apic_accept_irq() and +@@ -2772,7 +2783,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) + if (apic->apicv_active) { + static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); + static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1); +- static_call_cond(kvm_x86_hwapic_isr_update)(-1); ++ static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, -1); + } + + vcpu->arch.apic_arb_prio = 0; +@@ -3072,7 +3083,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) + if (apic->apicv_active) { + static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); + static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); +- static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic)); ++ static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); + } + kvm_make_request(KVM_REQ_EVENT, vcpu); + if (ioapic_in_kernel(vcpu->kvm)) +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h +index 0a0ea4b5dd8ce7..0dd069b8d6d11f 100644 +--- a/arch/x86/kvm/lapic.h ++++ b/arch/x86/kvm/lapic.h +@@ -124,6 +124,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); + int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); + int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); + enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu); ++void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu); + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); + + u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 86c50747e15837..abff6d45ae3334 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -4157,6 +4157,9 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) + + static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) + { ++ if (is_guest_mode(vcpu)) ++ return EXIT_FASTPATH_NONE; ++ + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && + to_svm(vcpu)->vmcb->control.exit_info_1) + return handle_fastpath_set_msr_irqoff(vcpu); +@@ -4170,6 +4173,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in + + guest_state_enter_irqoff(); + ++ /* ++ * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of ++ * VMRUN controls whether or not physical IRQs are masked (KVM always ++ * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the ++ * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow ++ * into guest state if delivery of an event during VMRUN triggers a ++ * #VMEXIT, and the guest_state transitions already tell lockdep that ++ * IRQs are being enabled/disabled. Note! 
GIF=0 for the entirety of ++ * this path, so IRQs aren't actually unmasked while running host code. ++ */ ++ raw_local_irq_enable(); ++ + amd_clear_divider(); + + if (sev_es_guest(vcpu->kvm)) +@@ -4177,15 +4192,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in + else + __svm_vcpu_run(svm, spec_ctrl_intercepted); + ++ raw_local_irq_disable(); ++ + guest_state_exit_irqoff(); + } + +-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ++static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) + { ++ bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT; + struct vcpu_svm *svm = to_svm(vcpu); + bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL); + +- trace_kvm_entry(vcpu); ++ trace_kvm_entry(vcpu, force_immediate_exit); + + svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; + svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; +@@ -4204,9 +4222,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) + * is enough to force an immediate vmexit. + */ + disable_nmi_singlestep(svm); +- smp_send_reschedule(vcpu->cpu); ++ force_immediate_exit = true; + } + ++ if (force_immediate_exit) ++ smp_send_reschedule(vcpu->cpu); ++ + pre_svm_run(vcpu); + + sync_lapic_to_cr8(vcpu); +@@ -4220,10 +4241,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) + svm_hv_update_vp_id(svm->vmcb, vcpu); + + /* +- * Run with all-zero DR6 unless needed, so that we can get the exact cause +- * of a #DB. ++ * Run with all-zero DR6 unless the guest can write DR6 freely, so that ++ * KVM can get the exact cause of a #DB. Note, loading guest DR6 from ++ * KVM's snapshot is only necessary when DR accesses won't exit. + */ +- if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) ++ if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6)) ++ svm_set_dr6(vcpu, vcpu->arch.dr6); ++ else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) + svm_set_dr6(vcpu, DR6_ACTIVE_LOW); + + clgi(); +@@ -4300,9 +4324,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) + + svm_complete_interrupts(vcpu); + +- if (is_guest_mode(vcpu)) +- return EXIT_FASTPATH_NONE; +- + return svm_exit_handlers_fastpath(vcpu); + } + +@@ -5003,7 +5024,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { + .set_idt = svm_set_idt, + .get_gdt = svm_get_gdt, + .set_gdt = svm_set_gdt, +- .set_dr6 = svm_set_dr6, + .set_dr7 = svm_set_dr7, + .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, + .cache_reg = svm_cache_reg, +@@ -5060,8 +5080,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { + .check_intercept = svm_check_intercept, + .handle_exit_irqoff = svm_handle_exit_irqoff, + +- .request_immediate_exit = __kvm_request_immediate_exit, +- + .sched_in = svm_sched_in, + + .nested_ops = &svm_nested_ops, +diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S +index 56fe34d9397f64..81ecb9e1101d78 100644 +--- a/arch/x86/kvm/svm/vmenter.S ++++ b/arch/x86/kvm/svm/vmenter.S +@@ -171,12 +171,8 @@ SYM_FUNC_START(__svm_vcpu_run) + VM_CLEAR_CPU_BUFFERS + + /* Enter guest mode */ +- sti +- + 3: vmrun %_ASM_AX + 4: +- cli +- + /* Pop @svm to RAX while it's the only available register. */ + pop %_ASM_AX + +@@ -341,11 +337,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) + VM_CLEAR_CPU_BUFFERS + + /* Enter guest mode */ +- sti +- + 1: vmrun %_ASM_AX +- +-2: cli ++2: + + /* Pop @svm to RDI, guest registers have been saved already. 
*/ + pop %_ASM_DI +diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h +index b82e6ed4f02417..c6b4b1728006d5 100644 +--- a/arch/x86/kvm/trace.h ++++ b/arch/x86/kvm/trace.h +@@ -15,20 +15,23 @@ + * Tracepoint for guest mode entry. + */ + TRACE_EVENT(kvm_entry, +- TP_PROTO(struct kvm_vcpu *vcpu), +- TP_ARGS(vcpu), ++ TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit), ++ TP_ARGS(vcpu, force_immediate_exit), + + TP_STRUCT__entry( + __field( unsigned int, vcpu_id ) + __field( unsigned long, rip ) ++ __field( bool, immediate_exit ) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->rip = kvm_rip_read(vcpu); ++ __entry->immediate_exit = force_immediate_exit; + ), + +- TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) ++ TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip, ++ __entry->immediate_exit ? "[immediate exit]" : "") + ); + + /* +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index d3e346a574f11b..d2fa192d7ce776 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -2564,10 +2564,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { + kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); +- vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); ++ vmx_guest_debugctl_write(vcpu, vmcs12->guest_ia32_debugctl & ++ vmx_get_supported_debugctl(vcpu, false)); + } else { + kvm_set_dr(vcpu, 7, vcpu->arch.dr7); +- vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); ++ vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl); + } + if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) +@@ -3045,7 +3046,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, + return -EINVAL; + + if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && +- CC(!kvm_dr7_valid(vmcs12->guest_dr7))) ++ (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || ++ CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false)))) + return -EINVAL; + + if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && +@@ -3431,7 +3433,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, + + if (!vmx->nested.nested_run_pending || + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) +- vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); ++ vmx->nested.pre_vmenter_debugctl = vmx_guest_debugctl_read(); + if (kvm_mpx_supported() && + (!vmx->nested.nested_run_pending || + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) +@@ -4435,6 +4437,12 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) + (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | + (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); + ++ /* ++ * Note! Save DR7, but intentionally don't grab DEBUGCTL from vmcs02. ++ * Writes to DEBUGCTL that aren't intercepted by L1 are immediately ++ * propagated to vmcs12 (see vmx_set_msr()), as the value loaded into ++ * vmcs02 doesn't strictly track vmcs12. 
++ */ + if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) + kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); + +@@ -4625,7 +4633,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, + __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR); + + kvm_set_dr(vcpu, 7, 0x400); +- vmcs_write64(GUEST_IA32_DEBUGCTL, 0); ++ vmx_guest_debugctl_write(vcpu, 0); + + if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, + vmcs12->vm_exit_msr_load_count)) +@@ -4680,6 +4688,9 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) + WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); + } + ++ /* Reload DEBUGCTL to ensure vmcs01 has a fresh FREEZE_IN_SMM value. */ ++ vmx_reload_guest_debugctl(vcpu); ++ + /* + * Note that calling vmx_set_{efer,cr0,cr4} is important as they + * handle a variety of side effects to KVM's software model. +@@ -4900,6 +4911,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, + kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); + } + ++ if (vmx->nested.update_vmcs01_hwapic_isr) { ++ vmx->nested.update_vmcs01_hwapic_isr = false; ++ kvm_apic_update_hwapic_isr(vcpu); ++ } ++ + if ((vm_exit_reason != -1) && + (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) + vmx->nested.need_vmcs12_to_shadow_sync = true; +diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c +index 48a2f77f62ef35..50364e00e4e90d 100644 +--- a/arch/x86/kvm/vmx/pmu_intel.c ++++ b/arch/x86/kvm/vmx/pmu_intel.c +@@ -633,11 +633,11 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) + */ + static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu) + { +- u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL); ++ u64 data = vmx_guest_debugctl_read(); + + if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) { + data &= ~DEBUGCTLMSR_LBR; +- vmcs_write64(GUEST_IA32_DEBUGCTL, data); ++ vmx_guest_debugctl_write(vcpu, data); + } + } + +@@ -707,7 +707,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu) + + if (!lbr_desc->event) { + vmx_disable_lbr_msrs_passthrough(vcpu); +- if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR) ++ if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR) + goto warn; + if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) + goto warn; +@@ -729,7 +729,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu) + + static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) + { +- if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)) ++ if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)) + intel_pmu_release_guest_lbr_event(vcpu); + } + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index e53620e189254b..9b1f22bcb71658 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -49,6 +49,8 @@ + #include + #include + ++#include ++ + #include "capabilities.h" + #include "cpuid.h" + #include "hyperv.h" +@@ -1304,8 +1306,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) + u16 fs_sel, gs_sel; + int i; + +- vmx->req_immediate_exit = false; +- + /* + * Note that guest MSRs to be saved/restored can also be changed + * when guest state is loaded. 
This happens when guest transitions +@@ -1499,13 +1499,9 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, + */ + static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + { +- struct vcpu_vmx *vmx = to_vmx(vcpu); +- + vmx_vcpu_load_vmcs(vcpu, cpu, NULL); + + vmx_vcpu_pi_load(vcpu, cpu); +- +- vmx->host_debugctlmsr = get_debugctlmsr(); + } + + static void vmx_vcpu_put(struct kvm_vcpu *vcpu) +@@ -2128,7 +2124,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; + break; + case MSR_IA32_DEBUGCTLMSR: +- msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); ++ msr_info->data = vmx_guest_debugctl_read(); + break; + default: + find_uret_msr: +@@ -2153,7 +2149,7 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, + return (unsigned long)data; + } + +-static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated) ++u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated) + { + u64 debugctl = 0; + +@@ -2165,9 +2161,25 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated + (host_initiated || intel_pmu_lbr_is_enabled(vcpu))) + debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI; + ++ if (boot_cpu_has(X86_FEATURE_RTM) && ++ (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_RTM))) ++ debugctl |= DEBUGCTLMSR_RTM_DEBUG; ++ + return debugctl; + } + ++bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated) ++{ ++ u64 invalid; ++ ++ invalid = data & ~vmx_get_supported_debugctl(vcpu, host_initiated); ++ if (invalid & (DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) { ++ kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data); ++ invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR); ++ } ++ return !invalid; ++} ++ + /* + * Writes msr value into the appropriate "register". + * Returns 0 on success, non-0 otherwise. 
+@@ -2236,29 +2248,22 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + } + vmcs_writel(GUEST_SYSENTER_ESP, data); + break; +- case MSR_IA32_DEBUGCTLMSR: { +- u64 invalid; +- +- invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated); +- if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { +- kvm_pr_unimpl_wrmsr(vcpu, msr_index, data); +- data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); +- invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); +- } +- +- if (invalid) ++ case MSR_IA32_DEBUGCTLMSR: ++ if (!vmx_is_valid_debugctl(vcpu, data, msr_info->host_initiated)) + return 1; + ++ data &= vmx_get_supported_debugctl(vcpu, msr_info->host_initiated); ++ + if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & + VM_EXIT_SAVE_DEBUG_CONTROLS) + get_vmcs12(vcpu)->guest_ia32_debugctl = data; + +- vmcs_write64(GUEST_IA32_DEBUGCTL, data); ++ vmx_guest_debugctl_write(vcpu, data); ++ + if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && + (data & DEBUGCTLMSR_LBR)) + intel_pmu_create_guest_lbr_event(vcpu); + return 0; +- } + case MSR_IA32_BNDCFGS: + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && +@@ -4822,7 +4827,8 @@ static void init_vmcs(struct vcpu_vmx *vmx) + vmcs_write32(GUEST_SYSENTER_CS, 0); + vmcs_writel(GUEST_SYSENTER_ESP, 0); + vmcs_writel(GUEST_SYSENTER_EIP, 0); +- vmcs_write64(GUEST_IA32_DEBUGCTL, 0); ++ ++ vmx_guest_debugctl_write(&vmx->vcpu, 0); + + if (cpu_has_vmx_tpr_shadow()) { + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); +@@ -5620,12 +5626,6 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) + set_debugreg(DR6_RESERVED, 6); + } + +-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) +-{ +- lockdep_assert_irqs_disabled(); +- set_debugreg(vcpu->arch.dr6, 6); +-} +- + static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) + { + vmcs_writel(GUEST_DR7, val); +@@ -6019,22 +6019,46 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) + return 1; + } + +-static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) ++static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu, ++ bool force_immediate_exit) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + +- if (!vmx->req_immediate_exit && +- !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { +- kvm_lapic_expired_hv_timer(vcpu); ++ /* ++ * In the *extremely* unlikely scenario that this is a spurious VM-Exit ++ * due to the timer expiring while it was "soft" disabled, just eat the ++ * exit and re-enter the guest. ++ */ ++ if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) + return EXIT_FASTPATH_REENTER_GUEST; +- } + +- return EXIT_FASTPATH_NONE; ++ /* ++ * If the timer expired because KVM used it to force an immediate exit, ++ * then mission accomplished. ++ */ ++ if (force_immediate_exit) ++ return EXIT_FASTPATH_EXIT_HANDLED; ++ ++ /* ++ * If L2 is active, go down the slow path as emulating the guest timer ++ * expiration likely requires synthesizing a nested VM-Exit. ++ */ ++ if (is_guest_mode(vcpu)) ++ return EXIT_FASTPATH_NONE; ++ ++ kvm_lapic_expired_hv_timer(vcpu); ++ return EXIT_FASTPATH_REENTER_GUEST; + } + + static int handle_preemption_timer(struct kvm_vcpu *vcpu) + { +- handle_fastpath_preemption_timer(vcpu); ++ /* ++ * This non-fastpath handler is reached if and only if the preemption ++ * timer was being used to emulate a guest timer while L2 is active. ++ * All other scenarios are supposed to be handled in the fastpath. 
++ */
++ WARN_ON_ONCE(!is_guest_mode(vcpu));
++ kvm_lapic_expired_hv_timer(vcpu);
+ return 1;
+ }
+ 
+@@ -6834,11 +6858,27 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+ kvm_release_pfn_clean(pfn);
+ }
+ 
+-static void vmx_hwapic_isr_update(int max_isr)
++static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+ {
+ u16 status;
+ u8 old;
+ 
++ /*
++ * If L2 is active, defer the SVI update until vmcs01 is loaded, as SVI
++ * is only relevant if and only if Virtual Interrupt Delivery is
++ * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's
++ * vAPIC, not L1's vAPIC. KVM must update vmcs01 on the next nested
++ * VM-Exit, otherwise L1 will run with a stale SVI.
++ */
++ if (is_guest_mode(vcpu)) {
++ /*
++ * KVM is supposed to forward intercepted L2 EOIs to L1 if VID
++ * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC.
++ */
++ to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true;
++ return;
++ }
++
+ if (max_isr == -1)
+ max_isr = 0;
+ 
+@@ -7175,13 +7215,13 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+ msrs[i].host, false);
+ }
+ 
+-static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
++static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 tscl;
+ u32 delta_tsc;
+ 
+- if (vmx->req_immediate_exit) {
++ if (force_immediate_exit) {
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
+ vmx->loaded_vmcs->hv_timer_soft_disabled = false;
+ } else if (vmx->hv_deadline_tsc != -1) {
+@@ -7234,13 +7274,22 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
+ barrier_nospec();
+ }
+ 
+-static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
++static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
++ bool force_immediate_exit)
+ {
++ /*
++ * If L2 is active, only some VMX preemption timer exits can be handled
++ * in the fastpath; all other exits must use the slow path.
++ */
++ if (is_guest_mode(vcpu) &&
++ to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER)
++ return EXIT_FASTPATH_NONE;
++
+ switch (to_vmx(vcpu)->exit_reason.basic) {
+ case EXIT_REASON_MSR_WRITE:
+ return handle_fastpath_set_msr_irqoff(vcpu);
+ case EXIT_REASON_PREEMPTION_TIMER:
+- return handle_fastpath_preemption_timer(vcpu);
++ return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
+ default:
+ return EXIT_FASTPATH_NONE;
+ }
+@@ -7300,8 +7349,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ guest_state_exit_irqoff();
+ }
+ 
+-static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
++static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
+ {
++ bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long cr3, cr4;
+ 
+@@ -7327,7 +7377,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ return EXIT_FASTPATH_NONE;
+ }
+ 
+- trace_kvm_entry(vcpu);
++ trace_kvm_entry(vcpu, force_immediate_exit);
+ 
+ if (vmx->ple_window_dirty) {
+ vmx->ple_window_dirty = false;
+@@ -7346,6 +7396,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ vcpu->arch.regs_dirty = 0;
+ 
++ if (run_flags & KVM_RUN_LOAD_GUEST_DR6)
++ set_debugreg(vcpu->arch.dr6, 6);
++
++ if (run_flags & KVM_RUN_LOAD_DEBUGCTL)
++ vmx_reload_guest_debugctl(vcpu);
++
+ /*
+ * Refresh vmcs.HOST_CR3 if necessary.
This must be done immediately + * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time +@@ -7382,7 +7438,9 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) + vmx_passthrough_lbr_msrs(vcpu); + + if (enable_preemption_timer) +- vmx_update_hv_timer(vcpu); ++ vmx_update_hv_timer(vcpu, force_immediate_exit); ++ else if (force_immediate_exit) ++ smp_send_reschedule(vcpu->cpu); + + kvm_wait_lapic_expire(vcpu); + +@@ -7398,8 +7456,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) + } + + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ +- if (vmx->host_debugctlmsr) +- update_debugctlmsr(vmx->host_debugctlmsr); ++ if (vcpu->arch.host_debugctl) ++ update_debugctlmsr(vcpu->arch.host_debugctl); + + #ifndef CONFIG_X86_64 + /* +@@ -7446,10 +7504,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) + vmx_recover_nmi_blocking(vmx); + vmx_complete_interrupts(vmx); + +- if (is_guest_mode(vcpu)) +- return EXIT_FASTPATH_NONE; +- +- return vmx_exit_handlers_fastpath(vcpu); ++ return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit); + } + + static void vmx_vcpu_free(struct kvm_vcpu *vcpu) +@@ -7948,11 +8003,6 @@ static __init void vmx_set_cpu_caps(void) + kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); + } + +-static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) +-{ +- to_vmx(vcpu)->req_immediate_exit = true; +-} +- + static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, + struct x86_instruction_info *info) + { +@@ -8279,6 +8329,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { + .vcpu_load = vmx_vcpu_load, + .vcpu_put = vmx_vcpu_put, + ++ .HOST_OWNED_DEBUGCTL = DEBUGCTLMSR_FREEZE_IN_SMM, ++ + .update_exception_bitmap = vmx_update_exception_bitmap, + .get_msr_feature = vmx_get_msr_feature, + .get_msr = vmx_get_msr, +@@ -8297,7 +8349,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { + .set_idt = vmx_set_idt, + .get_gdt = vmx_get_gdt, + .set_gdt = vmx_set_gdt, +- .set_dr6 = vmx_set_dr6, + .set_dr7 = vmx_set_dr7, + .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, + .cache_reg = vmx_cache_reg, +@@ -8364,8 +8415,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { + .check_intercept = vmx_check_intercept, + .handle_exit_irqoff = vmx_handle_exit_irqoff, + +- .request_immediate_exit = vmx_request_immediate_exit, +- + .sched_in = vmx_sched_in, + + .cpu_dirty_log_size = PML_ENTITY_NUM, +@@ -8623,7 +8672,6 @@ static __init int hardware_setup(void) + if (!enable_preemption_timer) { + vmx_x86_ops.set_hv_timer = NULL; + vmx_x86_ops.cancel_hv_timer = NULL; +- vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; + } + + kvm_caps.supported_mce_cap |= MCG_LMCE_P; +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h +index 6be1627d888e5a..5d73d3e570d782 100644 +--- a/arch/x86/kvm/vmx/vmx.h ++++ b/arch/x86/kvm/vmx/vmx.h +@@ -177,6 +177,7 @@ struct nested_vmx { + bool reload_vmcs01_apic_access_page; + bool update_vmcs01_cpu_dirty_logging; + bool update_vmcs01_apicv_status; ++ bool update_vmcs01_hwapic_isr; + + /* + * Enlightened VMCS has been enabled. 
It does not mean that L1 has to +@@ -330,8 +331,6 @@ struct vcpu_vmx { + unsigned int ple_window; + bool ple_window_dirty; + +- bool req_immediate_exit; +- + /* Support for PML */ + #define PML_ENTITY_NUM 512 + struct page *pml_pg; +@@ -339,8 +338,6 @@ struct vcpu_vmx { + /* apic deadline value in host tsc */ + u64 hv_deadline_tsc; + +- unsigned long host_debugctlmsr; +- + /* + * Only bits masked by msr_ia32_feature_control_valid_bits can be set in + * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included +@@ -432,6 +429,32 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, + + void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu); + ++u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated); ++bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated); ++ ++static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val) ++{ ++ WARN_ON_ONCE(val & DEBUGCTLMSR_FREEZE_IN_SMM); ++ ++ val |= vcpu->arch.host_debugctl & DEBUGCTLMSR_FREEZE_IN_SMM; ++ vmcs_write64(GUEST_IA32_DEBUGCTL, val); ++} ++ ++static inline u64 vmx_guest_debugctl_read(void) ++{ ++ return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~DEBUGCTLMSR_FREEZE_IN_SMM; ++} ++ ++static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu) ++{ ++ u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL); ++ ++ if (!((val ^ vcpu->arch.host_debugctl) & DEBUGCTLMSR_FREEZE_IN_SMM)) ++ return; ++ ++ vmx_guest_debugctl_write(vcpu, val & ~DEBUGCTLMSR_FREEZE_IN_SMM); ++} ++ + /* + * Note, early Intel manuals have the write-low and read-high bitmap offsets + * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 55185670e0e566..af0b2b3bc991e2 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -10505,12 +10505,6 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) + static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); + } + +-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) +-{ +- smp_send_reschedule(vcpu->cpu); +-} +-EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); +- + /* + * Called within kvm->srcu read side. + * Returns 1 to let vcpu_run() continue the guest execution loop without +@@ -10524,6 +10518,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + dm_request_for_irq_injection(vcpu) && + kvm_cpu_accept_dm_intr(vcpu); + fastpath_t exit_fastpath; ++ u64 run_flags, debug_ctl; + + bool req_immediate_exit = false; + +@@ -10756,9 +10751,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + goto cancel_injection; + } + ++ run_flags = 0; + if (req_immediate_exit) { ++ run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT; + kvm_make_request(KVM_REQ_EVENT, vcpu); +- static_call(kvm_x86_request_immediate_exit)(vcpu); + } + + fpregs_assert_state_consistent(); +@@ -10776,11 +10772,23 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + set_debugreg(vcpu->arch.eff_db[3], 3); + /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */ + if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) +- static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6); ++ run_flags |= KVM_RUN_LOAD_GUEST_DR6; + } else if (unlikely(hw_breakpoint_active())) { + set_debugreg(0, 7); + } + ++ /* ++ * Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL ++ * can be modified in IRQ context, e.g. via SMP function calls. Inform ++ * vendor code if any host-owned bits were changed, e.g. 
so that the
++ * value loaded into hardware while running the guest can be updated.
++ */
++ debug_ctl = get_debugctlmsr();
++ if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
++ !vcpu->arch.guest_state_protected)
++ run_flags |= KVM_RUN_LOAD_DEBUGCTL;
++ vcpu->arch.host_debugctl = debug_ctl;
++
+ guest_timing_enter_irqoff();
+ 
+ for (;;) {
+@@ -10793,7 +10801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
+ (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
+ 
+- exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
++ exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, run_flags);
+ if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+ break;
+ 
+@@ -10805,6 +10813,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ break;
+ }
+ 
++ run_flags = 0;
++
+ /* Note, VM-Exits that go down the "slow" path are accounted below. */
+ ++vcpu->stat.exits;
+ }
+@@ -13256,16 +13266,22 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ {
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
++ struct kvm *kvm = irqfd->kvm;
+ int ret;
+ 
+- irqfd->producer = prod;
+ kvm_arch_start_assignment(irqfd->kvm);
++
++ spin_lock_irq(&kvm->irqfds.lock);
++ irqfd->producer = prod;
++
+ ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm,
+ prod->irq, irqfd->gsi, 1);
+-
+ if (ret)
+ kvm_arch_end_assignment(irqfd->kvm);
+ 
++ spin_unlock_irq(&kvm->irqfds.lock);
++
++
+ return ret;
+ }
+ 
+@@ -13275,9 +13291,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ int ret;
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
++ struct kvm *kvm = irqfd->kvm;
+ 
+ WARN_ON(irqfd->producer != prod);
+- irqfd->producer = NULL;
+ 
+ /*
+ * When producer of consumer is unregistered, we change back to
+@@ -13285,11 +13301,17 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ * when the irq is masked/disabled or the consumer side (KVM
+ * int this case doesn't want to receive the interrupts.
+ */
++ spin_lock_irq(&kvm->irqfds.lock);
++ irqfd->producer = NULL;
++
+ ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+ if (ret)
+ printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
+ " fails: %d\n", irqfd->consumer.token, ret);
+ 
++ spin_unlock_irq(&kvm->irqfds.lock);
++
++
+ kvm_arch_end_assignment(irqfd->kvm);
+ }
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 4f25d2c4bc7055..923b7d91e6dc5d 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -775,6 +775,15 @@ void submit_bio_noacct(struct bio *bio)
+ bio_clear_polled(bio);
+ 
+ switch (bio_op(bio)) {
++ case REQ_OP_READ:
++ case REQ_OP_WRITE:
++ break;
++ case REQ_OP_FLUSH:
++ /*
++ * REQ_OP_FLUSH can't be submitted through bios, it is only
++ * synthesized in struct request by the flush state machine.
++ */ ++ goto not_supported; + case REQ_OP_DISCARD: + if (!bdev_max_discard_sectors(bdev)) + goto not_supported; +@@ -788,6 +797,10 @@ void submit_bio_noacct(struct bio *bio) + if (status != BLK_STS_OK) + goto end_io; + break; ++ case REQ_OP_WRITE_ZEROES: ++ if (!q->limits.max_write_zeroes_sectors) ++ goto not_supported; ++ break; + case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_OPEN: + case REQ_OP_ZONE_CLOSE: +@@ -799,12 +812,15 @@ void submit_bio_noacct(struct bio *bio) + if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q)) + goto not_supported; + break; +- case REQ_OP_WRITE_ZEROES: +- if (!q->limits.max_write_zeroes_sectors) +- goto not_supported; +- break; ++ case REQ_OP_DRV_IN: ++ case REQ_OP_DRV_OUT: ++ /* ++ * Driver private operations are only used with passthrough ++ * requests. ++ */ ++ fallthrough; + default: +- break; ++ goto not_supported; + } + + if (blk_throtl_bio(bio)) +diff --git a/block/blk-settings.c b/block/blk-settings.c +index 7019b8e204d965..021994f6d2d829 100644 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -634,7 +634,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + } + + /* chunk_sectors a multiple of the physical block size? */ +- if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { ++ if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) { + t->chunk_sectors = 0; + t->misaligned = 1; + ret = -1; +diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c +index 7d1463a1562acb..dd05faf00571f3 100644 +--- a/crypto/jitterentropy-kcapi.c ++++ b/crypto/jitterentropy-kcapi.c +@@ -134,7 +134,7 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, + * Inject the data from the previous loop into the pool. This data is + * not considered to contain any entropy, but it stirs the pool a bit. + */ +- ret = crypto_shash_update(desc, intermediary, sizeof(intermediary)); ++ ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary)); + if (ret) + goto err; + +@@ -147,11 +147,12 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, + * conditioning operation to have an identical amount of input data + * according to section 3.1.5. + */ +- if (!stuck) { +- ret = crypto_shash_update(hash_state_desc, (u8 *)&time, +- sizeof(__u64)); ++ if (stuck) { ++ time = 0; + } + ++ ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64)); ++ + err: + shash_desc_zero(desc); + memzero_explicit(intermediary, sizeof(intermediary)); +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c +index 7053f1b9fc1ddc..c0f9cf9768ea9a 100644 +--- a/drivers/acpi/acpi_processor.c ++++ b/drivers/acpi/acpi_processor.c +@@ -250,7 +250,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr) + + static int acpi_processor_get_info(struct acpi_device *device) + { +- union acpi_object object = { 0 }; ++ union acpi_object object = { .processor = { 0 } }; + struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; + struct acpi_processor *pr = acpi_driver_data(device); + int device_declaration = 0; +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index 2abf20736702c0..ec364c2541124d 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -715,6 +715,17 @@ static bool ghes_do_proc(struct ghes *ghes, + } + } + ++ /* ++ * If no memory failure work is queued for abnormal synchronous ++ * errors, do a force kill. 
++ */ ++ if (sync && !queued) { ++ dev_err(ghes->dev, ++ HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n", ++ current->comm, task_pid_nr(current)); ++ force_sig(SIGBUS); ++ } ++ + return queued; + } + +@@ -901,6 +912,8 @@ static void __ghes_panic(struct ghes *ghes, + + __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus); + ++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); ++ + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); + + if (!panic_timeout) +diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c +index 98267f163e2bd0..aedf7e40145e06 100644 +--- a/drivers/acpi/pfr_update.c ++++ b/drivers/acpi/pfr_update.c +@@ -310,7 +310,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, + if (type == PFRU_CODE_INJECT_TYPE) + return payload_hdr->rt_ver >= cap->code_rt_version; + +- return payload_hdr->rt_ver >= cap->drv_rt_version; ++ return payload_hdr->svn_ver >= cap->drv_svn; + } + + static void print_update_debug_info(struct pfru_updated_result *result, +diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c +index a34f7d37877c9a..eb8f2a1ce1388d 100644 +--- a/drivers/acpi/prmt.c ++++ b/drivers/acpi/prmt.c +@@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa) + } + } + +- pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa); +- + return 0; + } + +@@ -154,13 +152,37 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) + guid_copy(&th->guid, (guid_t *)handler_info->handler_guid); + th->handler_addr = + (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address); ++ /* ++ * Print a warning message if handler_addr is zero which is not expected to ++ * ever happen. ++ */ ++ if (unlikely(!th->handler_addr)) ++ pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx", ++ &th->guid, handler_info->handler_address); + + th->static_data_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address); ++ /* ++ * According to the PRM specification, static_data_buffer_address can be zero, ++ * so avoid printing a warning message in that case. Otherwise, if the ++ * return value of efi_pa_va_lookup() is zero, print the message. ++ */ ++ if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address)) ++ pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx", ++ &th->guid, handler_info->static_data_buffer_address); + + th->acpi_param_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address); + ++ /* ++ * According to the PRM specification, acpi_param_buffer_address can be zero, ++ * so avoid printing a warning message in that case. Otherwise, if the ++ * return value of efi_pa_va_lookup() is zero, print the message. 
++ */
++ if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address))
++ pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx",
++ &th->guid, handler_info->acpi_param_buffer_address);
++
+ } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
+ 
+ return 0;
+diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
+index 4265814c74f81a..d81f30ce2341a0 100644
+--- a/drivers/acpi/processor_perflib.c
++++ b/drivers/acpi/processor_perflib.c
+@@ -174,6 +174,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
+ {
+ unsigned int cpu;
+ 
++ if (ignore_ppc == 1)
++ return;
++
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+@@ -194,6 +197,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
+ if (ret < 0)
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ cpu, ret);
++
++ if (!pr->performance)
++ continue;
++
++ ret = acpi_processor_get_platform_limit(pr);
++ if (ret)
++ pr_err("Failed to update freq constraint for CPU%d (%d)\n",
++ cpu, ret);
+ }
+ }
+ 
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index 42b51c9812a0eb..188707d2970ef7 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -117,7 +117,7 @@ config SATA_AHCI
+ 
+ config SATA_MOBILE_LPM_POLICY
+ int "Default SATA Link Power Management policy for low power chipsets"
+- range 0 4
++ range 0 5
+ default 0
+ depends on SATA_AHCI
+ help
+@@ -126,15 +126,32 @@ config SATA_MOBILE_LPM_POLICY
+ chipsets are typically found on most laptops but desktops and
+ servers now also widely use chipsets supporting low power modes.
+ 
+- The value set has the following meanings:
++ Each policy combines power saving states and features:
++ - Partial: The Phy logic is powered but is in a reduced power
++ state. The exit latency from this state is no longer than
++ 10us.
++ - Slumber: The Phy logic is powered but is in an even lower power
++ state. The exit latency from this state is potentially
++ longer, but no longer than 10ms.
++ - DevSleep: The Phy logic may be powered down. The exit latency from
++ this state is no longer than 20 ms, unless otherwise
++ specified by DETO in the device Identify Device Data log.
++ - HIPM: Host Initiated Power Management (host automatically
++ transitions to partial and slumber).
++ - DIPM: Device Initiated Power Management (device automatically
++ transitions to partial and slumber).
++
++ The possible values for the default SATA link power management
++ policies are:
+ 0 => Keep firmware settings
+- 1 => Maximum performance
+- 2 => Medium power
+- 3 => Medium power with Device Initiated PM enabled
+- 4 => Minimum power
+-
+- Note "Minimum power" is known to cause issues, including disk
+- corruption, with some disks and should not be used.
++ 1 => No power savings (maximum performance)
++ 2 => HIPM (Partial)
++ 3 => HIPM (Partial) and DIPM (Partial and Slumber)
++ 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber)
++ 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber)
++
++ Excluding the value 0, higher values represent policies with higher
++ power savings.
+ + config SATA_AHCI_PLATFORM + tristate "Platform AHCI SATA support" +diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c +index be72030a500d44..9e0a820d6961d0 100644 +--- a/drivers/ata/libata-sata.c ++++ b/drivers/ata/libata-sata.c +@@ -817,6 +817,11 @@ static ssize_t ata_scsi_lpm_store(struct device *device, + + spin_lock_irqsave(ap->lock, flags); + ++ if (ap->flags & ATA_FLAG_NO_LPM) { ++ count = -EOPNOTSUPP; ++ goto out_unlock; ++ } ++ + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, &ap->link, ENABLED) { + if (dev->horkage & ATA_HORKAGE_NOLPM) { +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 6a1460d35447cc..0b2f1e269ca496 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -856,18 +856,14 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, + {0xFF, 0xFF, 0xFF, 0xFF}, // END mark + }; + static const unsigned char stat_table[][4] = { +- /* Must be first because BUSY means no other bits valid */ +- {0x80, ABORTED_COMMAND, 0x47, 0x00}, +- // Busy, fake parity for now +- {0x40, ILLEGAL_REQUEST, 0x21, 0x04}, +- // Device ready, unaligned write command +- {0x20, HARDWARE_ERROR, 0x44, 0x00}, +- // Device fault, internal target failure +- {0x08, ABORTED_COMMAND, 0x47, 0x00}, +- // Timed out in xfer, fake parity for now +- {0x04, RECOVERED_ERROR, 0x11, 0x00}, +- // Recovered ECC error Medium error, recovered +- {0xFF, 0xFF, 0xFF, 0xFF}, // END mark ++ /* Busy: must be first because BUSY means no other bits valid */ ++ { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 }, ++ /* Device fault: INTERNAL TARGET FAILURE */ ++ { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 }, ++ /* Corrected data error */ ++ { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 }, ++ ++ { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */ + }; + + /* +@@ -939,6 +935,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) + if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { + ata_dev_dbg(dev, + "missing result TF: can't generate ATA PT sense data\n"); ++ if (qc->err_mask) ++ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); + return; + } + +@@ -996,8 +994,8 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) + + if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { + ata_dev_dbg(dev, +- "missing result TF: can't generate sense data\n"); +- return; ++ "Missing result TF: reporting aborted command\n"); ++ goto aborted; + } + + /* Use ata_to_sense_error() to map status register bits +@@ -1008,19 +1006,20 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) + ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, + &sense_key, &asc, &ascq); + ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); +- } else { +- /* Could not decode error */ +- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", +- tf->status, qc->err_mask); +- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); +- return; +- } + +- block = ata_tf_read_block(&qc->result_tf, dev); +- if (block == U64_MAX) ++ block = ata_tf_read_block(&qc->result_tf, dev); ++ if (block != U64_MAX) ++ scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, ++ block); + return; ++ } + +- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block); ++ /* Could not decode error */ ++ ata_dev_warn(dev, ++ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n", ++ tf->error, tf->status, qc->err_mask); ++aborted: ++ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); + } + + void ata_scsi_sdev_config(struct scsi_device *sdev) +@@ -3786,21 +3785,16 @@ static int 
ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ /* Check cdl_ctrl */
+ switch (buf[0] & 0x03) {
+ case 0:
+- /* Disable CDL if it is enabled */
+- if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
+- return 0;
++ /* Disable CDL */
+ ata_dev_dbg(dev, "Disabling CDL\n");
+ cdl_action = 0;
+ dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
+ break;
+ case 0x02:
+ /*
+- * Enable CDL if not already enabled. Since this is mutually
+- * exclusive with NCQ priority, allow this only if NCQ priority
+- * is disabled.
++ * Enable CDL. Since CDL is mutually exclusive with NCQ
++ * priority, allow this only if NCQ priority is disabled.
+ */
+- if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+- return 0;
+ if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
+ ata_dev_err(dev,
+ "NCQ priority must be disabled to enable CDL\n");
+diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
+index b741b5ba82bd6e..2aa0c642529021 100644
+--- a/drivers/base/arch_topology.c
++++ b/drivers/base/arch_topology.c
+@@ -19,6 +19,7 @@
+ #include 
+ #include 
+ #include 
++#include <linux/units.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include 
+@@ -26,7 +27,8 @@
+ static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
+ static struct cpumask scale_freq_counters_mask;
+ static bool scale_freq_invariant;
+-static DEFINE_PER_CPU(u32, freq_factor) = 1;
++DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
++EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
+ 
+ static bool supports_scale_freq_counters(const struct cpumask *cpus)
+ {
+@@ -170,9 +172,9 @@ DEFINE_PER_CPU(unsigned long, thermal_pressure);
+ * operating on stale data when hot-plug is used for some CPUs. The
+ * @capped_freq reflects the currently allowed max CPUs frequency due to
+ * thermal capping. It might be also a boost frequency value, which is bigger
+- * than the internal 'freq_factor' max frequency. In such case the pressure
+- * value should simply be removed, since this is an indication that there is
+- * no thermal throttling. The @capped_freq must be provided in kHz.
++ * than the internal 'capacity_freq_ref' max frequency. In such a case the
++ * pressure value should simply be removed, since this is an indication that
++ * there is no thermal throttling. The @capped_freq must be provided in kHz.
+ */ + void topology_update_thermal_pressure(const struct cpumask *cpus, + unsigned long capped_freq) +@@ -183,10 +185,7 @@ void topology_update_thermal_pressure(const struct cpumask *cpus, + + cpu = cpumask_first(cpus); + max_capacity = arch_scale_cpu_capacity(cpu); +- max_freq = per_cpu(freq_factor, cpu); +- +- /* Convert to MHz scale which is used in 'freq_factor' */ +- capped_freq /= 1000; ++ max_freq = arch_scale_freq_ref(cpu); + + /* + * Handle properly the boost frequencies, which should simply clean +@@ -279,13 +278,13 @@ void topology_normalize_cpu_scale(void) + + capacity_scale = 1; + for_each_possible_cpu(cpu) { +- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); ++ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); + capacity_scale = max(capacity, capacity_scale); + } + + pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale); + for_each_possible_cpu(cpu) { +- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); ++ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); + capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, + capacity_scale); + topology_set_cpu_scale(cpu, capacity); +@@ -321,15 +320,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) + cpu_node, raw_capacity[cpu]); + + /* +- * Update freq_factor for calculating early boot cpu capacities. ++ * Update capacity_freq_ref for calculating early boot CPU capacities. + * For non-clk CPU DVFS mechanism, there's no way to get the + * frequency value now, assuming they are running at the same +- * frequency (by keeping the initial freq_factor value). ++ * frequency (by keeping the initial capacity_freq_ref value). + */ + cpu_clk = of_clk_get(cpu_node, 0); + if (!PTR_ERR_OR_ZERO(cpu_clk)) { +- per_cpu(freq_factor, cpu) = +- clk_get_rate(cpu_clk) / 1000; ++ per_cpu(capacity_freq_ref, cpu) = ++ clk_get_rate(cpu_clk) / HZ_PER_KHZ; + clk_put(cpu_clk); + } + } else { +@@ -345,11 +344,16 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) + return !ret; + } + ++void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) ++{ ++} ++ + #ifdef CONFIG_ACPI_CPPC_LIB + #include + + void topology_init_cpu_capacity_cppc(void) + { ++ u64 capacity, capacity_scale = 0; + struct cppc_perf_caps perf_caps; + int cpu; + +@@ -366,6 +370,10 @@ void topology_init_cpu_capacity_cppc(void) + (perf_caps.highest_perf >= perf_caps.nominal_perf) && + (perf_caps.highest_perf >= perf_caps.lowest_perf)) { + raw_capacity[cpu] = perf_caps.highest_perf; ++ capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); ++ ++ per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); ++ + pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", + cpu, raw_capacity[cpu]); + continue; +@@ -376,7 +384,18 @@ void topology_init_cpu_capacity_cppc(void) + goto exit; + } + +- topology_normalize_cpu_scale(); ++ for_each_possible_cpu(cpu) { ++ freq_inv_set_max_ratio(cpu, ++ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); ++ ++ capacity = raw_capacity[cpu]; ++ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, ++ capacity_scale); ++ topology_set_cpu_scale(cpu, capacity); ++ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", ++ cpu, topology_get_cpu_scale(cpu)); ++ } ++ + schedule_work(&update_topology_flags_work); + pr_debug("cpu_capacity: cpu_capacity initialization done\n"); + +@@ -398,9 +417,6 @@ init_cpu_capacity_callback(struct notifier_block *nb, + struct cpufreq_policy *policy = data; + int cpu; + +- if (!raw_capacity) +- return 0; 
+-
+ if (val != CPUFREQ_CREATE_POLICY)
+ return 0;
+ 
+@@ -410,13 +426,18 @@ init_cpu_capacity_callback(struct notifier_block *nb,
+ 
+ cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+ 
+- for_each_cpu(cpu, policy->related_cpus)
+- per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
++ for_each_cpu(cpu, policy->related_cpus) {
++ per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
++ freq_inv_set_max_ratio(cpu,
++ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
++ }
+ 
+ if (cpumask_empty(cpus_to_visit)) {
+- topology_normalize_cpu_scale();
+- schedule_work(&update_topology_flags_work);
+- free_raw_capacity();
++ if (raw_capacity) {
++ topology_normalize_cpu_scale();
++ schedule_work(&update_topology_flags_work);
++ free_raw_capacity();
++ }
+ pr_debug("cpu_capacity: parsing done\n");
+ schedule_work(&parsing_done_work);
+ }
+@@ -436,7 +457,7 @@ static int __init register_cpufreq_notifier(void)
+ * On ACPI-based systems skip registering cpufreq notifier as cpufreq
+ * information is not needed for cpu capacity initialization.
+ */
+- if (!acpi_disabled || !raw_capacity)
++ if (!acpi_disabled)
+ return -EINVAL;
+ 
+ if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 0d43bf5b6cecbf..f53c14fb74fda7 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1175,16 +1175,18 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
+ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
+ 
+ /**
+- * pm_runtime_get_if_active - Conditionally bump up device usage counter.
++ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
+ * @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
+ *
+ * Return -EINVAL if runtime PM is disabled for @dev.
+ *
+- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+- * without changing the usage counter.
++ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
++ * is set, or (2) @dev is not ignoring children and its active child count is
++ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
++ * the usage counter of @dev and return 1.
++ *
++ * Otherwise, return 0 without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+@@ -1196,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
+ */ +-int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) ++static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) + { + unsigned long flags; + int retval; +@@ -1206,7 +1208,8 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) + retval = -EINVAL; + } else if (dev->power.runtime_status != RPM_ACTIVE) { + retval = 0; +- } else if (ign_usage_count) { ++ } else if (ign_usage_count || (!dev->power.ignore_children && ++ atomic_read(&dev->power.child_count) > 0)) { + retval = 1; + atomic_inc(&dev->power.usage_count); + } else { +@@ -1217,8 +1220,45 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) + + return retval; + } ++ ++/** ++ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is ++ * in active state ++ * @dev: Target device. ++ * ++ * Increment the runtime PM usage counter of @dev if its runtime PM status is ++ * %RPM_ACTIVE, in which case it returns 1. If the device is in a different ++ * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the ++ * device, in which case also the usage_count will remain unmodified. ++ */ ++int pm_runtime_get_if_active(struct device *dev) ++{ ++ return pm_runtime_get_conditional(dev, true); ++} + EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); + ++/** ++ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. ++ * @dev: Target device. ++ * ++ * Increment the runtime PM usage counter of @dev if its runtime PM status is ++ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not ++ * ignoring children and its active child count is nonzero. 1 is returned in ++ * this case. ++ * ++ * If @dev is in a different state or it is not in use (that is, its usage ++ * counter is 0, or it is ignoring children, or its active child count is 0), ++ * 0 is returned. ++ * ++ * -EINVAL is returned if runtime PM is disabled for the device, in which case ++ * also the usage counter of @dev is not updated. ++ */ ++int pm_runtime_get_if_in_use(struct device *dev) ++{ ++ return pm_runtime_get_conditional(dev, false); ++} ++EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); ++ + /** + * __pm_runtime_set_status - Set runtime PM status of a device. + * @dev: Device to handle. +@@ -1754,6 +1794,11 @@ void pm_runtime_reinit(struct device *dev) + pm_runtime_put(dev->parent); + } + } ++ /* ++ * Clear power.needs_force_resume in case it has been set by ++ * pm_runtime_force_suspend() invoked from a driver remove callback. ++ */ ++ dev->power.needs_force_resume = false; + } + + /** +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c +index 0c9f54197768d6..ac18d36b0ea84e 100644 +--- a/drivers/block/drbd/drbd_receiver.c ++++ b/drivers/block/drbd/drbd_receiver.c +@@ -2500,7 +2500,11 @@ static int handle_write_conflicts(struct drbd_device *device, + peer_req->w.cb = superseded ? 
e_send_superseded : + e_send_retry_write; + list_add_tail(&peer_req->w.list, &device->done_ee); +- queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); ++ /* put is in drbd_send_acks_wf() */ ++ kref_get(&device->kref); ++ if (!queue_work(connection->ack_sender, ++ &peer_req->peer_device->send_acks_work)) ++ kref_put(&device->kref, drbd_destroy_device); + + err = -ENOENT; + goto out; +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index 455e2a2b149f4b..ed004e1610dd1f 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -1472,19 +1472,36 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) + return error; + } + +-static int loop_set_block_size(struct loop_device *lo, unsigned long arg) ++static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode, ++ struct block_device *bdev, unsigned long arg) + { + int err = 0; + +- if (lo->lo_state != Lo_bound) +- return -ENXIO; ++ /* ++ * If we don't hold exclusive handle for the device, upgrade to it ++ * here to avoid changing device under exclusive owner. ++ */ ++ if (!(mode & BLK_OPEN_EXCL)) { ++ err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL); ++ if (err) ++ return err; ++ } ++ ++ err = mutex_lock_killable(&lo->lo_mutex); ++ if (err) ++ goto abort_claim; ++ ++ if (lo->lo_state != Lo_bound) { ++ err = -ENXIO; ++ goto unlock; ++ } + + err = blk_validate_block_size(arg); + if (err) + return err; + + if (lo->lo_queue->limits.logical_block_size == arg) +- return 0; ++ goto unlock; + + sync_blockdev(lo->lo_device); + invalidate_bdev(lo->lo_device); +@@ -1496,6 +1513,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) + loop_update_dio(lo); + blk_mq_unfreeze_queue(lo->lo_queue); + ++unlock: ++ mutex_unlock(&lo->lo_mutex); ++abort_claim: ++ if (!(mode & BLK_OPEN_EXCL)) ++ bd_abort_claiming(bdev, loop_set_block_size); + return err; + } + +@@ -1514,9 +1536,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, + case LOOP_SET_DIRECT_IO: + err = loop_set_dio(lo, arg); + break; +- case LOOP_SET_BLOCK_SIZE: +- err = loop_set_block_size(lo, arg); +- break; + default: + err = -EINVAL; + } +@@ -1571,9 +1590,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, + break; + case LOOP_GET_STATUS64: + return loop_get_status64(lo, argp); ++ case LOOP_SET_BLOCK_SIZE: ++ if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ return loop_set_block_size(lo, mode, bdev, arg); + case LOOP_SET_CAPACITY: + case LOOP_SET_DIRECT_IO: +- case LOOP_SET_BLOCK_SIZE: + if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + fallthrough; +diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c +index 7bf4b48e2282e7..a379a37c944984 100644 +--- a/drivers/block/sunvdc.c ++++ b/drivers/block/sunvdc.c +@@ -956,8 +956,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev) + dev = device_find_child(vdev->dev.parent, &port_data, + vdc_device_probed); + +- if (dev) ++ if (dev) { ++ put_device(dev); + return true; ++ } + + return false; + } +diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c +index edc0ec5a093398..022571cfec5cb0 100644 +--- a/drivers/bus/mhi/host/boot.c ++++ b/drivers/bus/mhi/host/boot.c +@@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + int ret; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { +- bhi_vec->dma_addr = mhi_buf->dma_addr; +- bhi_vec->size = mhi_buf->len; ++ bhi_vec->dma_addr = 
cpu_to_le64(mhi_buf->dma_addr); ++ bhi_vec->size = cpu_to_le64(mhi_buf->len); + } + + dev_dbg(dev, "BHIe programming for RDDM\n"); +@@ -375,8 +375,8 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + while (remainder) { + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); +- bhi_vec->dma_addr = mhi_buf->dma_addr; +- bhi_vec->size = to_cpy; ++ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr); ++ bhi_vec->size = cpu_to_le64(to_cpy); + + buf += to_cpy; + remainder -= to_cpy; +diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h +index d2858236af52b1..88c9bc11f17170 100644 +--- a/drivers/bus/mhi/host/internal.h ++++ b/drivers/bus/mhi/host/internal.h +@@ -31,8 +31,8 @@ struct mhi_ctxt { + }; + + struct bhi_vec_entry { +- u64 dma_addr; +- u64 size; ++ __le64 dma_addr; ++ __le64 size; + }; + + enum mhi_ch_state_type { +diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c +index ad1e97222a0f49..196929fff243e2 100644 +--- a/drivers/bus/mhi/host/main.c ++++ b/drivers/bus/mhi/host/main.c +@@ -603,7 +603,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_ring_element *local_rp, *ev_tre; +- void *dev_rp; ++ void *dev_rp, *next_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + +@@ -622,6 +622,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + result.dir = mhi_chan->dir; + + local_rp = tre_ring->rp; ++ ++ next_rp = local_rp + 1; ++ if (next_rp >= tre_ring->base + tre_ring->len) ++ next_rp = tre_ring->base; ++ if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) { ++ dev_err(&mhi_cntrl->mhi_dev->dev, ++ "Event element points to an unexpected TRE\n"); ++ break; ++ } ++ + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* If it's the last TRE, get length from the event */ +diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c +index 04b578a0be17c2..61f1a290ff0890 100644 +--- a/drivers/cdx/controller/cdx_rpmsg.c ++++ b/drivers/cdx/controller/cdx_rpmsg.c +@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev) + + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = rpdev->dst; +- strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, +- strlen(cdx_rpmsg_id_table[0].name)); ++ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name)); + + cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo); + if (!cdx_mcdi->ept) { +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index db8f1dadaa9f4f..96f175bd6d9fb7 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -4618,10 +4618,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, + * The NetFN and Command in the response is not even + * marginally correct. 
+ */ +- dev_warn(intf->si_dev, +- "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", +- (msg->data[0] >> 2) | 1, msg->data[1], +- msg->rsp[0] >> 2, msg->rsp[1]); ++ dev_warn_ratelimited(intf->si_dev, ++ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", ++ (msg->data[0] >> 2) | 1, msg->data[1], ++ msg->rsp[0] >> 2, msg->rsp[1]); + + goto return_unspecified; + } +diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c +index 9a459257489f0d..ca149ca8ccd669 100644 +--- a/drivers/char/ipmi/ipmi_watchdog.c ++++ b/drivers/char/ipmi/ipmi_watchdog.c +@@ -1190,14 +1190,8 @@ static struct ipmi_smi_watcher smi_watcher = { + .smi_gone = ipmi_smi_gone + }; + +-static int action_op(const char *inval, char *outval) ++static int action_op_set_val(const char *inval) + { +- if (outval) +- strcpy(outval, action); +- +- if (!inval) +- return 0; +- + if (strcmp(inval, "reset") == 0) + action_val = WDOG_TIMEOUT_RESET; + else if (strcmp(inval, "none") == 0) +@@ -1208,18 +1202,26 @@ static int action_op(const char *inval, char *outval) + action_val = WDOG_TIMEOUT_POWER_DOWN; + else + return -EINVAL; +- strcpy(action, inval); + return 0; + } + +-static int preaction_op(const char *inval, char *outval) ++static int action_op(const char *inval, char *outval) + { ++ int rv; ++ + if (outval) +- strcpy(outval, preaction); ++ strcpy(outval, action); + + if (!inval) + return 0; ++ rv = action_op_set_val(inval); ++ if (!rv) ++ strcpy(action, inval); ++ return rv; ++} + ++static int preaction_op_set_val(const char *inval) ++{ + if (strcmp(inval, "pre_none") == 0) + preaction_val = WDOG_PRETIMEOUT_NONE; + else if (strcmp(inval, "pre_smi") == 0) +@@ -1232,18 +1234,26 @@ static int preaction_op(const char *inval, char *outval) + preaction_val = WDOG_PRETIMEOUT_MSG_INT; + else + return -EINVAL; +- strcpy(preaction, inval); + return 0; + } + +-static int preop_op(const char *inval, char *outval) ++static int preaction_op(const char *inval, char *outval) + { ++ int rv; ++ + if (outval) +- strcpy(outval, preop); ++ strcpy(outval, preaction); + + if (!inval) + return 0; ++ rv = preaction_op_set_val(inval); ++ if (!rv) ++ strcpy(preaction, inval); ++ return 0; ++} + ++static int preop_op_set_val(const char *inval) ++{ + if (strcmp(inval, "preop_none") == 0) + preop_val = WDOG_PREOP_NONE; + else if (strcmp(inval, "preop_panic") == 0) +@@ -1252,7 +1262,22 @@ static int preop_op(const char *inval, char *outval) + preop_val = WDOG_PREOP_GIVE_DATA; + else + return -EINVAL; +- strcpy(preop, inval); ++ return 0; ++} ++ ++static int preop_op(const char *inval, char *outval) ++{ ++ int rv; ++ ++ if (outval) ++ strcpy(outval, preop); ++ ++ if (!inval) ++ return 0; ++ ++ rv = preop_op_set_val(inval); ++ if (!rv) ++ strcpy(preop, inval); + return 0; + } + +@@ -1289,18 +1314,18 @@ static int __init ipmi_wdog_init(void) + { + int rv; + +- if (action_op(action, NULL)) { ++ if (action_op_set_val(action)) { + action_op("reset", NULL); + pr_info("Unknown action '%s', defaulting to reset\n", action); + } + +- if (preaction_op(preaction, NULL)) { ++ if (preaction_op_set_val(preaction)) { + preaction_op("pre_none", NULL); + pr_info("Unknown preaction '%s', defaulting to none\n", + preaction); + } + +- if (preop_op(preop, NULL)) { ++ if (preop_op_set_val(preop)) { + preop_op("preop_none", NULL); + pr_info("Unknown preop '%s', defaulting to none\n", preop); + } +diff --git a/drivers/char/misc.c b/drivers/char/misc.c +index dda466f9181acf..30178e20d962d4 100644 
+--- a/drivers/char/misc.c ++++ b/drivers/char/misc.c +@@ -314,8 +314,8 @@ static int __init misc_init(void) + if (err) + goto fail_remove; + +- err = -EIO; +- if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops)) ++ err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops); ++ if (err < 0) + goto fail_printk; + return 0; + +diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c +index 3136ba1c2a59cc..915e84db3c97da 100644 +--- a/drivers/clk/qcom/gcc-ipq5018.c ++++ b/drivers/clk/qcom/gcc-ipq5018.c +@@ -1370,7 +1370,7 @@ static struct clk_branch gcc_xo_clk = { + &gcc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, ++ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c +index 0626650a7011cc..c9fc52a36fce9c 100644 +--- a/drivers/clk/tegra/clk-periph.c ++++ b/drivers/clk/tegra/clk-periph.c +@@ -51,7 +51,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw, + struct tegra_clk_periph *periph = to_clk_periph(hw); + const struct clk_ops *div_ops = periph->div_ops; + struct clk_hw *div_hw = &periph->divider.hw; +- unsigned long rate; ++ long rate; + + __clk_hw_set_clk(div_hw, hw); + +@@ -59,7 +59,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw, + if (rate < 0) + return rate; + +- req->rate = rate; ++ req->rate = (unsigned long)rate; + return 0; + } + +diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c +index e4d62cdaff462d..1f12f7ea3dc492 100644 +--- a/drivers/comedi/comedi_fops.c ++++ b/drivers/comedi/comedi_fops.c +@@ -787,6 +787,7 @@ static int is_device_busy(struct comedi_device *dev) + struct comedi_subdevice *s; + int i; + ++ lockdep_assert_held_write(&dev->attach_lock); + lockdep_assert_held(&dev->mutex); + if (!dev->attached) + return 0; +@@ -795,7 +796,16 @@ static int is_device_busy(struct comedi_device *dev) + s = &dev->subdevices[i]; + if (s->busy) + return 1; +- if (s->async && comedi_buf_is_mmapped(s)) ++ if (!s->async) ++ continue; ++ if (comedi_buf_is_mmapped(s)) ++ return 1; ++ /* ++ * There may be tasks still waiting on the subdevice's wait ++ * queue, although they should already be about to be removed ++ * from it since the subdevice has no active async command. 
++ */ ++ if (wq_has_sleeper(&s->async->wait_head)) + return 1; + } + +@@ -825,15 +835,22 @@ static int do_devconfig_ioctl(struct comedi_device *dev, + return -EPERM; + + if (!arg) { +- if (is_device_busy(dev)) +- return -EBUSY; ++ int rc = 0; ++ + if (dev->attached) { +- struct module *driver_module = dev->driver->module; ++ down_write(&dev->attach_lock); ++ if (is_device_busy(dev)) { ++ rc = -EBUSY; ++ } else { ++ struct module *driver_module = ++ dev->driver->module; + +- comedi_device_detach(dev); +- module_put(driver_module); ++ comedi_device_detach_locked(dev); ++ module_put(driver_module); ++ } ++ up_write(&dev->attach_lock); + } +- return 0; ++ return rc; + } + + if (copy_from_user(&it, arg, sizeof(it))) +@@ -1570,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev, + memset(&data[n], 0, (MIN_SAMPLES - n) * + sizeof(unsigned int)); + } ++ } else { ++ memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) * ++ sizeof(unsigned int)); + } + ret = parse_insn(dev, insns + i, data, file); + if (ret < 0) +@@ -1653,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev, + memset(&data[insn->n], 0, + (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); + } ++ } else { ++ memset(data, 0, n_data * sizeof(unsigned int)); + } + ret = parse_insn(dev, insn, data, file); + if (ret < 0) +diff --git a/drivers/comedi/comedi_internal.h b/drivers/comedi/comedi_internal.h +index 9b3631a654c895..cf10ba016ebc81 100644 +--- a/drivers/comedi/comedi_internal.h ++++ b/drivers/comedi/comedi_internal.h +@@ -50,6 +50,7 @@ extern struct mutex comedi_drivers_list_lock; + int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, + struct comedi_insn *insn, unsigned int *data); + ++void comedi_device_detach_locked(struct comedi_device *dev); + void comedi_device_detach(struct comedi_device *dev); + int comedi_device_attach(struct comedi_device *dev, + struct comedi_devconfig *it); +diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c +index 086213bcc49933..ce4cde140518b0 100644 +--- a/drivers/comedi/drivers.c ++++ b/drivers/comedi/drivers.c +@@ -158,7 +158,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev) + int i; + struct comedi_subdevice *s; + +- lockdep_assert_held(&dev->attach_lock); ++ lockdep_assert_held_write(&dev->attach_lock); + lockdep_assert_held(&dev->mutex); + if (dev->subdevices) { + for (i = 0; i < dev->n_subdevices; i++) { +@@ -195,16 +195,23 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev) + comedi_clear_hw_dev(dev); + } + +-void comedi_device_detach(struct comedi_device *dev) ++void comedi_device_detach_locked(struct comedi_device *dev) + { ++ lockdep_assert_held_write(&dev->attach_lock); + lockdep_assert_held(&dev->mutex); + comedi_device_cancel_all(dev); +- down_write(&dev->attach_lock); + dev->attached = false; + dev->detach_count++; + if (dev->driver) + dev->driver->detach(dev); + comedi_device_detach_cleanup(dev); ++} ++ ++void comedi_device_detach(struct comedi_device *dev) ++{ ++ lockdep_assert_held(&dev->mutex); ++ down_write(&dev->attach_lock); ++ comedi_device_detach_locked(dev); + up_write(&dev->attach_lock); + } + +@@ -612,11 +619,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, + unsigned int chan = CR_CHAN(insn->chanspec); + unsigned int base_chan = (chan < 32) ? 
0 : chan; + unsigned int _data[2]; ++ unsigned int i; + int ret; + +- if (insn->n == 0) +- return 0; +- + memset(_data, 0, sizeof(_data)); + memset(&_insn, 0, sizeof(_insn)); + _insn.insn = INSN_BITS; +@@ -627,18 +632,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, + if (insn->insn == INSN_WRITE) { + if (!(s->subdev_flags & SDF_WRITABLE)) + return -EINVAL; +- _data[0] = 1U << (chan - base_chan); /* mask */ +- _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */ ++ _data[0] = 1U << (chan - base_chan); /* mask */ + } ++ for (i = 0; i < insn->n; i++) { ++ if (insn->insn == INSN_WRITE) ++ _data[1] = data[i] ? _data[0] : 0; /* bits */ + +- ret = s->insn_bits(dev, s, &_insn, _data); +- if (ret < 0) +- return ret; ++ ret = s->insn_bits(dev, s, &_insn, _data); ++ if (ret < 0) ++ return ret; + +- if (insn->insn == INSN_READ) +- data[0] = (_data[1] >> (chan - base_chan)) & 1; ++ if (insn->insn == INSN_READ) ++ data[i] = (_data[1] >> (chan - base_chan)) & 1; ++ } + +- return 1; ++ return insn->n; + } + + static int __comedi_device_postconfig_async(struct comedi_device *dev, +diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c +index 0430630e6ebb90..b542896fa0e427 100644 +--- a/drivers/comedi/drivers/pcl726.c ++++ b/drivers/comedi/drivers/pcl726.c +@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev, + * Hook up the external trigger source interrupt only if the + * user config option is valid and the board supports interrupts. + */ +- if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) { ++ if (it->options[1] > 0 && it->options[1] < 16 && ++ (board->irq_mask & (1U << it->options[1]))) { + ret = request_irq(it->options[1], pcl726_interrupt, 0, + dev->board_name, dev); + if (ret == 0) { +diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c +index 8afefdea4d80c9..8a01032e57fa74 100644 +--- a/drivers/cpufreq/armada-8k-cpufreq.c ++++ b/drivers/cpufreq/armada-8k-cpufreq.c +@@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables) + { + int opps_index, nb_cpus = num_possible_cpus(); + +- for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) { ++ for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) { + int i; + + /* If cpu_dev is NULL then we reached the end of the array */ +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c +index aa34af940cb53b..ea32bdf7cc24e0 100644 +--- a/drivers/cpufreq/cppc_cpufreq.c ++++ b/drivers/cpufreq/cppc_cpufreq.c +@@ -847,7 +847,7 @@ static struct freq_attr *cppc_cpufreq_attr[] = { + }; + + static struct cpufreq_driver cppc_cpufreq_driver = { +- .flags = CPUFREQ_CONST_LOOPS, ++ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, + .verify = cppc_verify_policy, + .target = cppc_cpufreq_set_target, + .get = cppc_cpufreq_get_rate, +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index cc98d8cf543303..30d8f2ada0f176 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -454,7 +454,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, + + arch_set_freq_scale(policy->related_cpus, + policy->cur, +- policy->cpuinfo.max_freq); ++ arch_scale_freq_ref(policy->cpu)); + + spin_lock(&policy->transition_lock); + policy->transition_ongoing = false; +@@ -2205,7 +2205,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, + + policy->cur = freq; + arch_set_freq_scale(policy->related_cpus, freq, +- 
policy->cpuinfo.max_freq); ++ arch_scale_freq_ref(policy->cpu)); + cpufreq_stats_record_transition(policy, freq); + + if (trace_cpu_frequency_enabled()) { +@@ -2701,10 +2701,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, + pr_debug("starting governor %s failed\n", policy->governor->name); + if (old_gov) { + policy->governor = old_gov; +- if (cpufreq_init_governor(policy)) ++ if (cpufreq_init_governor(policy)) { + policy->governor = NULL; +- else +- cpufreq_start_governor(policy); ++ } else if (cpufreq_start_governor(policy)) { ++ cpufreq_exit_governor(policy); ++ policy->governor = NULL; ++ } + } + + return ret; +diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c +index edd9a8fb9878d6..92f9c00ad5f9fb 100644 +--- a/drivers/cpuidle/governors/menu.c ++++ b/drivers/cpuidle/governors/menu.c +@@ -21,7 +21,7 @@ + + #include "gov.h" + +-#define BUCKETS 12 ++#define BUCKETS 6 + #define INTERVAL_SHIFT 3 + #define INTERVALS (1UL << INTERVAL_SHIFT) + #define RESOLUTION 1024 +@@ -31,12 +31,11 @@ + /* + * Concepts and ideas behind the menu governor + * +- * For the menu governor, there are 3 decision factors for picking a C ++ * For the menu governor, there are 2 decision factors for picking a C + * state: + * 1) Energy break even point +- * 2) Performance impact +- * 3) Latency tolerance (from pmqos infrastructure) +- * These three factors are treated independently. ++ * 2) Latency tolerance (from pmqos infrastructure) ++ * These two factors are treated independently. + * + * Energy break even point + * ----------------------- +@@ -119,19 +118,10 @@ struct menu_device { + int interval_ptr; + }; + +-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters) ++static inline int which_bucket(u64 duration_ns) + { + int bucket = 0; + +- /* +- * We keep two groups of stats; one with no +- * IO pending, one without. +- * This allows us to calculate +- * E(duration)|iowait +- */ +- if (nr_iowaiters) +- bucket = BUCKETS/2; +- + if (duration_ns < 10ULL * NSEC_PER_USEC) + return bucket; + if (duration_ns < 100ULL * NSEC_PER_USEC) +@@ -145,21 +135,16 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters) + return bucket + 5; + } + +-/* +- * Return a multiplier for the exit latency that is intended +- * to take performance requirements into account. +- * The more performance critical we estimate the system +- * to be, the higher this multiplier, and thus the higher +- * the barrier to go to an expensive C state. +- */ +-static inline int performance_multiplier(unsigned int nr_iowaiters) ++static DEFINE_PER_CPU(struct menu_device, menu_devices); ++ ++static void menu_update_intervals(struct menu_device *data, unsigned int interval_us) + { +- /* for IO wait tasks (per cpu!) we add 10x each */ +- return 1 + 10 * nr_iowaiters; ++ /* Update the repeating-pattern data. 
*/ ++ data->intervals[data->interval_ptr++] = interval_us; ++ if (data->interval_ptr >= INTERVALS) ++ data->interval_ptr = 0; + } + +-static DEFINE_PER_CPU(struct menu_device, menu_devices); +- + static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); + + /* +@@ -276,18 +261,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + struct menu_device *data = this_cpu_ptr(&menu_devices); + s64 latency_req = cpuidle_governor_latency_req(dev->cpu); + u64 predicted_ns; +- u64 interactivity_req; +- unsigned int nr_iowaiters; + ktime_t delta, delta_tick; + int i, idx; + + if (data->needs_update) { + menu_update(drv, dev); + data->needs_update = 0; ++ } else if (!dev->last_residency_ns) { ++ /* ++ * This happens when the driver rejects the previously selected ++ * idle state and returns an error, so update the recent ++ * intervals table to prevent invalid information from being ++ * used going forward. ++ */ ++ menu_update_intervals(data, UINT_MAX); + } + +- nr_iowaiters = nr_iowait_cpu(dev->cpu); +- + /* Find the shortest expected idle interval. */ + predicted_ns = get_typical_interval(data) * NSEC_PER_USEC; + if (predicted_ns > RESIDENCY_THRESHOLD_NS) { +@@ -301,7 +290,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + } + + data->next_timer_ns = delta; +- data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); ++ data->bucket = which_bucket(data->next_timer_ns); + + /* Round up the result for half microseconds. */ + timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 + +@@ -319,7 +308,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + */ + data->next_timer_ns = KTIME_MAX; + delta_tick = TICK_NSEC / 2; +- data->bucket = which_bucket(KTIME_MAX, nr_iowaiters); ++ data->bucket = which_bucket(KTIME_MAX); + } + + if (unlikely(drv->state_count <= 1 || latency_req == 0) || +@@ -335,27 +324,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + return 0; + } + +- if (tick_nohz_tick_stopped()) { +- /* +- * If the tick is already stopped, the cost of possible short +- * idle duration misprediction is much higher, because the CPU +- * may be stuck in a shallow idle state for a long time as a +- * result of it. In that case say we might mispredict and use +- * the known time till the closest timer event for the idle +- * state selection. +- */ +- if (predicted_ns < TICK_NSEC) +- predicted_ns = data->next_timer_ns; +- } else { +- /* +- * Use the performance multiplier and the user-configurable +- * latency_req to determine the maximum exit latency. +- */ +- interactivity_req = div64_u64(predicted_ns, +- performance_multiplier(nr_iowaiters)); +- if (latency_req > interactivity_req) +- latency_req = interactivity_req; +- } ++ /* ++ * If the tick is already stopped, the cost of possible short idle ++ * duration misprediction is much higher, because the CPU may be stuck ++ * in a shallow idle state for a long time as a result of it. In that ++ * case, say we might mispredict and use the known time till the closest ++ * timer event for the idle state selection. 
++ */ ++ if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC) ++ predicted_ns = data->next_timer_ns; + + /* + * Find the idle state with the lowest power while satisfying +@@ -371,13 +348,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + if (idx == -1) + idx = i; /* first enabled state */ + ++ if (s->exit_latency_ns > latency_req) ++ break; ++ + if (s->target_residency_ns > predicted_ns) { + /* + * Use a physical idle state, not busy polling, unless + * a timer is going to trigger soon enough. + */ + if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && +- s->exit_latency_ns <= latency_req && + s->target_residency_ns <= data->next_timer_ns) { + predicted_ns = s->target_residency_ns; + idx = i; +@@ -409,8 +388,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + + return idx; + } +- if (s->exit_latency_ns > latency_req) +- break; + + idx = i; + } +@@ -553,10 +530,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) + + data->correction_factor[data->bucket] = new_factor; + +- /* update the repeating-pattern data */ +- data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns); +- if (data->interval_ptr >= INTERVALS) +- data->interval_ptr = 0; ++ menu_update_intervals(data, ktime_to_us(measured_ns)); + } + + /** +diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c +index 9a1c61be32ccdb..059319f7a7160f 100644 +--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c ++++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c +@@ -1482,11 +1482,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp) + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + ++ /* Do unmap before data processing */ ++ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); ++ + p = sg_virt(areq->dst); + memmove(p, p + ctx->key_sz - curve_sz, curve_sz); + memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz); + +- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +@@ -1796,9 +1798,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp) + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + ++ /* Do unmap before data processing */ ++ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); ++ + hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE); + +- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +index 79ff7982378d9f..05d1402001a1ef 100644 +--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h ++++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +@@ -193,6 +193,7 @@ void adf_exit_misc_wq(void); + bool adf_misc_wq_queue_work(struct work_struct *work); + bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, + unsigned long delay); ++void adf_misc_wq_flush(void); + #if defined(CONFIG_PCI_IOV) + int adf_sriov_configure(struct pci_dev *pdev, int numvfs); + void adf_disable_sriov(struct adf_accel_dev *accel_dev); +diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c +index 0f9e2d59ce3857..9c1b587a87e5b4 100644 +--- 
a/drivers/crypto/intel/qat/qat_common/adf_init.c ++++ b/drivers/crypto/intel/qat/qat_common/adf_init.c +@@ -381,6 +381,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) + hw_data->exit_admin_comms(accel_dev); + + adf_cleanup_etr_data(accel_dev); ++ adf_misc_wq_flush(); + adf_dev_restore(accel_dev); + } + +diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c +index 2aba194a7c2922..ce7c9ef6346b39 100644 +--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c ++++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c +@@ -386,3 +386,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, + { + return queue_delayed_work(adf_misc_wq, work, delay); + } ++ ++void adf_misc_wq_flush(void) ++{ ++ flush_workqueue(adf_misc_wq); ++} +diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c +index 3c4bba4a87795e..d69cc1e5e0239e 100644 +--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c ++++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c +@@ -1277,7 +1277,7 @@ static struct aead_alg qat_aeads[] = { { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha1", +- .cra_priority = 4001, ++ .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), +@@ -1294,7 +1294,7 @@ static struct aead_alg qat_aeads[] = { { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha256", +- .cra_priority = 4001, ++ .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), +@@ -1311,7 +1311,7 @@ static struct aead_alg qat_aeads[] = { { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha512", +- .cra_priority = 4001, ++ .cra_priority = 100, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), +@@ -1329,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { { + static struct skcipher_alg qat_skciphers[] = { { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "qat_aes_cbc", +- .base.cra_priority = 4001, ++ .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), +@@ -1347,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { { + }, { + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "qat_aes_ctr", +- .base.cra_priority = 4001, ++ .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), +@@ -1365,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { { + }, { + .base.cra_name = "xts(aes)", + .base.cra_driver_name = "qat_aes_xts", +- .base.cra_priority = 4001, ++ .base.cra_priority = 100, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ALLOCATES_MEMORY, + .base.cra_blocksize = AES_BLOCK_SIZE, +diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +index 1958b797a42100..682e7d80adb8c9 100644 +--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c ++++ 
b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +@@ -1485,6 +1485,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) + dma_addr_t rptr_baddr; + struct pci_dev *pdev; + u32 len, compl_rlen; ++ int timeout = 10000; + int ret, etype; + void *rptr; + +@@ -1547,16 +1548,27 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) + etype); + otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr); + lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]); ++ timeout = 10000; + + while (lfs->ops->cpt_get_compcode(result) == +- OTX2_CPT_COMPLETION_CODE_INIT) ++ OTX2_CPT_COMPLETION_CODE_INIT) { + cpu_relax(); ++ udelay(1); ++ timeout--; ++ if (!timeout) { ++ ret = -ENODEV; ++ cptpf->is_eng_caps_discovered = false; ++ dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n"); ++ goto error_no_response; ++ } ++ } + + cptpf->eng_caps[etype].u = be64_to_cpup(rptr); + } +- dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL); + cptpf->is_eng_caps_discovered = true; + ++error_no_response: ++ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL); + free_result: + kfree(result); + lf_cleanup: +diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c +index d69672ccacc49b..8d057cea09d5b6 100644 +--- a/drivers/devfreq/governor_userspace.c ++++ b/drivers/devfreq/governor_userspace.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, + unsigned long wanted; + int err = 0; + ++ err = kstrtoul(buf, 0, &wanted); ++ if (err) ++ return err; ++ + mutex_lock(&devfreq->lock); + data = devfreq->governor_data; + +- sscanf(buf, "%lu", &wanted); + data->user_frequency = wanted; + data->valid = true; + err = update_devfreq(devfreq); +diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c +index 9840594a6aaa1f..3882080cffa69c 100644 +--- a/drivers/dma/stm32-dma.c ++++ b/drivers/dma/stm32-dma.c +@@ -745,7 +745,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) + /* cyclic while CIRC/DBM disable => post resume reconfiguration needed */ + if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))) + stm32_dma_post_resume_reconfigure(chan); +- else if (scr & STM32_DMA_SCR_DBM) ++ else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2) + stm32_dma_configure_next_sg(chan); + } else { + chan->busy = false; +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c +index 6ddc90d7ba7c2a..f8aaada42d3f25 100644 +--- a/drivers/edac/synopsys_edac.c ++++ b/drivers/edac/synopsys_edac.c +@@ -332,20 +332,26 @@ struct synps_edac_priv { + #endif + }; + ++enum synps_platform_type { ++ ZYNQ, ++ ZYNQMP, ++ SYNPS, ++}; ++ + /** + * struct synps_platform_data - synps platform data structure. ++ * @platform: Identifies the target hardware platform + * @get_error_info: Get EDAC error info. + * @get_mtype: Get mtype. + * @get_dtype: Get dtype. +- * @get_ecc_state: Get ECC state. + * @get_mem_info: Get EDAC memory info + * @quirks: To differentiate IPs. 
+ */ + struct synps_platform_data { ++ enum synps_platform_type platform; + int (*get_error_info)(struct synps_edac_priv *priv); + enum mem_type (*get_mtype)(const void __iomem *base); + enum dev_type (*get_dtype)(const void __iomem *base); +- bool (*get_ecc_state)(void __iomem *base); + #ifdef CONFIG_EDAC_DEBUG + u64 (*get_mem_info)(struct synps_edac_priv *priv); + #endif +@@ -720,51 +726,38 @@ static enum dev_type zynqmp_get_dtype(const void __iomem *base) + return dt; + } + +-/** +- * zynq_get_ecc_state - Return the controller ECC enable/disable status. +- * @base: DDR memory controller base address. +- * +- * Get the ECC enable/disable status of the controller. +- * +- * Return: true if enabled, otherwise false. +- */ +-static bool zynq_get_ecc_state(void __iomem *base) ++static bool get_ecc_state(struct synps_edac_priv *priv) + { ++ u32 ecctype, clearval; + enum dev_type dt; +- u32 ecctype; +- +- dt = zynq_get_dtype(base); +- if (dt == DEV_UNKNOWN) +- return false; + +- ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK; +- if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2)) +- return true; +- +- return false; +-} +- +-/** +- * zynqmp_get_ecc_state - Return the controller ECC enable/disable status. +- * @base: DDR memory controller base address. +- * +- * Get the ECC enable/disable status for the controller. +- * +- * Return: a ECC status boolean i.e true/false - enabled/disabled. +- */ +-static bool zynqmp_get_ecc_state(void __iomem *base) +-{ +- enum dev_type dt; +- u32 ecctype; +- +- dt = zynqmp_get_dtype(base); +- if (dt == DEV_UNKNOWN) +- return false; +- +- ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK; +- if ((ecctype == SCRUB_MODE_SECDED) && +- ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8))) +- return true; ++ if (priv->p_data->platform == ZYNQ) { ++ dt = zynq_get_dtype(priv->baseaddr); ++ if (dt == DEV_UNKNOWN) ++ return false; ++ ++ ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK; ++ if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) { ++ clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR; ++ writel(clearval, priv->baseaddr + ECC_CTRL_OFST); ++ writel(0x0, priv->baseaddr + ECC_CTRL_OFST); ++ return true; ++ } ++ } else { ++ dt = zynqmp_get_dtype(priv->baseaddr); ++ if (dt == DEV_UNKNOWN) ++ return false; ++ ++ ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK; ++ if (ecctype == SCRUB_MODE_SECDED && ++ (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) { ++ clearval = readl(priv->baseaddr + ECC_CLR_OFST) | ++ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT | ++ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; ++ writel(clearval, priv->baseaddr + ECC_CLR_OFST); ++ return true; ++ } ++ } + + return false; + } +@@ -934,18 +927,18 @@ static int setup_irq(struct mem_ctl_info *mci, + } + + static const struct synps_platform_data zynq_edac_def = { ++ .platform = ZYNQ, + .get_error_info = zynq_get_error_info, + .get_mtype = zynq_get_mtype, + .get_dtype = zynq_get_dtype, +- .get_ecc_state = zynq_get_ecc_state, + .quirks = 0, + }; + + static const struct synps_platform_data zynqmp_edac_def = { ++ .platform = ZYNQMP, + .get_error_info = zynqmp_get_error_info, + .get_mtype = zynqmp_get_mtype, + .get_dtype = zynqmp_get_dtype, +- .get_ecc_state = zynqmp_get_ecc_state, + #ifdef CONFIG_EDAC_DEBUG + .get_mem_info = zynqmp_get_mem_info, + #endif +@@ -957,10 +950,10 @@ static const struct synps_platform_data zynqmp_edac_def = { + }; + + static const struct synps_platform_data synopsys_edac_def = { ++ .platform = SYNPS, + .get_error_info = 
zynqmp_get_error_info, + .get_mtype = zynqmp_get_mtype, + .get_dtype = zynqmp_get_dtype, +- .get_ecc_state = zynqmp_get_ecc_state, + .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR + #ifdef CONFIG_EDAC_DEBUG + | DDR_ECC_DATA_POISON_SUPPORT +@@ -1392,10 +1385,6 @@ static int mc_probe(struct platform_device *pdev) + if (!p_data) + return -ENODEV; + +- if (!p_data->get_ecc_state(baseaddr)) { +- edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); +- return -ENXIO; +- } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = SYNPS_EDAC_NR_CSROWS; +@@ -1415,6 +1404,12 @@ static int mc_probe(struct platform_device *pdev) + priv = mci->pvt_info; + priv->baseaddr = baseaddr; + priv->p_data = p_data; ++ if (!get_ecc_state(priv)) { ++ edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); ++ rc = -ENODEV; ++ goto free_edac_mc; ++ } ++ + spin_lock_init(&priv->reglock); + + mc_init(mci, pdev); +diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig +index cde1ab8bd9d1cb..91f2320c0d0f89 100644 +--- a/drivers/firmware/tegra/Kconfig ++++ b/drivers/firmware/tegra/Kconfig +@@ -2,7 +2,7 @@ + menu "Tegra firmware driver" + + config TEGRA_IVC +- bool "Tegra IVC protocol" ++ bool "Tegra IVC protocol" if COMPILE_TEST + depends on ARCH_TEGRA + help + IVC (Inter-VM Communication) protocol is part of the IPC +@@ -13,8 +13,9 @@ config TEGRA_IVC + + config TEGRA_BPMP + bool "Tegra BPMP driver" +- depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC ++ depends on ARCH_TEGRA && TEGRA_HSP_MBOX + depends on !CPU_BIG_ENDIAN ++ select TEGRA_IVC + help + BPMP (Boot and Power Management Processor) is designed to off-loading + the PM functions which include clock/DVFS/thermal/power from the CPU. +diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c +index 96611d424a104e..b77097c93973aa 100644 +--- a/drivers/fpga/zynq-fpga.c ++++ b/drivers/fpga/zynq-fpga.c +@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) + } + } + +- priv->dma_nelms = +- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); +- if (priv->dma_nelms == 0) { ++ err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); ++ if (err) { + dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n"); +- return -ENOMEM; ++ return err; + } ++ priv->dma_nelms = sgt->nents; + + /* enable clock */ + err = clk_enable(priv->clk); +@@ -478,7 +478,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) + clk_disable(priv->clk); + + out_free: +- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); ++ dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); + return err; + } + +diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c +index 6abe01bc39c3e1..c03945af8538e3 100644 +--- a/drivers/gpio/gpio-mlxbf2.c ++++ b/drivers/gpio/gpio-mlxbf2.c +@@ -397,7 +397,7 @@ mlxbf2_gpio_probe(struct platform_device *pdev) + gc->ngpio = npins; + gc->owner = THIS_MODULE; + +- irq = platform_get_irq(pdev, 0); ++ irq = platform_get_irq_optional(pdev, 0); + if (irq >= 0) { + girq = &gs->gc.irq; + gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip); +diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c +index 9875e34bde72a4..ed29b07d16c190 100644 +--- a/drivers/gpio/gpio-mlxbf3.c ++++ b/drivers/gpio/gpio-mlxbf3.c +@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) + struct mlxbf3_gpio_context *gs; + struct gpio_irq_chip *girq; + struct gpio_chip *gc; +- char *colon_ptr; + int ret, 
irq; +- long num; + + gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); + if (!gs) +@@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) + gc->owner = THIS_MODULE; + gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges; + +- colon_ptr = strchr(dev_name(dev), ':'); +- if (!colon_ptr) { +- dev_err(dev, "invalid device name format\n"); +- return -EINVAL; +- } +- +- ret = kstrtol(++colon_ptr, 16, &num); +- if (ret) { +- dev_err(dev, "invalid device instance\n"); +- return ret; +- } +- +- if (!num) { +- irq = platform_get_irq(pdev, 0); +- if (irq >= 0) { +- girq = &gs->gc.irq; +- gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); +- girq->default_type = IRQ_TYPE_NONE; +- /* This will let us handle the parent IRQ in the driver */ +- girq->num_parents = 0; +- girq->parents = NULL; +- girq->parent_handler = NULL; +- girq->handler = handle_bad_irq; +- +- /* +- * Directly request the irq here instead of passing +- * a flow-handler because the irq is shared. +- */ +- ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, +- IRQF_SHARED, dev_name(dev), gs); +- if (ret) +- return dev_err_probe(dev, ret, "failed to request IRQ"); +- } ++ irq = platform_get_irq_optional(pdev, 0); ++ if (irq >= 0) { ++ girq = &gs->gc.irq; ++ gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); ++ girq->default_type = IRQ_TYPE_NONE; ++ /* This will let us handle the parent IRQ in the driver */ ++ girq->num_parents = 0; ++ girq->parents = NULL; ++ girq->parent_handler = NULL; ++ girq->handler = handle_bad_irq; ++ ++ /* ++ * Directly request the irq here instead of passing ++ * a flow-handler because the irq is shared. ++ */ ++ ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, ++ IRQF_SHARED, dev_name(dev), gs); ++ if (ret) ++ return dev_err_probe(dev, ret, "failed to request IRQ"); + } + + platform_set_drvdata(pdev, gs); +diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c +index fab771cb6a87bf..bac757c191c2ea 100644 +--- a/drivers/gpio/gpio-tps65912.c ++++ b/drivers/gpio/gpio-tps65912.c +@@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc, + unsigned offset, int value) + { + struct tps65912_gpio *gpio = gpiochip_get_data(gc); ++ int ret; + + /* Set the initial value */ +- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, +- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0); ++ ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, ++ GPIO_SET_MASK, value ? 
GPIO_SET_MASK : 0); ++ if (ret) ++ return ret; + + return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset, + GPIO_CFG_MASK, GPIO_CFG_MASK); +diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c +index fcc5e8c08973b3..1d0eb49eae3b4c 100644 +--- a/drivers/gpio/gpio-virtio.c ++++ b/drivers/gpio/gpio-virtio.c +@@ -539,7 +539,6 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio, + + static int virtio_gpio_probe(struct virtio_device *vdev) + { +- struct virtio_gpio_config config; + struct device *dev = &vdev->dev; + struct virtio_gpio *vgpio; + u32 gpio_names_size; +@@ -551,9 +550,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev) + return -ENOMEM; + + /* Read configuration */ +- virtio_cread_bytes(vdev, 0, &config, sizeof(config)); +- gpio_names_size = le32_to_cpu(config.gpio_names_size); +- ngpio = le16_to_cpu(config.ngpio); ++ gpio_names_size = ++ virtio_cread32(vdev, offsetof(struct virtio_gpio_config, ++ gpio_names_size)); ++ ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config, ++ ngpio)); + if (!ngpio) { + dev_err(dev, "Number of GPIOs can't be zero\n"); + return -EINVAL; +diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c +index 2bba27b13947f1..cfa7b0a50c8e33 100644 +--- a/drivers/gpio/gpio-wcd934x.c ++++ b/drivers/gpio/gpio-wcd934x.c +@@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, + int val) + { + struct wcd_gpio_data *data = gpiochip_get_data(chip); ++ int ret; + +- regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET, +- WCD_PIN_MASK(pin), WCD_PIN_MASK(pin)); ++ ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET, ++ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin)); ++ if (ret) ++ return ret; + + return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET, + WCD_PIN_MASK(pin), +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +index 7200110197415f..384834fbd59011 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +@@ -89,8 +89,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + } + + r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, +- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | +- AMDGPU_PTE_EXECUTABLE); ++ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | ++ AMDGPU_VM_PAGE_EXECUTABLE); + + if (r) { + DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +index 2992ce494e000c..fded8902346f5d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +@@ -2125,13 +2125,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, + */ + long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) + { +- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, +- DMA_RESV_USAGE_BOOKKEEP, +- true, timeout); ++ timeout = drm_sched_entity_flush(&vm->immediate, timeout); + if (timeout <= 0) + return timeout; + +- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); ++ return drm_sched_entity_flush(&vm->delayed, timeout); + } + + /** +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +index 12c7f4b46ea94f..b0c003da52fc1b 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +@@ -36,40 +36,47 @@ + + static const char *mmhub_client_ids_v3_0_1[][2] = { 
+ [0][0] = "VMC", ++ [1][0] = "ISPXT", ++ [2][0] = "ISPIXT", + [4][0] = "DCEDMC", + [5][0] = "DCEVGA", + [6][0] = "MP0", + [7][0] = "MP1", +- [8][0] = "MPIO", +- [16][0] = "HDP", +- [17][0] = "LSDMA", +- [18][0] = "JPEG", +- [19][0] = "VCNU0", +- [21][0] = "VSCH", +- [22][0] = "VCNU1", +- [23][0] = "VCN1", +- [32+20][0] = "VCN0", +- [2][1] = "DBGUNBIO", ++ [8][0] = "MPM", ++ [12][0] = "ISPTNR", ++ [14][0] = "ISPCRD0", ++ [15][0] = "ISPCRD1", ++ [16][0] = "ISPCRD2", ++ [22][0] = "HDP", ++ [23][0] = "LSDMA", ++ [24][0] = "JPEG", ++ [27][0] = "VSCH", ++ [28][0] = "VCNU", ++ [29][0] = "VCN", ++ [1][1] = "ISPXT", ++ [2][1] = "ISPIXT", + [3][1] = "DCEDWB", + [4][1] = "DCEDMC", + [5][1] = "DCEVGA", + [6][1] = "MP0", + [7][1] = "MP1", +- [8][1] = "MPIO", +- [10][1] = "DBGU0", +- [11][1] = "DBGU1", +- [12][1] = "DBGU2", +- [13][1] = "DBGU3", +- [14][1] = "XDP", +- [15][1] = "OSSSYS", +- [16][1] = "HDP", +- [17][1] = "LSDMA", +- [18][1] = "JPEG", +- [19][1] = "VCNU0", +- [20][1] = "VCN0", +- [21][1] = "VSCH", +- [22][1] = "VCNU1", +- [23][1] = "VCN1", ++ [8][1] = "MPM", ++ [10][1] = "ISPMWR0", ++ [11][1] = "ISPMWR1", ++ [12][1] = "ISPTNR", ++ [13][1] = "ISPSWR", ++ [14][1] = "ISPCWR0", ++ [15][1] = "ISPCWR1", ++ [16][1] = "ISPCWR2", ++ [17][1] = "ISPCWR3", ++ [18][1] = "XDP", ++ [21][1] = "OSSSYS", ++ [22][1] = "HDP", ++ [23][1] = "LSDMA", ++ [24][1] = "JPEG", ++ [27][1] = "VSCH", ++ [28][1] = "VCNU", ++ [29][1] = "VCN", + }; + + static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid, +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c +index aee2212e52f69a..33aa23450b3f72 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c +@@ -78,8 +78,8 @@ static int kfd_init(void) + static void kfd_exit(void) + { + kfd_cleanup_processes(); +- kfd_debugfs_fini(); + kfd_process_destroy_wq(); ++ kfd_debugfs_fini(); + kfd_procfs_shutdown(); + kfd_topology_shutdown(); + kfd_chardev_exit(); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index d4edddaa23dd3e..8421e5f0737bfc 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -4620,7 +4620,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) + + static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) + { +- drm_atomic_private_obj_fini(&dm->atomic_obj); ++ if (dm->atomic_obj.state) ++ drm_atomic_private_obj_fini(&dm->atomic_obj); + } + + /****************************************************************************** +@@ -6778,6 +6779,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, + struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); + int ret; + ++ if (WARN_ON(unlikely(!old_con_state || !new_con_state))) ++ return -EINVAL; ++ + trace_amdgpu_dm_connector_atomic_check(new_con_state); + + if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +index 30d4c6fd95f531..dab732c6c6c7cc 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +@@ -410,6 +410,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, + return -EINVAL; + } + ++ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) { ++ struct drm_plane_state 
*primary_state; ++ ++ /* Pull in primary plane for correct VRR handling */ ++ primary_state = drm_atomic_get_plane_state(state, crtc->primary); ++ if (IS_ERR(primary_state)) ++ return PTR_ERR(primary_state); ++ } ++ + /* In some use cases, like reset, no stream is attached */ + if (!dm_crtc_state->stream) + return 0; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +index fe96bab7d05d7b..67972d25366e59 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +@@ -124,8 +124,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) + psr_config.allow_multi_disp_optimizations = + (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); + +- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) +- return false; ++ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { ++ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) ++ return false; ++ } + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + +diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c +index 818a529cacc373..12a54fabd80ec8 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c +@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3( + allocation.sPCLKInput.usFbDiv = + cpu_to_le16((uint16_t)bp_params->feedback_divider); + allocation.sPCLKInput.ucFracFbDiv = +- (uint8_t)bp_params->fractional_feedback_divider; ++ (uint8_t)(bp_params->fractional_feedback_divider / 100000); + allocation.sPCLKInput.ucPostDiv = + (uint8_t)bp_params->pixel_clock_post_divider; + +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +index dcedf96451615c..719881406f18e5 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +@@ -162,7 +162,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p + return NULL; + } + dce60_clk_mgr_construct(ctx, clk_mgr); +- dce_clk_mgr_construct(ctx, clk_mgr); + return &clk_mgr->base; + } + #endif +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +index 26feefbb8990ae..b268c367c27cc4 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +@@ -386,8 +386,6 @@ static void dce_pplib_apply_display_requirements( + { + struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + +- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); +- + dce110_fill_display_configs(context, pp_display_cfg); + + if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +index 78df96882d6ec5..fb2f154f4fda16 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +@@ -120,9 +120,15 @@ void dce110_fill_display_configs( + const struct dc_state *context, + struct dm_pp_display_configuration *pp_display_cfg) + { ++ struct dc *dc = context->clk_mgr->ctx->dc; + int j; + int 
num_cfgs = 0; + ++ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); ++ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; ++ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; ++ pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator; ++ + for (j = 0; j < context->stream_count; j++) { + int k; + +@@ -164,6 +170,23 @@ void dce110_fill_display_configs( + cfg->v_refresh /= stream->timing.h_total; + cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) + / stream->timing.v_total; ++ ++ /* Find first CRTC index and calculate its line time. ++ * This is necessary for DPM on SI GPUs. ++ */ ++ if (cfg->pipe_idx < pp_display_cfg->crtc_index) { ++ const struct dc_crtc_timing *timing = ++ &context->streams[0]->timing; ++ ++ pp_display_cfg->crtc_index = cfg->pipe_idx; ++ pp_display_cfg->line_time_in_us = ++ timing->h_total * 10000 / timing->pix_clk_100hz; ++ } ++ } ++ ++ if (!num_cfgs) { ++ pp_display_cfg->crtc_index = 0; ++ pp_display_cfg->line_time_in_us = 0; + } + + pp_display_cfg->display_count = num_cfgs; +@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements( + pp_display_cfg->min_engine_clock_deep_sleep_khz + = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; + +- pp_display_cfg->avail_mclk_switch_time_us = +- dce110_get_min_vblank_time_us(context); +- /* TODO: dce11.2*/ +- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; +- +- pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; +- + dce110_fill_display_configs(context, pp_display_cfg); + +- /* TODO: is this still applicable?*/ +- if (pp_display_cfg->display_count == 1) { +- const struct dc_crtc_timing *timing = +- &context->streams[0]->timing; +- +- pp_display_cfg->crtc_index = +- pp_display_cfg->disp_configs[0].pipe_idx; +- pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; +- } +- + if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) + dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); + } +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +index 0267644717b27a..ffd0f4a7631023 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { + static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) + { + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); +- int dprefclk_wdivider; +- int dp_ref_clk_khz; +- int target_div; ++ struct dc_context *ctx = clk_mgr_base->ctx; ++ int dp_ref_clk_khz = 0; + +- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ +- +- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently +- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ +- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); +- +- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ +- target_div = dentist_get_divider_from_did(dprefclk_wdivider); +- +- /* Calculate the current DFS clock, in kHz.*/ +- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR +- * clk_mgr->base.dentist_vco_freq_khz) / target_div; ++ if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev)) ++ dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency; ++ else ++ dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz; + + return 
dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); + } +@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements( + { + struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + +- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); +- + dce110_fill_display_configs(context, pp_display_cfg); + + if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) +@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, + { + struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dm_pp_power_level_change_request level_change_req; +- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; +- +- /*TODO: W/A for dal3 linux, investigate why this works */ +- if (!clk_mgr_dce->dfs_bypass_active) +- patched_disp_clk = patched_disp_clk * 115 / 100; ++ const int max_disp_clk = ++ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; ++ int patched_disp_clk = min(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); + + level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); + /* get max clock state from PPLIB */ +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +index a825fd6c7fa666..f0b472e84a53d7 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +@@ -163,14 +163,13 @@ static void dcn20_setup_gsl_group_as_lock( + } + + /* at this point we want to program whether it's to enable or disable */ +- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && +- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { ++ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { + pipe_ctx->stream_res.tg->funcs->set_gsl( + pipe_ctx->stream_res.tg, + &gsl); +- +- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( +- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); ++ if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) ++ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( ++ pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); + } else + BREAK_TO_DEBUGGER(); + } +@@ -782,7 +781,7 @@ enum dc_status dcn20_enable_stream_timing( + return DC_ERROR_UNEXPECTED; + } + +- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); ++ fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz)); + + params.vertical_total_min = stream->adjust.v_total_min; + params.vertical_total_max = stream->adjust.v_total_max; +diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +index 9b470812d96a5f..2ce2d9ff7568a6 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c ++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +@@ -137,7 +137,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) + } + } + +- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) ++ if (((!link->wa_flags.dp_keep_receiver_powered) || hw_init) && ++ (link->type != dc_connection_none)) + dpcd_write_rx_power_ctrl(link, false); + } + } +diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +index 7f8f127e7722de..ab6964ca1c2b47 100644 +--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c ++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) + return MOD_HDCP_STATUS_FAILURE; + } + ++ if (!display) ++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; ++ + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; + + mutex_lock(&psp->hdcp_context.mutex); +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +index 2997aeed634084..632fc8aed6536f 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +@@ -1757,6 +1757,12 @@ static int smu_resume(void *handle) + + adev->pm.dpm_enabled = true; + ++ if (smu->current_power_limit) { ++ ret = smu_set_power_limit(smu, smu->current_power_limit); ++ if (ret && ret != -EOPNOTSUPP) ++ return ret; ++ } ++ + dev_info(adev->dev, "SMU is resumed successfully!\n"); + + return 0; +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +index 454216bd6f1dd2..4fabecaa2b4195 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +@@ -686,7 +686,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu, + { + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + SmuMetrics_t metrics; +- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int i, idx, size = 0, ret = 0; + uint32_t cur_value = 0, value = 0, count = 0; + bool cur_value_match_level = false; +@@ -702,31 +701,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu, + + switch (clk_type) { + case SMU_OD_SCLK: +- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { +- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); +- size += sysfs_emit_at(buf, size, "0: %10uMhz\n", +- (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); +- size += sysfs_emit_at(buf, size, "1: %10uMhz\n", +- (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); +- } ++ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); ++ size += sysfs_emit_at(buf, size, "0: %10uMhz\n", ++ (smu->gfx_actual_hard_min_freq > 0) ? 
smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); ++ size += sysfs_emit_at(buf, size, "1: %10uMhz\n", ++ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); + break; + case SMU_OD_CCLK: +- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { +- size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); +- size += sysfs_emit_at(buf, size, "0: %10uMhz\n", +- (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); +- size += sysfs_emit_at(buf, size, "1: %10uMhz\n", +- (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); +- } ++ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); ++ size += sysfs_emit_at(buf, size, "0: %10uMhz\n", ++ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); ++ size += sysfs_emit_at(buf, size, "1: %10uMhz\n", ++ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); + break; + case SMU_OD_RANGE: +- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { +- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); +- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", +- smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); +- size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", +- smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); +- } ++ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); ++ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", ++ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); ++ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", ++ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); + break; + case SMU_SOCCLK: + /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ +diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c +index 851f0baf94600c..772d8e662278b9 100644 +--- a/drivers/gpu/drm/display/drm_dp_helper.c ++++ b/drivers/gpu/drm/display/drm_dp_helper.c +@@ -663,7 +663,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + * monitor doesn't power down exactly after the throw away read. + */ + if (!aux->is_remote) { +- ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV); ++ ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS); + if (ret < 0) + return ret; + } +diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +index 8a98fa276e8a9d..96f960bcfd82d3 100644 +--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c ++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +@@ -258,13 +258,13 @@ static int hibmc_load(struct drm_device *dev) + + ret = hibmc_hw_init(priv); + if (ret) +- goto err; ++ return ret; + + ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (ret) { + drm_err(dev, "Error initializing VRAM MM; %d\n", ret); +- goto err; ++ return ret; + } + + ret = hibmc_kms_init(priv); +diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c +index 6d8e5e5c0cba28..37c963d33bb7eb 100644 +--- a/drivers/gpu/drm/i915/intel_runtime_pm.c ++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c +@@ -434,7 +434,10 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm + * function, since the power state is undefined. 
This applies + * atm to the late/early system suspend/resume handlers. + */ +- if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0) ++ if ((ignore_usecount && ++ pm_runtime_get_if_active(rpm->kdev) <= 0) || ++ (!ignore_usecount && ++ pm_runtime_get_if_in_use(rpm->kdev) <= 0)) + return 0; + } + +diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c +index 1113e6b2ec8ec9..aaf7c338eb96d2 100644 +--- a/drivers/gpu/drm/msm/msm_gem.c ++++ b/drivers/gpu/drm/msm/msm_gem.c +@@ -928,7 +928,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, + uint64_t off = drm_vma_node_start(&obj->vma_node); + const char *madv; + +- msm_gem_lock(obj); ++ if (!msm_gem_trylock(obj)) ++ return; + + stats->all.count++; + stats->all.size += obj->size; +diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h +index 8ddef544314083..631a9aa129bd5d 100644 +--- a/drivers/gpu/drm/msm/msm_gem.h ++++ b/drivers/gpu/drm/msm/msm_gem.h +@@ -183,6 +183,12 @@ msm_gem_lock(struct drm_gem_object *obj) + dma_resv_lock(obj->resv, NULL); + } + ++static inline bool __must_check ++msm_gem_trylock(struct drm_gem_object *obj) ++{ ++ return dma_resv_trylock(obj->resv); ++} ++ + static inline int + msm_gem_lock_interruptible(struct drm_gem_object *obj) + { +diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c +index 99296f03371ae0..07c1ebc2a94141 100644 +--- a/drivers/gpu/drm/nouveau/nvif/vmm.c ++++ b/drivers/gpu/drm/nouveau/nvif/vmm.c +@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, + case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break; + default: + WARN_ON(1); +- return -EINVAL; ++ ret = -EINVAL; ++ goto done; + } + + memcpy(args->data, argv, argc); +diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c +index 10febea473cde9..6cec796dd463f6 100644 +--- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c ++++ b/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c +@@ -585,6 +585,9 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, + if (mode->clock > 148500) + return MODE_CLOCK_HIGH; + ++ if (mode->clock < 5803) ++ return MODE_CLOCK_LOW; ++ + return MODE_OK; + } + +diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c +index 37c08fac7e7d01..80ba34cabca368 100644 +--- a/drivers/gpu/drm/ttm/ttm_pool.c ++++ b/drivers/gpu/drm/ttm/ttm_pool.c +@@ -615,7 +615,6 @@ void ttm_pool_fini(struct ttm_pool *pool) + } + EXPORT_SYMBOL(ttm_pool_fini); + +-/* As long as pages are available make sure to release at least one */ + static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, + struct shrink_control *sc) + { +@@ -623,9 +622,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, + + do + num_freed += ttm_pool_shrink(); +- while (!num_freed && atomic_long_read(&allocated_pages)); ++ while (num_freed < sc->nr_to_scan && ++ atomic_long_read(&allocated_pages)); + +- return num_freed; ++ sc->nr_scanned = num_freed; ++ ++ return num_freed ?: SHRINK_STOP; + } + + /* Return the number of pages available or SHRINK_EMPTY if we have none */ +diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c +index 46ff9c75bb124a..8f2423a15c71c6 100644 +--- a/drivers/gpu/drm/ttm/ttm_resource.c ++++ b/drivers/gpu/drm/ttm/ttm_resource.c +@@ -437,6 +437,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, + } + spin_unlock(&bdev->lru_lock); + ++ if (ret && ret != -ENOENT) ++ return 
ret; ++ + spin_lock(&man->move_lock); + fence = dma_fence_get(man->move); + spin_unlock(&man->move_lock); +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 0b561c1eb59e2e..7cf17c671da488 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -858,10 +858,12 @@ static int apple_probe(struct hid_device *hdev, + return ret; + } + +- timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0); +- mod_timer(&asc->battery_timer, +- jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS)); +- apple_fetch_battery(hdev); ++ if (quirks & APPLE_RDESC_BATTERY) { ++ timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0); ++ mod_timer(&asc->battery_timer, ++ jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS)); ++ apple_fetch_battery(hdev); ++ } + + if (quirks & APPLE_BACKLIGHT_CTL) + apple_backlight_init(hdev); +@@ -873,7 +875,8 @@ static void apple_remove(struct hid_device *hdev) + { + struct apple_sc *asc = hid_get_drvdata(hdev); + +- del_timer_sync(&asc->battery_timer); ++ if (asc->quirks & APPLE_RDESC_BATTERY) ++ del_timer_sync(&asc->battery_timer); + + hid_hw_stop(hdev); + } +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 9bb8daf7f78609..4fe1e0bc244934 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -772,16 +772,30 @@ static void magicmouse_enable_mt_work(struct work_struct *work) + hid_err(msc->hdev, "unable to request touch data (%d)\n", ret); + } + ++static bool is_usb_magicmouse2(__u32 vendor, __u32 product) ++{ ++ if (vendor != USB_VENDOR_ID_APPLE) ++ return false; ++ return product == USB_DEVICE_ID_APPLE_MAGICMOUSE2; ++} ++ ++static bool is_usb_magictrackpad2(__u32 vendor, __u32 product) ++{ ++ if (vendor != USB_VENDOR_ID_APPLE) ++ return false; ++ return product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || ++ product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC; ++} ++ + static int magicmouse_fetch_battery(struct hid_device *hdev) + { + #ifdef CONFIG_HID_BATTERY_STRENGTH + struct hid_report_enum *report_enum; + struct hid_report *report; + +- if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE || +- (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 && +- hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && +- hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)) ++ if (!hdev->battery || ++ (!is_usb_magicmouse2(hdev->vendor, hdev->product) && ++ !is_usb_magictrackpad2(hdev->vendor, hdev->product))) + return -1; + + report_enum = &hdev->report_enum[hdev->battery_report_type]; +@@ -843,16 +857,17 @@ static int magicmouse_probe(struct hid_device *hdev, + return ret; + } + +- timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0); +- mod_timer(&msc->battery_timer, +- jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS)); +- magicmouse_fetch_battery(hdev); ++ if (is_usb_magicmouse2(id->vendor, id->product) || ++ is_usb_magictrackpad2(id->vendor, id->product)) { ++ timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0); ++ mod_timer(&msc->battery_timer, ++ jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS)); ++ magicmouse_fetch_battery(hdev); ++ } + +- if (id->vendor == USB_VENDOR_ID_APPLE && +- (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || +- ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || +- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && +- hdev->type != HID_TYPE_USBMOUSE))) ++ if (is_usb_magicmouse2(id->vendor, id->product) || ++ (is_usb_magictrackpad2(id->vendor, id->product) && ++ hdev->type != HID_TYPE_USBMOUSE)) + return 0; + 
+ if (!msc->input) { +@@ -908,7 +923,10 @@ static int magicmouse_probe(struct hid_device *hdev, + + return 0; + err_stop_hw: +- del_timer_sync(&msc->battery_timer); ++ if (is_usb_magicmouse2(id->vendor, id->product) || ++ is_usb_magictrackpad2(id->vendor, id->product)) ++ del_timer_sync(&msc->battery_timer); ++ + hid_hw_stop(hdev); + return ret; + } +@@ -919,7 +937,9 @@ static void magicmouse_remove(struct hid_device *hdev) + + if (msc) { + cancel_delayed_work_sync(&msc->work); +- del_timer_sync(&msc->battery_timer); ++ if (is_usb_magicmouse2(hdev->vendor, hdev->product) || ++ is_usb_magictrackpad2(hdev->vendor, hdev->product)) ++ del_timer_sync(&msc->battery_timer); + } + + hid_hw_stop(hdev); +@@ -936,10 +956,8 @@ static __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + * 0x05, 0x01, // Usage Page (Generic Desktop) 0 + * 0x09, 0x02, // Usage (Mouse) 2 + */ +- if (hdev->vendor == USB_VENDOR_ID_APPLE && +- (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || +- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || +- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && ++ if ((is_usb_magicmouse2(hdev->vendor, hdev->product) || ++ is_usb_magictrackpad2(hdev->vendor, hdev->product)) && + *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) { + hid_info(hdev, + "fixing up magicmouse battery report descriptor\n"); +diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c +index 29f0e4945f1924..840acd5260f4e4 100644 +--- a/drivers/hwmon/emc2305.c ++++ b/drivers/hwmon/emc2305.c +@@ -303,6 +303,12 @@ static int emc2305_set_single_tz(struct device *dev, int idx) + dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]); + return PTR_ERR(data->cdev_data[cdev_idx].cdev); + } ++ ++ if (data->cdev_data[cdev_idx].cur_state > 0) ++ /* Update pwm when temperature is above trips */ ++ pwm = EMC2305_PWM_STATE2DUTY(data->cdev_data[cdev_idx].cur_state, ++ data->max_state, EMC2305_FAN_MAX); ++ + /* Set minimal PWM speed. 
*/ + if (data->pwm_separate) { + ret = emc2305_set_pwm(dev, pwm, cdev_idx); +@@ -316,10 +322,10 @@ static int emc2305_set_single_tz(struct device *dev, int idx) + } + } + data->cdev_data[cdev_idx].cur_state = +- EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state, ++ EMC2305_PWM_DUTY2STATE(pwm, data->max_state, + EMC2305_FAN_MAX); + data->cdev_data[cdev_idx].last_hwmon_state = +- EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state, ++ EMC2305_PWM_DUTY2STATE(pwm, data->max_state, + EMC2305_FAN_MAX); + return 0; + } +diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c +index 1501ceb551e796..23238a80c000dd 100644 +--- a/drivers/hwmon/gsc-hwmon.c ++++ b/drivers/hwmon/gsc-hwmon.c +@@ -65,7 +65,7 @@ static ssize_t pwm_auto_point_temp_show(struct device *dev, + return ret; + + ret = regs[0] | regs[1] << 8; +- return sprintf(buf, "%d\n", ret * 10); ++ return sprintf(buf, "%d\n", ret * 100); + } + + static ssize_t pwm_auto_point_temp_store(struct device *dev, +@@ -100,7 +100,7 @@ static ssize_t pwm_auto_point_pwm_show(struct device *dev, + { + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + +- return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10))); ++ return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100); + } + + static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0); +diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c +index d2499f302b5083..f43067f6797e94 100644 +--- a/drivers/i2c/i2c-core-acpi.c ++++ b/drivers/i2c/i2c-core-acpi.c +@@ -370,6 +370,7 @@ static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = { + * the device works without issues on Windows at what is expected to be + * a 400KHz frequency. The root cause of the issue is not known. 
+ */ ++ { "DLL0945", 0 }, + { "ELAN06FA", 0 }, + {} + }; +diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h +index 908a807badaf9c..e267ea5ec5b95e 100644 +--- a/drivers/i3c/internals.h ++++ b/drivers/i3c/internals.h +@@ -9,6 +9,7 @@ + #define I3C_INTERNALS_H + + #include ++#include + + extern struct bus_type i3c_bus_type; + +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c +index 33254bc338b9c0..b6995e767850b4 100644 +--- a/drivers/i3c/master.c ++++ b/drivers/i3c/master.c +@@ -1398,7 +1398,7 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev) + + if (dev->info.bcr & I3C_BCR_HDR_CAP) { + ret = i3c_master_gethdrcap_locked(master, &dev->info); +- if (ret) ++ if (ret && ret != -ENOTSUPP) + return ret; + } + +@@ -2430,6 +2430,8 @@ static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action + case BUS_NOTIFY_DEL_DEVICE: + ret = i3c_master_i2c_detach(adap, client); + break; ++ default: ++ ret = -EINVAL; + } + i3c_bus_maintenance_unlock(&master->bus); + +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index 44842f243f40b5..6908052dea7785 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -1432,7 +1432,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { + }; + + static const struct x86_cpu_id intel_mwait_ids[] __initconst = { +- X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL), ++ X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL), + {} + }; + +diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c +index 967f06cd3f94e7..e147eaf1a3b15f 100644 +--- a/drivers/iio/adc/ad7768-1.c ++++ b/drivers/iio/adc/ad7768-1.c +@@ -203,6 +203,24 @@ static int ad7768_spi_reg_write(struct ad7768_state *st, + return spi_write(st->spi, st->data.d8, 2); + } + ++static int ad7768_send_sync_pulse(struct ad7768_state *st) ++{ ++ /* ++ * The datasheet specifies a minimum SYNC_IN pulse width of 1.5 × Tmclk, ++ * where Tmclk is the MCLK period. The supported MCLK frequencies range ++ * from 0.6 MHz to 17 MHz, which corresponds to a minimum SYNC_IN pulse ++ * width of approximately 2.5 µs in the worst-case scenario (0.6 MHz). ++ * ++ * Add a delay to ensure the pulse width is always sufficient to ++ * trigger synchronization. 
++ */ ++ gpiod_set_value_cansleep(st->gpio_sync_in, 1); ++ fsleep(3); ++ gpiod_set_value_cansleep(st->gpio_sync_in, 0); ++ ++ return 0; ++} ++ + static int ad7768_set_mode(struct ad7768_state *st, + enum ad7768_conv_mode mode) + { +@@ -288,10 +306,7 @@ static int ad7768_set_dig_fil(struct ad7768_state *st, + return ret; + + /* A sync-in pulse is required every time the filter dec rate changes */ +- gpiod_set_value(st->gpio_sync_in, 1); +- gpiod_set_value(st->gpio_sync_in, 0); +- +- return 0; ++ return ad7768_send_sync_pulse(st); + } + + static int ad7768_set_freq(struct ad7768_state *st, +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c +index 533667eefe419c..914274ed899ec0 100644 +--- a/drivers/iio/adc/ad_sigma_delta.c ++++ b/drivers/iio/adc/ad_sigma_delta.c +@@ -378,7 +378,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) + return ret; + } + +- samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8); ++ samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits / 8, 8); + samples_buf_size += sizeof(int64_t); + samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf, + samples_buf_size, GFP_KERNEL); +@@ -406,7 +406,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) + return ret; + } + +-static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev) ++static int ad_sd_buffer_predisable(struct iio_dev *indio_dev) + { + struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); + +@@ -534,7 +534,7 @@ static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned l + + static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = { + .postenable = &ad_sd_buffer_postenable, +- .postdisable = &ad_sd_buffer_postdisable, ++ .predisable = &ad_sd_buffer_predisable, + .validate_scan_mask = &ad_sd_validate_scan_mask, + }; + +diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c +index 52744dd98e65b4..98f17c29da69bd 100644 +--- a/drivers/iio/imu/bno055/bno055.c ++++ b/drivers/iio/imu/bno055/bno055.c +@@ -118,6 +118,7 @@ struct bno055_sysfs_attr { + int len; + int *fusion_vals; + int *hw_xlate; ++ int hw_xlate_len; + int type; + }; + +@@ -170,20 +171,24 @@ static int bno055_gyr_scale_vals[] = { + 1000, 1877467, 2000, 1877467, + }; + ++static int bno055_gyr_scale_hw_xlate[] = {0, 1, 2, 3, 4}; + static struct bno055_sysfs_attr bno055_gyr_scale = { + .vals = bno055_gyr_scale_vals, + .len = ARRAY_SIZE(bno055_gyr_scale_vals), + .fusion_vals = (int[]){1, 900}, +- .hw_xlate = (int[]){4, 3, 2, 1, 0}, ++ .hw_xlate = bno055_gyr_scale_hw_xlate, ++ .hw_xlate_len = ARRAY_SIZE(bno055_gyr_scale_hw_xlate), + .type = IIO_VAL_FRACTIONAL, + }; + + static int bno055_gyr_lpf_vals[] = {12, 23, 32, 47, 64, 116, 230, 523}; ++static int bno055_gyr_lpf_hw_xlate[] = {5, 4, 7, 3, 6, 2, 1, 0}; + static struct bno055_sysfs_attr bno055_gyr_lpf = { + .vals = bno055_gyr_lpf_vals, + .len = ARRAY_SIZE(bno055_gyr_lpf_vals), + .fusion_vals = (int[]){32}, +- .hw_xlate = (int[]){5, 4, 7, 3, 6, 2, 1, 0}, ++ .hw_xlate = bno055_gyr_lpf_hw_xlate, ++ .hw_xlate_len = ARRAY_SIZE(bno055_gyr_lpf_hw_xlate), + .type = IIO_VAL_INT, + }; + +@@ -561,7 +566,7 @@ static int bno055_get_regmask(struct bno055_priv *priv, int *val, int *val2, + + idx = (hwval & mask) >> shift; + if (attr->hw_xlate) +- for (i = 0; i < attr->len; i++) ++ for (i = 0; i < attr->hw_xlate_len; i++) + if (attr->hw_xlate[i] == idx) { + idx = i; + break; +diff --git 
a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h +index 94c0eb0bf8748a..809734e566e332 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h +@@ -142,11 +142,11 @@ struct inv_icm42600_state { + struct inv_icm42600_suspended suspended; + struct iio_dev *indio_gyro; + struct iio_dev *indio_accel; +- uint8_t buffer[2] __aligned(IIO_DMA_MINALIGN); ++ u8 buffer[2] __aligned(IIO_DMA_MINALIGN); + struct inv_icm42600_fifo fifo; + struct { +- int64_t gyro; +- int64_t accel; ++ s64 gyro; ++ s64 accel; + } timestamp; + }; + +@@ -369,7 +369,7 @@ const struct iio_mount_matrix * + inv_icm42600_get_mount_matrix(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan); + +-uint32_t inv_icm42600_odr_to_period(enum inv_icm42600_odr odr); ++u32 inv_icm42600_odr_to_period(enum inv_icm42600_odr odr); + + int inv_icm42600_set_accel_conf(struct inv_icm42600_state *st, + struct inv_icm42600_sensor_conf *conf, +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +index 47720560de6e03..a4155939e9567b 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +@@ -77,8 +77,8 @@ static const struct iio_chan_spec inv_icm42600_accel_channels[] = { + */ + struct inv_icm42600_accel_buffer { + struct inv_icm42600_fifo_sensor_data accel; +- int16_t temp; +- int64_t timestamp __aligned(8); ++ s16 temp; ++ aligned_s64 timestamp; + }; + + #define INV_ICM42600_SCAN_MASK_ACCEL_3AXIS \ +@@ -142,7 +142,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev, + + static int inv_icm42600_accel_read_sensor(struct inv_icm42600_state *st, + struct iio_chan_spec const *chan, +- int16_t *val) ++ s16 *val) + { + struct device *dev = regmap_get_device(st->map); + struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; +@@ -182,7 +182,7 @@ static int inv_icm42600_accel_read_sensor(struct inv_icm42600_state *st, + if (ret) + goto exit; + +- *val = (int16_t)be16_to_cpup(data); ++ *val = (s16)be16_to_cpup(data); + if (*val == INV_ICM42600_DATA_INVALID) + ret = -EINVAL; + exit: +@@ -359,11 +359,11 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st, + int *val, int *val2) + { + struct device *dev = regmap_get_device(st->map); +- int64_t val64; +- int32_t bias; ++ s64 val64; ++ s32 bias; + unsigned int reg; +- int16_t offset; +- uint8_t data[2]; ++ s16 offset; ++ u8 data[2]; + int ret; + + if (chan->type != IIO_ACCEL) +@@ -417,7 +417,7 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st, + * result in micro (1000000) + * (offset * 5 * 9.806650 * 1000000) / 10000 + */ +- val64 = (int64_t)offset * 5LL * 9806650LL; ++ val64 = (s64)offset * 5LL * 9806650LL; + /* for rounding, add + or - divisor (10000) divided by 2 */ + if (val64 >= 0) + val64 += 10000LL / 2LL; +@@ -435,10 +435,10 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st, + int val, int val2) + { + struct device *dev = regmap_get_device(st->map); +- int64_t val64; +- int32_t min, max; ++ s64 val64; ++ s32 min, max; + unsigned int reg, regval; +- int16_t offset; ++ s16 offset; + int ret; + + if (chan->type != IIO_ACCEL) +@@ -463,7 +463,7 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st, + inv_icm42600_accel_calibbias[1]; + max = inv_icm42600_accel_calibbias[4] * 1000000L + + inv_icm42600_accel_calibbias[5]; +- val64 = 
(int64_t)val * 1000000LL + (int64_t)val2; ++ val64 = (s64)val * 1000000LL + (s64)val2; + if (val64 < min || val64 > max) + return -EINVAL; + +@@ -538,7 +538,7 @@ static int inv_icm42600_accel_read_raw(struct iio_dev *indio_dev, + int *val, int *val2, long mask) + { + struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); +- int16_t data; ++ s16 data; + int ret; + + switch (chan->type) { +@@ -755,7 +755,8 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev) + const int8_t *temp; + unsigned int odr; + int64_t ts_val; +- struct inv_icm42600_accel_buffer buffer; ++ /* buffer is copied to userspace, zeroing it to avoid any data leak */ ++ struct inv_icm42600_accel_buffer buffer = { }; + + /* parse all fifo packets */ + for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) { +@@ -774,8 +775,6 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev) + inv_sensors_timestamp_apply_odr(ts, st->fifo.period, + st->fifo.nb.total, no); + +- /* buffer is copied to userspace, zeroing it to avoid any data leak */ +- memset(&buffer, 0, sizeof(buffer)); + memcpy(&buffer.accel, accel, sizeof(buffer.accel)); + /* convert 8 bits FIFO temperature in high resolution format */ + buffer.temp = temp ? (*temp * 64) : 0; +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +index 6ef1df9d60b77d..aca6ce75889053 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +@@ -26,28 +26,28 @@ + #define INV_ICM42600_FIFO_HEADER_ODR_GYRO BIT(0) + + struct inv_icm42600_fifo_1sensor_packet { +- uint8_t header; ++ u8 header; + struct inv_icm42600_fifo_sensor_data data; +- int8_t temp; ++ s8 temp; + } __packed; + #define INV_ICM42600_FIFO_1SENSOR_PACKET_SIZE 8 + + struct inv_icm42600_fifo_2sensors_packet { +- uint8_t header; ++ u8 header; + struct inv_icm42600_fifo_sensor_data accel; + struct inv_icm42600_fifo_sensor_data gyro; +- int8_t temp; ++ s8 temp; + __be16 timestamp; + } __packed; + #define INV_ICM42600_FIFO_2SENSORS_PACKET_SIZE 16 + + ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel, +- const void **gyro, const int8_t **temp, ++ const void **gyro, const s8 **temp, + const void **timestamp, unsigned int *odr) + { + const struct inv_icm42600_fifo_1sensor_packet *pack1 = packet; + const struct inv_icm42600_fifo_2sensors_packet *pack2 = packet; +- uint8_t header = *((const uint8_t *)packet); ++ u8 header = *((const u8 *)packet); + + /* FIFO empty */ + if (header & INV_ICM42600_FIFO_HEADER_MSG) { +@@ -100,7 +100,7 @@ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel, + + void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st) + { +- uint32_t period_gyro, period_accel, period; ++ u32 period_gyro, period_accel, period; + + if (st->fifo.en & INV_ICM42600_SENSOR_GYRO) + period_gyro = inv_icm42600_odr_to_period(st->conf.gyro.odr); +@@ -204,8 +204,8 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st) + { + size_t packet_size, wm_size; + unsigned int wm_gyro, wm_accel, watermark; +- uint32_t period_gyro, period_accel, period; +- uint32_t latency_gyro, latency_accel, latency; ++ u32 period_gyro, period_accel, period; ++ u32 latency_gyro, latency_accel, latency; + bool restore; + __le16 raw_wm; + int ret; +@@ -451,7 +451,7 @@ int inv_icm42600_buffer_fifo_read(struct inv_icm42600_state *st, + __be16 *raw_fifo_count; + ssize_t i, size; + const void *accel, *gyro, *timestamp; 
+- const int8_t *temp; ++ const s8 *temp; + unsigned int odr; + int ret; + +@@ -538,7 +538,7 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st, + unsigned int count) + { + struct inv_sensors_timestamp *ts; +- int64_t gyro_ts, accel_ts; ++ s64 gyro_ts, accel_ts; + int ret; + + gyro_ts = iio_get_time_ns(st->indio_gyro); +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h +index 8b85ee333bf8f6..eed6a3152acf48 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h +@@ -28,7 +28,7 @@ struct inv_icm42600_state; + struct inv_icm42600_fifo { + unsigned int on; + unsigned int en; +- uint32_t period; ++ u32 period; + struct { + unsigned int gyro; + unsigned int accel; +@@ -39,7 +39,7 @@ struct inv_icm42600_fifo { + size_t accel; + size_t total; + } nb; +- uint8_t data[2080] __aligned(IIO_DMA_MINALIGN); ++ u8 data[2080] __aligned(IIO_DMA_MINALIGN); + }; + + /* FIFO data packet */ +@@ -50,7 +50,7 @@ struct inv_icm42600_fifo_sensor_data { + } __packed; + #define INV_ICM42600_FIFO_DATA_INVALID -32768 + +-static inline int16_t inv_icm42600_fifo_get_sensor_data(__be16 d) ++static inline s16 inv_icm42600_fifo_get_sensor_data(__be16 d) + { + return be16_to_cpu(d); + } +@@ -58,7 +58,7 @@ static inline int16_t inv_icm42600_fifo_get_sensor_data(__be16 d) + static inline bool + inv_icm42600_fifo_is_data_valid(const struct inv_icm42600_fifo_sensor_data *s) + { +- int16_t x, y, z; ++ s16 x, y, z; + + x = inv_icm42600_fifo_get_sensor_data(s->x); + y = inv_icm42600_fifo_get_sensor_data(s->y); +@@ -73,7 +73,7 @@ inv_icm42600_fifo_is_data_valid(const struct inv_icm42600_fifo_sensor_data *s) + } + + ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel, +- const void **gyro, const int8_t **temp, ++ const void **gyro, const s8 **temp, + const void **timestamp, unsigned int *odr); + + extern const struct iio_buffer_setup_ops inv_icm42600_buffer_ops; +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +index da65aa4e27242f..91c181bb92869d 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +@@ -56,7 +56,7 @@ const struct regmap_config inv_icm42600_spi_regmap_config = { + EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, IIO_ICM42600); + + struct inv_icm42600_hw { +- uint8_t whoami; ++ u8 whoami; + const char *name; + const struct inv_icm42600_conf *conf; + }; +@@ -115,9 +115,9 @@ inv_icm42600_get_mount_matrix(const struct iio_dev *indio_dev, + return &st->orientation; + } + +-uint32_t inv_icm42600_odr_to_period(enum inv_icm42600_odr odr) ++u32 inv_icm42600_odr_to_period(enum inv_icm42600_odr odr) + { +- static uint32_t odr_periods[INV_ICM42600_ODR_NB] = { ++ static u32 odr_periods[INV_ICM42600_ODR_NB] = { + /* reserved values */ + 0, 0, 0, + /* 8kHz */ +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +index d08cd6839a3a67..9ee26478b666be 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +@@ -77,8 +77,8 @@ static const struct iio_chan_spec inv_icm42600_gyro_channels[] = { + */ + struct inv_icm42600_gyro_buffer { + struct inv_icm42600_fifo_sensor_data gyro; +- int16_t temp; +- int64_t timestamp __aligned(8); ++ s16 temp; ++ aligned_s64 timestamp; + }; + + #define 
INV_ICM42600_SCAN_MASK_GYRO_3AXIS \ +@@ -142,7 +142,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev, + + static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st, + struct iio_chan_spec const *chan, +- int16_t *val) ++ s16 *val) + { + struct device *dev = regmap_get_device(st->map); + struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; +@@ -182,7 +182,7 @@ static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st, + if (ret) + goto exit; + +- *val = (int16_t)be16_to_cpup(data); ++ *val = (s16)be16_to_cpup(data); + if (*val == INV_ICM42600_DATA_INVALID) + ret = -EINVAL; + exit: +@@ -371,11 +371,11 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st, + int *val, int *val2) + { + struct device *dev = regmap_get_device(st->map); +- int64_t val64; +- int32_t bias; ++ s64 val64; ++ s32 bias; + unsigned int reg; +- int16_t offset; +- uint8_t data[2]; ++ s16 offset; ++ u8 data[2]; + int ret; + + if (chan->type != IIO_ANGL_VEL) +@@ -429,7 +429,7 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st, + * result in nano (1000000000) + * (offset * 64 * Pi * 1000000000) / (2048 * 180) + */ +- val64 = (int64_t)offset * 64LL * 3141592653LL; ++ val64 = (s64)offset * 64LL * 3141592653LL; + /* for rounding, add + or - divisor (2048 * 180) divided by 2 */ + if (val64 >= 0) + val64 += 2048 * 180 / 2; +@@ -447,9 +447,9 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st, + int val, int val2) + { + struct device *dev = regmap_get_device(st->map); +- int64_t val64, min, max; ++ s64 val64, min, max; + unsigned int reg, regval; +- int16_t offset; ++ s16 offset; + int ret; + + if (chan->type != IIO_ANGL_VEL) +@@ -470,11 +470,11 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st, + } + + /* inv_icm42600_gyro_calibbias: min - step - max in nano */ +- min = (int64_t)inv_icm42600_gyro_calibbias[0] * 1000000000LL + +- (int64_t)inv_icm42600_gyro_calibbias[1]; +- max = (int64_t)inv_icm42600_gyro_calibbias[4] * 1000000000LL + +- (int64_t)inv_icm42600_gyro_calibbias[5]; +- val64 = (int64_t)val * 1000000000LL + (int64_t)val2; ++ min = (s64)inv_icm42600_gyro_calibbias[0] * 1000000000LL + ++ (s64)inv_icm42600_gyro_calibbias[1]; ++ max = (s64)inv_icm42600_gyro_calibbias[4] * 1000000000LL + ++ (s64)inv_icm42600_gyro_calibbias[5]; ++ val64 = (s64)val * 1000000000LL + (s64)val2; + if (val64 < min || val64 > max) + return -EINVAL; + +@@ -549,7 +549,7 @@ static int inv_icm42600_gyro_read_raw(struct iio_dev *indio_dev, + int *val, int *val2, long mask) + { + struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); +- int16_t data; ++ s16 data; + int ret; + + switch (chan->type) { +@@ -764,10 +764,11 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev) + ssize_t i, size; + unsigned int no; + const void *accel, *gyro, *timestamp; +- const int8_t *temp; ++ const s8 *temp; + unsigned int odr; +- int64_t ts_val; +- struct inv_icm42600_gyro_buffer buffer; ++ s64 ts_val; ++ /* buffer is copied to userspace, zeroing it to avoid any data leak */ ++ struct inv_icm42600_gyro_buffer buffer = { }; + + /* parse all fifo packets */ + for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) { +@@ -786,8 +787,6 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev) + inv_sensors_timestamp_apply_odr(ts, st->fifo.period, + st->fifo.nb.total, no); + +- /* buffer is copied to userspace, zeroing it to avoid any data leak */ +- memset(&buffer, 0, sizeof(buffer)); + 
memcpy(&buffer.gyro, gyro, sizeof(buffer.gyro)); + /* convert 8 bits FIFO temperature in high resolution format */ + buffer.temp = temp ? (*temp * 64) : 0; +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c +index 91f0f381082bda..51430b4f5e51b6 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c +@@ -13,7 +13,7 @@ + #include "inv_icm42600.h" + #include "inv_icm42600_temp.h" + +-static int inv_icm42600_temp_read(struct inv_icm42600_state *st, int16_t *temp) ++static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp) + { + struct device *dev = regmap_get_device(st->map); + __be16 *raw; +@@ -31,9 +31,13 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, int16_t *temp) + if (ret) + goto exit; + +- *temp = (int16_t)be16_to_cpup(raw); ++ *temp = (s16)be16_to_cpup(raw); ++ /* ++ * Temperature data is invalid if both accel and gyro are off. ++ * Return -EBUSY in this case. ++ */ + if (*temp == INV_ICM42600_DATA_INVALID) +- ret = -EINVAL; ++ ret = -EBUSY; + + exit: + mutex_unlock(&st->lock); +@@ -48,7 +52,7 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev, + int *val, int *val2, long mask) + { + struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); +- int16_t temp; ++ s16 temp; + int ret; + + if (chan->type != IIO_TEMP) +diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c +index c1f9604c27141e..166874c68ff639 100644 +--- a/drivers/iio/light/as73211.c ++++ b/drivers/iio/light/as73211.c +@@ -573,7 +573,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p) + struct { + __le16 chan[4]; + s64 ts __aligned(8); +- } scan; ++ } scan = { }; + int data_result, ret; + + mutex_lock(&data->mutex); +diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c +index 84f6b333c91958..8d9e8a1c94c455 100644 +--- a/drivers/iio/pressure/bmp280-core.c ++++ b/drivers/iio/pressure/bmp280-core.c +@@ -2152,11 +2152,12 @@ int bmp280_common_probe(struct device *dev, + + /* Bring chip out of reset if there is an assigned GPIO line */ + gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); ++ if (IS_ERR(gpiod)) ++ return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n"); ++ + /* Deassert the signal */ +- if (gpiod) { +- dev_info(dev, "release reset\n"); +- gpiod_set_value(gpiod, 0); +- } ++ dev_info(dev, "release reset\n"); ++ gpiod_set_value(gpiod, 0); + + data->regmap = regmap; + +diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c +index bcebacaf3dab0d..9fd3d2e8cea6cd 100644 +--- a/drivers/iio/proximity/isl29501.c ++++ b/drivers/iio/proximity/isl29501.c +@@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p) + struct iio_dev *indio_dev = pf->indio_dev; + struct isl29501_private *isl29501 = iio_priv(indio_dev); + const unsigned long *active_mask = indio_dev->active_scan_mask; +- u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */ +- +- if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) +- isl29501_register_read(isl29501, REG_DISTANCE, buffer); ++ u32 value; ++ struct { ++ u16 data; ++ aligned_s64 ts; ++ } scan = { }; ++ ++ if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) { ++ isl29501_register_read(isl29501, REG_DISTANCE, &value); ++ scan.data = value; ++ } + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); ++ 
iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c +index 555a61e2f3fdd1..44fba61ccfe27d 100644 +--- a/drivers/iio/temperature/maxim_thermocouple.c ++++ b/drivers/iio/temperature/maxim_thermocouple.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -122,8 +123,15 @@ struct maxim_thermocouple_data { + struct spi_device *spi; + const struct maxim_thermocouple_chip *chip; + char tc_type; +- +- u8 buffer[16] __aligned(IIO_DMA_MINALIGN); ++ /* Buffer for reading up to 2 hardware channels. */ ++ struct { ++ union { ++ __be16 raw16; ++ __be32 raw32; ++ __be16 raw[2]; ++ }; ++ aligned_s64 timestamp; ++ } buffer __aligned(IIO_DMA_MINALIGN); + }; + + static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, +@@ -131,18 +139,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, + { + unsigned int storage_bytes = data->chip->read_size; + unsigned int shift = chan->scan_type.shift + (chan->address * 8); +- __be16 buf16; +- __be32 buf32; + int ret; + + switch (storage_bytes) { + case 2: +- ret = spi_read(data->spi, (void *)&buf16, storage_bytes); +- *val = be16_to_cpu(buf16); ++ ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes); ++ *val = be16_to_cpu(data->buffer.raw16); + break; + case 4: +- ret = spi_read(data->spi, (void *)&buf32, storage_bytes); +- *val = be32_to_cpu(buf32); ++ ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes); ++ *val = be32_to_cpu(data->buffer.raw32); + break; + default: + ret = -EINVAL; +@@ -167,9 +173,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private) + struct maxim_thermocouple_data *data = iio_priv(indio_dev); + int ret; + +- ret = spi_read(data->spi, data->buffer, data->chip->read_size); ++ ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size); + if (!ret) { +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, + iio_get_time_ns(indio_dev)); + } + +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c +index 6d1dbc97875906..a94723a12bb486 100644 +--- a/drivers/infiniband/core/nldev.c ++++ b/drivers/infiniband/core/nldev.c +@@ -1412,10 +1412,11 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { + + }; + +-static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, +- struct netlink_ext_ack *extack, +- enum rdma_restrack_type res_type, +- res_fill_func_t fill_func) ++static noinline_for_stack int ++res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, ++ struct netlink_ext_ack *extack, ++ enum rdma_restrack_type res_type, ++ res_fill_func_t fill_func) + { + const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; + struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; +@@ -2153,10 +2154,10 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + return ret; + } + +-static int stat_get_doit_default_counter(struct sk_buff *skb, +- struct nlmsghdr *nlh, +- struct netlink_ext_ack *extack, +- struct nlattr *tb[]) ++static noinline_for_stack int ++stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh, ++ struct netlink_ext_ack *extack, ++ struct nlattr *tb[]) + { + struct rdma_hw_stats *stats; + struct nlattr *table_attr; +@@ -2246,8 +2247,9 @@ static int 
stat_get_doit_default_counter(struct sk_buff *skb, + return ret; + } + +-static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, +- struct netlink_ext_ack *extack, struct nlattr *tb[]) ++static noinline_for_stack int ++stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, ++ struct netlink_ext_ack *extack, struct nlattr *tb[]) + + { + static enum rdma_nl_counter_mode mode; +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +index f7345e4890a141..31fff5885f1a83 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +@@ -1823,7 +1823,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, + struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, + ib_srq); + struct bnxt_re_dev *rdev = srq->rdev; +- int rc; + + switch (srq_attr_mask) { + case IB_SRQ_MAX_WR: +@@ -1835,11 +1834,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, + return -EINVAL; + + srq->qplib_srq.threshold = srq_attr->srq_limit; +- rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); +- if (rc) { +- ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); +- return rc; +- } ++ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold); ++ + /* On success, update the shadow */ + srq->srq_limit = srq_attr->srq_limit; + /* No need to Build and send response back to udata */ +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c +index 68ea4ed0b171b8..c19dd732c2354c 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c +@@ -685,9 +685,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, + srq->dbinfo.db = srq->dpi->dbr; + srq->dbinfo.max_slot = 1; + srq->dbinfo.priv_db = res->dpi_tbl.priv_db; +- if (srq->threshold) +- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); +- srq->arm_req = false; ++ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); + + return 0; + fail: +@@ -697,24 +695,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, + return rc; + } + +-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, +- struct bnxt_qplib_srq *srq) +-{ +- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; +- u32 count; +- +- count = __bnxt_qplib_get_avail(srq_hwq); +- if (count > srq->threshold) { +- srq->arm_req = false; +- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); +- } else { +- /* Deferred arming */ +- srq->arm_req = true; +- } +- +- return 0; +-} +- + int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq) + { +@@ -756,7 +736,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, + struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; + struct rq_wqe *srqe; + struct sq_sge *hw_sge; +- u32 count = 0; + int i, next; + + spin_lock(&srq_hwq->lock); +@@ -788,15 +767,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, + + bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot); + +- spin_lock(&srq_hwq->lock); +- count = __bnxt_qplib_get_avail(srq_hwq); +- spin_unlock(&srq_hwq->lock); + /* Ring DB */ + bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); +- if (srq->arm_req == true && count > srq->threshold) { +- srq->arm_req = false; +- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); +- } + + return 0; + } +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h +index 55fd840359ef23..288196facfd7ce 100644 +--- 
a/drivers/infiniband/hw/bnxt_re/qplib_fp.h ++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h +@@ -519,8 +519,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, + srqn_handler_t srq_handler); + int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq); +-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, +- struct bnxt_qplib_srq *srq); + int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq); + void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c +index 96ceec1e8199a6..77da7cf344274b 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c +@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, + pbl->pg_arr = vmalloc_array(pages, sizeof(void *)); + if (!pbl->pg_arr) + return -ENOMEM; ++ memset(pbl->pg_arr, 0, pages * sizeof(void *)); + + pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t)); + if (!pbl->pg_map_arr) { +@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, + pbl->pg_arr = NULL; + return -ENOMEM; + } ++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t)); + pbl->pg_count = 0; + pbl->pg_size = sginfo->pgsize; + +diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c +index 29ad2f5ffabe20..e990690d8b3cd5 100644 +--- a/drivers/infiniband/hw/erdma/erdma_verbs.c ++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c +@@ -979,7 +979,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, + if (ret) + goto err_out_cmd; + } else { +- init_kernel_qp(dev, qp, attrs); ++ ret = init_kernel_qp(dev, qp, attrs); ++ if (ret) ++ goto err_out_xa; + } + + qp->attrs.max_send_sge = attrs->cap.max_send_sge; +diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c +index bbc957c578e1e5..e5db39f4720de9 100644 +--- a/drivers/infiniband/hw/hfi1/affinity.c ++++ b/drivers/infiniband/hw/hfi1/affinity.c +@@ -964,31 +964,35 @@ static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask, + struct hfi1_affinity_node_list *affinity) + { + int possible, curr_cpu, i; +- uint num_cores_per_socket = node_affinity.num_online_cpus / ++ uint num_cores_per_socket; ++ ++ cpumask_copy(hw_thread_mask, &affinity->proc.mask); ++ ++ if (affinity->num_core_siblings == 0) ++ return; ++ ++ num_cores_per_socket = node_affinity.num_online_cpus / + affinity->num_core_siblings / + node_affinity.num_online_nodes; + +- cpumask_copy(hw_thread_mask, &affinity->proc.mask); +- if (affinity->num_core_siblings > 0) { +- /* Removing other siblings not needed for now */ +- possible = cpumask_weight(hw_thread_mask); +- curr_cpu = cpumask_first(hw_thread_mask); +- for (i = 0; +- i < num_cores_per_socket * node_affinity.num_online_nodes; +- i++) +- curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); +- +- for (; i < possible; i++) { +- cpumask_clear_cpu(curr_cpu, hw_thread_mask); +- curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); +- } ++ /* Removing other siblings not needed for now */ ++ possible = cpumask_weight(hw_thread_mask); ++ curr_cpu = cpumask_first(hw_thread_mask); ++ for (i = 0; ++ i < num_cores_per_socket * node_affinity.num_online_nodes; ++ i++) ++ curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); + +- /* Identifying correct HW threads within physical cores */ +- cpumask_shift_left(hw_thread_mask, hw_thread_mask, +- num_cores_per_socket * +- 
node_affinity.num_online_nodes * +- hw_thread_no); ++ for (; i < possible; i++) { ++ cpumask_clear_cpu(curr_cpu, hw_thread_mask); ++ curr_cpu = cpumask_next(curr_cpu, hw_thread_mask); + } ++ ++ /* Identifying correct HW threads within physical cores */ ++ cpumask_shift_left(hw_thread_mask, hw_thread_mask, ++ num_cores_per_socket * ++ node_affinity.num_online_nodes * ++ hw_thread_no); + } + + int hfi1_get_proc_affinity(int node) +diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c +index feae920784be80..03e3f6668840f3 100644 +--- a/drivers/infiniband/sw/siw/siw_qp_tx.c ++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c +@@ -340,18 +340,17 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, + if (!sendpage_ok(page[i])) + msg.msg_flags &= ~MSG_SPLICE_PAGES; + bvec_set_page(&bvec, page[i], bytes, offset); +- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); ++ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes); + + try_page_again: + lock_sock(sk); +- rv = tcp_sendmsg_locked(sk, &msg, size); ++ rv = tcp_sendmsg_locked(sk, &msg, bytes); + release_sock(sk); + + if (rv > 0) { + size -= rv; + sent += rv; + if (rv != bytes) { +- offset += rv; + bytes -= rv; + goto try_page_again; + } +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c +index 2e7a12f306510c..431cea41df2af1 100644 +--- a/drivers/iommu/amd/init.c ++++ b/drivers/iommu/amd/init.c +@@ -3625,7 +3625,7 @@ static int __init parse_ivrs_acpihid(char *str) + { + u32 seg = 0, bus, dev, fn; + char *hid, *uid, *p, *addr; +- char acpiid[ACPIID_LEN] = {0}; ++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */ + int i; + + addr = strchr(str, '@'); +@@ -3651,7 +3651,7 @@ static int __init parse_ivrs_acpihid(char *str) + /* We have the '@', make it the terminator to get just the acpiid */ + *addr++ = 0; + +- if (strlen(str) > ACPIID_LEN + 1) ++ if (strlen(str) > ACPIID_LEN) + goto not_found; + + if (sscanf(str, "=%s", acpiid) != 1) +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +index e6b4bab0dde2e5..3d031366979637 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +@@ -255,6 +255,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { + { .compatible = "qcom,sdm670-mdss" }, + { .compatible = "qcom,sdm845-mdss" }, + { .compatible = "qcom,sdm845-mss-pil" }, ++ { .compatible = "qcom,sm6115-mdss" }, + { .compatible = "qcom,sm6350-mdss" }, + { .compatible = "qcom,sm6375-mdss" }, + { .compatible = "qcom,sm8150-mdss" }, +diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c +index e76b2293999481..f058405c5fbb66 100644 +--- a/drivers/iommu/iommufd/io_pagetable.c ++++ b/drivers/iommu/iommufd/io_pagetable.c +@@ -69,36 +69,45 @@ struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter) + return iter->area; + } + +-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span, +- unsigned long length, +- unsigned long iova_alignment, +- unsigned long page_offset) ++static bool __alloc_iova_check_range(unsigned long *start, unsigned long last, ++ unsigned long length, ++ unsigned long iova_alignment, ++ unsigned long page_offset) + { +- if (span->is_used || span->last_hole - span->start_hole < length - 1) ++ unsigned long aligned_start; ++ ++ /* ALIGN_UP() */ ++ if (check_add_overflow(*start, iova_alignment - 1, &aligned_start)) + return false; ++ 
aligned_start &= ~(iova_alignment - 1); ++ aligned_start |= page_offset; + +- span->start_hole = ALIGN(span->start_hole, iova_alignment) | +- page_offset; +- if (span->start_hole > span->last_hole || +- span->last_hole - span->start_hole < length - 1) ++ if (aligned_start >= last || last - aligned_start < length - 1) + return false; ++ *start = aligned_start; + return true; + } + +-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span, ++static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span, + unsigned long length, + unsigned long iova_alignment, + unsigned long page_offset) + { +- if (span->is_hole || span->last_used - span->start_used < length - 1) ++ if (span->is_used) + return false; ++ return __alloc_iova_check_range(&span->start_hole, span->last_hole, ++ length, iova_alignment, page_offset); ++} + +- span->start_used = ALIGN(span->start_used, iova_alignment) | +- page_offset; +- if (span->start_used > span->last_used || +- span->last_used - span->start_used < length - 1) ++static bool __alloc_iova_check_used(struct interval_tree_span_iter *span, ++ unsigned long length, ++ unsigned long iova_alignment, ++ unsigned long page_offset) ++{ ++ if (span->is_hole) + return false; +- return true; ++ return __alloc_iova_check_range(&span->start_used, span->last_used, ++ length, iova_alignment, page_offset); + } + + /* +@@ -524,8 +533,10 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start, + iommufd_access_notify_unmap(iopt, area_first, length); + /* Something is not responding to unmap requests. */ + tries++; +- if (WARN_ON(tries > 100)) +- return -EDEADLOCK; ++ if (WARN_ON(tries > 100)) { ++ rc = -EDEADLOCK; ++ goto out_unmapped; ++ } + goto again; + } + +@@ -547,6 +558,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start, + out_unlock_iova: + up_write(&iopt->iova_rwsem); + up_read(&iopt->domains_rwsem); ++out_unmapped: + if (unmapped) + *unmapped = unmapped_bytes; + return rc; +diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c +index 17391aefeb941f..a619dbe0152434 100644 +--- a/drivers/leds/flash/leds-qcom-flash.c ++++ b/drivers/leds/flash/leds-qcom-flash.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* +- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. ++ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + #include +@@ -14,6 +14,9 @@ + #include + + /* registers definitions */ ++#define FLASH_REVISION_REG 0x00 ++#define FLASH_4CH_REVISION_V0P1 0x01 ++ + #define FLASH_TYPE_REG 0x04 + #define FLASH_TYPE_VAL 0x18 + +@@ -73,6 +76,16 @@ + + #define UA_PER_MA 1000 + ++/* thermal threshold constants */ ++#define OTST_3CH_MIN_VAL 3 ++#define OTST1_4CH_MIN_VAL 0 ++#define OTST1_4CH_V0P1_MIN_VAL 3 ++#define OTST2_4CH_MIN_VAL 0 ++ ++#define OTST1_MAX_CURRENT_MA 1000 ++#define OTST2_MAX_CURRENT_MA 500 ++#define OTST3_MAX_CURRENT_MA 200 ++ + enum hw_type { + QCOM_MVFLASH_3CH, + QCOM_MVFLASH_4CH, +@@ -98,10 +111,13 @@ enum { + REG_IRESOLUTION, + REG_CHAN_STROBE, + REG_CHAN_EN, ++ REG_THERM_THRSH1, ++ REG_THERM_THRSH2, ++ REG_THERM_THRSH3, + REG_MAX_COUNT, + }; + +-static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { ++static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { + REG_FIELD(0x08, 0, 7), /* status1 */ + REG_FIELD(0x09, 0, 7), /* status2 */ + REG_FIELD(0x0a, 0, 7), /* status3 */ +@@ -111,9 +127,12 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { + REG_FIELD(0x47, 0, 5), /* iresolution */ + REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */ + REG_FIELD(0x4c, 0, 2), /* chan_en */ ++ REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */ ++ REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */ ++ REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */ + }; + +-static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { ++static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { + REG_FIELD(0x06, 0, 7), /* status1 */ + REG_FIELD(0x07, 0, 6), /* status2 */ + REG_FIELD(0x09, 0, 7), /* status3 */ +@@ -123,6 +142,8 @@ static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { + REG_FIELD(0x49, 0, 3), /* iresolution */ + REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */ + REG_FIELD(0x4e, 0, 3), /* chan_en */ ++ REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */ ++ REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */ + }; + + struct qcom_flash_data { +@@ -130,9 +151,11 @@ struct qcom_flash_data { + struct regmap_field *r_fields[REG_MAX_COUNT]; + struct mutex lock; + enum hw_type hw_type; ++ u32 total_ma; + u8 leds_count; + u8 max_channels; + u8 chan_en_bits; ++ u8 revision; + }; + + struct qcom_flash_led { +@@ -143,6 +166,7 @@ struct qcom_flash_led { + u32 max_timeout_ms; + u32 flash_current_ma; + u32 flash_timeout_ms; ++ u32 current_in_use_ma; + u8 *chan_id; + u8 chan_count; + bool enabled; +@@ -172,6 +196,127 @@ static int set_flash_module_en(struct qcom_flash_led *led, bool en) + return rc; + } + ++static int update_allowed_flash_current(struct qcom_flash_led *led, u32 *current_ma, bool strobe) ++{ ++ struct qcom_flash_data *flash_data = led->flash_data; ++ u32 therm_ma, avail_ma, thrsh[3], min_thrsh, sts; ++ int rc = 0; ++ ++ mutex_lock(&flash_data->lock); ++ /* ++ * Put previously allocated current into allowed budget in either of these two cases: ++ * 1) LED is disabled; ++ * 2) LED is enabled repeatedly ++ */ ++ if (!strobe || led->current_in_use_ma != 0) { ++ if (flash_data->total_ma >= led->current_in_use_ma) ++ flash_data->total_ma -= led->current_in_use_ma; ++ else ++ flash_data->total_ma = 0; ++ ++ led->current_in_use_ma = 0; ++ if (!strobe) ++ goto unlock; ++ } ++ ++ /* ++ * Cache the default thermal threshold settings, and set them to the lowest levels before ++ * reading over-temp real time status. If over-temp has been triggered at the lowest ++ * threshold, it's very likely that it would be triggered at a higher (default) threshold ++ * when more flash current is requested. 
Prevent device from triggering over-temp condition ++ * by limiting the flash current for the new request. ++ */ ++ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH1], &thrsh[0]); ++ if (rc < 0) ++ goto unlock; ++ ++ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH2], &thrsh[1]); ++ if (rc < 0) ++ goto unlock; ++ ++ if (flash_data->hw_type == QCOM_MVFLASH_3CH) { ++ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH3], &thrsh[2]); ++ if (rc < 0) ++ goto unlock; ++ } ++ ++ min_thrsh = OTST_3CH_MIN_VAL; ++ if (flash_data->hw_type == QCOM_MVFLASH_4CH) ++ min_thrsh = (flash_data->revision == FLASH_4CH_REVISION_V0P1) ? ++ OTST1_4CH_V0P1_MIN_VAL : OTST1_4CH_MIN_VAL; ++ ++ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], min_thrsh); ++ if (rc < 0) ++ goto unlock; ++ ++ if (flash_data->hw_type == QCOM_MVFLASH_4CH) ++ min_thrsh = OTST2_4CH_MIN_VAL; ++ ++ /* ++ * The default thermal threshold settings have been updated hence ++ * restore them if any fault happens starting from here. ++ */ ++ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], min_thrsh); ++ if (rc < 0) ++ goto restore; ++ ++ if (flash_data->hw_type == QCOM_MVFLASH_3CH) { ++ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], min_thrsh); ++ if (rc < 0) ++ goto restore; ++ } ++ ++ /* Read thermal level status to get corresponding derating flash current */ ++ rc = regmap_field_read(flash_data->r_fields[REG_STATUS2], &sts); ++ if (rc) ++ goto restore; ++ ++ therm_ma = FLASH_TOTAL_CURRENT_MAX_UA / 1000; ++ if (flash_data->hw_type == QCOM_MVFLASH_3CH) { ++ if (sts & FLASH_STS_3CH_OTST3) ++ therm_ma = OTST3_MAX_CURRENT_MA; ++ else if (sts & FLASH_STS_3CH_OTST2) ++ therm_ma = OTST2_MAX_CURRENT_MA; ++ else if (sts & FLASH_STS_3CH_OTST1) ++ therm_ma = OTST1_MAX_CURRENT_MA; ++ } else { ++ if (sts & FLASH_STS_4CH_OTST2) ++ therm_ma = OTST2_MAX_CURRENT_MA; ++ else if (sts & FLASH_STS_4CH_OTST1) ++ therm_ma = OTST1_MAX_CURRENT_MA; ++ } ++ ++ /* Calculate the allowed flash current for the request */ ++ if (therm_ma <= flash_data->total_ma) ++ avail_ma = 0; ++ else ++ avail_ma = therm_ma - flash_data->total_ma; ++ ++ *current_ma = min_t(u32, *current_ma, avail_ma); ++ led->current_in_use_ma = *current_ma; ++ flash_data->total_ma += led->current_in_use_ma; ++ ++ dev_dbg(led->flash.led_cdev.dev, "allowed flash current: %dmA, total current: %dmA\n", ++ led->current_in_use_ma, flash_data->total_ma); ++ ++restore: ++ /* Restore to default thermal threshold settings */ ++ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], thrsh[0]); ++ if (rc < 0) ++ goto unlock; ++ ++ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], thrsh[1]); ++ if (rc < 0) ++ goto unlock; ++ ++ if (flash_data->hw_type == QCOM_MVFLASH_3CH) ++ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], thrsh[2]); ++ ++unlock: ++ mutex_unlock(&flash_data->lock); ++ return rc; ++} ++ + static int set_flash_current(struct qcom_flash_led *led, u32 current_ma, enum led_mode mode) + { + struct qcom_flash_data *flash_data = led->flash_data; +@@ -313,6 +458,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat + if (rc) + return rc; + ++ rc = update_allowed_flash_current(led, &led->flash_current_ma, state); ++ if (rc < 0) ++ return rc; ++ + rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE); + if (rc) + return rc; +@@ -429,6 +578,10 @@ static int qcom_flash_led_brightness_set(struct led_classdev *led_cdev, + if (rc) + return rc; + ++ rc = 
update_allowed_flash_current(led, &current_ma, enable);
++ if (rc < 0)
++ return rc;
++
+ rc = set_flash_current(led, current_ma, TORCH_MODE);
+ if (rc)
+ return rc;
+@@ -702,11 +855,25 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
+ if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
+ flash_data->hw_type = QCOM_MVFLASH_3CH;
+ flash_data->max_channels = 3;
+- regs = mvflash_3ch_regs;
++ regs = devm_kmemdup(dev, mvflash_3ch_regs, sizeof(mvflash_3ch_regs),
++ GFP_KERNEL);
++ if (!regs)
++ return -ENOMEM;
+ } else if (val == FLASH_SUBTYPE_4CH_VAL) {
+ flash_data->hw_type = QCOM_MVFLASH_4CH;
+ flash_data->max_channels = 4;
+- regs = mvflash_4ch_regs;
++ regs = devm_kmemdup(dev, mvflash_4ch_regs, sizeof(mvflash_4ch_regs),
++ GFP_KERNEL);
++ if (!regs)
++ return -ENOMEM;
++
++ rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val);
++ if (rc < 0) {
++ dev_err(dev, "Failed to read flash LED module revision, rc=%d\n", rc);
++ return rc;
++ }
++
++ flash_data->revision = val;
+ } else {
+ dev_err(dev, "flash LED subtype %#x is not yet supported\n", val);
+ return -ENODEV;
+@@ -720,6 +887,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
+ dev_err(dev, "Failed to allocate regmap field, rc=%d\n", rc);
+ return rc;
+ }
++ devm_kfree(dev, regs); /* devm_regmap_field_bulk_alloc() makes copies */
+
+ platform_set_drvdata(pdev, flash_data);
+ mutex_init(&flash_data->lock);
+diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
+index 68c4d9967d6831..182a590b026775 100644
+--- a/drivers/leds/leds-lp50xx.c
++++ b/drivers/leds/leds-lp50xx.c
+@@ -486,6 +486,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
+ }
+
+ fwnode_for_each_child_node(child, led_node) {
++ int multi_index;
+ ret = fwnode_property_read_u32(led_node, "color",
+ &color_id);
+ if (ret) {
+@@ -493,8 +494,16 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
+ dev_err(priv->dev, "Cannot read color\n");
+ goto child_out;
+ }
++ ret = fwnode_property_read_u32(led_node, "reg", &multi_index);
++ if (ret != 0) {
++ dev_err(priv->dev, "reg must be set\n");
++ return -EINVAL;
++ } else if (multi_index >= LP50XX_LEDS_PER_MODULE) {
++ dev_err(priv->dev, "reg %i out of range\n", multi_index);
++ return -EINVAL;
++ }
+
+- mc_led_info[num_colors].color_index = color_id;
++ mc_led_info[multi_index].color_index = color_id;
+ num_colors++;
+ }
+
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index f8912fa60c4988..79719fc8a08fb4 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -54,7 +54,6 @@ struct led_netdev_data {
+ unsigned int last_activity;
+
+ unsigned long mode;
+- unsigned long blink_delay;
+ int link_speed;
+ u8 duplex;
+
+@@ -70,10 +69,6 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
+ /* Already validated, hw control is possible with the requested mode */
+ if (trigger_data->hw_control) {
+ led_cdev->hw_control_set(led_cdev, trigger_data->mode);
+- if (led_cdev->blink_set) {
+- led_cdev->blink_set(led_cdev, &trigger_data->blink_delay,
+- &trigger_data->blink_delay);
+- }
+
+ return;
+ }
+@@ -391,11 +386,10 @@ static ssize_t interval_store(struct device *dev,
+ size_t size)
+ {
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+- struct led_classdev *led_cdev = trigger_data->led_cdev;
+ unsigned long value;
+ int ret;
+
+- if (trigger_data->hw_control && !led_cdev->blink_set)
++ if (trigger_data->hw_control)
+ return 
-EINVAL; + + ret = kstrtoul(buf, 0, &value); +@@ -404,13 +398,9 @@ static ssize_t interval_store(struct device *dev, + + /* impose some basic bounds on the timer interval */ + if (value >= 5 && value <= 10000) { +- if (trigger_data->hw_control) { +- trigger_data->blink_delay = value; +- } else { +- cancel_delayed_work_sync(&trigger_data->work); ++ cancel_delayed_work_sync(&trigger_data->work); + +- atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); +- } ++ atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); + set_baseline_state(trigger_data); /* resets timer */ + } + +diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c +index b49e10d76d0302..2c8626a83de437 100644 +--- a/drivers/md/dm-ps-historical-service-time.c ++++ b/drivers/md/dm-ps-historical-service-time.c +@@ -541,8 +541,10 @@ static int __init dm_hst_init(void) + { + int r = dm_register_path_selector(&hst_ps); + +- if (r < 0) ++ if (r < 0) { + DMERR("register failed %d", r); ++ return r; ++ } + + DMINFO("version " HST_VERSION " loaded"); + +diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c +index e305f05ad1e5e8..eb543e6431e038 100644 +--- a/drivers/md/dm-ps-queue-length.c ++++ b/drivers/md/dm-ps-queue-length.c +@@ -260,8 +260,10 @@ static int __init dm_ql_init(void) + { + int r = dm_register_path_selector(&ql_ps); + +- if (r < 0) ++ if (r < 0) { + DMERR("register failed %d", r); ++ return r; ++ } + + DMINFO("version " QL_VERSION " loaded"); + +diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c +index 0f04b673597aeb..62ac820125cbdc 100644 +--- a/drivers/md/dm-ps-round-robin.c ++++ b/drivers/md/dm-ps-round-robin.c +@@ -220,8 +220,10 @@ static int __init dm_rr_init(void) + { + int r = dm_register_path_selector(&rr_ps); + +- if (r < 0) ++ if (r < 0) { + DMERR("register failed %d", r); ++ return r; ++ } + + DMINFO("version " RR_VERSION " loaded"); + +diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c +index 969d31c40272e2..f8c43aecdb27ad 100644 +--- a/drivers/md/dm-ps-service-time.c ++++ b/drivers/md/dm-ps-service-time.c +@@ -341,8 +341,10 @@ static int __init dm_st_init(void) + { + int r = dm_register_path_selector(&st_ps); + +- if (r < 0) ++ if (r < 0) { + DMERR("register failed %d", r); ++ return r; ++ } + + DMINFO("version " ST_VERSION " loaded"); + +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index bf2ade89c8c2dc..ed0a5e91968dfb 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -862,17 +862,17 @@ static bool dm_table_supports_dax(struct dm_table *t, + return true; + } + +-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct block_device *bdev = dev->bdev; + struct request_queue *q = bdev_get_queue(bdev); + + /* request-based cannot stack on partitions! 
*/ + if (bdev_is_partition(bdev)) +- return false; ++ return true; + +- return queue_is_mq(q); ++ return !queue_is_mq(q); + } + + static int dm_table_determine_type(struct dm_table *t) +@@ -968,7 +968,7 @@ static int dm_table_determine_type(struct dm_table *t) + + /* Non-request-stackable devices can't be used for request-based dm */ + if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) { ++ ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) { + DMERR("table load rejected: including non-request-stackable devices"); + return -EINVAL; + } +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c +index b487f7acc860f7..36e55a5bcb0de8 100644 +--- a/drivers/md/dm-zoned-target.c ++++ b/drivers/md/dm-zoned-target.c +@@ -1062,7 +1062,7 @@ static int dmz_iterate_devices(struct dm_target *ti, + struct dmz_target *dmz = ti->private; + unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata); + sector_t capacity; +- int i, r; ++ int i, r = 0; + + for (i = 0; i < dmz->nr_ddevs; i++) { + capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1); +diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c +index ee870ea1a88601..6f8d6797c61459 100644 +--- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c ++++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c +@@ -171,11 +171,12 @@ static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data, + { + struct rain *rain = serio_get_drvdata(serio); + ++ spin_lock(&rain->buf_lock); + if (rain->buf_len == DATA_SIZE) { ++ spin_unlock(&rain->buf_lock); + dev_warn_once(rain->dev, "buffer overflow\n"); + return IRQ_HANDLED; + } +- spin_lock(&rain->buf_lock); + rain->buf_len++; + rain->buf[rain->buf_wr_idx] = data; + rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff; +diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c +index 444fe1c4bf2d71..e561c7cc35bfaf 100644 +--- a/drivers/media/dvb-frontends/dib7000p.c ++++ b/drivers/media/dvb-frontends/dib7000p.c +@@ -2198,6 +2198,8 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms + struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); + u8 n_overflow = 1; + u16 i = 1000; ++ if (msg[0].len < 3) ++ return -EOPNOTSUPP; + u16 serpar_num = msg[0].buf[0]; + + while (n_overflow == 1 && i) { +@@ -2217,6 +2219,8 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg + struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); + u8 n_overflow = 1, n_empty = 1; + u16 i = 1000; ++ if (msg[0].len < 1 || msg[1].len < 2) ++ return -EOPNOTSUPP; + u16 serpar_num = msg[0].buf[0]; + u16 read_word; + +@@ -2261,8 +2265,12 @@ static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap, + u16 word; + + if (num == 1) { /* write */ ++ if (msg[0].len < 3) ++ return -EOPNOTSUPP; + dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2]))); + } else { ++ if (msg[1].len < 2) ++ return -EOPNOTSUPP; + word = dib7000p_read_word(state, apb_address); + msg[1].buf[0] = (word >> 8) & 0xff; + msg[1].buf[1] = (word) & 0xff; +diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c +index 36402612425779..4d31b2bb8f09f6 100644 +--- a/drivers/media/i2c/ccs/ccs-core.c ++++ b/drivers/media/i2c/ccs/ccs-core.c +@@ -665,7 +665,7 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl) + break; + } + +- pm_status = pm_runtime_get_if_active(&client->dev, true); ++ 
pm_status = pm_runtime_get_if_active(&client->dev); + if (!pm_status) + return 0; + +diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c +index fd56ba13873915..d6ef4a249e94f9 100644 +--- a/drivers/media/i2c/hi556.c ++++ b/drivers/media/i2c/hi556.c +@@ -689,21 +689,23 @@ static int hi556_test_pattern(struct hi556 *hi556, u32 pattern) + int ret; + u32 val; + +- if (pattern) { +- ret = hi556_read_reg(hi556, HI556_REG_ISP, +- HI556_REG_VALUE_08BIT, &val); +- if (ret) +- return ret; ++ ret = hi556_read_reg(hi556, HI556_REG_ISP, ++ HI556_REG_VALUE_08BIT, &val); ++ if (ret) ++ return ret; + +- ret = hi556_write_reg(hi556, HI556_REG_ISP, +- HI556_REG_VALUE_08BIT, +- val | HI556_REG_ISP_TPG_EN); +- if (ret) +- return ret; +- } ++ val = pattern ? (val | HI556_REG_ISP_TPG_EN) : ++ (val & ~HI556_REG_ISP_TPG_EN); ++ ++ ret = hi556_write_reg(hi556, HI556_REG_ISP, ++ HI556_REG_VALUE_08BIT, val); ++ if (ret) ++ return ret; ++ ++ val = pattern ? BIT(pattern - 1) : 0; + + return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN, +- HI556_REG_VALUE_08BIT, pattern); ++ HI556_REG_VALUE_08BIT, val); + } + + static int hi556_set_ctrl(struct v4l2_ctrl *ctrl) +diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c +index 5429bd2eb05318..5206784726dbde 100644 +--- a/drivers/media/i2c/ov2659.c ++++ b/drivers/media/i2c/ov2659.c +@@ -1479,14 +1479,15 @@ static int ov2659_probe(struct i2c_client *client) + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(ov2659_test_pattern_menu) - 1, + 0, 0, ov2659_test_pattern_menu); +- ov2659->sd.ctrl_handler = &ov2659->ctrls; + + if (ov2659->ctrls.error) { + dev_err(&client->dev, "%s: control initialization error %d\n", + __func__, ov2659->ctrls.error); ++ v4l2_ctrl_handler_free(&ov2659->ctrls); + return ov2659->ctrls.error; + } + ++ ov2659->sd.ctrl_handler = &ov2659->ctrls; + sd = &ov2659->sd; + client->flags |= I2C_CLIENT_SCCB; + #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c +index c81dd41834043f..8a1a33862ba712 100644 +--- a/drivers/media/i2c/tc358743.c ++++ b/drivers/media/i2c/tc358743.c +@@ -110,7 +110,7 @@ static inline struct tc358743_state *to_state(struct v4l2_subdev *sd) + + /* --------------- I2C --------------- */ + +-static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) ++static int i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) + { + struct tc358743_state *state = to_state(sd); + struct i2c_client *client = state->i2c_client; +@@ -136,6 +136,7 @@ static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) + v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed: %d\n", + __func__, reg, client->addr, err); + } ++ return err != ARRAY_SIZE(msgs); + } + + static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) +@@ -192,15 +193,24 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) + } + } + +-static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n) ++static noinline u32 i2c_rdreg_err(struct v4l2_subdev *sd, u16 reg, u32 n, ++ int *err) + { ++ int error; + __le32 val = 0; + +- i2c_rd(sd, reg, (u8 __force *)&val, n); ++ error = i2c_rd(sd, reg, (u8 __force *)&val, n); ++ if (err) ++ *err = error; + + return le32_to_cpu(val); + } + ++static inline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n) ++{ ++ return i2c_rdreg_err(sd, reg, n, NULL); ++} ++ + static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n) + { + __le32 raw = cpu_to_le32(val); +@@ -229,6 +239,13 @@ static u16 
i2c_rd16(struct v4l2_subdev *sd, u16 reg) + return i2c_rdreg(sd, reg, 2); + } + ++static int i2c_rd16_err(struct v4l2_subdev *sd, u16 reg, u16 *value) ++{ ++ int err; ++ *value = i2c_rdreg_err(sd, reg, 2, &err); ++ return err; ++} ++ + static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val) + { + i2c_wrreg(sd, reg, val, 2); +@@ -1651,12 +1668,23 @@ static int tc358743_enum_mbus_code(struct v4l2_subdev *sd, + return 0; + } + ++static u32 tc358743_g_colorspace(u32 code) ++{ ++ switch (code) { ++ case MEDIA_BUS_FMT_RGB888_1X24: ++ return V4L2_COLORSPACE_SRGB; ++ case MEDIA_BUS_FMT_UYVY8_1X16: ++ return V4L2_COLORSPACE_SMPTE170M; ++ default: ++ return 0; ++ } ++} ++ + static int tc358743_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *format) + { + struct tc358743_state *state = to_state(sd); +- u8 vi_rep = i2c_rd8(sd, VI_REP); + + if (format->pad != 0) + return -EINVAL; +@@ -1666,23 +1694,7 @@ static int tc358743_get_fmt(struct v4l2_subdev *sd, + format->format.height = state->timings.bt.height; + format->format.field = V4L2_FIELD_NONE; + +- switch (vi_rep & MASK_VOUT_COLOR_SEL) { +- case MASK_VOUT_COLOR_RGB_FULL: +- case MASK_VOUT_COLOR_RGB_LIMITED: +- format->format.colorspace = V4L2_COLORSPACE_SRGB; +- break; +- case MASK_VOUT_COLOR_601_YCBCR_LIMITED: +- case MASK_VOUT_COLOR_601_YCBCR_FULL: +- format->format.colorspace = V4L2_COLORSPACE_SMPTE170M; +- break; +- case MASK_VOUT_COLOR_709_YCBCR_FULL: +- case MASK_VOUT_COLOR_709_YCBCR_LIMITED: +- format->format.colorspace = V4L2_COLORSPACE_REC709; +- break; +- default: +- format->format.colorspace = 0; +- break; +- } ++ format->format.colorspace = tc358743_g_colorspace(format->format.code); + + return 0; + } +@@ -1696,19 +1708,14 @@ static int tc358743_set_fmt(struct v4l2_subdev *sd, + u32 code = format->format.code; /* is overwritten by get_fmt */ + int ret = tc358743_get_fmt(sd, sd_state, format); + +- format->format.code = code; ++ if (code == MEDIA_BUS_FMT_RGB888_1X24 || ++ code == MEDIA_BUS_FMT_UYVY8_1X16) ++ format->format.code = code; ++ format->format.colorspace = tc358743_g_colorspace(format->format.code); + + if (ret) + return ret; + +- switch (code) { +- case MEDIA_BUS_FMT_RGB888_1X24: +- case MEDIA_BUS_FMT_UYVY8_1X16: +- break; +- default: +- return -EINVAL; +- } +- + if (format->which == V4L2_SUBDEV_FORMAT_TRY) + return 0; + +@@ -1932,8 +1939,19 @@ static int tc358743_probe_of(struct tc358743_state *state) + state->pdata.refclk_hz = clk_get_rate(refclk); + state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS; + state->pdata.enable_hdcp = false; +- /* A FIFO level of 16 should be enough for 2-lane 720p60 at 594 MHz. */ +- state->pdata.fifo_level = 16; ++ /* ++ * Ideally the FIFO trigger level should be set based on the input and ++ * output data rates, but the calculations required are buried in ++ * Toshiba's register settings spreadsheet. ++ * A value of 16 works with a 594Mbps data rate for 720p60 (using 2 ++ * lanes) and 1080p60 (using 4 lanes), but fails when the data rate ++ * is increased, or a lower pixel clock is used that result in CSI ++ * reading out faster than the data is arriving. ++ * ++ * A value of 374 works with both those modes at 594Mbps, and with most ++ * modes on 972Mbps. ++ */ ++ state->pdata.fifo_level = 374; + /* + * The PLL input clock is obtained by dividing refclk by pll_prd. + * It must be between 6 MHz and 40 MHz, lower frequency is better. 
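The hunks above rework the tc358743 register I/O so that a failed I2C transfer is reported to the caller instead of silently yielding zeroes, and the probe hunk that follows uses this to stop trusting a CHIPID value whose read failed. A minimal sketch of the pattern, reusing only identifiers that appear in these hunks (illustrative only, not an additional change to the driver):

    /* Read a 16-bit register and report whether the transfer succeeded. */
    static int i2c_rd16_err(struct v4l2_subdev *sd, u16 reg, u16 *value)
    {
            int err;

            /* i2c_rdreg_err() stores a non-zero err on a short transfer */
            *value = i2c_rdreg_err(sd, reg, 2, &err);
            return err;
    }

    /* Probe-time use: bail out unless CHIPID was actually read. */
    u16 chipid;

    if (i2c_rd16_err(sd, CHIPID, &chipid) || (chipid & MASK_CHIPID) != 0)
            return -ENODEV; /* bus error, or not a TC358743 */

Keeping the register value in an out-parameter is what makes this work: the return slot carries only the transfer status, so a legitimate reading of 0 is no longer ambiguous.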
+@@ -2021,6 +2039,7 @@ static int tc358743_probe(struct i2c_client *client) + struct tc358743_platform_data *pdata = client->dev.platform_data; + struct v4l2_subdev *sd; + u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK; ++ u16 chipid; + int err; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) +@@ -2052,7 +2071,8 @@ static int tc358743_probe(struct i2c_client *client) + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; + + /* i2c access */ +- if ((i2c_rd16(sd, CHIPID) & MASK_CHIPID) != 0) { ++ if (i2c_rd16_err(sd, CHIPID, &chipid) || ++ (chipid & MASK_CHIPID) != 0) { + v4l2_info(sd, "not a TC358743 on address 0x%x\n", + client->addr << 1); + return -ENODEV; +diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c +index a0491f30783119..5696a951c0fbca 100644 +--- a/drivers/media/pci/intel/ivsc/mei_ace.c ++++ b/drivers/media/pci/intel/ivsc/mei_ace.c +@@ -528,6 +528,8 @@ static void mei_ace_remove(struct mei_cl_device *cldev) + + ace_set_camera_owner(ace, ACE_CAMERA_IVSC); + ++ mei_cldev_disable(cldev); ++ + mutex_destroy(&ace->lock); + } + +diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c +index 685b2ec96071a4..6c16721a6226ec 100644 +--- a/drivers/media/pci/intel/ivsc/mei_csi.c ++++ b/drivers/media/pci/intel/ivsc/mei_csi.c +@@ -807,6 +807,8 @@ static void mei_csi_remove(struct mei_cl_device *cldev) + + pm_runtime_disable(&cldev->dev); + ++ mei_cldev_disable(cldev); ++ + mutex_destroy(&csi->lock); + } + +diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c +index 0754645d26acba..e62245c5c6fc47 100644 +--- a/drivers/media/platform/qcom/camss/camss.c ++++ b/drivers/media/platform/qcom/camss/camss.c +@@ -1660,7 +1660,7 @@ static int camss_probe(struct platform_device *pdev) + ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register V4L2 device: %d\n", ret); +- goto err_genpd_cleanup; ++ goto err_media_device_cleanup; + } + + v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev); +@@ -1711,6 +1711,8 @@ static int camss_probe(struct platform_device *pdev) + v4l2_device_unregister(&camss->v4l2_dev); + v4l2_async_nf_cleanup(&camss->notifier); + pm_runtime_disable(dev); ++err_media_device_cleanup: ++ media_device_cleanup(&camss->media_dev); + err_genpd_cleanup: + camss_genpd_cleanup(camss); + +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c +index 47ce3365451d37..64a858783c41ef 100644 +--- a/drivers/media/platform/qcom/venus/core.c ++++ b/drivers/media/platform/qcom/venus/core.c +@@ -334,13 +334,13 @@ static int venus_probe(struct platform_device *pdev) + INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); + init_waitqueue_head(&core->sys_err_done); + +- ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread, +- IRQF_TRIGGER_HIGH | IRQF_ONESHOT, +- "venus", core); ++ ret = hfi_create(core, &venus_core_ops); + if (ret) + goto err_core_put; + +- ret = hfi_create(core, &venus_core_ops); ++ ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread, ++ IRQF_TRIGGER_HIGH | IRQF_ONESHOT, ++ "venus", core); + if (ret) + goto err_core_put; + +diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h +index 4a633261ece473..ba8afda6667d44 100644 +--- a/drivers/media/platform/qcom/venus/core.h ++++ b/drivers/media/platform/qcom/venus/core.h +@@ -28,6 +28,8 @@ + 
#define VIDC_PMDOMAINS_NUM_MAX 3 + #define VIDC_RESETS_NUM_MAX 2 + ++#define VENUS_MAX_FPS 240 ++ + extern int venus_fw_debug; + + struct freq_tbl { +diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c +index 0a041b4db9efc5..cf0d97cbc4631f 100644 +--- a/drivers/media/platform/qcom/venus/hfi_msgs.c ++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c +@@ -33,8 +33,9 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst, + struct hfi_buffer_requirements *bufreq; + struct hfi_extradata_input_crop *crop; + struct hfi_dpb_counts *dpb_count; ++ u32 ptype, rem_bytes; ++ u32 size_read = 0; + u8 *data_ptr; +- u32 ptype; + + inst->error = HFI_ERR_NONE; + +@@ -44,86 +45,118 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst, + break; + default: + inst->error = HFI_ERR_SESSION_INVALID_PARAMETER; +- goto done; ++ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); ++ return; + } + + event.event_type = pkt->event_data1; + + num_properties_changed = pkt->event_data2; +- if (!num_properties_changed) { +- inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; +- goto done; +- } ++ if (!num_properties_changed) ++ goto error; + + data_ptr = (u8 *)&pkt->ext_event_data[0]; ++ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt); ++ + do { ++ if (rem_bytes < sizeof(u32)) ++ goto error; + ptype = *((u32 *)data_ptr); ++ ++ data_ptr += sizeof(u32); ++ rem_bytes -= sizeof(u32); ++ + switch (ptype) { + case HFI_PROPERTY_PARAM_FRAME_SIZE: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_framesize)) ++ goto error; ++ + frame_sz = (struct hfi_framesize *)data_ptr; + event.width = frame_sz->width; + event.height = frame_sz->height; +- data_ptr += sizeof(*frame_sz); ++ size_read = sizeof(struct hfi_framesize); + break; + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_profile_level)) ++ goto error; ++ + profile_level = (struct hfi_profile_level *)data_ptr; + event.profile = profile_level->profile; + event.level = profile_level->level; +- data_ptr += sizeof(*profile_level); ++ size_read = sizeof(struct hfi_profile_level); + break; + case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_bit_depth)) ++ goto error; ++ + pixel_depth = (struct hfi_bit_depth *)data_ptr; + event.bit_depth = pixel_depth->bit_depth; +- data_ptr += sizeof(*pixel_depth); ++ size_read = sizeof(struct hfi_bit_depth); + break; + case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_pic_struct)) ++ goto error; ++ + pic_struct = (struct hfi_pic_struct *)data_ptr; + event.pic_struct = pic_struct->progressive_only; +- data_ptr += sizeof(*pic_struct); ++ size_read = sizeof(struct hfi_pic_struct); + break; + case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_colour_space)) ++ goto error; ++ + colour_info = (struct hfi_colour_space *)data_ptr; + event.colour_space = colour_info->colour_space; +- data_ptr += sizeof(*colour_info); ++ size_read = sizeof(struct hfi_colour_space); + break; + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(u32)) ++ goto error; ++ + event.entropy_mode = *(u32 *)data_ptr; +- data_ptr += sizeof(u32); ++ size_read = sizeof(u32); + break; + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: +- data_ptr += sizeof(u32); ++ if (rem_bytes < 
sizeof(struct hfi_buffer_requirements)) ++ goto error; ++ + bufreq = (struct hfi_buffer_requirements *)data_ptr; + event.buf_count = hfi_bufreq_get_count_min(bufreq, ver); +- data_ptr += sizeof(*bufreq); ++ size_read = sizeof(struct hfi_buffer_requirements); + break; + case HFI_INDEX_EXTRADATA_INPUT_CROP: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_extradata_input_crop)) ++ goto error; ++ + crop = (struct hfi_extradata_input_crop *)data_ptr; + event.input_crop.left = crop->left; + event.input_crop.top = crop->top; + event.input_crop.width = crop->width; + event.input_crop.height = crop->height; +- data_ptr += sizeof(*crop); ++ size_read = sizeof(struct hfi_extradata_input_crop); + break; + case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS: +- data_ptr += sizeof(u32); ++ if (rem_bytes < sizeof(struct hfi_dpb_counts)) ++ goto error; ++ + dpb_count = (struct hfi_dpb_counts *)data_ptr; + event.buf_count = dpb_count->fw_min_cnt; +- data_ptr += sizeof(*dpb_count); ++ size_read = sizeof(struct hfi_dpb_counts); + break; + default: ++ size_read = 0; + break; + } ++ data_ptr += size_read; ++ rem_bytes -= size_read; + num_properties_changed--; + } while (num_properties_changed > 0); + +-done: ++ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); ++ return; ++ ++error: ++ inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; + inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); + } + +diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c +index ab93757fff4b31..8e211527960118 100644 +--- a/drivers/media/platform/qcom/venus/hfi_venus.c ++++ b/drivers/media/platform/qcom/venus/hfi_venus.c +@@ -239,6 +239,7 @@ static int venus_write_queue(struct venus_hfi_device *hdev, + static int venus_read_queue(struct venus_hfi_device *hdev, + struct iface_queue *queue, void *pkt, u32 *tx_req) + { ++ struct hfi_pkt_hdr *pkt_hdr = NULL; + struct hfi_queue_header *qhdr; + u32 dwords, new_rd_idx; + u32 rd_idx, wr_idx, type, qsize; +@@ -304,6 +305,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev, + memcpy(pkt, rd_ptr, len); + memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2); + } ++ pkt_hdr = (struct hfi_pkt_hdr *)(pkt); ++ if ((pkt_hdr->size >> 2) != dwords) ++ return -EINVAL; + } else { + /* bad packet received, dropping */ + new_rd_idx = qhdr->write_idx; +@@ -1689,6 +1693,7 @@ void venus_hfi_destroy(struct venus_core *core) + venus_interface_queues_release(hdev); + mutex_destroy(&hdev->lock); + kfree(hdev); ++ disable_irq(core->irq); + core->ops = NULL; + } + +diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c +index 884ee6e9d4bd1a..8be056210f1d34 100644 +--- a/drivers/media/platform/qcom/venus/vdec.c ++++ b/drivers/media/platform/qcom/venus/vdec.c +@@ -481,11 +481,10 @@ static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) + us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; + do_div(us_per_frame, timeperframe->denominator); + +- if (!us_per_frame) +- return -EINVAL; +- ++ us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC); + fps = (u64)USEC_PER_SEC; + do_div(fps, us_per_frame); ++ fps = min(VENUS_MAX_FPS, fps); + + inst->fps = fps; + inst->timeperframe = *timeperframe; +diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c +index 44b13696cf82a5..dd3840f7bb7bde 100644 +--- a/drivers/media/platform/qcom/venus/venc.c ++++ b/drivers/media/platform/qcom/venus/venc.c +@@ -411,11 +411,10 @@ static int 
venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) + us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; + do_div(us_per_frame, timeperframe->denominator); + +- if (!us_per_frame) +- return -EINVAL; +- ++ us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC); + fps = (u64)USEC_PER_SEC; + do_div(fps, us_per_frame); ++ fps = min(VENUS_MAX_FPS, fps); + + inst->timeperframe = *timeperframe; + inst->fps = fps; +diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c +index f9752767078355..df5b7dadb1b4e4 100644 +--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c ++++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c +@@ -17,7 +17,6 @@ + + #define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000) + #define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000) +-#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000) + + #define ROCKCHIP_VPU981_MIN_SIZE 64 + +@@ -441,13 +440,6 @@ static int rk3066_vpu_hw_init(struct hantro_dev *vpu) + return 0; + } + +-static int rk3588_vpu981_hw_init(struct hantro_dev *vpu) +-{ +- /* Bump ACLKs to max. possible freq. to improve performance. */ +- clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ); +- return 0; +-} +- + static int rockchip_vpu_hw_init(struct hantro_dev *vpu) + { + /* Bump ACLK to max. possible freq. to improve performance. */ +@@ -808,7 +800,6 @@ const struct hantro_variant rk3588_vpu981_variant = { + .codec_ops = rk3588_vpu981_codec_ops, + .irqs = rk3588_vpu981_irqs, + .num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs), +- .init = rk3588_vpu981_hw_init, + .clk_names = rk3588_vpu981_vpu_clk_names, + .num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names) + }; +diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c +index f2b20e25a7a497..5ca385a6a13656 100644 +--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c ++++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c +@@ -240,7 +240,8 @@ static const struct v4l2_ctrl_config vivid_ctrl_u8_pixel_array = { + .min = 0x00, + .max = 0xff, + .step = 1, +- .dims = { 640 / PIXEL_ARRAY_DIV, 360 / PIXEL_ARRAY_DIV }, ++ .dims = { DIV_ROUND_UP(360, PIXEL_ARRAY_DIV), ++ DIV_ROUND_UP(640, PIXEL_ARRAY_DIV) }, + }; + + static const struct v4l2_ctrl_config vivid_ctrl_s32_array = { +diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c +index 0ab47fb8696bd0..5d1f78c7604dea 100644 +--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c ++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c +@@ -460,8 +460,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) + if (keep_controls) + return; + +- dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); +- dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); ++ dims[0] = DIV_ROUND_UP(dev->src_rect.height, PIXEL_ARRAY_DIV); ++ dims[1] = DIV_ROUND_UP(dev->src_rect.width, PIXEL_ARRAY_DIV); + v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); + } + +diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c +index d98343fd33fe34..91e177aa8136fd 100644 +--- a/drivers/media/usb/gspca/vicam.c ++++ b/drivers/media/usb/gspca/vicam.c +@@ -227,6 +227,7 @@ static int sd_init(struct gspca_dev *gspca_dev) + const struct ihex_binrec *rec; + const struct firmware *fw; + u8 *firmware_buf; ++ int len; + + ret = request_ihex_firmware(&fw, VICAM_FIRMWARE, + &gspca_dev->dev->dev); +@@ -241,9 +242,14 @@ static int sd_init(struct gspca_dev *gspca_dev) + goto exit; + } + 
for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) { +- memcpy(firmware_buf, rec->data, be16_to_cpu(rec->len)); ++ len = be16_to_cpu(rec->len); ++ if (len > PAGE_SIZE) { ++ ret = -EINVAL; ++ break; ++ } ++ memcpy(firmware_buf, rec->data, len); + ret = vicam_control_msg(gspca_dev, 0xff, 0, 0, firmware_buf, +- be16_to_cpu(rec->len)); ++ len); + if (ret < 0) + break; + } +diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c +index 070559b01b01b8..54956a8ff15e86 100644 +--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c ++++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c +@@ -165,10 +165,16 @@ static const struct i2c_algorithm hdpvr_algo = { + .functionality = hdpvr_functionality, + }; + ++/* prevent invalid 0-length usb_control_msg */ ++static const struct i2c_adapter_quirks hdpvr_quirks = { ++ .flags = I2C_AQ_NO_ZERO_LEN_READ, ++}; ++ + static const struct i2c_adapter hdpvr_i2c_adapter_template = { + .name = "Hauppauge HD PVR I2C", + .owner = THIS_MODULE, + .algo = &hdpvr_algo, ++ .quirks = &hdpvr_quirks, + }; + + static int hdpvr_activate_ir(struct hdpvr_device *dev) +diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c +index 7495df6b519125..f3633448e8b969 100644 +--- a/drivers/media/usb/usbtv/usbtv-video.c ++++ b/drivers/media/usb/usbtv/usbtv-video.c +@@ -73,6 +73,10 @@ static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm) + } + + if (params) { ++ if (vb2_is_busy(&usbtv->vb2q) && ++ (usbtv->width != params->cap_width || ++ usbtv->height != params->cap_height)) ++ return -EBUSY; + usbtv->width = params->cap_width; + usbtv->height = params->cap_height; + usbtv->n_chunks = usbtv->width * usbtv->height +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index 76f18557f37bd4..09753993068a95 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -235,6 +235,9 @@ static int uvc_parse_format(struct uvc_device *dev, + unsigned int i, n; + u8 ftype; + ++ if (buflen < 4) ++ return -EINVAL; ++ + format->type = buffer[2]; + format->index = buffer[3]; + format->frames = frames; +diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c +index 9572fdfe74f246..a9f880eb518ad8 100644 +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -258,6 +258,15 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, + + ctrl->dwMaxPayloadTransferSize = bandwidth; + } ++ ++ if (stream->intf->num_altsetting > 1 && ++ ctrl->dwMaxPayloadTransferSize > stream->maxpsize) { ++ dev_warn_ratelimited(&stream->intf->dev, ++ "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). 
Using the max size.\n", ++ ctrl->dwMaxPayloadTransferSize, ++ stream->maxpsize); ++ ctrl->dwMaxPayloadTransferSize = stream->maxpsize; ++ } + } + + static size_t uvc_video_ctrl_size(struct uvc_streaming *stream) +@@ -1405,12 +1414,6 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream, + if (!meta_buf || length == 2) + return; + +- if (meta_buf->length - meta_buf->bytesused < +- length + sizeof(meta->ns) + sizeof(meta->sof)) { +- meta_buf->error = 1; +- return; +- } +- + has_pts = mem[1] & UVC_STREAM_PTS; + has_scr = mem[1] & UVC_STREAM_SCR; + +@@ -1431,6 +1434,12 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream, + !memcmp(scr, stream->clock.last_scr, 6))) + return; + ++ if (meta_buf->length - meta_buf->bytesused < ++ length + sizeof(meta->ns) + sizeof(meta->sof)) { ++ meta_buf->error = 1; ++ return; ++ } ++ + meta = (struct uvc_meta_buf *)((u8 *)meta_buf->mem + meta_buf->bytesused); + local_irq_save(flags); + time = uvc_video_get_time(); +diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c +index 3a4b15a98e0216..b37507e093399c 100644 +--- a/drivers/media/v4l2-core/v4l2-common.c ++++ b/drivers/media/v4l2-core/v4l2-common.c +@@ -487,10 +487,10 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, + + freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div); + +- pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", +- __func__); +- pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", +- __func__); ++ pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", ++ __func__); ++ pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", ++ __func__); + } + + return freq > 0 ? 
freq : -EINVAL; +diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c +index 84fbf4e06cd33c..a1d3e93a409565 100644 +--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c ++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c +@@ -1578,7 +1578,6 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) + kvfree(hdl->buckets); + hdl->buckets = NULL; + hdl->cached = NULL; +- hdl->error = 0; + mutex_unlock(hdl->lock); + mutex_destroy(&hdl->_lock); + } +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c +index ac71abdce1b254..e0895e979e35b3 100644 +--- a/drivers/memstick/core/memstick.c ++++ b/drivers/memstick/core/memstick.c +@@ -548,7 +548,6 @@ EXPORT_SYMBOL(memstick_add_host); + */ + void memstick_remove_host(struct memstick_host *host) + { +- host->removing = 1; + flush_workqueue(workqueue); + mutex_lock(&host->lock); + if (host->card) +diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c +index dec279845a752d..43ec4948daa206 100644 +--- a/drivers/memstick/host/rtsx_usb_ms.c ++++ b/drivers/memstick/host/rtsx_usb_ms.c +@@ -812,6 +812,7 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev) + int err; + + host->eject = true; ++ msh->removing = true; + cancel_work_sync(&host->handle_req); + cancel_delayed_work_sync(&host->poll_card); + +diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c +index 87603eeaa27705..2b85da0fcf27e1 100644 +--- a/drivers/mfd/axp20x.c ++++ b/drivers/mfd/axp20x.c +@@ -936,7 +936,8 @@ static const struct mfd_cell axp152_cells[] = { + }; + + static struct mfd_cell axp313a_cells[] = { +- MFD_CELL_NAME("axp20x-regulator"), ++ /* AXP323 is sometimes paired with AXP717 as sub-PMIC */ ++ MFD_CELL_BASIC("axp20x-regulator", NULL, NULL, 0, 1), + MFD_CELL_RES("axp313a-pek", axp313a_pek_resources), + }; + +diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c +index f150d8769f1986..f546b050cb495f 100644 +--- a/drivers/misc/cardreader/rtsx_usb.c ++++ b/drivers/misc/cardreader/rtsx_usb.c +@@ -698,6 +698,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) + } + + #ifdef CONFIG_PM ++static int rtsx_usb_resume_child(struct device *dev, void *data) ++{ ++ pm_request_resume(dev); ++ return 0; ++} ++ + static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) + { + struct rtsx_ucr *ucr = +@@ -713,8 +719,10 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) + mutex_unlock(&ucr->dev_mutex); + + /* Defer the autosuspend if card exists */ +- if (val & (SD_CD | MS_CD)) ++ if (val & (SD_CD | MS_CD)) { ++ device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); + return -EAGAIN; ++ } + } else { + /* There is an ongoing operation*/ + return -EAGAIN; +@@ -724,12 +732,6 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) + return 0; + } + +-static int rtsx_usb_resume_child(struct device *dev, void *data) +-{ +- pm_request_resume(dev); +- return 0; +-} +- + static int rtsx_usb_resume(struct usb_interface *intf) + { + device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index 2e65ce6bdec7d9..b94cf7393fad6a 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -1269,10 +1269,16 @@ static void mei_dev_bus_put(struct mei_device *bus) + static void mei_cl_bus_dev_release(struct device *dev) + { + struct mei_cl_device *cldev = to_mei_cl_device(dev); ++ struct mei_device 
*mdev = cldev->cl->dev; ++ struct mei_cl *cl; + + mei_cl_flush_queues(cldev->cl, NULL); + mei_me_cl_put(cldev->me_cl); + mei_dev_bus_put(cldev->bus); ++ ++ list_for_each_entry(cl, &mdev->file_list, link) ++ WARN_ON(cl == cldev->cl); ++ + kfree(cldev->cl); + kfree(cldev); + } +diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c +index ded9b6849e35e9..90ea92bbdb2cf9 100644 +--- a/drivers/mmc/host/rtsx_usb_sdmmc.c ++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c +@@ -1032,9 +1032,7 @@ static int sd_set_power_mode(struct rtsx_usb_sdmmc *host, + err = sd_power_on(host); + } + +- if (!err) +- host->power_mode = power_mode; +- ++ host->power_mode = power_mode; + return err; + } + +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index 82808cc373f68b..c2144a3efb308e 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -1564,6 +1564,7 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) + { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); ++ struct mmc_host *mmc = host->mmc; + bool done = false; + u32 val = SWITCHABLE_SIGNALING_VOLTAGE; + const struct sdhci_msm_offset *msm_offset = +@@ -1621,6 +1622,12 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) + "%s: pwr_irq for req: (%d) timed out\n", + mmc_hostname(host->mmc), req_type); + } ++ ++ if ((req_type & REQ_BUS_ON) && mmc->card && !mmc->ops->get_cd(mmc)) { ++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); ++ host->pwr = 0; ++ } ++ + pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), + __func__, req_type); + } +@@ -1679,6 +1686,13 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) + udelay(10); + } + ++ if ((irq_status & CORE_PWRCTL_BUS_ON) && mmc->card && ++ !mmc->ops->get_cd(mmc)) { ++ msm_host_writel(msm_host, CORE_PWRCTL_BUS_FAIL, host, ++ msm_offset->core_pwrctl_ctl); ++ return; ++ } ++ + /* Handle BUS ON/OFF*/ + if (irq_status & CORE_PWRCTL_BUS_ON) { + pwr_state = REQ_BUS_ON; +diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c +index 11c404374d79da..1dc1b9274b68cc 100644 +--- a/drivers/mmc/host/sdhci-pci-gli.c ++++ b/drivers/mmc/host/sdhci-pci-gli.c +@@ -28,9 +28,6 @@ + #define PCI_GLI_9750_PM_CTRL 0xFC + #define PCI_GLI_9750_PM_STATE GENMASK(1, 0) + +-#define PCI_GLI_9750_CORRERR_MASK 0x214 +-#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12) +- + #define SDHCI_GLI_9750_CFG2 0x848 + #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24) + #define GLI_9750_CFG2_L1DLY_VALUE 0x1F +@@ -155,9 +152,6 @@ + #define PCI_GLI_9755_PM_CTRL 0xFC + #define PCI_GLI_9755_PM_STATE GENMASK(1, 0) + +-#define PCI_GLI_9755_CORRERR_MASK 0x214 +-#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12) +- + #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510 + #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8) + +@@ -227,6 +221,20 @@ + #define GLI_MAX_TUNING_LOOP 40 + + /* Genesys Logic chipset */ ++static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev) ++{ ++ int aer; ++ u32 value; ++ ++ /* mask the replay timer timeout of AER */ ++ aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); ++ if (aer) { ++ pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); ++ value |= PCI_ERR_COR_REP_TIMER; ++ pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); ++ } ++} ++ + static inline void gl9750_wt_on(struct sdhci_host *host) + { + u32 wt_value; +@@ -568,9 
+576,7 @@ static void gl9750_hw_setting(struct sdhci_host *host) + pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value); + + /* mask the replay timer timeout of AER */ +- pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value); +- value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT; +- pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value); ++ sdhci_gli_mask_replay_timer_timeout(pdev); + + gl9750_wt_off(host); + } +@@ -782,9 +788,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot) + pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value); + + /* mask the replay timer timeout of AER */ +- pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value); +- value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT; +- pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value); ++ sdhci_gli_mask_replay_timer_timeout(pdev); + + gl9755_wt_off(pdev); + } +@@ -1343,7 +1347,7 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot) + return ret; + } + +-static void gli_set_gl9763e(struct sdhci_pci_slot *slot) ++static void gl9763e_hw_setting(struct sdhci_pci_slot *slot) + { + struct pci_dev *pdev = slot->chip->pdev; + u32 value; +@@ -1372,6 +1376,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot) + value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5); + pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value); + ++ /* mask the replay timer timeout of AER */ ++ sdhci_gli_mask_replay_timer_timeout(pdev); ++ + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); + value &= ~GLI_9763E_VHS_REV; + value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); +@@ -1515,7 +1522,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot) + gli_pcie_enable_msi(slot); + host->mmc_host_ops.hs400_enhanced_strobe = + gl9763e_hs400_enhanced_strobe; +- gli_set_gl9763e(slot); ++ gl9763e_hw_setting(slot); + sdhci_enable_v4_mode(host); + + return 0; +diff --git a/drivers/most/core.c b/drivers/most/core.c +index e4412c7d25b0e0..5d073b3d279675 100644 +--- a/drivers/most/core.c ++++ b/drivers/most/core.c +@@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch) + dev = bus_find_device_by_name(&mostbus, NULL, mdev); + if (!dev) + return NULL; +- put_device(dev); + iface = dev_get_drvdata(dev); ++ put_device(dev); + list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { + if (!strcmp(dev_name(&c->dev), mdev_ch)) + return c; +diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c +index 811982da355740..fe5912d31beea4 100644 +--- a/drivers/mtd/nand/raw/fsmc_nand.c ++++ b/drivers/mtd/nand/raw/fsmc_nand.c +@@ -503,6 +503,8 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, + + dma_dev = chan->device; + dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); ++ if (dma_mapping_error(dma_dev->dev, dma_addr)) ++ return -EINVAL; + + if (direction == DMA_TO_DEVICE) { + dma_src = dma_addr; +diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c +index 589021ea9eb2ac..a9e79f0acbe2a9 100644 +--- a/drivers/mtd/nand/raw/renesas-nand-controller.c ++++ b/drivers/mtd/nand/raw/renesas-nand-controller.c +@@ -426,6 +426,9 @@ static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, + /* Configure DMA */ + dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize, + DMA_FROM_DEVICE); ++ if (dma_mapping_error(rnandc->dev, dma_addr)) ++ return -ENOMEM; ++ + writel(dma_addr, rnandc->regs + 
DMA_ADDR_LOW_REG); + writel(mtd->writesize, rnandc->regs + DMA_CNT_REG); + writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG); +@@ -606,6 +609,9 @@ static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, + /* Configure DMA */ + dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize, + DMA_TO_DEVICE); ++ if (dma_mapping_error(rnandc->dev, dma_addr)) ++ return -ENOMEM; ++ + writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG); + writel(mtd->writesize, rnandc->regs + DMA_CNT_REG); + writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG); +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index cd21bf8f254a75..ee61b2d8823203 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -624,7 +624,10 @@ static int spinand_write_page(struct spinand_device *spinand, + SPINAND_WRITE_INITIAL_DELAY_US, + SPINAND_WRITE_POLL_DELAY_US, + &status); +- if (!ret && (status & STATUS_PROG_FAILED)) ++ if (ret) ++ return ret; ++ ++ if (status & STATUS_PROG_FAILED) + return -EIO; + + return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); +diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c +index 5ab9d532486081..a186d1fde8694a 100644 +--- a/drivers/mtd/spi-nor/swp.c ++++ b/drivers/mtd/spi-nor/swp.c +@@ -50,7 +50,6 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor) + static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, + uint64_t *len) + { +- struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); +@@ -71,13 +70,13 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + *len = min_prot_len << (bp - 1); + +- if (*len > mtd->size) +- *len = mtd->size; ++ if (*len > nor->params->size) ++ *len = nor->params->size; + + if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask) + *ofs = 0; + else +- *ofs = mtd->size - *len; ++ *ofs = nor->params->size - *len; + } + + /* +@@ -153,7 +152,6 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, + */ + static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) + { +- struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + int ret, status_old, status_new; + u8 mask = spi_nor_get_sr_bp_mask(nor); +@@ -178,7 +176,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) + can_be_bottom = false; + + /* If anything above us is unlocked, we can't use 'top' protection */ +- if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), ++ if (!spi_nor_is_locked_sr(nor, ofs + len, nor->params->size - (ofs + len), + status_old)) + can_be_top = false; + +@@ -190,11 +188,11 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) + + /* lock_len: length of region that should end up locked */ + if (use_top) +- lock_len = mtd->size - ofs; ++ lock_len = nor->params->size - ofs; + else + lock_len = ofs + len; + +- if (lock_len == mtd->size) { ++ if (lock_len == nor->params->size) { + val = mask; + } else { + min_prot_len = spi_nor_get_min_prot_length_sr(nor); +@@ -243,7 +241,6 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) + */ + static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) + { +- struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + int ret, status_old, status_new; + u8 mask = spi_nor_get_sr_bp_mask(nor); +@@ -268,7 +265,7 @@ static int 
spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) + can_be_top = false; + + /* If anything above us is locked, we can't use 'bottom' protection */ +- if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), ++ if (!spi_nor_is_unlocked_sr(nor, ofs + len, nor->params->size - (ofs + len), + status_old)) + can_be_bottom = false; + +@@ -280,7 +277,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) + + /* lock_len: length of region that should remain locked */ + if (use_top) +- lock_len = mtd->size - (ofs + len); ++ lock_len = nor->params->size - (ofs + len); + else + lock_len = ofs; + +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c +index c99ffe6c683a38..d02a91cefec89b 100644 +--- a/drivers/net/bonding/bond_3ad.c ++++ b/drivers/net/bonding/bond_3ad.c +@@ -99,13 +99,16 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker); + static void ad_mux_machine(struct port *port, bool *update_slave_arr); + static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); + static void ad_tx_machine(struct port *port); +-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params); ++static void ad_periodic_machine(struct port *port); + static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); + static void ad_agg_selection_logic(struct aggregator *aggregator, + bool *update_slave_arr); + static void ad_clear_agg(struct aggregator *aggregator); + static void ad_initialize_agg(struct aggregator *aggregator); +-static void ad_initialize_port(struct port *port, int lacp_fast); ++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params); ++static void ad_enable_collecting(struct port *port); ++static void ad_disable_distributing(struct port *port, ++ bool *update_slave_arr); + static void ad_enable_collecting_distributing(struct port *port, + bool *update_slave_arr); + static void ad_disable_collecting_distributing(struct port *port, +@@ -171,9 +174,38 @@ static inline int __agg_has_partner(struct aggregator *agg) + return !is_zero_ether_addr(agg->partner_system.mac_addr_value); + } + ++/** ++ * __disable_distributing_port - disable the port's slave for distributing. ++ * Port will still be able to collect. ++ * @port: the port we're looking at ++ * ++ * This will disable only distributing on the port's slave. ++ */ ++static void __disable_distributing_port(struct port *port) ++{ ++ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); ++} ++ ++/** ++ * __enable_collecting_port - enable the port's slave for collecting, ++ * if it's up ++ * @port: the port we're looking at ++ * ++ * This will enable only collecting on the port's slave. ++ */ ++static void __enable_collecting_port(struct port *port) ++{ ++ struct slave *slave = port->slave; ++ ++ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave)) ++ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER); ++} ++ + /** + * __disable_port - disable the port's slave + * @port: the port we're looking at ++ * ++ * This will disable both collecting and distributing on the port's slave. + */ + static inline void __disable_port(struct port *port) + { +@@ -183,6 +215,8 @@ static inline void __disable_port(struct port *port) + /** + * __enable_port - enable the port's slave, if it's up + * @port: the port we're looking at ++ * ++ * This will enable both collecting and distributing on the port's slave. 
+ */ + static inline void __enable_port(struct port *port) + { +@@ -193,10 +227,27 @@ static inline void __enable_port(struct port *port) + } + + /** +- * __port_is_enabled - check if the port's slave is in active state ++ * __port_move_to_attached_state - check if port should transition back to attached ++ * state. + * @port: the port we're looking at + */ +-static inline int __port_is_enabled(struct port *port) ++static bool __port_move_to_attached_state(struct port *port) ++{ ++ if (!(port->sm_vars & AD_PORT_SELECTED) || ++ (port->sm_vars & AD_PORT_STANDBY) || ++ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) || ++ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) ++ port->sm_mux_state = AD_MUX_ATTACHED; ++ ++ return port->sm_mux_state == AD_MUX_ATTACHED; ++} ++ ++/** ++ * __port_is_collecting_distributing - check if the port's slave is in the ++ * combined collecting/distributing state ++ * @port: the port we're looking at ++ */ ++static int __port_is_collecting_distributing(struct port *port) + { + return bond_is_active_slave(port->slave); + } +@@ -942,6 +993,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) + */ + static void ad_mux_machine(struct port *port, bool *update_slave_arr) + { ++ struct bonding *bond = __get_bond_by_port(port); + mux_states_t last_state; + + /* keep current State Machine state to compare later if it was +@@ -999,9 +1051,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) + if ((port->sm_vars & AD_PORT_SELECTED) && + (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && + !__check_agg_selection_timer(port)) { +- if (port->aggregator->is_active) +- port->sm_mux_state = +- AD_MUX_COLLECTING_DISTRIBUTING; ++ if (port->aggregator->is_active) { ++ int state = AD_MUX_COLLECTING_DISTRIBUTING; ++ ++ if (!bond->params.coupled_control) ++ state = AD_MUX_COLLECTING; ++ port->sm_mux_state = state; ++ } + } else if (!(port->sm_vars & AD_PORT_SELECTED) || + (port->sm_vars & AD_PORT_STANDBY)) { + /* if UNSELECTED or STANDBY */ +@@ -1019,11 +1075,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) + } + break; + case AD_MUX_COLLECTING_DISTRIBUTING: ++ if (!__port_move_to_attached_state(port)) { ++ /* if port state hasn't changed make ++ * sure that a collecting distributing ++ * port in an active aggregator is enabled ++ */ ++ if (port->aggregator->is_active && ++ !__port_is_collecting_distributing(port)) { ++ __enable_port(port); ++ *update_slave_arr = true; ++ } ++ } ++ break; ++ case AD_MUX_COLLECTING: ++ if (!__port_move_to_attached_state(port)) { ++ if ((port->sm_vars & AD_PORT_SELECTED) && ++ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && ++ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) { ++ port->sm_mux_state = AD_MUX_DISTRIBUTING; ++ } else { ++ /* If port state hasn't changed, make sure that a collecting ++ * port is enabled for an active aggregator. 
++ */ ++ struct slave *slave = port->slave; ++ ++ if (port->aggregator->is_active && ++ bond_is_slave_rx_disabled(slave)) { ++ ad_enable_collecting(port); ++ *update_slave_arr = true; ++ } ++ } ++ } ++ break; ++ case AD_MUX_DISTRIBUTING: + if (!(port->sm_vars & AD_PORT_SELECTED) || + (port->sm_vars & AD_PORT_STANDBY) || ++ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) || + !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) || + !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) { +- port->sm_mux_state = AD_MUX_ATTACHED; ++ port->sm_mux_state = AD_MUX_COLLECTING; + } else { + /* if port state hasn't changed make + * sure that a collecting distributing +@@ -1031,7 +1121,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) + */ + if (port->aggregator && + port->aggregator->is_active && +- !__port_is_enabled(port)) { ++ !__port_is_collecting_distributing(port)) { + __enable_port(port); + *update_slave_arr = true; + } +@@ -1082,6 +1172,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) + update_slave_arr); + port->ntt = true; + break; ++ case AD_MUX_COLLECTING: ++ port->actor_oper_port_state |= LACP_STATE_COLLECTING; ++ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING; ++ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; ++ ad_enable_collecting(port); ++ ad_disable_distributing(port, update_slave_arr); ++ port->ntt = true; ++ break; ++ case AD_MUX_DISTRIBUTING: ++ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING; ++ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; ++ ad_enable_collecting_distributing(port, ++ update_slave_arr); ++ break; + default: + break; + } +@@ -1196,10 +1300,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) + * case of EXPIRED even if LINK_DOWN didn't arrive for + * the port. + */ +- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; + port->sm_vars &= ~AD_PORT_MATCHED; ++ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive ++ * machine state diagram, the statue should be ++ * Partner_Oper_Port_State.Synchronization = FALSE; ++ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout; ++ * start current_while_timer(Short Timeout); ++ * Actor_Oper_Port_State.Expired = TRUE; ++ */ ++ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; + port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT; +- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY; + port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); + port->actor_oper_port_state |= LACP_STATE_EXPIRED; + port->sm_vars |= AD_PORT_CHURNED; +@@ -1305,11 +1415,10 @@ static void ad_tx_machine(struct port *port) + /** + * ad_periodic_machine - handle a port's periodic state machine + * @port: the port we're looking at +- * @bond_params: bond parameters we will use + * + * Turn ntt flag on priodically to perform periodic transmission of lacpdu's. 
+ */ +-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params) ++static void ad_periodic_machine(struct port *port) + { + periodic_states_t last_state; + +@@ -1318,8 +1427,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para + + /* check if port was reinitialized */ + if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) || +- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) || +- !bond_params->lacp_active) { ++ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) { + port->sm_periodic_state = AD_NO_PERIODIC; + } + /* check if state machine should change state */ +@@ -1843,16 +1951,16 @@ static void ad_initialize_agg(struct aggregator *aggregator) + /** + * ad_initialize_port - initialize a given port's parameters + * @port: the port we're looking at +- * @lacp_fast: boolean. whether fast periodic should be used ++ * @bond_params: bond parameters we will use + */ +-static void ad_initialize_port(struct port *port, int lacp_fast) ++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params) + { + static const struct port_params tmpl = { + .system_priority = 0xffff, + .key = 1, + .port_number = 1, + .port_priority = 0xff, +- .port_state = 1, ++ .port_state = 0, + }; + static const struct lacpdu lacpdu = { + .subtype = 0x01, +@@ -1870,12 +1978,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast) + port->actor_port_priority = 0xff; + port->actor_port_aggregator_identifier = 0; + port->ntt = false; +- port->actor_admin_port_state = LACP_STATE_AGGREGATION | +- LACP_STATE_LACP_ACTIVITY; +- port->actor_oper_port_state = LACP_STATE_AGGREGATION | +- LACP_STATE_LACP_ACTIVITY; ++ port->actor_admin_port_state = LACP_STATE_AGGREGATION; ++ port->actor_oper_port_state = LACP_STATE_AGGREGATION; ++ if (bond_params->lacp_active) { ++ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY; ++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; ++ } + +- if (lacp_fast) ++ if (bond_params->lacp_fast) + port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT; + + memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); +@@ -1906,6 +2016,45 @@ static void ad_initialize_port(struct port *port, int lacp_fast) + } + } + ++/** ++ * ad_enable_collecting - enable a port's receive ++ * @port: the port we're looking at ++ * ++ * Enable @port if it's in an active aggregator ++ */ ++static void ad_enable_collecting(struct port *port) ++{ ++ if (port->aggregator->is_active) { ++ struct slave *slave = port->slave; ++ ++ slave_dbg(slave->bond->dev, slave->dev, ++ "Enabling collecting on port %d (LAG %d)\n", ++ port->actor_port_number, ++ port->aggregator->aggregator_identifier); ++ __enable_collecting_port(port); ++ } ++} ++ ++/** ++ * ad_disable_distributing - disable a port's transmit ++ * @port: the port we're looking at ++ * @update_slave_arr: Does slave array need update? 
++ */ ++static void ad_disable_distributing(struct port *port, bool *update_slave_arr) ++{ ++ if (port->aggregator && ++ !MAC_ADDRESS_EQUAL(&port->aggregator->partner_system, ++ &(null_mac_addr))) { ++ slave_dbg(port->slave->bond->dev, port->slave->dev, ++ "Disabling distributing on port %d (LAG %d)\n", ++ port->actor_port_number, ++ port->aggregator->aggregator_identifier); ++ __disable_distributing_port(port); ++ /* Slave array needs an update */ ++ *update_slave_arr = true; ++ } ++} ++ + /** + * ad_enable_collecting_distributing - enable a port's transmit/receive + * @port: the port we're looking at +@@ -2052,7 +2201,7 @@ void bond_3ad_bind_slave(struct slave *slave) + /* port initialization */ + port = &(SLAVE_AD_INFO(slave)->port); + +- ad_initialize_port(port, bond->params.lacp_fast); ++ ad_initialize_port(port, &bond->params); + + port->slave = slave; + port->actor_port_number = SLAVE_AD_INFO(slave)->id; +@@ -2364,7 +2513,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) + } + + ad_rx_machine(NULL, port); +- ad_periodic_machine(port, &bond->params); ++ ad_periodic_machine(port); + ad_port_selection_logic(port, &update_slave_arr); + ad_mux_machine(port, &update_slave_arr); + ad_tx_machine(port); +@@ -2734,6 +2883,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) + spin_unlock_bh(&bond->mode_lock); + } + ++/** ++ * bond_3ad_update_lacp_active - change the lacp active ++ * @bond: bonding struct ++ * ++ * Update actor_oper_port_state when lacp_active is modified. ++ */ ++void bond_3ad_update_lacp_active(struct bonding *bond) ++{ ++ struct port *port = NULL; ++ struct list_head *iter; ++ struct slave *slave; ++ int lacp_active; ++ ++ lacp_active = bond->params.lacp_active; ++ spin_lock_bh(&bond->mode_lock); ++ bond_for_each_slave(bond, slave, iter) { ++ port = &(SLAVE_AD_INFO(slave)->port); ++ if (lacp_active) ++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; ++ else ++ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY; ++ } ++ spin_unlock_bh(&bond->mode_lock); ++} ++ + size_t bond_3ad_stats_size(void) + { + return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */ +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 85ab6925716272..cd5691ed9f171e 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -6399,6 +6399,7 @@ static int __init bond_check_params(struct bond_params *params) + params->ad_actor_sys_prio = ad_actor_sys_prio; + eth_zero_addr(params->ad_actor_system); + params->ad_user_port_key = ad_user_port_key; ++ params->coupled_control = 1; + if (packets_per_slave > 0) { + params->reciprocal_packets_per_slave = + reciprocal_value(packets_per_slave); +diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c +index 27cbe148f0db5f..aebc814ad495d9 100644 +--- a/drivers/net/bonding/bond_netlink.c ++++ b/drivers/net/bonding/bond_netlink.c +@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { + [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range), + [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 }, + [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED }, ++ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 }, + }; + + static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { +@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], + return err; + } + ++ if (data[IFLA_BOND_COUPLED_CONTROL]) { ++ int coupled_control = 
nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]); ++ ++ bond_opt_initval(&newval, coupled_control); ++ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval, ++ data[IFLA_BOND_COUPLED_CONTROL], extack); ++ if (err) ++ return err; ++ } ++ + return 0; + } + +@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) + /* IFLA_BOND_NS_IP6_TARGET */ + nla_total_size(sizeof(struct nlattr)) + + nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS + ++ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */ + 0; + } + +@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb, + bond->params.missed_max)) + goto nla_put_failure; + ++ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL, ++ bond->params.coupled_control)) ++ goto nla_put_failure; ++ + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + struct ad_info info; + +diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c +index 6d003c0ef6698f..8291803e4f00ab 100644 +--- a/drivers/net/bonding/bond_options.c ++++ b/drivers/net/bonding/bond_options.c +@@ -85,7 +85,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond, + const struct bond_opt_value *newval); + static int bond_option_missed_max_set(struct bonding *bond, + const struct bond_opt_value *newval); +- ++static int bond_option_coupled_control_set(struct bonding *bond, ++ const struct bond_opt_value *newval); + + static const struct bond_opt_value bond_mode_tbl[] = { + { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT}, +@@ -233,6 +234,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = { + { NULL, -1, 0}, + }; + ++static const struct bond_opt_value bond_coupled_control_tbl[] = { ++ { "on", 1, BOND_VALFLAG_DEFAULT}, ++ { "off", 0, 0}, ++ { NULL, -1, 0}, ++}; ++ + static const struct bond_option bond_opts[BOND_OPT_LAST] = { + [BOND_OPT_MODE] = { + .id = BOND_OPT_MODE, +@@ -497,6 +504,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { + .desc = "Delay between each peer notification on failover event, in milliseconds", + .values = bond_peer_notif_delay_tbl, + .set = bond_option_peer_notif_delay_set ++ }, ++ [BOND_OPT_COUPLED_CONTROL] = { ++ .id = BOND_OPT_COUPLED_CONTROL, ++ .name = "coupled_control", ++ .desc = "Opt into using coupled control MUX for LACP states", ++ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)), ++ .flags = BOND_OPTFLAG_IFDOWN, ++ .values = bond_coupled_control_tbl, ++ .set = bond_option_coupled_control_set, + } + }; + +@@ -1618,6 +1634,7 @@ static int bond_option_lacp_active_set(struct bonding *bond, + netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n", + newval->string, newval->value); + bond->params.lacp_active = newval->value; ++ bond_3ad_update_lacp_active(bond); + + return 0; + } +@@ -1811,3 +1828,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond, + bond->params.ad_user_port_key = newval->value; + return 0; + } ++ ++static int bond_option_coupled_control_set(struct bonding *bond, ++ const struct bond_opt_value *newval) ++{ ++ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n", ++ newval->string, newval->value); ++ ++ bond->params.coupled_control = newval->value; ++ return 0; ++} +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index 004d2c988ff097..b00bac4686773d 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -339,18 +339,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) + + b53_write8(dev, 
B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + +- /* Include IMP port in dumb forwarding mode +- */ +- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); +- mgmt |= B53_MII_DUMB_FWDG_EN; +- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); +- +- /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether +- * frames should be flooded or not. +- */ +- b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); +- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; +- b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); ++ if (!is5325(dev)) { ++ /* Include IMP port in dumb forwarding mode */ ++ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); ++ mgmt |= B53_MII_DUMB_FWDG_EN; ++ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); ++ ++ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether ++ * frames should be flooded or not. ++ */ ++ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); ++ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; ++ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); ++ } else { ++ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); ++ mgmt |= B53_IP_MCAST_25; ++ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); ++ } + } + + static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, +@@ -507,6 +512,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) + unsigned int i; + u16 pvlan; + ++ /* BCM5325 CPU port is at 8 */ ++ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) ++ cpu_port = B53_CPU_PORT; ++ + /* Enable the IMP port to be in the same VLAN as the other ports + * on a per-port basis such that we only have Port i and IMP in + * the same VLAN. +@@ -557,6 +566,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port, + { + u16 reg; + ++ if (is5325(dev)) ++ return; ++ + b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); + if (learning) + reg &= ~BIT(port); +@@ -1163,6 +1175,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link) + if (port == dev->imp_port) { + off = B53_PORT_OVERRIDE_CTRL; + val = PORT_OVERRIDE_EN; ++ } else if (is5325(dev)) { ++ return; + } else { + off = B53_GMII_PORT_OVERRIDE_CTRL(port); + val = GMII_PO_EN; +@@ -1187,6 +1201,8 @@ static void b53_force_port_config(struct b53_device *dev, int port, + if (port == dev->imp_port) { + off = B53_PORT_OVERRIDE_CTRL; + val = PORT_OVERRIDE_EN; ++ } else if (is5325(dev)) { ++ return; + } else { + off = B53_GMII_PORT_OVERRIDE_CTRL(port); + val = GMII_PO_EN; +@@ -1217,10 +1233,19 @@ static void b53_force_port_config(struct b53_device *dev, int port, + return; + } + +- if (rx_pause) +- reg |= PORT_OVERRIDE_RX_FLOW; +- if (tx_pause) +- reg |= PORT_OVERRIDE_TX_FLOW; ++ if (rx_pause) { ++ if (is5325(dev)) ++ reg |= PORT_OVERRIDE_LP_FLOW_25; ++ else ++ reg |= PORT_OVERRIDE_RX_FLOW; ++ } ++ ++ if (tx_pause) { ++ if (is5325(dev)) ++ reg |= PORT_OVERRIDE_LP_FLOW_25; ++ else ++ reg |= PORT_OVERRIDE_TX_FLOW; ++ } + + b53_write8(dev, B53_CTRL_PAGE, off, reg); + } +@@ -2045,7 +2070,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) + { +- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING)) ++ struct b53_device *dev = ds->priv; ++ unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD); ++ ++ if (!is5325(dev)) ++ mask |= BR_LEARNING; ++ ++ if (flags.mask & ~mask) + return -EINVAL; + + return 0; +diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h +index 
bfbcb66bef6626..3179fe58de6b62 100644 +--- a/drivers/net/dsa/b53/b53_regs.h ++++ b/drivers/net/dsa/b53/b53_regs.h +@@ -92,6 +92,7 @@ + #define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S) + #define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S) + #define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S) ++#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */ + #define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */ + #define PORT_OVERRIDE_RX_FLOW BIT(4) + #define PORT_OVERRIDE_TX_FLOW BIT(5) +@@ -103,6 +104,7 @@ + + /* IP Multicast control (8 bit) */ + #define B53_IP_MULTICAST_CTRL 0x21 ++#define B53_IP_MCAST_25 BIT(0) + #define B53_IPMC_FWD_EN BIT(1) + #define B53_UC_FWD_EN BIT(6) + #define B53_MC_FWD_EN BIT(7) +diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c +index 3d9220f9c9fe74..294dbe2c3797af 100644 +--- a/drivers/net/ethernet/agere/et131x.c ++++ b/drivers/net/ethernet/agere/et131x.c +@@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) + skb->data, + skb_headlen(skb), + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, ++ dma_addr)) ++ return -ENOMEM; ++ + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; +@@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) + skb->data, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, ++ dma_addr)) ++ return -ENOMEM; ++ + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; +@@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) + skb_headlen(skb) / 2, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, ++ dma_addr)) ++ goto unmap_first_out; ++ + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; +@@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) + 0, + desc[frag].len_vlan, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, dma_addr)) ++ goto unmap_out; ++ + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; +@@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) + &adapter->regs->global.watchdog_timer); + } + return 0; ++ ++unmap_out: ++ // Unmap the body of the packet with map_page ++ while (--i) { ++ frag--; ++ dma_addr = desc[frag].addr_lo; ++ dma_addr |= (u64)desc[frag].addr_hi << 32; ++ dma_unmap_page(&adapter->pdev->dev, dma_addr, ++ desc[frag].len_vlan, DMA_TO_DEVICE); ++ } ++ ++unmap_first_out: ++ // Unmap the header with map_single ++ while (frag--) { ++ dma_addr = desc[frag].addr_lo; ++ dma_addr |= (u64)desc[frag].addr_hi << 32; ++ dma_unmap_single(&adapter->pdev->dev, dma_addr, ++ desc[frag].len_vlan, DMA_TO_DEVICE); ++ } ++ ++ return -ENOMEM; + } + + static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +index dbd28466013580..7f616abd3db2c4 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +@@ -113,6 +113,8 @@ struct aq_stats_s { + #define AQ_HW_POWER_STATE_D0 0U + #define AQ_HW_POWER_STATE_D3 3U + ++#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10) ++ + 
#define AQ_HW_FLAG_STARTED 0x00000004U + #define AQ_HW_FLAG_STOPPING 0x00000008U + #define AQ_HW_FLAG_RESETTING 0x00000010U +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +index 52e2070a4a2f0c..7370e3f76b6208 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +@@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp) + return aq_a2_fw_get_phy_temp(self, temp); + } + ++static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol) ++{ ++ struct mac_address_aligned_s mac_address; ++ struct link_control_s link_control; ++ struct wake_on_lan_s wake_on_lan; ++ ++ memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN); ++ hw_atl2_shared_buffer_write(self, mac_address, mac_address); ++ ++ memset(&wake_on_lan, 0, sizeof(wake_on_lan)); ++ ++ if (wol & WAKE_MAGIC) ++ wake_on_lan.wake_on_magic_packet = 1U; ++ ++ if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM)) ++ wake_on_lan.wake_on_link_up = 1U; ++ ++ hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan); ++ ++ hw_atl2_shared_buffer_get(self, link_control, link_control); ++ link_control.mode = AQ_HOST_MODE_SLEEP_PROXY; ++ hw_atl2_shared_buffer_write(self, link_control, link_control); ++ ++ return hw_atl2_shared_buffer_finish_ack(self); ++} ++ ++static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state, ++ const u8 *mac) ++{ ++ u32 wol = self->aq_nic_cfg->wol; ++ int err = 0; ++ ++ if (wol) ++ err = aq_a2_fw_set_wol_params(self, mac, wol); ++ ++ return err; ++} ++ + static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed) + { + struct link_options_s link_options; +@@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = { + .set_state = aq_a2_fw_set_state, + .update_link_status = aq_a2_fw_update_link_status, + .update_stats = aq_a2_fw_update_stats, ++ .set_power = aq_a2_fw_set_power, + .get_mac_temp = aq_a2_fw_get_mac_temp, + .get_phy_temp = aq_a2_fw_get_phy_temp, + .set_eee_rate = aq_a2_fw_set_eee_rate, +diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c +index 009e0b3066fa3f..baf12ae0b8c4cb 100644 +--- a/drivers/net/ethernet/atheros/ag71xx.c ++++ b/drivers/net/ethernet/atheros/ag71xx.c +@@ -1234,6 +1234,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf, + buf->rx.rx_buf = data; + buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, + DMA_FROM_DEVICE); ++ if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) { ++ skb_free_frag(data); ++ buf->rx.rx_buf = NULL; ++ return false; ++ } + desc->data = (u32)buf->rx.dma_addr + offset; + return true; + } +@@ -1532,6 +1537,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb, + + dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&ag->pdev->dev, dma_addr)) { ++ netif_dbg(ag, tx_err, ndev, "DMA mapping error\n"); ++ goto err_drop; ++ } + + i = ring->curr & ring_mask; + desc = ag71xx_ring_desc(ring, i); +diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +index a317feb8decb64..087d4c2b3efd1a 100644 +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +@@ -1427,9 +1427,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, + { + struct 
acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + struct bgx *bgx = context; +- char bgx_sel[5]; ++ char bgx_sel[7]; + +- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id); ++ snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id); + if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) { + pr_warn("Invalid link device\n"); + return AE_OK; +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index 6bc0fde95f9dcf..0fda17bc8e2303 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -1465,10 +1465,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue) + ntohs(tcphdr->source)); + dev_info(dev, "TCP dest port %d\n", + ntohs(tcphdr->dest)); +- dev_info(dev, "TCP sequence num %d\n", +- ntohs(tcphdr->seq)); +- dev_info(dev, "TCP ack_seq %d\n", +- ntohs(tcphdr->ack_seq)); ++ dev_info(dev, "TCP sequence num %u\n", ++ ntohl(tcphdr->seq)); ++ dev_info(dev, "TCP ack_seq %u\n", ++ ntohl(tcphdr->ack_seq)); + } else if (ip_hdr(skb)->protocol == + IPPROTO_UDP) { + udphdr = udp_hdr(skb); +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +index e7bf70ac9a4ca5..6b7e1bb5c62d90 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +@@ -28,7 +28,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -3141,7 +3140,6 @@ static const struct net_device_ops dpaa_ops = { + .ndo_stop = dpaa_eth_stop, + .ndo_tx_timeout = dpaa_tx_timeout, + .ndo_get_stats64 = dpaa_get_stats64, +- .ndo_change_carrier = fixed_phy_change_carrier, + .ndo_set_mac_address = dpaa_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = dpaa_set_rx_mode, +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +index 3f8cd4a7d84576..20d73a4ed22cd5 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +@@ -415,8 +415,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev, + of_node_put(ptp_node); + } + +- if (ptp_dev) ++ if (ptp_dev) { + ptp = platform_get_drvdata(ptp_dev); ++ put_device(&ptp_dev->dev); ++ } + + if (ptp) + info->phc_index = ptp->phc_index; +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c +index a856047f1dfd6b..0ad3ea380e9b18 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c +@@ -1179,19 +1179,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev) + { + struct platform_device *ierb_pdev; + struct device_node *ierb_node; ++ int ret; + + ierb_node = of_find_compatible_node(NULL, NULL, + "fsl,ls1028a-enetc-ierb"); +- if (!ierb_node || !of_device_is_available(ierb_node)) ++ if (!ierb_node) + return -ENODEV; + ++ if (!of_device_is_available(ierb_node)) { ++ of_node_put(ierb_node); ++ return -ENODEV; ++ } ++ + ierb_pdev = of_find_device_by_node(ierb_node); + of_node_put(ierb_node); + + if (!ierb_pdev) + return -EPROBE_DEFER; + +- return enetc_ierb_register_pf(ierb_pdev, pdev); ++ ret = enetc_ierb_register_pf(ierb_pdev, pdev); ++ ++ put_device(&ierb_pdev->dev); ++ ++ return ret; + } + + static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) +diff --git a/drivers/net/ethernet/freescale/fec_main.c 
b/drivers/net/ethernet/freescale/fec_main.c +index 291c88a76a27f4..2a8b5429df5957 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -3069,27 +3069,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) + static void fec_enet_itr_coal_set(struct net_device *ndev) + { + struct fec_enet_private *fep = netdev_priv(ndev); +- int rx_itr, tx_itr; ++ u32 rx_itr = 0, tx_itr = 0; ++ int rx_ictt, tx_ictt; + +- /* Must be greater than zero to avoid unpredictable behavior */ +- if (!fep->rx_time_itr || !fep->rx_pkts_itr || +- !fep->tx_time_itr || !fep->tx_pkts_itr) +- return; +- +- /* Select enet system clock as Interrupt Coalescing +- * timer Clock Source +- */ +- rx_itr = FEC_ITR_CLK_SEL; +- tx_itr = FEC_ITR_CLK_SEL; ++ rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); ++ tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + +- /* set ICFT and ICTT */ +- rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); +- rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); +- tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); +- tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); ++ if (rx_ictt > 0 && fep->rx_pkts_itr > 1) { ++ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ ++ rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; ++ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); ++ rx_itr |= FEC_ITR_ICTT(rx_ictt); ++ } + +- rx_itr |= FEC_ITR_EN; +- tx_itr |= FEC_ITR_EN; ++ if (tx_ictt > 0 && fep->tx_pkts_itr > 1) { ++ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ ++ tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; ++ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); ++ tx_itr |= FEC_ITR_ICTT(tx_ictt); ++ } + + writel(tx_itr, fep->hwp + FEC_TXIC0); + writel(rx_itr, fep->hwp + FEC_RXIC0); +diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c +index 7a15b9245698bb..30c7bc019d2feb 100644 +--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c ++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c +@@ -1468,8 +1468,10 @@ static int gfar_get_ts_info(struct net_device *dev, + if (ptp_node) { + ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); +- if (ptp_dev) ++ if (ptp_dev) { + ptp = platform_get_drvdata(ptp_dev); ++ put_device(&ptp_dev->dev); ++ } + } + + if (ptp) +diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c +index 79db7a6d42bc2f..9c50febb427179 100644 +--- a/drivers/net/ethernet/google/gve/gve_adminq.c ++++ b/drivers/net/ethernet/google/gve/gve_adminq.c +@@ -431,6 +431,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, + break; + default: + dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); ++ return -EINVAL; + } + + return 0; +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c +index ec189f0703f99e..241a541b8edd2f 100644 +--- a/drivers/net/ethernet/google/gve/gve_main.c ++++ b/drivers/net/ethernet/google/gve/gve_main.c +@@ -2373,6 +2373,8 @@ static void gve_shutdown(struct pci_dev *pdev) + struct gve_priv *priv = netdev_priv(netdev); + bool was_up = netif_carrier_ok(priv->dev); + ++ netif_device_detach(netdev); ++ + rtnl_lock(); + if (was_up && gve_close(priv->dev)) { + /* If the dev was up, attempt to close, if close fails, reset */ +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index 
11543db4c47f0e..3e1408e1c1fcfb 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -6772,6 +6772,13 @@ static int igc_probe(struct pci_dev *pdev, + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + ++ /* PCI config space info */ ++ hw->vendor_id = pdev->vendor; ++ hw->device_id = pdev->device; ++ hw->revision_id = pdev->revision; ++ hw->subsystem_vendor_id = pdev->subsystem_vendor; ++ hw->subsystem_device_id = pdev->subsystem_device; ++ + /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); +@@ -6797,13 +6804,6 @@ static int igc_probe(struct pci_dev *pdev, + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + +- /* PCI config space info */ +- hw->vendor_id = pdev->vendor; +- hw->device_id = pdev->device; +- hw->revision_id = pdev->revision; +- hw->subsystem_vendor_id = pdev->subsystem_vendor; +- hw->subsystem_device_id = pdev->subsystem_device; +- + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +index 1703c640a434db..7ef82c30e85712 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +@@ -403,7 +403,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) + dma_addr_t dma; + u32 cmd_type; + +- while (budget-- > 0) { ++ while (likely(budget)) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { + work_done = false; + break; +@@ -438,6 +438,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->count) + xdp_ring->next_to_use = 0; ++ ++ budget--; + } + + if (tx_desc) { +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +index 237f82082ebe5d..0f4e462d39c2ea 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +@@ -580,8 +580,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) + if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) + *features &= ~BIT_ULL(NPC_OUTER_VID); + +- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */ +- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) && ++ /* Allow extracting SPI field from AH and ESP headers at same offset */ ++ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) && + (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH)))) + *features |= BIT_ULL(NPC_IPSEC_SPI); + +diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +index 889fd26843e608..11e16c9e4e9221 100644 +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) + return -1; + ++ rcu_read_lock(); + err = dev_fill_forward_path(dev, addr, &stack); ++ rcu_read_unlock(); + if (err) + return err; + +diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c +index 
85a9ad2b86bfff..525e2d365cd95c 100644 +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -1886,7 +1886,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + if (!pdev) + goto err_of_node_put; + +- get_device(&pdev->dev); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + goto err_put_device; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +index 5ae787656a7ca0..3efa8bf1d14ef4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev, + /* Total shared buffer size is split in a ratio of 3:1 between + * lossy and lossless pools respectively. + */ +- lossy_epool_size = (shared_buffer_size / 4) * 3; + lossless_ipool_size = shared_buffer_size / 4; ++ lossy_epool_size = shared_buffer_size - lossless_ipool_size; + + mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0, + lossy_epool_size); +@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv, + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); +- u32 new_headroom_size = 0; +- u32 current_headroom_size; ++ u32 current_headroom_cells = 0; ++ u32 new_headroom_cells = 0; + void *in; + int err; + int i; + +- current_headroom_size = port_buffer->headroom_size; +- + in = kzalloc(sz, GFP_KERNEL); + if (!in) + return -ENOMEM; +@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv, + + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { + void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); ++ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size); ++ + u64 size = port_buffer->buffer[i].size; + u64 xoff = port_buffer->buffer[i].xoff; + u64 xon = port_buffer->buffer[i].xon; + +- new_headroom_size += size; + do_div(size, port_buff_cell_sz); ++ new_headroom_cells += size; + do_div(xoff, port_buff_cell_sz); + do_div(xon, port_buff_cell_sz); + MLX5_SET(bufferx_reg, buffer, size, size); +@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv, + MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); + } + +- new_headroom_size /= port_buff_cell_sz; +- current_headroom_size /= port_buff_cell_sz; +- err = port_update_shared_buffer(priv->mdev, current_headroom_size, +- new_headroom_size); ++ err = port_update_shared_buffer(priv->mdev, current_headroom_cells, ++ new_headroom_cells); + if (err) + goto out; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +index d9acc37afe1c86..74729bf168b136 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +@@ -362,7 +362,7 @@ void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_que + void mlx5e_reset_qdisc(struct net_device *dev, u16 qid) + { + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid); +- struct Qdisc *qdisc = dev_queue->qdisc_sleeping; ++ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + + if (!qdisc) + return; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +index d8e739cbcbced1..91319b5acd3de5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c ++++ 
b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch * + devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum, + vport_num - 1, external); + } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) { ++ u16 base_vport = mlx5_core_ec_vf_vport_base(dev); ++ + memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); + dl_port->attrs.switch_id.id_len = ppid.id_len; + devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum, +- vport_num - 1, false); ++ vport_num - base_vport, false); + } + } + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +index 9dbd5edff0b023..51f49510826a3e 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +@@ -2528,6 +2528,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { + ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, + ROUTER_EXP, false), ++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD, ++ ROUTER_EXP, false), + /* Multicast Router Traps */ + MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), + MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), +diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h +index 83477c8e6971b8..5bfc1499347a93 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h ++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h +@@ -95,6 +95,7 @@ enum { + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C, ++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D, + MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178, + MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179, + MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B, +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c +index bc7c5cd3859695..1ac7a40fcc43e3 100644 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c +@@ -3394,10 +3394,6 @@ void ionic_lif_free(struct ionic_lif *lif) + lif->info = NULL; + lif->info_pa = 0; + +- /* unmap doorbell page */ +- ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); +- lif->kern_dbpage = NULL; +- + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); + +@@ -3423,6 +3419,9 @@ void ionic_lif_deinit(struct ionic_lif *lif) + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + ionic_lif_qcq_deinit(lif, lif->adminqcq); + ++ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); ++ lif->kern_dbpage = NULL; ++ + ionic_lif_reset(lif); + } + +diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c +index f3315c65151561..e7306ed529226a 100644 +--- a/drivers/net/ethernet/ti/icssg/icss_iep.c ++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c +@@ -638,7 +638,8 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) + + static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) + { +- u32 val, cap, ret = 0; ++ u32 val, cap; ++ int ret = 0; + + mutex_lock(&iep->ptp_clk_mutex); + +@@ -702,10 +703,16 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) + struct platform_device *pdev; + struct device_node *iep_np; + struct icss_iep *iep; ++ int ret; + + iep_np = of_parse_phandle(np, "ti,iep", idx); +- if (!iep_np || 
!of_device_is_available(iep_np)) ++ if (!iep_np) ++ return ERR_PTR(-ENODEV); ++ ++ if (!of_device_is_available(iep_np)) { ++ of_node_put(iep_np); + return ERR_PTR(-ENODEV); ++ } + + pdev = of_find_device_by_node(iep_np); + of_node_put(iep_np); +@@ -715,21 +722,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) + return ERR_PTR(-EPROBE_DEFER); + + iep = platform_get_drvdata(pdev); +- if (!iep) +- return ERR_PTR(-EPROBE_DEFER); ++ if (!iep) { ++ ret = -EPROBE_DEFER; ++ goto err_put_pdev; ++ } + + device_lock(iep->dev); + if (iep->client_np) { + device_unlock(iep->dev); + dev_err(iep->dev, "IEP is already acquired by %s", + iep->client_np->name); +- return ERR_PTR(-EBUSY); ++ ret = -EBUSY; ++ goto err_put_pdev; + } + iep->client_np = np; + device_unlock(iep->dev); +- get_device(iep->dev); + + return iep; ++ ++err_put_pdev: ++ put_device(&pdev->dev); ++ ++ return ERR_PTR(ret); + } + EXPORT_SYMBOL_GPL(icss_iep_get_idx); + +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h +index 4c12067b07f05e..f07bde6695c7b9 100644 +--- a/drivers/net/hyperv/hyperv_net.h ++++ b/drivers/net/hyperv/hyperv_net.h +@@ -1061,6 +1061,7 @@ struct net_device_context { + struct net_device __rcu *vf_netdev; + struct netvsc_vf_pcpu_stats __percpu *vf_stats; + struct delayed_work vf_takeover; ++ struct delayed_work vfns_work; + + /* 1: allocated, serial number is valid. 0: not allocated */ + u32 vf_alloc; +@@ -1075,6 +1076,8 @@ struct net_device_context { + struct netvsc_device_info *saved_netvsc_dev_info; + }; + ++void netvsc_vfns_work(struct work_struct *w); ++ + /* Azure hosts don't support non-TCP port numbers in hashing for fragmented + * packets. We can use ethtool to change UDP hash level when necessary. + */ +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index f33f9167ba6b6e..aa114240e340d9 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -2513,6 +2513,7 @@ static int netvsc_probe(struct hv_device *dev, + spin_lock_init(&net_device_ctx->lock); + INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); ++ INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work); + + net_device_ctx->vf_stats + = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); +@@ -2655,6 +2656,8 @@ static void netvsc_remove(struct hv_device *dev) + cancel_delayed_work_sync(&ndev_ctx->dwork); + + rtnl_lock(); ++ cancel_delayed_work_sync(&ndev_ctx->vfns_work); ++ + nvdev = rtnl_dereference(ndev_ctx->nvdev); + if (nvdev) { + cancel_work_sync(&nvdev->subchan_work); +@@ -2696,6 +2699,7 @@ static int netvsc_suspend(struct hv_device *dev) + cancel_delayed_work_sync(&ndev_ctx->dwork); + + rtnl_lock(); ++ cancel_delayed_work_sync(&ndev_ctx->vfns_work); + + nvdev = rtnl_dereference(ndev_ctx->nvdev); + if (nvdev == NULL) { +@@ -2789,6 +2793,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev) + } + } + ++void netvsc_vfns_work(struct work_struct *w) ++{ ++ struct net_device_context *ndev_ctx = ++ container_of(w, struct net_device_context, vfns_work.work); ++ struct net_device *ndev; ++ ++ if (!rtnl_trylock()) { ++ schedule_delayed_work(&ndev_ctx->vfns_work, 1); ++ return; ++ } ++ ++ ndev = hv_get_drvdata(ndev_ctx->device_ctx); ++ if (!ndev) ++ goto out; ++ ++ netvsc_event_set_vf_ns(ndev); ++ ++out: ++ rtnl_unlock(); ++} ++ + /* + * On Hyper-V, every VF interface is matched with a corresponding + * synthetic interface. 
The synthetic interface is presented first +@@ -2799,10 +2824,12 @@ static int netvsc_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) + { + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); ++ struct net_device_context *ndev_ctx; + int ret = 0; + + if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { +- netvsc_event_set_vf_ns(event_dev); ++ ndev_ctx = netdev_priv(event_dev); ++ schedule_delayed_work(&ndev_ctx->vfns_work, 0); + return NOTIFY_DONE; + } + +diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c +index 5620dc271fac33..cbf3d4761ce357 100644 +--- a/drivers/net/ipa/ipa_smp2p.c ++++ b/drivers/net/ipa/ipa_smp2p.c +@@ -92,7 +92,7 @@ static void ipa_smp2p_notify(struct ipa_smp2p *smp2p) + return; + + dev = &smp2p->ipa->pdev->dev; +- smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0; ++ smp2p->power_on = pm_runtime_get_if_active(dev) > 0; + + /* Signal whether the IPA power is enabled */ + mask = BIT(smp2p->enabled_bit); +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c +index 9a0432145645f4..6a114883ed8c5a 100644 +--- a/drivers/net/phy/micrel.c ++++ b/drivers/net/phy/micrel.c +@@ -372,6 +372,8 @@ static const struct kszphy_type ksz8051_type = { + + static const struct kszphy_type ksz8081_type = { + .led_mode_reg = MII_KSZPHY_CTRL_2, ++ .cable_diag_reg = KSZ8081_LMD, ++ .pair_mask = KSZPHY_WIRE_PAIR_MASK, + .has_broadcast_disable = true, + .has_nand_tree_disable = true, + .has_rmii_ref_clk_sel = true, +@@ -4720,6 +4722,14 @@ static int lan8841_suspend(struct phy_device *phydev) + return genphy_suspend(phydev); + } + ++static int ksz9131_resume(struct phy_device *phydev) ++{ ++ if (phydev->suspended && phy_interface_is_rgmii(phydev)) ++ ksz9131_config_rgmii_delay(phydev); ++ ++ return kszphy_resume(phydev); ++} ++ + static struct phy_driver ksphy_driver[] = { + { + .phy_id = PHY_ID_KS8737, +@@ -4966,7 +4976,7 @@ static struct phy_driver ksphy_driver[] = { + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, + .suspend = kszphy_suspend, +- .resume = kszphy_resume, ++ .resume = ksz9131_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, + .get_features = ksz9477_get_features, +diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h +index 7a962050a4d45e..cdb343779a8fb5 100644 +--- a/drivers/net/phy/mscc/mscc.h ++++ b/drivers/net/phy/mscc/mscc.h +@@ -362,6 +362,13 @@ struct vsc85xx_hw_stat { + u16 mask; + }; + ++struct vsc8531_skb_cb { ++ u32 ns; ++}; ++ ++#define VSC8531_SKB_CB(skb) \ ++ ((struct vsc8531_skb_cb *)((skb)->cb)) ++ + struct vsc8531_private { + int rate_magic; + u16 supp_led_modes; +@@ -410,6 +417,11 @@ struct vsc8531_private { + */ + struct mutex ts_lock; + struct mutex phc_lock; ++ ++ /* list of skbs that were received and need timestamp information but it ++ * didn't received it yet ++ */ ++ struct sk_buff_head rx_skbs_list; + }; + + /* Shared structure between the PHYs of the same package. 
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c +index 4171f01d34e579..3de72d9cc22bd0 100644 +--- a/drivers/net/phy/mscc/mscc_main.c ++++ b/drivers/net/phy/mscc/mscc_main.c +@@ -2335,6 +2335,13 @@ static int vsc85xx_probe(struct phy_device *phydev) + return vsc85xx_dt_led_modes_get(phydev, default_mode); + } + ++static void vsc85xx_remove(struct phy_device *phydev) ++{ ++ struct vsc8531_private *priv = phydev->priv; ++ ++ skb_queue_purge(&priv->rx_skbs_list); ++} ++ + /* Microsemi VSC85xx PHYs */ + static struct phy_driver vsc85xx_driver[] = { + { +@@ -2589,6 +2596,7 @@ static struct phy_driver vsc85xx_driver[] = { + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, ++ .remove = &vsc85xx_remove, + .probe = &vsc8574_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, +@@ -2614,6 +2622,7 @@ static struct phy_driver vsc85xx_driver[] = { + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, ++ .remove = &vsc85xx_remove, + .probe = &vsc8574_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, +@@ -2639,6 +2648,7 @@ static struct phy_driver vsc85xx_driver[] = { + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, ++ .remove = &vsc85xx_remove, + .probe = &vsc8584_probe, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, +@@ -2662,6 +2672,7 @@ static struct phy_driver vsc85xx_driver[] = { + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, ++ .remove = &vsc85xx_remove, + .probe = &vsc8584_probe, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, +@@ -2685,6 +2696,7 @@ static struct phy_driver vsc85xx_driver[] = { + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, ++ .remove = &vsc85xx_remove, + .probe = &vsc8584_probe, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, +diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c +index d0bd6ab45ebed7..add1a9ee721afa 100644 +--- a/drivers/net/phy/mscc/mscc_ptp.c ++++ b/drivers/net/phy/mscc/mscc_ptp.c +@@ -1193,9 +1193,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, + { + struct vsc8531_private *vsc8531 = + container_of(mii_ts, struct vsc8531_private, mii_ts); +- struct skb_shared_hwtstamps *shhwtstamps = NULL; + struct vsc85xx_ptphdr *ptphdr; +- struct timespec64 ts; + unsigned long ns; + + if (!vsc8531->ptp->configured) +@@ -1205,27 +1203,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, + type == PTP_CLASS_NONE) + return false; + +- vsc85xx_gettime(&vsc8531->ptp->caps, &ts); +- + ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter); + if (!ptphdr) + return false; + +- shhwtstamps = skb_hwtstamps(skb); +- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); +- + ns = ntohl(ptphdr->rsrvd2); + +- /* nsec is in reserved field */ +- if (ts.tv_nsec < ns) +- ts.tv_sec--; ++ VSC8531_SKB_CB(skb)->ns = ns; ++ skb_queue_tail(&vsc8531->rx_skbs_list, skb); + +- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns); +- netif_rx(skb); ++ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0); + + return true; + } + ++static long vsc85xx_do_aux_work(struct ptp_clock_info *info) ++{ ++ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); ++ struct skb_shared_hwtstamps *shhwtstamps = NULL; ++ struct phy_device *phydev = 
ptp->phydev; ++ struct vsc8531_private *priv = phydev->priv; ++ struct sk_buff_head received; ++ struct sk_buff *rx_skb; ++ struct timespec64 ts; ++ unsigned long flags; ++ ++ __skb_queue_head_init(&received); ++ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags); ++ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received); ++ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags); ++ ++ vsc85xx_gettime(info, &ts); ++ while ((rx_skb = __skb_dequeue(&received)) != NULL) { ++ shhwtstamps = skb_hwtstamps(rx_skb); ++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); ++ ++ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns) ++ ts.tv_sec--; ++ ++ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ++ VSC8531_SKB_CB(rx_skb)->ns); ++ netif_rx(rx_skb); ++ } ++ ++ return -1; ++} ++ + static const struct ptp_clock_info vsc85xx_clk_caps = { + .owner = THIS_MODULE, + .name = "VSC85xx timer", +@@ -1239,6 +1262,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = { + .adjfine = &vsc85xx_adjfine, + .gettime64 = &vsc85xx_gettime, + .settime64 = &vsc85xx_settime, ++ .do_aux_work = &vsc85xx_do_aux_work, + }; + + static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev) +@@ -1566,6 +1590,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev) + + mutex_init(&vsc8531->phc_lock); + mutex_init(&vsc8531->ts_lock); ++ skb_queue_head_init(&vsc8531->rx_skbs_list); + + /* Retrieve the shared load/save GPIO. Request it as non exclusive as + * the same GPIO can be requested by all the PHYs of the same package. +diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c +index 4ca813c009476a..5f9bb0ebe455d2 100644 +--- a/drivers/net/phy/smsc.c ++++ b/drivers/net/phy/smsc.c +@@ -786,6 +786,7 @@ static struct phy_driver smsc_phy_driver[] = { + + /* PHY_BASIC_FEATURES */ + ++ .flags = PHY_RST_AFTER_CLK_EN, + .probe = smsc_phy_probe, + + /* basic functions */ +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index ee1527cf3d0c4c..28b894bcd7a93d 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1613,11 +1614,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, + if (ppp->flags & SC_MULTILINK) + return -EOPNOTSUPP; + +- if (list_empty(&ppp->channels)) ++ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist); ++ if (!pch) ++ return -ENODEV; ++ ++ chan = READ_ONCE(pch->chan); ++ if (!chan) + return -ENODEV; + +- pch = list_first_entry(&ppp->channels, struct channel, clist); +- chan = pch->chan; + if (!chan->ops->fill_forward_path) + return -EOPNOTSUPP; + +@@ -3000,7 +3004,7 @@ ppp_unregister_channel(struct ppp_channel *chan) + */ + down_write(&pch->chan_sem); + spin_lock_bh(&pch->downl); +- pch->chan = NULL; ++ WRITE_ONCE(pch->chan, NULL); + spin_unlock_bh(&pch->downl); + up_write(&pch->chan_sem); + ppp_disconnect_channel(pch); +@@ -3506,7 +3510,7 @@ ppp_connect_channel(struct channel *pch, int unit) + hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ + if (hdrlen > ppp->dev->hard_header_len) + ppp->dev->hard_header_len = hdrlen; +- list_add_tail(&pch->clist, &ppp->channels); ++ list_add_tail_rcu(&pch->clist, &ppp->channels); + ++ppp->n_channels; + pch->ppp = ppp; + refcount_inc(&ppp->file.refcnt); +@@ -3536,10 +3540,11 @@ ppp_disconnect_channel(struct channel *pch) + if (ppp) { + /* remove it from the ppp unit's list */ + ppp_lock(ppp); +- list_del(&pch->clist); ++ list_del_rcu(&pch->clist); + if 
(--ppp->n_channels == 0) + wake_up_interruptible(&ppp->file.rwait); + ppp_unlock(ppp); ++ synchronize_net(); + if (refcount_dec_and_test(&ppp->file.refcnt)) + ppp_destroy_interface(ppp); + err = 0; +diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c +index 0a53ec293d0408..dcaa62377808c2 100644 +--- a/drivers/net/thunderbolt/main.c ++++ b/drivers/net/thunderbolt/main.c +@@ -396,9 +396,9 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout) + + ret = tb_xdomain_disable_paths(net->xd, + net->local_transmit_path, +- net->rx_ring.ring->hop, ++ net->tx_ring.ring->hop, + net->remote_transmit_path, +- net->tx_ring.ring->hop); ++ net->rx_ring.ring->hop); + if (ret) + netdev_warn(net->dev, "failed to disable DMA paths\n"); + +@@ -662,9 +662,9 @@ static void tbnet_connected_work(struct work_struct *work) + goto err_free_rx_buffers; + + ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path, +- net->rx_ring.ring->hop, ++ net->tx_ring.ring->hop, + net->remote_transmit_path, +- net->tx_ring.ring->hop); ++ net->rx_ring.ring->hop); + if (ret) { + netdev_err(net->dev, "failed to enable DMA paths\n"); + goto err_free_tx_buffers; +@@ -924,8 +924,12 @@ static int tbnet_open(struct net_device *dev) + + netif_carrier_off(dev); + +- ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, +- RING_FLAG_FRAME); ++ flags = RING_FLAG_FRAME; ++ /* Only enable full E2E if the other end supports it too */ ++ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) ++ flags |= RING_FLAG_E2E; ++ ++ ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags); + if (!ring) { + netdev_err(dev, "failed to allocate Tx ring\n"); + return -ENOMEM; +@@ -944,11 +948,6 @@ static int tbnet_open(struct net_device *dev) + sof_mask = BIT(TBIP_PDF_FRAME_START); + eof_mask = BIT(TBIP_PDF_FRAME_END); + +- flags = RING_FLAG_FRAME; +- /* Only enable full E2E if the other end supports it too */ +- if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) +- flags |= RING_FLAG_E2E; +- + ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags, + net->tx_ring.ring->hop, sof_mask, + eof_mask, tbnet_start_poll, net); +diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c +index 119295f5f3b351..f4340d4ef7eeab 100644 +--- a/drivers/net/usb/asix_devices.c ++++ b/drivers/net/usb/asix_devices.c +@@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev) + priv->mdio->read = &asix_mdio_bus_read; + priv->mdio->write = &asix_mdio_bus_write; + priv->mdio->name = "Asix MDIO Bus"; ++ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR)); + /* mii bus name is usb-- */ + snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", + dev->udev->bus->busnum, dev->udev->devnum); +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index db05622f1f703e..d9792fd515a904 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -893,6 +893,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ + } + } + ++ if (ctx->func_desc) ++ ctx->filtering_supported = !!(ctx->func_desc->bmNetworkCapabilities ++ & USB_CDC_NCM_NCAP_ETH_FILTER); ++ + iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; + + /* Device-specific flags */ +@@ -1898,6 +1902,14 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) + } + } + ++static void cdc_ncm_update_filter(struct usbnet *dev) ++{ ++ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; ++ ++ if (ctx->filtering_supported) ++ 
usbnet_cdc_update_filter(dev); ++} ++ + static const struct driver_info cdc_ncm_info = { + .description = "CDC NCM (NO ZLP)", + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET +@@ -1908,7 +1920,7 @@ static const struct driver_info cdc_ncm_info = { + .status = cdc_ncm_status, + .rx_fixup = cdc_ncm_rx_fixup, + .tx_fixup = cdc_ncm_tx_fixup, +- .set_rx_mode = usbnet_cdc_update_filter, ++ .set_rx_mode = cdc_ncm_update_filter, + }; + + /* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */ +@@ -1922,7 +1934,7 @@ static const struct driver_info cdc_ncm_zlp_info = { + .status = cdc_ncm_status, + .rx_fixup = cdc_ncm_rx_fixup, + .tx_fixup = cdc_ncm_tx_fixup, +- .set_rx_mode = usbnet_cdc_update_filter, ++ .set_rx_mode = cdc_ncm_update_filter, + }; + + /* Same as cdc_ncm_info, but with FLAG_WWAN */ +@@ -1936,7 +1948,7 @@ static const struct driver_info wwan_info = { + .status = cdc_ncm_status, + .rx_fixup = cdc_ncm_rx_fixup, + .tx_fixup = cdc_ncm_tx_fixup, +- .set_rx_mode = usbnet_cdc_update_filter, ++ .set_rx_mode = cdc_ncm_update_filter, + }; + + /* Same as wwan_info, but with FLAG_NOARP */ +@@ -1950,7 +1962,7 @@ static const struct driver_info wwan_noarp_info = { + .status = cdc_ncm_status, + .rx_fixup = cdc_ncm_rx_fixup, + .tx_fixup = cdc_ncm_tx_fixup, +- .set_rx_mode = usbnet_cdc_update_filter, ++ .set_rx_mode = cdc_ncm_update_filter, + }; + + static const struct usb_device_id cdc_devs[] = { +diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c +index 9d8efec46508a1..39d9aad33bc690 100644 +--- a/drivers/net/wireless/ath/ath11k/ce.c ++++ b/drivers/net/wireless/ath/ath11k/ce.c +@@ -393,9 +393,6 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe, + goto err; + } + +- /* Make sure descriptor is read after the head pointer. */ +- dma_rmb(); +- + *nbytes = ath11k_hal_ce_dst_status_get_length(desc); + + *skb = pipe->dest_ring->skb[sw_index]; +diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c +index 2b7bee66647286..33b9764eaa9167 100644 +--- a/drivers/net/wireless/ath/ath11k/dp_rx.c ++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c +@@ -2662,9 +2662,6 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, + try_again: + ath11k_hal_srng_access_begin(ab, srng); + +- /* Make sure descriptor is read after the head pointer. */ +- dma_rmb(); +- + while (likely(desc = + (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, + srng))) { +diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c +index df493d1760623e..1215408d1a6abb 100644 +--- a/drivers/net/wireless/ath/ath11k/hal.c ++++ b/drivers/net/wireless/ath/ath11k/hal.c +@@ -796,13 +796,23 @@ u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng) + + void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) + { ++ u32 hp; ++ + lockdep_assert_held(&srng->lock); + + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.cached_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; + } else { +- srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); ++ hp = READ_ONCE(*srng->u.dst_ring.hp_addr); ++ ++ if (hp != srng->u.dst_ring.cached_hp) { ++ srng->u.dst_ring.cached_hp = hp; ++ /* Make sure descriptor is read after the head ++ * pointer. 
++ */ ++ } + + /* Try to prefetch the next descriptor in the ring */ + if (srng->flags & HAL_SRNG_FLAGS_CACHED) +@@ -817,7 +827,6 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) + { + lockdep_assert_held(&srng->lock); + +- /* TODO: See if we need a write memory barrier here */ + if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { + /* For LMAC rings, ring pointer updates are done through FW and + * hence written to a shared memory location that is read by FW +@@ -825,21 +834,37 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.last_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; +- *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; ++ /* Make sure descriptor is written before updating the ++ * head pointer. ++ */ ++ dma_wmb(); ++ WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp); + } else { + srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; +- *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; ++ /* Make sure descriptor is read before updating the ++ * tail pointer. ++ */ ++ dma_mb(); ++ WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp); + } + } else { + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.last_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; ++ /* Assume the implementation uses an MMIO write accessor ++ * which has the required wmb() so that the descriptor ++ * is written before updating the head pointer. ++ */ + ath11k_hif_write32(ab, + (unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->mem, + srng->u.src_ring.hp); + } else { + srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; ++ /* Make sure descriptor is read before updating the ++ * tail pointer. ++ */ ++ mb(); + ath11k_hif_write32(ab, + (unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->mem, +diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c +index 740586fe49d1f9..b66d23d6b2bd9e 100644 +--- a/drivers/net/wireless/ath/ath12k/ce.c ++++ b/drivers/net/wireless/ath/ath12k/ce.c +@@ -343,9 +343,6 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe, + goto err; + } + +- /* Make sure descriptor is read after the head pointer.
*/ +- dma_rmb(); +- + *nbytes = ath12k_hal_ce_dst_status_get_length(desc); + + *skb = pipe->dest_ring->skb[sw_index]; +diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c +index c663ff990b4791..c918f5d12975cb 100644 +--- a/drivers/net/wireless/ath/ath12k/dp.c ++++ b/drivers/net/wireless/ath/ath12k/dp.c +@@ -74,6 +74,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr) + ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id); + if (ret) { + ath12k_warn(ab, "failed to setup rx defrag context\n"); ++ tid--; + goto peer_clean; + } + +@@ -91,7 +92,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr) + return -ENOENT; + } + +- for (; tid >= 0; tid--) ++ for (tid--; tid >= 0; tid--) + ath12k_dp_rx_peer_tid_delete(ar, peer, tid); + + spin_unlock_bh(&ab->base_lock); +diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c +index 169e16c6ed650f..2d80cb9f0e7b7f 100644 +--- a/drivers/net/wireless/ath/ath12k/hal.c ++++ b/drivers/net/wireless/ath/ath12k/hal.c +@@ -1728,13 +1728,24 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab, + + void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng) + { ++ u32 hp; ++ + lockdep_assert_held(&srng->lock); + +- if (srng->ring_dir == HAL_SRNG_DIR_SRC) ++ if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.cached_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; +- else +- srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); ++ } else { ++ hp = READ_ONCE(*srng->u.dst_ring.hp_addr); ++ ++ if (hp != srng->u.dst_ring.cached_hp) { ++ srng->u.dst_ring.cached_hp = hp; ++ /* Make sure descriptor is read after the head ++ * pointer. ++ */ ++ dma_rmb(); ++ } ++ } + } + + /* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin() +@@ -1744,7 +1755,6 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng) + { + lockdep_assert_held(&srng->lock); + +- /* TODO: See if we need a write memory barrier here */ + if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { + /* For LMAC rings, ring pointer updates are done through FW and + * hence written to a shared memory location that is read by FW +@@ -1752,21 +1762,37 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng) + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.last_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; +- *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; ++ /* Make sure descriptor is written before updating the ++ * head pointer. ++ */ ++ dma_wmb(); ++ WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp); + } else { + srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; +- *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; ++ /* Make sure descriptor is read before updating the ++ * tail pointer. ++ */ ++ dma_mb(); ++ WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp); + } + } else { + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.last_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; ++ /* Assume the implementation uses an MMIO write accessor ++ * which has the required wmb() so that the descriptor ++ * is written before updating the head pointer. ++ */ + ath12k_hif_write32(ab, + (unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->mem, + srng->u.src_ring.hp); + } else { + srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; ++ /* Make sure descriptor is read before updating the ++ * tail pointer.
++ */ ++ mb(); + ath12k_hif_write32(ab, + (unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->mem, +diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c +index dafd7c34d74650..97ed179be22835 100644 +--- a/drivers/net/wireless/ath/ath12k/hw.c ++++ b/drivers/net/wireless/ath/ath12k/hw.c +@@ -1002,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { + .download_calib = true, + .supports_suspend = false, + .tcl_ring_retry = true, +- .reoq_lut_support = false, ++ .reoq_lut_support = true, + .supports_shadow_regs = false, + + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274), +diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c +index e918218ce2d607..7e400a0e0eb111 100644 +--- a/drivers/net/wireless/ath/ath12k/wmi.c ++++ b/drivers/net/wireless/ath/ath12k/wmi.c +@@ -4965,6 +4965,11 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, + dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + + info = IEEE80211_SKB_CB(msdu); ++ memset(&info->status, 0, sizeof(info->status)); ++ ++ /* skip tx rate update from ieee80211_status*/ ++ info->status.rates[0].idx = -1; ++ + if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) + info->flags |= IEEE80211_TX_STAT_ACK; + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +index 47c0e8e429e544..3064e603e7e3e0 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +@@ -919,7 +919,7 @@ void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti) + + static void + wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id, +- const u16 *tbl_ptr, u32 tbl_len, ++ u16 *tbl_ptr, u32 tbl_len, + u32 tbl_width, u32 tbl_offset) + { + struct phytbl_info tab; +diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c +index 0a4aa3c678c101..75118e24061911 100644 +--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c ++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c +@@ -1575,8 +1575,11 @@ il4965_tx_cmd_build_rate(struct il_priv *il, + || rate_idx > RATE_COUNT_LEGACY) + rate_idx = rate_lowest_index(&il->bands[info->band], sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ +- if (info->band == NL80211_BAND_5GHZ) ++ if (info->band == NL80211_BAND_5GHZ) { + rate_idx += IL_FIRST_OFDM_RATE; ++ if (rate_idx > IL_LAST_OFDM_RATE) ++ rate_idx = IL_LAST_OFDM_RATE; ++ } + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = il_rates[rate_idx].plcp; + /* Zero out flags for this packet */ +diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +index f4a6f76cf193ab..e70024525eb90a 100644 +--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c ++++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +@@ -2904,7 +2904,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. 
*/ +- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { ++ while (repeat_rate > 0 && index < (LINK_QUAL_MAX_RETRY_NUM - 1)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +index 0a1f302ad6d3fd..2deb259615d9c5 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +@@ -2816,6 +2816,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dump_desc *desc; + unsigned int delay = 0; + bool monitor_only = false; ++ int ret; + + if (trigger) { + u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; +@@ -2846,7 +2847,11 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + desc->trig_desc.type = cpu_to_le32(trig); + memcpy(desc->trig_desc.data, str, len); + +- return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); ++ ret = iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); ++ if (ret) ++ kfree(desc); ++ ++ return ret; + } + IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +index a82cdd897173f5..6c108dbbbc5402 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +@@ -2143,6 +2143,7 @@ static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, + + status->gtk[0].len = data->key_len; + status->gtk[0].flags = data->key_flags; ++ status->gtk[0].id = status->gtk[0].flags & IWL_WOWLAN_GTK_IDX_MASK; + + memcpy(status->gtk[0].key, data->key, sizeof(data->key)); + +@@ -2369,6 +2370,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) + * currently used key. 
+ */ + status->gtk[0].flags = v6->gtk.key_index | BIT(7); ++ status->gtk[0].id = v6->gtk.key_index; + } else if (notif_ver == 7) { + struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +index cc866401aad0ba..8b22779e5b3e57 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +@@ -828,7 +828,7 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, + int n_channels) + { + return ((n_ssids <= PROBE_OPTION_MAX) && +- (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & ++ (n_channels <= mvm->fw->ucode_capa.n_scan_channels) && + (ies->common_ie_len + + ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] + + ies->len[NL80211_BAND_6GHZ] <= +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +index f0226db2e57c7a..fae9ec98da3b9d 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +@@ -2060,16 +2060,21 @@ static int mt7915_load_firmware(struct mt7915_dev *dev) + { + int ret; + +- /* make sure fw is download state */ +- if (mt7915_firmware_state(dev, false)) { +- /* restart firmware once */ +- mt76_connac_mcu_restart(&dev->mt76); +- ret = mt7915_firmware_state(dev, false); +- if (ret) { +- dev_err(dev->mt76.dev, +- "Firmware is not ready for download\n"); +- return ret; +- } ++ /* Release Semaphore if taken by previous failed attempt */ ++ ret = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); ++ if (ret != PATCH_REL_SEM_SUCCESS) { ++ dev_err(dev->mt76.dev, "Could not release semaphore\n"); ++ /* Continue anyways */ ++ } ++ ++ /* Always restart MCU firmware */ ++ mt76_connac_mcu_restart(&dev->mt76); ++ ++ /* Check if MCU is ready */ ++ ret = mt7915_firmware_state(dev, false); ++ if (ret) { ++ dev_err(dev->mt76.dev, "Firmware did not enter download state\n"); ++ return ret; + } + + ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH)); +diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c +index 3645f212021f92..40112b2c37775d 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/pci.c ++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c +@@ -573,8 +573,11 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, + dma_map_single(&rtlpci->pdev->dev, skb_tail_pointer(skb), + rtlpci->rxbuffersize, DMA_FROM_DEVICE); + bufferaddress = *((dma_addr_t *)skb->cb); +- if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress)) ++ if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress)) { ++ if (!new_skb) ++ kfree_skb(skb); + return 0; ++ } + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; + if (rtlpriv->use_new_trx_flow) { + /* skb->cb may be 64 bit address */ +@@ -803,13 +806,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) + skb = new_skb; + no_new: + if (rtlpriv->use_new_trx_flow) { +- _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, +- rxring_idx, +- rtlpci->rx_ring[rxring_idx].idx); ++ if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, ++ rxring_idx, ++ rtlpci->rx_ring[rxring_idx].idx)) { ++ if (new_skb) ++ dev_kfree_skb_any(skb); ++ } + } else { +- _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, +- rxring_idx, +- rtlpci->rx_ring[rxring_idx].idx); ++ if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, ++ rxring_idx, ++ rtlpci->rx_ring[rxring_idx].idx)) { ++ if (new_skb) ++ 
dev_kfree_skb_any(skb); ++ } + if (rtlpci->rx_ring[rxring_idx].idx == + rtlpci->rxringcount - 1) + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, +diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c +index 21e9ec8768b5dc..c172ef13c9540c 100644 +--- a/drivers/net/wireless/realtek/rtw89/core.c ++++ b/drivers/net/wireless/realtek/rtw89/core.c +@@ -1996,6 +1996,9 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev) + { + const struct rtw89_chip_info *chip = rtwdev->chip; + ++ if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE) ++ return RTW89_PS_MODE_NONE; ++ + if (rtw89_disable_ps_mode || !chip->ps_mode_supported || + RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw)) + return RTW89_PS_MODE_NONE; +diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c +index 89b0a7970508e2..539537360914c6 100644 +--- a/drivers/net/wireless/realtek/rtw89/fw.c ++++ b/drivers/net/wireless/realtek/rtw89/fw.c +@@ -3427,13 +3427,18 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_fw_info *fw_info = &rtwdev->fw; + const u32 *c2h_reg = chip->c2h_regs; +- u32 ret; ++ u32 ret, timeout; + u8 i, val; + + info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; + ++ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB) ++ timeout = RTW89_C2H_TIMEOUT_USB; ++ else ++ timeout = RTW89_C2H_TIMEOUT; ++ + ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, +- RTW89_C2H_TIMEOUT, false, rtwdev, ++ timeout, false, rtwdev, + chip->c2h_ctrl_reg); + if (ret) { + rtw89_warn(rtwdev, "c2h reg timeout\n"); +diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h +index 775f4e8fbda4dd..bc6a9ea9352e0f 100644 +--- a/drivers/net/wireless/realtek/rtw89/fw.h ++++ b/drivers/net/wireless/realtek/rtw89/fw.h +@@ -69,6 +69,8 @@ struct rtw89_h2creg_sch_tx_en { + #define RTW89_C2HREG_HDR_LEN 2 + #define RTW89_H2CREG_HDR_LEN 2 + #define RTW89_C2H_TIMEOUT 1000000 ++#define RTW89_C2H_TIMEOUT_USB 4000 ++ + struct rtw89_mac_c2h_info { + u8 id; + u8 content_len; +diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c +index 3c818c4b4653ad..3d63f8b2770e21 100644 +--- a/drivers/net/wireless/realtek/rtw89/mac.c ++++ b/drivers/net/wireless/realtek/rtw89/mac.c +@@ -1301,6 +1301,23 @@ void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev) + rtw89_mac_send_rpwm(rtwdev, state, true); + } + ++static void rtw89_mac_power_switch_boot_mode(struct rtw89_dev *rtwdev) ++{ ++ u32 boot_mode; ++ ++ if (rtwdev->hci.type != RTW89_HCI_TYPE_USB) ++ return; ++ ++ boot_mode = rtw89_read32_mask(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE); ++ if (!boot_mode) ++ return; ++ ++ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC); ++ rtw89_write32_clr(rtwdev, R_AX_SYS_STATUS1, B_AX_AUTO_WLPON); ++ rtw89_write32_clr(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE); ++ rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); ++} ++ + static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) + { + #define PWR_ACT 1 +@@ -1310,6 +1327,8 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) + int ret; + u8 val; + ++ rtw89_mac_power_switch_boot_mode(rtwdev); ++ + if (on) { + cfg_seq = chip->pwr_on_seq; + cfg_func = chip->ops->pwr_on_func; +diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h +index c0aac4d3678a32..ef116259504232 100644 +--- a/drivers/net/wireless/realtek/rtw89/reg.h ++++ 
b/drivers/net/wireless/realtek/rtw89/reg.h +@@ -172,6 +172,7 @@ + + #define R_AX_SYS_STATUS1 0x00F4 + #define B_AX_SEL_0XC0_MASK GENMASK(17, 16) ++#define B_AX_AUTO_WLPON BIT(10) + #define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3) + #define MAC_AX_HCI_SEL_SDIO_UART 0 + #define MAC_AX_HCI_SEL_MULTI_USB 1 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 0115f8f5b7245f..5b59c0ee6c6660 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -637,8 +637,6 @@ static int xennet_xdp_xmit_one(struct net_device *dev, + tx_stats->packets++; + u64_stats_update_end(&tx_stats->syncp); + +- xennet_tx_buf_gc(queue); +- + return 0; + } + +@@ -848,9 +846,6 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev + tx_stats->packets++; + u64_stats_update_end(&tx_stats->syncp); + +- /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ +- xennet_tx_buf_gc(queue); +- + if (!netfront_tx_slot_available(queue)) + netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 97ab91a479d112..136dba6221d86a 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -1755,8 +1755,28 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) + * might be pointing at! + */ + result = nvme_disable_ctrl(&dev->ctrl, false); +- if (result < 0) +- return result; ++ if (result < 0) { ++ struct pci_dev *pdev = to_pci_dev(dev->dev); ++ ++ /* ++ * The NVMe Controller Reset method did not get an expected ++ * CSTS.RDY transition, so something with the device appears to ++ * be stuck. Use the lower level and bigger hammer PCIe ++ * Function Level Reset to attempt restoring the device to its ++ * initial state, and try again. ++ */ ++ result = pcie_reset_flr(pdev, false); ++ if (result < 0) ++ return result; ++ ++ pci_restore_state(pdev); ++ result = nvme_disable_ctrl(&dev->ctrl, false); ++ if (result < 0) ++ return result; ++ ++ dev_info(dev->ctrl.device, ++ "controller reset completed after pcie flr\n"); ++ } + + result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); + if (result) +diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c +index cedfbd4258631d..23b8dda70cb2f0 100644 +--- a/drivers/pci/controller/dwc/pci-imx6.c ++++ b/drivers/pci/controller/dwc/pci-imx6.c +@@ -1043,7 +1043,10 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, +- .reserved_bar = 1 << BAR_1 | 1 << BAR_3, ++ .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5, ++ .bar_fixed_size = { ++ [BAR_4] = SZ_256, ++ }, + .align = SZ_64K, + }; + +@@ -1098,8 +1101,6 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } +- /* Start LTSSM. */ +- imx6_pcie_ltssm_enable(dev); + + return 0; + } +diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c +index 6ff20d58539662..c25f32abbdf646 100644 +--- a/drivers/pci/controller/pcie-rockchip-host.c ++++ b/drivers/pci/controller/pcie-rockchip-host.c +@@ -11,6 +11,7 @@ + * ARM PCI Host generic driver. 
+ */ + ++#include + #include + #include + #include +@@ -40,18 +41,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) + { + u32 status; + +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + } + + static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) + { + u32 status; + +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + } + + static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) +@@ -269,7 +270,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) + scale = 3; /* 0.001x */ + curr = curr / 1000; /* convert to mA */ + power = (curr * 3300) / 1000; /* milliwatt */ +- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { ++ while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) { + if (!scale) { + dev_warn(rockchip->dev, "invalid power supply\n"); + return; +@@ -278,10 +279,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) + power = power / 10; + } + +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); +- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | +- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); ++ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power); ++ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale); ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); + } + + /** +@@ -309,14 +310,14 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) + rockchip_pcie_set_power_limit(rockchip); + + /* Set RC's clock architecture as common clock */ +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + status |= PCI_EXP_LNKSTA_SLC << 16; +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + + /* Set RC's RCB to 128 */ +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + status |= PCI_EXP_LNKCTL_RCB; +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + + /* Enable Gen1 training */ + rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, +@@ -338,9 +339,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) + * Enable retrain for gen2. This should be configured only after + * gen1 finished. 
+ */ +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); ++ status &= ~PCI_EXP_LNKCTL2_TLS; ++ status |= PCI_EXP_LNKCTL2_TLS_5_0GT; ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + status |= PCI_EXP_LNKCTL_RL; +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); + + err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, + status, PCIE_LINK_IS_GEN2(status), 20, +@@ -377,15 +382,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) + + /* Clear L0s from RC's link cap */ + if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); +- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); ++ status &= ~PCI_EXP_LNKCAP_ASPM_L0S; ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); + } + +- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); +- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; +- status |= PCIE_RC_CONFIG_DCSR_MPS_256; +- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); ++ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); ++ status &= ~PCI_EXP_DEVCTL_PAYLOAD; ++ status |= PCI_EXP_DEVCTL_PAYLOAD_256B; ++ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); + + return 0; + err_power_off_phy: +diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h +index 15ee949f2485e3..049ad984a4164a 100644 +--- a/drivers/pci/controller/pcie-rockchip.h ++++ b/drivers/pci/controller/pcie-rockchip.h +@@ -144,16 +144,7 @@ + #define PCIE_EP_CONFIG_BASE 0xa00000 + #define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) + #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) +-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) +-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 +-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff +-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 +-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) +-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) +-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) +-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) +-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) +-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) ++#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0) + #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) + #define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) + #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) +diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c +index 0ea64e24ed616b..c17dff4bd19b43 100644 +--- a/drivers/pci/endpoint/pci-ep-cfs.c ++++ b/drivers/pci/endpoint/pci-ep-cfs.c +@@ -683,6 +683,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group) + if (IS_ERR_OR_NULL(group)) + return; + ++ list_del(&group->group_entry); + configfs_unregister_default_group(group); + } + EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); +diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c +index 059f8639f21e92..03673ade6ce4df 100644 +--- a/drivers/pci/endpoint/pci-epf-core.c ++++ 
b/drivers/pci/endpoint/pci-epf-core.c +@@ -311,7 +311,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver) + mutex_lock(&pci_epf_mutex); + list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) + pci_ep_cfs_remove_epf_group(group); +- list_del(&driver->epf_group); ++ WARN_ON(!list_empty(&driver->epf_group)); + mutex_unlock(&pci_epf_mutex); + } + +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c +index 05b7357bd25861..61bded8623d218 100644 +--- a/drivers/pci/pci-acpi.c ++++ b/drivers/pci/pci-acpi.c +@@ -793,13 +793,11 @@ int pci_acpi_program_hp_params(struct pci_dev *dev) + bool pciehp_is_native(struct pci_dev *bridge) + { + const struct pci_host_bridge *host; +- u32 slot_cap; + + if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + return false; + +- pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap); +- if (!(slot_cap & PCI_EXP_SLTCAP_HPC)) ++ if (!bridge->is_pciehp) + return false; + + if (pcie_ports_native) +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 4541dfbf0e1b63..df7f7e2ed0064a 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -2475,7 +2475,7 @@ static void pci_pme_list_scan(struct work_struct *work) + * course of the call. + */ + if (bdev) { +- bref = pm_runtime_get_if_active(bdev, true); ++ bref = pm_runtime_get_if_active(bdev); + if (!bref) + continue; + +@@ -3065,8 +3065,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { + * pci_bridge_d3_possible - Is it possible to put the bridge into D3 + * @bridge: Bridge to check + * +- * This function checks if it is possible to move the bridge to D3. +- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt. ++ * Currently we only allow D3 for some PCIe ports and for Thunderbolt. ++ * ++ * Return: Whether it is possible to move the bridge to D3. ++ * ++ * The return value is guaranteed to be constant across the entire lifetime ++ * of the bridge, including its hot-removal. 
+ */ + bool pci_bridge_d3_possible(struct pci_dev *bridge) + { +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index b7cec139d816ba..5557290b63dc15 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -1594,7 +1594,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) + + pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, ®32); + if (reg32 & PCI_EXP_SLTCAP_HPC) +- pdev->is_hotplug_bridge = 1; ++ pdev->is_hotplug_bridge = pdev->is_pciehp = 1; + } + + static void set_pcie_thunderbolt(struct pci_dev *dev) +diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c +index 308c9969642e1f..c03df0f5288984 100644 +--- a/drivers/perf/cxl_pmu.c ++++ b/drivers/perf/cxl_pmu.c +@@ -881,7 +881,7 @@ static int cxl_pmu_probe(struct device *dev) + return rc; + irq = rc; + +- irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name); ++ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow", dev_name); + if (!irq_name) + return -ENOMEM; + +diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c +index 89c9d74e35466c..a5e60039a264a2 100644 +--- a/drivers/phy/qualcomm/phy-qcom-m31.c ++++ b/drivers/phy/qualcomm/phy-qcom-m31.c +@@ -58,14 +58,16 @@ + #define USB2_0_TX_ENABLE BIT(2) + + #define USB2PHY_USB_PHY_M31_XCFGI_4 0xc8 +- #define HSTX_SLEW_RATE_565PS GENMASK(1, 0) ++ #define HSTX_SLEW_RATE_400PS GENMASK(2, 0) + #define PLL_CHARGING_PUMP_CURRENT_35UA GENMASK(4, 3) + #define ODT_VALUE_38_02_OHM GENMASK(7, 6) + + #define USB2PHY_USB_PHY_M31_XCFGI_5 0xcc +- #define ODT_VALUE_45_02_OHM BIT(2) + #define HSTX_PRE_EMPHASIS_LEVEL_0_55MA BIT(0) + ++#define USB2PHY_USB_PHY_M31_XCFGI_9 0xdc ++ #define HSTX_CURRENT_17_1MA_385MV BIT(1) ++ + #define USB2PHY_USB_PHY_M31_XCFGI_11 0xe4 + #define XCFG_COARSE_TUNE_NUM BIT(1) + #define XCFG_FINE_TUNE_NUM BIT(3) +@@ -120,7 +122,7 @@ static struct m31_phy_regs m31_ipq5332_regs[] = { + }, + { + USB2PHY_USB_PHY_M31_XCFGI_4, +- HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM, ++ HSTX_SLEW_RATE_400PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM, + 0 + }, + { +@@ -130,9 +132,13 @@ static struct m31_phy_regs m31_ipq5332_regs[] = { + }, + { + USB2PHY_USB_PHY_M31_XCFGI_5, +- ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA, ++ HSTX_PRE_EMPHASIS_LEVEL_0_55MA, + 4 + }, ++ { ++ USB2PHY_USB_PHY_M31_XCFGI_9, ++ HSTX_CURRENT_17_1MA_385MV, ++ }, + { + USB_PHY_UTMI_CTRL5, + 0x0, +diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c +index 8234b83fdd88c0..cbf3c140a138ee 100644 +--- a/drivers/phy/rockchip/phy-rockchip-pcie.c ++++ b/drivers/phy/rockchip/phy-rockchip-pcie.c +@@ -31,9 +31,8 @@ + #define PHY_CFG_ADDR_SHIFT 1 + #define PHY_CFG_DATA_MASK 0xf + #define PHY_CFG_ADDR_MASK 0x3f +-#define PHY_CFG_RD_MASK 0x3ff + #define PHY_CFG_WR_ENABLE 1 +-#define PHY_CFG_WR_DISABLE 1 ++#define PHY_CFG_WR_DISABLE 0 + #define PHY_CFG_WR_SHIFT 0 + #define PHY_CFG_WR_MASK 1 + #define PHY_CFG_PLL_LOCK 0x10 +diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c +index 84121b125d90ed..67c2791ee246f5 100644 +--- a/drivers/pinctrl/stm32/pinctrl-stm32.c ++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c +@@ -418,6 +418,7 @@ static struct irq_chip stm32_gpio_irq_chip = { + .irq_set_wake = irq_chip_set_wake_parent, + .irq_request_resources = stm32_gpio_irq_request_resources, + .irq_release_resources = stm32_gpio_irq_release_resources, ++ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? 
irq_chip_set_affinity_parent : NULL, + }; + + static int stm32_gpio_domain_translate(struct irq_domain *d, +diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c +index 47d19f7e295a7a..e82f433e542ef0 100644 +--- a/drivers/platform/chrome/cros_ec.c ++++ b/drivers/platform/chrome/cros_ec.c +@@ -313,6 +313,9 @@ EXPORT_SYMBOL(cros_ec_register); + */ + void cros_ec_unregister(struct cros_ec_device *ec_dev) + { ++ if (ec_dev->mkbp_event_supported) ++ blocking_notifier_chain_unregister(&ec_dev->event_notifier, ++ &ec_dev->notifier_ready); + platform_device_unregister(ec_dev->pd); + platform_device_unregister(ec_dev->ec); + mutex_destroy(&ec_dev->lock); +diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c +index 66fdc6fa73ec54..76807ceb313a97 100644 +--- a/drivers/platform/chrome/cros_ec_typec.c ++++ b/drivers/platform/chrome/cros_ec_typec.c +@@ -1179,8 +1179,8 @@ static int cros_typec_probe(struct platform_device *pdev) + + typec->ec = dev_get_drvdata(pdev->dev.parent); + if (!typec->ec) { +- dev_err(dev, "couldn't find parent EC device\n"); +- return -ENODEV; ++ dev_warn(dev, "couldn't find parent EC device\n"); ++ return -EPROBE_DEFER; + } + + platform_set_drvdata(pdev, typec); +diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c +index 7ed12c1d3b34c0..04686ae1e976bd 100644 +--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c ++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c +@@ -189,6 +189,15 @@ static const struct dmi_system_id fwbug_list[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), + } + }, ++ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ ++ { ++ .ident = "Lenovo Yoga 6 13ALC6", ++ .driver_data = &quirk_s2idle_bug, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "82ND"), ++ } ++ }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ + { + .ident = "HP Laptop 15s-eq2xxx", +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index 8de0d3232e48c5..88364a5502e691 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -537,12 +537,12 @@ static unsigned long __init tpacpi_check_quirks( + return 0; + } + +-static inline bool __pure __init tpacpi_is_lenovo(void) ++static __always_inline bool __pure __init tpacpi_is_lenovo(void) + { + return thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO; + } + +-static inline bool __pure __init tpacpi_is_ibm(void) ++static __always_inline bool __pure __init tpacpi_is_ibm(void) + { + return thinkpad_id.vendor == PCI_VENDOR_ID_IBM; + } +diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c +index cc5ef6e2f0a8cb..0dfaf1d14035de 100644 +--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c ++++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c +@@ -664,6 +664,11 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = { + #define LCDIF_1_RD_HURRY GENMASK(15, 13) + #define LCDIF_0_RD_HURRY GENMASK(12, 10) + ++#define ISI_CACHE_CTRL 0x50 ++#define ISI_V_WR_HURRY GENMASK(28, 26) ++#define ISI_U_WR_HURRY GENMASK(25, 23) ++#define ISI_Y_WR_HURRY GENMASK(22, 20) ++ + static int imx8mp_media_power_notifier(struct notifier_block *nb, + unsigned long action, void *data) + { +@@ -693,6 +698,11 @@ static int imx8mp_media_power_notifier(struct notifier_block *nb, + regmap_set_bits(bc->regmap, LCDIF_ARCACHE_CTRL, + FIELD_PREP(LCDIF_1_RD_HURRY, 7) | + FIELD_PREP(LCDIF_0_RD_HURRY, 7)); ++ /* Same here 
for ISI */ ++ regmap_set_bits(bc->regmap, ISI_CACHE_CTRL, ++ FIELD_PREP(ISI_V_WR_HURRY, 7) | ++ FIELD_PREP(ISI_U_WR_HURRY, 7) | ++ FIELD_PREP(ISI_Y_WR_HURRY, 7)); + } + + return NOTIFY_OK; +diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c +index 5b3681b9100c1e..190e8a4cfa97f4 100644 +--- a/drivers/power/supply/qcom_battmgr.c ++++ b/drivers/power/supply/qcom_battmgr.c +@@ -977,6 +977,8 @@ static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry + { + if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN)) + return POWER_SUPPLY_TECHNOLOGY_LION; ++ if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN)) ++ return POWER_SUPPLY_TECHNOLOGY_LIPO; + + pr_err("Unknown battery technology '%s'\n", chemistry); + return POWER_SUPPLY_TECHNOLOGY_UNKNOWN; +diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c +index bf3b6f1aa98425..41e1fdbcda165c 100644 +--- a/drivers/pps/clients/pps-gpio.c ++++ b/drivers/pps/clients/pps-gpio.c +@@ -206,8 +206,8 @@ static int pps_gpio_probe(struct platform_device *pdev) + } + + /* register IRQ interrupt handler */ +- ret = devm_request_irq(dev, data->irq, pps_gpio_irq_handler, +- get_irqf_trigger_flags(data), data->info.name, data); ++ ret = request_irq(data->irq, pps_gpio_irq_handler, ++ get_irqf_trigger_flags(data), data->info.name, data); + if (ret) { + pps_unregister_source(data->pps); + dev_err(dev, "failed to acquire IRQ %d\n", data->irq); +@@ -224,6 +224,7 @@ static int pps_gpio_remove(struct platform_device *pdev) + { + struct pps_gpio_device_data *data = platform_get_drvdata(pdev); + ++ free_irq(data->irq, data); + pps_unregister_source(data->pps); + del_timer_sync(&data->echo_timer); + /* reset echo pin in any case */ +diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c +index b7fc260ed43bc4..0682bb340221ab 100644 +--- a/drivers/ptp/ptp_clock.c ++++ b/drivers/ptp/ptp_clock.c +@@ -79,7 +79,7 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + + if (ptp_clock_freerun(ptp)) { +- pr_err("ptp: physical clock is free running\n"); ++ pr_err_ratelimited("ptp: physical clock is free running\n"); + return -EBUSY; + } + +diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h +index a54124269c2f49..3fbd1d68a9bcb3 100644 +--- a/drivers/ptp/ptp_private.h ++++ b/drivers/ptp/ptp_private.h +@@ -20,6 +20,11 @@ + #define PTP_BUF_TIMESTAMPS 30 + #define PTP_DEFAULT_MAX_VCLOCKS 20 + ++enum { ++ PTP_LOCK_PHYSICAL = 0, ++ PTP_LOCK_VIRTUAL, ++}; ++ + struct timestamp_event_queue { + struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; + int head; +diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c +index dcf752c9e04506..7d08ff3b30fc27 100644 +--- a/drivers/ptp/ptp_vclock.c ++++ b/drivers/ptp/ptp_vclock.c +@@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp) + return PTP_VCLOCK_REFRESH_INTERVAL; + } + ++static void ptp_vclock_set_subclass(struct ptp_clock *ptp) ++{ ++ lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL); ++} ++ + static const struct ptp_clock_info ptp_vclock_info = { + .owner = THIS_MODULE, + .name = "ptp virtual clock", +@@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) + return NULL; + } + ++ ptp_vclock_set_subclass(vclock->clock); ++ + timecounter_init(&vclock->tc, &vclock->cc, 0); + ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); + +diff --git 
a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c +index 2fc6163eace315..6591f8f84ce8e7 100644 +--- a/drivers/pwm/pwm-imx-tpm.c ++++ b/drivers/pwm/pwm-imx-tpm.c +@@ -204,6 +204,15 @@ static int pwm_imx_tpm_apply_hw(struct pwm_chip *chip, + val |= FIELD_PREP(PWM_IMX_TPM_SC_PS, p->prescale); + writel(val, tpm->base + PWM_IMX_TPM_SC); + ++ /* ++ * if the counter is disabled (CMOD == 0), programming the new ++ * period length (MOD) will not reset the counter (CNT). If ++ * CNT.COUNT happens to be bigger than the new MOD value then ++ * the counter will end up being reset way too late. Therefore, ++ * manually reset it to 0. ++ */ ++ if (!cmod) ++ writel(0x0, tpm->base + PWM_IMX_TPM_CNT); + /* + * set period count: + * if the PWM is disabled (CMOD[1:0] = 2b00), then MOD register +diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c +index ff7c70a0033d8a..34413607b86f4b 100644 +--- a/drivers/pwm/pwm-mediatek.c ++++ b/drivers/pwm/pwm-mediatek.c +@@ -118,6 +118,26 @@ static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip, + writel(value, chip->regs + chip->soc->reg_offset[num] + offset); + } + ++static void pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) ++{ ++ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); ++ u32 value; ++ ++ value = readl(pc->regs); ++ value |= BIT(pwm->hwpwm); ++ writel(value, pc->regs); ++} ++ ++static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm) ++{ ++ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); ++ u32 value; ++ ++ value = readl(pc->regs); ++ value &= ~BIT(pwm->hwpwm); ++ writel(value, pc->regs); ++} ++ + static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) + { +@@ -147,7 +167,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + do_div(resolution, clk_rate); + + cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution); +- while (cnt_period > 8191) { ++ if (!cnt_period) ++ return -EINVAL; ++ ++ while (cnt_period > 8192) { + resolution *= 2; + clkdiv++; + cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, +@@ -170,9 +193,16 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + } + + cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution); ++ + pwm_mediatek_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv); +- pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period); +- pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty); ++ pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period - 1); ++ ++ if (cnt_duty) { ++ pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty - 1); ++ pwm_mediatek_enable(chip, pwm); ++ } else { ++ pwm_mediatek_disable(chip, pwm); ++ } + + out: + pwm_mediatek_clk_disable(chip, pwm); +@@ -180,35 +210,6 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + return ret; + } + +-static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) +-{ +- struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); +- u32 value; +- int ret; +- +- ret = pwm_mediatek_clk_enable(chip, pwm); +- if (ret < 0) +- return ret; +- +- value = readl(pc->regs); +- value |= BIT(pwm->hwpwm); +- writel(value, pc->regs); +- +- return 0; +-} +- +-static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm) +-{ +- struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); +- u32 value; +- +- value = readl(pc->regs); +- value &= ~BIT(pwm->hwpwm); +- writel(value, 
pc->regs); +- +- pwm_mediatek_clk_disable(chip, pwm); +-} +- + static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) + { +@@ -218,8 +219,10 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, + return -EINVAL; + + if (!state->enabled) { +- if (pwm->state.enabled) ++ if (pwm->state.enabled) { + pwm_mediatek_disable(chip, pwm); ++ pwm_mediatek_clk_disable(chip, pwm); ++ } + + return 0; + } +@@ -229,7 +232,7 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, + return err; + + if (!pwm->state.enabled) +- err = pwm_mediatek_enable(chip, pwm); ++ err = pwm_mediatek_clk_enable(chip, pwm); + + return err; + } +diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c +index 610a69928dff2a..251f9840d85bd8 100644 +--- a/drivers/remoteproc/imx_rproc.c ++++ b/drivers/remoteproc/imx_rproc.c +@@ -1088,8 +1088,8 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv) + struct device *dev = priv->dev; + int ret; + +- /* Remote core is not under control of Linux */ +- if (dcfg->method == IMX_RPROC_NONE) ++ /* Remote core is not under control of Linux or it is managed by SCU API */ ++ if (dcfg->method == IMX_RPROC_NONE || dcfg->method == IMX_RPROC_SCU_API) + return 0; + + priv->clk = devm_clk_get(dev, NULL); +diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig +index ccd59ddd76100a..9f25eb3aec2561 100644 +--- a/drivers/reset/Kconfig ++++ b/drivers/reset/Kconfig +@@ -51,8 +51,8 @@ config RESET_BERLIN + + config RESET_BRCMSTB + tristate "Broadcom STB reset controller" +- depends on ARCH_BRCMSTB || COMPILE_TEST +- default ARCH_BRCMSTB ++ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST ++ default ARCH_BRCMSTB || ARCH_BCM2835 + help + This enables the reset controller driver for Broadcom STB SoCs using + a SUN_TOP_CTRL_SW_INIT style controller. +@@ -60,11 +60,11 @@ config RESET_BRCMSTB + config RESET_BRCMSTB_RESCAL + tristate "Broadcom STB RESCAL reset controller" + depends on HAS_IOMEM +- depends on ARCH_BRCMSTB || COMPILE_TEST +- default ARCH_BRCMSTB ++ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST ++ default ARCH_BRCMSTB || ARCH_BCM2835 + help + This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on +- BCM7216. ++ BCM7216 or the BCM2712. + + config RESET_HSDK + bool "Synopsys HSDK Reset Driver" +diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c +index e14981383c0125..74aad2b12460c3 100644 +--- a/drivers/rtc/rtc-ds1307.c ++++ b/drivers/rtc/rtc-ds1307.c +@@ -274,6 +274,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) + if (tmp & DS1340_BIT_OSF) + return -EINVAL; + break; ++ case ds_1341: ++ ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &tmp); ++ if (ret) ++ return ret; ++ if (tmp & DS1337_BIT_OSF) ++ return -EINVAL; ++ break; + case ds_1388: + ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp); + if (ret) +@@ -372,6 +379,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) + regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG, + DS1340_BIT_OSF, 0); + break; ++ case ds_1341: ++ regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS, ++ DS1337_BIT_OSF, 0); ++ break; + case ds_1388: + regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG, + DS1388_BIT_OSF, 0); +@@ -1808,10 +1819,8 @@ static int ds1307_probe(struct i2c_client *client) + regmap_write(ds1307->regmap, DS1337_REG_CONTROL, + regs[0]); + +- /* oscillator fault? clear flag, and warn */ ++ /* oscillator fault? 
warn */ + if (regs[1] & DS1337_BIT_OSF) { +- regmap_write(ds1307->regmap, DS1337_REG_STATUS, +- regs[1] & ~DS1337_BIT_OSF); + dev_warn(ds1307->dev, "SET TIME!\n"); + } + break; +diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c +index 6fa0fb35e5210f..3409822dfbb41d 100644 +--- a/drivers/s390/char/sclp.c ++++ b/drivers/s390/char/sclp.c +@@ -76,6 +76,13 @@ unsigned long sclp_console_full; + /* The currently active SCLP command word. */ + static sclp_cmdw_t active_cmd; + ++static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) ++{ ++ if (sccb_int) ++ return __va(sccb_int); ++ return NULL; ++} ++ + static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) + { + struct sclp_trace_entry e; +@@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb) + + static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) + { +- struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int); ++ struct sccb_header *sccb = sclpint_to_sccb(sccb_int); + struct evbuf_header *evbuf; + u16 response; + +@@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code, + + /* INT: Interrupt received (a=intparm, b=cmd) */ + sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, +- (struct sccb_header *)__va(finished_sccb), ++ sclpint_to_sccb(finished_sccb), + !ok_response(finished_sccb, active_cmd)); + + if (finished_sccb) { +diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c +index 0f64b024430376..31b95e6c96c5fe 100644 +--- a/drivers/scsi/aacraid/comminit.c ++++ b/drivers/scsi/aacraid/comminit.c +@@ -481,8 +481,7 @@ void aac_define_int_mode(struct aac_dev *dev) + pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { + min_msix = 2; + i = pci_alloc_irq_vectors(dev->pdev, +- min_msix, msi_count, +- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); ++ min_msix, msi_count, PCI_IRQ_MSIX); + if (i > 0) { + dev->msi_enabled = 1; + msi_count = i; +diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c +index a9d3d8562d3c17..0ec76d1cb6fbe9 100644 +--- a/drivers/scsi/bfa/bfad_im.c ++++ b/drivers/scsi/bfa/bfad_im.c +@@ -706,6 +706,7 @@ bfad_im_probe(struct bfad_s *bfad) + + if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { + kfree(im); ++ bfad->im = NULL; + return BFA_STATUS_FAILED; + } + +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index 0fda8905eabd82..916c076484608a 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -3184,7 +3184,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, + return NULL; + conn = cls_conn->dd_data; + +- conn->dd_data = cls_conn->dd_data + sizeof(*conn); ++ if (dd_size) ++ conn->dd_data = cls_conn->dd_data + sizeof(*conn); + conn->session = session; + conn->cls_conn = cls_conn; + conn->c_stage = ISCSI_CONN_INITIAL_STAGE; +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c +index 20662b4f339eb3..6b6c964e8076e2 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c +@@ -6291,7 +6291,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + } + phba->nvmeio_trc_on = 1; + phba->nvmeio_trc_output_idx = 0; +- phba->nvmeio_trc = NULL; + } else { + nvmeio_off: + phba->nvmeio_trc_size = 0; +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index 070654cc929208..dcbb2432c978bc 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -390,6 +390,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + 
return; + ++ /* may be called before queues established if hba_setup fails */ ++ if (!phba->sli4_hba.hdwq) ++ return; ++ + spin_lock_irqsave(&phba->hbalock, iflag); + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; +diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h +index ae98d15c30b1dd..90da783457fbff 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr.h ++++ b/drivers/scsi/mpi3mr/mpi3mr.h +@@ -1025,6 +1025,8 @@ struct scmd_priv { + * @logdata_buf: Circular buffer to store log data entries + * @logdata_buf_idx: Index of entry in buffer to store + * @logdata_entry_sz: log data entry size ++ * @adm_req_q_bar_writeq_lock: Admin request queue lock ++ * @adm_reply_q_bar_writeq_lock: Admin reply queue lock + * @pend_large_data_sz: Counter to track pending large data + * @io_throttle_data_length: I/O size to track in 512b blocks + * @io_throttle_high: I/O size to start throttle in 512b blocks +@@ -1055,7 +1057,7 @@ struct mpi3mr_ioc { + char name[MPI3MR_NAME_LENGTH]; + char driver_name[MPI3MR_NAME_LENGTH]; + +- volatile struct mpi3_sysif_registers __iomem *sysif_regs; ++ struct mpi3_sysif_registers __iomem *sysif_regs; + resource_size_t sysif_regs_phys; + int bars; + u64 dma_mask; +@@ -1207,6 +1209,8 @@ struct mpi3mr_ioc { + u8 *logdata_buf; + u16 logdata_buf_idx; + u16 logdata_entry_sz; ++ spinlock_t adm_req_q_bar_writeq_lock; ++ spinlock_t adm_reply_q_bar_writeq_lock; + + atomic_t pend_large_data_sz; + u32 io_throttle_data_length; +diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c +index 60714a6c26375e..b03e4b8cb67d66 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c +@@ -23,17 +23,22 @@ module_param(poll_queues, int, 0444); + MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. 
(Range 1 - 126)"); + + #if defined(writeq) && defined(CONFIG_64BIT) +-static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) ++static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, ++ spinlock_t *write_queue_lock) + { + writeq(b, addr); + } + #else +-static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) ++static inline void mpi3mr_writeq(__u64 b, void __iomem *addr, ++ spinlock_t *write_queue_lock) + { + __u64 data_out = b; ++ unsigned long flags; + ++ spin_lock_irqsave(write_queue_lock, flags); + writel((u32)(data_out), addr); + writel((u32)(data_out >> 32), (addr + 4)); ++ spin_unlock_irqrestore(write_queue_lock, flags); + } + #endif + +@@ -411,8 +416,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, + MPI3MR_SENSE_BUF_SZ); + } + if (cmdptr->is_waiting) { +- complete(&cmdptr->done); + cmdptr->is_waiting = 0; ++ complete(&cmdptr->done); + } else if (cmdptr->callback) + cmdptr->callback(mrioc, cmdptr); + } +@@ -2666,9 +2671,11 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) + (mrioc->num_admin_req); + writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); + mpi3mr_writeq(mrioc->admin_req_dma, +- &mrioc->sysif_regs->admin_request_queue_address); ++ &mrioc->sysif_regs->admin_request_queue_address, ++ &mrioc->adm_req_q_bar_writeq_lock); + mpi3mr_writeq(mrioc->admin_reply_dma, +- &mrioc->sysif_regs->admin_reply_queue_address); ++ &mrioc->sysif_regs->admin_reply_queue_address, ++ &mrioc->adm_reply_q_bar_writeq_lock); + writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); + writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); + return retval; +diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c +index 7880675a68dba6..1bf3572c7aac44 100644 +--- a/drivers/scsi/mpi3mr/mpi3mr_os.c ++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c +@@ -49,6 +49,13 @@ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, + + #define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE) + ++/* ++ * SAS Log info code for a NCQ collateral abort after an NCQ error: ++ * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR ++ * See: drivers/message/fusion/lsi/mpi_log_sas.h ++ */ ++#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 ++ + /** + * mpi3mr_host_tag_for_scmd - Get host tag for a scmd + * @mrioc: Adapter instance reference +@@ -3270,7 +3277,18 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, + scmd->result = DID_NO_CONNECT << 16; + break; + case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: +- scmd->result = DID_SOFT_ERROR << 16; ++ if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { ++ /* ++ * This is a ATA NCQ command aborted due to another NCQ ++ * command failure. We must retry this command ++ * immediately but without incrementing its retry ++ * counter. 
++			 */
++			WARN_ON_ONCE(xfer_count != 0);
++			scmd->result = DID_IMM_RETRY << 16;
++		} else {
++			scmd->result = DID_SOFT_ERROR << 16;
++		}
+ 		break;
+ 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
+ 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
+@@ -5084,6 +5102,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	spin_lock_init(&mrioc->tgtdev_lock);
+ 	spin_lock_init(&mrioc->watchdog_lock);
+ 	spin_lock_init(&mrioc->chain_buf_lock);
++	spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock);
++	spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock);
+ 	spin_lock_init(&mrioc->sas_node_lock);
+ 
+ 	INIT_LIST_HEAD(&mrioc->fwevt_list);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 0afa485fb300ca..7bef42a2fb5762 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -196,6 +196,14 @@ struct sense_info {
+ #define MPT3SAS_PORT_ENABLE_COMPLETE		(0xFFFD)
+ #define MPT3SAS_ABRT_TASK_SET			(0xFFFE)
+ #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES	(0xFFFF)
++
++/*
++ * SAS Log info code for an NCQ collateral abort after an NCQ error:
++ * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
++ * See: drivers/message/fusion/lsi/mpi_log_sas.h
++ */
++#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000
++
+ /**
+  * struct fw_event_work - firmware event struct
+  * @list: link list framework
+@@ -5824,6 +5832,17 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ 			goto out;
+ 		}
++		if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
++			/*
++			 * This is an ATA NCQ command aborted due to another NCQ
++			 * command failure. We must retry this command
++			 * immediately but without incrementing its retry
++			 * counter.
++ */ ++ WARN_ON_ONCE(xfer_cnt != 0); ++ scmd->result = DID_IMM_RETRY << 16; ++ break; ++ } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 77c28d2ebf0137..d91efd7c983ce0 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, + + ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); + vfree(dst_addr); ++ if (IS_ERR(ep)) ++ return NULL; + return ep; + } + +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c +index cead0fbbe5dbdf..8ee74dddef16e0 100644 +--- a/drivers/scsi/scsi_scan.c ++++ b/drivers/scsi/scsi_scan.c +@@ -1851,7 +1851,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, + + return 0; + } +- ++EXPORT_SYMBOL(scsi_scan_host_selected); + static void scsi_sysfs_add_devices(struct Scsi_Host *shost) + { + struct scsi_device *sdev; +diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c +index 7fdd2b61fe855e..7b4c4752e21609 100644 +--- a/drivers/scsi/scsi_transport_sas.c ++++ b/drivers/scsi/scsi_transport_sas.c +@@ -40,6 +40,8 @@ + #include + + #include "scsi_sas_internal.h" ++#include "scsi_priv.h" ++ + struct sas_host_attrs { + struct list_head rphy_list; + struct mutex lock; +@@ -1681,32 +1683,66 @@ int scsi_is_sas_rphy(const struct device *dev) + } + EXPORT_SYMBOL(scsi_is_sas_rphy); + +- +-/* +- * SCSI scan helper +- */ +- +-static int sas_user_scan(struct Scsi_Host *shost, uint channel, +- uint id, u64 lun) ++static void scan_channel_zero(struct Scsi_Host *shost, uint id, u64 lun) + { + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); + struct sas_rphy *rphy; + +- mutex_lock(&sas_host->lock); + list_for_each_entry(rphy, &sas_host->rphy_list, list) { + if (rphy->identify.device_type != SAS_END_DEVICE || + rphy->scsi_target_id == -1) + continue; + +- if ((channel == SCAN_WILD_CARD || channel == 0) && +- (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { ++ if (id == SCAN_WILD_CARD || id == rphy->scsi_target_id) { + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, + lun, SCSI_SCAN_MANUAL); + } + } +- mutex_unlock(&sas_host->lock); ++} + +- return 0; ++/* ++ * SCSI scan helper ++ */ ++ ++static int sas_user_scan(struct Scsi_Host *shost, uint channel, ++ uint id, u64 lun) ++{ ++ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); ++ int res = 0; ++ int i; ++ ++ switch (channel) { ++ case 0: ++ mutex_lock(&sas_host->lock); ++ scan_channel_zero(shost, id, lun); ++ mutex_unlock(&sas_host->lock); ++ break; ++ ++ case SCAN_WILD_CARD: ++ mutex_lock(&sas_host->lock); ++ scan_channel_zero(shost, id, lun); ++ mutex_unlock(&sas_host->lock); ++ ++ for (i = 1; i <= shost->max_channel; i++) { ++ res = scsi_scan_host_selected(shost, i, id, lun, ++ SCSI_SCAN_MANUAL); ++ if (res) ++ goto exit_scan; ++ } ++ break; ++ ++ default: ++ if (channel < shost->max_channel) { ++ res = scsi_scan_host_selected(shost, channel, id, lun, ++ SCSI_SCAN_MANUAL); ++ } else { ++ res = -EINVAL; ++ } ++ break; ++ } ++ ++exit_scan: ++ return res; + } + + +diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c +index 6f177e46fa0f8e..a6773075bfe3ef 100644 +--- a/drivers/soc/qcom/mdt_loader.c ++++ b/drivers/soc/qcom/mdt_loader.c +@@ -17,6 +17,37 @@ + #include + #include + ++static bool mdt_header_valid(const struct firmware *fw) ++{ ++ const 
struct elf32_hdr *ehdr; ++ size_t phend; ++ size_t shend; ++ ++ if (fw->size < sizeof(*ehdr)) ++ return false; ++ ++ ehdr = (struct elf32_hdr *)fw->data; ++ ++ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) ++ return false; ++ ++ if (ehdr->e_phentsize != sizeof(struct elf32_phdr)) ++ return false; ++ ++ phend = size_add(size_mul(sizeof(struct elf32_phdr), ehdr->e_phnum), ehdr->e_phoff); ++ if (phend > fw->size) ++ return false; ++ ++ if (ehdr->e_shentsize != sizeof(struct elf32_shdr)) ++ return false; ++ ++ shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff); ++ if (shend > fw->size) ++ return false; ++ ++ return true; ++} ++ + static bool mdt_phdr_valid(const struct elf32_phdr *phdr) + { + if (phdr->p_type != PT_LOAD) +@@ -84,8 +115,11 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw) + phys_addr_t max_addr = 0; + int i; + ++ if (!mdt_header_valid(fw)) ++ return -EINVAL; ++ + ehdr = (struct elf32_hdr *)fw->data; +- phdrs = (struct elf32_phdr *)(ehdr + 1); ++ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; +@@ -136,8 +170,11 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + ssize_t ret; + void *data; + ++ if (!mdt_header_valid(fw)) ++ return ERR_PTR(-EINVAL); ++ + ehdr = (struct elf32_hdr *)fw->data; +- phdrs = (struct elf32_phdr *)(ehdr + 1); ++ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff); + + if (ehdr->e_phnum < 2) + return ERR_PTR(-EINVAL); +@@ -216,8 +253,11 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + int ret; + int i; + ++ if (!mdt_header_valid(fw)) ++ return -EINVAL; ++ + ehdr = (struct elf32_hdr *)fw->data; +- phdrs = (struct elf32_phdr *)(ehdr + 1); ++ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; +@@ -272,7 +312,7 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na + int i; + + ehdr = (struct elf32_hdr *)fw->data; +- phdrs = (struct elf32_phdr *)(ehdr + 1); ++ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff); + + for (i = 0; i < ehdr->e_phnum; i++) { + /* +@@ -312,9 +352,12 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, + if (!fw || !mem_region || !mem_phys || !mem_size) + return -EINVAL; + ++ if (!mdt_header_valid(fw)) ++ return -EINVAL; ++ + is_split = qcom_mdt_bins_are_split(fw, fw_name); + ehdr = (struct elf32_hdr *)fw->data; +- phdrs = (struct elf32_phdr *)(ehdr + 1); ++ phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; +diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c +index dfc2d4e38fa9b9..163a58eb02e0a2 100644 +--- a/drivers/soc/qcom/rpmh-rsc.c ++++ b/drivers/soc/qcom/rpmh-rsc.c +@@ -1075,7 +1075,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev) + drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT); + drv->ver.minor >>= MINOR_VER_SHIFT; + +- if (drv->ver.major == 3) ++ if (drv->ver.major >= 3) + drv->regs = rpmh_rsc_reg_offset_ver_3_0; + else + drv->regs = rpmh_rsc_reg_offset_ver_2_7; +diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c +index 162f52456f654e..1306e3b8b5c04b 100644 +--- a/drivers/soc/tegra/pmc.c ++++ b/drivers/soc/tegra/pmc.c +@@ -1232,7 +1232,7 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg, + } + + static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, +- struct device_node *np, bool 
off)
++					 struct device_node *np)
+ {
+ 	struct device *dev = pg->pmc->dev;
+ 	int err;
+@@ -1247,22 +1247,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+ 	err = reset_control_acquire(pg->reset);
+ 	if (err < 0) {
+ 		pr_err("failed to acquire resets: %d\n", err);
+-		goto out;
+-	}
+-
+-	if (off) {
+-		err = reset_control_assert(pg->reset);
+-	} else {
+-		err = reset_control_deassert(pg->reset);
+-		if (err < 0)
+-			goto out;
+-
+-		reset_control_release(pg->reset);
+-	}
+-
+-out:
+-	if (err) {
+-		reset_control_release(pg->reset);
+ 		reset_control_put(pg->reset);
+ 	}
+ 
+@@ -1307,20 +1291,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+ 		goto set_available;
+ 	}
+ 
+-	err = tegra_powergate_of_get_resets(pg, np, off);
++	err = tegra_powergate_of_get_resets(pg, np);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
+ 		goto remove_clks;
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+-		if (off)
+-			WARN_ON(tegra_powergate_power_up(pg, true));
++	/*
++	 * If the power-domain is off, then ensure the resets are asserted.
++	 * If the power-domain is on, then power down to ensure that when it
++	 * is turned on, the power-domain, clocks and resets are all in the
++	 * expected state.
++	 */
++	if (off) {
++		err = reset_control_assert(pg->reset);
++		if (err) {
++			pr_err("failed to assert resets: %d\n", err);
++			goto remove_resets;
++		}
++	} else {
++		err = tegra_powergate_power_down(pg);
++		if (err) {
++			dev_err(dev, "failed to turn off PM domain %s: %d\n",
++				pg->genpd.name, err);
++			goto remove_resets;
++		}
++	}
+ 
++	/*
++	 * If PM_GENERIC_DOMAINS is not enabled, power on
++	 * the domain and skip the genpd registration.
++	 */
++	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
++		WARN_ON(tegra_powergate_power_up(pg, true));
+ 		goto remove_resets;
+ 	}
+ 
+-	err = pm_genpd_init(&pg->genpd, NULL, off);
++	err = pm_genpd_init(&pg->genpd, NULL, true);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
+ 			err);
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index b89f8067e6cdd7..3d8937245c1807 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -1104,10 +1104,10 @@ static int __maybe_unused amd_pm_prepare(struct device *dev)
+ 	 * device is not in runtime suspend state, observed that device alerts are missing
+ 	 * without pm_prepare on AMD platforms in clockstop mode0.
+ */ +- if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) { +- ret = pm_request_resume(dev); ++ if (amd_manager->power_mode_mask) { ++ ret = pm_runtime_resume(dev); + if (ret < 0) { +- dev_err(bus->dev, "pm_request_resume failed: %d\n", ret); ++ dev_err(bus->dev, "pm_runtime_resume failed: %d\n", ret); + return 0; + } + } +diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c +index 767942f19adb6a..e7397fd8e9ad94 100644 +--- a/drivers/soundwire/bus.c ++++ b/drivers/soundwire/bus.c +@@ -1730,15 +1730,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) + + /* Update the Slave driver */ + if (slave_notify) { ++ if (slave->prop.use_domain_irq && slave->irq) ++ handle_nested_irq(slave->irq); ++ + mutex_lock(&slave->sdw_dev_lock); + + if (slave->probed) { + struct device *dev = &slave->dev; + struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); + +- if (slave->prop.use_domain_irq && slave->irq) +- handle_nested_irq(slave->irq); +- + if (drv->ops && drv->ops->interrupt_callback) { + slave_intr.sdca_cascade = sdca_cascade; + slave_intr.control_port = clear; +diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c +index 9e2541dee56e57..fa899ab2014c6a 100644 +--- a/drivers/spi/spi-fsl-lpspi.c ++++ b/drivers/spi/spi-fsl-lpspi.c +@@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) + } + + if (config.speed_hz > perclk_rate / 2) { +- dev_err(fsl_lpspi->dev, +- "per-clk should be at least two times of transfer speed"); +- return -EINVAL; ++ div = 2; ++ } else { ++ div = DIV_ROUND_UP(perclk_rate, config.speed_hz); + } + +- div = DIV_ROUND_UP(perclk_rate, config.speed_hz); +- + for (prescale = 0; prescale <= prescale_max; prescale++) { + scldiv = div / (1 << prescale) - 2; + if (scldiv >= 0 && scldiv < 256) { +diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c +index 95cca281e8a378..07104e7f5a5f9d 100644 +--- a/drivers/staging/media/imx/imx-media-csc-scaler.c ++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c +@@ -914,7 +914,7 @@ imx_media_csc_scaler_device_init(struct imx_media_dev *md) + return &priv->vdev; + + err_m2m: +- video_set_drvdata(vfd, NULL); ++ video_device_release(vfd); + err_vfd: + kfree(priv); + return ERR_PTR(ret); +diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c +index 6600ae44f29d9e..d3ab251ba04973 100644 +--- a/drivers/target/target_core_fabric_lib.c ++++ b/drivers/target/target_core_fabric_lib.c +@@ -257,11 +257,41 @@ static int iscsi_get_pr_transport_id_len( + return len; + } + +-static char *iscsi_parse_pr_out_transport_id( ++static void sas_parse_pr_out_transport_id(char *buf, char *i_str) ++{ ++ char hex[17] = {}; ++ ++ bin2hex(hex, buf + 4, 8); ++ snprintf(i_str, TRANSPORT_IQN_LEN, "naa.%s", hex); ++} ++ ++static void srp_parse_pr_out_transport_id(char *buf, char *i_str) ++{ ++ char hex[33] = {}; ++ ++ bin2hex(hex, buf + 8, 16); ++ snprintf(i_str, TRANSPORT_IQN_LEN, "0x%s", hex); ++} ++ ++static void fcp_parse_pr_out_transport_id(char *buf, char *i_str) ++{ ++ snprintf(i_str, TRANSPORT_IQN_LEN, "%8phC", buf + 8); ++} ++ ++static void sbp_parse_pr_out_transport_id(char *buf, char *i_str) ++{ ++ char hex[17] = {}; ++ ++ bin2hex(hex, buf + 8, 8); ++ snprintf(i_str, TRANSPORT_IQN_LEN, "%s", hex); ++} ++ ++static bool iscsi_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + char *buf, + u32 *out_tid_len, +- char **port_nexus_ptr) ++ char **port_nexus_ptr, ++ char *i_str) + { 
+ char *p; + int i; +@@ -282,7 +312,7 @@ static char *iscsi_parse_pr_out_transport_id( + if ((format_code != 0x00) && (format_code != 0x40)) { + pr_err("Illegal format code: 0x%02x for iSCSI" + " Initiator Transport ID\n", format_code); +- return NULL; ++ return false; + } + /* + * If the caller wants the TransportID Length, we set that value for the +@@ -306,7 +336,7 @@ static char *iscsi_parse_pr_out_transport_id( + pr_err("Unable to locate \",i,0x\" separator" + " for Initiator port identifier: %s\n", + &buf[4]); +- return NULL; ++ return false; + } + *p = '\0'; /* Terminate iSCSI Name */ + p += 5; /* Skip over ",i,0x" separator */ +@@ -339,7 +369,8 @@ static char *iscsi_parse_pr_out_transport_id( + } else + *port_nexus_ptr = NULL; + +- return &buf[4]; ++ strscpy(i_str, &buf[4], TRANSPORT_IQN_LEN); ++ return true; + } + + int target_get_pr_transport_id_len(struct se_node_acl *nacl, +@@ -387,33 +418,35 @@ int target_get_pr_transport_id(struct se_node_acl *nacl, + } + } + +-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, +- char *buf, u32 *out_tid_len, char **port_nexus_ptr) ++bool target_parse_pr_out_transport_id(struct se_portal_group *tpg, ++ char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str) + { +- u32 offset; +- + switch (tpg->proto_id) { + case SCSI_PROTOCOL_SAS: + /* + * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID + * for initiator ports using SCSI over SAS Serial SCSI Protocol. + */ +- offset = 4; ++ sas_parse_pr_out_transport_id(buf, i_str); + break; +- case SCSI_PROTOCOL_SBP: + case SCSI_PROTOCOL_SRP: ++ srp_parse_pr_out_transport_id(buf, i_str); ++ break; + case SCSI_PROTOCOL_FCP: +- offset = 8; ++ fcp_parse_pr_out_transport_id(buf, i_str); ++ break; ++ case SCSI_PROTOCOL_SBP: ++ sbp_parse_pr_out_transport_id(buf, i_str); + break; + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len, +- port_nexus_ptr); ++ port_nexus_ptr, i_str); + default: + pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id); +- return NULL; ++ return false; + } + + *port_nexus_ptr = NULL; + *out_tid_len = 24; +- return buf + offset; ++ return true; + } +diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h +index 408be26d2e9b4d..20aab1f505655c 100644 +--- a/drivers/target/target_core_internal.h ++++ b/drivers/target/target_core_internal.h +@@ -103,8 +103,8 @@ int target_get_pr_transport_id_len(struct se_node_acl *nacl, + int target_get_pr_transport_id(struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, int *format_code, + unsigned char *buf); +-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, +- char *buf, u32 *out_tid_len, char **port_nexus_ptr); ++bool target_parse_pr_out_transport_id(struct se_portal_group *tpg, ++ char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str); + + /* target_core_hba.c */ + struct se_hba *core_alloc_hba(const char *, u32, u32); +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index a9eb6a3e838347..624d2f68bf385b 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -1477,11 +1477,12 @@ core_scsi3_decode_spec_i_port( + LIST_HEAD(tid_dest_list); + struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; + unsigned char *buf, *ptr, proto_ident; +- const unsigned char *i_str = NULL; ++ unsigned char i_str[TRANSPORT_IQN_LEN]; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; + sense_reason_t ret; + u32 tpdl, tid_len = 0; + u32 dest_rtpi = 
0; ++ bool tid_found; + + /* + * Allocate a struct pr_transport_id_holder and setup the +@@ -1570,9 +1571,9 @@ core_scsi3_decode_spec_i_port( + dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi; + + iport_ptr = NULL; +- i_str = target_parse_pr_out_transport_id(tmp_tpg, +- ptr, &tid_len, &iport_ptr); +- if (!i_str) ++ tid_found = target_parse_pr_out_transport_id(tmp_tpg, ++ ptr, &tid_len, &iport_ptr, i_str); ++ if (!tid_found) + continue; + /* + * Determine if this SCSI device server requires that +@@ -3152,13 +3153,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, + struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + unsigned char *buf; +- const unsigned char *initiator_str; ++ unsigned char initiator_str[TRANSPORT_IQN_LEN]; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { }; + u32 tid_len, tmp_tid_len; + int new_reg = 0, type, scope, matching_iname; + sense_reason_t ret; + unsigned short rtpi; + unsigned char proto_ident; ++ bool tid_found; + + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); +@@ -3277,9 +3279,9 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, + ret = TCM_INVALID_PARAMETER_LIST; + goto out; + } +- initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, +- &buf[24], &tmp_tid_len, &iport_ptr); +- if (!initiator_str) { ++ tid_found = target_parse_pr_out_transport_id(dest_se_tpg, ++ &buf[24], &tmp_tid_len, &iport_ptr, initiator_str); ++ if (!tid_found) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + " initiator_str from Transport ID\n"); + ret = TCM_INVALID_PARAMETER_LIST; +diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +index 78c5cfe6a0c0f5..eeccf905f83edd 100644 +--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c ++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +@@ -1,6 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* + * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + + #include +@@ -16,6 +17,7 @@ + + #include "../thermal_hwmon.h" + ++#define QPNP_TM_REG_DIG_MINOR 0x00 + #define QPNP_TM_REG_DIG_MAJOR 0x01 + #define QPNP_TM_REG_TYPE 0x04 + #define QPNP_TM_REG_SUBTYPE 0x05 +@@ -31,7 +33,7 @@ + #define STATUS_GEN2_STATE_MASK GENMASK(6, 4) + #define STATUS_GEN2_STATE_SHIFT 4 + +-#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6) ++#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2 BIT(6) + #define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0) + + #define SHUTDOWN_CTRL1_RATE_25HZ BIT(3) +@@ -79,6 +81,7 @@ struct qpnp_tm_chip { + /* protects .thresh, .stage and chip registers */ + struct mutex lock; + bool initialized; ++ bool require_stage2_shutdown; + + struct iio_channel *adc; + const long (*temp_map)[THRESH_COUNT][STAGE_COUNT]; +@@ -221,13 +224,13 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, + { + long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1]; + long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1]; +- bool disable_s2_shutdown = false; ++ bool disable_stage2_shutdown = false; + u8 reg; + + WARN_ON(!mutex_is_locked(&chip->lock)); + + /* +- * Default: S2 and S3 shutdown enabled, thresholds at ++ * Default: Stage 2 and Stage 3 shutdown enabled, thresholds at + * lowest threshold set, monitoring at 25Hz + */ + reg = SHUTDOWN_CTRL1_RATE_25HZ; +@@ -242,12 +245,12 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, + chip->thresh = THRESH_MAX - + ((stage2_threshold_max - temp) / + TEMP_THRESH_STEP); +- disable_s2_shutdown = true; ++ disable_stage2_shutdown = true; + } else { + chip->thresh = THRESH_MAX; + + if (chip->adc) +- disable_s2_shutdown = true; ++ disable_stage2_shutdown = true; + else + dev_warn(chip->dev, + "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n", +@@ -256,8 +259,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, + + skip: + reg |= chip->thresh; +- if (disable_s2_shutdown) +- reg |= SHUTDOWN_CTRL1_OVERRIDE_S2; ++ if (disable_stage2_shutdown && !chip->require_stage2_shutdown) ++ reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2; + + return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); + } +@@ -371,8 +374,8 @@ static int qpnp_tm_probe(struct platform_device *pdev) + { + struct qpnp_tm_chip *chip; + struct device_node *node; +- u8 type, subtype, dig_major; +- u32 res; ++ u8 type, subtype, dig_major, dig_minor; ++ u32 res, dig_revision; + int ret, irq; + + node = pdev->dev.of_node; +@@ -424,6 +427,11 @@ static int qpnp_tm_probe(struct platform_device *pdev) + return dev_err_probe(&pdev->dev, ret, + "could not read dig_major\n"); + ++ ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MINOR, &dig_minor); ++ if (ret < 0) ++ return dev_err_probe(&pdev->dev, ret, ++ "could not read dig_minor\n"); ++ + if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1 + && subtype != QPNP_TM_SUBTYPE_GEN2)) { + dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n", +@@ -437,6 +445,23 @@ static int qpnp_tm_probe(struct platform_device *pdev) + else + chip->temp_map = &temp_map_gen1; + ++ if (chip->subtype == QPNP_TM_SUBTYPE_GEN2) { ++ dig_revision = (dig_major << 8) | dig_minor; ++ /* ++ * Check if stage 2 automatic partial shutdown must remain ++ * enabled to avoid potential repeated faults upon reaching ++ * over-temperature stage 3. 
++ */ ++ switch (dig_revision) { ++ case 0x0001: ++ case 0x0002: ++ case 0x0100: ++ case 0x0101: ++ chip->require_stage2_shutdown = true; ++ break; ++ } ++ } ++ + /* + * Register the sensor before initializing the hardware to be able to + * read the trip points. get_temp() returns the default temperature +diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c +index eef40d4f306394..0dea605faadbcc 100644 +--- a/drivers/thermal/thermal_sysfs.c ++++ b/drivers/thermal/thermal_sysfs.c +@@ -39,10 +39,13 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf) + + ret = thermal_zone_get_temp(tz, &temperature); + +- if (ret) +- return ret; ++ if (!ret) ++ return sprintf(buf, "%d\n", temperature); + +- return sprintf(buf, "%d\n", temperature); ++ if (ret == -EAGAIN) ++ return -ENODATA; ++ ++ return ret; + } + + static ssize_t +diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c +index 31f3da4e6a08df..73be797acd5092 100644 +--- a/drivers/thunderbolt/domain.c ++++ b/drivers/thunderbolt/domain.c +@@ -36,7 +36,7 @@ static bool match_service_id(const struct tb_service_id *id, + return false; + } + +- if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) { ++ if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) { + if (id->protocol_revision != svc->prtcrevs) + return false; + } +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index d5ad6cae6b652b..23aed9e89e3047 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2375,9 +2375,8 @@ int serial8250_do_startup(struct uart_port *port) + /* + * Now, initialize the UART + */ +- serial_port_out(port, UART_LCR, UART_LCR_WLEN8); +- + spin_lock_irqsave(&port->lock, flags); ++ serial_port_out(port, UART_LCR, UART_LCR_WLEN8); + if (up->port.flags & UPF_FOURPORT) { + if (!up->port.irq) + up->port.mctrl |= TIOCM_OUT1; +diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped +index 0c043e4f292e8a..6af7bf8d5460c5 100644 +--- a/drivers/tty/vt/defkeymap.c_shipped ++++ b/drivers/tty/vt/defkeymap.c_shipped +@@ -23,6 +23,22 @@ unsigned short plain_map[NR_KEYS] = { + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + static unsigned short shift_map[NR_KEYS] = { +@@ -42,6 +58,22 @@ static unsigned short 
shift_map[NR_KEYS] = { + 0xf20b, 0xf601, 0xf602, 0xf117, 0xf600, 0xf20a, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + static unsigned short altgr_map[NR_KEYS] = { +@@ -61,6 +93,22 @@ static unsigned short altgr_map[NR_KEYS] = { + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + static unsigned short ctrl_map[NR_KEYS] = { +@@ -80,6 +128,22 @@ static unsigned short ctrl_map[NR_KEYS] = { + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + static unsigned short shift_ctrl_map[NR_KEYS] = { +@@ -99,6 +163,22 @@ static unsigned short shift_ctrl_map[NR_KEYS] = { + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + static unsigned short alt_map[NR_KEYS] = { +@@ -118,6 +198,22 @@ static unsigned short alt_map[NR_KEYS] = { + 0xf118, 0xf210, 0xf211, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + static unsigned short ctrl_alt_map[NR_KEYS] = { +@@ -137,6 +233,22 @@ static unsigned short ctrl_alt_map[NR_KEYS] = { + 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf20c, + 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 
0xf11d, + 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, ++ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, + }; + + unsigned short *key_maps[MAX_NR_KEYMAPS] = { +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index 358f216c6cd6ee..18b3c197c1349b 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -1496,7 +1496,7 @@ static void kbd_keycode(unsigned int keycode, int down, bool hw_raw) + rc = atomic_notifier_call_chain(&keyboard_notifier_list, + KBD_UNICODE, ¶m); + if (rc != NOTIFY_STOP) +- if (down && !raw_mode) ++ if (down && !(raw_mode || kbd->kbdmode == VC_OFF)) + k_unicode(vc, keysym, !down); + return; + } +diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h +index dffc932285ac50..e3346660905243 100644 +--- a/drivers/ufs/core/ufshcd-priv.h ++++ b/drivers/ufs/core/ufshcd-priv.h +@@ -326,7 +326,7 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba) + + static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba) + { +- return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev, true); ++ return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev); + } + + static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) +diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c +index f61126189876e9..14c1b855e10ad2 100644 +--- a/drivers/ufs/host/ufs-exynos.c ++++ b/drivers/ufs/host/ufs-exynos.c +@@ -1028,8 +1028,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) + hci_writel(ufs, 0xa, HCI_DATA_REORDER); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); +- hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); +- hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); ++ hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); ++ hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); + + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) +diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c +index 248a49e5e7f357..c38ea3395b2c10 100644 +--- a/drivers/ufs/host/ufshcd-pci.c ++++ b/drivers/ufs/host/ufshcd-pci.c +@@ -213,6 +213,32 @@ static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba) + return ret; + } + ++static void ufs_intel_ctrl_uic_compl(struct ufs_hba *hba, bool enable) ++{ ++ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); ++ ++ if (enable) ++ set |= UIC_COMMAND_COMPL; ++ 
else ++ set &= ~UIC_COMMAND_COMPL; ++ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); ++} ++ ++static void ufs_intel_mtl_h8_notify(struct ufs_hba *hba, ++ enum uic_cmd_dme cmd, ++ enum ufs_notify_change_status status) ++{ ++ /* ++ * Disable UIC COMPL INTR to prevent access to UFSHCI after ++ * checking HCS.UPMCRS ++ */ ++ if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) ++ ufs_intel_ctrl_uic_compl(hba, false); ++ ++ if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) ++ ufs_intel_ctrl_uic_compl(hba, true); ++} ++ + #define INTEL_ACTIVELTR 0x804 + #define INTEL_IDLELTR 0x808 + +@@ -439,10 +465,23 @@ static int ufs_intel_adl_init(struct ufs_hba *hba) + return ufs_intel_common_init(hba); + } + ++static void ufs_intel_mtl_late_init(struct ufs_hba *hba) ++{ ++ hba->rpm_lvl = UFS_PM_LVL_2; ++ hba->spm_lvl = UFS_PM_LVL_2; ++} ++ + static int ufs_intel_mtl_init(struct ufs_hba *hba) + { ++ struct ufs_host *ufs_host; ++ int err; ++ + hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; +- return ufs_intel_common_init(hba); ++ err = ufs_intel_common_init(hba); ++ /* Get variant after it is set in ufs_intel_common_init() */ ++ ufs_host = ufshcd_get_variant(hba); ++ ufs_host->late_init = ufs_intel_mtl_late_init; ++ return err; + } + + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { +@@ -487,6 +526,7 @@ static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = { + .init = ufs_intel_mtl_init, + .exit = ufs_intel_common_exit, + .hce_enable_notify = ufs_intel_hce_enable_notify, ++ .hibern8_notify = ufs_intel_mtl_h8_notify, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c +index 1443e9cf631a6e..db9a8f2731f1a4 100644 +--- a/drivers/usb/atm/cxacru.c ++++ b/drivers/usb/atm/cxacru.c +@@ -980,25 +980,60 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw, + return ret; + } + +-static void cxacru_upload_firmware(struct cxacru_data *instance, +- const struct firmware *fw, +- const struct firmware *bp) ++ ++static int cxacru_find_firmware(struct cxacru_data *instance, ++ char *phase, const struct firmware **fw_p) + { +- int ret; ++ struct usbatm_data *usbatm = instance->usbatm; ++ struct device *dev = &usbatm->usb_intf->dev; ++ char buf[16]; ++ ++ sprintf(buf, "cxacru-%s.bin", phase); ++ usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf); ++ ++ if (request_firmware(fw_p, buf, dev)) { ++ usb_dbg(usbatm, "no stage %s firmware found\n", phase); ++ return -ENOENT; ++ } ++ ++ usb_info(usbatm, "found firmware %s\n", buf); ++ ++ return 0; ++} ++ ++static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, ++ struct usb_interface *usb_intf) ++{ ++ const struct firmware *fw, *bp; ++ struct cxacru_data *instance = usbatm_instance->driver_data; + struct usbatm_data *usbatm = instance->usbatm; + struct usb_device *usb_dev = usbatm->usb_dev; + __le16 signature[] = { usb_dev->descriptor.idVendor, + usb_dev->descriptor.idProduct }; + __le32 val; ++ int ret; + +- usb_dbg(usbatm, "%s\n", __func__); ++ ret = cxacru_find_firmware(instance, "fw", &fw); ++ if (ret) { ++ usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n"); ++ return ret; ++ } ++ ++ if (instance->modem_type->boot_rom_patch) { ++ ret = cxacru_find_firmware(instance, "bp", &bp); ++ if (ret) { ++ usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n"); ++ release_firmware(fw); 
++ return ret; ++ } ++ } + + /* FirmwarePllFClkValue */ + val = cpu_to_le32(instance->modem_type->pll_f_clk); + ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4); + if (ret) { + usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret); +- return; ++ goto done; + } + + /* FirmwarePllBClkValue */ +@@ -1006,7 +1041,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4); + if (ret) { + usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret); +- return; ++ goto done; + } + + /* Enable SDRAM */ +@@ -1014,7 +1049,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4); + if (ret) { + usb_err(usbatm, "Enable SDRAM failed: %d\n", ret); +- return; ++ goto done; + } + + /* Firmware */ +@@ -1022,7 +1057,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); + if (ret) { + usb_err(usbatm, "Firmware upload failed: %d\n", ret); +- return; ++ goto done; + } + + /* Boot ROM patch */ +@@ -1031,7 +1066,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); + if (ret) { + usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); +- return; ++ goto done; + } + } + +@@ -1039,7 +1074,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4); + if (ret) { + usb_err(usbatm, "Signature storing failed: %d\n", ret); +- return; ++ goto done; + } + + usb_info(usbatm, "starting device\n"); +@@ -1051,7 +1086,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + } + if (ret) { + usb_err(usbatm, "Passing control to firmware failed: %d\n", ret); +- return; ++ goto done; + } + + /* Delay to allow firmware to start up. 
*/ +@@ -1065,53 +1100,10 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, + ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0); + if (ret < 0) { + usb_err(usbatm, "modem failed to initialize: %d\n", ret); +- return; +- } +-} +- +-static int cxacru_find_firmware(struct cxacru_data *instance, +- char *phase, const struct firmware **fw_p) +-{ +- struct usbatm_data *usbatm = instance->usbatm; +- struct device *dev = &usbatm->usb_intf->dev; +- char buf[16]; +- +- sprintf(buf, "cxacru-%s.bin", phase); +- usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf); +- +- if (request_firmware(fw_p, buf, dev)) { +- usb_dbg(usbatm, "no stage %s firmware found\n", phase); +- return -ENOENT; +- } +- +- usb_info(usbatm, "found firmware %s\n", buf); +- +- return 0; +-} +- +-static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, +- struct usb_interface *usb_intf) +-{ +- const struct firmware *fw, *bp; +- struct cxacru_data *instance = usbatm_instance->driver_data; +- int ret = cxacru_find_firmware(instance, "fw", &fw); +- +- if (ret) { +- usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n"); +- return ret; ++ goto done; + } + +- if (instance->modem_type->boot_rom_patch) { +- ret = cxacru_find_firmware(instance, "bp", &bp); +- if (ret) { +- usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n"); +- release_firmware(fw); +- return ret; +- } +- } +- +- cxacru_upload_firmware(instance, fw, bp); +- ++done: + if (instance->modem_type->boot_rom_patch) + release_firmware(bp); + release_firmware(fw); +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index c1d7d87b32cc5a..f0c87c914149d4 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1520,6 +1520,12 @@ static int acm_probe(struct usb_interface *intf, + goto err_remove_files; + } + ++ if (quirks & CLEAR_HALT_CONDITIONS) { ++ /* errors intentionally ignored */ ++ usb_clear_halt(usb_dev, acm->in); ++ usb_clear_halt(usb_dev, acm->out); ++ } ++ + tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, + &control_interface->dev); + if (IS_ERR(tty_dev)) { +@@ -1527,11 +1533,6 @@ static int acm_probe(struct usb_interface *intf, + goto err_release_data_interface; + } + +- if (quirks & CLEAR_HALT_CONDITIONS) { +- usb_clear_halt(usb_dev, acm->in); +- usb_clear_halt(usb_dev, acm->out); +- } +- + dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); + + return 0; +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 847dd32c0f5e28..3180419424c064 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -81,8 +81,14 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, + */ + desc = (struct usb_ss_ep_comp_descriptor *) buffer; + +- if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || +- size < USB_DT_SS_EP_COMP_SIZE) { ++ if (size < USB_DT_SS_EP_COMP_SIZE) { ++ dev_notice(ddev, ++ "invalid SuperSpeed endpoint companion descriptor " ++ "of length %d, skipping\n", size); ++ return; ++ } ++ ++ if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { + dev_notice(ddev, "No SuperSpeed endpoint companion for config %d " + " interface %d altsetting %d ep %d: " + "using minimum values\n", +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index 12b6dfeaf658c9..0b2a3f645d2fe2 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -2152,7 +2152,7 @@ static struct urb 
*request_single_step_set_feature_urb( + urb->complete = usb_ehset_completion; + urb->status = -EINPROGRESS; + urb->actual_length = 0; +- urb->transfer_flags = URB_DIR_IN; ++ urb->transfer_flags = URB_DIR_IN | URB_NO_TRANSFER_DMA_MAP; + usb_get_urb(urb); + atomic_inc(&urb->use_count); + atomic_inc(&urb->dev->urbnum); +@@ -2216,9 +2216,15 @@ int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) + + /* Complete remaining DATA and STATUS stages using the same URB */ + urb->status = -EINPROGRESS; ++ urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; + usb_get_urb(urb); + atomic_inc(&urb->use_count); + atomic_inc(&urb->dev->urbnum); ++ if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) { ++ usb_put_urb(urb); ++ goto out1; ++ } ++ + retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0); + if (!retval && !wait_for_completion_timeout(&done, + msecs_to_jiffies(2000))) { +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 46db600fdd824e..bfd97cad8aa4d7 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -371,6 +371,7 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + + /* SanDisk Corp. SanDisk 3.2Gen1 */ ++ { USB_DEVICE(0x0781, 0x5596), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* SanDisk Extreme 55AE */ +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c +index 7576920e2d5a3e..9f202f575cecce 100644 +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -500,7 +500,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + + /* Check that the pipe's type matches the endpoint's type */ + if (usb_pipe_type_check(urb->dev, urb->pipe)) +- dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", ++ dev_warn_once(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", + usb_pipetype(urb->pipe), pipetypes[xfertype]); + + /* Check against a simple/standard policy */ +diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c +index a1e15f2fffdbff..b53468c41f6771 100644 +--- a/drivers/usb/dwc3/dwc3-imx8mp.c ++++ b/drivers/usb/dwc3/dwc3-imx8mp.c +@@ -244,7 +244,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) + IRQF_ONESHOT, dev_name(dev), dwc3_imx); + if (err) { + dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err); +- goto depopulate; ++ goto put_dwc3; + } + + device_set_wakeup_capable(dev, true); +@@ -252,6 +252,8 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) + + return 0; + ++put_dwc3: ++ put_device(&dwc3_imx->dwc3->dev); + depopulate: + of_platform_depopulate(dev); + err_node_put: +@@ -272,6 +274,8 @@ static void dwc3_imx8mp_remove(struct platform_device *pdev) + struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + ++ put_device(&dwc3_imx->dwc3->dev); ++ + pm_runtime_get_sync(dev); + of_platform_depopulate(dev); + +diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c +index 2c07c038b584dc..6ea1a876203d9a 100644 +--- a/drivers/usb/dwc3/dwc3-meson-g12a.c ++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c +@@ -837,6 +837,9 @@ static void dwc3_meson_g12a_remove(struct platform_device *pdev) + + usb_role_switch_unregister(priv->role_switch); + ++ put_device(priv->switch_desc.udc); ++ put_device(priv->switch_desc.usb2_port); ++ + of_platform_depopulate(dev); + + for (i = 0 ; i < PHY_COUNT ; ++i) { +diff --git a/drivers/usb/dwc3/dwc3-pci.c 
b/drivers/usb/dwc3/dwc3-pci.c +index 54a4ee2b90b7f4..39c72cb52ce76a 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -41,6 +41,7 @@ + #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee + #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee + #define PCI_DEVICE_ID_INTEL_JSP 0x4dee ++#define PCI_DEVICE_ID_INTEL_WCL 0x4d7e + #define PCI_DEVICE_ID_INTEL_ADL 0x460e + #define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee + #define PCI_DEVICE_ID_INTEL_ADLN 0x465e +@@ -431,6 +432,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { + { PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) }, ++ { PCI_DEVICE_DATA(INTEL, WCL, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) }, +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c +index 874497f86499b3..876a839f2d1d09 100644 +--- a/drivers/usb/dwc3/ep0.c ++++ b/drivers/usb/dwc3/ep0.c +@@ -288,7 +288,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) + dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8, + DWC3_TRBCTL_CONTROL_SETUP, false); + ret = dwc3_ep0_start_trans(dep); +- WARN_ON(ret < 0); ++ if (ret < 0) ++ dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret); ++ + for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) { + struct dwc3_ep *dwc3_ep; + +@@ -1061,7 +1063,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, + ret = dwc3_ep0_start_trans(dep); + } + +- WARN_ON(ret < 0); ++ if (ret < 0) ++ dev_err(dwc->dev, ++ "ep0 data phase start transfer failed: %d\n", ret); + } + + static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) +@@ -1078,7 +1082,12 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) + + static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) + { +- WARN_ON(dwc3_ep0_start_control_status(dep)); ++ int ret; ++ ++ ret = dwc3_ep0_start_control_status(dep); ++ if (ret) ++ dev_err(dwc->dev, ++ "ep0 status phase start transfer failed: %d\n", ret); + } + + static void dwc3_ep0_do_control_status(struct dwc3 *dwc, +@@ -1121,7 +1130,10 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) + cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); + memset(¶ms, 0, sizeof(params)); + ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); +- WARN_ON_ONCE(ret); ++ if (ret) ++ dev_err_ratelimited(dwc->dev, ++ "ep0 data phase end transfer failed: %d\n", ret); ++ + dep->resource_index = 0; + } + +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index a17af4ab20a323..6e90f2ad0426a4 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1765,7 +1765,11 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int + dep->flags |= DWC3_EP_DELAY_STOP; + return 0; + } +- WARN_ON_ONCE(ret); ++ ++ if (ret) ++ dev_err_ratelimited(dep->dwc->dev, ++ "end transfer failed: %d\n", ret); ++ + dep->resource_index = 0; + + if (!interrupt) +@@ -3735,6 +3739,15 @@ static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep, + static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, + const struct dwc3_event_depevt *event) + { ++ /* ++ * During a device-initiated disconnect, a late xferNotReady event can ++ * be generated after the End Transfer command resets the event filter, ++ * but before the controller is halted. 
Ignore it to prevent a new ++ * transfer from starting. ++ */ ++ if (!dep->dwc->connected) ++ return; ++ + dwc3_gadget_endpoint_frame_from_event(dep, event); + + /* +@@ -4036,7 +4049,9 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) + dep->flags &= ~DWC3_EP_STALL; + + ret = dwc3_send_clear_stall_ep_cmd(dep); +- WARN_ON_ONCE(ret); ++ if (ret) ++ dev_err_ratelimited(dwc->dev, ++ "failed to clear STALL on %s\n", dep->name); + } + } + +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c +index a93ad93390ba17..34685c714473dd 100644 +--- a/drivers/usb/gadget/udc/renesas_usb3.c ++++ b/drivers/usb/gadget/udc/renesas_usb3.c +@@ -2658,6 +2658,7 @@ static void renesas_usb3_remove(struct platform_device *pdev) + struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); + + debugfs_remove_recursive(usb3->dentry); ++ put_device(usb3->host_dev); + device_remove_file(&pdev->dev, &dev_attr_role); + + cancel_work_sync(&usb3->role_work); +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index a2b6a922077ee3..d3d535ed00b50b 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -735,8 +735,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, + if (!xhci->devs[i]) + continue; + +- retval = xhci_disable_slot(xhci, i); +- xhci_free_virt_device(xhci, i); ++ retval = xhci_disable_and_free_slot(xhci, i); + if (retval) + xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", + i, retval); +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index cceb69d4f61e1c..04718048b74bd9 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -849,21 +849,20 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, + * will be manipulated by the configure endpoint, allocate device, or update + * hub functions while this function is removing the TT entries from the list. 
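
The xferNotReady guard added above bails out when the device is no longer logically connected, so a late event cannot start a fresh transfer on an endpoint that is being torn down. A minimal compilable sketch of that guard-clause shape follows; every name in it is invented for illustration and nothing here is the driver's real API.

    #include <stdbool.h>

    struct ep_dev {
        bool connected;            /* cleared when a disconnect begins */
        unsigned long handled;
    };

    /* Hardware can still deliver queued events after teardown starts;
     * returning early keeps a stale event from kicking off new work. */
    static void handle_not_ready(struct ep_dev *dev)
    {
        if (!dev->connected)
            return;                /* late event from a dead session */
        dev->handled++;            /* stands in for real processing */
    }
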
+ */ +-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) ++void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, ++ int slot_id) + { +- struct xhci_virt_device *dev; + int i; + int old_active_eps = 0; + + /* Slot ID 0 is reserved */ +- if (slot_id == 0 || !xhci->devs[slot_id]) ++ if (slot_id == 0 || !dev) + return; + +- dev = xhci->devs[slot_id]; +- +- xhci->dcbaa->dev_context_ptrs[slot_id] = 0; +- if (!dev) +- return; ++ /* If device ctx array still points to _this_ device, clear it */ ++ if (dev->out_ctx && ++ xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma)) ++ xhci->dcbaa->dev_context_ptrs[slot_id] = 0; + + trace_xhci_free_virt_device(dev); + +@@ -902,8 +901,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) + + if (dev->udev && dev->udev->slot_id) + dev->udev->slot_id = 0; +- kfree(xhci->devs[slot_id]); +- xhci->devs[slot_id] = NULL; ++ if (xhci->devs[slot_id] == dev) ++ xhci->devs[slot_id] = NULL; ++ kfree(dev); + } + + /* +@@ -945,7 +945,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i + out: + /* we are now at a leaf device */ + xhci_debugfs_remove_slot(xhci, slot_id); +- xhci_free_virt_device(xhci, slot_id); ++ xhci_free_virt_device(xhci, vdev, slot_id); + } + + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, +@@ -1182,6 +1182,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud + ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | + dev->eps[0].ring->cycle_state); + ++ ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8)); ++ + trace_xhci_setup_addressable_virt_device(dev); + + /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ +diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c +index 93f8b355bc706e..4ceed19c64f032 100644 +--- a/drivers/usb/host/xhci-pci-renesas.c ++++ b/drivers/usb/host/xhci-pci-renesas.c +@@ -47,8 +47,9 @@ + #define RENESAS_ROM_ERASE_MAGIC 0x5A65726F + #define RENESAS_ROM_WRITE_MAGIC 0x53524F4D + +-#define RENESAS_RETRY 10000 +-#define RENESAS_DELAY 10 ++#define RENESAS_RETRY 50000 /* 50000 * RENESAS_DELAY ~= 500ms */ ++#define RENESAS_CHIP_ERASE_RETRY 500000 /* 500000 * RENESAS_DELAY ~= 5s */ ++#define RENESAS_DELAY 10 + + static int renesas_fw_download_image(struct pci_dev *dev, + const u32 *fw, size_t step, bool rom) +@@ -405,7 +406,7 @@ static void renesas_rom_erase(struct pci_dev *pdev) + /* sleep a bit while ROM is erased */ + msleep(20); + +- for (i = 0; i < RENESAS_RETRY; i++) { ++ for (i = 0; i < RENESAS_CHIP_ERASE_RETRY; i++) { + retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, + &status); + status &= RENESAS_ROM_STATUS_ERASE; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 44352df58c9e4e..a21ac9d80275f7 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1338,12 +1338,15 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, + */ + void xhci_hc_died(struct xhci_hcd *xhci) + { ++ bool notify; + int i, j; + + if (xhci->xhc_state & XHCI_STATE_DYING) + return; + +- xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); ++ notify = !(xhci->xhc_state & XHCI_STATE_REMOVING); ++ if (notify) ++ xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); + xhci->xhc_state |= XHCI_STATE_DYING; + + xhci_cleanup_command_queue(xhci); +@@ -1357,7 +1360,7 @@ void xhci_hc_died(struct xhci_hcd *xhci) + } + + /* inform usb core hc died if PCI remove 
isn't already handling it */ +- if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) ++ if (notify) + usb_hc_died(xhci_to_hcd(xhci)); + } + +@@ -1584,7 +1587,8 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, + command->slot_id = 0; + } + +-static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) ++static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id, ++ u32 cmd_comp_code) + { + struct xhci_virt_device *virt_dev; + struct xhci_slot_ctx *slot_ctx; +@@ -1599,6 +1603,10 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) + if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) + /* Delete default control endpoint resources */ + xhci_free_device_endpoint_resources(xhci, virt_dev, true); ++ if (cmd_comp_code == COMP_SUCCESS) { ++ xhci->dcbaa->dev_context_ptrs[slot_id] = 0; ++ xhci->devs[slot_id] = NULL; ++ } + } + + static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, +@@ -1847,7 +1855,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, + xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); + break; + case TRB_DISABLE_SLOT: +- xhci_handle_cmd_disable_slot(xhci, slot_id); ++ xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code); + break; + case TRB_CONFIG_EP: + if (!cmd->completion) +@@ -4454,7 +4462,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + + if ((xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { +- xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); ++ xhci_dbg(xhci, "xHCI dying or halted, can't queue_command. state: 0x%x\n", ++ xhci->xhc_state); + return -ESHUTDOWN; + } + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index ce38cd2435c8c3..8fd88fedbb30ea 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -119,7 +119,8 @@ int xhci_halt(struct xhci_hcd *xhci) + ret = xhci_handshake(&xhci->op_regs->status, + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); + if (ret) { +- xhci_warn(xhci, "Host halt failed, %d\n", ret); ++ if (!(xhci->xhc_state & XHCI_STATE_DYING)) ++ xhci_warn(xhci, "Host halt failed, %d\n", ret); + return ret; + } + +@@ -178,7 +179,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) + state = readl(&xhci->op_regs->status); + + if (state == ~(u32)0) { +- xhci_warn(xhci, "Host not accessible, reset failed.\n"); ++ if (!(xhci->xhc_state & XHCI_STATE_DYING)) ++ xhci_warn(xhci, "Host not accessible, reset failed.\n"); + return -ENODEV; + } + +@@ -3758,8 +3760,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, + * Obtaining a new device slot to inform the xHCI host that + * the USB device has been reset. 
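
The xhci_hc_died() hunk above samples the REMOVING bit into a local notify flag before setting DYING, then reuses that single decision for both the error message and the usb_hc_died() call. A small stand-alone sketch of the sample-then-mutate shape, with invented names:

    #include <stdbool.h>

    #define ST_DYING    (1u << 0)
    #define ST_REMOVING (1u << 1)

    static bool reported;                       /* stands in for usb_hc_died() */

    static void mark_dead(unsigned int *state)
    {
        bool notify = !(*state & ST_REMOVING);  /* decide once, up front */

        *state |= ST_DYING;                     /* mutate only afterwards */
        if (notify)
            reported = true;                    /* stay quiet during removal */
    }
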
+ */ +- ret = xhci_disable_slot(xhci, udev->slot_id); +- xhci_free_virt_device(xhci, udev->slot_id); ++ ret = xhci_disable_and_free_slot(xhci, udev->slot_id); + if (!ret) { + ret = xhci_alloc_dev(hcd, udev); + if (ret == 1) +@@ -3914,7 +3915,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) + xhci_disable_slot(xhci, udev->slot_id); + + spin_lock_irqsave(&xhci->lock, flags); +- xhci_free_virt_device(xhci, udev->slot_id); ++ xhci_free_virt_device(xhci, virt_dev, udev->slot_id); + spin_unlock_irqrestore(&xhci->lock, flags); + + } +@@ -3963,6 +3964,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) + return 0; + } + ++int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id) ++{ ++ struct xhci_virt_device *vdev = xhci->devs[slot_id]; ++ int ret; ++ ++ ret = xhci_disable_slot(xhci, slot_id); ++ xhci_free_virt_device(xhci, vdev, slot_id); ++ return ret; ++} ++ + /* + * Checks if we have enough host controller resources for the default control + * endpoint. +@@ -4069,8 +4080,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) + return 1; + + disable_slot: +- xhci_disable_slot(xhci, udev->slot_id); +- xhci_free_virt_device(xhci, udev->slot_id); ++ xhci_disable_and_free_slot(xhci, udev->slot_id); + + return 0; + } +@@ -4206,8 +4216,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, + dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); + + mutex_unlock(&xhci->mutex); +- ret = xhci_disable_slot(xhci, udev->slot_id); +- xhci_free_virt_device(xhci, udev->slot_id); ++ ret = xhci_disable_and_free_slot(xhci, udev->slot_id); + if (!ret) { + if (xhci_alloc_dev(hcd, udev) == 1) + xhci_setup_addressable_virt_dev(xhci, udev); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 159cdfc7129070..808f2ee43b9444 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1798,7 +1798,7 @@ void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), + /* xHCI memory management */ + void xhci_mem_cleanup(struct xhci_hcd *xhci); + int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); +-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); ++void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, int slot_id); + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); + int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); + void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, +@@ -1895,6 +1895,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); + int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); + int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); ++int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id); + int xhci_ext_cap_init(struct xhci_hcd *xhci); + + int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); +diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c +index b4a4c1df4e0d96..a4668c6d575dcf 100644 +--- a/drivers/usb/musb/omap2430.c ++++ b/drivers/usb/musb/omap2430.c +@@ -400,7 +400,7 @@ static int omap2430_probe(struct platform_device *pdev) + ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); + if (ret) { + dev_err(&pdev->dev, "failed to add resources\n"); +- goto err2; ++ goto err_put_control_otghs; + } + + if (populate_irqs) { +@@ -413,7 +413,7 @@ static int 
omap2430_probe(struct platform_device *pdev) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -EINVAL; +- goto err2; ++ goto err_put_control_otghs; + } + + musb_res[i].start = res->start; +@@ -441,14 +441,14 @@ static int omap2430_probe(struct platform_device *pdev) + ret = platform_device_add_resources(musb, musb_res, i); + if (ret) { + dev_err(&pdev->dev, "failed to add IRQ resources\n"); +- goto err2; ++ goto err_put_control_otghs; + } + } + + ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); + if (ret) { + dev_err(&pdev->dev, "failed to add platform_data\n"); +- goto err2; ++ goto err_put_control_otghs; + } + + pm_runtime_enable(glue->dev); +@@ -463,7 +463,9 @@ static int omap2430_probe(struct platform_device *pdev) + + err3: + pm_runtime_disable(glue->dev); +- ++err_put_control_otghs: ++ if (!IS_ERR(glue->control_otghs)) ++ put_device(glue->control_otghs); + err2: + platform_device_put(musb); + +@@ -477,6 +479,8 @@ static void omap2430_remove(struct platform_device *pdev) + + platform_device_unregister(glue->musb); + pm_runtime_disable(glue->dev); ++ if (!IS_ERR(glue->control_otghs)) ++ put_device(glue->control_otghs); + } + + #ifdef CONFIG_PM +diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c +index 0c423916d7bfa4..a026c6cb6e684b 100644 +--- a/drivers/usb/storage/realtek_cr.c ++++ b/drivers/usb/storage/realtek_cr.c +@@ -252,7 +252,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun, + return USB_STOR_TRANSPORT_ERROR; + } + +- residue = bcs->Residue; ++ residue = le32_to_cpu(bcs->Residue); + if (bcs->Tag != us->tag) + return USB_STOR_TRANSPORT_ERROR; + +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 54f0b1c83317cd..dfa5276a5a43e2 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -934,6 +934,13 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SANE_SENSE ), + ++/* Added by Maël GUERIN */ ++UNUSUAL_DEV( 0x0603, 0x8611, 0x0000, 0xffff, ++ "Novatek", ++ "NTK96550-based camera", ++ USB_SC_SCSI, USB_PR_BULK, NULL, ++ US_FL_BULK_IGNORE_TAG ), ++ + /* + * Reported by Hanno Boeck + * Taken from the Lycoris Kernel +@@ -1494,6 +1501,28 @@ UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_WP_DETECT ), + ++/* ++ * Reported by Zenm Chen ++ * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch ++ * the device into Wi-Fi mode. ++ */ ++UNUSUAL_DEV( 0x0bda, 0x1a2b, 0x0000, 0xffff, ++ "Realtek", ++ "DISK", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_IGNORE_DEVICE ), ++ ++/* ++ * Reported by Zenm Chen ++ * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch ++ * the device into Wi-Fi mode. 
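
The realtek_cr hunk above wraps the wire-format residue field in le32_to_cpu() before using it, since the device always sends it little-endian. The same load can be written in portable plain C as a byte-wise assembly that is correct on both little- and big-endian hosts; this stand-alone sketch only illustrates what the kernel helper guarantees:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-wise little-endian load: no dependence on host byte order. */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        const uint8_t wire[4] = { 0x78, 0x56, 0x34, 0x12 };

        printf("0x%08x\n", get_le32(wire));    /* 0x12345678 everywhere */
        return 0;
    }
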
++ */ ++UNUSUAL_DEV( 0x0bda, 0xa192, 0x0000, 0xffff, ++ "Realtek", ++ "DISK", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_IGNORE_DEVICE ), ++ + UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, + "Maxtor", + "USB to SATA", +diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c +index 60ed1f809130d8..a174ff7a9abd39 100644 +--- a/drivers/usb/typec/mux/intel_pmc_mux.c ++++ b/drivers/usb/typec/mux/intel_pmc_mux.c +@@ -730,7 +730,7 @@ static int pmc_usb_probe(struct platform_device *pdev) + + pmc->ipc = devm_intel_scu_ipc_dev_get(&pdev->dev); + if (!pmc->ipc) +- return -ENODEV; ++ return -EPROBE_DEFER; + + pmc->dev = &pdev->dev; + +diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c +index bc21006e979c66..03749a392fdbd5 100644 +--- a/drivers/usb/typec/tcpm/fusb302.c ++++ b/drivers/usb/typec/tcpm/fusb302.c +@@ -103,6 +103,7 @@ struct fusb302_chip { + bool vconn_on; + bool vbus_on; + bool charge_on; ++ bool pd_rx_on; + bool vbus_present; + enum typec_cc_polarity cc_polarity; + enum typec_cc_status cc1; +@@ -841,6 +842,11 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on) + int ret = 0; + + mutex_lock(&chip->lock); ++ if (chip->pd_rx_on == on) { ++ fusb302_log(chip, "pd is already %s", str_on_off(on)); ++ goto done; ++ } ++ + ret = fusb302_pd_rx_flush(chip); + if (ret < 0) { + fusb302_log(chip, "cannot flush pd rx buffer, ret=%d", ret); +@@ -863,6 +869,8 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on) + on ? "on" : "off", ret); + goto done; + } ++ ++ chip->pd_rx_on = on; + fusb302_log(chip, "pd := %s", on ? "on" : "off"); + done: + mutex_unlock(&chip->lock); +diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c +index 60f90272fed315..e37bf26cca805c 100644 +--- a/drivers/usb/typec/tcpm/maxim_contaminant.c ++++ b/drivers/usb/typec/tcpm/maxim_contaminant.c +@@ -5,6 +5,7 @@ + * USB-C module to reduce wakeups due to contaminants. + */ + ++#include + #include + #include + #include +@@ -189,6 +190,11 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven + if (ret < 0) + return ret; + ++ /* Disable low power mode */ ++ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK, ++ FIELD_PREP(CCLPMODESEL_MASK, ++ LOW_POWER_MODE_DISABLE)); ++ + /* Sleep to allow comparators settle */ + usleep_range(5000, 6000); + ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1); +@@ -322,6 +328,34 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) + return 0; + } + ++static int max_contaminant_enable_toggling(struct max_tcpci_chip *chip) ++{ ++ struct regmap *regmap = chip->data.regmap; ++ int ret; ++ ++ /* Disable dry detection if enabled. 
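
The intel_pmc_mux change above returns -EPROBE_DEFER rather than -ENODEV when the SCU IPC handle is not available yet: deferral asks the driver core to retry the probe after more drivers bind, while -ENODEV would fail it for good. A sketch of the distinction with a stubbed provider; none of these names belong to the real driver:

    #include <errno.h>                  /* ENODEV */
    #include <stddef.h>

    #define EPROBE_DEFER 517            /* kernel-internal: retry probe later */

    struct ipc_handle { int unused; };

    /* Stub lookup: returns NULL while the provider is still unbound. */
    static struct ipc_handle *ipc_try_get(void)
    {
        return NULL;
    }

    static int example_probe(struct ipc_handle **out)
    {
        *out = ipc_try_get();
        if (!*out)
            return -EPROBE_DEFER;       /* "not yet", not "never" */
        return 0;
    }
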
*/ ++ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK, ++ ULTRA_LOW_POWER_MODE); ++ if (ret) ++ return ret; ++ ++ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, 0); ++ if (ret) ++ return ret; ++ ++ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP | 0xA); ++ if (ret) ++ return ret; ++ ++ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, ++ TCPC_TCPC_CTRL_EN_LK4CONN_ALRT, ++ TCPC_TCPC_CTRL_EN_LK4CONN_ALRT); ++ if (ret) ++ return ret; ++ ++ return max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION); ++} ++ + bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce) + { + u8 cc_status, pwr_cntl; +@@ -335,6 +369,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect + if (ret < 0) + return false; + ++ if (cc_status & TCPC_CC_STATUS_TOGGLING) { ++ if (chip->contaminant_state == DETECTED) ++ return true; ++ return false; ++ } ++ + if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) { + if (!disconnect_while_debounce) + msleep(100); +@@ -367,6 +407,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect + max_contaminant_enable_dry_detection(chip); + return true; + } ++ ++ ret = max_contaminant_enable_toggling(chip); ++ if (ret) ++ dev_err(chip->dev, ++ "Failed to enable toggling, ret=%d", ++ ret); + } + return false; + } else if (chip->contaminant_state == DETECTED) { +@@ -375,6 +421,14 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect + if (chip->contaminant_state == DETECTED) { + max_contaminant_enable_dry_detection(chip); + return true; ++ } else { ++ ret = max_contaminant_enable_toggling(chip); ++ if (ret) { ++ dev_err(chip->dev, ++ "Failed to enable toggling, ret=%d", ++ ret); ++ return true; ++ } + } + } + } +diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h +index 2c1c4d161b0dcb..861801cc456ff4 100644 +--- a/drivers/usb/typec/tcpm/tcpci_maxim.h ++++ b/drivers/usb/typec/tcpm/tcpci_maxim.h +@@ -21,6 +21,7 @@ + #define CCOVPDIS BIT(6) + #define SBURPCTRL BIT(5) + #define CCLPMODESEL_MASK GENMASK(4, 3) ++#define LOW_POWER_MODE_DISABLE 0 + #define ULTRA_LOW_POWER_MODE BIT(3) + #define CCRPCTRL_MASK GENMASK(2, 0) + #define UA_1_SRC 1 +diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c +index b35c6e07911e90..9b0157063df0a3 100644 +--- a/drivers/usb/typec/ucsi/psy.c ++++ b/drivers/usb/typec/ucsi/psy.c +@@ -163,7 +163,7 @@ static int ucsi_psy_get_current_max(struct ucsi_connector *con, + case UCSI_CONSTAT_PWR_OPMODE_DEFAULT: + /* UCSI can't tell b/w DCP/CDP or USB2/3x1/3x2 SDP chargers */ + default: +- val->intval = 0; ++ val->intval = UCSI_TYPEC_DEFAULT_CURRENT * 1000; + break; + } + return 0; +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index ea98bc5674940d..e5c001ee0cd7c0 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -910,6 +910,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) + + if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) { + typec_set_pwr_role(con->port, role); ++ ucsi_port_psy_changed(con); + + /* Complete pending power role swap */ + if (!completion_done(&con->complete)) +diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h +index 7706f4e951258b..51e745117dcbac 100644 +--- a/drivers/usb/typec/ucsi/ucsi.h ++++ b/drivers/usb/typec/ucsi/ucsi.h +@@ -340,9 +340,10 @@ struct ucsi { + 
#define UCSI_MAX_SVID 5 + #define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6) + +-#define UCSI_TYPEC_VSAFE5V 5000 +-#define UCSI_TYPEC_1_5_CURRENT 1500 +-#define UCSI_TYPEC_3_0_CURRENT 3000 ++#define UCSI_TYPEC_VSAFE5V 5000 ++#define UCSI_TYPEC_DEFAULT_CURRENT 100 ++#define UCSI_TYPEC_1_5_CURRENT 1500 ++#define UCSI_TYPEC_3_0_CURRENT 3000 + + struct ucsi_connector { + int num; +diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c +index 2d996c913ecd56..82558fa7712e4a 100644 +--- a/drivers/vfio/pci/mlx5/cmd.c ++++ b/drivers/vfio/pci/mlx5/cmd.c +@@ -1389,8 +1389,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev, + log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size); + max_msg_size = (1ULL << log_max_msg_size); + /* The RQ must hold at least 4 WQEs/messages for successful QP creation */ +- if (rq_size < 4 * max_msg_size) +- rq_size = 4 * max_msg_size; ++ if (rq_size < 4ULL * max_msg_size) ++ rq_size = 4ULL * max_msg_size; + + memset(tracker, 0, sizeof(*tracker)); + tracker->uar = mlx5_get_uars_page(mdev); +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index 5fe7aed3672eea..f63f116b9cd0a6 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -635,6 +635,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + + while (npage) { + if (!batch->size) { ++ /* ++ * Large mappings may take a while to repeatedly refill ++ * the batch, so conditionally relinquish the CPU when ++ * needed to avoid stalls. ++ */ ++ cond_resched(); ++ + /* Empty batch, so refill it. */ + long req_pages = min_t(long, npage, batch->capacity); + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index d0238bd741b089..147cfb64bba2d5 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -2770,6 +2770,9 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, + } + r = __vhost_add_used_n(vq, heads, count); + ++ if (r < 0) ++ return r; ++ + /* Make sure buffer is written before we update index. 
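
The vfio_iommu_type1 hunk above drops a cond_resched() into the pinning loop at each batch refill, so pinning a very large mapping yields the CPU periodically instead of stalling it. The loop shape, reduced to a compilable sketch with the reschedule stubbed out and all names invented:

    /* In the kernel this would be cond_resched(); here it is a no-op. */
    static void cond_resched_stub(void) { }

    struct batch { long size, capacity; };

    static void refill(struct batch *b, long want)
    {
        b->size = want < b->capacity ? want : b->capacity;
    }

    static void pin_all(struct batch *b, long npage)
    {
        while (npage > 0) {
            if (!b->size) {
                /* One voluntary reschedule per refill bounds the time
                 * spent without yielding, however large the mapping. */
                cond_resched_stub();
                refill(b, npage);
            }
            npage -= b->size;          /* consume the whole batch */
            b->size = 0;
        }
    }
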
*/ + smp_wmb(); + if (vhost_put_used_idx(vq)) { +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c +index d94a06008ff647..2ec0d52606853a 100644 +--- a/drivers/vhost/vsock.c ++++ b/drivers/vhost/vsock.c +@@ -340,6 +340,9 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, + + len = iov_length(vq->iov, out); + ++ if (len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM) ++ return NULL; ++ + /* len contains both payload and hdr */ + skb = virtio_vsock_alloc_skb(len, GFP_KERNEL); + if (!skb) +@@ -363,8 +366,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, + return skb; + + /* The pkt is too big or the length in the header is invalid */ +- if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || +- payload_len + sizeof(*hdr) > len) { ++ if (payload_len + sizeof(*hdr) > len) { + kfree_skb(skb); + return NULL; + } +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index bbc362db40c586..14e5312a00308e 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -1139,7 +1139,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, + c->vc_screenbuf_size - delta); + c->vc_origin = vga_vram_end - c->vc_screenbuf_size; + vga_rolled_over = 0; +- } else if (oldo - delta >= (unsigned long)c->vc_screenbuf) ++ } else + c->vc_origin -= delta; + c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; + scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index ed68ba89b80b8f..58eee27aa6cca9 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -808,7 +808,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, + fg_vc->vc_rows); + } + +- update_screen(vc_cons[fg_console].d); ++ if (fg_console != unit) ++ update_screen(vc_cons[fg_console].d); + } + + /** +@@ -1353,6 +1354,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, + struct vc_data *svc; + struct fbcon_ops *ops = info->fbcon_par; + int rows, cols; ++ unsigned long ret = 0; + + p = &fb_display[unit]; + +@@ -1403,11 +1405,10 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, + rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + cols /= vc->vc_font.width; + rows /= vc->vc_font.height; +- vc_resize(vc, cols, rows); ++ ret = vc_resize(vc, cols, rows); + +- if (con_is_visible(vc)) { ++ if (con_is_visible(vc) && !ret) + update_screen(vc); +- } + } + + static __inline__ void ywrap_up(struct vc_data *vc, int count) +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index 52bd3af5436908..942b942f6bf9af 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -943,6 +943,9 @@ static int do_register_framebuffer(struct fb_info *fb_info) + if (!registered_fb[i]) + break; + ++ if (i >= FB_MAX) ++ return -ENXIO; ++ + if (!fb_info->modelist.prev || !fb_info->modelist.next) + INIT_LIST_HEAD(&fb_info->modelist); + +diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c +index e700a5ef704315..d996feb0509a19 100644 +--- a/drivers/virt/coco/efi_secret/efi_secret.c ++++ b/drivers/virt/coco/efi_secret/efi_secret.c +@@ -136,15 +136,7 @@ static int efi_secret_unlink(struct inode *dir, struct dentry *dentry) + if (s->fs_files[i] == dentry) + s->fs_files[i] = NULL; + +- /* +- * securityfs_remove tries to lock the directory's inode, but we reach +- * 
the unlink callback when it's already locked +- */ +- inode_unlock(dir); +- securityfs_remove(dentry); +- inode_lock(dir); +- +- return 0; ++ return simple_unlink(inode, dentry); + } + + static const struct inode_operations efi_secret_dir_inode_operations = { +diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c +index 84dca3695f862d..e5e6d7f159180f 100644 +--- a/drivers/watchdog/dw_wdt.c ++++ b/drivers/watchdog/dw_wdt.c +@@ -644,6 +644,8 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) + } else { + wdd->timeout = DW_WDT_DEFAULT_SECONDS; + watchdog_init_timeout(wdd, 0, dev); ++ /* Limit timeout value to hardware constraints. */ ++ dw_wdt_set_timeout(wdd, wdd->timeout); + } + + platform_set_drvdata(pdev, dw_wdt); +diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c +index dd297dcd524c9c..68973be2ce6269 100644 +--- a/drivers/watchdog/iTCO_wdt.c ++++ b/drivers/watchdog/iTCO_wdt.c +@@ -601,7 +601,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev) + /* Check that the heartbeat value is within it's range; + if not reset to the default */ + if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) { +- iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); ++ ret = iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); ++ if (ret != 0) { ++ dev_err(dev, "Failed to set watchdog timeout (%d)\n", WATCHDOG_TIMEOUT); ++ return ret; ++ } + dev_info(dev, "timeout value out of range, using %d\n", + WATCHDOG_TIMEOUT); + } +diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c +index 5f23913ce3b49c..6ce1bfb3906413 100644 +--- a/drivers/watchdog/sbsa_gwdt.c ++++ b/drivers/watchdog/sbsa_gwdt.c +@@ -75,11 +75,17 @@ + #define SBSA_GWDT_VERSION_MASK 0xF + #define SBSA_GWDT_VERSION_SHIFT 16 + ++#define SBSA_GWDT_IMPL_MASK 0x7FF ++#define SBSA_GWDT_IMPL_SHIFT 0 ++#define SBSA_GWDT_IMPL_MEDIATEK 0x426 ++ + /** + * struct sbsa_gwdt - Internal representation of the SBSA GWDT + * @wdd: kernel watchdog_device structure + * @clk: store the System Counter clock frequency, in Hz. + * @version: store the architecture version ++ * @need_ws0_race_workaround: ++ * indicate whether to adjust wdd->timeout to avoid a race with WS0 + * @refresh_base: Virtual address of the watchdog refresh frame + * @control_base: Virtual address of the watchdog control frame + */ +@@ -87,6 +93,7 @@ struct sbsa_gwdt { + struct watchdog_device wdd; + u32 clk; + int version; ++ bool need_ws0_race_workaround; + void __iomem *refresh_base; + void __iomem *control_base; + }; +@@ -161,6 +168,31 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd, + */ + sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt); + ++ /* ++ * Some watchdog hardware has a race condition where it will ignore ++ * sbsa_gwdt_keepalive() if it is called at the exact moment that a ++ * timeout occurs and WS0 is being asserted. Unfortunately, the default ++ * behavior of the watchdog core is very likely to trigger this race ++ * when action=0 because it programs WOR to be half of the desired ++ * timeout, and watchdog_next_keepalive() chooses the exact same time to ++ * send keepalive pings. ++ * ++ * This triggers a race where sbsa_gwdt_keepalive() can be called right ++ * as WS0 is being asserted, and affected hardware will ignore that ++ * write and continue to assert WS0. After another (timeout / 2) ++ * seconds, the same race happens again. If the driver wins then the ++ * explicit refresh will reset WS0 to false but if the hardware wins, ++ * then WS1 is asserted and the system resets. 
++ * ++ * Avoid the problem by scheduling keepalive heartbeats one second later ++ * than the WOR timeout. ++ * ++ * This workaround might not be needed in a future revision of the ++ * hardware. ++ */ ++ if (gwdt->need_ws0_race_workaround) ++ wdd->min_hw_heartbeat_ms = timeout * 500 + 1000; ++ + return 0; + } + +@@ -202,12 +234,15 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd) + static void sbsa_gwdt_get_version(struct watchdog_device *wdd) + { + struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); +- int ver; ++ int iidr, ver, impl; + +- ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR); +- ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK; ++ iidr = readl(gwdt->control_base + SBSA_GWDT_W_IIDR); ++ ver = (iidr >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK; ++ impl = (iidr >> SBSA_GWDT_IMPL_SHIFT) & SBSA_GWDT_IMPL_MASK; + + gwdt->version = ver; ++ gwdt->need_ws0_race_workaround = ++ !action && (impl == SBSA_GWDT_IMPL_MEDIATEK); + } + + static int sbsa_gwdt_start(struct watchdog_device *wdd) +@@ -299,6 +334,15 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) + else + wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000; + ++ if (gwdt->need_ws0_race_workaround) { ++ /* ++ * A timeout of 3 seconds means that WOR will be set to 1.5 ++ * seconds and the heartbeat will be scheduled every 2.5 ++ * seconds. ++ */ ++ wdd->min_timeout = 3; ++ } ++ + status = readl(cf_base + SBSA_GWDT_WCS); + if (status & SBSA_GWDT_WCS_WS1) { + dev_warn(dev, "System reset by WDT.\n"); +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c +index a2ba1c7fc16af4..10f803d0534e63 100644 +--- a/fs/btrfs/backref.c ++++ b/fs/btrfs/backref.c +@@ -222,8 +222,8 @@ static void free_pref(struct prelim_ref *ref) + * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 + * indicates a 'higher' block. + */ +-static int prelim_ref_compare(struct prelim_ref *ref1, +- struct prelim_ref *ref2) ++static int prelim_ref_compare(const struct prelim_ref *ref1, ++ const struct prelim_ref *ref2) + { + if (ref1->level < ref2->level) + return -1; +@@ -254,7 +254,7 @@ static int prelim_ref_compare(struct prelim_ref *ref1, + } + + static void update_share_count(struct share_check *sc, int oldcount, +- int newcount, struct prelim_ref *newref) ++ int newcount, const struct prelim_ref *newref) + { + if ((!sc) || (oldcount == 0 && newcount < 1)) + return; +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 226e6434a58a94..5a3a41c6d509f5 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -23,7 +23,7 @@ + #include "extent-tree.h" + + #ifdef CONFIG_BTRFS_DEBUG +-int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group) ++int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group) + { + struct btrfs_fs_info *fs_info = block_group->fs_info; + +@@ -34,15 +34,28 @@ int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group) + } + #endif + ++static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group) ++{ ++ /* The meta_write_pointer is available only on the zoned setup. 
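
The workaround above schedules keepalives one second after WOR expiry rather than exactly on it (WOR is loaded with half the watchdog timeout). The arithmetic from the hunk, runnable: with the 3-second minimum timeout the probe path enforces, WOR is 1.5 s and pings arrive every 2.5 s, matching the comment in sbsa_gwdt_probe().

    #include <stdio.h>

    /* min_hw_heartbeat_ms = timeout * 500 + 1000 from the hunk above:
     * half the timeout in milliseconds plus a one-second margin. */
    static unsigned int min_hw_heartbeat_ms(unsigned int timeout_s)
    {
        return timeout_s * 500 + 1000;
    }

    int main(void)
    {
        printf("timeout 3 s -> heartbeat every %u ms\n",
               min_hw_heartbeat_ms(3));        /* prints 2500 */
        return 0;
    }
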
*/ ++ if (!btrfs_is_zoned(block_group->fs_info)) ++ return false; ++ ++ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) ++ return false; ++ ++ return block_group->start + block_group->alloc_offset > ++ block_group->meta_write_pointer; ++} ++ + /* + * Return target flags in extended format or 0 if restripe for this chunk_type + * is not in progress + * + * Should be called with balance_lock held + */ +-static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) ++static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags) + { +- struct btrfs_balance_control *bctl = fs_info->balance_ctl; ++ const struct btrfs_balance_control *bctl = fs_info->balance_ctl; + u64 target = 0; + + if (!bctl) +@@ -1240,6 +1253,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + goto out; + + spin_lock(&block_group->lock); ++ /* ++ * Hitting this WARN means we removed a block group with an unwritten ++ * region. It will cause "unable to find chunk map for logical" errors. ++ */ ++ if (WARN_ON(has_unwritten_metadata(block_group))) ++ btrfs_warn(fs_info, ++ "block group %llu is removed before metadata write out", ++ block_group->start); ++ + set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags); + + /* +@@ -1418,9 +1440,9 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force) + } + + static bool clean_pinned_extents(struct btrfs_trans_handle *trans, +- struct btrfs_block_group *bg) ++ const struct btrfs_block_group *bg) + { +- struct btrfs_fs_info *fs_info = bg->fs_info; ++ struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_transaction *prev_trans = NULL; + const u64 start = bg->start; + const u64 end = start + bg->length - 1; +@@ -1563,8 +1585,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) + * needing to allocate extents from the block group. 
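
has_unwritten_metadata() above flags a zoned metadata block group whose allocation cursor has moved past the write pointer, meaning space was handed out but never written back; removing such a group is what the new WARN guards against. The comparison in isolation, as a plain-C sketch with shortened field names:

    #include <stdbool.h>
    #include <stdint.h>

    /* alloc_offset is relative to the group start, the write pointer is
     * absolute: anything allocated beyond the pointer never hit disk. */
    static bool has_unwritten(uint64_t start, uint64_t alloc_offset,
                              uint64_t meta_write_pointer)
    {
        return start + alloc_offset > meta_write_pointer;
    }
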
+ */ + used = btrfs_space_info_used(space_info, true); +- if (space_info->total_bytes - block_group->length < used && +- block_group->zone_unusable < block_group->length) { ++ if ((space_info->total_bytes - block_group->length < used && ++ block_group->zone_unusable < block_group->length) || ++ has_unwritten_metadata(block_group)) { + /* + * Add a reference for the list, compensate for the ref + * drop under the "next" label for the +@@ -1752,14 +1775,14 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a, + return bg1->used > bg2->used; + } + +-static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) ++static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info) + { + if (btrfs_is_zoned(fs_info)) + return btrfs_zoned_should_reclaim(fs_info); + return true; + } + +-static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed) ++static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed) + { + const struct btrfs_space_info *space_info = bg->space_info; + const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); +@@ -1991,8 +2014,8 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) + spin_unlock(&fs_info->unused_bgs_lock); + } + +-static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, +- struct btrfs_path *path) ++static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key, ++ const struct btrfs_path *path) + { + struct extent_map_tree *em_tree; + struct extent_map *em; +@@ -2044,7 +2067,7 @@ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, + + static int find_first_block_group(struct btrfs_fs_info *fs_info, + struct btrfs_path *path, +- struct btrfs_key *key) ++ const struct btrfs_key *key) + { + struct btrfs_root *root = btrfs_block_group_root(fs_info); + int ret; +@@ -2636,8 +2659,8 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans, + } + + static int insert_dev_extent(struct btrfs_trans_handle *trans, +- struct btrfs_device *device, u64 chunk_offset, +- u64 start, u64 num_bytes) ++ const struct btrfs_device *device, u64 chunk_offset, ++ u64 start, u64 num_bytes) + { + struct btrfs_fs_info *fs_info = device->fs_info; + struct btrfs_root *root = fs_info->dev_root; +@@ -2787,7 +2810,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) + * For extent tree v2 we use the block_group_item->chunk_offset to point at our + * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 
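
Much of the btrfs churn in these hunks is const-correctness: helpers that only inspect a block group or fs_info now take const pointers, so an accidental write becomes a compile error instead of a latent bug. A minimal illustration of the same idea:

    struct group { unsigned long long start, length; };

    /* Read-only accessor: const documents intent, and the compiler
     * enforces it at every call site. */
    static unsigned long long group_end(const struct group *g)
    {
        return g->start + g->length;
    }
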
+ */ +-static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) ++static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset) + { + u64 div = SZ_1G; + u64 index; +@@ -3823,8 +3846,8 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) + } + } + +-static int should_alloc_chunk(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *sinfo, int force) ++static int should_alloc_chunk(const struct btrfs_fs_info *fs_info, ++ const struct btrfs_space_info *sinfo, int force) + { + u64 bytes_used = btrfs_space_info_used(sinfo, false); + u64 thresh; +@@ -4199,7 +4222,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, + return ret; + } + +-static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) ++static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type) + { + u64 num_dev; + +@@ -4606,7 +4629,7 @@ int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, + return 0; + } + +-bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) ++bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg) + { + if (btrfs_is_zoned(bg->fs_info)) + return false; +diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h +index 089979981e4aaa..a8a6a21e393d2e 100644 +--- a/fs/btrfs/block-group.h ++++ b/fs/btrfs/block-group.h +@@ -250,7 +250,7 @@ struct btrfs_block_group { + enum btrfs_block_group_size_class size_class; + }; + +-static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group) ++static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group) + { + return (block_group->start + block_group->length); + } +@@ -262,8 +262,7 @@ static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg) + return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0); + } + +-static inline bool btrfs_is_block_group_data_only( +- struct btrfs_block_group *block_group) ++static inline bool btrfs_is_block_group_data_only(const struct btrfs_block_group *block_group) + { + /* + * In mixed mode the fragmentation is expected to be high, lowering the +@@ -274,7 +273,7 @@ static inline bool btrfs_is_block_group_data_only( + } + + #ifdef CONFIG_BTRFS_DEBUG +-int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group); ++int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group); + #endif + + struct btrfs_block_group *btrfs_lookup_first_block_group( +@@ -355,7 +354,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) + return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); + } + +-static inline int btrfs_block_group_done(struct btrfs_block_group *cache) ++static inline int btrfs_block_group_done(const struct btrfs_block_group *cache) + { + smp_mb(); + return cache->cached == BTRFS_CACHE_FINISHED || +@@ -372,6 +371,6 @@ enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size); + int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, + enum btrfs_block_group_size_class size_class, + bool force_wrong_size_class); +-bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg); ++bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg); + + #endif /* BTRFS_BLOCK_GROUP_H */ +diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c +index db8da4e7b22891..97084ea3af0cca 100644 +--- a/fs/btrfs/block-rsv.c ++++ b/fs/btrfs/block-rsv.c +@@ -547,7 +547,7 @@ struct btrfs_block_rsv 
*btrfs_use_block_rsv(struct btrfs_trans_handle *trans, + return ERR_PTR(ret); + } + +-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, ++int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *rsv) + { + u64 needed_bytes; +diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h +index 43a9a6b5a79f46..3c9a15f59731fc 100644 +--- a/fs/btrfs/block-rsv.h ++++ b/fs/btrfs/block-rsv.h +@@ -82,7 +82,7 @@ void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info); + struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u32 blocksize); +-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, ++int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *rsv); + static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *block_rsv, +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index c23c56ead6b236..c4968efc3fc464 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -251,7 +251,8 @@ struct btrfs_inode { + struct btrfs_delayed_node *delayed_node; + + /* File creation time. */ +- struct timespec64 i_otime; ++ u64 i_otime_sec; ++ u32 i_otime_nsec; + + /* Hook into fs_info->delayed_iputs */ + struct list_head delayed_iput; +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index 4b21ca49b6665d..31b1b448efc2eb 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -347,7 +347,14 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, + + write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid); + +- WARN_ON(btrfs_header_generation(buf) > trans->transid); ++ if (unlikely(btrfs_header_generation(buf) > trans->transid)) { ++ btrfs_tree_unlock(cow); ++ free_extent_buffer(cow); ++ ret = -EUCLEAN; ++ btrfs_abort_transaction(trans, ret); ++ return ret; ++ } ++ + if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID) + ret = btrfs_inc_ref(trans, root, cow, 1); + else +@@ -2712,7 +2719,7 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, + * + */ + static void fixup_low_keys(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, ++ const struct btrfs_path *path, + struct btrfs_disk_key *key, int level) + { + int i; +@@ -2742,7 +2749,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans, + * that the new key won't break the order + */ + void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, ++ const struct btrfs_path *path, + const struct btrfs_key *new_key) + { + struct btrfs_fs_info *fs_info = trans->fs_info; +@@ -2808,8 +2815,8 @@ void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, + * is correct, we only need to bother the last key of @left and the first + * key of @right. + */ +-static bool check_sibling_keys(struct extent_buffer *left, +- struct extent_buffer *right) ++static bool check_sibling_keys(const struct extent_buffer *left, ++ const struct extent_buffer *right) + { + struct btrfs_key left_last; + struct btrfs_key right_first; +@@ -3049,6 +3056,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, + if (ret < 0) { + int ret2; + ++ btrfs_clear_buffer_dirty(trans, c); + ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); + if (ret2 < 0) + btrfs_abort_transaction(trans, ret2); +@@ -3077,7 +3085,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, + * blocknr is the block the key points to. 
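
The btrfs_inode.h hunk above splits the timespec64 otime into explicit i_otime_sec and i_otime_nsec members. One plausible motivation, stated here purely as an assumption since the patch gives none, is layout: a u64 plus a u32 leaves a 4-byte hole that a neighbouring 32-bit field can occupy, whereas timespec64 is a fixed 16 bytes on 64-bit targets.

    #include <stdint.h>

    struct split_time {
        uint64_t otime_sec;
        uint32_t otime_nsec;
        uint32_t neighbour;    /* packs into what would be padding */
    };
    /* sizeof(struct split_time) == 16 on common 64-bit ABIs: the extra
     * 32-bit member costs nothing, where a timespec64 alone is already
     * 16 bytes before any neighbouring field is placed. */
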
+ */ + static int insert_ptr(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, ++ const struct btrfs_path *path, + struct btrfs_disk_key *key, u64 bytenr, + int slot, int level) + { +@@ -4168,7 +4176,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans, + * the front. + */ + void btrfs_truncate_item(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, u32 new_size, int from_end) ++ const struct btrfs_path *path, u32 new_size, int from_end) + { + int slot; + struct extent_buffer *leaf; +@@ -4260,7 +4268,7 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans, + * make the item pointed to by the path bigger, data_size is the added size. + */ + void btrfs_extend_item(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, u32 data_size) ++ const struct btrfs_path *path, u32 data_size) + { + int slot; + struct extent_buffer *leaf; +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 7df3ed2945b049..834af67fac231d 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -521,7 +521,7 @@ int btrfs_previous_item(struct btrfs_root *root, + int btrfs_previous_extent_item(struct btrfs_root *root, + struct btrfs_path *path, u64 min_objectid); + void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, ++ const struct btrfs_path *path, + const struct btrfs_key *new_key); + struct extent_buffer *btrfs_root_node(struct btrfs_root *root); + int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, +@@ -555,9 +555,9 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans, + int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, + struct btrfs_path *path, int level, int slot); + void btrfs_extend_item(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, u32 data_size); ++ const struct btrfs_path *path, u32 data_size); + void btrfs_truncate_item(struct btrfs_trans_handle *trans, +- struct btrfs_path *path, u32 new_size, int from_end); ++ const struct btrfs_path *path, u32 new_size, int from_end); + int btrfs_split_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 32c5f5a8a0e93d..c39e39142abf1d 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1849,10 +1849,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, + btrfs_set_stack_timespec_nsec(&inode_item->ctime, + inode_get_ctime(inode).tv_nsec); + +- btrfs_set_stack_timespec_sec(&inode_item->otime, +- BTRFS_I(inode)->i_otime.tv_sec); +- btrfs_set_stack_timespec_nsec(&inode_item->otime, +- BTRFS_I(inode)->i_otime.tv_nsec); ++ btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec); ++ btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec); + } + + int btrfs_fill_inode(struct inode *inode, u32 *rdev) +@@ -1901,10 +1899,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) + inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime), + btrfs_stack_timespec_nsec(&inode_item->ctime)); + +- BTRFS_I(inode)->i_otime.tv_sec = +- btrfs_stack_timespec_sec(&inode_item->otime); +- BTRFS_I(inode)->i_otime.tv_nsec = +- btrfs_stack_timespec_nsec(&inode_item->otime); ++ BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime); ++ BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime); + + inode->i_generation = BTRFS_I(inode)->generation; + BTRFS_I(inode)->index_cnt = (u64)-1; +diff 
--git a/fs/btrfs/discard.c b/fs/btrfs/discard.c +index 3981c941f5b556..d6eef4bd9e9d45 100644 +--- a/fs/btrfs/discard.c ++++ b/fs/btrfs/discard.c +@@ -68,7 +68,7 @@ static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = { + }; + + static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, +- struct btrfs_block_group *block_group) ++ const struct btrfs_block_group *block_group) + { + return &discard_ctl->discard_list[block_group->discard_index]; + } +@@ -80,7 +80,7 @@ static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, + * + * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set. + */ +-static bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) ++static bool btrfs_run_discard_work(const struct btrfs_discard_ctl *discard_ctl) + { + struct btrfs_fs_info *fs_info = container_of(discard_ctl, + struct btrfs_fs_info, +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index ef77d420851040..8248113eb067fa 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -3530,6 +3530,21 @@ btrfs_release_block_group(struct btrfs_block_group *cache, + btrfs_put_block_group(cache); + } + ++static bool find_free_extent_check_size_class(const struct find_free_extent_ctl *ffe_ctl, ++ const struct btrfs_block_group *bg) ++{ ++ if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) ++ return true; ++ if (!btrfs_block_group_should_use_size_class(bg)) ++ return true; ++ if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) ++ return true; ++ if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && ++ bg->size_class == BTRFS_BG_SZ_NONE) ++ return true; ++ return ffe_ctl->size_class == bg->size_class; ++} ++ + /* + * Helper function for find_free_extent(). + * +@@ -3551,7 +3566,8 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg, + if (!cluster_bg) + goto refill_cluster; + if (cluster_bg != bg && (cluster_bg->ro || +- !block_group_bits(cluster_bg, ffe_ctl->flags))) ++ !block_group_bits(cluster_bg, ffe_ctl->flags) || ++ !find_free_extent_check_size_class(ffe_ctl, cluster_bg))) + goto release_cluster; + + offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, +@@ -4107,21 +4123,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, + return -ENOSPC; + } + +-static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, +- struct btrfs_block_group *bg) +-{ +- if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) +- return true; +- if (!btrfs_block_group_should_use_size_class(bg)) +- return true; +- if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) +- return true; +- if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && +- bg->size_class == BTRFS_BG_SZ_NONE) +- return true; +- return ffe_ctl->size_class == bg->size_class; +-} +- + static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, + struct find_free_extent_ctl *ffe_ctl, + struct btrfs_space_info *space_info, +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index 45cae356e89ba0..ea5759a689b97d 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -153,7 +153,7 @@ static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info) + * Calculate the total size needed to allocate for an ordered sum structure + * spanning @bytes in the file. 
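
The extent-tree hunk above moves find_free_extent_check_size_class() ahead of the clustered allocator and adds it to the cluster-reuse test, so a cached cluster block group is skipped unless it also satisfies the size-class policy (with the existing escape hatches for zoned mode and late search loops). The decision ladder, reduced to a compilable sketch; the loop-stage constants are illustrative values, not btrfs's:

    #include <stdbool.h>

    enum size_class { SZ_NONE, SZ_SMALL, SZ_MEDIUM, SZ_LARGE };

    struct alloc_ctl {
        bool zoned;                       /* zoned policy skips classes */
        int loop;                         /* search relaxation stage */
        enum size_class want;
    };

    #define LOOP_UNSET_SIZE_CLASS 3       /* illustrative stage numbers */
    #define LOOP_WRONG_SIZE_CLASS 4

    static bool size_class_ok(const struct alloc_ctl *c, enum size_class bg)
    {
        if (c->zoned)
            return true;
        if (c->loop >= LOOP_WRONG_SIZE_CLASS)
            return true;                  /* late loop: accept anything */
        if (c->loop >= LOOP_UNSET_SIZE_CLASS && bg == SZ_NONE)
            return true;                  /* unset group may adopt ours */
        return c->want == bg;
    }
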
+ */ +-static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes) ++static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes) + { + return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes); + } +@@ -1263,7 +1263,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, + + void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, + const struct btrfs_path *path, +- struct btrfs_file_extent_item *fi, ++ const struct btrfs_file_extent_item *fi, + struct extent_map *em) + { + struct btrfs_fs_info *fs_info = inode->root->fs_info; +diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h +index 04bd2d34efb14b..2b1d08b88b616d 100644 +--- a/fs/btrfs/file-item.h ++++ b/fs/btrfs/file-item.h +@@ -62,7 +62,7 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path, + unsigned long *csum_bitmap); + void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, + const struct btrfs_path *path, +- struct btrfs_file_extent_item *fi, ++ const struct btrfs_file_extent_item *fi, + struct extent_map *em); + int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, + u64 len); +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c +index 300ee0b68b498e..8efe3a9369df04 100644 +--- a/fs/btrfs/free-space-tree.c ++++ b/fs/btrfs/free-space-tree.c +@@ -1371,12 +1371,17 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, + clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags); + + ret = add_new_free_space_info(trans, block_group, path); +- if (ret) ++ if (ret) { ++ btrfs_abort_transaction(trans, ret); + return ret; ++ } ++ ++ ret = __add_to_free_space_tree(trans, block_group, path, ++ block_group->start, block_group->length); ++ if (ret) ++ btrfs_abort_transaction(trans, ret); + +- return __add_to_free_space_tree(trans, block_group, path, +- block_group->start, +- block_group->length); ++ return 0; + } + + int add_block_group_free_space(struct btrfs_trans_handle *trans, +@@ -1396,16 +1401,14 @@ int add_block_group_free_space(struct btrfs_trans_handle *trans, + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; ++ btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = __add_block_group_free_space(trans, block_group, path); +- + out: + btrfs_free_path(path); + mutex_unlock(&block_group->free_space_lock); +- if (ret) +- btrfs_abort_transaction(trans, ret); + return ret; + } + +diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c +index d3ff97374d48aa..ab741a57842409 100644 +--- a/fs/btrfs/inode-item.c ++++ b/fs/btrfs/inode-item.c +@@ -15,7 +15,7 @@ + #include "extent-tree.h" + #include "file-item.h" + +-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, ++struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf, + int slot, + const struct fscrypt_str *name) + { +@@ -43,7 +43,7 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, + } + + struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( +- struct extent_buffer *leaf, int slot, u64 ref_objectid, ++ const struct extent_buffer *leaf, int slot, u64 ref_objectid, + const struct fscrypt_str *name) + { + struct btrfs_inode_extref *extref; +@@ -424,9 +424,9 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root + return ret; + } + +-static inline void btrfs_trace_truncate(struct btrfs_inode *inode, +- struct extent_buffer *leaf, +- struct 
btrfs_file_extent_item *fi, ++static inline void btrfs_trace_truncate(const struct btrfs_inode *inode, ++ const struct extent_buffer *leaf, ++ const struct btrfs_file_extent_item *fi, + u64 offset, int extent_type, int slot) + { + if (!inode) +diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h +index ede43b6c65591d..d43633d5620f29 100644 +--- a/fs/btrfs/inode-item.h ++++ b/fs/btrfs/inode-item.h +@@ -100,11 +100,11 @@ struct btrfs_inode_extref *btrfs_lookup_inode_extref( + u64 inode_objectid, u64 ref_objectid, int ins_len, + int cow); + +-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, ++struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf, + int slot, + const struct fscrypt_str *name); + struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( +- struct extent_buffer *leaf, int slot, u64 ref_objectid, ++ const struct extent_buffer *leaf, int slot, u64 ref_objectid, + const struct fscrypt_str *name); + + #endif +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 218d15f5ddf737..4502a474a81dab 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3785,10 +3785,8 @@ static int btrfs_read_locked_inode(struct inode *inode, + inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), + btrfs_timespec_nsec(leaf, &inode_item->ctime)); + +- BTRFS_I(inode)->i_otime.tv_sec = +- btrfs_timespec_sec(leaf, &inode_item->otime); +- BTRFS_I(inode)->i_otime.tv_nsec = +- btrfs_timespec_nsec(leaf, &inode_item->otime); ++ BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime); ++ BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime); + + inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); + BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); +@@ -3958,10 +3956,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, + btrfs_set_token_timespec_nsec(&token, &item->ctime, + inode_get_ctime(inode).tv_nsec); + +- btrfs_set_token_timespec_sec(&token, &item->otime, +- BTRFS_I(inode)->i_otime.tv_sec); +- btrfs_set_token_timespec_nsec(&token, &item->otime, +- BTRFS_I(inode)->i_otime.tv_nsec); ++ btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec); ++ btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec); + + btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); + btrfs_set_token_inode_generation(&token, item, +@@ -5644,7 +5640,8 @@ static struct inode *new_simple_dir(struct inode *dir, + inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; + inode->i_mtime = inode_set_ctime_current(inode); + inode->i_atime = dir->i_atime; +- BTRFS_I(inode)->i_otime = inode->i_mtime; ++ BTRFS_I(inode)->i_otime_sec = inode->i_mtime.tv_sec; ++ BTRFS_I(inode)->i_otime_nsec = inode->i_mtime.tv_nsec; + inode->i_uid = dir->i_uid; + inode->i_gid = dir->i_gid; + +@@ -6321,7 +6318,8 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + + inode->i_mtime = inode_set_ctime_current(inode); + inode->i_atime = inode->i_mtime; +- BTRFS_I(inode)->i_otime = inode->i_mtime; ++ BTRFS_I(inode)->i_otime_sec = inode->i_mtime.tv_sec; ++ BTRFS_I(inode)->i_otime_nsec = inode->i_mtime.tv_nsec; + + /* + * We're going to fill the inode item now, so at this point the inode +@@ -8550,8 +8548,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) + + ei->delayed_node = NULL; + +- ei->i_otime.tv_sec = 0; +- ei->i_otime.tv_nsec = 0; ++ ei->i_otime_sec = 0; ++ ei->i_otime_nsec = 0; + + inode = &ei->vfs_inode; + 
extent_map_tree_init(&ei->extent_tree); +@@ -8703,8 +8701,8 @@ static int btrfs_getattr(struct mnt_idmap *idmap, + u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; + + stat->result_mask |= STATX_BTIME; +- stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; +- stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; ++ stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec; ++ stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec; + if (bi_flags & BTRFS_INODE_APPEND) + stat->attributes |= STATX_ATTR_APPEND; + if (bi_flags & BTRFS_INODE_COMPRESS) +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 1b9f4f16d12404..c46ea2ecf18817 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -579,22 +579,30 @@ bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info) + + /* + * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), +- * first two are in single-threaded paths.And for the third one, we have set +- * quota_root to be null with qgroup_lock held before, so it is safe to clean +- * up the in-memory structures without qgroup_lock held. ++ * first two are in single-threaded paths. + */ + void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) + { + struct rb_node *n; + struct btrfs_qgroup *qgroup; + ++ /* ++ * btrfs_quota_disable() can be called concurrently with ++ * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the ++ * lock. ++ */ ++ spin_lock(&fs_info->qgroup_lock); + while ((n = rb_first(&fs_info->qgroup_tree))) { + qgroup = rb_entry(n, struct btrfs_qgroup, node); + rb_erase(n, &fs_info->qgroup_tree); + __del_qgroup_rb(fs_info, qgroup); ++ spin_unlock(&fs_info->qgroup_lock); + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); + kfree(qgroup); ++ spin_lock(&fs_info->qgroup_lock); + } ++ spin_unlock(&fs_info->qgroup_lock); ++ + /* + * We call btrfs_free_qgroup_config() when unmounting + * filesystem and disabling quota, so we set qgroup_ulist +@@ -3616,12 +3624,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) + qgroup_rescan_zero_tracking(fs_info); + + mutex_lock(&fs_info->qgroup_rescan_lock); +- fs_info->qgroup_rescan_running = true; +- btrfs_queue_work(fs_info->qgroup_rescan_workers, +- &fs_info->qgroup_rescan_work); ++ /* ++ * The rescan worker is only for full accounting qgroups, check if it's ++ * enabled as it is pointless to queue it otherwise. A concurrent quota ++ * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED. ++ */ ++ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { ++ fs_info->qgroup_rescan_running = true; ++ btrfs_queue_work(fs_info->qgroup_rescan_workers, ++ &fs_info->qgroup_rescan_work); ++ } else { ++ ret = -ENOTCONN; ++ } + mutex_unlock(&fs_info->qgroup_rescan_lock); + +- return 0; ++ return ret; + } + + int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index 474758c878fcab..8cc1f4b832773e 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -693,6 +693,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, + if (root->root_key.objectid == objectid) { + u64 commit_root_gen; + ++ /* ++ * Relocation will wait for cleaner thread, and any half-dropped ++ * subvolume will be fully cleaned up at mount time. ++ * So here we shouldn't hit a subvolume with non-zero drop_progress. ++ * ++ * If this isn't the case, error out since it can make us attempt to ++ * drop references for extents that were already dropped before. 
++ */ ++ if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) { ++ struct btrfs_key cpu_key; ++ ++ btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress); ++ btrfs_err(fs_info, ++ "cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)", ++ objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset); ++ ret = -EUCLEAN; ++ goto fail; ++ } ++ + /* called by btrfs_init_reloc_root */ + ret = btrfs_copy_root(trans, root, root->commit_root, &eb, + BTRFS_TREE_RELOC_OBJECTID); +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index e2ead36e5be422..c25eb4416a6710 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -4,6 +4,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -179,6 +180,7 @@ struct send_ctx { + u64 cur_inode_rdev; + u64 cur_inode_last_extent; + u64 cur_inode_next_write_offset; ++ struct fs_path cur_inode_path; + bool cur_inode_new; + bool cur_inode_new_gen; + bool cur_inode_deleted; +@@ -436,6 +438,14 @@ static void fs_path_reset(struct fs_path *p) + } + } + ++static void init_path(struct fs_path *p) ++{ ++ p->reversed = 0; ++ p->buf = p->inline_buf; ++ p->buf_len = FS_PATH_INLINE_SIZE; ++ fs_path_reset(p); ++} ++ + static struct fs_path *fs_path_alloc(void) + { + struct fs_path *p; +@@ -443,10 +453,7 @@ static struct fs_path *fs_path_alloc(void) + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return NULL; +- p->reversed = 0; +- p->buf = p->inline_buf; +- p->buf_len = FS_PATH_INLINE_SIZE; +- fs_path_reset(p); ++ init_path(p); + return p; + } + +@@ -471,7 +478,7 @@ static void fs_path_free(struct fs_path *p) + kfree(p); + } + +-static int fs_path_len(struct fs_path *p) ++static inline int fs_path_len(const struct fs_path *p) + { + return p->end - p->start; + } +@@ -624,6 +631,14 @@ static void fs_path_unreverse(struct fs_path *p) + p->reversed = 0; + } + ++static inline bool is_current_inode_path(const struct send_ctx *sctx, ++ const struct fs_path *path) ++{ ++ const struct fs_path *cur = &sctx->cur_inode_path; ++ ++ return (strncmp(path->start, cur->start, fs_path_len(cur)) == 0); ++} ++ + static struct btrfs_path *alloc_path_for_send(void) + { + struct btrfs_path *path; +@@ -2450,6 +2465,14 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, + u64 parent_inode = 0; + u64 parent_gen = 0; + int stop = 0; ++ const bool is_cur_inode = (ino == sctx->cur_ino && gen == sctx->cur_inode_gen); ++ ++ if (is_cur_inode && fs_path_len(&sctx->cur_inode_path) > 0) { ++ if (dest != &sctx->cur_inode_path) ++ return fs_path_copy(dest, &sctx->cur_inode_path); ++ ++ return 0; ++ } + + name = fs_path_alloc(); + if (!name) { +@@ -2501,8 +2524,12 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, + + out: + fs_path_free(name); +- if (!ret) ++ if (!ret) { + fs_path_unreverse(dest); ++ if (is_cur_inode && dest != &sctx->cur_inode_path) ++ ret = fs_path_copy(&sctx->cur_inode_path, dest); ++ } ++ + return ret; + } + +@@ -2597,6 +2624,47 @@ static int send_subvol_begin(struct send_ctx *sctx) + return ret; + } + ++static struct fs_path *get_cur_inode_path(struct send_ctx *sctx) ++{ ++ if (fs_path_len(&sctx->cur_inode_path) == 0) { ++ int ret; ++ ++ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, ++ &sctx->cur_inode_path); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ } ++ ++ return &sctx->cur_inode_path; ++} ++ ++static struct fs_path *get_path_for_command(struct send_ctx *sctx, u64 ino, u64 gen) ++{ ++ struct fs_path *path; ++ int ret; ++ ++ if (ino == sctx->cur_ino && gen == 
sctx->cur_inode_gen) ++ return get_cur_inode_path(sctx); ++ ++ path = fs_path_alloc(); ++ if (!path) ++ return ERR_PTR(-ENOMEM); ++ ++ ret = get_cur_path(sctx, ino, gen, path); ++ if (ret < 0) { ++ fs_path_free(path); ++ return ERR_PTR(ret); ++ } ++ ++ return path; ++} ++ ++static void free_path_for_command(const struct send_ctx *sctx, struct fs_path *path) ++{ ++ if (path != &sctx->cur_inode_path) ++ fs_path_free(path); ++} ++ + static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) + { + struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; +@@ -2605,17 +2673,14 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) + + btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; ++ p = get_path_for_command(sctx, ino, gen); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + + ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, ino, gen, p); +- if (ret < 0) +- goto out; + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); + +@@ -2623,7 +2688,7 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) + + tlv_put_failure: + out: +- fs_path_free(p); ++ free_path_for_command(sctx, p); + return ret; + } + +@@ -2635,17 +2700,14 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) + + btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; ++ p = get_path_for_command(sctx, ino, gen); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + + ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, ino, gen, p); +- if (ret < 0) +- goto out; + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); + +@@ -2653,7 +2715,7 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) + + tlv_put_failure: + out: +- fs_path_free(p); ++ free_path_for_command(sctx, p); + return ret; + } + +@@ -2668,17 +2730,14 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr) + + btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr); + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; ++ p = get_path_for_command(sctx, ino, gen); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + + ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR); + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, ino, gen, p); +- if (ret < 0) +- goto out; + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr); + +@@ -2686,7 +2745,7 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr) + + tlv_put_failure: + out: +- fs_path_free(p); ++ free_path_for_command(sctx, p); + return ret; + } + +@@ -2699,17 +2758,14 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) + btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", + ino, uid, gid); + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; ++ p = get_path_for_command(sctx, ino, gen); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + + ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, ino, gen, p); +- if (ret < 0) +- goto out; + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); + TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); +@@ -2718,7 +2774,7 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 
uid, u64 gid) + + tlv_put_failure: + out: +- fs_path_free(p); ++ free_path_for_command(sctx, p); + return ret; + } + +@@ -2735,9 +2791,9 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) + + btrfs_debug(fs_info, "send_utimes %llu", ino); + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; ++ p = get_path_for_command(sctx, ino, gen); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + + path = alloc_path_for_send(); + if (!path) { +@@ -2762,9 +2818,6 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, ino, gen, p); +- if (ret < 0) +- goto out; + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); +@@ -2776,7 +2829,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) + + tlv_put_failure: + out: +- fs_path_free(p); ++ free_path_for_command(sctx, p); + btrfs_free_path(path); + return ret; + } +@@ -3113,6 +3166,11 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, + goto out; + + ret = send_rename(sctx, path, orphan); ++ if (ret < 0) ++ goto out; ++ ++ if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen) ++ ret = fs_path_copy(&sctx->cur_inode_path, orphan); + + out: + fs_path_free(orphan); +@@ -4166,6 +4224,23 @@ static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) + return ret; + } + ++static int rename_current_inode(struct send_ctx *sctx, ++ struct fs_path *current_path, ++ struct fs_path *new_path) ++{ ++ int ret; ++ ++ ret = send_rename(sctx, current_path, new_path); ++ if (ret < 0) ++ return ret; ++ ++ ret = fs_path_copy(&sctx->cur_inode_path, new_path); ++ if (ret < 0) ++ return ret; ++ ++ return fs_path_copy(current_path, new_path); ++} ++ + /* + * This does all the move/link/unlink/rmdir magic. + */ +@@ -4180,9 +4255,9 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + u64 ow_inode = 0; + u64 ow_gen; + u64 ow_mode; +- int did_overwrite = 0; +- int is_orphan = 0; + u64 last_dir_ino_rm = 0; ++ bool did_overwrite = false; ++ bool is_orphan = false; + bool can_rename = true; + bool orphanized_dir = false; + bool orphanized_ancestor = false; +@@ -4224,14 +4299,14 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + if (ret < 0) + goto out; + if (ret) +- did_overwrite = 1; ++ did_overwrite = true; + } + if (sctx->cur_inode_new || did_overwrite) { + ret = gen_unique_name(sctx, sctx->cur_ino, + sctx->cur_inode_gen, valid_path); + if (ret < 0) + goto out; +- is_orphan = 1; ++ is_orphan = true; + } else { + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, + valid_path); +@@ -4356,6 +4431,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + if (ret > 0) { + orphanized_ancestor = true; + fs_path_reset(valid_path); ++ fs_path_reset(&sctx->cur_inode_path); + ret = get_cur_path(sctx, sctx->cur_ino, + sctx->cur_inode_gen, + valid_path); +@@ -4451,13 +4527,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + * it depending on the inode mode. 
+ */ + if (is_orphan && can_rename) { +- ret = send_rename(sctx, valid_path, cur->full_path); +- if (ret < 0) +- goto out; +- is_orphan = 0; +- ret = fs_path_copy(valid_path, cur->full_path); ++ ret = rename_current_inode(sctx, valid_path, cur->full_path); + if (ret < 0) + goto out; ++ is_orphan = false; + } else if (can_rename) { + if (S_ISDIR(sctx->cur_inode_mode)) { + /* +@@ -4465,10 +4538,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + * dirs, we always have one new and one deleted + * ref. The deleted ref is ignored later. + */ +- ret = send_rename(sctx, valid_path, +- cur->full_path); +- if (!ret) +- ret = fs_path_copy(valid_path, ++ ret = rename_current_inode(sctx, valid_path, + cur->full_path); + if (ret < 0) + goto out; +@@ -4515,7 +4585,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + sctx->cur_inode_gen, valid_path); + if (ret < 0) + goto out; +- is_orphan = 1; ++ is_orphan = true; + } + + list_for_each_entry(cur, &sctx->deleted_refs, list) { +@@ -4561,6 +4631,8 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) + ret = send_unlink(sctx, cur->full_path); + if (ret < 0) + goto out; ++ if (is_current_inode_path(sctx, cur->full_path)) ++ fs_path_reset(&sctx->cur_inode_path); + } + ret = dup_ref(cur, &check_dirs); + if (ret < 0) +@@ -4879,11 +4951,15 @@ static int process_all_refs(struct send_ctx *sctx, + } + + static int send_set_xattr(struct send_ctx *sctx, +- struct fs_path *path, + const char *name, int name_len, + const char *data, int data_len) + { +- int ret = 0; ++ struct fs_path *path; ++ int ret; ++ ++ path = get_cur_inode_path(sctx); ++ if (IS_ERR(path)) ++ return PTR_ERR(path); + + ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); + if (ret < 0) +@@ -4924,19 +5000,13 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, + const char *name, int name_len, const char *data, + int data_len, void *ctx) + { +- int ret; + struct send_ctx *sctx = ctx; +- struct fs_path *p; + struct posix_acl_xattr_header dummy_acl; + + /* Capabilities are emitted by finish_inode_if_needed */ + if (!strncmp(name, XATTR_NAME_CAPS, name_len)) + return 0; + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; +- + /* + * This hack is needed because empty acls are stored as zero byte + * data in xattrs. 
Problem with that is, that receiving these zero byte +@@ -4953,38 +5023,21 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, + } + } + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); +- if (ret < 0) +- goto out; +- +- ret = send_set_xattr(sctx, p, name, name_len, data, data_len); +- +-out: +- fs_path_free(p); +- return ret; ++ return send_set_xattr(sctx, name, name_len, data, data_len); + } + + static int __process_deleted_xattr(int num, struct btrfs_key *di_key, + const char *name, int name_len, + const char *data, int data_len, void *ctx) + { +- int ret; + struct send_ctx *sctx = ctx; + struct fs_path *p; + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; +- +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); +- if (ret < 0) +- goto out; +- +- ret = send_remove_xattr(sctx, p, name, name_len); ++ p = get_cur_inode_path(sctx); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + +-out: +- fs_path_free(p); +- return ret; ++ return send_remove_xattr(sctx, p, name, name_len); + } + + static int process_new_xattr(struct send_ctx *sctx) +@@ -5218,21 +5271,13 @@ static int process_verity(struct send_ctx *sctx) + if (ret < 0) + goto iput; + +- p = fs_path_alloc(); +- if (!p) { +- ret = -ENOMEM; ++ p = get_cur_inode_path(sctx); ++ if (IS_ERR(p)) { ++ ret = PTR_ERR(p); + goto iput; + } +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); +- if (ret < 0) +- goto free_path; + + ret = send_verity(sctx, p, sctx->verity_descriptor); +- if (ret < 0) +- goto free_path; +- +-free_path: +- fs_path_free(p); + iput: + iput(inode); + return ret; +@@ -5347,31 +5392,25 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len) + int ret = 0; + struct fs_path *p; + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; +- + btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len); + +- ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); +- if (ret < 0) +- goto out; ++ p = get_cur_inode_path(sctx); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); ++ ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); + if (ret < 0) +- goto out; ++ return ret; + + TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); + TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); + ret = put_file_data(sctx, offset, len); + if (ret < 0) +- goto out; ++ return ret; + + ret = send_cmd(sctx); + + tlv_put_failure: +-out: +- fs_path_free(p); + return ret; + } + +@@ -5384,6 +5423,7 @@ static int send_clone(struct send_ctx *sctx, + { + int ret = 0; + struct fs_path *p; ++ struct fs_path *cur_inode_path; + u64 gen; + + btrfs_debug(sctx->send_root->fs_info, +@@ -5391,6 +5431,10 @@ static int send_clone(struct send_ctx *sctx, + offset, len, clone_root->root->root_key.objectid, + clone_root->ino, clone_root->offset); + ++ cur_inode_path = get_cur_inode_path(sctx); ++ if (IS_ERR(cur_inode_path)) ++ return PTR_ERR(cur_inode_path); ++ + p = fs_path_alloc(); + if (!p) + return -ENOMEM; +@@ -5399,13 +5443,9 @@ static int send_clone(struct send_ctx *sctx, + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); +- if (ret < 0) +- goto out; +- + TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); + TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); +- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); ++ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, cur_inode_path); + + if (clone_root->root == sctx->send_root) { + ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen); +@@ -5456,27 +5496,45 @@ static int 
send_update_extent(struct send_ctx *sctx, + int ret = 0; + struct fs_path *p; + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; ++ p = get_cur_inode_path(sctx); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); + + ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); + if (ret < 0) +- goto out; ++ return ret; ++ ++ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); ++ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); ++ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); ++ ++ ret = send_cmd(sctx); + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); ++tlv_put_failure: ++ return ret; ++} ++ ++static int send_fallocate(struct send_ctx *sctx, u32 mode, u64 offset, u64 len) ++{ ++ struct fs_path *path; ++ int ret; ++ ++ path = get_cur_inode_path(sctx); ++ if (IS_ERR(path)) ++ return PTR_ERR(path); ++ ++ ret = begin_cmd(sctx, BTRFS_SEND_C_FALLOCATE); + if (ret < 0) +- goto out; ++ return ret; + +- TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); ++ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); ++ TLV_PUT_U32(sctx, BTRFS_SEND_A_FALLOCATE_MODE, mode); + TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); + TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); + + ret = send_cmd(sctx); + + tlv_put_failure: +-out: +- fs_path_free(p); + return ret; + } + +@@ -5487,6 +5545,14 @@ static int send_hole(struct send_ctx *sctx, u64 end) + u64 offset = sctx->cur_inode_last_extent; + int ret = 0; + ++ /* ++ * Starting with send stream v2 we have fallocate and can use it to ++ * punch holes instead of sending writes full of zeroes. ++ */ ++ if (proto_cmd_ok(sctx, BTRFS_SEND_C_FALLOCATE)) ++ return send_fallocate(sctx, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, ++ offset, end - offset); ++ + /* + * A hole that starts at EOF or beyond it. Since we do not yet support + * fallocate (for extent preallocation and hole punching), sending a +@@ -5505,12 +5571,10 @@ static int send_hole(struct send_ctx *sctx, u64 end) + if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) + return send_update_extent(sctx, offset, end - offset); + +- p = fs_path_alloc(); +- if (!p) +- return -ENOMEM; +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); +- if (ret < 0) +- goto tlv_put_failure; ++ p = get_cur_inode_path(sctx); ++ if (IS_ERR(p)) ++ return PTR_ERR(p); ++ + while (offset < end) { + u64 len = min(end - offset, read_size); + +@@ -5531,7 +5595,6 @@ static int send_hole(struct send_ctx *sctx, u64 end) + } + sctx->cur_inode_next_write_offset = offset; + tlv_put_failure: +- fs_path_free(p); + return ret; + } + +@@ -5554,9 +5617,9 @@ static int send_encoded_inline_extent(struct send_ctx *sctx, + if (IS_ERR(inode)) + return PTR_ERR(inode); + +- fspath = fs_path_alloc(); +- if (!fspath) { +- ret = -ENOMEM; ++ fspath = get_cur_inode_path(sctx); ++ if (IS_ERR(fspath)) { ++ ret = PTR_ERR(fspath); + goto out; + } + +@@ -5564,10 +5627,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx, + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); +- if (ret < 0) +- goto out; +- + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); + ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei); +@@ -5596,7 +5655,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx, + + tlv_put_failure: + out: +- fs_path_free(fspath); + iput(inode); + return ret; + } +@@ -5621,9 +5679,9 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, + if (IS_ERR(inode)) + return PTR_ERR(inode); + +- fspath = 
fs_path_alloc(); +- if (!fspath) { +- ret = -ENOMEM; ++ fspath = get_cur_inode_path(sctx); ++ if (IS_ERR(fspath)) { ++ ret = PTR_ERR(fspath); + goto out; + } + +@@ -5631,10 +5689,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, + if (ret < 0) + goto out; + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); +- if (ret < 0) +- goto out; +- + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); + disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); +@@ -5701,7 +5755,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, + + tlv_put_failure: + out: +- fs_path_free(fspath); + iput(inode); + return ret; + } +@@ -5831,7 +5884,6 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path, + */ + static int send_capabilities(struct send_ctx *sctx) + { +- struct fs_path *fspath = NULL; + struct btrfs_path *path; + struct btrfs_dir_item *di; + struct extent_buffer *leaf; +@@ -5857,25 +5909,19 @@ static int send_capabilities(struct send_ctx *sctx) + leaf = path->nodes[0]; + buf_len = btrfs_dir_data_len(leaf, di); + +- fspath = fs_path_alloc(); + buf = kmalloc(buf_len, GFP_KERNEL); +- if (!fspath || !buf) { ++ if (!buf) { + ret = -ENOMEM; + goto out; + } + +- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); +- if (ret < 0) +- goto out; +- + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); + read_extent_buffer(leaf, buf, data_ptr, buf_len); + +- ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, ++ ret = send_set_xattr(sctx, XATTR_NAME_CAPS, + strlen(XATTR_NAME_CAPS), buf, buf_len); + out: + kfree(buf); +- fs_path_free(fspath); + btrfs_free_path(path); + return ret; + } +@@ -6904,6 +6950,7 @@ static int changed_inode(struct send_ctx *sctx, + sctx->cur_inode_last_extent = (u64)-1; + sctx->cur_inode_next_write_offset = 0; + sctx->ignore_cur_inode = false; ++ fs_path_reset(&sctx->cur_inode_path); + + /* + * Set send_progress to current inode. This will tell all get_cur_xxx +@@ -8194,6 +8241,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg) + goto out; + } + ++ init_path(&sctx->cur_inode_path); + INIT_LIST_HEAD(&sctx->new_refs); + INIT_LIST_HEAD(&sctx->deleted_refs); + +@@ -8479,6 +8527,9 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg) + btrfs_lru_cache_clear(&sctx->dir_created_cache); + btrfs_lru_cache_clear(&sctx->dir_utimes_cache); + ++ if (sctx->cur_inode_path.buf != sctx->cur_inode_path.inline_buf) ++ kfree(sctx->cur_inode_path.buf); ++ + kfree(sctx); + } + +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c +index 581bdd709ee0d0..27690c518f6d79 100644 +--- a/fs/btrfs/space-info.c ++++ b/fs/btrfs/space-info.c +@@ -162,7 +162,7 @@ + * thing with or without extra unallocated space. 
+ */ + +-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, ++u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, + bool may_use_included) + { + ASSERT(s_info); +@@ -342,7 +342,7 @@ struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, + } + + static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *space_info, ++ const struct btrfs_space_info *space_info, + enum btrfs_reserve_flush_enum flush) + { + u64 profile; +@@ -378,7 +378,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, + } + + int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *space_info, u64 bytes, ++ const struct btrfs_space_info *space_info, u64 bytes, + enum btrfs_reserve_flush_enum flush) + { + u64 avail; +@@ -483,8 +483,8 @@ static void dump_global_block_rsv(struct btrfs_fs_info *fs_info) + DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); + } + +-static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *info) ++static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info, ++ const struct btrfs_space_info *info) + { + const char *flag_str = space_info_flag_to_str(info); + lockdep_assert_held(&info->lock); +@@ -807,9 +807,8 @@ static void flush_space(struct btrfs_fs_info *fs_info, + return; + } + +-static inline u64 +-btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *space_info) ++static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, ++ const struct btrfs_space_info *space_info) + { + u64 used; + u64 avail; +@@ -834,7 +833,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, + } + + static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *space_info) ++ const struct btrfs_space_info *space_info) + { + const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); + u64 ordered, delalloc; +diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h +index 08a3bd10addcf9..b0187f25dbb5e0 100644 +--- a/fs/btrfs/space-info.h ++++ b/fs/btrfs/space-info.h +@@ -165,7 +165,7 @@ struct reserve_ticket { + wait_queue_head_t wait; + }; + +-static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) ++static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info) + { + return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && + (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); +@@ -206,7 +206,7 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, + u64 chunk_size); + struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, + u64 flags); +-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, ++u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, + bool may_use_included); + void btrfs_clear_space_info_full(struct btrfs_fs_info *info); + void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, +@@ -219,7 +219,7 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, + void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info); + int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, +- struct btrfs_space_info *space_info, u64 bytes, ++ const struct btrfs_space_info *space_info, u64 bytes, + enum btrfs_reserve_flush_enum flush); + + static inline void btrfs_space_info_free_bytes_may_use( +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 
16434106c465db..9439abf415ae36 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -326,8 +326,7 @@ struct walk_control { + + /* + * Ignore any items from the inode currently being processed. Needs +- * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in +- * the LOG_WALK_REPLAY_INODES stage. ++ * to be set every time we find a BTRFS_INODE_ITEM_KEY. + */ + bool ignore_cur_inode; + +@@ -1423,6 +1422,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + btrfs_dir = btrfs_iget_logging(parent_objectid, root); + if (IS_ERR(btrfs_dir)) { + ret = PTR_ERR(btrfs_dir); ++ if (ret == -ENOENT) ++ ret = 0; + dir = NULL; + goto out; + } +@@ -1456,6 +1457,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + if (IS_ERR(btrfs_dir)) { + ret = PTR_ERR(btrfs_dir); + dir = NULL; ++ /* ++ * A new parent dir may have not been ++ * logged and not exist in the subvolume ++ * tree, see the comment above before ++ * the loop when getting the first ++ * parent dir. ++ */ ++ if (ret == -ENOENT) ++ ret = 0; + goto out; + } + dir = &btrfs_dir->vfs_inode; +@@ -2498,23 +2508,30 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, + + nritems = btrfs_header_nritems(eb); + for (i = 0; i < nritems; i++) { +- btrfs_item_key_to_cpu(eb, &key, i); ++ struct btrfs_inode_item *inode_item; + +- /* inode keys are done during the first stage */ +- if (key.type == BTRFS_INODE_ITEM_KEY && +- wc->stage == LOG_WALK_REPLAY_INODES) { +- struct btrfs_inode_item *inode_item; +- u32 mode; ++ btrfs_item_key_to_cpu(eb, &key, i); + +- inode_item = btrfs_item_ptr(eb, i, +- struct btrfs_inode_item); ++ if (key.type == BTRFS_INODE_ITEM_KEY) { ++ inode_item = btrfs_item_ptr(eb, i, struct btrfs_inode_item); + /* +- * If we have a tmpfile (O_TMPFILE) that got fsync'ed +- * and never got linked before the fsync, skip it, as +- * replaying it is pointless since it would be deleted +- * later. We skip logging tmpfiles, but it's always +- * possible we are replaying a log created with a kernel +- * that used to log tmpfiles. ++ * An inode with no links is either: ++ * ++ * 1) A tmpfile (O_TMPFILE) that got fsync'ed and never ++ * got linked before the fsync, skip it, as replaying ++ * it is pointless since it would be deleted later. ++ * We skip logging tmpfiles, but it's always possible ++ * we are replaying a log created with a kernel that ++ * used to log tmpfiles; ++ * ++ * 2) A non-tmpfile which got its last link deleted ++ * while holding an open fd on it and later got ++ * fsynced through that fd. We always log the ++ * parent inodes when inode->last_unlink_trans is ++ * set to the current transaction, so ignore all the ++ * inode items for this inode. We will delete the ++ * inode when processing the parent directory with ++ * replay_dir_deletes(). + */ + if (btrfs_inode_nlink(eb, inode_item) == 0) { + wc->ignore_cur_inode = true; +@@ -2522,8 +2539,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, + } else { + wc->ignore_cur_inode = false; + } +- ret = replay_xattr_deletes(wc->trans, root, log, +- path, key.objectid); ++ } ++ ++ /* Inode keys are done during the first stage. 
*/ ++ if (key.type == BTRFS_INODE_ITEM_KEY && ++ wc->stage == LOG_WALK_REPLAY_INODES) { ++ u32 mode; ++ ++ ret = replay_xattr_deletes(wc->trans, root, log, path, key.objectid); + if (ret) + break; + mode = btrfs_inode_mode(eb, inode_item); +@@ -2611,9 +2634,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, + key.type == BTRFS_INODE_EXTREF_KEY) { + ret = add_inode_ref(wc->trans, root, log, path, + eb, i, &key); +- if (ret && ret != -ENOENT) ++ if (ret) + break; +- ret = 0; + } else if (key.type == BTRFS_EXTENT_DATA_KEY) { + ret = replay_one_extent(wc->trans, root, path, + eb, i, &key); +@@ -4243,6 +4265,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, + btrfs_set_token_timespec_nsec(&token, &item->ctime, + inode_get_ctime(inode).tv_nsec); + ++ btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec); ++ btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec); ++ + /* + * We do not need to set the nbytes field, in fact during a fast fsync + * its value may not even be correct, since a fast fsync does not wait +@@ -7300,11 +7325,14 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) + + wc.replay_dest->log_root = log; + ret = btrfs_record_root_in_trans(trans, wc.replay_dest); +- if (ret) ++ if (ret) { + /* The loop needs to continue due to the root refs */ + btrfs_abort_transaction(trans, ret); +- else ++ } else { + ret = walk_log_tree(trans, log, &wc); ++ if (ret) ++ btrfs_abort_transaction(trans, ret); ++ } + + if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { + ret = fixup_inode_link_counts(trans, wc.replay_dest, +diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c +index 3df6153d5d5a80..febc014a510df4 100644 +--- a/fs/btrfs/tree-mod-log.c ++++ b/fs/btrfs/tree-mod-log.c +@@ -171,7 +171,7 @@ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info, + * write unlock fs_info::tree_mod_log_lock. + */ + static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, +- struct extent_buffer *eb) ++ const struct extent_buffer *eb) + { + if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) + return true; +@@ -189,7 +189,7 @@ static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, + + /* Similar to tree_mod_dont_log, but doesn't acquire any locks. 
*/ + static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, +- struct extent_buffer *eb) ++ const struct extent_buffer *eb) + { + if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) + return false; +@@ -199,7 +199,7 @@ static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, + return true; + } + +-static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb, ++static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb, + int slot, + enum btrfs_mod_log_op op) + { +@@ -222,7 +222,7 @@ static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb, + return tm; + } + +-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, ++int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, + enum btrfs_mod_log_op op) + { + struct tree_mod_elem *tm; +@@ -259,7 +259,7 @@ int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, + return ret; + } + +-static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb, ++static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb, + int dst_slot, int src_slot, + int nr_items) + { +@@ -279,7 +279,7 @@ static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb, + return tm; + } + +-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb, ++int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, + int dst_slot, int src_slot, + int nr_items) + { +@@ -536,7 +536,7 @@ static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info, + } + + int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, +- struct extent_buffer *src, ++ const struct extent_buffer *src, + unsigned long dst_offset, + unsigned long src_offset, + int nr_items) +diff --git a/fs/btrfs/tree-mod-log.h b/fs/btrfs/tree-mod-log.h +index 94f10afeee9725..5f94ab681fa437 100644 +--- a/fs/btrfs/tree-mod-log.h ++++ b/fs/btrfs/tree-mod-log.h +@@ -31,7 +31,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, + int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root, + struct extent_buffer *new_root, + bool log_removal); +-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, ++int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, + enum btrfs_mod_log_op op); + int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb); + struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info, +@@ -41,11 +41,11 @@ struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info, + struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq); + int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); + int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, +- struct extent_buffer *src, ++ const struct extent_buffer *src, + unsigned long dst_offset, + unsigned long src_offset, + int nr_items); +-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb, ++int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, + int dst_slot, int src_slot, + int nr_items); + u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info); +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c +index 197dfafbf40139..3622ba1d8e09f4 100644 +--- a/fs/btrfs/zoned.c ++++ b/fs/btrfs/zoned.c +@@ -1992,10 +1992,15 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group) + goto out_unlock; + } + +- /* No space left */ +- if (btrfs_zoned_bg_is_full(block_group)) { +- ret = false; +- goto 
out_unlock; ++ if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) { ++ /* The caller should check if the block group is full. */ ++ if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) { ++ ret = false; ++ goto out_unlock; ++ } ++ } else { ++ /* Since it is already written, it should have been active. */ ++ WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start); + } + + for (i = 0; i < map->num_stripes; i++) { +@@ -2346,12 +2351,12 @@ void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) + mutex_unlock(&fs_devices->device_list_mutex); + } + +-bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) ++bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) + { + struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; + struct btrfs_device *device; ++ u64 total = btrfs_super_total_bytes(fs_info->super_copy); + u64 used = 0; +- u64 total = 0; + u64 factor; + + ASSERT(btrfs_is_zoned(fs_info)); +@@ -2364,7 +2369,6 @@ bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) + if (!device->bdev) + continue; + +- total += device->disk_total_bytes; + used += device->bytes_used; + } + mutex_unlock(&fs_devices->device_list_mutex); +@@ -2418,7 +2422,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info) + + spin_lock(&block_group->lock); + if (block_group->reserved || block_group->alloc_offset == 0 || +- (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) || ++ !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) || + test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { + spin_unlock(&block_group->lock); + continue; +diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h +index b9cec523b77842..448955641d1143 100644 +--- a/fs/btrfs/zoned.h ++++ b/fs/btrfs/zoned.h +@@ -77,7 +77,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, + struct extent_buffer *eb); + void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg); + void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info); +-bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info); ++bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info); + void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, + u64 length); + int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info); +@@ -237,7 +237,7 @@ static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { } + + static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { } + +-static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) ++static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) + { + return false; + } +diff --git a/fs/buffer.c b/fs/buffer.c +index 4b86e971efd8a1..32df6163ffed5f 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -157,8 +157,8 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) + */ + void end_buffer_read_sync(struct buffer_head *bh, int uptodate) + { +- __end_buffer_read_notouch(bh, uptodate); + put_bh(bh); ++ __end_buffer_read_notouch(bh, uptodate); + } + EXPORT_SYMBOL(end_buffer_read_sync); + +diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h +index 2d63da48635ab8..14b26036055e40 100644 +--- a/fs/crypto/fscrypt_private.h ++++ b/fs/crypto/fscrypt_private.h +@@ -27,6 +27,22 @@ + */ + #define FSCRYPT_MIN_KEY_SIZE 16 + ++/* ++ * This mask is passed as the third argument to the crypto_alloc_*() functions ++ * to prevent fscrypt from using the Crypto API drivers for non-inline crypto ++ * engines. 
Those drivers have been problematic for fscrypt. fscrypt users
++ * have reported hangs and even incorrect en/decryption with these drivers.
++ * Since going to the driver, off CPU, and back again is really slow, such
++ * drivers can be over 50 times slower than the CPU-based code for fscrypt's
++ * workload. Even on platforms that lack AES instructions on the CPU, using the
++ * offloads has been shown to be slower, even staying with AES. (Of course,
++ * Adiantum is faster still, and is the recommended option on such platforms...)
++ *
++ * Note that fscrypt also supports inline crypto engines. Those don't use the
++ * Crypto API and work much better than the old-style (non-inline) engines.
++ */
++#define FSCRYPT_CRYPTOAPI_MASK \
++ (CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY)
+ #define FSCRYPT_CONTEXT_V1 1
+ #define FSCRYPT_CONTEXT_V2 2
+ 
+diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
+index 5a384dad2c72f3..b7f5e7884e03b9 100644
+--- a/fs/crypto/hkdf.c
++++ b/fs/crypto/hkdf.c
+@@ -72,7 +72,7 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
+ u8 prk[HKDF_HASHLEN];
+ int err;
+ 
+- hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
++ hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, FSCRYPT_CRYPTOAPI_MASK);
+ if (IS_ERR(hmac_tfm)) {
+ fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
+ PTR_ERR(hmac_tfm));
+diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
+index 361f41ef46c787..2348fc2a47f86a 100644
+--- a/fs/crypto/keysetup.c
++++ b/fs/crypto/keysetup.c
+@@ -103,7 +103,8 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
+ struct crypto_skcipher *tfm;
+ int err;
+ 
+- tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
++ tfm = crypto_alloc_skcipher(mode->cipher_str, 0,
++ FSCRYPT_CRYPTOAPI_MASK);
+ if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT) {
+ fscrypt_warn(inode,
+diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
+index 75dabd9b27f9b6..159dd0288349a0 100644
+--- a/fs/crypto/keysetup_v1.c
++++ b/fs/crypto/keysetup_v1.c
+@@ -52,7 +52,8 @@ static int derive_key_aes(const u8 *master_key,
+ struct skcipher_request *req = NULL;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist src_sg, dst_sg;
+- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
++ struct crypto_skcipher *tfm =
++ crypto_alloc_skcipher("ecb(aes)", 0, FSCRYPT_CRYPTOAPI_MASK);
+ 
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 31b32d9e7bbcea..6b2d655c1cefcd 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -217,6 +217,7 @@ struct eventpoll {
+ /* used to optimize loop detection check */
+ u64 gen;
+ struct hlist_head refs;
++ u8 loop_check_depth;
+ 
+ /*
+ * usage count, used together with epitem->dying to
+@@ -1986,23 +1987,24 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ }
+ 
+ /**
+- * ep_loop_check_proc - verify that adding an epoll file inside another
+- * epoll structure does not violate the constraints, in
+- * terms of closed loops, or too deep chains (which can
+- * result in excessive stack usage).
++ * ep_loop_check_proc - verify that adding an epoll file @ep inside another
++ * epoll file does not create closed loops, and
++ * determine the depth of the subtree starting at @ep
+ *
+ * @ep: the &struct eventpoll to be currently checked.
+ * @depth: Current depth of the path being checked.
+ * +- * Return: %zero if adding the epoll @file inside current epoll +- * structure @ep does not violate the constraints, or %-1 otherwise. ++ * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep. + */ + static int ep_loop_check_proc(struct eventpoll *ep, int depth) + { +- int error = 0; ++ int result = 0; + struct rb_node *rbp; + struct epitem *epi; + ++ if (ep->gen == loop_check_gen) ++ return ep->loop_check_depth; ++ + mutex_lock_nested(&ep->mtx, depth + 1); + ep->gen = loop_check_gen; + for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { +@@ -2010,13 +2012,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) + if (unlikely(is_file_epoll(epi->ffd.file))) { + struct eventpoll *ep_tovisit; + ep_tovisit = epi->ffd.file->private_data; +- if (ep_tovisit->gen == loop_check_gen) +- continue; + if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS) +- error = -1; ++ result = INT_MAX; + else +- error = ep_loop_check_proc(ep_tovisit, depth + 1); +- if (error != 0) ++ result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1); ++ if (result > EP_MAX_NESTS) + break; + } else { + /* +@@ -2030,9 +2030,27 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) + list_file(epi->ffd.file); + } + } ++ ep->loop_check_depth = result; + mutex_unlock(&ep->mtx); + +- return error; ++ return result; ++} ++ ++/** ++ * ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards ++ */ ++static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth) ++{ ++ int result = 0; ++ struct epitem *epi; ++ ++ if (ep->gen == loop_check_gen) ++ return ep->loop_check_depth; ++ hlist_for_each_entry_rcu(epi, &ep->refs, fllink) ++ result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1); ++ ep->gen = loop_check_gen; ++ ep->loop_check_depth = result; ++ return result; + } + + /** +@@ -2048,8 +2066,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth) + */ + static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to) + { ++ int depth, upwards_depth; ++ + inserting_into = ep; +- return ep_loop_check_proc(to, 0); ++ /* ++ * Check how deep down we can get from @to, and whether it is possible ++ * to loop up to @ep. ++ */ ++ depth = ep_loop_check_proc(to, 0); ++ if (depth > EP_MAX_NESTS) ++ return -1; ++ /* Check how far up we can go from @ep. */ ++ rcu_read_lock(); ++ upwards_depth = ep_get_upwards_depth_proc(ep, 0); ++ rcu_read_unlock(); ++ ++ return (depth+1+upwards_depth > EP_MAX_NESTS) ? 
-1 : 0;
+ }
+ 
+ static void clear_tfile_check_list(void)
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index f4f81e349cefe1..6139a57fde70a2 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -994,6 +994,7 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+ struct exfat_hint_femp candi_empty;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int num_entries = exfat_calc_num_entries(p_uniname);
++ unsigned int clu_count = 0;
+ 
+ if (num_entries < 0)
+ return num_entries;
+@@ -1131,6 +1132,10 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+ } else {
+ if (exfat_get_next_cluster(sb, &clu.dir))
+ return -EIO;
++
++ /* break if the cluster chain includes a loop */
++ if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
++ goto not_found;
+ }
+ }
+ 
+@@ -1214,6 +1219,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
+ int i, count = 0;
+ int dentries_per_clu;
+ unsigned int entry_type;
++ unsigned int clu_count = 0;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+@@ -1246,6 +1252,12 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
+ } else {
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
++
++ if (unlikely(++clu_count > sbi->used_clusters)) {
++ exfat_fs_error(sb, "FAT or bitmap is corrupted");
++ return -EIO;
++ }
++
+ }
+ }
+ 
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index 24e1e05f9f34a7..407880901ee3fb 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -461,5 +461,15 @@ int exfat_count_num_clusters(struct super_block *sb,
+ }
+ 
+ *ret_count = count;
++
++ /*
++ * since exfat_count_used_clusters() is not called, sbi->used_clusters
++ * cannot be used here.
++ */
++ if (unlikely(i == sbi->num_clusters && clu != EXFAT_EOF_CLUSTER)) {
++ exfat_fs_error(sb, "The cluster chain has a loop");
++ return -EIO;
++ }
++
+ return 0;
+ }
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index f340e96b499f1c..4657f893dea786 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -893,6 +893,7 @@ static int exfat_check_dir_empty(struct super_block *sb,
+ {
+ int i, dentries_per_clu;
+ unsigned int type;
++ unsigned int clu_count = 0;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+@@ -929,6 +930,10 @@ static int exfat_check_dir_empty(struct super_block *sb,
+ } else {
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
++
++ /* break if the cluster chain includes a loop */
++ if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
++ break;
+ }
+ }
+ 
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index 2778bd9b631e72..5affc11d14615a 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -327,13 +327,12 @@ static void exfat_hash_init(struct super_block *sb)
+ INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+ }
+ 
+-static int exfat_read_root(struct inode *inode)
++static int exfat_read_root(struct inode *inode, struct exfat_chain *root_clu)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+- struct exfat_chain cdir;
+- int num_subdirs, num_clu = 0;
++ int num_subdirs;
+ 
+ exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ ei->entry = -1;
+@@ -346,12 +345,9 @@ static int exfat_read_root(struct inode *inode)
+ ei->hint_stat.clu = sbi->root_dir;
+ ei->hint_femp.eidx = EXFAT_HINT_NONE;
+ 
+- exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+- if (exfat_count_num_clusters(sb, &cdir, &num_clu))
+- return -EIO;
+- i_size_write(inode, num_clu << sbi->cluster_size_bits);
++ i_size_write(inode, EXFAT_CLU_TO_B(root_clu->size, sbi));
+ 
+- num_subdirs = exfat_count_dir_entries(sb, &cdir);
++ num_subdirs = exfat_count_dir_entries(sb, root_clu);
+ if (num_subdirs < 0)
+ return -EIO;
+ set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR);
+@@ -567,7 +563,8 @@ static int exfat_verify_boot_region(struct super_block *sb)
+ }
+ 
+ /* mount the file system volume */
+-static int __exfat_fill_super(struct super_block *sb)
++static int __exfat_fill_super(struct super_block *sb,
++ struct exfat_chain *root_clu)
+ {
+ int ret;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+@@ -584,6 +581,18 @@ static int __exfat_fill_super(struct super_block *sb)
+ goto free_bh;
+ }
+ 
++ /*
++ * Call exfat_count_num_cluster() before searching for up-case and
++ * bitmap directory entries to avoid infinite loop if they are missing
++ * and the cluster chain includes a loop.
++ */
++ exfat_chain_set(root_clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
++ ret = exfat_count_num_clusters(sb, root_clu, &root_clu->size);
++ if (ret) {
++ exfat_err(sb, "failed to count the number of clusters in root");
++ goto free_bh;
++ }
++
+ ret = exfat_create_upcase_table(sb);
+ if (ret) {
+ exfat_err(sb, "failed to load upcase table");
+@@ -618,6 +627,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+ struct inode *root_inode;
++ struct exfat_chain root_clu;
+ int err;
+ 
+ if (opts->allow_utime == (unsigned short)-1)
+@@ -636,7 +646,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+ sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS;
+ sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS;
+ 
+- err = __exfat_fill_super(sb);
++ err = __exfat_fill_super(sb, &root_clu);
+ if (err) {
+ exfat_err(sb, "failed to recognize exfat type");
+ goto check_nls_io;
+@@ -671,7 +681,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ root_inode->i_ino = EXFAT_ROOT_INO;
+ inode_set_iversion(root_inode, 1);
+- err = exfat_read_root(root_inode);
++ err = exfat_read_root(root_inode, &root_clu);
+ if (err) {
+ exfat_err(sb, "failed to initialize root inode");
+ goto put_inode;
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index 314b415ee51860..6ff1f8f29a3c62 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -895,9 +895,19 @@ int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+ {
+ int ret;
++ loff_t i_size;
+ 
+ inode_lock(inode);
+- len = min_t(u64, len, i_size_read(inode));
++ i_size = i_size_read(inode);
++ /*
++ * iomap_fiemap() returns EINVAL for 0 length. Make sure we don't trim
++ * length to 0 but still trim the range as much as possible since
++ * ext2_get_blocks() iterates unmapped space block by block which is
++ * slow.
++ */
++ if (i_size == 0)
++ i_size = 1;
++ len = min_t(u64, len, i_size);
+ ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
+ inode_unlock(inode);
+ 
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 53a05b8292f033..1b68586f73f3fe 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -393,6 +393,14 @@ static unsigned int ext4_getfsmap_find_sb(struct super_block *sb,
+ /* Reserved GDT blocks */
+ if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) {
+ len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
++
++ /*
++ * mkfs.ext4 can set s_reserved_gdt_blocks as 0 in some cases,
++ * check for that.
++ */
++ if (!len)
++ return 0;
++
+ error = ext4_getfsmap_fill(meta_list, fsb, len,
+ EXT4_FMR_OWN_RESV_GDT);
+ if (error)
+@@ -526,6 +534,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ ext4_group_t end_ag;
+ ext4_grpblk_t first_cluster;
+ ext4_grpblk_t last_cluster;
++ struct ext4_fsmap irec;
+ int error = 0;
+ 
+ bofs = le32_to_cpu(sbi->s_es->s_first_data_block);
+@@ -609,10 +618,18 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ goto err;
+ }
+ 
+- /* Report any gaps at the end of the bg */
++ /*
++ * The dummy record below will cause ext4_getfsmap_helper() to report
++ * any allocated blocks at the end of the range.
++ */ ++ irec.fmr_device = 0; ++ irec.fmr_physical = end_fsb + 1; ++ irec.fmr_length = 0; ++ irec.fmr_owner = EXT4_FMR_OWN_FREE; ++ irec.fmr_flags = 0; ++ + info->gfi_last = true; +- error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1, +- 0, info); ++ error = ext4_getfsmap_helper(sb, info, &irec); + if (error) + goto err; + +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c +index f2c495b745f1e4..d18a5bee102157 100644 +--- a/fs/ext4/indirect.c ++++ b/fs/ext4/indirect.c +@@ -539,7 +539,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, + int indirect_blks; + int blocks_to_boundary = 0; + int depth; +- int count = 0; ++ u64 count = 0; + ext4_fsblk_t first_block = 0; + + trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); +@@ -588,7 +588,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, + count++; + /* Fill in size of a hole we found */ + map->m_pblk = 0; +- map->m_len = min_t(unsigned int, map->m_len, count); ++ map->m_len = umin(map->m_len, count); + goto cleanup; + } + +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index c85647a0ba09fb..5fa1dd58ac42c1 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -298,7 +298,11 @@ static int ext4_create_inline_data(handle_t *handle, + if (error) + goto out; + +- BUG_ON(!is.s.not_found); ++ if (!is.s.not_found) { ++ EXT4_ERROR_INODE(inode, "unexpected inline data xattr"); ++ error = -EFSCORRUPTED; ++ goto out; ++ } + + error = ext4_xattr_ibody_set(handle, inode, &i, &is); + if (error) { +@@ -349,7 +353,11 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, + if (error) + goto out; + +- BUG_ON(is.s.not_found); ++ if (is.s.not_found) { ++ EXT4_ERROR_INODE(inode, "missing inline data xattr"); ++ error = -EFSCORRUPTED; ++ goto out; ++ } + + len -= EXT4_MIN_INLINE_DATA_SIZE; + value = kzalloc(len, GFP_NOFS); +@@ -1966,7 +1974,12 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline) + if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0) + goto out_error; + +- BUG_ON(is.s.not_found); ++ if (is.s.not_found) { ++ EXT4_ERROR_INODE(inode, ++ "missing inline data xattr"); ++ err = -EFSCORRUPTED; ++ goto out_error; ++ } + + value_len = le32_to_cpu(is.s.here->e_value_size); + value = kmalloc(value_len, GFP_NOFS); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 9694ef6b996e47..886d4dfa737a27 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -146,7 +146,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, + */ + int ext4_inode_is_fast_symlink(struct inode *inode) + { +- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { ++ if (!ext4_has_feature_ea_inode(inode->i_sb)) { + int ea_blocks = EXT4_I(inode)->i_file_acl ? + EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; + +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 8a9f8c95c6f1eb..c5f642096ab4ec 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -841,30 +841,30 @@ static void + mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +- int new_order; ++ int new, old; + +- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0) ++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN)) + return; + +- new_order = mb_avg_fragment_size_order(sb, +- grp->bb_free / grp->bb_fragments); +- if (new_order == grp->bb_avg_fragment_size_order) ++ old = grp->bb_avg_fragment_size_order; ++ new = grp->bb_fragments == 0 ? 
-1 : ++ mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments); ++ if (new == old) + return; + +- if (grp->bb_avg_fragment_size_order != -1) { +- write_lock(&sbi->s_mb_avg_fragment_size_locks[ +- grp->bb_avg_fragment_size_order]); ++ if (old >= 0) { ++ write_lock(&sbi->s_mb_avg_fragment_size_locks[old]); + list_del(&grp->bb_avg_fragment_size_node); +- write_unlock(&sbi->s_mb_avg_fragment_size_locks[ +- grp->bb_avg_fragment_size_order]); ++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]); ++ } ++ ++ grp->bb_avg_fragment_size_order = new; ++ if (new >= 0) { ++ write_lock(&sbi->s_mb_avg_fragment_size_locks[new]); ++ list_add_tail(&grp->bb_avg_fragment_size_node, ++ &sbi->s_mb_avg_fragment_size[new]); ++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]); + } +- grp->bb_avg_fragment_size_order = new_order; +- write_lock(&sbi->s_mb_avg_fragment_size_locks[ +- grp->bb_avg_fragment_size_order]); +- list_add_tail(&grp->bb_avg_fragment_size_node, +- &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); +- write_unlock(&sbi->s_mb_avg_fragment_size_locks[ +- grp->bb_avg_fragment_size_order]); + } + + /* +@@ -1150,33 +1150,28 @@ static void + mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +- int i; ++ int new, old = grp->bb_largest_free_order; + +- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) +- if (grp->bb_counters[i] > 0) ++ for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--) ++ if (grp->bb_counters[new] > 0) + break; ++ + /* No need to move between order lists? */ +- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || +- i == grp->bb_largest_free_order) { +- grp->bb_largest_free_order = i; ++ if (new == old) + return; +- } + +- if (grp->bb_largest_free_order >= 0) { +- write_lock(&sbi->s_mb_largest_free_orders_locks[ +- grp->bb_largest_free_order]); ++ if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) { ++ write_lock(&sbi->s_mb_largest_free_orders_locks[old]); + list_del_init(&grp->bb_largest_free_order_node); +- write_unlock(&sbi->s_mb_largest_free_orders_locks[ +- grp->bb_largest_free_order]); ++ write_unlock(&sbi->s_mb_largest_free_orders_locks[old]); + } +- grp->bb_largest_free_order = i; +- if (grp->bb_largest_free_order >= 0 && grp->bb_free) { +- write_lock(&sbi->s_mb_largest_free_orders_locks[ +- grp->bb_largest_free_order]); ++ ++ grp->bb_largest_free_order = new; ++ if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) { ++ write_lock(&sbi->s_mb_largest_free_orders_locks[new]); + list_add_tail(&grp->bb_largest_free_order_node, +- &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); +- write_unlock(&sbi->s_mb_largest_free_orders_locks[ +- grp->bb_largest_free_order]); ++ &sbi->s_mb_largest_free_orders[new]); ++ write_unlock(&sbi->s_mb_largest_free_orders_locks[new]); + } + } + +diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c +index e5b47dda331759..a23b0c01f8096d 100644 +--- a/fs/ext4/orphan.c ++++ b/fs/ext4/orphan.c +@@ -590,8 +590,9 @@ int ext4_init_orphan_info(struct super_block *sb) + } + oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; + oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; +- oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block), +- GFP_KERNEL); ++ oi->of_binfo = kmalloc_array(oi->of_blocks, ++ sizeof(struct ext4_orphan_block), ++ GFP_KERNEL); + if (!oi->of_binfo) { + ret = -ENOMEM; + goto out_put; +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index d2b58f940aab5e..7e3906f1390975 100644 +--- a/fs/ext4/super.c ++++ 
b/fs/ext4/super.c +@@ -2028,6 +2028,9 @@ int ext4_init_fs_context(struct fs_context *fc) + fc->fs_private = ctx; + fc->ops = &ext4_context_ops; + ++ /* i_version is always enabled now */ ++ fc->sb_flags |= SB_I_VERSION; ++ + return 0; + } + +@@ -5305,9 +5308,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) + sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | + (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); + +- /* i_version is always enabled now */ +- sb->s_flags |= SB_I_VERSION; +- + err = ext4_check_feature_compatibility(sb, es, silent); + if (err) + goto failed_mount; +@@ -5398,6 +5398,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) + err = ext4_load_and_init_journal(sb, es, ctx); + if (err) + goto failed_mount3a; ++ if (bdev_read_only(sb->s_bdev)) ++ needs_recovery = 0; + } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && + ext4_has_feature_journal_needs_recovery(sb)) { + ext4_msg(sb, KERN_ERR, "required journal recovery " +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index 8f0cb7c7eedeb4..031015823acb2b 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -991,6 +991,18 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + ++ err = setattr_prepare(idmap, dentry, attr); ++ if (err) ++ return err; ++ ++ err = fscrypt_prepare_setattr(dentry, attr); ++ if (err) ++ return err; ++ ++ err = fsverity_prepare_setattr(dentry, attr); ++ if (err) ++ return err; ++ + if (unlikely(IS_IMMUTABLE(inode))) + return -EPERM; + +@@ -1008,18 +1020,6 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + return -EINVAL; + } + +- err = setattr_prepare(idmap, dentry, attr); +- if (err) +- return err; +- +- err = fscrypt_prepare_setattr(dentry, attr); +- if (err) +- return err; +- +- err = fsverity_prepare_setattr(dentry, attr); +- if (err) +- return err; +- + if (is_quota_modification(idmap, inode, attr)) { + err = f2fs_dquot_initialize(inode); + if (err) +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c +index b00d66b953210d..1b404937743cf3 100644 +--- a/fs/f2fs/node.c ++++ b/fs/f2fs/node.c +@@ -799,6 +799,16 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) + for (i = 1; i <= level; i++) { + bool done = false; + ++ if (nids[i] && nids[i] == dn->inode->i_ino) { ++ err = -EFSCORRUPTED; ++ f2fs_err_ratelimited(sbi, ++ "inode mapping table is corrupted, run fsck to fix it, " ++ "ino:%lu, nid:%u, level:%d, offset:%d", ++ dn->inode->i_ino, nids[i], level, offset[level]); ++ set_sbi_flag(sbi, SBI_NEED_FSCK); ++ goto release_pages; ++ } ++ + if (!nids[i] && mode == ALLOC_NODE) { + /* alloc new node */ + if (!f2fs_alloc_nid(sbi, &(nids[i]))) { +diff --git a/fs/file.c b/fs/file.c +index f8cf6728c6a03f..0ce6a6930276d3 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -90,18 +90,11 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) + * 'unsigned long' in some places, but simply because that is how the Linux + * kernel bitmaps are defined to work: they are not "bits in an array of bytes", + * they are very much "bits in an array of unsigned long". +- * +- * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied +- * by that "1024/sizeof(ptr)" before, we already know there are sufficient +- * clear low bits. Clang seems to realize that, gcc ends up being confused. +- * +- * On a 128-bit machine, the ALIGN() would actually matter. 
In the meantime, +- * let's consider it documentation (and maybe a test-case for gcc to improve +- * its code generation ;) + */ +-static struct fdtable * alloc_fdtable(unsigned int nr) ++static struct fdtable *alloc_fdtable(unsigned int slots_wanted) + { + struct fdtable *fdt; ++ unsigned int nr; + void *data; + + /* +@@ -109,22 +102,47 @@ static struct fdtable * alloc_fdtable(unsigned int nr) + * Allocation steps are keyed to the size of the fdarray, since it + * grows far faster than any of the other dynamic data. We try to fit + * the fdarray into comfortable page-tuned chunks: starting at 1024B +- * and growing in powers of two from there on. ++ * and growing in powers of two from there on. Since we called only ++ * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab ++ * already gives BITS_PER_LONG slots), the above boils down to ++ * 1. use the smallest power of two large enough to give us that many ++ * slots. ++ * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is ++ * 256 slots (i.e. 1Kb fd array). ++ * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there ++ * and we are never going to be asked for 64 or less. + */ +- nr /= (1024 / sizeof(struct file *)); +- nr = roundup_pow_of_two(nr + 1); +- nr *= (1024 / sizeof(struct file *)); +- nr = ALIGN(nr, BITS_PER_LONG); ++ if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256) ++ nr = 256; ++ else ++ nr = roundup_pow_of_two(slots_wanted); + /* + * Note that this can drive nr *below* what we had passed if sysctl_nr_open +- * had been set lower between the check in expand_files() and here. Deal +- * with that in caller, it's cheaper that way. ++ * had been set lower between the check in expand_files() and here. + * + * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise + * bitmaps handling below becomes unpleasant, to put it mildly... + */ +- if (unlikely(nr > sysctl_nr_open)) +- nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1; ++ if (unlikely(nr > sysctl_nr_open)) { ++ nr = round_down(sysctl_nr_open, BITS_PER_LONG); ++ if (nr < slots_wanted) ++ return ERR_PTR(-EMFILE); ++ } ++ ++ /* ++ * Check if the allocation size would exceed INT_MAX. kvmalloc_array() ++ * and kvmalloc() will warn if the allocation size is greater than ++ * INT_MAX, as filp_cachep objects are not __GFP_NOWARN. ++ * ++ * This can happen when sysctl_nr_open is set to a very high value and ++ * a process tries to use a file descriptor near that limit. For example, ++ * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what ++ * systemd typically sets it to - then trying to use a file descriptor ++ * close to that value will require allocating a file descriptor table ++ * that exceeds 8GB in size. ++ */ ++ if (unlikely(nr > INT_MAX / sizeof(struct file *))) ++ return ERR_PTR(-EMFILE); + + fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); + if (!fdt) +@@ -153,7 +171,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr) + out_fdt: + kfree(fdt); + out: +- return NULL; ++ return ERR_PTR(-ENOMEM); + } + + /* +@@ -170,7 +188,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr) + struct fdtable *new_fdt, *cur_fdt; + + spin_unlock(&files->file_lock); +- new_fdt = alloc_fdtable(nr); ++ new_fdt = alloc_fdtable(nr + 1); + + /* make sure all fd_install() have seen resize_in_progress + * or have finished their rcu_read_lock_sched() section. 
+@@ -179,16 +197,8 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr) + synchronize_rcu(); + + spin_lock(&files->file_lock); +- if (!new_fdt) +- return -ENOMEM; +- /* +- * extremely unlikely race - sysctl_nr_open decreased between the check in +- * caller and alloc_fdtable(). Cheaper to catch it here... +- */ +- if (unlikely(new_fdt->max_fds <= nr)) { +- __free_fdtable(new_fdt); +- return -EMFILE; +- } ++ if (IS_ERR(new_fdt)) ++ return PTR_ERR(new_fdt); + cur_fdt = files_fdtable(files); + BUG_ON(nr < cur_fdt->max_fds); + copy_fdtable(new_fdt, cur_fdt); +@@ -302,7 +312,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho + struct file **old_fds, **new_fds; + unsigned int open_files, i; + struct fdtable *old_fdt, *new_fdt; +- int error; + + newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); + if (!newf) +@@ -334,17 +343,10 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho + if (new_fdt != &newf->fdtab) + __free_fdtable(new_fdt); + +- new_fdt = alloc_fdtable(open_files - 1); +- if (!new_fdt) { +- error = -ENOMEM; +- goto out_release; +- } +- +- /* beyond sysctl_nr_open; nothing to do */ +- if (unlikely(new_fdt->max_fds < open_files)) { +- __free_fdtable(new_fdt); +- error = -EMFILE; +- goto out_release; ++ new_fdt = alloc_fdtable(open_files); ++ if (IS_ERR(new_fdt)) { ++ kmem_cache_free(files_cachep, newf); ++ return ERR_CAST(new_fdt); + } + + /* +@@ -393,10 +395,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho + rcu_assign_pointer(newf->fdt, new_fdt); + + return newf; +- +-out_release: +- kmem_cache_free(files_cachep, newf); +- return ERR_PTR(error); + } + + static struct fdtable *close_files(struct files_struct * files) +diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c +index 1f42eae112fb88..b1a368fc089f32 100644 +--- a/fs/gfs2/meta_io.c ++++ b/fs/gfs2/meta_io.c +@@ -93,6 +93,7 @@ const struct address_space_operations gfs2_meta_aops = { + .invalidate_folio = block_invalidate_folio, + .writepage = gfs2_aspace_writepage, + .release_folio = gfs2_release_folio, ++ .migrate_folio = buffer_migrate_folio_norefs, + }; + + const struct address_space_operations gfs2_rgrp_aops = { +@@ -100,6 +101,7 @@ const struct address_space_operations gfs2_rgrp_aops = { + .invalidate_folio = block_invalidate_folio, + .writepage = gfs2_aspace_writepage, + .release_folio = gfs2_release_folio, ++ .migrate_folio = buffer_migrate_folio_norefs, + }; + + /** +diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c +index ef9498a6e88acd..34e9804e0f3601 100644 +--- a/fs/hfs/bfind.c ++++ b/fs/hfs/bfind.c +@@ -16,6 +16,9 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) + { + void *ptr; + ++ if (!tree || !fd) ++ return -EINVAL; ++ + fd->tree = tree; + fd->bnode = NULL; + ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); +diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c +index cb823a8a6ba960..e8cd1a31f2470c 100644 +--- a/fs/hfs/bnode.c ++++ b/fs/hfs/bnode.c +@@ -15,6 +15,48 @@ + + #include "btree.h" + ++static inline ++bool is_bnode_offset_valid(struct hfs_bnode *node, int off) ++{ ++ bool is_valid = off < node->tree->node_size; ++ ++ if (!is_valid) { ++ pr_err("requested invalid offset: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off); ++ } ++ ++ return is_valid; ++} ++ ++static inline ++int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) ++{ ++ unsigned int 
node_size; ++ ++ if (!is_bnode_offset_valid(node, off)) ++ return 0; ++ ++ node_size = node->tree->node_size; ++ ++ if ((off + len) > node_size) { ++ int new_len = (int)node_size - off; ++ ++ pr_err("requested length has been corrected: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, " ++ "requested_len %d, corrected_len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len, new_len); ++ ++ return new_len; ++ } ++ ++ return len; ++} ++ + void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) + { + struct page *page; +@@ -22,6 +64,20 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) + int bytes_read; + int bytes_to_read; + ++ if (!is_bnode_offset_valid(node, off)) ++ return; ++ ++ if (len == 0) { ++ pr_err("requested zero length: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len); ++ return; ++ } ++ ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + pagenum = off >> PAGE_SHIFT; + off &= ~PAGE_MASK; /* compute page offset for the first page */ +@@ -80,6 +136,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) + { + struct page *page; + ++ if (!is_bnode_offset_valid(node, off)) ++ return; ++ ++ if (len == 0) { ++ pr_err("requested zero length: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len); ++ return; ++ } ++ ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + page = node->page[0]; + +@@ -104,6 +174,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) + { + struct page *page; + ++ if (!is_bnode_offset_valid(node, off)) ++ return; ++ ++ if (len == 0) { ++ pr_err("requested zero length: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len); ++ return; ++ } ++ ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + page = node->page[0]; + +@@ -119,6 +203,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, + hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); + if (!len) + return; ++ ++ len = check_and_correct_requested_length(src_node, src, len); ++ len = check_and_correct_requested_length(dst_node, dst, len); ++ + src += src_node->page_offset; + dst += dst_node->page_offset; + src_page = src_node->page[0]; +@@ -136,6 +224,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) + hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); + if (!len) + return; ++ ++ len = check_and_correct_requested_length(node, src, len); ++ len = check_and_correct_requested_length(node, dst, len); ++ + src += node->page_offset; + dst += node->page_offset; + page = node->page[0]; +@@ -482,6 +574,7 @@ void hfs_bnode_put(struct hfs_bnode *node) + if (test_bit(HFS_BNODE_DELETED, &node->flags)) { + hfs_bnode_unhash(node); + spin_unlock(&tree->hash_lock); ++ hfs_bnode_clear(node, 0, tree->node_size); + hfs_bmap_free(node); + hfs_bnode_free(node); + return; +diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c +index 2fa4b1f8cc7fb0..e86e1e235658fa 100644 +--- a/fs/hfs/btree.c ++++ b/fs/hfs/btree.c +@@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke + struct 
hfs_btree *tree; + struct hfs_btree_header_rec *head; + struct address_space *mapping; +- struct page *page; ++ struct folio *folio; ++ struct buffer_head *bh; + unsigned int size; ++ u16 dblock; ++ sector_t start_block; ++ loff_t offset; + + tree = kzalloc(sizeof(*tree), GFP_KERNEL); + if (!tree) +@@ -75,12 +79,40 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke + unlock_new_inode(tree->inode); + + mapping = tree->inode->i_mapping; +- page = read_mapping_page(mapping, 0, NULL); +- if (IS_ERR(page)) ++ folio = filemap_grab_folio(mapping, 0); ++ if (IS_ERR(folio)) + goto free_inode; + ++ folio_zero_range(folio, 0, folio_size(folio)); ++ ++ dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0); ++ start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div); ++ ++ size = folio_size(folio); ++ offset = 0; ++ while (size > 0) { ++ size_t len; ++ ++ bh = sb_bread(sb, start_block); ++ if (!bh) { ++ pr_err("unable to read tree header\n"); ++ goto put_folio; ++ } ++ ++ len = min_t(size_t, folio_size(folio), sb->s_blocksize); ++ memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize); ++ ++ brelse(bh); ++ ++ start_block++; ++ offset += len; ++ size -= len; ++ } ++ ++ folio_mark_uptodate(folio); ++ + /* Load the header */ +- head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + ++ head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) + + sizeof(struct hfs_bnode_desc)); + tree->root = be32_to_cpu(head->root); + tree->leaf_count = be32_to_cpu(head->leaf_count); +@@ -95,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke + + size = tree->node_size; + if (!is_power_of_2(size)) +- goto fail_page; ++ goto fail_folio; + if (!tree->node_count) +- goto fail_page; ++ goto fail_folio; + switch (id) { + case HFS_EXT_CNID: + if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { + pr_err("invalid extent max_key_len %d\n", + tree->max_key_len); +- goto fail_page; ++ goto fail_folio; + } + break; + case HFS_CAT_CNID: + if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { + pr_err("invalid catalog max_key_len %d\n", + tree->max_key_len); +- goto fail_page; ++ goto fail_folio; + } + break; + default: +@@ -121,12 +153,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke + tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; + + kunmap_local(head); +- put_page(page); ++ folio_unlock(folio); ++ folio_put(folio); + return tree; + +-fail_page: ++fail_folio: + kunmap_local(head); +- put_page(page); ++put_folio: ++ folio_unlock(folio); ++ folio_put(folio); + free_inode: + tree->inode->i_mapping->a_ops = &hfs_aops; + iput(tree->inode); +diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c +index 6d1878b99b3058..941c92525815e0 100644 +--- a/fs/hfs/extent.c ++++ b/fs/hfs/extent.c +@@ -71,7 +71,7 @@ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2) + * + * Find a block within an extent record + */ +-static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) ++u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) + { + int i; + u16 count; +diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h +index 49d02524e66717..f1402d71b092be 100644 +--- a/fs/hfs/hfs_fs.h ++++ b/fs/hfs/hfs_fs.h +@@ -190,6 +190,7 @@ extern const struct inode_operations hfs_dir_inode_operations; + + /* extent.c */ + extern int hfs_ext_keycmp(const btree_key *, const btree_key *); ++extern u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off); + extern int hfs_free_fork(struct super_block *, 
struct hfs_cat_file *, int); + extern int hfs_ext_write_extent(struct inode *); + extern int hfs_extend_file(struct inode *); +diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c +index 079ea80534f7de..14f4995588ff03 100644 +--- a/fs/hfsplus/bnode.c ++++ b/fs/hfsplus/bnode.c +@@ -18,12 +18,68 @@ + #include "hfsplus_fs.h" + #include "hfsplus_raw.h" + ++static inline ++bool is_bnode_offset_valid(struct hfs_bnode *node, int off) ++{ ++ bool is_valid = off < node->tree->node_size; ++ ++ if (!is_valid) { ++ pr_err("requested invalid offset: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off); ++ } ++ ++ return is_valid; ++} ++ ++static inline ++int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len) ++{ ++ unsigned int node_size; ++ ++ if (!is_bnode_offset_valid(node, off)) ++ return 0; ++ ++ node_size = node->tree->node_size; ++ ++ if ((off + len) > node_size) { ++ int new_len = (int)node_size - off; ++ ++ pr_err("requested length has been corrected: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, " ++ "requested_len %d, corrected_len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len, new_len); ++ ++ return new_len; ++ } ++ ++ return len; ++} ++ + /* Copy a specified range of bytes from the raw data of a node */ + void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) + { + struct page **pagep; + int l; + ++ if (!is_bnode_offset_valid(node, off)) ++ return; ++ ++ if (len == 0) { ++ pr_err("requested zero length: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len); ++ return; ++ } ++ ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; +@@ -81,6 +137,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) + struct page **pagep; + int l; + ++ if (!is_bnode_offset_valid(node, off)) ++ return; ++ ++ if (len == 0) { ++ pr_err("requested zero length: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len); ++ return; ++ } ++ ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; +@@ -109,6 +179,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) + struct page **pagep; + int l; + ++ if (!is_bnode_offset_valid(node, off)) ++ return; ++ ++ if (len == 0) { ++ pr_err("requested zero length: " ++ "NODE: id %u, type %#x, height %u, " ++ "node_size %u, offset %d, len %d\n", ++ node->this, node->type, node->height, ++ node->tree->node_size, off, len); ++ return; ++ } ++ ++ len = check_and_correct_requested_length(node, off, len); ++ + off += node->page_offset; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; +@@ -133,6 +217,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, + hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); + if (!len) + return; ++ ++ len = check_and_correct_requested_length(src_node, src, len); ++ len = check_and_correct_requested_length(dst_node, dst, len); ++ + src += src_node->page_offset; + dst += dst_node->page_offset; + src_page = src_node->page + (src >> PAGE_SHIFT); +@@ -187,6 +275,10 @@ void 
hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) + hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); + if (!len) + return; ++ ++ len = check_and_correct_requested_length(node, src, len); ++ len = check_and_correct_requested_length(node, dst, len); ++ + src += node->page_offset; + dst += node->page_offset; + if (dst > src) { +diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c +index 73342c925a4b6e..36b6cf2a3abba4 100644 +--- a/fs/hfsplus/unicode.c ++++ b/fs/hfsplus/unicode.c +@@ -132,7 +132,14 @@ int hfsplus_uni2asc(struct super_block *sb, + + op = astr; + ip = ustr->unicode; ++ + ustrlen = be16_to_cpu(ustr->length); ++ if (ustrlen > HFSPLUS_MAX_STRLEN) { ++ ustrlen = HFSPLUS_MAX_STRLEN; ++ pr_err("invalid length %u has been corrected to %d\n", ++ be16_to_cpu(ustr->length), ustrlen); ++ } ++ + len = *len_p; + ce1 = NULL; + compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); +diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c +index f7f9d0889df342..d5fd8e068486e9 100644 +--- a/fs/hfsplus/xattr.c ++++ b/fs/hfsplus/xattr.c +@@ -172,7 +172,11 @@ static int hfsplus_create_attributes_file(struct super_block *sb) + return PTR_ERR(attr_file); + } + +- BUG_ON(i_size_read(attr_file) != 0); ++ if (i_size_read(attr_file) != 0) { ++ err = -EIO; ++ pr_err("detected inconsistent attributes file, running fsck.hfsplus is recommended.\n"); ++ goto end_attr_file_creation; ++ } + + hip = HFSPLUS_I(attr_file); + +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index ac519515ef6c06..ab951fd475317c 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -136,7 +136,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) + vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); + vma->vm_ops = &hugetlb_vm_ops; + +- ret = seal_check_future_write(info->seals, vma); ++ ret = seal_check_write(info->seals, vma); + if (ret) + return ret; + +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c +index 8fda66c98a610f..368ae50d8a59e0 100644 +--- a/fs/jbd2/checkpoint.c ++++ b/fs/jbd2/checkpoint.c +@@ -285,6 +285,7 @@ int jbd2_log_do_checkpoint(journal_t *journal) + retry: + if (batch_count) + __flush_batch(journal, &batch_count); ++ cond_resched(); + spin_lock(&journal->j_list_lock); + goto restart; + } +diff --git a/fs/jfs/file.c b/fs/jfs/file.c +index 01b6912e60f808..742cadd1f37e84 100644 +--- a/fs/jfs/file.c ++++ b/fs/jfs/file.c +@@ -44,6 +44,9 @@ static int jfs_open(struct inode *inode, struct file *file) + { + int rc; + ++ if (S_ISREG(inode->i_mode) && inode->i_size < 0) ++ return -EIO; ++ + if ((rc = dquot_file_open(inode, file))) + return rc; + +diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c +index 920d58a1566bfb..66c38ef5e57111 100644 +--- a/fs/jfs/inode.c ++++ b/fs/jfs/inode.c +@@ -145,9 +145,9 @@ void jfs_evict_inode(struct inode *inode) + if (!inode->i_nlink && !is_bad_inode(inode)) { + dquot_initialize(inode); + ++ truncate_inode_pages_final(&inode->i_data); + if (JFS_IP(inode)->fileset == FILESYSTEM_I) { + struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap; +- truncate_inode_pages_final(&inode->i_data); + + if (test_cflag(COMMIT_Freewmap, inode)) + jfs_free_zero_link(inode); +diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c +index 5a877261c3fe48..cdfa699cd7c8fa 100644 +--- a/fs/jfs/jfs_dmap.c ++++ b/fs/jfs/jfs_dmap.c +@@ -1389,6 +1389,12 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) + (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth; + ti = bmp->db_agstart + 
bmp->db_agwidth * (agno & (agperlev - 1)); + ++ if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) { ++ jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n"); ++ release_metapage(mp); ++ return -EIO; ++ } ++ + /* dmap control page trees fan-out by 4 and a single allocation + * group may be described by 1 or 2 subtrees within the ag level + * dmap control page, depending upon the ag size. examine the ag's +diff --git a/fs/libfs.c b/fs/libfs.c +index f5566964aa7d13..b913ab238cc156 100644 +--- a/fs/libfs.c ++++ b/fs/libfs.c +@@ -610,7 +610,7 @@ void simple_recursive_removal(struct dentry *dentry, + struct dentry *victim = NULL, *child; + struct inode *inode = this->d_inode; + +- inode_lock(inode); ++ inode_lock_nested(inode, I_MUTEX_CHILD); + if (d_is_dir(this)) + inode->i_flags |= S_DEAD; + while ((child = find_next_child(this, victim)) == NULL) { +@@ -622,7 +622,7 @@ void simple_recursive_removal(struct dentry *dentry, + victim = this; + this = this->d_parent; + inode = this->d_inode; +- inode_lock(inode); ++ inode_lock_nested(inode, I_MUTEX_CHILD); + if (simple_positive(victim)) { + d_invalidate(victim); // avoid lost mounts + if (d_is_dir(victim)) +diff --git a/fs/namespace.c b/fs/namespace.c +index 6a9c53c800c4e4..f79226472251ba 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2526,6 +2526,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) + return attach_recursive_mnt(mnt, p, mp, 0); + } + ++static int may_change_propagation(const struct mount *m) ++{ ++ struct mnt_namespace *ns = m->mnt_ns; ++ ++ // it must be mounted in some namespace ++ if (IS_ERR_OR_NULL(ns)) // is_mounted() ++ return -EINVAL; ++ // and the caller must be admin in userns of that namespace ++ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) ++ return -EPERM; ++ return 0; ++} ++ + /* + * Sanity check the flags to change_mnt_propagation. 
+ */ +@@ -2562,10 +2575,10 @@ static int do_change_type(struct path *path, int ms_flags) + return -EINVAL; + + namespace_lock(); +- if (!check_mnt(mnt)) { +- err = -EINVAL; ++ err = may_change_propagation(mnt); ++ if (err) + goto out_unlock; +- } ++ + if (type == MS_SHARED) { + err = invent_group_ids(mnt, recurse); + if (err) +@@ -2960,18 +2973,11 @@ static int do_set_group(struct path *from_path, struct path *to_path) + + namespace_lock(); + +- err = -EINVAL; +- /* To and From must be mounted */ +- if (!is_mounted(&from->mnt)) +- goto out; +- if (!is_mounted(&to->mnt)) +- goto out; +- +- err = -EPERM; +- /* We should be allowed to modify mount namespaces of both mounts */ +- if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) ++ err = may_change_propagation(from); ++ if (err) + goto out; +- if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) ++ err = may_change_propagation(to); ++ if (err) + goto out; + + err = -EINVAL; +diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c +index 6be13e0ec170d1..e498aade8c4796 100644 +--- a/fs/nfs/blocklayout/blocklayout.c ++++ b/fs/nfs/blocklayout/blocklayout.c +@@ -149,8 +149,8 @@ do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect, + + /* limit length to what the device mapping allows */ + end = disk_addr + *len; +- if (end >= map->start + map->len) +- *len = map->start + map->len - disk_addr; ++ if (end >= map->disk_offset + map->len) ++ *len = map->disk_offset + map->len - disk_addr; + + retry: + if (!bio) { +diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c +index 65cbb5607a5fc4..61ee0b6c0fba26 100644 +--- a/fs/nfs/blocklayout/dev.c ++++ b/fs/nfs/blocklayout/dev.c +@@ -199,10 +199,11 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, + struct pnfs_block_dev *child; + u64 chunk; + u32 chunk_idx; ++ u64 disk_chunk; + u64 disk_offset; + + chunk = div_u64(offset, dev->chunk_size); +- div_u64_rem(chunk, dev->nr_children, &chunk_idx); ++ disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx); + + if (chunk_idx >= dev->nr_children) { + dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", +@@ -215,7 +216,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, + offset = chunk * dev->chunk_size; + + /* disk offset of the stripe */ +- disk_offset = div_u64(offset, dev->nr_children); ++ disk_offset = disk_chunk * dev->chunk_size; + + child = &dev->children[chunk_idx]; + child->map(child, disk_offset, map); +diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c +index 8f7cff7a42938e..0add0f329816b0 100644 +--- a/fs/nfs/blocklayout/extent_tree.c ++++ b/fs/nfs/blocklayout/extent_tree.c +@@ -552,6 +552,15 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p, + return ret; + } + ++/** ++ * ext_tree_prepare_commit - encode extents that need to be committed ++ * @arg: layout commit data ++ * ++ * Return values: ++ * %0: Success, all required extents are encoded ++ * %-ENOSPC: Some extents are encoded, but not all, due to RPC size limit ++ * %-ENOMEM: Out of memory, extents not encoded ++ */ + int + ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) + { +@@ -568,12 +577,12 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) + start_p = page_address(arg->layoutupdate_page); + arg->layoutupdate_pages = &arg->layoutupdate_page; + +-retry: +- ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten); ++ ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, ++ 
&count, &arg->lastbytewritten); + if (unlikely(ret)) { + ext_tree_free_commitdata(arg, buffer_size); + +- buffer_size = ext_tree_layoutupdate_size(bl, count); ++ buffer_size = NFS_SERVER(arg->inode)->wsize; + count = 0; + + arg->layoutupdate_pages = +@@ -588,7 +597,8 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) + return -ENOMEM; + } + +- goto retry; ++ ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, ++ &count, &arg->lastbytewritten); + } + + *start_p = cpu_to_be32(count); +@@ -608,7 +618,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) + } + + dprintk("%s found %zu ranges\n", __func__, count); +- return 0; ++ return ret; + } + + void +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index aa09f930eeaf7e..cc764da581c43c 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -668,6 +668,44 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp, + } + EXPORT_SYMBOL_GPL(nfs_init_client); + ++static void nfs4_server_set_init_caps(struct nfs_server *server) ++{ ++#if IS_ENABLED(CONFIG_NFS_V4) ++ /* Set the basic capabilities */ ++ server->caps = server->nfs_client->cl_mvops->init_caps; ++ if (server->flags & NFS_MOUNT_NORDIRPLUS) ++ server->caps &= ~NFS_CAP_READDIRPLUS; ++ if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA) ++ server->caps &= ~NFS_CAP_READ_PLUS; ++ ++ /* ++ * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower ++ * authentication. ++ */ ++ if (nfs4_disable_idmapping && ++ server->client->cl_auth->au_flavor == RPC_AUTH_UNIX) ++ server->caps |= NFS_CAP_UIDGID_NOMAP; ++#endif ++} ++ ++void nfs_server_set_init_caps(struct nfs_server *server) ++{ ++ switch (server->nfs_client->rpc_ops->version) { ++ case 2: ++ server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; ++ break; ++ case 3: ++ server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; ++ if (!(server->flags & NFS_MOUNT_NORDIRPLUS)) ++ server->caps |= NFS_CAP_READDIRPLUS; ++ break; ++ default: ++ nfs4_server_set_init_caps(server); ++ break; ++ } ++} ++EXPORT_SYMBOL_GPL(nfs_server_set_init_caps); ++ + /* + * Create a version 2 or 3 client + */ +@@ -709,7 +747,6 @@ static int nfs_init_server(struct nfs_server *server, + /* Initialise the client representation from the mount data */ + server->flags = ctx->flags; + server->options = ctx->options; +- server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; + + switch (clp->rpc_ops->version) { + case 2: +@@ -745,6 +782,8 @@ static int nfs_init_server(struct nfs_server *server, + if (error < 0) + goto error; + ++ nfs_server_set_init_caps(server); ++ + /* Preserve the values of mount_server-related mount options */ + if (ctx->mount_server.addrlen) { + memcpy(&server->mountd_address, &ctx->mount_server.address, +@@ -919,7 +958,6 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour + target->acregmax = source->acregmax; + target->acdirmin = source->acdirmin; + target->acdirmax = source->acdirmax; +- target->caps = source->caps; + target->options = source->options; + target->auth_info = source->auth_info; + target->port = source->port; +@@ -1145,6 +1183,8 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, + if (error < 0) + goto out_free_server; + ++ nfs_server_set_init_caps(server); ++ + /* probe the filesystem info for this server filesystem */ + error = nfs_probe_server(server, fh); + if (error < 0) +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h +index 8870c72416acbd..4eea91d054b241 100644 +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -223,7 +223,7 @@ extern struct 
nfs_client * + nfs4_find_client_sessionid(struct net *, const struct sockaddr *, + struct nfs4_sessionid *, u32); + extern struct nfs_server *nfs_create_server(struct fs_context *); +-extern void nfs4_server_set_init_caps(struct nfs_server *); ++extern void nfs_server_set_init_caps(struct nfs_server *); + extern struct nfs_server *nfs4_create_server(struct fs_context *); + extern struct nfs_server *nfs4_create_referral_server(struct fs_context *); + extern int nfs4_update_server(struct nfs_server *server, const char *hostname, +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index ac80f87cb9d996..f6dc42de48f03d 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -1079,24 +1079,6 @@ static void nfs4_session_limit_xasize(struct nfs_server *server) + #endif + } + +-void nfs4_server_set_init_caps(struct nfs_server *server) +-{ +- /* Set the basic capabilities */ +- server->caps |= server->nfs_client->cl_mvops->init_caps; +- if (server->flags & NFS_MOUNT_NORDIRPLUS) +- server->caps &= ~NFS_CAP_READDIRPLUS; +- if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA) +- server->caps &= ~NFS_CAP_READ_PLUS; +- +- /* +- * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower +- * authentication. +- */ +- if (nfs4_disable_idmapping && +- server->client->cl_auth->au_flavor == RPC_AUTH_UNIX) +- server->caps |= NFS_CAP_UIDGID_NOMAP; +-} +- + static int nfs4_server_common_setup(struct nfs_server *server, + struct nfs_fh *mntfh, bool auth_probe) + { +@@ -1111,7 +1093,7 @@ static int nfs4_server_common_setup(struct nfs_server *server, + if (error < 0) + goto out; + +- nfs4_server_set_init_caps(server); ++ nfs_server_set_init_caps(server); + + /* Probe the root fh to retrieve its FSID and filehandle */ + error = nfs4_get_rootfh(server, mntfh, auth_probe); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 89d88d37e0cc5c..6debcfc63222d2 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -3951,7 +3951,7 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) + }; + int err; + +- nfs4_server_set_init_caps(server); ++ nfs_server_set_init_caps(server); + do { + err = nfs4_handle_exception(server, + _nfs4_server_capabilities(server, fhandle), +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 79d1ffdcbebd3d..b40c20bd364b04 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -3216,6 +3216,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) + struct nfs_inode *nfsi = NFS_I(inode); + loff_t end_pos; + int status; ++ bool mark_as_dirty = false; + + if (!pnfs_layoutcommit_outstanding(inode)) + return 0; +@@ -3267,19 +3268,23 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) + if (ld->prepare_layoutcommit) { + status = ld->prepare_layoutcommit(&data->args); + if (status) { +- put_cred(data->cred); ++ if (status != -ENOSPC) ++ put_cred(data->cred); + spin_lock(&inode->i_lock); + set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); + if (end_pos > nfsi->layout->plh_lwb) + nfsi->layout->plh_lwb = end_pos; +- goto out_unlock; ++ if (status != -ENOSPC) ++ goto out_unlock; ++ spin_unlock(&inode->i_lock); ++ mark_as_dirty = true; + } + } + + + status = nfs4_proc_layoutcommit(data, sync); + out: +- if (status) ++ if (status || mark_as_dirty) + mark_inode_dirty_sync(inode); + dprintk("<-- %s status %d\n", __func__, status); + return status; +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index e2875706e6bfd7..4aeb08040f3e5a 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -4282,10 +4282,16 @@ 
nfsd4_setclientid_confirm(struct svc_rqst *rqstp, + } + status = nfs_ok; + if (conf) { +- old = unconf; +- unhash_client_locked(old); +- nfsd4_change_callback(conf, &unconf->cl_cb_conn); +- } else { ++ if (get_client_locked(conf) == nfs_ok) { ++ old = unconf; ++ unhash_client_locked(old); ++ nfsd4_change_callback(conf, &unconf->cl_cb_conn); ++ } else { ++ conf = NULL; ++ } ++ } ++ ++ if (!conf) { + old = find_confirmed_client_by_name(&unconf->cl_name, nn); + if (old) { + status = nfserr_clid_inuse; +@@ -4302,10 +4308,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, + } + trace_nfsd_clid_replaced(&old->cl_clientid); + } ++ status = get_client_locked(unconf); ++ if (status != nfs_ok) { ++ old = NULL; ++ goto out; ++ } + move_to_confirmed(unconf); + conf = unconf; + } +- get_client_locked(conf); + spin_unlock(&nn->client_lock); + if (conf == unconf) + fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); +@@ -5765,6 +5775,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf + status = nfs4_check_deleg(cl, open, &dp); + if (status) + goto out; ++ if (dp && nfsd4_is_deleg_cur(open) && ++ (dp->dl_stid.sc_file != fp)) { ++ /* ++ * RFC8881 section 8.2.4 mandates the server to return ++ * NFS4ERR_BAD_STATEID if the selected table entry does ++ * not match the current filehandle. However returning ++ * NFS4ERR_BAD_STATEID in the OPEN can cause the client ++ * to repeatedly retry the operation with the same ++ * stateid, since the stateid itself is valid. To avoid ++ * this situation NFSD returns NFS4ERR_INVAL instead. ++ */ ++ status = nfserr_inval; ++ goto out; ++ } + stp = nfsd4_find_and_lock_existing_open(fp, open); + } else { + open->op_file = NULL; +diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c +index e1b856ecce61d0..6b93c909bdc9e4 100644 +--- a/fs/ntfs3/dir.c ++++ b/fs/ntfs3/dir.c +@@ -304,6 +304,9 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi, + if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN)) + return true; + ++ if (fname->name_len + sizeof(struct NTFS_DE) > le16_to_cpu(e->size)) ++ return true; ++ + name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name, + PATH_MAX); + if (name_len <= 0) { +diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c +index af7c0cbba74e3d..0150a221020988 100644 +--- a/fs/ntfs3/inode.c ++++ b/fs/ntfs3/inode.c +@@ -1130,10 +1130,10 @@ int inode_write_data(struct inode *inode, const void *data, size_t bytes) + * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK) + * for unicode string of @uni_len length. + */ +-static inline u32 ntfs_reparse_bytes(u32 uni_len) ++static inline u32 ntfs_reparse_bytes(u32 uni_len, bool is_absolute) + { + /* Header + unicode string + decorated unicode string. */ +- return sizeof(short) * (2 * uni_len + 4) + ++ return sizeof(short) * (2 * uni_len + (is_absolute ? 
4 : 0)) + + offsetof(struct REPARSE_DATA_BUFFER, + SymbolicLinkReparseBuffer.PathBuffer); + } +@@ -1146,8 +1146,11 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, + struct REPARSE_DATA_BUFFER *rp; + __le16 *rp_name; + typeof(rp->SymbolicLinkReparseBuffer) *rs; ++ bool is_absolute; + +- rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS); ++ is_absolute = (strlen(symname) > 1 && symname[1] == ':'); ++ ++ rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS); + if (!rp) + return ERR_PTR(-ENOMEM); + +@@ -1162,7 +1165,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, + goto out; + + /* err = the length of unicode name of symlink. */ +- *nsize = ntfs_reparse_bytes(err); ++ *nsize = ntfs_reparse_bytes(err, is_absolute); + + if (*nsize > sbi->reparse.max_size) { + err = -EFBIG; +@@ -1182,7 +1185,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, + + /* PrintName + SubstituteName. */ + rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err); +- rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8); ++ rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0)); + rs->PrintNameLength = rs->SubstituteNameOffset; + + /* +@@ -1190,16 +1193,18 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, + * parse this path. + * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE). + */ +- rs->Flags = 0; ++ rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE); + +- memmove(rp_name + err + 4, rp_name, sizeof(short) * err); ++ memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name, sizeof(short) * err); + +- /* Decorate SubstituteName. */ +- rp_name += err; +- rp_name[0] = cpu_to_le16('\\'); +- rp_name[1] = cpu_to_le16('?'); +- rp_name[2] = cpu_to_le16('?'); +- rp_name[3] = cpu_to_le16('\\'); ++ if (is_absolute) { ++ /* Decorate SubstituteName. 
*/ ++ rp_name += err; ++ rp_name[0] = cpu_to_le16('\\'); ++ rp_name[1] = cpu_to_le16('?'); ++ rp_name[2] = cpu_to_le16('?'); ++ rp_name[3] = cpu_to_le16('\\'); ++ } + + return rp; + out: +diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c +index b57140ebfad0f7..cd4bfd92ebd6e8 100644 +--- a/fs/orangefs/orangefs-debugfs.c ++++ b/fs/orangefs/orangefs-debugfs.c +@@ -354,7 +354,7 @@ static ssize_t orangefs_debug_read(struct file *file, + goto out; + + mutex_lock(&orangefs_debug_lock); +- sprintf_ret = sprintf(buf, "%s", (char *)file->private_data); ++ sprintf_ret = scnprintf(buf, ORANGEFS_MAX_DEBUG_STRING_LEN, "%s", (char *)file->private_data); + mutex_unlock(&orangefs_debug_lock); + + read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret); +diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c +index 81d425f571e28d..91f4e50af1e947 100644 +--- a/fs/smb/client/cifssmb.c ++++ b/fs/smb/client/cifssmb.c +@@ -3984,6 +3984,12 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon, + pSMB->FileName[name_len] = 0; + pSMB->FileName[name_len+1] = 0; + name_len += 2; ++ } else if (!searchName[0]) { ++ pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb); ++ pSMB->FileName[1] = 0; ++ pSMB->FileName[2] = 0; ++ pSMB->FileName[3] = 0; ++ name_len = 4; + } + } else { + name_len = copy_path_name(pSMB->FileName, searchName); +@@ -3995,6 +4001,10 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon, + pSMB->FileName[name_len] = '*'; + pSMB->FileName[name_len+1] = 0; + name_len += 2; ++ } else if (!searchName[0]) { ++ pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb); ++ pSMB->FileName[1] = 0; ++ name_len = 2; + } + } + +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 0588896c44567d..986286ef40466d 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -3162,18 +3162,15 @@ generic_ip_connect(struct TCP_Server_Info *server) + struct net *net = cifs_net_ns(server); + struct sock *sk; + +- rc = __sock_create(net, sfamily, SOCK_STREAM, +- IPPROTO_TCP, &server->ssocket, 1); ++ rc = sock_create_kern(net, sfamily, SOCK_STREAM, ++ IPPROTO_TCP, &server->ssocket); + if (rc < 0) { + cifs_server_dbg(VFS, "Error %d creating socket\n", rc); + return rc; + } + + sk = server->ssocket->sk; +- __netns_tracker_free(net, &sk->ns_tracker, false); +- sk->sk_net_refcnt = 1; +- get_net_track(net, &sk->ns_tracker, GFP_KERNEL); +- sock_inuse_add(net, 1); ++ sk_net_refcnt_upgrade(sk); + + /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ + cifs_dbg(FYI, "Socket created\n"); +@@ -3998,7 +3995,6 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, + return 0; + } + +- server->lstrp = jiffies; + server->tcpStatus = CifsInNegotiate; + server->neg_start = jiffies; + spin_unlock(&server->srv_lock); +diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c +index c351da8c3e2eaf..bbde7180a90ac4 100644 +--- a/fs/smb/client/sess.c ++++ b/fs/smb/client/sess.c +@@ -372,6 +372,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) + struct cifs_server_iface *old_iface = NULL; + struct cifs_server_iface *last_iface = NULL; + struct sockaddr_storage ss; ++ int retry = 0; + + spin_lock(&ses->chan_lock); + chan_index = cifs_ses_get_chan_index(ses, server); +@@ -400,6 +401,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) + return; + } + ++try_again: + last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface, + iface_head); + iface_min_speed = last_iface->speed; +@@ -437,6 +439,13 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server) + } + + if (list_entry_is_head(iface, &ses->iface_list, iface_head)) { ++ list_for_each_entry(iface, &ses->iface_list, iface_head) ++ iface->weight_fulfilled = 0; ++ ++ /* see if it can be satisfied in second attempt */ ++ if (!retry++) ++ goto try_again; ++ + iface = NULL; + cifs_dbg(FYI, "unable to find a suitable iface\n"); + } +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index d0734aa1961a3e..b74f7690473948 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -730,6 +730,13 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, + bytes_left -= sizeof(*p); + break; + } ++ /* Validate that Next doesn't point beyond the buffer */ ++ if (next > bytes_left) { ++ cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n", ++ __func__, next, bytes_left); ++ rc = -EINVAL; ++ goto out; ++ } + p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); + bytes_left -= next; + } +@@ -741,7 +748,9 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, + } + + /* Azure rounds the buffer size up 8, to a 16 byte boundary */ +- if ((bytes_left > 8) || p->Next) ++ if ((bytes_left > 8) || ++ (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next) ++ + sizeof(p->Next) && p->Next)) + cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); + + ses->iface_last_update = jiffies; +diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c +index 48d020e1f663b5..713bd1dcd39cce 100644 +--- a/fs/smb/client/smbdirect.c ++++ b/fs/smb/client/smbdirect.c +@@ -282,18 +282,20 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc) + log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", + request, wc->status); + +- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { +- log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", +- wc->status, wc->opcode); +- smbd_disconnect_rdma_connection(request->info); +- } +- + for (i = 0; i < request->num_sge; i++) + ib_dma_unmap_single(sc->ib.dev, + request->sge[i].addr, + request->sge[i].length, + DMA_TO_DEVICE); + ++ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { ++ log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", ++ wc->status, wc->opcode); ++ mempool_free(request, info->request_mempool); ++ smbd_disconnect_rdma_connection(info); ++ return; ++ } ++ + if (atomic_dec_and_test(&request->info->send_pending)) + 
wake_up(&request->info->wait_send_pending); + +@@ -1336,10 +1338,6 @@ void smbd_destroy(struct TCP_Server_Info *server) + log_rdma_event(INFO, "cancelling idle timer\n"); + cancel_delayed_work_sync(&info->idle_timer_work); + +- log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); +- wait_event(info->wait_send_pending, +- atomic_read(&info->send_pending) == 0); +- + /* It's not possible for upper layer to get to reassembly */ + log_rdma_event(INFO, "drain the reassembly queue\n"); + do { +@@ -2051,7 +2049,11 @@ int smbd_send(struct TCP_Server_Info *server, + */ + + wait_event(info->wait_send_pending, +- atomic_read(&info->send_pending) == 0); ++ atomic_read(&info->send_pending) == 0 || ++ sc->status != SMBDIRECT_SOCKET_CONNECTED); ++ ++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0) ++ rc = -EAGAIN; + + return rc; + } +diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c +index 66b20c3d963eb2..f5ebc200dd7383 100644 +--- a/fs/smb/server/connection.c ++++ b/fs/smb/server/connection.c +@@ -503,7 +503,8 @@ void ksmbd_conn_transport_destroy(void) + { + mutex_lock(&init_lock); + ksmbd_tcp_destroy(); +- ksmbd_rdma_destroy(); ++ ksmbd_rdma_stop_listening(); + stop_sessions(); ++ ksmbd_rdma_destroy(); + mutex_unlock(&init_lock); + } +diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h +index c769fe3859b37e..29ba91fc54076c 100644 +--- a/fs/smb/server/connection.h ++++ b/fs/smb/server/connection.h +@@ -45,7 +45,12 @@ struct ksmbd_conn { + struct mutex srv_mutex; + int status; + unsigned int cli_cap; +- __be32 inet_addr; ++ union { ++ __be32 inet_addr; ++#if IS_ENABLED(CONFIG_IPV6) ++ u8 inet6_addr[16]; ++#endif ++ }; + char *request_buf; + struct ksmbd_transport *transport; + struct nls_table *local_nls; +diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c +index e564432643ea30..2a3ef29ac0eb75 100644 +--- a/fs/smb/server/oplock.c ++++ b/fs/smb/server/oplock.c +@@ -1102,8 +1102,10 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp, + if (!atomic_inc_not_zero(&opinfo->refcount)) + continue; + +- if (ksmbd_conn_releasing(opinfo->conn)) ++ if (ksmbd_conn_releasing(opinfo->conn)) { ++ opinfo_put(opinfo); + continue; ++ } + + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); + opinfo_put(opinfo); +@@ -1139,8 +1141,11 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp) + if (!atomic_inc_not_zero(&opinfo->refcount)) + continue; + +- if (ksmbd_conn_releasing(opinfo->conn)) ++ if (ksmbd_conn_releasing(opinfo->conn)) { ++ opinfo_put(opinfo); + continue; ++ } ++ + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); + opinfo_put(opinfo); + } +@@ -1343,8 +1348,10 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, + if (!atomic_inc_not_zero(&brk_op->refcount)) + continue; + +- if (ksmbd_conn_releasing(brk_op->conn)) ++ if (ksmbd_conn_releasing(brk_op->conn)) { ++ opinfo_put(brk_op); + continue; ++ } + + if (brk_op->is_lease && (brk_op->o_lease->state & + (~(SMB2_LEASE_READ_CACHING_LE | +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index d3dd3b9b4005c6..85e7bc3a2bd33c 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -6011,7 +6011,6 @@ static int smb2_create_link(struct ksmbd_work *work, + { + char *link_name = NULL, *target_name = NULL, *pathname = NULL; + struct path path, parent_path; +- bool file_present = false; + int rc; + + if (buf_len < (u64)sizeof(struct smb2_file_link_info) + +@@ -6044,11 +6043,8 @@ static int smb2_create_link(struct 
ksmbd_work *work, + if (rc) { + if (rc != -ENOENT) + goto out; +- } else +- file_present = true; +- +- if (file_info->ReplaceIfExists) { +- if (file_present) { ++ } else { ++ if (file_info->ReplaceIfExists) { + rc = ksmbd_vfs_remove_file(work, &path); + if (rc) { + rc = -EINVAL; +@@ -6056,21 +6052,17 @@ static int smb2_create_link(struct ksmbd_work *work, + link_name); + goto out; + } +- } +- } else { +- if (file_present) { ++ } else { + rc = -EEXIST; + ksmbd_debug(SMB, "link already exists\n"); + goto out; + } ++ ksmbd_vfs_kern_path_unlock(&parent_path, &path); + } +- + rc = ksmbd_vfs_link(work, target_name, link_name); + if (rc) + rc = -EINVAL; + out: +- if (file_present) +- ksmbd_vfs_kern_path_unlock(&parent_path, &path); + + if (!IS_ERR(link_name)) + kfree(link_name); +diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c +index 6c3a57bff14703..a4ff1167c9a123 100644 +--- a/fs/smb/server/transport_rdma.c ++++ b/fs/smb/server/transport_rdma.c +@@ -2193,7 +2193,7 @@ int ksmbd_rdma_init(void) + return 0; + } + +-void ksmbd_rdma_destroy(void) ++void ksmbd_rdma_stop_listening(void) + { + if (!smb_direct_listener.cm_id) + return; +@@ -2202,7 +2202,10 @@ void ksmbd_rdma_destroy(void) + rdma_destroy_id(smb_direct_listener.cm_id); + + smb_direct_listener.cm_id = NULL; ++} + ++void ksmbd_rdma_destroy(void) ++{ + if (smb_direct_wq) { + destroy_workqueue(smb_direct_wq); + smb_direct_wq = NULL; +diff --git a/fs/smb/server/transport_rdma.h b/fs/smb/server/transport_rdma.h +index 77aee4e5c9dcd8..a2291b77488a15 100644 +--- a/fs/smb/server/transport_rdma.h ++++ b/fs/smb/server/transport_rdma.h +@@ -54,13 +54,15 @@ struct smb_direct_data_transfer { + + #ifdef CONFIG_SMB_SERVER_SMBDIRECT + int ksmbd_rdma_init(void); ++void ksmbd_rdma_stop_listening(void); + void ksmbd_rdma_destroy(void); + bool ksmbd_rdma_capable_netdev(struct net_device *netdev); + void init_smbd_max_io_size(unsigned int sz); + unsigned int get_smbd_max_read_write_size(void); + #else + static inline int ksmbd_rdma_init(void) { return 0; } +-static inline int ksmbd_rdma_destroy(void) { return 0; } ++static inline void ksmbd_rdma_stop_listening(void) { } ++static inline void ksmbd_rdma_destroy(void) { } + static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; } + static inline void init_smbd_max_io_size(unsigned int sz) { } + static inline unsigned int get_smbd_max_read_write_size(void) { return 0; } +diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c +index e86bc4a460687a..53c536f2ce9f9c 100644 +--- a/fs/smb/server/transport_tcp.c ++++ b/fs/smb/server/transport_tcp.c +@@ -87,7 +87,14 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk) + return NULL; + } + ++#if IS_ENABLED(CONFIG_IPV6) ++ if (client_sk->sk->sk_family == AF_INET6) ++ memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16); ++ else ++ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; ++#else + conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; ++#endif + conn->transport = KSMBD_TRANS(t); + KSMBD_TRANS(t)->conn = conn; + KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops; +@@ -231,7 +238,6 @@ static int ksmbd_kthread_fn(void *p) + { + struct socket *client_sk = NULL; + struct interface *iface = (struct interface *)p; +- struct inet_sock *csk_inet; + struct ksmbd_conn *conn; + int ret; + +@@ -254,13 +260,27 @@ static int ksmbd_kthread_fn(void *p) + /* + * Limits repeated connections from clients with the same IP. 
+ */
+- csk_inet = inet_sk(client_sk->sk);
+ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list)
+- if (csk_inet->inet_daddr == conn->inet_addr) {
++#if IS_ENABLED(CONFIG_IPV6)
++ if (client_sk->sk->sk_family == AF_INET6) {
++ if (memcmp(&client_sk->sk->sk_v6_daddr,
++ &conn->inet6_addr, 16) == 0) {
++ ret = -EAGAIN;
++ break;
++ }
++ } else if (inet_sk(client_sk->sk)->inet_daddr ==
++ conn->inet_addr) {
++ ret = -EAGAIN;
++ break;
++ }
++#else
++ if (inet_sk(client_sk->sk)->inet_daddr ==
++ conn->inet_addr) {
+ ret = -EAGAIN;
+ break;
+ }
++#endif
+ up_read(&conn_list_lock);
+ if (ret == -EAGAIN)
+ continue;
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 3a27d4268b3c4a..494d21777ed00b 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -187,10 +187,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ unsigned short flags;
+ unsigned int fragments;
+ u64 lookup_table_start, xattr_id_table_start, next_table;
+- int err;
++ int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+
+ TRACE("Entered squashfs_fill_superblock\n");
+
++ if (!devblksize) {
++ errorf(fc, "squashfs: unable to set blocksize\n");
++ return -EINVAL;
++ }
++
+ sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
+ if (sb->s_fs_info == NULL) {
+ ERROR("Failed to allocate squashfs_sb_info\n");
+@@ -201,12 +206,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+
+- msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+- if (!msblk->devblksize) {
+- errorf(fc, "squashfs: unable to set blocksize\n");
+- return -EINVAL;
+- }
+-
++ msblk->devblksize = devblksize;
+ msblk->devblksize_log2 = ffz(~msblk->devblksize);
+
+ mutex_init(&msblk->meta_index_mutex);
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 7d389dd5ed5195..6b70965063d739 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -483,9 +483,20 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ return !(ei && ei->is_freed);
+ }
+
++static int tracefs_d_delete(const struct dentry *dentry)
++{
++ /*
++ * We want to keep eventfs dentries around but not tracefs
++ * ones. eventfs dentries have content in d_fsdata.
++ * Use d_fsdata to determine if it's an eventfs dentry or not.
++ */ ++ return dentry->d_fsdata == NULL; ++} ++ + static const struct dentry_operations tracefs_dentry_operations = { + .d_revalidate = tracefs_d_revalidate, + .d_release = tracefs_d_release, ++ .d_delete = tracefs_d_delete, + }; + + static int trace_fill_super(struct super_block *sb, void *data, int silent) +diff --git a/fs/udf/super.c b/fs/udf/super.c +index 20dff9ed2471da..cb13a07a4aa852 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -1409,7 +1409,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, + struct genericPartitionMap *gpm; + uint16_t ident; + struct buffer_head *bh; +- unsigned int table_len; ++ unsigned int table_len, part_map_count; + int ret; + + bh = udf_read_tagged(sb, block, block, &ident); +@@ -1430,7 +1430,16 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, + "logical volume"); + if (ret) + goto out_bh; +- ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); ++ ++ part_map_count = le32_to_cpu(lvd->numPartitionMaps); ++ if (part_map_count > table_len / sizeof(struct genericPartitionMap1)) { ++ udf_err(sb, "error loading logical volume descriptor: " ++ "Too many partition maps (%u > %u)\n", part_map_count, ++ table_len / (unsigned)sizeof(struct genericPartitionMap1)); ++ ret = -EIO; ++ goto out_bh; ++ } ++ ret = udf_sb_alloc_partition_maps(sb, part_map_count); + if (ret) + goto out_bh; + +diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c +index f5377ba5967a7a..68af6ae3b5d5b8 100644 +--- a/fs/xfs/xfs_itable.c ++++ b/fs/xfs/xfs_itable.c +@@ -422,11 +422,15 @@ xfs_inumbers( + .breq = breq, + }; + struct xfs_trans *tp; ++ unsigned int iwalk_flags = 0; + int error = 0; + + if (xfs_bulkstat_already_done(breq->mp, breq->startino)) + return 0; + ++ if (breq->flags & XFS_IBULK_SAME_AG) ++ iwalk_flags |= XFS_IWALK_SAME_AG; ++ + /* + * Grab an empty transaction so that we can use its recursive buffer + * locking abilities to detect cycles in the inobt without deadlocking. 
+@@ -435,7 +439,7 @@ xfs_inumbers( + if (error) + goto out; + +- error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags, ++ error = xfs_inobt_walk(breq->mp, tp, breq->startino, iwalk_flags, + xfs_inumbers_walk, breq->icount, &ic); + xfs_trans_cancel(tp); + out: +diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h +index a07b510e7dc559..a63d61ca55afc8 100644 +--- a/include/linux/arch_topology.h ++++ b/include/linux/arch_topology.h +@@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu) + + void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); + ++DECLARE_PER_CPU(unsigned long, capacity_freq_ref); ++ ++static inline unsigned long topology_get_freq_ref(int cpu) ++{ ++ return per_cpu(capacity_freq_ref, cpu); ++} ++ + DECLARE_PER_CPU(unsigned long, arch_freq_scale); + + static inline unsigned long topology_get_freq_scale(int cpu) +@@ -92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu); + void remove_cpu_topology(unsigned int cpuid); + void reset_cpu_topology(void); + int parse_acpi_topology(void); ++void freq_inv_set_max_ratio(int cpu, u64 max_rate); + #endif + + #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ +diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h +index 92c8997b193816..b9c0b3281ace16 100644 +--- a/include/linux/blk_types.h ++++ b/include/linux/blk_types.h +@@ -379,6 +379,8 @@ enum req_op { + REQ_OP_DISCARD = (__force blk_opf_t)3, + /* securely erase sectors */ + REQ_OP_SECURE_ERASE = (__force blk_opf_t)5, ++ /* write data at the current zone write pointer */ ++ REQ_OP_ZONE_APPEND = (__force blk_opf_t)7, + /* write the zero filled sector many times */ + REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, + /* Open a zone */ +@@ -386,9 +388,7 @@ enum req_op { + /* Close a zone */ + REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, + /* Transition a zone to full */ +- REQ_OP_ZONE_FINISH = (__force blk_opf_t)12, +- /* write data at the current zone write pointer */ +- REQ_OP_ZONE_APPEND = (__force blk_opf_t)13, ++ REQ_OP_ZONE_FINISH = (__force blk_opf_t)13, + /* reset a zone write pointer */ + REQ_OP_ZONE_RESET = (__force blk_opf_t)15, + /* reset all the zone present on the device */ +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 5a4054f17cbc68..e84ed3a43f1f80 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -234,14 +234,6 @@ static inline void *offset_to_ptr(const int *off) + #define __ADDRESSABLE(sym) \ + ___ADDRESSABLE(sym, __section(".discard.addressable")) + +-#define __ADDRESSABLE_ASM(sym) \ +- .pushsection .discard.addressable,"aw"; \ +- .align ARCH_SEL(8,4); \ +- ARCH_SEL(.quad, .long) __stringify(sym); \ +- .popsection; +- +-#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym)) +- + /* &a[0] degrades to a pointer: a different type from an array */ + #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) + +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h +index 184a84dd467ec7..bfecd9dcb55297 100644 +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -1245,6 +1245,7 @@ void arch_set_freq_scale(const struct cpumask *cpus, + { + } + #endif ++ + /* the following are really really optional */ + extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; + extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; +diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h +index adec808b371a11..88d91e08747187 100644 +--- a/include/linux/energy_model.h ++++ 
b/include/linux/energy_model.h +@@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, + unsigned long max_util, unsigned long sum_util, + unsigned long allowed_cpu_cap) + { +- unsigned long freq, scale_cpu; ++ unsigned long freq, ref_freq, scale_cpu; + struct em_perf_state *ps; + int cpu; + +@@ -241,10 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, + */ + cpu = cpumask_first(to_cpumask(pd->cpus)); + scale_cpu = arch_scale_cpu_capacity(cpu); +- ps = &pd->table[pd->nr_perf_states - 1]; ++ ref_freq = arch_scale_freq_ref(cpu); + + max_util = min(max_util, allowed_cpu_cap); +- freq = map_util_freq(max_util, ps->frequency, scale_cpu); ++ freq = map_util_freq(max_util, ref_freq, scale_cpu); + + /* + * Find the lowest performance state of the Energy Model above the +diff --git a/include/linux/fs.h b/include/linux/fs.h +index b641a01512fb09..4cdeeaedaa404f 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -456,7 +456,7 @@ extern const struct address_space_operations empty_aops; + * It is also used to block modification of page cache contents through + * memory mappings. + * @gfp_mask: Memory allocation flags to use for allocating pages. +- * @i_mmap_writable: Number of VM_SHARED mappings. ++ * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings. + * @nr_thps: Number of THPs in the pagecache (non-shmem only). + * @i_mmap: Tree of private and shared mappings. + * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. +@@ -559,7 +559,7 @@ static inline int mapping_mapped(struct address_space *mapping) + + /* + * Might pages of this file have been modified in userspace? +- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap ++ * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap + * marks vma as VM_SHARED if it is shared, and the file was opened for + * writing i.e. vma may be mprotected writable even if now readonly. 
+ * +diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h +index 9efbc54e35e596..be5417303ecf69 100644 +--- a/include/linux/hypervisor.h ++++ b/include/linux/hypervisor.h +@@ -37,6 +37,9 @@ static inline bool hypervisor_isolated_pci_functions(void) + if (IS_ENABLED(CONFIG_S390)) + return true; + ++ if (IS_ENABLED(CONFIG_LOONGARCH)) ++ return true; ++ + return jailhouse_paravirt(); + } + +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h +index 430749a0f362aa..272d9ad739607e 100644 +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb) + /* found in socket.c */ + extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); + +-static inline bool is_vlan_dev(const struct net_device *dev) +-{ +- return dev->priv_flags & IFF_802_1Q_VLAN; +-} +- + #define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all) + #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) + #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) +@@ -199,6 +194,11 @@ struct vlan_dev_priv { + #endif + }; + ++static inline bool is_vlan_dev(const struct net_device *dev) ++{ ++ return dev->priv_flags & IFF_802_1Q_VLAN; ++} ++ + static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) + { + return netdev_priv(dev); +@@ -236,6 +236,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, + extern bool vlan_uses_dev(const struct net_device *dev); + + #else ++static inline bool is_vlan_dev(const struct net_device *dev) ++{ ++ return false; ++} ++ + static inline struct net_device * + __vlan_find_dev_deep_rcu(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id) +@@ -253,19 +258,19 @@ vlan_for_each(struct net_device *dev, + + static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) + { +- BUG(); ++ WARN_ON_ONCE(1); + return NULL; + } + + static inline u16 vlan_dev_vlan_id(const struct net_device *dev) + { +- BUG(); ++ WARN_ON_ONCE(1); + return 0; + } + + static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev) + { +- BUG(); ++ WARN_ON_ONCE(1); + return 0; + } + +diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h +index cb71aa616bd37f..631d58d0b83850 100644 +--- a/include/linux/iosys-map.h ++++ b/include/linux/iosys-map.h +@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map) + */ + static inline void iosys_map_clear(struct iosys_map *map) + { +- if (map->is_iomem) { +- map->vaddr_iomem = NULL; +- map->is_iomem = false; +- } else { +- map->vaddr = NULL; +- } ++ memset(map, 0, sizeof(*map)); + } + + /** +diff --git a/include/linux/memfd.h b/include/linux/memfd.h +index e7abf6fa4c5223..40cc726a8a0ce4 100644 +--- a/include/linux/memfd.h ++++ b/include/linux/memfd.h +@@ -6,11 +6,25 @@ + + #ifdef CONFIG_MEMFD_CREATE + extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); ++unsigned int *memfd_file_seals_ptr(struct file *file); + #else + static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) + { + return -EINVAL; + } ++ ++static inline unsigned int *memfd_file_seals_ptr(struct file *file) ++{ ++ return NULL; ++} + #endif + ++/* Retrieve memfd seals associated with the file, if any. */ ++static inline unsigned int memfd_file_seals(struct file *file) ++{ ++ unsigned int *sealsp = memfd_file_seals_ptr(file); ++ ++ return sealsp ? 
*sealsp : 0;
++}
++
+ #endif /* __LINUX_MEMFD_H */
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index ee26e37daa0a80..b97d8a691b28bf 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -941,6 +941,17 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
+ return vma->vm_flags & VM_ACCESS_FLAGS;
+ }
+
++static inline bool is_shared_maywrite(vm_flags_t vm_flags)
++{
++ return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
++ (VM_SHARED | VM_MAYWRITE);
++}
++
++static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
++{
++ return is_shared_maywrite(vma->vm_flags);
++}
++
+ static inline
+ struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+ {
+@@ -4011,34 +4022,57 @@ void mem_dump_obj(void *object);
+ static inline void mem_dump_obj(void *object) {}
+ #endif
+
++static inline bool is_write_sealed(int seals)
++{
++ return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
++}
++
++/**
++ * is_readonly_sealed - Checks whether write-sealed but mapped read-only,
++ * in which case writes should be disallowed
++ * going forward.
++ * @seals: the seals to check
++ * @vm_flags: the VMA flags to check
++ *
++ * Returns whether readonly sealed, in which case writes should be disallowed
++ * going forward.
++ */
++static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
++{
++ /*
++ * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
++ * MAP_SHARED and read-only, take care to not allow mprotect to
++ * revert protections on such mappings. Do this only for shared
++ * mappings. For private mappings, don't need to mask
++ * VM_MAYWRITE as we still want them to be COW-writable.
++ */
++ if (is_write_sealed(seals) &&
++ ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
++ return true;
++
++ return false;
++}
++
+ /**
+- * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
++ * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
++ * handle them.
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+- * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
+- * the vma flags. Return 0 if check pass, or <0 for errors.
+- */
+-static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+-{
+- if (seals & F_SEAL_FUTURE_WRITE) {
+- /*
+- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+- * "future write" seal active.
+- */
+- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+- return -EPERM;
+-
+- /*
+- * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+- * MAP_SHARED and read-only, take care to not allow mprotect to
+- * revert protections on such mappings. Do this only for shared
+- * mappings. For private mappings, don't need to mask
+- * VM_MAYWRITE as we still want them to be COW-writable.
+- */
+- if (vma->vm_flags & VM_SHARED)
+- vm_flags_clear(vma, VM_MAYWRITE);
+- }
++ * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
++ * check/handling on the vma flags. Return 0 if the check passes, or <0 for errors.
++ */
++static inline int seal_check_write(int seals, struct vm_area_struct *vma)
++{
++ if (!is_write_sealed(seals))
++ return 0;
++
++ /*
++ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
++ * write seals are active.
++ */ ++ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) ++ return -EPERM; + + return 0; + } +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 2d1fb935a8c86a..ac5bd1718af241 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -318,7 +318,14 @@ struct pci_sriov; + struct pci_p2pdma; + struct rcec_ea; + +-/* The pci_dev structure describes PCI devices */ ++/* struct pci_dev - describes a PCI device ++ * ++ * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable, ++ * Conventional PCI Hot-Plug, ACPI slot). ++ * Such bridges are allocated additional MMIO and bus ++ * number resources to allow for hierarchy expansion. ++ * @is_pciehp: PCIe Hot-Plug Capable bridge. ++ */ + struct pci_dev { + struct list_head bus_list; /* Node in per-bus list */ + struct pci_bus *bus; /* Bus this device is on */ +@@ -439,6 +446,7 @@ struct pci_dev { + unsigned int is_physfn:1; + unsigned int is_virtfn:1; + unsigned int is_hotplug_bridge:1; ++ unsigned int is_pciehp:1; + unsigned int shpc_managed:1; /* SHPC owned by shpchp */ + unsigned int is_thunderbolt:1; /* Thunderbolt controller */ + /* +diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h +index 406855d73901a4..4b74f0f012a59f 100644 +--- a/include/linux/pm_runtime.h ++++ b/include/linux/pm_runtime.h +@@ -73,7 +73,8 @@ extern int pm_runtime_force_resume(struct device *dev); + extern int __pm_runtime_idle(struct device *dev, int rpmflags); + extern int __pm_runtime_suspend(struct device *dev, int rpmflags); + extern int __pm_runtime_resume(struct device *dev, int rpmflags); +-extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count); ++extern int pm_runtime_get_if_active(struct device *dev); ++extern int pm_runtime_get_if_in_use(struct device *dev); + extern int pm_schedule_suspend(struct device *dev, unsigned int delay); + extern int __pm_runtime_set_status(struct device *dev, unsigned int status); + extern int pm_runtime_barrier(struct device *dev); +@@ -95,18 +96,6 @@ extern void pm_runtime_release_supplier(struct device_link *link); + + extern int devm_pm_runtime_enable(struct device *dev); + +-/** +- * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. +- * @dev: Target device. +- * +- * Increment the runtime PM usage counter of @dev if its runtime PM status is +- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0. +- */ +-static inline int pm_runtime_get_if_in_use(struct device *dev) +-{ +- return pm_runtime_get_if_active(dev, false); +-} +- + /** + * pm_suspend_ignore_children - Set runtime PM behavior regarding children. + * @dev: Target device. 
+@@ -277,8 +266,7 @@ static inline int pm_runtime_get_if_in_use(struct device *dev) + { + return -EINVAL; + } +-static inline int pm_runtime_get_if_active(struct device *dev, +- bool ign_usage_count) ++static inline int pm_runtime_get_if_active(struct device *dev) + { + return -EINVAL; + } +diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h +index 67b573d5bf28f2..9671b7234684ae 100644 +--- a/include/linux/sched/topology.h ++++ b/include/linux/sched/topology.h +@@ -275,6 +275,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus, + { } + #endif + ++#ifndef arch_scale_freq_ref ++static __always_inline ++unsigned int arch_scale_freq_ref(int cpu) ++{ ++ return 0; ++} ++#endif ++ + static inline int task_node(const struct task_struct *p) + { + return cpu_to_node(task_cpu(p)); +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 7b7222b4f6111d..3a558a3c2cca8a 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -3556,7 +3556,13 @@ static inline void *skb_frag_address(const skb_frag_t *frag) + */ + static inline void *skb_frag_address_safe(const skb_frag_t *frag) + { +- void *ptr = page_address(skb_frag_page(frag)); ++ struct page *page = skb_frag_page(frag); ++ void *ptr; ++ ++ if (!page) ++ return NULL; ++ ++ ptr = page_address(page); + if (unlikely(!ptr)) + return NULL; + +diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h +index 2d207cb4837dbf..4ac082a6317381 100644 +--- a/include/linux/usb/cdc_ncm.h ++++ b/include/linux/usb/cdc_ncm.h +@@ -119,6 +119,7 @@ struct cdc_ncm_ctx { + u32 timer_interval; + u32 max_ndp_size; + u8 is_ndp16; ++ u8 filtering_supported; + union { + struct usb_cdc_ncm_ndp16 *delayed_ndp16; + struct usb_cdc_ncm_ndp32 *delayed_ndp32; +diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h +index fbf30721bac9e5..5148b035a8f387 100644 +--- a/include/linux/virtio_vsock.h ++++ b/include/linux/virtio_vsock.h +@@ -110,7 +110,12 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb) + return (size_t)(skb_end_pointer(skb) - skb->head); + } + +-#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) ++/* Dimension the RX SKB so that the entire thing fits exactly into ++ * a single 4KiB page. This avoids wasting memory due to alloc_skb() ++ * rounding up to the next page order and also means that we ++ * don't leave higher-order pages sitting around in the RX queue. 
++ */ ++#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4) + #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL + #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) + +diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h +index c5e57c6bd87367..078e16d2512a55 100644 +--- a/include/net/bond_3ad.h ++++ b/include/net/bond_3ad.h +@@ -54,6 +54,8 @@ typedef enum { + AD_MUX_DETACHED, /* mux machine */ + AD_MUX_WAITING, /* mux machine */ + AD_MUX_ATTACHED, /* mux machine */ ++ AD_MUX_COLLECTING, /* mux machine */ ++ AD_MUX_DISTRIBUTING, /* mux machine */ + AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */ + } mux_states_t; + +@@ -302,6 +304,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, + struct slave *slave); + int bond_3ad_set_carrier(struct bonding *bond); + void bond_3ad_update_lacp_rate(struct bonding *bond); ++void bond_3ad_update_lacp_active(struct bonding *bond); + void bond_3ad_update_ad_actor_settings(struct bonding *bond); + int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats); + size_t bond_3ad_stats_size(void); +diff --git a/include/net/bond_options.h b/include/net/bond_options.h +index f631d9f099410c..18687ccf063830 100644 +--- a/include/net/bond_options.h ++++ b/include/net/bond_options.h +@@ -76,6 +76,7 @@ enum { + BOND_OPT_MISSED_MAX, + BOND_OPT_NS_TARGETS, + BOND_OPT_PRIO, ++ BOND_OPT_COUPLED_CONTROL, + BOND_OPT_LAST + }; + +diff --git a/include/net/bonding.h b/include/net/bonding.h +index 94594026a5c554..8bb5f016969f10 100644 +--- a/include/net/bonding.h ++++ b/include/net/bonding.h +@@ -148,6 +148,7 @@ struct bond_params { + #if IS_ENABLED(CONFIG_IPV6) + struct in6_addr ns_targets[BOND_MAX_NS_TARGETS]; + #endif ++ int coupled_control; + + /* 2 bytes of padding : see ether_addr_equal_64bits() */ + u8 ad_actor_system[ETH_ALEN + 2]; +@@ -167,6 +168,7 @@ struct slave { + u8 backup:1, /* indicates backup slave. 
Value corresponds with + BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ + inactive:1, /* indicates inactive slave */ ++ rx_disabled:1, /* indicates whether slave's Rx is disabled */ + should_notify:1, /* indicates whether the state changed */ + should_notify_link:1; /* indicates whether the link changed */ + u8 duplex; +@@ -568,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave, + bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); + if (!slave->bond->params.all_slaves_active) + slave->inactive = 1; ++ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) ++ slave->rx_disabled = 1; ++} ++ ++static inline void bond_set_slave_tx_disabled_flags(struct slave *slave, ++ bool notify) ++{ ++ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); + } + + static inline void bond_set_slave_active_flags(struct slave *slave, +@@ -575,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave, + { + bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); + slave->inactive = 0; ++ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) ++ slave->rx_disabled = 0; ++} ++ ++static inline void bond_set_slave_rx_enabled_flags(struct slave *slave, ++ bool notify) ++{ ++ slave->rx_disabled = 0; + } + + static inline bool bond_is_slave_inactive(struct slave *slave) +@@ -582,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave) + return slave->inactive; + } + ++static inline bool bond_is_slave_rx_disabled(struct slave *slave) ++{ ++ return slave->rx_disabled; ++} ++ + static inline void bond_propose_link_state(struct slave *slave, int state) + { + slave->link_new_state = state; +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h +index 2fb3151ea7c9e9..5b3a63c377d60b 100644 +--- a/include/net/cfg80211.h ++++ b/include/net/cfg80211.h +@@ -559,7 +559,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, + { + int i; + +- if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) ++ if (WARN_ON(iftype >= NUM_NL80211_IFTYPES)) + return NULL; + + if (iftype == NL80211_IFTYPE_AP_VLAN) +diff --git a/include/net/mac80211.h b/include/net/mac80211.h +index 835a58ce9ca57c..adaa1b2323d2c4 100644 +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -4111,6 +4111,8 @@ struct ieee80211_prep_tx_info { + * @mgd_complete_tx: Notify the driver that the response frame for a previously + * transmitted frame announced with @mgd_prepare_tx was received, the data + * is filled similarly to @mgd_prepare_tx though the duration is not used. ++ * Note that this isn't always called for each mgd_prepare_tx() call, for ++ * example for SAE the 'confirm' messages can be on the air in any order. + * + * @mgd_protect_tdls_discover: Protect a TDLS discovery session. 
After sending + * a TDLS discovery-request, we expect a reply to arrive on the AP's +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 0d28172193fa63..d775906a65c75c 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -180,6 +180,7 @@ struct pneigh_entry { + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; ++ bool permanent; + u32 key[]; + }; + +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index ce3f84c6eb8eb3..5ddcadee62b766 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -293,6 +293,7 @@ static inline int check_net(const struct net *net) + } + + void net_drop_ns(void *); ++void net_passive_dec(struct net *net); + + #else + +@@ -322,8 +323,23 @@ static inline int check_net(const struct net *net) + } + + #define net_drop_ns NULL ++ ++static inline void net_passive_dec(struct net *net) ++{ ++ refcount_dec(&net->passive); ++} + #endif + ++static inline void net_passive_inc(struct net *net) ++{ ++ refcount_inc(&net->passive); ++} ++ ++/* Returns true if the netns initialization is completed successfully */ ++static inline bool net_initialized(const struct net *net) ++{ ++ return READ_ONCE(net->list.next); ++} + + static inline void __netns_tracker_alloc(struct net *net, + netns_tracker *tracker, +diff --git a/include/net/sock.h b/include/net/sock.h +index e15bea43b2ecd1..b5f7208a9ec383 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1859,6 +1859,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk) + struct sock *sk_alloc(struct net *net, int family, gfp_t priority, + struct proto *prot, int kern); + void sk_free(struct sock *sk); ++void sk_net_refcnt_upgrade(struct sock *sk); + void sk_destruct(struct sock *sk); + struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); + void sk_free_unlock_clone(struct sock *sk); +diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h +index 8ea1674069fe81..f759109caeeac6 100644 +--- a/include/trace/events/btrfs.h ++++ b/include/trace/events/btrfs.h +@@ -1857,7 +1857,7 @@ TRACE_EVENT(qgroup_update_counters, + + TRACE_EVENT(qgroup_update_reserve, + +- TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup, ++ TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup, + s64 diff, int type), + + TP_ARGS(fs_info, qgroup, diff, type), +@@ -1883,7 +1883,7 @@ TRACE_EVENT(qgroup_update_reserve, + + TRACE_EVENT(qgroup_meta_reserve, + +- TP_PROTO(struct btrfs_root *root, s64 diff, int type), ++ TP_PROTO(const struct btrfs_root *root, s64 diff, int type), + + TP_ARGS(root, diff, type), + +@@ -1906,7 +1906,7 @@ TRACE_EVENT(qgroup_meta_reserve, + + TRACE_EVENT(qgroup_meta_convert, + +- TP_PROTO(struct btrfs_root *root, s64 diff), ++ TP_PROTO(const struct btrfs_root *root, s64 diff), + + TP_ARGS(root, diff), + +diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h +index f50048af5fcc28..c8fe879d5828bd 100644 +--- a/include/trace/events/thp.h ++++ b/include/trace/events/thp.h +@@ -8,6 +8,7 @@ + #include + #include + ++#ifdef CONFIG_PPC_BOOK3S_64 + DECLARE_EVENT_CLASS(hugepage_set, + + TP_PROTO(unsigned long addr, unsigned long pte), +@@ -66,6 +67,7 @@ DEFINE_EVENT(hugepage_update, hugepage_update_pud, + TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set), + TP_ARGS(addr, pud, clr, set) + ); ++#endif /* CONFIG_PPC_BOOK3S_64 */ + + DECLARE_EVENT_CLASS(migration_pmd, + +diff --git a/include/uapi/linux/if_link.h 
b/include/uapi/linux/if_link.h +index ce3117df9cec29..6750911da4f066 100644 +--- a/include/uapi/linux/if_link.h ++++ b/include/uapi/linux/if_link.h +@@ -950,6 +950,7 @@ enum { + IFLA_BOND_AD_LACP_ACTIVE, + IFLA_BOND_MISSED_MAX, + IFLA_BOND_NS_IP6_TARGET, ++ IFLA_BOND_COUPLED_CONTROL, + __IFLA_BOND_MAX, + }; + +diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h +index ff8d21f9e95b77..5a47339ef7d768 100644 +--- a/include/uapi/linux/in6.h ++++ b/include/uapi/linux/in6.h +@@ -152,7 +152,6 @@ struct in6_flowlabel_req { + /* + * IPV6 socket options + */ +-#if __UAPI_DEF_IPV6_OPTIONS + #define IPV6_ADDRFORM 1 + #define IPV6_2292PKTINFO 2 + #define IPV6_2292HOPOPTS 3 +@@ -169,8 +168,10 @@ struct in6_flowlabel_req { + #define IPV6_MULTICAST_IF 17 + #define IPV6_MULTICAST_HOPS 18 + #define IPV6_MULTICAST_LOOP 19 ++#if __UAPI_DEF_IPV6_OPTIONS + #define IPV6_ADD_MEMBERSHIP 20 + #define IPV6_DROP_MEMBERSHIP 21 ++#endif + #define IPV6_ROUTER_ALERT 22 + #define IPV6_MTU_DISCOVER 23 + #define IPV6_MTU 24 +@@ -203,7 +204,6 @@ struct in6_flowlabel_req { + #define IPV6_IPSEC_POLICY 34 + #define IPV6_XFRM_POLICY 35 + #define IPV6_HDRINCL 36 +-#endif + + /* + * Multicast: +diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h +index 8e61f8b7c2ced1..333769bc6abf0c 100644 +--- a/include/uapi/linux/io_uring.h ++++ b/include/uapi/linux/io_uring.h +@@ -46,7 +46,7 @@ struct io_uring_sqe { + }; + __u32 len; /* buffer size or number of iovecs */ + union { +- __kernel_rwf_t rw_flags; ++ __u32 rw_flags; + __u32 fsync_flags; + __u16 poll_events; /* compatibility */ + __u32 poll32_events; /* word-reversed for BE */ +diff --git a/include/uapi/linux/pfrut.h b/include/uapi/linux/pfrut.h +index 42fa15f8310d6b..b77d5c210c2620 100644 +--- a/include/uapi/linux/pfrut.h ++++ b/include/uapi/linux/pfrut.h +@@ -89,6 +89,7 @@ struct pfru_payload_hdr { + __u32 hw_ver; + __u32 rt_ver; + __u8 platform_id[16]; ++ __u32 svn_ver; + }; + + enum pfru_dsm_status { +diff --git a/io_uring/net.c b/io_uring/net.c +index e455f051e62ef7..e7f8a79e049c9d 100644 +--- a/io_uring/net.c ++++ b/io_uring/net.c +@@ -351,6 +351,13 @@ static int io_setup_async_addr(struct io_kiocb *req, + return -EAGAIN; + } + ++static void io_net_kbuf_recyle(struct io_kiocb *req) ++{ ++ req->flags |= REQ_F_PARTIAL_IO; ++ if (req->flags & REQ_F_BUFFER_RING) ++ io_kbuf_recycle_ring(req); ++} ++ + int io_sendmsg_prep_async(struct io_kiocb *req) + { + int ret; +@@ -442,7 +449,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) + kmsg->msg.msg_controllen = 0; + kmsg->msg.msg_control = NULL; + sr->done_io += ret; +- req->flags |= REQ_F_PARTIAL_IO; ++ io_net_kbuf_recyle(req); + return io_setup_async_msg(req, kmsg, issue_flags); + } + if (ret == -ERESTARTSYS) +@@ -521,7 +528,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) + sr->len -= ret; + sr->buf += ret; + sr->done_io += ret; +- req->flags |= REQ_F_PARTIAL_IO; ++ io_net_kbuf_recyle(req); + return io_setup_async_addr(req, &__address, issue_flags); + } + if (ret == -ERESTARTSYS) +@@ -891,7 +898,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) + } + if (ret > 0 && io_net_retry(sock, flags)) { + sr->done_io += ret; +- req->flags |= REQ_F_PARTIAL_IO; ++ io_net_kbuf_recyle(req); + return io_setup_async_msg(req, kmsg, issue_flags); + } + if (ret == -ERESTARTSYS) +@@ -991,7 +998,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) + sr->len -= ret; + sr->buf += ret; + sr->done_io += ret; +- req->flags |= REQ_F_PARTIAL_IO; ++ 
io_net_kbuf_recyle(req); + return -EAGAIN; + } + if (ret == -ERESTARTSYS) +@@ -1235,7 +1242,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) + zc->len -= ret; + zc->buf += ret; + zc->done_io += ret; +- req->flags |= REQ_F_PARTIAL_IO; ++ io_net_kbuf_recyle(req); + return io_setup_async_addr(req, &__address, issue_flags); + } + if (ret == -ERESTARTSYS) +@@ -1306,7 +1313,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) + + if (ret > 0 && io_net_retry(sock, flags)) { + sr->done_io += ret; +- req->flags |= REQ_F_PARTIAL_IO; ++ io_net_kbuf_recyle(req); + return io_setup_async_msg(req, kmsg, issue_flags); + } + if (ret == -ERESTARTSYS) +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 1f9ae600e4455c..7d6ee41f4b4f4f 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -460,7 +460,8 @@ static bool reg_not_null(const struct bpf_reg_state *reg) + type == PTR_TO_MAP_KEY || + type == PTR_TO_SOCK_COMMON || + (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) || +- type == PTR_TO_MEM; ++ type == PTR_TO_MEM || ++ type == CONST_PTR_TO_MAP; + } + + static bool type_is_ptr_alloc_obj(u32 type) +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index ad8b62202bdc46..eadb028916c812 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -432,7 +432,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes) + { + if (!cpusets_insane_config() && + movable_only_nodes(nodes)) { +- static_branch_enable(&cpusets_insane_config_key); ++ static_branch_enable_cpuslocked(&cpusets_insane_config_key); + pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n" + "Cpuset allocations might fail even with a lot of memory available.\n", + nodemask_pr_args(nodes)); +diff --git a/kernel/fork.c b/kernel/fork.c +index 7966c9a1c163d1..0e20d7e9460848 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -739,7 +739,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, + + get_file(file); + i_mmap_lock_write(mapping); +- if (tmp->vm_flags & VM_SHARED) ++ if (vma_is_shared_maywrite(tmp)) + mapping_allow_writable(mapping); + flush_dcache_mmap_lock(mapping); + /* insert tmp into the share list, just after mpnt */ +diff --git a/kernel/module/main.c b/kernel/module/main.c +index 9711ad14825b24..627680e568fccc 100644 +--- a/kernel/module/main.c ++++ b/kernel/module/main.c +@@ -701,14 +701,16 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, + struct module *mod; + char name[MODULE_NAME_LEN]; + char buf[MODULE_FLAGS_BUF_SIZE]; +- int ret, forced = 0; ++ int ret, len, forced = 0; + + if (!capable(CAP_SYS_MODULE) || modules_disabled) + return -EPERM; + +- if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) +- return -EFAULT; +- name[MODULE_NAME_LEN-1] = '\0'; ++ len = strncpy_from_user(name, name_user, MODULE_NAME_LEN); ++ if (len == 0 || len == MODULE_NAME_LEN) ++ return -ENOENT; ++ if (len < 0) ++ return len; + + audit_log_kern_module(name); + +diff --git a/kernel/power/console.c b/kernel/power/console.c +index fcdf0e14a47d47..19c48aa5355d2b 100644 +--- a/kernel/power/console.c ++++ b/kernel/power/console.c +@@ -16,6 +16,7 @@ + #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) + + static int orig_fgconsole, orig_kmsg; ++static bool vt_switch_done; + + static DEFINE_MUTEX(vt_switch_mutex); + +@@ -136,17 +137,21 @@ void pm_prepare_console(void) + if (orig_fgconsole < 0) + return; + ++ vt_switch_done = true; ++ + orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE); + return; + } + + 
void pm_restore_console(void)
+ {
+- if (!pm_vt_switch())
++ if (!pm_vt_switch() && !vt_switch_done)
+ return;
+
+ if (orig_fgconsole >= 0) {
+ vt_move_to_console(orig_fgconsole, 0);
+ vt_kmsg_redirect(orig_kmsg);
+ }
++
++ vt_switch_done = false;
+ }
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 536acebf22b0d0..607b2e68fa4c20 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4427,6 +4427,8 @@ int rcutree_prepare_cpu(unsigned int cpu)
+ rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++
++ rcu_preempt_deferred_qs_init(rdp);
+ rcu_spawn_one_boost_kthread(rnp);
+ rcu_spawn_cpu_nocb_kthread(cpu);
+ WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index ac8cc756920ddf..71403d22a84655 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -175,6 +175,17 @@ struct rcu_snap_record {
+ unsigned long jiffies; /* Track jiffies value */
+ };
+
++/*
++ * An IRQ work (deferred_qs_iw) is used by RCU to get the scheduler's attention,
++ * to report quiescent states at the soonest possible time.
++ * The request can be in one of the following states:
++ * - DEFER_QS_IDLE: An IRQ work is yet to be scheduled.
++ * - DEFER_QS_PENDING: An IRQ work was scheduled but either not yet run, or it
++ * ran and we still haven't reported a quiescent state.
++ */
++#define DEFER_QS_IDLE 0
++#define DEFER_QS_PENDING 1
++
+ /* Per-CPU data for read-copy update. */
+ struct rcu_data {
+ /* 1) quiescent-state and grace-period handling : */
+@@ -192,7 +203,7 @@ struct rcu_data {
+ /* during and after the last grace */
+ /* period it is aware of. */
+ struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */
+- bool defer_qs_iw_pending; /* Scheduler attention pending? */
++ int defer_qs_iw_pending; /* Scheduler attention pending? */
+ struct work_struct strict_work; /* Schedule readers for strict GPs. */
+
+ /* 2) batch handling */
+@@ -452,6 +463,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp);
+ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+ static void rcu_flavor_sched_clock_irq(int user);
+ static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
++static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp);
+ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+ static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 94b715139f52d9..8707f155afb6df 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -474,13 +474,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
+ struct rcu_node *rnp;
+ union rcu_special special;
+
++ rdp = this_cpu_ptr(&rcu_data);
++ if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
++ rdp->defer_qs_iw_pending = DEFER_QS_IDLE;
++
+ /*
+ * If RCU core is waiting for this CPU to exit its critical section,
+ * report the fact that it has exited. Because irqs are disabled,
+ * t->rcu_read_unlock_special cannot change.
+ */ + special = t->rcu_read_unlock_special; +- rdp = this_cpu_ptr(&rcu_data); + if (!special.s && !rdp->cpu_no_qs.b.exp) { + local_irq_restore(flags); + return; +@@ -612,10 +615,29 @@ notrace void rcu_preempt_deferred_qs(struct task_struct *t) + */ + static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp) + { ++ unsigned long flags; + struct rcu_data *rdp; + + rdp = container_of(iwp, struct rcu_data, defer_qs_iw); +- rdp->defer_qs_iw_pending = false; ++ local_irq_save(flags); ++ ++ /* ++ * If the IRQ work handler happens to run in the middle of RCU read-side ++ * critical section, it could be ineffective in getting the scheduler's ++ * attention to report a deferred quiescent state (the whole point of the ++ * IRQ work). For this reason, requeue the IRQ work. ++ * ++ * Basically, we want to avoid following situation: ++ * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING) ++ * 2. CPU enters new rcu_read_lock() ++ * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0 ++ * 4. rcu_read_unlock() does not re-queue work (state still PENDING) ++ * 5. Deferred QS reporting does not happen. ++ */ ++ if (rcu_preempt_depth() > 0) ++ WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE); ++ ++ local_irq_restore(flags); + } + + /* +@@ -661,17 +683,11 @@ static void rcu_read_unlock_special(struct task_struct *t) + set_tsk_need_resched(current); + set_preempt_need_resched(); + if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled && +- expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) { ++ expboost && rdp->defer_qs_iw_pending != DEFER_QS_PENDING && ++ cpu_online(rdp->cpu)) { + // Get scheduler to re-evaluate and call hooks. + // If !IRQ_WORK, FQS scan will eventually IPI. +- if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && +- IS_ENABLED(CONFIG_PREEMPT_RT)) +- rdp->defer_qs_iw = IRQ_WORK_INIT_HARD( +- rcu_preempt_deferred_qs_handler); +- else +- init_irq_work(&rdp->defer_qs_iw, +- rcu_preempt_deferred_qs_handler); +- rdp->defer_qs_iw_pending = true; ++ rdp->defer_qs_iw_pending = DEFER_QS_PENDING; + irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); + } + } +@@ -810,6 +826,10 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) + } + } + ++static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) ++{ ++ rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler); ++} + #else /* #ifdef CONFIG_PREEMPT_RCU */ + + /* +@@ -1009,6 +1029,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) + WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); + } + ++static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) { } ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index 776be0549162c9..819ec1ccc08cf5 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -137,6 +137,32 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy) + } + } + ++/** ++ * get_capacity_ref_freq - get the reference frequency that has been used to ++ * correlate frequency and compute capacity for a given cpufreq policy. We use ++ * the CPU managing it for the arch_scale_freq_ref() call in the function. ++ * @policy: the cpufreq policy of the CPU in question. ++ * ++ * Return: the reference CPU frequency to compute a capacity. 
++ */
++static __always_inline
++unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy)
++{
++ unsigned int freq = arch_scale_freq_ref(policy->cpu);
++
++ if (freq)
++ return freq;
++
++ if (arch_scale_freq_invariant())
++ return policy->cpuinfo.max_freq;
++
++ /*
++ * Apply a 25% margin so that we select a higher frequency than
++ * the current one before the CPU is fully busy:
++ */
++ return policy->cur + (policy->cur >> 2);
++}
++
+ /**
+ * get_next_freq - Compute a new frequency for a given cpufreq policy.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
+@@ -163,9 +189,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ unsigned long util, unsigned long max)
+ {
+ struct cpufreq_policy *policy = sg_policy->policy;
+- unsigned int freq = arch_scale_freq_invariant() ?
+- policy->cpuinfo.max_freq : policy->cur;
++ unsigned int freq;
+
++ freq = get_capacity_ref_freq(policy);
+ freq = map_util_freq(util, freq, max);
+
+ if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 6ce3028e6e852f..1cf43e91ae9de8 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -11697,8 +11697,14 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+ /*
+ * Track max cost of a domain to make sure to not delay the
+ * next wakeup on the CPU.
++ *
++ * sched_balance_newidle() bumps the cost whenever newidle
++ * balance fails, and we don't want things to grow out of
++ * control. Use the sysctl_sched_migration_cost as the upper
++ * limit, plus a little extra to avoid off by ones.
+ */
+- sd->max_newidle_lb_cost = cost;
++ sd->max_newidle_lb_cost =
++ min(cost, sysctl_sched_migration_cost + 200);
+ sd->last_decay_max_lb_cost = jiffies;
+ } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
+ /*
+@@ -12384,10 +12390,17 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+
+ t1 = sched_clock_cpu(this_cpu);
+ domain_cost = t1 - t0;
+- update_newidle_cost(sd, domain_cost);
+-
+ curr_cost += domain_cost;
+ t0 = t1;
++
++ /*
++ * Failing newidle means it is not effective;
++ * bump the cost so we end up doing less of it.
++ */ ++ if (!pulled_task) ++ domain_cost = (3 * sd->max_newidle_lb_cost) / 2; ++ ++ update_newidle_cost(sd, domain_cost); + } + + /* +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 4351b9069a919d..15785a729a0cdf 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -4058,13 +4058,17 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, + } else { + iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); + } ++ } else { ++ if (hash) ++ iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash); ++ else ++ iter->hash = EMPTY_HASH; ++ } + +- if (!iter->hash) { +- trace_parser_put(&iter->parser); +- goto out_unlock; +- } +- } else +- iter->hash = hash; ++ if (!iter->hash) { ++ trace_parser_put(&iter->parser); ++ goto out_unlock; ++ } + + ret = 0; + +@@ -5922,9 +5926,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file) + ftrace_hash_move_and_update_ops(iter->ops, orig_hash, + iter->hash, filter_hash); + mutex_unlock(&ftrace_lock); +- } else { +- /* For read only, the hash is the ops hash */ +- iter->hash = NULL; + } + + mutex_unlock(&iter->ops->func_hash->regex_lock); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 946350c98b5396..907e45361939be 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1661,7 +1661,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, + + ret = get_user(ch, ubuf++); + if (ret) +- goto out; ++ goto fail; + + read++; + cnt--; +@@ -1675,7 +1675,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, + while (cnt && isspace(ch)) { + ret = get_user(ch, ubuf++); + if (ret) +- goto out; ++ goto fail; + read++; + cnt--; + } +@@ -1685,8 +1685,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, + /* only spaces were written */ + if (isspace(ch) || !ch) { + *ppos += read; +- ret = read; +- goto out; ++ return read; + } + } + +@@ -1696,11 +1695,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, + parser->buffer[parser->idx++] = ch; + else { + ret = -EINVAL; +- goto out; ++ goto fail; + } ++ + ret = get_user(ch, ubuf++); + if (ret) +- goto out; ++ goto fail; + read++; + cnt--; + } +@@ -1716,13 +1716,13 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, + parser->buffer[parser->idx] = 0; + } else { + ret = -EINVAL; +- goto out; ++ goto fail; + } + + *ppos += read; +- ret = read; +- +-out: ++ return read; ++fail: ++ trace_parser_fail(parser); + return ret; + } + +@@ -2211,10 +2211,10 @@ int __init register_tracer(struct tracer *type) + mutex_unlock(&trace_types_lock); + + if (ret || !default_bootup_tracer) +- goto out_unlock; ++ return ret; + + if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) +- goto out_unlock; ++ return 0; + + printk(KERN_INFO "Starting tracer '%s'\n", type->name); + /* Do we want this tracer to start on bootup? */ +@@ -2226,8 +2226,7 @@ int __init register_tracer(struct tracer *type) + /* disable other selftests, since this will break it. */ + disable_tracing_selftest("running a tracer"); + +- out_unlock: +- return ret; ++ return 0; + } + + static void tracing_reset_cpu(struct array_buffer *buf, int cpu) +@@ -8734,11 +8733,10 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, + out_reg: + ret = tracing_alloc_snapshot_instance(tr); + if (ret < 0) +- goto out; ++ return ret; + + ret = register_ftrace_function_probe(glob, tr, ops, count); + +- out: + return ret < 0 ? 
ret : 0; + } + +@@ -10344,7 +10342,7 @@ __init static int tracer_alloc_buffers(void) + BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); + + if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) +- goto out; ++ return -ENOMEM; + + if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) + goto out_free_buffer_mask; +@@ -10455,7 +10453,6 @@ __init static int tracer_alloc_buffers(void) + free_cpumask_var(global_trace.tracing_cpumask); + out_free_buffer_mask: + free_cpumask_var(tracing_buffer_mask); +-out: + return ret; + } + +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index e3afb830fbcc7b..c91f3c47ac642f 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -1153,6 +1153,7 @@ bool ftrace_event_is_function(struct trace_event_call *call); + */ + struct trace_parser { + bool cont; ++ bool fail; + char *buffer; + unsigned idx; + unsigned size; +@@ -1160,7 +1161,7 @@ struct trace_parser { + + static inline bool trace_parser_loaded(struct trace_parser *parser) + { +- return (parser->idx != 0); ++ return !parser->fail && parser->idx != 0; + } + + static inline bool trace_parser_cont(struct trace_parser *parser) +@@ -1174,6 +1175,11 @@ static inline void trace_parser_clear(struct trace_parser *parser) + parser->idx = 0; + } + ++static inline void trace_parser_fail(struct trace_parser *parser) ++{ ++ parser->fail = true; ++} ++ + extern int trace_parser_get_init(struct trace_parser *parser, int size); + extern void trace_parser_put(struct trace_parser *parser); + extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, +@@ -2053,7 +2059,7 @@ static inline bool is_good_system_name(const char *name) + static inline void sanitize_event_name(char *name) + { + while (*name++ != '\0') +- if (*name == ':' || *name == '.') ++ if (*name == ':' || *name == '.' 
|| *name == '*') + *name = '_'; + } + +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c +index 68af76ca8bc992..0a5454fae31693 100644 +--- a/mm/debug_vm_pgtable.c ++++ b/mm/debug_vm_pgtable.c +@@ -1047,29 +1047,34 @@ static void __init destroy_args(struct pgtable_debug_args *args) + + /* Free page table entries */ + if (args->start_ptep) { ++ pmd_clear(args->pmdp); + pte_free(args->mm, args->start_ptep); + mm_dec_nr_ptes(args->mm); + } + + if (args->start_pmdp) { ++ pud_clear(args->pudp); + pmd_free(args->mm, args->start_pmdp); + mm_dec_nr_pmds(args->mm); + } + + if (args->start_pudp) { ++ p4d_clear(args->p4dp); + pud_free(args->mm, args->start_pudp); + mm_dec_nr_puds(args->mm); + } + +- if (args->start_p4dp) ++ if (args->start_p4dp) { ++ pgd_clear(args->pgdp); + p4d_free(args->mm, args->start_p4dp); ++ } + + /* Free vma and mm struct */ + if (args->vma) + vm_area_free(args->vma); + + if (args->mm) +- mmdrop(args->mm); ++ mmput(args->mm); + } + + static struct page * __init +diff --git a/mm/filemap.c b/mm/filemap.c +index 05eb77623a1063..ab24dbf5e747ef 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -3716,7 +3716,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma) + */ + int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) + { +- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) ++ if (vma_is_shared_maywrite(vma)) + return -EINVAL; + return generic_file_mmap(file, vma); + } +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index f86d4e04d95e12..e2e41de55c02b1 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -452,6 +452,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) + { + unsigned long flags; + struct kmemleak_object *object; ++ bool warn = false; + + /* try the slab allocator first */ + if (object_cache) { +@@ -469,8 +470,10 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) + else if (mem_pool_free_count) + object = &mem_pool[--mem_pool_free_count]; + else +- pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); ++ warn = true; + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); ++ if (warn) ++ pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); + + return object; + } +@@ -2006,6 +2009,7 @@ static const struct file_operations kmemleak_fops = { + static void __kmemleak_do_cleanup(void) + { + struct kmemleak_object *object, *tmp; ++ unsigned int cnt = 0; + + /* + * Kmemleak has already been disabled, no need for RCU list traversal +@@ -2014,6 +2018,10 @@ static void __kmemleak_do_cleanup(void) + list_for_each_entry_safe(object, tmp, &object_list, object_list) { + __remove_object(object); + __delete_object(object); ++ ++ /* Call cond_resched() once per 64 iterations to avoid soft lockup */ ++ if (!(++cnt & 0x3f)) ++ cond_resched(); + } + } + +diff --git a/mm/madvise.c b/mm/madvise.c +index 9d2a6cb655ff20..3d6370d3199f31 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -987,7 +987,7 @@ static long madvise_remove(struct vm_area_struct *vma, + return -EINVAL; + } + +- if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) ++ if (!vma_is_shared_maywrite(vma)) + return -EACCES; + + offset = (loff_t)(start - vma->vm_start) +diff --git a/mm/memfd.c b/mm/memfd.c +index 2dba2cb6f0d0f8..187265dc68f5e8 100644 +--- a/mm/memfd.c ++++ b/mm/memfd.c +@@ -134,7 +134,7 @@ static int memfd_wait_for_pins(struct address_space *mapping) + return error; + } + +-static unsigned int *memfd_file_seals_ptr(struct file *file) 
++unsigned int *memfd_file_seals_ptr(struct file *file) + { + if (shmem_file(file)) + return &SHMEM_I(file_inode(file))->seals; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index a96840c4158165..dae5e60d64e2fd 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -835,9 +835,17 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, + #define hwpoison_hugetlb_range NULL + #endif + ++static int hwpoison_test_walk(unsigned long start, unsigned long end, ++ struct mm_walk *walk) ++{ ++ /* We also want to consider pages mapped into VM_PFNMAP. */ ++ return 0; ++} ++ + static const struct mm_walk_ops hwpoison_walk_ops = { + .pmd_entry = hwpoison_pte_range, + .hugetlb_entry = hwpoison_hugetlb_range, ++ .test_walk = hwpoison_test_walk, + .walk_lock = PGWALK_RDLOCK, + }; + +diff --git a/mm/mmap.c b/mm/mmap.c +index a9c70001e45601..8cf23a07ae500f 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -47,6 +47,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -107,7 +108,7 @@ void vma_set_page_prot(struct vm_area_struct *vma) + static void __remove_shared_vm_struct(struct vm_area_struct *vma, + struct file *file, struct address_space *mapping) + { +- if (vma->vm_flags & VM_SHARED) ++ if (vma_is_shared_maywrite(vma)) + mapping_unmap_writable(mapping); + + flush_dcache_mmap_lock(mapping); +@@ -383,7 +384,7 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, + static void __vma_link_file(struct vm_area_struct *vma, + struct address_space *mapping) + { +- if (vma->vm_flags & VM_SHARED) ++ if (vma_is_shared_maywrite(vma)) + mapping_allow_writable(mapping); + + flush_dcache_mmap_lock(mapping); +@@ -1285,6 +1286,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, + + if (file) { + struct inode *inode = file_inode(file); ++ unsigned int seals = memfd_file_seals(file); + unsigned long flags_mask; + + if (!file_mmap_ok(file, inode, pgoff, len)) +@@ -1323,6 +1325,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr, + vm_flags |= VM_SHARED | VM_MAYSHARE; + if (!(file->f_mode & FMODE_WRITE)) + vm_flags &= ~(VM_MAYWRITE | VM_SHARED); ++ else if (is_readonly_sealed(seals, vm_flags)) ++ vm_flags &= ~VM_MAYWRITE; + fallthrough; + case MAP_PRIVATE: + if (!(file->f_mode & FMODE_READ)) +@@ -2845,7 +2849,7 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr, + mm->map_count++; + if (vma->vm_file) { + i_mmap_lock_write(vma->vm_file->f_mapping); +- if (vma->vm_flags & VM_SHARED) ++ if (vma_is_shared_maywrite(vma)) + mapping_allow_writable(vma->vm_file->f_mapping); + + flush_dcache_mmap_lock(vma->vm_file->f_mapping); +@@ -2926,7 +2930,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + return -EINVAL; + + /* Map writable and ensure this isn't a sealed memfd. 
*/ +- if (file && (vm_flags & VM_SHARED)) { ++ if (file && is_shared_maywrite(vm_flags)) { + int error = mapping_map_writable(file->f_mapping); + + if (error) +diff --git a/mm/ptdump.c b/mm/ptdump.c +index 03c1bdae4a4368..e46df2c24d8583 100644 +--- a/mm/ptdump.c ++++ b/mm/ptdump.c +@@ -152,6 +152,7 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd) + { + const struct ptdump_range *range = st->range; + ++ get_online_mems(); + mmap_write_lock(mm); + while (range->start != range->end) { + walk_page_range_novma(mm, range->start, range->end, +@@ -159,6 +160,7 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd) + range++; + } + mmap_write_unlock(mm); ++ put_online_mems(); + + /* Flush out the last page */ + st->note_page(st, 0, -1, 0); +diff --git a/mm/shmem.c b/mm/shmem.c +index 283fb62084d454..ecf1011cc3e296 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2396,7 +2396,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) + struct shmem_inode_info *info = SHMEM_I(inode); + int ret; + +- ret = seal_check_future_write(info->seals, vma); ++ ret = seal_check_write(info->seals, vma); + if (ret) + return ret; + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 549ee9e87d6366..ff9d2520ba749c 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -339,7 +339,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) + case BT_CODEC_TRANSPARENT: + if (!find_next_esco_param(conn, esco_param_msbc, + ARRAY_SIZE(esco_param_msbc))) +- return false; ++ return -EINVAL; ++ + param = &esco_param_msbc[conn->attempt - 1]; + cp.tx_coding_format.id = 0x03; + cp.rx_coding_format.id = 0x03; +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 3b22ce3aa95bb5..c06010c0d88293 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -6664,8 +6664,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, + qos->ucast.out.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), + 1000); +- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu); +- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu); ++ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; ++ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; + qos->ucast.in.phy = ev->c_phy; + qos->ucast.out.phy = ev->p_phy; + break; +@@ -6679,8 +6679,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, + qos->ucast.in.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), + 1000); +- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu); +- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu); ++ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; ++ qos->ucast.in.sdu = ev->p_bn ? 
le16_to_cpu(ev->p_mtu) : 0; + qos->ucast.out.phy = ev->c_phy; + qos->ucast.in.phy = ev->p_phy; + break; +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c +index 69c2ba1e843eb4..d2613bd3e6db02 100644 +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -118,7 +118,7 @@ static void hci_sock_free_cookie(struct sock *sk) + int id = hci_pi(sk)->cookie; + + if (id) { +- hci_pi(sk)->cookie = 0xffffffff; ++ hci_pi(sk)->cookie = 0; + ida_free(&sock_cookie_ida, id); + } + } +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index fa16ee88ec396a..f42805d9b38fa6 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -4807,6 +4807,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, + intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; + } + ++ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) { ++ br_info(brmctx->br, ++ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n", ++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX), ++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX)); ++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX; ++ } ++ + brmctx->multicast_query_interval = intvl_jiffies; + } + +@@ -4823,6 +4831,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, + intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; + } + ++ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) { ++ br_info(brmctx->br, ++ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n", ++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX), ++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX)); ++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX; ++ } ++ + brmctx->multicast_startup_query_interval = intvl_jiffies; + } + +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index 067d47b8eb8ffb..ef98ec4c3f51d4 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -31,6 +31,8 @@ + #define BR_MULTICAST_DEFAULT_HASH_MAX 4096 + #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000) + #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN ++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */ ++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX + + #define BR_HWDOM_MAX BITS_PER_LONG + +diff --git a/net/core/dev.c b/net/core/dev.c +index 4006fd164b7bc7..2d3e0e4130c213 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3559,6 +3559,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb, + features &= ~NETIF_F_TSO_MANGLEID; + } + ++ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers, ++ * so neither does TSO that depends on it. 
++ */ ++ if (features & NETIF_F_IPV6_CSUM && ++ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 || ++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && ++ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) && ++ skb_transport_header_was_set(skb) && ++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) && ++ !ipv6_has_hopopt_jumbo(skb)) ++ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4); ++ + return features; + } + +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 1e2e60ffe76629..e6b36df482bc7f 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -55,7 +55,8 @@ static void __neigh_notify(struct neighbour *n, int type, int flags, + u32 pid); + static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); + static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, +- struct net_device *dev); ++ struct net_device *dev, ++ bool skip_perm); + + #ifdef CONFIG_PROC_FS + static const struct seq_operations neigh_stat_seq_ops; +@@ -444,7 +445,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, + { + write_lock_bh(&tbl->lock); + neigh_flush_dev(tbl, dev, skip_perm); +- pneigh_ifdown_and_unlock(tbl, dev); ++ pneigh_ifdown_and_unlock(tbl, dev, skip_perm); + pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL, + tbl->family); + if (skb_queue_empty_lockless(&tbl->proxy_queue)) +@@ -845,7 +846,8 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, + } + + static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, +- struct net_device *dev) ++ struct net_device *dev, ++ bool skip_perm) + { + struct pneigh_entry *n, **np, *freelist = NULL; + u32 h; +@@ -853,12 +855,15 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, + for (h = 0; h <= PNEIGH_HASHMASK; h++) { + np = &tbl->phash_buckets[h]; + while ((n = *np) != NULL) { ++ if (skip_perm && n->permanent) ++ goto skip; + if (!dev || n->dev == dev) { + *np = n->next; + n->next = freelist; + freelist = n; + continue; + } ++skip: + np = &n->next; + } + } +@@ -2033,6 +2038,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, + pn = pneigh_lookup(tbl, net, dst, dev, 1); + if (pn) { + pn->flags = ndm_flags; ++ pn->permanent = !!(ndm->ndm_state & NUD_PERMANENT); + if (protocol) + pn->protocol = protocol; + err = 0; +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c +index 70ac9d9bc87708..20829e0c36cdbf 100644 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@ -467,7 +467,7 @@ static void net_complete_free(void) + + } + +-static void net_free(struct net *net) ++void net_passive_dec(struct net *net) + { + if (refcount_dec_and_test(&net->passive)) { + kfree(rcu_access_pointer(net->gen)); +@@ -485,7 +485,7 @@ void net_drop_ns(void *p) + struct net *net = (struct net *)p; + + if (net) +- net_free(net); ++ net_passive_dec(net); + } + + struct net *copy_net_ns(unsigned long flags, +@@ -527,7 +527,7 @@ struct net *copy_net_ns(unsigned long flags, + key_remove_domain(net->key_domain); + #endif + put_user_ns(user_ns); +- net_free(net); ++ net_passive_dec(net); + dec_ucounts: + dec_net_namespaces(ucounts); + return ERR_PTR(rv); +@@ -672,7 +672,7 @@ static void cleanup_net(struct work_struct *work) + key_remove_domain(net->key_domain); + #endif + put_user_ns(net->user_ns); +- net_free(net); ++ net_passive_dec(net); + } + } + +diff --git a/net/core/sock.c b/net/core/sock.c +index ec48690b5174eb..b74bc8175937e2 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2159,6 +2159,7 @@ struct sock 
*sk_alloc(struct net *net, int family, gfp_t priority, + get_net_track(net, &sk->ns_tracker, priority); + sock_inuse_add(net, 1); + } else { ++ net_passive_inc(net); + __netns_tracker_alloc(net, &sk->ns_tracker, + false, priority); + } +@@ -2183,6 +2184,7 @@ EXPORT_SYMBOL(sk_alloc); + static void __sk_destruct(struct rcu_head *head) + { + struct sock *sk = container_of(head, struct sock, sk_rcu); ++ struct net *net = sock_net(sk); + struct sk_filter *filter; + + if (sk->sk_destruct) +@@ -2214,14 +2216,28 @@ static void __sk_destruct(struct rcu_head *head) + put_cred(sk->sk_peer_cred); + put_pid(sk->sk_peer_pid); + +- if (likely(sk->sk_net_refcnt)) +- put_net_track(sock_net(sk), &sk->ns_tracker); +- else +- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); +- ++ if (likely(sk->sk_net_refcnt)) { ++ put_net_track(net, &sk->ns_tracker); ++ } else { ++ __netns_tracker_free(net, &sk->ns_tracker, false); ++ net_passive_dec(net); ++ } + sk_prot_free(sk->sk_prot_creator, sk); + } + ++void sk_net_refcnt_upgrade(struct sock *sk) ++{ ++ struct net *net = sock_net(sk); ++ ++ WARN_ON_ONCE(sk->sk_net_refcnt); ++ __netns_tracker_free(net, &sk->ns_tracker, false); ++ net_passive_dec(net); ++ sk->sk_net_refcnt = 1; ++ get_net_track(net, &sk->ns_tracker, GFP_KERNEL); ++ sock_inuse_add(net, 1); ++} ++EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); ++ + void sk_destruct(struct sock *sk) + { + bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); +@@ -2313,6 +2329,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) + * is not properly dismantling its kernel sockets at netns + * destroy time. + */ ++ net_passive_inc(sock_net(newsk)); + __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, + false, priority); + } +diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c +index 1b6457f357bdb2..b8230faa567f77 100644 +--- a/net/hsr/hsr_slave.c ++++ b/net/hsr/hsr_slave.c +@@ -62,8 +62,14 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) + skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) || +- protocol == htons(ETH_P_HSR)) ++ protocol == htons(ETH_P_HSR)) { ++ if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) { ++ kfree_skb(skb); ++ goto finish_consume; ++ } ++ + skb_set_network_header(skb, ETH_HLEN + HSR_HLEN); ++ } + skb_reset_mac_len(skb); + + hsr_forward_skb(skb, port); +diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c +index 675b5bbed638e4..2d663fe50f876c 100644 +--- a/net/ipv4/netfilter/nf_reject_ipv4.c ++++ b/net/ipv4/netfilter/nf_reject_ipv4.c +@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb, + if (!oth) + return; + +- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) && +- nf_reject_fill_skb_dst(oldskb) < 0) ++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0) + return; + + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) +@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook) + if (iph->frag_off & htons(IP_OFFSET)) + return; + +- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) && +- nf_reject_fill_skb_dst(skb_in) < 0) ++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0) + return; + + if (skb_csum_unnecessary(skb_in) || +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 6ee77f7f911473..8672ebbace980b 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2560,7 +2560,6 @@ static struct rtable 
*__mkroute_output(const struct fib_result *res, + do_cache = true; + if (type == RTN_BROADCAST) { + flags |= RTCF_BROADCAST | RTCF_LOCAL; +- fi = NULL; + } else if (type == RTN_MULTICAST) { + flags |= RTCF_MULTICAST | RTCF_LOCAL; + if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index 3870b59f540048..9be9df2caf6540 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -61,7 +61,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, + remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); + skb->remcsum_offload = remcsum; + +- need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); ++ need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb); + /* Try to offload checksum if possible */ + offload_csum = !!(need_csum && + !need_ipsec && +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index f6188bd9f55ba6..1c3b0ba289fbd4 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -2193,13 +2193,12 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp) + in6_ifa_put(ifp); + } + +-/* Join to solicited addr multicast group. +- * caller must hold RTNL */ ++/* Join to solicited addr multicast group. */ + void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) + { + struct in6_addr maddr; + +- if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) ++ if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP)) + return; + + addrconf_addr_solict_mult(addr, &maddr); +@@ -3834,7 +3833,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) + * Do not dev_put! + */ + if (unregister) { +- idev->dead = 1; ++ WRITE_ONCE(idev->dead, 1); + + /* protected by rtnl_lock */ + RCU_INIT_POINTER(dev->ip6_ptr, NULL); +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c +index e153dac47a530d..160b452f75e7d2 100644 +--- a/net/ipv6/mcast.c ++++ b/net/ipv6/mcast.c +@@ -906,23 +906,22 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, + static int __ipv6_dev_mc_inc(struct net_device *dev, + const struct in6_addr *addr, unsigned int mode) + { +- struct ifmcaddr6 *mc; + struct inet6_dev *idev; +- +- ASSERT_RTNL(); ++ struct ifmcaddr6 *mc; + + /* we need to take a reference on idev */ + idev = in6_dev_get(dev); +- + if (!idev) + return -EINVAL; + +- if (idev->dead) { ++ mutex_lock(&idev->mc_lock); ++ ++ if (READ_ONCE(idev->dead)) { ++ mutex_unlock(&idev->mc_lock); + in6_dev_put(idev); + return -ENODEV; + } + +- mutex_lock(&idev->mc_lock); + for_each_mc_mclock(idev, mc) { + if (ipv6_addr_equal(&mc->mca_addr, addr)) { + mc->mca_users++; +diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c +index e4776bd2ed89bd..f3579bccf0a516 100644 +--- a/net/ipv6/netfilter/nf_reject_ipv6.c ++++ b/net/ipv6/netfilter/nf_reject_ipv6.c +@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb, + fl6.fl6_sport = otcph->dest; + fl6.fl6_dport = otcph->source; + +- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) { ++ if (!skb_dst(oldskb)) { + nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); + if (!dst) + return; +@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, + if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) + skb_in->dev = net->loopback_dev; + +- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) && +- nf_reject6_fill_skb_dst(skb_in) < 0) ++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0) + 
return; + + icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); +diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c +index 3c3800223e0e0d..6e15a65faeccd7 100644 +--- a/net/ipv6/seg6_hmac.c ++++ b/net/ipv6/seg6_hmac.c +@@ -35,6 +35,7 @@ + #include + + #include ++#include + #include + #include + #include +@@ -269,7 +270,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb) + if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output)) + return false; + +- if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0) ++ if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN)) + return false; + + return true; +@@ -293,6 +294,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo) + struct seg6_pernet_data *sdata = seg6_pernet(net); + int err; + ++ if (!__hmac_get_algo(hinfo->alg_id)) ++ return -EINVAL; ++ + err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node, + rht_params); + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 3ff7f38394a6bc..1addfba4b28567 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1847,12 +1847,12 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, + } + + if (params->supported_rates && +- params->supported_rates_len) { +- ieee80211_parse_bitrates(link->conf->chandef.width, +- sband, params->supported_rates, +- params->supported_rates_len, +- &link_sta->pub->supp_rates[sband->band]); +- } ++ params->supported_rates_len && ++ !ieee80211_parse_bitrates(link->conf->chandef.width, ++ sband, params->supported_rates, ++ params->supported_rates_len, ++ &link_sta->pub->supp_rates[sband->band])) ++ return -EINVAL; + + if (params->ht_capa) + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, +diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c +index 31c4f112345ea4..4a21e53afa72ef 100644 +--- a/net/mac80211/chan.c ++++ b/net/mac80211/chan.c +@@ -1313,6 +1313,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) + goto out; + } + ++ link->radar_required = link->reserved_radar_required; + list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links); + rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf); + +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 2c7e139efd532f..295c2fdbd3c742 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -3662,6 +3662,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_prep_tx_info info = { + .subtype = IEEE80211_STYPE_AUTH, + }; ++ bool sae_need_confirm = false; + + sdata_assert_lock(sdata); + +@@ -3705,6 +3706,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY; + ifmgd->auth_data->timeout_started = true; + run_again(sdata, ifmgd->auth_data->timeout); ++ if (auth_transaction == 1) ++ sae_need_confirm = true; + goto notify_driver; + } + +@@ -3747,6 +3750,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + ifmgd->auth_data->expected_transaction == 2)) { + if (!ieee80211_mark_sta_auth(sdata)) + return; /* ignore frame -- wait for timeout */ ++ } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && ++ auth_transaction == 1) { ++ sae_need_confirm = true; + } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && + auth_transaction == 2) { + sdata_info(sdata, "SAE peer confirmed\n"); +@@ -3755,7 +3761,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); + 
notify_driver: +- drv_mgd_complete_tx(sdata->local, sdata, &info); ++ if (!sae_need_confirm) ++ drv_mgd_complete_tx(sdata->local, sdata, &info); + } + + #define case_WLAN(type) \ +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 58665b6ae6354b..210337ef23cf5c 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4221,10 +4221,16 @@ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, + rx->link_sta = NULL; + } + +- if (link_id < 0) +- rx->link = &rx->sdata->deflink; +- else if (!ieee80211_rx_data_set_link(rx, link_id)) ++ if (link_id < 0) { ++ if (ieee80211_vif_is_mld(&rx->sdata->vif) && ++ sta && !sta->sta.valid_links) ++ rx->link = ++ rcu_dereference(rx->sdata->link[sta->deflink.link_id]); ++ else ++ rx->link = &rx->sdata->deflink; ++ } else if (!ieee80211_rx_data_set_link(rx, link_id)) { + return false; ++ } + + return true; + } +diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c +index 8032cfba22d1c5..5f9592fb57add2 100644 +--- a/net/mctp/af_mctp.c ++++ b/net/mctp/af_mctp.c +@@ -73,7 +73,6 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen) + + lock_sock(sk); + +- /* TODO: allow rebind */ + if (sk_hashed(sk)) { + rc = -EADDRINUSE; + goto out_release; +@@ -549,15 +548,36 @@ static void mctp_sk_close(struct sock *sk, long timeout) + static int mctp_sk_hash(struct sock *sk) + { + struct net *net = sock_net(sk); ++ struct sock *existing; ++ struct mctp_sock *msk; ++ int rc; ++ ++ msk = container_of(sk, struct mctp_sock, sk); + + /* Bind lookup runs under RCU, remain live during that. */ + sock_set_flag(sk, SOCK_RCU_FREE); + + mutex_lock(&net->mctp.bind_lock); ++ ++ /* Prevent duplicate binds. */ ++ sk_for_each(existing, &net->mctp.binds) { ++ struct mctp_sock *mex = ++ container_of(existing, struct mctp_sock, sk); ++ ++ if (mex->bind_type == msk->bind_type && ++ mex->bind_addr == msk->bind_addr && ++ mex->bind_net == msk->bind_net) { ++ rc = -EADDRINUSE; ++ goto out; ++ } ++ } ++ + sk_add_node_rcu(sk, &net->mctp.binds); +- mutex_unlock(&net->mctp.bind_lock); ++ rc = 0; + +- return 0; ++out: ++ mutex_unlock(&net->mctp.bind_lock); ++ return rc; + } + + static void mctp_sk_unhash(struct sock *sk) +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 8d4889a730064d..9406d2d555e74d 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -1117,7 +1117,9 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk, + return hmac == mp_opt->ahmac; + } + +-/* Return false if a subflow has been reset, else return true */ ++/* Return false in case of error (or subflow has been reset), ++ * else return true. 
++ */ + bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); +@@ -1221,7 +1223,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) + + mpext = skb_ext_add(skb, SKB_EXT_MPTCP); + if (!mpext) +- return true; ++ return false; + + memset(mpext, 0, sizeof(*mpext)); + +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index f7257de37bd090..e8042014bd5f4a 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -294,6 +294,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer) + struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); + struct mptcp_sock *msk = entry->sock; + struct sock *sk = (struct sock *)msk; ++ unsigned int timeout; + + pr_debug("msk=%p\n", msk); + +@@ -311,6 +312,10 @@ static void mptcp_pm_add_timer(struct timer_list *timer) + goto out; + } + ++ timeout = mptcp_get_add_addr_timeout(sock_net(sk)); ++ if (!timeout) ++ goto out; ++ + spin_lock_bh(&msk->pm.lock); + + if (!mptcp_pm_should_add_signal_addr(msk)) { +@@ -322,7 +327,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer) + + if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) + sk_reset_timer(sk, timer, +- jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); ++ jiffies + timeout); + + spin_unlock_bh(&msk->pm.lock); + +@@ -364,6 +369,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + struct mptcp_pm_add_entry *add_entry = NULL; + struct sock *sk = (struct sock *)msk; + struct net *net = sock_net(sk); ++ unsigned int timeout; + + lockdep_assert_held(&msk->pm.lock); + +@@ -373,9 +379,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) + return false; + +- sk_reset_timer(sk, &add_entry->add_timer, +- jiffies + mptcp_get_add_addr_timeout(net)); +- return true; ++ goto reset_timer; + } + + add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); +@@ -389,8 +393,10 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + add_entry->retrans_times = 0; + + timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); +- sk_reset_timer(sk, &add_entry->add_timer, +- jiffies + mptcp_get_add_addr_timeout(net)); ++reset_timer: ++ timeout = mptcp_get_add_addr_timeout(net); ++ if (timeout) ++ sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout); + + return true; + } +@@ -1783,7 +1789,6 @@ static void __flush_addrs(struct list_head *list) + static void __reset_counters(struct pm_nl_pernet *pernet) + { + WRITE_ONCE(pernet->add_addr_signal_max, 0); +- WRITE_ONCE(pernet->add_addr_accept_max, 0); + WRITE_ONCE(pernet->local_addr_max, 0); + pernet->addrs = 0; + } +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index a01ea18283c726..0c9b9c0c277c23 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -1715,10 +1715,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, + * needs it. + * Update ns_tracker to current stack trace and refcounted tracker. 
+ */ +- __netns_tracker_free(net, &sf->sk->ns_tracker, false); +- sf->sk->sk_net_refcnt = 1; +- get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); +- sock_inuse_add(net, 1); ++ sk_net_refcnt_upgrade(sf->sk); + err = tcp_set_ulp(sf->sk, "mptcp"); + + release_ssk: +diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h +index 2c260f33b55cc5..ad1f671ffc37fa 100644 +--- a/net/ncsi/internal.h ++++ b/net/ncsi/internal.h +@@ -110,7 +110,7 @@ struct ncsi_channel_version { + u8 update; /* NCSI version update */ + char alpha1; /* NCSI version alpha1 */ + char alpha2; /* NCSI version alpha2 */ +- u8 fw_name[12]; /* Firmware name string */ ++ u8 fw_name[12 + 1]; /* Firmware name string */ + u32 fw_version; /* Firmware version */ + u16 pci_ids[4]; /* PCI identification */ + u32 mf_id; /* Manufacture ID */ +diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c +index 8668888c5a2f99..d5ed80731e8928 100644 +--- a/net/ncsi/ncsi-rsp.c ++++ b/net/ncsi/ncsi-rsp.c +@@ -775,6 +775,7 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr) + ncv->alpha1 = rsp->alpha1; + ncv->alpha2 = rsp->alpha2; + memcpy(ncv->fw_name, rsp->fw_name, 12); ++ ncv->fw_name[12] = '\0'; + ncv->fw_version = ntohl(rsp->fw_version); + for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++) + ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]); +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index 282e9644f6fdd6..928bd2013289af 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -859,8 +859,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) + + static int ctnetlink_done(struct netlink_callback *cb) + { +- if (cb->args[1]) +- nf_ct_put((struct nf_conn *)cb->args[1]); + kfree(cb->data); + return 0; + } +@@ -1175,19 +1173,26 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) + return 0; + } + ++static unsigned long ctnetlink_get_id(const struct nf_conn *ct) ++{ ++ unsigned long id = nf_ct_get_id(ct); ++ ++ return id ? id : 1; ++} ++ + static int + ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) + { + unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0; + struct net *net = sock_net(skb->sk); +- struct nf_conn *ct, *last; ++ unsigned long last_id = cb->args[1]; + struct nf_conntrack_tuple_hash *h; + struct hlist_nulls_node *n; + struct nf_conn *nf_ct_evict[8]; ++ struct nf_conn *ct; + int res, i; + spinlock_t *lockp; + +- last = (struct nf_conn *)cb->args[1]; + i = 0; + + local_bh_disable(); +@@ -1224,7 +1229,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) + continue; + + if (cb->args[1]) { +- if (ct != last) ++ if (ctnetlink_get_id(ct) != last_id) + continue; + cb->args[1] = 0; + } +@@ -1237,8 +1242,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) + NFNL_MSG_TYPE(cb->nlh->nlmsg_type), + ct, true, flags); + if (res < 0) { +- nf_conntrack_get(&ct->ct_general); +- cb->args[1] = (unsigned long)ct; ++ cb->args[1] = ctnetlink_get_id(ct); + spin_unlock(lockp); + goto out; + } +@@ -1251,12 +1255,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) + } + out: + local_bh_enable(); +- if (last) { ++ if (last_id) { + /* nf ct hash resize happened, now clear the leftover. 
*/ +- if ((struct nf_conn *)cb->args[1] == last) ++ if (cb->args[1] == last_id) + cb->args[1] = 0; +- +- nf_ct_put(last); + } + + while (i) { +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 0a412d9a8e5fdb..a5ffda87daf63b 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -798,16 +798,6 @@ static int netlink_release(struct socket *sock) + + sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); + +- /* Because struct net might disappear soon, do not keep a pointer. */ +- if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) { +- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); +- /* Because of deferred_put_nlk_sk and use of work queue, +- * it is possible netns will be freed before this socket. +- */ +- sock_net_set(sk, &init_net); +- __netns_tracker_alloc(&init_net, &sk->ns_tracker, +- false, GFP_KERNEL); +- } + call_rcu(&nlk->rcu, deferred_put_nlk_sk); + return 0; + } +@@ -1229,7 +1219,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + nlk = nlk_sk(sk); + rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); + +- if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) && ++ if ((rmem == skb->truesize || rmem <= READ_ONCE(sk->sk_rcvbuf)) && + !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { + netlink_skb_set_owner_r(skb, sk); + return 0; +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index 2dba7505b41489..985b05f38b6746 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -503,12 +503,8 @@ bool rds_tcp_tune(struct socket *sock) + release_sock(sk); + return false; + } +- /* Update ns_tracker to current stack trace and refcounted tracker */ +- __netns_tracker_free(net, &sk->ns_tracker, false); +- +- sk->sk_net_refcnt = 1; +- netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL); +- sock_inuse_add(net, 1); ++ sk_net_refcnt_upgrade(sk); ++ put_net(net); + } + rtn = net_generic(net, rds_tcp_netid); + if (rtn->sndbuf_size > 0) { +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c +index 09242578dac5bc..85984c91cf51fe 100644 +--- a/net/sched/sch_cake.c ++++ b/net/sched/sch_cake.c +@@ -1762,7 +1762,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, + ktime_t now = ktime_get(); + struct cake_tin_data *b; + struct cake_flow *flow; +- u32 idx; ++ u32 idx, tin; + + /* choose flow to insert into */ + idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); +@@ -1772,6 +1772,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, + __qdisc_drop(skb, to_free); + return ret; + } ++ tin = (u32)(b - q->tins); + idx--; + flow = &b->flows[idx]; + +@@ -1939,13 +1940,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, + q->buffer_max_used = q->buffer_used; + + if (q->buffer_used > q->buffer_limit) { ++ bool same_flow = false; + u32 dropped = 0; ++ u32 drop_id; + + while (q->buffer_used > q->buffer_limit) { + dropped++; +- cake_drop(sch, to_free); ++ drop_id = cake_drop(sch, to_free); ++ ++ if ((drop_id >> 16) == tin && ++ (drop_id & 0xFFFF) == idx) ++ same_flow = true; + } + b->drop_overlimit += dropped; ++ ++ if (same_flow) ++ return NET_XMIT_CN; + } + return NET_XMIT_SUCCESS; + } +diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c +index 3ee46f6e005da0..9873f4ae90c3aa 100644 +--- a/net/sched/sch_ets.c ++++ b/net/sched/sch_ets.c +@@ -651,23 +651,24 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, + + sch_tree_lock(sch); + +- q->nbands = nbands; ++ for (i = nbands; i < oldbands; i++) { ++ if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) ++ 
list_del_init(&q->classes[i].alist); ++ qdisc_purge_queue(q->classes[i].qdisc); ++ } ++ ++ WRITE_ONCE(q->nbands, nbands); + for (i = nstrict; i < q->nstrict; i++) { + if (q->classes[i].qdisc->q.qlen) { + list_add_tail(&q->classes[i].alist, &q->active); + q->classes[i].deficit = quanta[i]; + } + } +- for (i = q->nbands; i < oldbands; i++) { +- if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) +- list_del_init(&q->classes[i].alist); +- qdisc_purge_queue(q->classes[i].qdisc); +- } +- q->nstrict = nstrict; ++ WRITE_ONCE(q->nstrict, nstrict); + memcpy(q->prio2band, priomap, sizeof(priomap)); + + for (i = 0; i < q->nbands; i++) +- q->classes[i].quantum = quanta[i]; ++ WRITE_ONCE(q->classes[i].quantum, quanta[i]); + + for (i = oldbands; i < q->nbands; i++) { + q->classes[i].qdisc = queues[i]; +@@ -681,7 +682,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, + for (i = q->nbands; i < oldbands; i++) { + qdisc_put(q->classes[i].qdisc); + q->classes[i].qdisc = NULL; +- q->classes[i].quantum = 0; ++ WRITE_ONCE(q->classes[i].quantum, 0); + q->classes[i].deficit = 0; + gnet_stats_basic_sync_init(&q->classes[i].bstats); + memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats)); +@@ -738,6 +739,7 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb) + struct ets_sched *q = qdisc_priv(sch); + struct nlattr *opts; + struct nlattr *nest; ++ u8 nbands, nstrict; + int band; + int prio; + int err; +@@ -750,21 +752,22 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb) + if (!opts) + goto nla_err; + +- if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands)) ++ nbands = READ_ONCE(q->nbands); ++ if (nla_put_u8(skb, TCA_ETS_NBANDS, nbands)) + goto nla_err; + +- if (q->nstrict && +- nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict)) ++ nstrict = READ_ONCE(q->nstrict); ++ if (nstrict && nla_put_u8(skb, TCA_ETS_NSTRICT, nstrict)) + goto nla_err; + +- if (q->nbands > q->nstrict) { ++ if (nbands > nstrict) { + nest = nla_nest_start(skb, TCA_ETS_QUANTA); + if (!nest) + goto nla_err; + +- for (band = q->nstrict; band < q->nbands; band++) { ++ for (band = nstrict; band < nbands; band++) { + if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, +- q->classes[band].quantum)) ++ READ_ONCE(q->classes[band].quantum))) + goto nla_err; + } + +@@ -776,7 +779,8 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb) + goto nla_err; + + for (prio = 0; prio <= TC_PRIO_MAX; prio++) { +- if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio])) ++ if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, ++ READ_ONCE(q->prio2band[prio]))) + goto nla_err; + } + +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 113b305b0d154c..c8a426062923ac 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) + */ + static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) + { +- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); ++ WARN_ON(cl->level || !cl->leaf.q); + + if (!cl->prio_activity) { + cl->prio_activity = 1 << cl->prio; +diff --git a/net/sctp/input.c b/net/sctp/input.c +index a8a254a5008e52..032a10d82302c3 100644 +--- a/net/sctp/input.c ++++ b/net/sctp/input.c +@@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb) + * it's better to just linearize it otherwise crc computing + * takes longer. 
+ */ +- if ((!is_gso && skb_linearize(skb)) || ++ if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) || + !pskb_may_pull(skb, sizeof(struct sctphdr))) + goto discard_it; + +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c +index 45efbbfff94ae2..b3bfd0f18d4180 100644 +--- a/net/smc/af_smc.c ++++ b/net/smc/af_smc.c +@@ -2553,8 +2553,9 @@ static void smc_listen_work(struct work_struct *work) + goto out_decl; + } + +- smc_listen_out_connected(new_smc); + SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini); ++ /* smc_listen_out() will release smcsk */ ++ smc_listen_out_connected(new_smc); + goto out_free; + + out_unlock: +@@ -3343,10 +3344,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family) + * which need net ref. + */ + sk = smc->clcsock->sk; +- __netns_tracker_free(net, &sk->ns_tracker, false); +- sk->sk_net_refcnt = 1; +- get_net_track(net, &sk->ns_tracker, GFP_KERNEL); +- sock_inuse_add(net, 1); ++ sk_net_refcnt_upgrade(sk); + return 0; + } + +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c +index 7229b4a9ad1dcf..78b139d8c1f3b9 100644 +--- a/net/sunrpc/svcsock.c ++++ b/net/sunrpc/svcsock.c +@@ -1579,10 +1579,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, + newlen = error; + + if (protocol == IPPROTO_TCP) { +- __netns_tracker_free(net, &sock->sk->ns_tracker, false); +- sock->sk->sk_net_refcnt = 1; +- get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL); +- sock_inuse_add(net, 1); ++ sk_net_refcnt_upgrade(sock->sk); + if ((error = kernel_listen(sock, 64)) < 0) + goto bummer; + } +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index 99bb3e762af46f..8b27a21f3b42d8 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, + goto out; + } + +- if (protocol == IPPROTO_TCP) { +- __netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false); +- sock->sk->sk_net_refcnt = 1; +- get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL); +- sock_inuse_add(xprt->xprt_net, 1); +- } ++ if (protocol == IPPROTO_TCP) ++ sk_net_refcnt_upgrade(sock->sk); + + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + if (IS_ERR(filp)) +diff --git a/net/tls/tls.h b/net/tls/tls.h +index 02038d0381b754..5dc61c85c076ec 100644 +--- a/net/tls/tls.h ++++ b/net/tls/tls.h +@@ -192,7 +192,7 @@ void tls_strp_msg_done(struct tls_strparser *strp); + int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb); + void tls_rx_msg_ready(struct tls_strparser *strp); + +-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); ++bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); + int tls_strp_msg_cow(struct tls_sw_context_rx *ctx); + struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx); + int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst); +diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c +index bea60b0160d1fc..6ce64a6e4495ec 100644 +--- a/net/tls/tls_strp.c ++++ b/net/tls/tls_strp.c +@@ -474,7 +474,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) + strp->stm.offset = offset; + } + +-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) ++bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) + { + struct strp_msg *rxm; + struct tls_msg *tlm; +@@ -483,8 +483,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) + DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len); + + if (!strp->copy_mode && 
force_refresh) {
+-		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
+-			return;
++		if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) {
++			WRITE_ONCE(strp->msg_ready, 0);
++			memset(&strp->stm, 0, sizeof(strp->stm));
++			return false;
++		}
+ 
+ 		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
+ 	}
+@@ -494,6 +497,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+ 	rxm->offset = strp->stm.offset;
+ 	tlm = tls_msg(strp->anchor);
+ 	tlm->control = strp->mark;
++
++	return true;
+ }
+ 
+ /* Called with lock held on lower socket */
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 4905a81c4ac194..27ce1feb79e14e 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1380,7 +1380,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 			return sock_intr_errno(timeo);
+ 		}
+ 
+-		tls_strp_msg_load(&ctx->strp, released);
++		if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
++			return tls_rx_rec_wait(sk, psock, nonblock, false);
+ 
+ 		return 1;
+ 	}
+@@ -1773,6 +1774,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
+ 	return tls_decrypt_sg(sk, NULL, sgout, &darg);
+ }
+ 
++/* All records returned from a recvmsg() call must have the same type.
++ * 0 is not a valid content type. Use it as "no type reported, yet".
++ */
+ static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
+ 				   u8 *control)
+ {
+@@ -2016,8 +2020,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ 	if (err < 0)
+ 		goto end;
+ 
++	/* process_rx_list() will set @control if it processed any records */
+ 	copied = err;
+-	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
++	if (len <= copied || rx_more ||
++	    (control && control != TLS_RECORD_TYPE_DATA))
+ 		goto end;
+ 
+ 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 2925f5d27ad3fb..e1d7ce8dac082e 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -221,7 +221,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ 
+ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+ {
+-	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
++	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+ 	struct scatterlist pkt, *p;
+ 	struct virtqueue *vq;
+ 	struct sk_buff *skb;
+@@ -497,8 +497,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ 	do {
+ 		virtqueue_disable_cb(vq);
+ 		for (;;) {
++			unsigned int len, payload_len;
++			struct virtio_vsock_hdr *hdr;
+ 			struct sk_buff *skb;
+-			unsigned int len;
+ 
+ 			if (!virtio_transport_more_replies(vsock)) {
+ 				/* Stop rx until the device processes already
+@@ -515,12 +516,19 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ 			vsock->rx_buf_nr--;
+ 
+ 			/* Drop short/long packets */
+-			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
++			if (unlikely(len < sizeof(*hdr) ||
+ 				     len > virtio_vsock_skb_len(skb))) {
+ 				kfree_skb(skb);
+ 				continue;
+ 			}
+ 
++			hdr = virtio_vsock_hdr(skb);
++			payload_len = le32_to_cpu(hdr->len);
++			if (unlikely(payload_len > len - sizeof(*hdr))) {
++				kfree_skb(skb);
++				continue;
++			}
++
+ 			virtio_vsock_skb_rx_put(skb);
+ 			virtio_transport_deliver_tap_pkt(skb);
+ 			virtio_transport_recv_pkt(&virtio_transport, skb);
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 55a1d3633853fa..3d631f8073f06b 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -739,7 +739,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
+ 
+ 	mgmt = (const struct ieee80211_mgmt *)params->buf;
+ 
+-	if (!ieee80211_is_mgmt(mgmt->frame_control))
++	if (!ieee80211_is_mgmt(mgmt->frame_control) ||
++	    ieee80211_has_order(mgmt->frame_control))
+ 		return -EINVAL;
+ 
+ 	stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index d2bd5bddfb05d8..acfbe1f013d1b8 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1466,6 +1466,26 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
+ }
+ EXPORT_SYMBOL(xfrm_state_lookup_byspi);
+ 
++static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
++{
++	struct xfrm_state *x;
++	unsigned int i;
++
++	rcu_read_lock();
++	for (i = 0; i <= net->xfrm.state_hmask; i++) {
++		hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) {
++			if (x->id.spi == spi && x->id.proto == proto) {
++				if (!xfrm_state_hold_rcu(x))
++					continue;
++				rcu_read_unlock();
++				return x;
++			}
++		}
++	}
++	rcu_read_unlock();
++	return NULL;
++}
++
+ static void __xfrm_state_insert(struct xfrm_state *x)
+ {
+ 	struct net *net = xs_net(x);
+@@ -2259,10 +2279,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
+ 	unsigned int h;
+ 	struct xfrm_state *x0;
+ 	int err = -ENOENT;
+-	__be32 minspi = htonl(low);
+-	__be32 maxspi = htonl(high);
++	u32 range = high - low + 1;
+ 	__be32 newspi = 0;
+-	u32 mark = x->mark.v & x->mark.m;
+ 
+ 	spin_lock_bh(&x->lock);
+ 	if (x->km.state == XFRM_STATE_DEAD) {
+@@ -2276,38 +2294,34 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
+ 
+ 	err = -ENOENT;
+ 
+-	if (minspi == maxspi) {
+-		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
+-		if (x0) {
+-			NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
+-			xfrm_state_put(x0);
++	for (h = 0; h < range; h++) {
++		u32 spi = (low == high) ? 
low : get_random_u32_inclusive(low, high); ++ newspi = htonl(spi); ++ ++ spin_lock_bh(&net->xfrm.xfrm_state_lock); ++ x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto); ++ if (!x0) { ++ x->id.spi = newspi; ++ h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family); ++ XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type); ++ spin_unlock_bh(&net->xfrm.xfrm_state_lock); ++ err = 0; + goto unlock; + } +- newspi = minspi; +- } else { +- u32 spi = 0; +- for (h = 0; h < high-low+1; h++) { +- spi = get_random_u32_inclusive(low, high); +- x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); +- if (x0 == NULL) { +- newspi = htonl(spi); +- break; +- } +- xfrm_state_put(x0); ++ xfrm_state_put(x0); ++ spin_unlock_bh(&net->xfrm.xfrm_state_lock); ++ ++ if (signal_pending(current)) { ++ err = -ERESTARTSYS; ++ goto unlock; + } ++ ++ if (low == high) ++ break; + } +- if (newspi) { +- spin_lock_bh(&net->xfrm.xfrm_state_lock); +- x->id.spi = newspi; +- h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); +- XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, +- x->xso.type); +- spin_unlock_bh(&net->xfrm.xfrm_state_lock); + +- err = 0; +- } else { ++ if (err) + NL_SET_ERR_MSG(extack, "No SPI available in the requested range"); +- } + + unlock: + spin_unlock_bh(&x->lock); +diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c +index 9e52c7360e55b4..2b99d18e703d8a 100644 +--- a/scripts/kconfig/gconf.c ++++ b/scripts/kconfig/gconf.c +@@ -780,7 +780,7 @@ static void renderer_edited(GtkCellRendererText * cell, + struct symbol *sym; + + if (!gtk_tree_model_get_iter(model2, &iter, path)) +- return; ++ goto free; + + gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); + sym = menu->sym; +@@ -792,6 +792,7 @@ static void renderer_edited(GtkCellRendererText * cell, + + update_tree(&rootmenu, NULL); + ++free: + gtk_tree_path_free(path); + } + +@@ -974,13 +975,14 @@ on_treeview2_key_press_event(GtkWidget * widget, + void + on_treeview2_cursor_changed(GtkTreeView * treeview, gpointer user_data) + { ++ GtkTreeModel *model = gtk_tree_view_get_model(treeview); + GtkTreeSelection *selection; + GtkTreeIter iter; + struct menu *menu; + + selection = gtk_tree_view_get_selection(treeview); +- if (gtk_tree_selection_get_selected(selection, &model2, &iter)) { +- gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); ++ if (gtk_tree_selection_get_selected(selection, &model, &iter)) { ++ gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1); + text_insert_help(menu); + } + } +diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c +index 1dcfb288ee6363..327b60cdb8dace 100644 +--- a/scripts/kconfig/lxdialog/inputbox.c ++++ b/scripts/kconfig/lxdialog/inputbox.c +@@ -39,8 +39,10 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width + + if (!init) + instr[0] = '\0'; +- else +- strcpy(instr, init); ++ else { ++ strncpy(instr, init, sizeof(dialog_input_result) - 1); ++ instr[sizeof(dialog_input_result) - 1] = '\0'; ++ } + + do_resize: + if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN)) +diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c +index 0e333284e947bc..6bb8a320a4cb8d 100644 +--- a/scripts/kconfig/lxdialog/menubox.c ++++ b/scripts/kconfig/lxdialog/menubox.c +@@ -264,7 +264,7 @@ int dialog_menu(const char *title, const char *prompt, + if (key < 256 && isalpha(key)) + key = tolower(key); + +- if 
(strchr("ynmh", key)) ++ if (strchr("ynmh ", key)) + i = max_choice; + else { + for (i = choice + 1; i < max_choice; i++) { +diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c +index 8cd72fe2597405..7a17c94a159418 100644 +--- a/scripts/kconfig/nconf.c ++++ b/scripts/kconfig/nconf.c +@@ -591,6 +591,8 @@ static void item_add_str(const char *fmt, ...) + tmp_str, + sizeof(k_menu_items[index].str)); + ++ k_menu_items[index].str[sizeof(k_menu_items[index].str) - 1] = '\0'; ++ + free_item(curses_menu_items[index]); + curses_menu_items[index] = new_item( + k_menu_items[index].str, +diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c +index 25a7263ef3c8c5..5f13a0a7fb0bec 100644 +--- a/scripts/kconfig/nconf.gui.c ++++ b/scripts/kconfig/nconf.gui.c +@@ -349,6 +349,7 @@ int dialog_inputbox(WINDOW *main_window, + x = (columns-win_cols)/2; + + strncpy(result, init, *result_len); ++ result[*result_len - 1] = '\0'; + + /* create the windows */ + win = newwin(win_lines, win_cols, y, x); +diff --git a/security/apparmor/file.c b/security/apparmor/file.c +index 6fd21324a097f6..a51b83cf696899 100644 +--- a/security/apparmor/file.c ++++ b/security/apparmor/file.c +@@ -436,9 +436,11 @@ int aa_path_link(const struct cred *subj_cred, + { + struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry }; + struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry }; ++ struct inode *inode = d_backing_inode(old_dentry); ++ vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(target.mnt), inode); + struct path_cond cond = { +- d_backing_inode(old_dentry)->i_uid, +- d_backing_inode(old_dentry)->i_mode ++ .uid = vfsuid_into_kuid(vfsuid), ++ .mode = inode->i_mode, + }; + char *buffer = NULL, *buffer2 = NULL; + struct aa_profile *profile; +diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h +index 73c8a32c68613e..6e88e99da80f6b 100644 +--- a/security/apparmor/include/lib.h ++++ b/security/apparmor/include/lib.h +@@ -46,7 +46,11 @@ + #define AA_BUG_FMT(X, fmt, args...) \ + WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args) + #else +-#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args) ++#define AA_BUG_FMT(X, fmt, args...) \ ++ do { \ ++ BUILD_BUG_ON_INVALID(X); \ ++ no_printk(fmt, ##args); \ ++ } while (0) + #endif + + #define AA_ERROR(fmt, args...) \ +diff --git a/security/inode.c b/security/inode.c +index 3aa75fffa8c929..a90b043695d92c 100644 +--- a/security/inode.c ++++ b/security/inode.c +@@ -159,7 +159,6 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode, + inode->i_fop = fops; + } + d_instantiate(dentry, inode); +- dget(dentry); + inode_unlock(dir); + return dentry; + +@@ -306,7 +305,6 @@ void securityfs_remove(struct dentry *dentry) + simple_rmdir(dir, dentry); + else + simple_unlink(dir, dentry); +- dput(dentry); + } + inode_unlock(dir); + simple_release_fs(&mount, &mount_count); +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 31fc20350fd96e..f37fd1e48740f1 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #include "pcm_local.h" + +@@ -3125,13 +3126,23 @@ struct snd_pcm_sync_ptr32 { + static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime) + { + snd_pcm_uframes_t boundary; ++ snd_pcm_uframes_t border; ++ int order; + + if (! 
runtime->buffer_size) + return 0; +- boundary = runtime->buffer_size; +- while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size) +- boundary *= 2; +- return boundary; ++ ++ border = 0x7fffffffUL - runtime->buffer_size; ++ if (runtime->buffer_size > border) ++ return runtime->buffer_size; ++ ++ order = __fls(border) - __fls(runtime->buffer_size); ++ boundary = runtime->buffer_size << order; ++ ++ if (boundary <= border) ++ return boundary; ++ else ++ return boundary / 2; + } + + static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream, +diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c +index bbf7bcdb449a8b..0a9223c18d77c3 100644 +--- a/sound/hda/hdac_device.c ++++ b/sound/hda/hdac_device.c +@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm); + int snd_hdac_keep_power_up(struct hdac_device *codec) + { + if (!atomic_inc_not_zero(&codec->in_pm)) { +- int ret = pm_runtime_get_if_active(&codec->dev, true); ++ int ret = pm_runtime_get_if_active(&codec->dev); + if (!ret) + return -1; + if (ret < 0) +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index aa6dc00985b514..80c3084189b039 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -641,24 +641,16 @@ static void hda_jackpoll_work(struct work_struct *work) + struct hda_codec *codec = + container_of(work, struct hda_codec, jackpoll_work.work); + +- /* for non-polling trigger: we need nothing if already powered on */ +- if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core)) ++ if (!codec->jackpoll_interval) + return; + + /* the power-up/down sequence triggers the runtime resume */ +- snd_hda_power_up_pm(codec); ++ snd_hda_power_up(codec); + /* update jacks manually if polling is required, too */ +- if (codec->jackpoll_interval) { +- snd_hda_jack_set_dirty_all(codec); +- snd_hda_jack_poll_all(codec); +- } +- snd_hda_power_down_pm(codec); +- +- if (!codec->jackpoll_interval) +- return; +- +- schedule_delayed_work(&codec->jackpoll_work, +- codec->jackpoll_interval); ++ snd_hda_jack_set_dirty_all(codec); ++ snd_hda_jack_poll_all(codec); ++ schedule_delayed_work(&codec->jackpoll_work, codec->jackpoll_interval); ++ snd_hda_power_down(codec); + } + + /* release all pincfg lists */ +@@ -2920,12 +2912,12 @@ static void hda_call_codec_resume(struct hda_codec *codec) + snd_hda_regmap_sync(codec); + } + +- if (codec->jackpoll_interval) +- hda_jackpoll_work(&codec->jackpoll_work.work); +- else +- snd_hda_jack_report_sync(codec); ++ snd_hda_jack_report_sync(codec); + codec->core.dev.power.power_state = PMSG_ON; + snd_hdac_leave_pm(&codec->core); ++ if (codec->jackpoll_interval) ++ schedule_delayed_work(&codec->jackpoll_work, ++ codec->jackpoll_interval); + } + + static int hda_codec_runtime_suspend(struct device *dev) +@@ -2937,8 +2929,6 @@ static int hda_codec_runtime_suspend(struct device *dev) + if (!codec->card) + return 0; + +- cancel_delayed_work_sync(&codec->jackpoll_work); +- + state = hda_call_codec_suspend(codec); + if (codec->link_down_at_suspend || + (codec_has_clkstop(codec) && codec_has_epss(codec) && +@@ -2946,10 +2936,6 @@ static int hda_codec_runtime_suspend(struct device *dev) + snd_hdac_codec_link_down(&codec->core); + snd_hda_codec_display_power(codec, false); + +- if (codec->bus->jackpoll_in_suspend && +- (dev->power.power_state.event != PM_EVENT_SUSPEND)) +- schedule_delayed_work(&codec->jackpoll_work, +- codec->jackpoll_interval); + return 0; + } + +@@ -3052,6 +3038,7 @@ void snd_hda_codec_shutdown(struct hda_codec *codec) + if 
(!codec->core.registered) + return; + ++ codec->jackpoll_interval = 0; /* don't poll any longer */ + cancel_delayed_work_sync(&codec->jackpoll_work); + list_for_each_entry(cpcm, &codec->pcm_list_head, list) + snd_pcm_suspend_all(cpcm->pcm); +@@ -3118,10 +3105,11 @@ int snd_hda_codec_build_controls(struct hda_codec *codec) + if (err < 0) + return err; + ++ snd_hda_jack_report_sync(codec); /* call at the last init point */ + if (codec->jackpoll_interval) +- hda_jackpoll_work(&codec->jackpoll_work.work); +- else +- snd_hda_jack_report_sync(codec); /* call at the last init point */ ++ schedule_delayed_work(&codec->jackpoll_work, ++ codec->jackpoll_interval); ++ + sync_power_up_states(codec); + return 0; + } +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index 94b452595f3032..851e9231bbbd55 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -4411,7 +4411,7 @@ static int add_tuning_control(struct hda_codec *codec, + } + knew.private_value = + HDA_COMPOSE_AMP_VAL(nid, 1, 0, type); +- sprintf(namestr, "%s %s Volume", name, dirstr[dir]); ++ snprintf(namestr, sizeof(namestr), "%s %s Volume", name, dirstr[dir]); + return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index e12e3134b5e16b..d4bc80780a1f91 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10071,6 +10071,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), + SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), ++ SND_PCI_QUIRK(0x103c, 0x8548, "HP EliteBook x360 830 G6", ALC285_FIXUP_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x854a, "HP EliteBook 830 G6", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11), + SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), + SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), +@@ -10636,6 +10638,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -10651,6 +10654,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0xf111, 0x000b, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + + #if 0 +diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c +index 
ae285c0a629c82..f3df6fe2b7f187 100644 +--- a/sound/pci/intel8x0.c ++++ b/sound/pci/intel8x0.c +@@ -2252,7 +2252,7 @@ static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock, + tmp |= chip->ac97_sdin[0] << ICH_DI1L_SHIFT; + for (i = 1; i < 4; i++) { + if (pcm->r[0].codec[i]) { +- tmp |= chip->ac97_sdin[pcm->r[0].codec[1]->num] << ICH_DI2L_SHIFT; ++ tmp |= chip->ac97_sdin[pcm->r[0].codec[i]->num] << ICH_DI2L_SHIFT; + break; + } + } +diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c +index 8b6b7602969488..0ddfb0cb376fde 100644 +--- a/sound/soc/codecs/hdac_hdmi.c ++++ b/sound/soc/codecs/hdac_hdmi.c +@@ -1230,7 +1230,8 @@ static int hdac_hdmi_parse_eld(struct hdac_device *hdev, + >> DRM_ELD_VER_SHIFT; + + if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) { +- dev_err(&hdev->dev, "HDMI: Unknown ELD version %d\n", ver); ++ dev_err_ratelimited(&hdev->dev, ++ "HDMI: Unknown ELD version %d\n", ver); + return -EINVAL; + } + +@@ -1238,7 +1239,8 @@ static int hdac_hdmi_parse_eld(struct hdac_device *hdev, + DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; + + if (mnl > ELD_MAX_MNL) { +- dev_err(&hdev->dev, "HDMI: MNL Invalid %d\n", mnl); ++ dev_err_ratelimited(&hdev->dev, ++ "HDMI: MNL Invalid %d\n", mnl); + return -EINVAL; + } + +@@ -1297,8 +1299,8 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin, + + if (!port->eld.monitor_present || !port->eld.eld_valid) { + +- dev_err(&hdev->dev, "%s: disconnect for pin:port %d:%d\n", +- __func__, pin->nid, port->id); ++ dev_dbg(&hdev->dev, "%s: disconnect for pin:port %d:%d\n", ++ __func__, pin->nid, port->id); + + /* + * PCMs are not registered during device probe, so don't +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c +index 1955d77cffd996..0f250e8e216a4f 100644 +--- a/sound/soc/codecs/rt5640.c ++++ b/sound/soc/codecs/rt5640.c +@@ -3016,6 +3016,11 @@ static int rt5640_i2c_probe(struct i2c_client *i2c) + } + + regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val); ++ if (val != RT5640_DEVICE_ID) { ++ usleep_range(60000, 100000); ++ regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val); ++ } ++ + if (val != RT5640_DEVICE_ID) { + dev_err(&i2c->dev, + "Device with ID register %#x is not rt5640/39\n", val); +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c +index 886f5c29939b8e..a6948a57636ab5 100644 +--- a/sound/soc/fsl/fsl_sai.c ++++ b/sound/soc/fsl/fsl_sai.c +@@ -768,9 +768,9 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir) + * are running concurrently. 
+ */ + /* Software Reset */ +- regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); ++ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + /* Clear SR bit to finish the reset */ +- regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); ++ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, 0); + } + + static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, +@@ -889,11 +889,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) + unsigned int ofs = sai->soc_data->reg_offset; + + /* Software Reset for both Tx and Rx */ +- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); +- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); ++ regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); ++ regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + /* Clear SR bit to finish the reset */ +- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); +- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); ++ regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); ++ regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0); + + regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs), + FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth), +@@ -1710,11 +1710,11 @@ static int fsl_sai_runtime_resume(struct device *dev) + + regcache_cache_only(sai->regmap, false); + regcache_mark_dirty(sai->regmap); +- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); +- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); ++ regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); ++ regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + usleep_range(1000, 2000); +- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); +- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); ++ regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); ++ regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0); + + ret = regcache_sync(sai->regmap); + if (ret) +diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c +index 63e4356e8caf94..8f36cef88fe60b 100644 +--- a/sound/soc/intel/avs/core.c ++++ b/sound/soc/intel/avs/core.c +@@ -415,6 +415,8 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) + adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; ++ bus = &adev->base.core; ++ + ret = avs_bus_init(adev, pci, id); + if (ret < 0) { + dev_err(dev, "failed to init avs bus: %d\n", ret); +@@ -425,7 +427,6 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) + if (ret < 0) + return ret; + +- bus = &adev->base.core; + bus->addr = pci_resource_start(pci, 0); + bus->remap_addr = pci_ioremap_bar(pci, 0); + if (!bus->remap_addr) { +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c +index f918d9e16dc041..f342bc4b3a1468 100644 +--- a/sound/soc/qcom/lpass-platform.c ++++ b/sound/soc/qcom/lpass-platform.c +@@ -201,7 +201,6 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component, + struct regmap *map; + unsigned int dai_id = cpu_dai->driver->id; + +- component->id = dai_id; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; +@@ -1189,13 +1188,14 @@ static int lpass_platform_pcmops_suspend(struct snd_soc_component *component) + { + struct lpass_data *drvdata = snd_soc_component_get_drvdata(component); + struct regmap 
*map; +- unsigned int dai_id = component->id; + +- if (dai_id == LPASS_DP_RX) ++ if (drvdata->hdmi_port_enable) { + map = drvdata->hdmiif_map; +- else +- map = drvdata->lpaif_map; ++ regcache_cache_only(map, true); ++ regcache_mark_dirty(map); ++ } + ++ map = drvdata->lpaif_map; + regcache_cache_only(map, true); + regcache_mark_dirty(map); + +@@ -1206,14 +1206,19 @@ static int lpass_platform_pcmops_resume(struct snd_soc_component *component) + { + struct lpass_data *drvdata = snd_soc_component_get_drvdata(component); + struct regmap *map; +- unsigned int dai_id = component->id; ++ int ret; + +- if (dai_id == LPASS_DP_RX) ++ if (drvdata->hdmi_port_enable) { + map = drvdata->hdmiif_map; +- else +- map = drvdata->lpaif_map; ++ regcache_cache_only(map, false); ++ ret = regcache_sync(map); ++ if (ret) ++ return ret; ++ } + ++ map = drvdata->lpaif_map; + regcache_cache_only(map, false); ++ + return regcache_sync(map); + } + +@@ -1223,7 +1228,9 @@ static int lpass_platform_copy(struct snd_soc_component *component, + unsigned long bytes) + { + struct snd_pcm_runtime *rt = substream->runtime; +- unsigned int dai_id = component->id; ++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream); ++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0); ++ unsigned int dai_id = cpu_dai->driver->id; + int ret = 0; + + void __iomem *dma_buf = (void __iomem *) (rt->dma_area + pos + +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index 7eea70eea68b47..dc95b6f4155586 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -1025,6 +1025,9 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card, + void snd_soc_remove_pcm_runtime(struct snd_soc_card *card, + struct snd_soc_pcm_runtime *rtd) + { ++ if (!rtd) ++ return; ++ + lockdep_assert_held(&client_mutex); + + /* +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 7729f8f4d5e610..7facb7b2dba19d 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -734,6 +734,10 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm, + out: + trace_snd_soc_bias_level_done(card, level); + ++ /* success */ ++ if (ret == 0) ++ snd_soc_dapm_init_bias_level(dapm, level); ++ + return ret; + } + +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c +index be0b3c8ac7055b..f2cce15be4e271 100644 +--- a/sound/usb/mixer_quirks.c ++++ b/sound/usb/mixer_quirks.c +@@ -2150,15 +2150,15 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer) + #define SND_RME_CLK_FREQMUL_SHIFT 18 + #define SND_RME_CLK_FREQMUL_MASK 0x7 + #define SND_RME_CLK_SYSTEM(x) \ +- ((x >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK) ++ (((x) >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK) + #define SND_RME_CLK_AES(x) \ +- ((x >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK) ++ (((x) >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK) + #define SND_RME_CLK_SPDIF(x) \ +- ((x >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK) ++ (((x) >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK) + #define SND_RME_CLK_SYNC(x) \ +- ((x >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK) ++ (((x) >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK) + #define SND_RME_CLK_FREQMUL(x) \ +- ((x >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK) ++ (((x) >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK) + #define SND_RME_CLK_AES_LOCK 0x1 + #define SND_RME_CLK_AES_SYNC 0x4 + #define SND_RME_CLK_SPDIF_LOCK 0x2 +@@ -2167,9 +2167,9 @@ static int 
dell_dock_mixer_init(struct usb_mixer_interface *mixer) + #define SND_RME_SPDIF_FORMAT_SHIFT 5 + #define SND_RME_BINARY_MASK 0x1 + #define SND_RME_SPDIF_IF(x) \ +- ((x >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK) ++ (((x) >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK) + #define SND_RME_SPDIF_FORMAT(x) \ +- ((x >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK) ++ (((x) >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK) + + static const u32 snd_rme_rate_table[] = { + 32000, 44100, 48000, 50000, +diff --git a/sound/usb/stream.c b/sound/usb/stream.c +index 0f1558ef855535..12a5e053ec54fc 100644 +--- a/sound/usb/stream.c ++++ b/sound/usb/stream.c +@@ -341,20 +341,28 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor + + len = le16_to_cpu(cluster->wLength); + c = 0; +- p += sizeof(struct uac3_cluster_header_descriptor); ++ p += sizeof(*cluster); ++ len -= sizeof(*cluster); + +- while (((p - (void *)cluster) < len) && (c < channels)) { ++ while (len > 0 && (c < channels)) { + struct uac3_cluster_segment_descriptor *cs_desc = p; + u16 cs_len; + u8 cs_type; + ++ if (len < sizeof(*cs_desc)) ++ break; + cs_len = le16_to_cpu(cs_desc->wLength); ++ if (len < cs_len) ++ break; + cs_type = cs_desc->bSegmentType; + + if (cs_type == UAC3_CHANNEL_INFORMATION) { + struct uac3_cluster_information_segment_descriptor *is = p; + unsigned char map; + ++ if (cs_len < sizeof(*is)) ++ break; ++ + /* + * TODO: this conversion is not complete, update it + * after adding UAC3 values to asound.h +@@ -456,6 +464,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor + chmap->map[c++] = map; + } + p += cs_len; ++ len -= cs_len; + } + + if (channels < c) +@@ -876,7 +885,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, + u64 badd_formats = 0; + unsigned int num_channels; + struct audioformat *fp; +- u16 cluster_id, wLength; ++ u16 cluster_id, wLength, cluster_wLength; + int clock = 0; + int err; + +@@ -1005,6 +1014,16 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, + return ERR_PTR(-EIO); + } + ++ cluster_wLength = le16_to_cpu(cluster->wLength); ++ if (cluster_wLength < sizeof(*cluster) || ++ cluster_wLength > wLength) { ++ dev_err(&dev->dev, ++ "%u:%d : invalid Cluster Descriptor size\n", ++ iface_no, altno); ++ kfree(cluster); ++ return ERR_PTR(-EIO); ++ } ++ + num_channels = cluster->bNrChannels; + chmap = convert_chmap_v3(cluster); + kfree(cluster); +diff --git a/sound/usb/validate.c b/sound/usb/validate.c +index 6fe206f6e91105..a0d55b77c9941d 100644 +--- a/sound/usb/validate.c ++++ b/sound/usb/validate.c +@@ -221,6 +221,17 @@ static bool validate_uac3_feature_unit(const void *p, + return d->bLength >= sizeof(*d) + 4 + 2; + } + ++static bool validate_uac3_power_domain_unit(const void *p, ++ const struct usb_desc_validator *v) ++{ ++ const struct uac3_power_domain_descriptor *d = p; ++ ++ if (d->bLength < sizeof(*d)) ++ return false; ++ /* baEntities[] + wPDomainDescrStr */ ++ return d->bLength >= sizeof(*d) + d->bNrEntities + 2; ++} ++ + static bool validate_midi_out_jack(const void *p, + const struct usb_desc_validator *v) + { +@@ -274,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = { + /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */ + FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit), + FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit), +- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit), ++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, 
validate_uac3_feature_unit), + /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */ + FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit), + FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit), +@@ -285,6 +296,7 @@ static const struct usb_desc_validator audio_validators[] = { + struct uac3_clock_multiplier_descriptor), + /* UAC_VERSION_3, UAC3_SAMPLE_RATE_CONVERTER: not implemented yet */ + /* UAC_VERSION_3, UAC3_CONNECTORS: not implemented yet */ ++ FUNC(UAC_VERSION_3, UAC3_POWER_DOMAIN, validate_uac3_power_domain_unit), + { } /* terminator */ + }; + +diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c +index 08d0ac543c6746..a0536528dfde26 100644 +--- a/tools/bpf/bpftool/main.c ++++ b/tools/bpf/bpftool/main.c +@@ -534,9 +534,9 @@ int main(int argc, char **argv) + usage(); + + if (version_requested) +- return do_version(argc, argv); +- +- ret = cmd_select(commands, argc, argv, do_help); ++ ret = do_version(argc, argv); ++ else ++ ret = cmd_select(commands, argc, argv, do_help); + + if (json_output) + jsonw_destroy(&json_wtr); +diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h +index 933bc0be7e1c6b..a9d8b5b51f37f8 100644 +--- a/tools/include/nolibc/std.h ++++ b/tools/include/nolibc/std.h +@@ -20,6 +20,8 @@ + + #include "stdint.h" + ++#include ++ + /* those are commonly provided by sys/types.h */ + typedef unsigned int dev_t; + typedef unsigned long ino_t; +@@ -31,6 +33,6 @@ typedef unsigned long nlink_t; + typedef signed long off_t; + typedef signed long blksize_t; + typedef signed long blkcnt_t; +-typedef signed long time_t; ++typedef __kernel_old_time_t time_t; + + #endif /* _NOLIBC_STD_H */ +diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h +index 8cfc4c860fa444..48dca7b188d06c 100644 +--- a/tools/include/nolibc/types.h ++++ b/tools/include/nolibc/types.h +@@ -128,7 +128,7 @@ typedef struct { + int __fd = (fd); \ + if (__fd >= 0) \ + __set->fds[__fd / FD_SETIDXMASK] &= \ +- ~(1U << (__fd & FX_SETBITMASK)); \ ++ ~(1U << (__fd & FD_SETBITMASK)); \ + } while (0) + + #define FD_SET(fd, set) do { \ +@@ -145,7 +145,7 @@ typedef struct { + int __r = 0; \ + if (__fd >= 0) \ + __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \ +-1U << (__fd & FD_SET_BITMASK)); \ ++1U << (__fd & FD_SETBITMASK)); \ + __r; \ + }) + +diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h +index 39e659c83cfd21..cb8b0a3029d3d0 100644 +--- a/tools/include/uapi/linux/if_link.h ++++ b/tools/include/uapi/linux/if_link.h +@@ -865,6 +865,7 @@ enum { + IFLA_BOND_AD_LACP_ACTIVE, + IFLA_BOND_MISSED_MAX, + IFLA_BOND_NS_IP6_TARGET, ++ IFLA_BOND_COUPLED_CONTROL, + __IFLA_BOND_MAX, + }; + +diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c +index 08a399b0be286c..6ab9139f16af90 100644 +--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c ++++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c +@@ -240,9 +240,9 @@ static int mperf_stop(void) + int cpu; + + for (cpu = 0; cpu < cpu_count; cpu++) { +- mperf_measure_stats(cpu); +- mperf_get_tsc(&tsc_at_measure_end[cpu]); + clock_gettime(CLOCK_REALTIME, &time_end[cpu]); ++ mperf_get_tsc(&tsc_at_measure_end[cpu]); ++ mperf_measure_stats(cpu); + } + + return 0; +diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include +index ff527ac065cf8b..c006e72b4f4348 100644 +--- a/tools/scripts/Makefile.include ++++ b/tools/scripts/Makefile.include +@@ -98,7 +98,9 @@ else ifneq 
($(CROSS_COMPILE),) + # Allow userspace to override CLANG_CROSS_FLAGS to specify their own + # sysroots and flags or to avoid the GCC call in pure Clang builds. + ifeq ($(CLANG_CROSS_FLAGS),) +-CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) ++CLANG_TARGET := $(notdir $(CROSS_COMPILE:%-=%)) ++CLANG_TARGET := $(subst s390-linux,s390x-linux,$(CLANG_TARGET)) ++CLANG_CROSS_FLAGS := --target=$(CLANG_TARGET) + GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null)) + ifneq ($(GCC_TOOLCHAIN_DIR),) + CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) +diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl +index 3316015757433a..a8979280b50552 100755 +--- a/tools/testing/ktest/ktest.pl ++++ b/tools/testing/ktest/ktest.pl +@@ -1358,7 +1358,10 @@ sub __eval_option { + # If a variable contains itself, use the default var + if (($var eq $name) && defined($opt{$var})) { + $o = $opt{$var}; +- $retval = "$retval$o"; ++ # Only append if the default doesn't contain itself ++ if ($o !~ m/\$\{$var\}/) { ++ $retval = "$retval$o"; ++ } + } elsif (defined($opt{$o})) { + $o = $opt{$o}; + $retval = "$retval$o"; +diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c +index c6228176dd1a0c..408fb1c5c2f856 100644 +--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c ++++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c +@@ -168,7 +168,7 @@ static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type) + memset(&sve, 0, sizeof(sve)); + sve.size = sizeof(sve); + sve.vl = sve_vl_from_vq(SVE_VQ_MIN); +- sve.flags = SVE_PT_VL_INHERIT; ++ sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE; + ret = set_sve(child, type, &sve); + if (ret != 0) { + ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n", +@@ -233,6 +233,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type, + /* Set the VL by doing a set with no register payload */ + memset(&sve, 0, sizeof(sve)); + sve.size = sizeof(sve); ++ sve.flags = SVE_PT_REGS_SVE; + sve.vl = vl; + ret = set_sve(child, type, &sve); + if (ret != 0) { +diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +index dfff6feac12c3c..7e9a508c157184 100644 +--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c ++++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +@@ -21,8 +21,7 @@ + #include "../progs/test_user_ringbuf.h" + + static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ; +-static const long c_ringbuf_size = 1 << 12; /* 1 small page */ +-static const long c_max_entries = c_ringbuf_size / c_sample_size; ++static long c_ringbuf_size, c_max_entries; + + static void drain_current_samples(void) + { +@@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void) + uint32_t remaining_samples = total_samples; + int err; + +- BUILD_BUG_ON(total_samples <= c_max_entries); ++ if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries")) ++ return; ++ + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; +@@ -686,6 +687,9 @@ void test_user_ringbuf(void) + { + int i; + ++ c_ringbuf_size = getpagesize(); /* 1 page */ ++ c_max_entries = c_ringbuf_size / c_sample_size; ++ + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i].test_name)) + continue; +diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c 
+index 7ea535bfbacd3e..e4ef82a6ee38c8 100644 +--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c ++++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c +@@ -619,7 +619,7 @@ __naked void pass_pointer_to_tail_call(void) + + SEC("socket") + __description("unpriv: cmp map pointer with zero") +-__success __failure_unpriv __msg_unpriv("R1 pointer comparison") ++__success __success_unpriv + __retval(0) + __naked void cmp_map_pointer_with_zero(void) + { +diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +index 4b994b6df5ac30..ed81eaf2afd6d9 100644 +--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc ++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +@@ -29,7 +29,7 @@ ftrace_filter_check 'schedule*' '^schedule.*$' + ftrace_filter_check '*pin*lock' '.*pin.*lock$' + + # filter by start*mid* +-ftrace_filter_check 'mutex*try*' '^mutex.*try.*' ++ftrace_filter_check 'mutex*unl*' '^mutex.*unl.*' + + # Advanced full-glob matching feature is recently supported. + # Skip the tests if we are sure the kernel does not support it. +diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h +index ddbcfc9b7bac4a..7a5fd1d5355e7e 100644 +--- a/tools/testing/selftests/futex/include/futextest.h ++++ b/tools/testing/selftests/futex/include/futextest.h +@@ -47,6 +47,17 @@ typedef volatile u_int32_t futex_t; + FUTEX_PRIVATE_FLAG) + #endif + ++/* ++ * SYS_futex is expected from system C library, in glibc some 32-bit ++ * architectures (e.g. RV32) are using 64-bit time_t, therefore it doesn't have ++ * SYS_futex defined but just SYS_futex_time64. Define SYS_futex as ++ * SYS_futex_time64 in this situation to ensure the compilation and the ++ * compatibility. 
++ */ ++#if !defined(SYS_futex) && defined(SYS_futex_time64) ++#define SYS_futex SYS_futex_time64 ++#endif ++ + /** + * futex() - SYS_futex syscall wrapper + * @uaddr: address of first futex +diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c +index e92b60eecb7d5c..9c9c82fd18a7ea 100644 +--- a/tools/testing/selftests/memfd/memfd_test.c ++++ b/tools/testing/selftests/memfd/memfd_test.c +@@ -285,6 +285,24 @@ static void *mfd_assert_mmap_shared(int fd) + return p; + } + ++static void *mfd_assert_mmap_read_shared(int fd) ++{ ++ void *p; ++ ++ p = mmap(NULL, ++ mfd_def_size, ++ PROT_READ, ++ MAP_SHARED, ++ fd, ++ 0); ++ if (p == MAP_FAILED) { ++ printf("mmap() failed: %m\n"); ++ abort(); ++ } ++ ++ return p; ++} ++ + static void *mfd_assert_mmap_private(int fd) + { + void *p; +@@ -986,6 +1004,30 @@ static void test_seal_future_write(void) + close(fd); + } + ++static void test_seal_write_map_read_shared(void) ++{ ++ int fd; ++ void *p; ++ ++ printf("%s SEAL-WRITE-MAP-READ\n", memfd_str); ++ ++ fd = mfd_assert_new("kern_memfd_seal_write_map_read", ++ mfd_def_size, ++ MFD_CLOEXEC | MFD_ALLOW_SEALING); ++ ++ mfd_assert_add_seals(fd, F_SEAL_WRITE); ++ mfd_assert_has_seals(fd, F_SEAL_WRITE); ++ ++ p = mfd_assert_mmap_read_shared(fd); ++ ++ mfd_assert_read(fd); ++ mfd_assert_read_shared(fd); ++ mfd_fail_write(fd); ++ ++ munmap(p, mfd_def_size); ++ close(fd); ++} ++ + /* + * Test SEAL_SHRINK + * Test whether SEAL_SHRINK actually prevents shrinking +@@ -1603,6 +1645,7 @@ int main(int argc, char **argv) + + test_seal_write(); + test_seal_future_write(); ++ test_seal_write_map_read_shared(); + test_seal_shrink(); + test_seal_grow(); + test_seal_resize(); +diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh +index 71899a3ffa7a9d..3528e730e4d37c 100755 +--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh ++++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh +@@ -134,6 +134,7 @@ ip netns exec $ns1 ./pm_nl_ctl limits 1 9 2>/dev/null + check "ip netns exec $ns1 ./pm_nl_ctl limits" "$default_limits" "subflows above hard limit" + + ip netns exec $ns1 ./pm_nl_ctl limits 8 8 ++ip netns exec $ns1 ./pm_nl_ctl flush + check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8 + subflows 8" "set limits" + diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.103-104.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.103-104.patch new file mode 100644 index 0000000000..eb7ea28a0a --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.103-104.patch @@ -0,0 +1,3677 @@ +diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml +index 91c774f106ceb1..ab1196d1ec2dd4 100644 +--- a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml ++++ b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml +@@ -59,7 +59,6 @@ properties: + - const: bus + - const: core + - const: vsync +- - const: lut + - const: tbu + - const: tbu_rt + # MSM8996 has additional iommu clock +diff --git a/Makefile b/Makefile +index 9b288ccccd6495..ae57f816375ebd 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 103 ++SUBLEVEL = 104 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/mips/boot/dts/lantiq/danube_easy50712.dts b/arch/mips/boot/dts/lantiq/danube_easy50712.dts +index 1ce20b7d05cb8c..c4d7aa5753b043 100644 +--- 
a/arch/mips/boot/dts/lantiq/danube_easy50712.dts ++++ b/arch/mips/boot/dts/lantiq/danube_easy50712.dts +@@ -82,13 +82,16 @@ conf_out { + }; + }; + +- etop@e180000 { ++ ethernet@e180000 { + compatible = "lantiq,etop-xway"; + reg = <0xe180000 0x40000>; + interrupt-parent = <&icu0>; + interrupts = <73 78>; ++ interrupt-names = "tx", "rx"; + phy-mode = "rmii"; + mac-address = [ 00 11 22 33 44 55 ]; ++ lantiq,rx-burst-length = <4>; ++ lantiq,tx-burst-length = <4>; + }; + + stp0: stp@e100bb0 { +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 3ed0782252229a..4c72b59fdf98cc 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -478,7 +478,7 @@ void __init ltq_soc_init(void) + ifccr = CGU_IFCCR_VR9; + pcicr = CGU_PCICR_VR9; + } else { +- clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE); ++ clkdev_add_pmu("1e180000.ethernet", NULL, 1, 0, PMU_PPE); + } + + if (!of_machine_is_compatible("lantiq,ase")) +@@ -512,9 +512,9 @@ void __init ltq_soc_init(void) + CLOCK_133M, CLOCK_133M); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); +- clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE); +- clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY); +- clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY); ++ clkdev_add_pmu("1e180000.ethernet", "ppe", 1, 0, PMU_PPE); ++ clkdev_add_cgu("1e180000.ethernet", "ephycgu", CGU_EPHY); ++ clkdev_add_pmu("1e180000.ethernet", "ephy", 1, 0, PMU_EPHY); + clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO); + clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); + } else if (of_machine_is_compatible("lantiq,grx390")) { +@@ -573,7 +573,7 @@ void __init ltq_soc_init(void) + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM); +- clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH); ++ clkdev_add_pmu("1e180000.ethernet", "switch", 1, 0, PMU_SWITCH); + clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); + clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); +diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c +index 5b3c093611baf1..7209d00a9c2576 100644 +--- a/arch/powerpc/kernel/kvm.c ++++ b/arch/powerpc/kernel/kvm.c +@@ -632,19 +632,19 @@ static void __init kvm_check_ins(u32 *inst, u32 features) + #endif + } + +- switch (inst_no_rt & ~KVM_MASK_RB) { + #ifdef CONFIG_PPC_BOOK3S_32 ++ switch (inst_no_rt & ~KVM_MASK_RB) { + case KVM_INST_MTSRIN: + if (features & KVM_MAGIC_FEAT_SR) { + u32 inst_rb = _inst & KVM_MASK_RB; + kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb); + } + break; +-#endif + } ++#endif + +- switch (_inst) { + #ifdef CONFIG_BOOKE ++ switch (_inst) { + case KVM_INST_WRTEEI_0: + kvm_patch_ins_wrteei_0(inst); + break; +@@ -652,8 +652,8 @@ static void __init kvm_check_ins(u32 *inst, u32 features) + case KVM_INST_WRTEEI_1: + kvm_patch_ins_wrtee(inst, 0, 1); + break; +-#endif + } ++#endif + } + + extern u32 kvm_template_start[]; +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c +index 7444fe0e3d08cd..2cb30d9c5b4ae7 100644 +--- a/arch/x86/kernel/cpu/microcode/amd.c ++++ b/arch/x86/kernel/cpu/microcode/amd.c +@@ -161,8 +161,28 @@ static int cmp_id(const void *key, const void *elem) + return 1; + } + ++static u32 cpuid_to_ucode_rev(unsigned int val) ++{ ++ union 
zen_patch_rev p = {}; ++ union cpuid_1_eax c; ++ ++ c.full = val; ++ ++ p.stepping = c.stepping; ++ p.model = c.model; ++ p.ext_model = c.ext_model; ++ p.ext_fam = c.ext_fam; ++ ++ return p.ucode_rev; ++} ++ + static bool need_sha_check(u32 cur_rev) + { ++ if (!cur_rev) { ++ cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax); ++ pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev); ++ } ++ + switch (cur_rev >> 8) { + case 0x80012: return cur_rev <= 0x800126f; break; + case 0x80082: return cur_rev <= 0x800820f; break; +@@ -744,8 +764,6 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi + n.equiv_cpu = equiv_cpu; + n.patch_id = uci->cpu_sig.rev; + +- WARN_ON_ONCE(!n.patch_id); +- + list_for_each_entry(p, µcode_cache, plist) + if (patch_cpus_equivalent(p, &n, false)) + return p; +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index ba1c2a7f74f766..af4ae9216667ea 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -847,6 +847,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map, + if (min > map->max_apic_id) + return 0; + ++ min = array_index_nospec(min, map->max_apic_id + 1); ++ + for_each_set_bit(i, ipi_bitmap, + min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { + if (map->phys_map[min + i]) { +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index af0b2b3bc991e2..5088065ac704be 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -9802,8 +9802,11 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) + rcu_read_lock(); + map = rcu_dereference(vcpu->kvm->arch.apic_map); + +- if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) +- target = map->phys_map[dest_id]->vcpu; ++ if (likely(map) && dest_id <= map->max_apic_id) { ++ dest_id = array_index_nospec(dest_id, map->max_apic_id + 1); ++ if (map->phys_map[dest_id]) ++ target = map->phys_map[dest_id]->vcpu; ++ } + + rcu_read_unlock(); + +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 77d6af61158936..8e304efde3429e 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -2329,6 +2329,12 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, ++ { ++ // TUXEDO InfinityBook Pro AMD Gen9 ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"), ++ }, ++ }, + { }, + }; + +diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c +index ff558908897f3e..9c83fb29b2f1b2 100644 +--- a/drivers/atm/atmtcp.c ++++ b/drivers/atm/atmtcp.c +@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci) + return NULL; + } + ++static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb) ++{ ++ struct atmtcp_hdr *hdr; ++ ++ if (skb->len < sizeof(struct atmtcp_hdr)) ++ return -EINVAL; ++ ++ hdr = (struct atmtcp_hdr *)skb->data; ++ if (hdr->length == ATMTCP_HDR_MAGIC) ++ return -EINVAL; ++ ++ return 0; ++} + + static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) + { +@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) + struct sk_buff *new_skb; + int result = 0; + +- if (skb->len < sizeof(struct atmtcp_hdr)) +- goto done; +- + dev = vcc->dev_data; + hdr = (struct atmtcp_hdr *) skb->data; + if (hdr->length == ATMTCP_HDR_MAGIC) { +@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = { + + static const struct atmdev_ops atmtcp_c_dev_ops = { + .close = atmtcp_c_close, ++ .pre_send = atmtcp_c_pre_send, + .send = atmtcp_c_send + }; 
+ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +index 384834fbd59011..7200110197415f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +@@ -89,8 +89,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + } + + r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, +- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | +- AMDGPU_VM_PAGE_EXECUTABLE); ++ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | ++ AMDGPU_PTE_EXECUTABLE); + + if (r) { + DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); +diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c +index 772d8e662278b9..851f0baf94600c 100644 +--- a/drivers/gpu/drm/display/drm_dp_helper.c ++++ b/drivers/gpu/drm/display/drm_dp_helper.c +@@ -663,7 +663,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + * monitor doesn't power down exactly after the throw away read. + */ + if (!aux->is_remote) { +- ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS); ++ ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV); + if (ret < 0) + return ret; + } +diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c +index bbe4f1665b6039..dc142f9e4f6028 100644 +--- a/drivers/gpu/drm/msm/msm_gem_submit.c ++++ b/drivers/gpu/drm/msm/msm_gem_submit.c +@@ -981,12 +981,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + + if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { + sync_file = sync_file_create(submit->user_fence); +- if (!sync_file) { ++ if (!sync_file) + ret = -ENOMEM; +- } else { +- fd_install(out_fence_fd, sync_file->file); +- args->fence_fd = out_fence_fd; +- } + } + + submit_attach_object_fences(submit); +@@ -1013,10 +1009,14 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + out_unlock: + mutex_unlock(&queue->lock); + out_post_unlock: +- if (ret && (out_fence_fd >= 0)) { +- put_unused_fd(out_fence_fd); ++ if (ret) { ++ if (out_fence_fd >= 0) ++ put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); ++ } else if (sync_file) { ++ fd_install(out_fence_fd, sync_file->file); ++ args->fence_fd = out_fence_fd; + } + + if (!IS_ERR_OR_NULL(submit)) { +diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c +index 7a2cceaee6e97f..1199dfc1194c80 100644 +--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c ++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c +@@ -663,6 +663,10 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane, + struct nouveau_drm *drm = nouveau_drm(plane->dev); + uint8_t i; + ++ /* All chipsets can display all formats in linear layout */ ++ if (modifier == DRM_FORMAT_MOD_LINEAR) ++ return true; ++ + if (drm->client.device.info.chipset < 0xc0) { + const struct drm_format_info *info = drm_format_info(format); + const uint8_t kind = (modifier >> 12) & 0xff; +diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c +index b7da3ab44c277d..7c43397c19e61d 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c ++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c +@@ -103,7 +103,7 @@ gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 i + static void + gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag) + { +- nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++); ++ nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag); + while (len 
>= 4) { + nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img); + img += 4; +@@ -249,9 +249,11 @@ int + gm200_flcn_fw_load(struct nvkm_falcon_fw *fw) + { + struct nvkm_falcon *falcon = fw->falcon; +- int target, ret; ++ int ret; + + if (fw->inst) { ++ int target; ++ + nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001); + + switch (nvkm_memory_target(fw->inst)) { +@@ -285,15 +287,6 @@ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw) + } + + if (fw->boot) { +- switch (nvkm_memory_target(&fw->fw.mem.memory)) { +- case NVKM_MEM_TARGET_VRAM: target = 4; break; +- case NVKM_MEM_TARGET_HOST: target = 5; break; +- case NVKM_MEM_TARGET_NCOH: target = 6; break; +- default: +- WARN_ON(1); +- return -EINVAL; +- } +- + ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0, + IMEM, falcon->code.limit - fw->boot_size, fw->boot_size, + fw->boot_addr >> 8, false); +diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c +index 84625e817ce950..896f73aa4d2c82 100644 +--- a/drivers/hid/hid-asus.c ++++ b/drivers/hid/hid-asus.c +@@ -1108,7 +1108,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) + return ret; + } + +- if (!drvdata->input) { ++ /* ++ * Check that input registration succeeded. Checking that ++ * HID_CLAIMED_INPUT is set prevents a UAF when all input devices ++ * were freed during registration due to no usages being mapped, ++ * leaving drvdata->input pointing to freed memory. ++ */ ++ if (!drvdata->input || !(hdev->claimed & HID_CLAIMED_INPUT)) { + hid_err(hdev, "Asus input not registered\n"); + ret = -ENOMEM; + goto err_stop_hw; +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 0d1d7162814f32..3f74633070b6ec 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -818,6 +818,8 @@ + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093 ++#define USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT 0x6184 ++#define USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT 0x61ed + + #define USB_VENDOR_ID_LETSKETCH 0x6161 + #define USB_DEVICE_ID_WP9620N 0x4d15 +@@ -891,6 +893,7 @@ + #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 + #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539 + #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f ++#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc543 + #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a + #define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548 + #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 +diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c +index 77c2d45ac62a7f..6f5c71660d823b 100644 +--- a/drivers/hid/hid-input-test.c ++++ b/drivers/hid/hid-input-test.c +@@ -7,7 +7,7 @@ + + #include + +-static void hid_test_input_set_battery_charge_status(struct kunit *test) ++static void hid_test_input_update_battery_charge_status(struct kunit *test) + { + struct hid_device *dev; + bool handled; +@@ -15,15 +15,15 @@ static void hid_test_input_set_battery_charge_status(struct kunit *test) + dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + +- handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0); ++ handled = hidinput_update_battery_charge_status(dev, HID_DG_HEIGHT, 0); + KUNIT_EXPECT_FALSE(test, handled); + KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN); + +- handled = hidinput_set_battery_charge_status(dev, 
HID_BAT_CHARGING, 0); ++ handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 0); + KUNIT_EXPECT_TRUE(test, handled); + KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING); + +- handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1); ++ handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 1); + KUNIT_EXPECT_TRUE(test, handled); + KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING); + } +@@ -63,7 +63,7 @@ static void hid_test_input_get_battery_property(struct kunit *test) + } + + static struct kunit_case hid_input_tests[] = { +- KUNIT_CASE(hid_test_input_set_battery_charge_status), ++ KUNIT_CASE(hid_test_input_update_battery_charge_status), + KUNIT_CASE(hid_test_input_get_battery_property), + { } + }; +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 9d80635a91ebd8..f5c217ac4bfaa7 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -595,13 +595,33 @@ static void hidinput_cleanup_battery(struct hid_device *dev) + dev->battery = NULL; + } + +-static void hidinput_update_battery(struct hid_device *dev, int value) ++static bool hidinput_update_battery_charge_status(struct hid_device *dev, ++ unsigned int usage, int value) ++{ ++ switch (usage) { ++ case HID_BAT_CHARGING: ++ dev->battery_charge_status = value ? ++ POWER_SUPPLY_STATUS_CHARGING : ++ POWER_SUPPLY_STATUS_DISCHARGING; ++ return true; ++ } ++ ++ return false; ++} ++ ++static void hidinput_update_battery(struct hid_device *dev, unsigned int usage, ++ int value) + { + int capacity; + + if (!dev->battery) + return; + ++ if (hidinput_update_battery_charge_status(dev, usage, value)) { ++ power_supply_changed(dev->battery); ++ return; ++ } ++ + if (value == 0 || value < dev->battery_min || value > dev->battery_max) + return; + +@@ -617,20 +637,6 @@ static void hidinput_update_battery(struct hid_device *dev, int value) + power_supply_changed(dev->battery); + } + } +- +-static bool hidinput_set_battery_charge_status(struct hid_device *dev, +- unsigned int usage, int value) +-{ +- switch (usage) { +- case HID_BAT_CHARGING: +- dev->battery_charge_status = value ? 
+- POWER_SUPPLY_STATUS_CHARGING : +- POWER_SUPPLY_STATUS_DISCHARGING; +- return true; +- } +- +- return false; +-} + #else /* !CONFIG_HID_BATTERY_STRENGTH */ + static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, + struct hid_field *field, bool is_percentage) +@@ -642,14 +648,9 @@ static void hidinput_cleanup_battery(struct hid_device *dev) + { + } + +-static void hidinput_update_battery(struct hid_device *dev, int value) +-{ +-} +- +-static bool hidinput_set_battery_charge_status(struct hid_device *dev, +- unsigned int usage, int value) ++static void hidinput_update_battery(struct hid_device *dev, unsigned int usage, ++ int value) + { +- return false; + } + #endif /* CONFIG_HID_BATTERY_STRENGTH */ + +@@ -1515,11 +1516,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct + return; + + if (usage->type == EV_PWR) { +- bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value); +- +- if (!handled) +- hidinput_update_battery(hid, value); +- ++ hidinput_update_battery(hid, usage->hid, value); + return; + } + +diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c +index 37958edec55f5f..e2d5b3f699146d 100644 +--- a/drivers/hid/hid-logitech-dj.c ++++ b/drivers/hid/hid-logitech-dj.c +@@ -1983,6 +1983,10 @@ static const struct hid_device_id logi_dj_receivers[] = { + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1), + .driver_data = recvr_type_gaming_hidpp}, ++ { /* Logitech lightspeed receiver (0xc543) */ ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, ++ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2), ++ .driver_data = recvr_type_gaming_hidpp}, + + { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c +index 4519ee377aa767..3a2c1e48aba20c 100644 +--- a/drivers/hid/hid-logitech-hidpp.c ++++ b/drivers/hid/hid-logitech-hidpp.c +@@ -4652,6 +4652,8 @@ static const struct hid_device_id hidpp_devices[] = { + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) }, + { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) }, ++ { /* Logitech G PRO 2 LIGHTSPEED Wireless Mouse over USB */ ++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xc09a) }, + + { /* G935 Gaming Headset */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87), +diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c +index c5bfca8ac5e6e8..a985301a4135f5 100644 +--- a/drivers/hid/hid-mcp2221.c ++++ b/drivers/hid/hid-mcp2221.c +@@ -49,6 +49,7 @@ enum { + MCP2221_I2C_MASK_ADDR_NACK = 0x40, + MCP2221_I2C_WRADDRL_SEND = 0x21, + MCP2221_I2C_ADDR_NACK = 0x25, ++ MCP2221_I2C_READ_PARTIAL = 0x54, + MCP2221_I2C_READ_COMPL = 0x55, + MCP2221_ALT_F_NOT_GPIOV = 0xEE, + MCP2221_ALT_F_NOT_GPIOD = 0xEF, +@@ -187,6 +188,25 @@ static int mcp_cancel_last_cmd(struct mcp2221 *mcp) + return mcp_send_data_req_status(mcp, mcp->txbuf, 8); + } + ++/* Check if the last command succeeded or failed and return the result. ++ * If the command did fail, cancel that command which will free the i2c bus. ++ */ ++static int mcp_chk_last_cmd_status_free_bus(struct mcp2221 *mcp) ++{ ++ int ret; ++ ++ ret = mcp_chk_last_cmd_status(mcp); ++ if (ret) { ++ /* The last command was a failure. ++ * Send a cancel which will also free the bus. 
++ */ ++ usleep_range(980, 1000); ++ mcp_cancel_last_cmd(mcp); ++ } ++ ++ return ret; ++} ++ + static int mcp_set_i2c_speed(struct mcp2221 *mcp) + { + int ret; +@@ -241,7 +261,7 @@ static int mcp_i2c_write(struct mcp2221 *mcp, + usleep_range(980, 1000); + + if (last_status) { +- ret = mcp_chk_last_cmd_status(mcp); ++ ret = mcp_chk_last_cmd_status_free_bus(mcp); + if (ret) + return ret; + } +@@ -278,6 +298,7 @@ static int mcp_i2c_smbus_read(struct mcp2221 *mcp, + { + int ret; + u16 total_len; ++ int retries = 0; + + mcp->txbuf[0] = type; + if (msg) { +@@ -301,20 +322,31 @@ static int mcp_i2c_smbus_read(struct mcp2221 *mcp, + mcp->rxbuf_idx = 0; + + do { ++ /* Wait for the data to be read by the device */ ++ usleep_range(980, 1000); ++ + memset(mcp->txbuf, 0, 4); + mcp->txbuf[0] = MCP2221_I2C_GET_DATA; + + ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1); +- if (ret) +- return ret; +- +- ret = mcp_chk_last_cmd_status(mcp); +- if (ret) +- return ret; +- +- usleep_range(980, 1000); ++ if (ret) { ++ if (retries < 5) { ++ /* The data wasn't ready to read. ++ * Wait a bit longer and try again. ++ */ ++ usleep_range(90, 100); ++ retries++; ++ } else { ++ return ret; ++ } ++ } else { ++ retries = 0; ++ } + } while (mcp->rxbuf_idx < total_len); + ++ usleep_range(980, 1000); ++ ret = mcp_chk_last_cmd_status_free_bus(mcp); ++ + return ret; + } + +@@ -328,11 +360,6 @@ static int mcp_i2c_xfer(struct i2c_adapter *adapter, + + mutex_lock(&mcp->lock); + +- /* Setting speed before every transaction is required for mcp2221 */ +- ret = mcp_set_i2c_speed(mcp); +- if (ret) +- goto exit; +- + if (num == 1) { + if (msgs->flags & I2C_M_RD) { + ret = mcp_i2c_smbus_read(mcp, msgs, MCP2221_I2C_RD_DATA, +@@ -417,9 +444,7 @@ static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr, + if (last_status) { + usleep_range(980, 1000); + +- ret = mcp_chk_last_cmd_status(mcp); +- if (ret) +- return ret; ++ ret = mcp_chk_last_cmd_status_free_bus(mcp); + } + + return ret; +@@ -437,10 +462,6 @@ static int mcp_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + + mutex_lock(&mcp->lock); + +- ret = mcp_set_i2c_speed(mcp); +- if (ret) +- goto exit; +- + switch (size) { + + case I2C_SMBUS_QUICK: +@@ -791,7 +812,8 @@ static int mcp2221_raw_event(struct hid_device *hdev, + mcp->status = -EIO; + break; + } +- if (data[2] == MCP2221_I2C_READ_COMPL) { ++ if (data[2] == MCP2221_I2C_READ_COMPL || ++ data[2] == MCP2221_I2C_READ_PARTIAL) { + buf = mcp->rxbuf; + memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]); + mcp->rxbuf_idx = mcp->rxbuf_idx + data[3]; +@@ -1152,6 +1174,11 @@ static int mcp2221_probe(struct hid_device *hdev, + if (i2c_clk_freq < 50) + i2c_clk_freq = 50; + mcp->cur_i2c_clk_div = (12000000 / (i2c_clk_freq * 1000)) - 3; ++ ret = mcp_set_i2c_speed(mcp); ++ if (ret) { ++ hid_err(hdev, "can't set i2c speed: %d\n", ret); ++ return ret; ++ } + + mcp->adapter.owner = THIS_MODULE; + mcp->adapter.class = I2C_CLASS_HWMON; +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index becd4c1ccf93c1..a85581cd511fd3 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -1448,6 +1448,14 @@ static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc, + if (hdev->vendor == I2C_VENDOR_ID_GOODIX && + (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 || + hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) { ++ if (*size < 608) { ++ dev_info( ++ &hdev->dev, ++ "GT7868Q fixup: report descriptor is only %u bytes, skipping\n", ++ *size); ++ return rdesc; ++ } ++ + if (rdesc[607] == 0x15) { + rdesc[607] = 
0x25; + dev_info( +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c +index b5d26f03fe6bb7..a1128c5315fffa 100644 +--- a/drivers/hid/hid-ntrig.c ++++ b/drivers/hid/hid-ntrig.c +@@ -144,6 +144,9 @@ static void ntrig_report_version(struct hid_device *hdev) + struct usb_device *usb_dev = hid_to_usb_dev(hdev); + unsigned char *data = kmalloc(8, GFP_KERNEL); + ++ if (!hid_is_usb(hdev)) ++ return; ++ + if (!data) + goto err_free; + +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 80372342c176af..64f9728018b885 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -124,6 +124,8 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL }, +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index dd44373ba930e2..42bc8f05e26358 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -684,6 +684,7 @@ static bool wacom_is_art_pen(int tool_id) + case 0x885: /* Intuos3 Marker Pen */ + case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ + case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */ ++ case 0x204: /* Art Pen 2 */ + is_art_pen = true; + break; + } +diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c +index fad5a72d3b1671..f1208591ed67e3 100644 +--- a/drivers/net/ethernet/dlink/dl2k.c ++++ b/drivers/net/ethernet/dlink/dl2k.c +@@ -1092,7 +1092,7 @@ get_stats (struct net_device *dev) + dev->stats.rx_bytes += dr32(OctetRcvOk); + dev->stats.tx_bytes += dr32(OctetXmtOk); + +- dev->stats.multicast = dr32(McstFramesRcvdOk); ++ dev->stats.multicast += dr32(McstFramesRcvdOk); + dev->stats.collisions += dr32(SingleColFrames) + + dr32(MultiColFrames); + +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c +index f5023ac9ab8323..eae4376c685952 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c +@@ -527,14 +527,14 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) + * @xdp: xdp_buff used as input to the XDP program + * @xdp_prog: XDP program to run + * @xdp_ring: ring to be used for XDP_TX action +- * @rx_buf: Rx buffer to store the XDP action ++ * @eop_desc: Last descriptor in packet to read metadata from + * + * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} + */ +-static void ++static u32 + ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, +- struct ice_rx_buf *rx_buf) ++ union ice_32b_rx_flex_desc *eop_desc) + { + unsigned int ret = ICE_XDP_PASS; + u32 act; +@@ -542,6 +542,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + if (!xdp_prog) + goto exit; + ++ 
ice_xdp_meta_set_desc(xdp, eop_desc); ++ + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: +@@ -571,7 +573,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + ret = ICE_XDP_CONSUMED; + } + exit: +- ice_set_rx_bufs_act(xdp, rx_ring, ret); ++ return ret; + } + + /** +@@ -857,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + xdp_buff_set_frags_flag(xdp); + } + +- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { +- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); ++ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) + return -ENOMEM; +- } + + __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, + rx_buf->page_offset, size); +@@ -921,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, + struct ice_rx_buf *rx_buf; + + rx_buf = &rx_ring->rx_buf[ntc]; +- rx_buf->pgcnt = page_count(rx_buf->page); + prefetchw(rx_buf->page); + + if (!size) +@@ -937,6 +936,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, + return rx_buf; + } + ++/** ++ * ice_get_pgcnts - grab page_count() for gathered fragments ++ * @rx_ring: Rx descriptor ring to store the page counts on ++ * ++ * This function is intended to be called right before running XDP ++ * program so that the page recycling mechanism will be able to take ++ * a correct decision regarding underlying pages; this is done in such ++ * way as XDP program can change the refcount of page ++ */ ++static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) ++{ ++ u32 nr_frags = rx_ring->nr_frags + 1; ++ u32 idx = rx_ring->first_desc; ++ struct ice_rx_buf *rx_buf; ++ u32 cnt = rx_ring->count; ++ ++ for (int i = 0; i < nr_frags; i++) { ++ rx_buf = &rx_ring->rx_buf[idx]; ++ rx_buf->pgcnt = page_count(rx_buf->page); ++ ++ if (++idx == cnt) ++ idx = 0; ++ } ++} ++ + /** + * ice_build_skb - Build skb around an existing buffer + * @rx_ring: Rx descriptor ring to transact packets on +@@ -1049,12 +1073,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) + rx_buf->page_offset + headlen, size, + xdp->frame_sz); + } else { +- /* buffer is unused, change the act that should be taken later +- * on; data was copied onto skb's linear part so there's no ++ /* buffer is unused, restore biased page count in Rx buffer; ++ * data was copied onto skb's linear part so there's no + * need for adjusting page offset and we can reuse this buffer + * as-is + */ +- rx_buf->act = ICE_SKB_CONSUMED; ++ rx_buf->pagecnt_bias++; + } + + if (unlikely(xdp_buff_has_frags(xdp))) { +@@ -1107,29 +1131,34 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) + * @xdp: XDP buffer carrying linear + frags part + * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage + * @ntc: a current next_to_clean value to be stored at rx_ring ++ * @verdict: return code from XDP program execution + * + * Walk through gathered fragments and satisfy internal page + * recycle mechanism; we take here an action related to verdict + * returned by XDP program; + */ + static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, +- u32 *xdp_xmit, u32 ntc) ++ u32 *xdp_xmit, u32 ntc, u32 verdict) + { + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + u32 cnt = rx_ring->count; ++ u32 post_xdp_frags = 1; + struct ice_rx_buf *buf; + int i; + +- for (i = 0; i < nr_frags; i++) { ++ if (unlikely(xdp_buff_has_frags(xdp))) ++ post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; ++ ++ for (i = 0; i < post_xdp_frags; 
i++) { + buf = &rx_ring->rx_buf[idx]; + +- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { ++ if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); +- *xdp_xmit |= buf->act; +- } else if (buf->act & ICE_XDP_CONSUMED) { ++ *xdp_xmit |= verdict; ++ } else if (verdict & ICE_XDP_CONSUMED) { + buf->pagecnt_bias++; +- } else if (buf->act == ICE_XDP_PASS) { ++ } else if (verdict == ICE_XDP_PASS) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + } + +@@ -1138,6 +1167,17 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + if (++idx == cnt) + idx = 0; + } ++ /* handle buffers that represented frags released by XDP prog; ++ * for these we keep pagecnt_bias as-is; refcount from struct page ++ * has been decremented within XDP prog and we do not have to increase ++ * the biased refcnt ++ */ ++ for (; i < nr_frags; i++) { ++ buf = &rx_ring->rx_buf[idx]; ++ ice_put_rx_buf(rx_ring, buf); ++ if (++idx == cnt) ++ idx = 0; ++ } + + xdp->data = NULL; + rx_ring->first_desc = ntc; +@@ -1164,9 +1204,9 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) + struct ice_tx_ring *xdp_ring = NULL; + struct bpf_prog *xdp_prog = NULL; + u32 ntc = rx_ring->next_to_clean; ++ u32 cached_ntu, xdp_verdict; + u32 cnt = rx_ring->count; + u32 xdp_xmit = 0; +- u32 cached_ntu; + bool failure; + + xdp_prog = READ_ONCE(rx_ring->xdp_prog); +@@ -1230,7 +1270,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) + xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); + xdp_buff_clear_frags_flag(xdp); + } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { +- ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc); ++ ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); + break; + } + if (++ntc == cnt) +@@ -1240,13 +1280,14 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) + if (ice_is_non_eop(rx_ring, rx_desc)) + continue; + +- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); +- if (rx_buf->act == ICE_XDP_PASS) ++ ice_get_pgcnts(rx_ring); ++ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); ++ if (xdp_verdict == ICE_XDP_PASS) + goto construct_skb; + total_rx_bytes += xdp_get_buff_len(xdp); + total_rx_pkts++; + +- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc); ++ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + + continue; + construct_skb: +@@ -1256,13 +1297,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) + skb = ice_construct_skb(rx_ring, xdp); + /* exit if we failed to retrieve a buffer */ + if (!skb) { +- rx_ring->ring_stats->rx_stats.alloc_page_failed++; +- rx_buf->act = ICE_XDP_CONSUMED; +- if (unlikely(xdp_buff_has_frags(xdp))) +- ice_set_rx_bufs_act(xdp, rx_ring, +- ICE_XDP_CONSUMED); ++ rx_ring->ring_stats->rx_stats.alloc_buf_failed++; ++ xdp_verdict = ICE_XDP_CONSUMED; + } +- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc); ++ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + + if (!skb) + break; +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h +index 407d4c320097f6..53a155dde3e320 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.h ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h +@@ -201,7 +201,6 @@ struct ice_rx_buf { + struct page *page; + unsigned int page_offset; + unsigned int pgcnt; +- unsigned int act; + unsigned int pagecnt_bias; + }; + +@@ -257,6 +256,14 @@ enum ice_rx_dtype { + ICE_RX_DTYPE_SPLIT_ALWAYS = 2, + }; + ++struct ice_xdp_buff { ++ struct xdp_buff xdp_buff; ++ 
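/* eop_desc below caches the end-of-packet Rx descriptor for the frame
 * carried by xdp_buff; ice_xdp_meta_set_desc() (see the ice_txrx_lib.h
 * hunk below) stores it so code handling the xdp_buff can reach the
 * hardware descriptor without walking the ring again.
 */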
const union ice_32b_rx_flex_desc *eop_desc; ++}; ++ ++/* Required for compatibility with xdp_buffs from xsk_pool */ ++static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); ++ + /* indices into GLINT_ITR registers */ + #define ICE_RX_ITR ICE_IDX_ITR0 + #define ICE_TX_ITR ICE_IDX_ITR1 +@@ -298,7 +305,6 @@ enum ice_dynamic_itr { + /* descriptor ring, associated with a VSI */ + struct ice_rx_ring { + /* CL1 - 1st cacheline starts here */ +- struct ice_rx_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ +@@ -310,12 +316,16 @@ struct ice_rx_ring { + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 next_to_alloc; +- /* CL2 - 2nd cacheline starts here */ ++ + union { + struct ice_rx_buf *rx_buf; + struct xdp_buff **xdp_buf; + }; +- struct xdp_buff xdp; ++ /* CL2 - 2nd cacheline starts here */ ++ union { ++ struct ice_xdp_buff xdp_ext; ++ struct xdp_buff xdp; ++ }; + /* CL3 - 3rd cacheline starts here */ + struct bpf_prog *xdp_prog; + u16 rx_offset; +@@ -332,6 +342,7 @@ struct ice_rx_ring { + /* CL4 - 4th cacheline starts here */ + struct ice_channel *ch; + struct ice_tx_ring *xdp_ring; ++ struct ice_rx_ring *next; /* pointer to next ring in q_vector */ + struct xsk_buff_pool *xsk_pool; + u32 nr_frags; + dma_addr_t dma; /* physical address of ring */ +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +index b0e56675f98b2a..41efafc5eb386a 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h ++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +@@ -5,49 +5,6 @@ + #define _ICE_TXRX_LIB_H_ + #include "ice.h" + +-/** +- * ice_set_rx_bufs_act - propagate Rx buffer action to frags +- * @xdp: XDP buffer representing frame (linear and frags part) +- * @rx_ring: Rx ring struct +- * act: action to store onto Rx buffers related to XDP buffer parts +- * +- * Set action that should be taken before putting Rx buffer from first frag +- * to the last. 
+- */ +-static inline void +-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, +- const unsigned int act) +-{ +- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; +- u32 nr_frags = rx_ring->nr_frags + 1; +- u32 idx = rx_ring->first_desc; +- u32 cnt = rx_ring->count; +- struct ice_rx_buf *buf; +- +- for (int i = 0; i < nr_frags; i++) { +- buf = &rx_ring->rx_buf[idx]; +- buf->act = act; +- +- if (++idx == cnt) +- idx = 0; +- } +- +- /* adjust pagecnt_bias on frags freed by XDP prog */ +- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) { +- u32 delta = rx_ring->nr_frags - sinfo_frags; +- +- while (delta) { +- if (idx == 0) +- idx = cnt - 1; +- else +- idx--; +- buf = &rx_ring->rx_buf[idx]; +- buf->pagecnt_bias--; +- delta--; +- } +- } +-} +- + /** + * ice_test_staterr - tests bits in Rx descriptor status and error fields + * @status_err_n: Rx descriptor status_error0 or status_error1 bits +@@ -164,4 +121,14 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, + struct sk_buff *skb, u16 ptype); + void + ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); ++ ++static inline void ++ice_xdp_meta_set_desc(struct xdp_buff *xdp, ++ union ice_32b_rx_flex_desc *eop_desc) ++{ ++ struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, ++ xdp_buff); ++ ++ xdp_ext->eop_desc = eop_desc; ++} + #endif /* !_ICE_TXRX_LIB_H_ */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +index f66788a2ed77ec..8489b5087d9c60 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +@@ -107,7 +107,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli + if (err) + return err; + +- mlx5_unload_one_devl_locked(dev, true); ++ mlx5_sync_reset_unload_flow(dev, true); + err = mlx5_health_wait_pci_up(dev); + if (err) + NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset"); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +index 3efa8bf1d14ef4..4720523813b976 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +@@ -575,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + if (err) + return err; + } +- priv->dcbx.xoff = xoff; + + /* Apply the settings */ + if (update_buffer) { +@@ -584,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + return err; + } + ++ priv->dcbx.xoff = xoff; ++ + if (update_prio2buffer) + err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +index f4a19ffbb641c0..66d276a1be836a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +@@ -66,11 +66,23 @@ struct mlx5e_port_buffer { + struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER]; + }; + ++#ifdef CONFIG_MLX5_CORE_EN_DCB + int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + u32 change, unsigned int mtu, + struct ieee_pfc *pfc, + u32 *buffer_size, + u8 *prio2buffer); ++#else ++static inline int ++mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, ++ u32 change, unsigned int mtu, ++ void *pfc, ++ u32 *buffer_size, ++ u8 *prio2buffer) 
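/* stub used when CONFIG_MLX5_CORE_EN_DCB is not set: there is no port
 * buffer management to reconfigure, so the call is a successful no-op and
 * callers (e.g. mlx5e_update_carrier() in the en_main.c hunk below) need
 * no ifdef of their own.
 */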
++{ ++ return 0; ++} ++#endif + + int mlx5e_port_query_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 5c6f01abdcb91d..d378aa55f22f90 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -44,6 +44,7 @@ + #include "eswitch.h" + #include "en.h" + #include "en/txrx.h" ++#include "en/port_buffer.h" + #include "en_tc.h" + #include "en_rep.h" + #include "en_accel/ipsec.h" +@@ -108,6 +109,8 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv) + if (up) { + netdev_info(priv->netdev, "Link up\n"); + netif_carrier_on(priv->netdev); ++ mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu, ++ NULL, NULL, NULL); + } else { + netdev_info(priv->netdev, "Link down\n"); + netif_carrier_off(priv->netdev); +@@ -2722,9 +2725,11 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) + struct mlx5e_params *params = &priv->channels.params; + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; +- u16 mtu; ++ u16 mtu, prev_mtu; + int err; + ++ mlx5e_query_mtu(mdev, params, &prev_mtu); ++ + err = mlx5e_set_mtu(mdev, params, params->sw_mtu); + if (err) + return err; +@@ -2734,6 +2739,18 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) + netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", + __func__, mtu, params->sw_mtu); + ++ if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) { ++ err = mlx5e_port_manual_buffer_config(priv, 0, mtu, ++ NULL, NULL, NULL); ++ if (err) { ++ netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n", ++ __func__, mtu, err, prev_mtu); ++ ++ mlx5e_set_mtu(mdev, params, prev_mtu); ++ return err; ++ } ++ } ++ + params->sw_mtu = mtu; + return 0; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +index 6b17346aa4cef2..1547704c89767f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +@@ -6,13 +6,15 @@ + #include "fw_reset.h" + #include "diag/fw_tracer.h" + #include "lib/tout.h" ++#include "sf/sf.h" + + enum { + MLX5_FW_RESET_FLAGS_RESET_REQUESTED, + MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, + MLX5_FW_RESET_FLAGS_PENDING_COMP, + MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, +- MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED ++ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, ++ MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, + }; + + struct mlx5_fw_reset { +@@ -26,6 +28,7 @@ struct mlx5_fw_reset { + struct work_struct reset_now_work; + struct work_struct reset_abort_work; + unsigned long reset_flags; ++ u8 reset_method; + struct timer_list timer; + struct completion done; + int ret; +@@ -94,7 +97,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level, + } + + static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, +- u8 *reset_type, u8 *reset_state) ++ u8 *reset_type, u8 *reset_state, u8 *reset_method) + { + u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; +@@ -110,13 +113,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, + *reset_type = MLX5_GET(mfrl_reg, out, reset_type); + if (reset_state) + *reset_state = MLX5_GET(mfrl_reg, out, reset_state); ++ if (reset_method) ++ *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method); + + return 0; + } + + int 
mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) + { +- return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL); ++ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL); ++} ++ ++static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev, ++ u8 *reset_method) ++{ ++ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) { ++ *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE; ++ return 0; ++ } ++ ++ return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method); + } + + static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, +@@ -124,7 +140,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, + { + u8 reset_state; + +- if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) ++ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL)) + goto out; + + if (!reset_state) +@@ -203,7 +219,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) + return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false); + } + +-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded) ++static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) + { + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + struct devlink *devlink = priv_to_devlink(dev); +@@ -212,8 +228,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload + if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { + complete(&fw_reset->done); + } else { +- if (!unloaded) +- mlx5_unload_one(dev, false); ++ mlx5_sync_reset_unload_flow(dev, false); + if (mlx5_health_wait_pci_up(dev)) + mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); + else +@@ -256,7 +271,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) + + mlx5_sync_reset_clear_reset_requested(dev, false); + mlx5_enter_error_state(dev, true); +- mlx5_fw_reset_complete_reload(dev, false); ++ mlx5_fw_reset_complete_reload(dev); + } + + #define MLX5_RESET_POLL_INTERVAL (HZ / 10) +@@ -383,6 +398,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) + return false; + } + ++ if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) { ++ mlx5_core_warn(dev, "SFs should be removed before reset\n"); ++ return false; ++ } ++ + #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) + err = mlx5_check_hotplug_interrupt(dev); + if (err) +@@ -402,7 +422,11 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) + struct mlx5_core_dev *dev = fw_reset->dev; + int err; + +- if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || ++ err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method); ++ if (err) ++ mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err); ++ ++ if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || + !mlx5_is_reset_now_capable(dev)) { + err = mlx5_fw_reset_set_reset_sync_nack(dev); + mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s", +@@ -419,21 +443,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) + mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. 
Device reset is expected.\n"); + } + +-static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) ++static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id) + { + struct pci_bus *bridge_bus = dev->pdev->bus; + struct pci_dev *bridge = bridge_bus->self; + unsigned long timeout; + struct pci_dev *sdev; +- u16 reg16, dev_id; + int cap, err; ++ u16 reg16; + +- err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); +- if (err) +- return pcibios_err_to_errno(err); +- err = mlx5_check_dev_ids(dev, dev_id); +- if (err) +- return err; + cap = pci_find_capability(bridge, PCI_CAP_ID_EXP); + if (!cap) + return -EOPNOTSUPP; +@@ -503,64 +521,60 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) + return err; + } + +-static void mlx5_sync_reset_now_event(struct work_struct *work) ++static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev) + { +- struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, +- reset_now_work); +- struct mlx5_core_dev *dev = fw_reset->dev; +- int err; ++ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) ++ return -EOPNOTSUPP; + +- if (mlx5_sync_reset_clear_reset_requested(dev, false)) +- return; ++ return pci_reset_bus(dev->pdev); ++} + +- mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n"); ++static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method) ++{ ++ u16 dev_id; ++ int err; + +- err = mlx5_cmd_fast_teardown_hca(dev); +- if (err) { +- mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err); +- goto done; +- } ++ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); ++ if (err) ++ return pcibios_err_to_errno(err); ++ err = mlx5_check_dev_ids(dev, dev_id); ++ if (err) ++ return err; + +- err = mlx5_pci_link_toggle(dev); +- if (err) { +- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err); +- set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags); ++ switch (reset_method) { ++ case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE: ++ err = mlx5_pci_link_toggle(dev, dev_id); ++ if (err) ++ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n"); ++ break; ++ case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET: ++ err = mlx5_pci_reset_bus(dev); ++ if (err) ++ mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n"); ++ break; ++ default: ++ return -EOPNOTSUPP; + } + +- mlx5_enter_error_state(dev, true); +-done: +- fw_reset->ret = err; +- mlx5_fw_reset_complete_reload(dev, false); ++ return err; + } + +-static void mlx5_sync_reset_unload_event(struct work_struct *work) ++void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked) + { +- struct mlx5_fw_reset *fw_reset; +- struct mlx5_core_dev *dev; ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + unsigned long timeout; + bool reset_action; + u8 rst_state; + int err; + +- fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work); +- dev = fw_reset->dev; +- +- if (mlx5_sync_reset_clear_reset_requested(dev, false)) +- return; +- +- mlx5_core_warn(dev, "Sync Reset Unload. 
Function is forced down.\n"); +- +- err = mlx5_cmd_fast_teardown_hca(dev); +- if (err) +- mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err); +- else +- mlx5_enter_error_state(dev, true); +- +- if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) ++ if (locked) + mlx5_unload_one_devl_locked(dev, false); + else + mlx5_unload_one(dev, false); + ++ if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags)) ++ return; ++ + mlx5_set_fw_rst_ack(dev); + mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n"); + +@@ -583,17 +597,73 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) + goto done; + } + +- mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state); ++ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", ++ rst_state); + if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) { +- err = mlx5_pci_link_toggle(dev); ++ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); + if (err) { +- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err); ++ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", ++ err); + fw_reset->ret = err; + } + } + + done: +- mlx5_fw_reset_complete_reload(dev, true); ++ clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags); ++} ++ ++static void mlx5_sync_reset_now_event(struct work_struct *work) ++{ ++ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, ++ reset_now_work); ++ struct mlx5_core_dev *dev = fw_reset->dev; ++ int err; ++ ++ if (mlx5_sync_reset_clear_reset_requested(dev, false)) ++ return; ++ ++ mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n"); ++ ++ err = mlx5_cmd_fast_teardown_hca(dev); ++ if (err) { ++ mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err); ++ goto done; ++ } ++ ++ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); ++ if (err) { ++ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err); ++ set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags); ++ } ++ ++ mlx5_enter_error_state(dev, true); ++done: ++ fw_reset->ret = err; ++ mlx5_fw_reset_complete_reload(dev); ++} ++ ++static void mlx5_sync_reset_unload_event(struct work_struct *work) ++{ ++ struct mlx5_fw_reset *fw_reset; ++ struct mlx5_core_dev *dev; ++ int err; ++ ++ fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work); ++ dev = fw_reset->dev; ++ ++ if (mlx5_sync_reset_clear_reset_requested(dev, false)) ++ return; ++ ++ set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags); ++ mlx5_core_warn(dev, "Sync Reset Unload. 
Function is forced down.\n"); ++ ++ err = mlx5_cmd_fast_teardown_hca(dev); ++ if (err) ++ mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err); ++ else ++ mlx5_enter_error_state(dev, true); ++ ++ mlx5_fw_reset_complete_reload(dev); + } + + static void mlx5_sync_reset_abort_event(struct work_struct *work) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +index ea527d06a85f07..d5b28525c960dc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, + int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev); + + int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); ++void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked); + int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev, + struct netlink_ext_ack *extack); + void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 62a85f09b52fd7..8a11e410f7c135 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -627,6 +627,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) + if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload)) + MLX5_SET(cmd_hca_cap, set_hca_cap, + pci_sync_for_fw_update_with_driver_unload, 1); ++ if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method)) ++ MLX5_SET(cmd_hca_cap, set_hca_cap, ++ pcie_reset_using_hotreset_method, 1); + + if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports)) + MLX5_SET(cmd_hca_cap, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +index e34a8f88c518c1..d5b2b6cfc8d21d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +@@ -20,9 +20,16 @@ struct mlx5_sf { + u16 hw_state; + }; + ++static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port) ++{ ++ struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port); ++ ++ return container_of(mlx5_dl_port, struct mlx5_sf, dl_port); ++} ++ + struct mlx5_sf_table { + struct mlx5_core_dev *dev; /* To refer from notifier context. */ +- struct xarray port_indices; /* port index based lookup. */ ++ struct xarray function_ids; /* function id based lookup. */ + refcount_t refcount; + struct completion disable_complete; + struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. 
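Both the devlink port-function state callbacks and the VHCA event notifier take it before reading or updating sf->hw_state.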
*/ +@@ -30,33 +37,20 @@ struct mlx5_sf_table { + struct notifier_block vhca_nb; + }; + +-static struct mlx5_sf * +-mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index) +-{ +- return xa_load(&table->port_indices, port_index); +-} +- + static struct mlx5_sf * + mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id) + { +- unsigned long index; +- struct mlx5_sf *sf; +- +- xa_for_each(&table->port_indices, index, sf) { +- if (sf->hw_fn_id == fn_id) +- return sf; +- } +- return NULL; ++ return xa_load(&table->function_ids, fn_id); + } + +-static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) ++static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) + { +- return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL); ++ return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL); + } + +-static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) ++static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) + { +- xa_erase(&table->port_indices, sf->port_index); ++ xa_erase(&table->function_ids, sf->hw_fn_id); + } + + static struct mlx5_sf * +@@ -93,7 +87,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw, + sf->hw_state = MLX5_VHCA_STATE_ALLOCATED; + sf->controller = controller; + +- err = mlx5_sf_id_insert(table, sf); ++ err = mlx5_sf_function_id_insert(table, sf); + if (err) + goto insert_err; + +@@ -111,7 +105,6 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw, + + static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) + { +- mlx5_sf_id_erase(table, sf); + mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id); + trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id); + kfree(sf); +@@ -172,26 +165,19 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, + struct netlink_ext_ack *extack) + { + struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); ++ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); + struct mlx5_sf_table *table; +- struct mlx5_sf *sf; +- int err = 0; + + table = mlx5_sf_table_try_get(dev); + if (!table) + return -EOPNOTSUPP; + +- sf = mlx5_sf_lookup_by_index(table, dl_port->index); +- if (!sf) { +- err = -EOPNOTSUPP; +- goto sf_err; +- } + mutex_lock(&table->sf_state_lock); + *state = mlx5_sf_to_devlink_state(sf->hw_state); + *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state); + mutex_unlock(&table->sf_state_lock); +-sf_err: + mlx5_sf_table_put(table); +- return err; ++ return 0; + } + + static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, +@@ -257,8 +243,8 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, + struct netlink_ext_ack *extack) + { + struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); ++ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); + struct mlx5_sf_table *table; +- struct mlx5_sf *sf; + int err; + + table = mlx5_sf_table_try_get(dev); +@@ -267,14 +253,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, + "Port state set is only supported in eswitch switchdev mode or SF ports are disabled."); + return -EOPNOTSUPP; + } +- sf = mlx5_sf_lookup_by_index(table, dl_port->index); +- if (!sf) { +- err = -ENODEV; +- goto out; +- } +- + err = mlx5_sf_state_set(dev, table, sf, state, extack); +-out: + mlx5_sf_table_put(table); + return err; + } +@@ -301,6 +280,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct 
mlx5_sf_table *table, + return 0; + + esw_err: ++ mlx5_sf_function_id_erase(table, sf); + mlx5_sf_free(table, sf); + return err; + } +@@ -361,6 +341,8 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, + + static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) + { ++ mlx5_sf_function_id_erase(table, sf); ++ + if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) { + mlx5_sf_free(table, sf); + } else if (mlx5_sf_is_active(sf)) { +@@ -383,10 +365,9 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, + struct netlink_ext_ack *extack) + { + struct mlx5_core_dev *dev = devlink_priv(devlink); ++ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_sf_table *table; +- struct mlx5_sf *sf; +- int err = 0; + + table = mlx5_sf_table_try_get(dev); + if (!table) { +@@ -394,21 +375,14 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, + "Port del is only supported in eswitch switchdev mode or SF ports are disabled."); + return -EOPNOTSUPP; + } +- sf = mlx5_sf_lookup_by_index(table, dl_port->index); +- if (!sf) { +- err = -ENODEV; +- goto sf_err; +- } + + mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id); +- mlx5_sf_id_erase(table, sf); + + mutex_lock(&table->sf_state_lock); + mlx5_sf_dealloc(table, sf); + mutex_unlock(&table->sf_state_lock); +-sf_err: + mlx5_sf_table_put(table); +- return err; ++ return 0; + } + + static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state) +@@ -471,9 +445,8 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) + /* At this point, no new user commands can start and no vhca event can + * arrive. It is safe to destroy all user created SFs. + */ +- xa_for_each(&table->port_indices, index, sf) { ++ xa_for_each(&table->function_ids, index, sf) { + mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id); +- mlx5_sf_id_erase(table, sf); + mlx5_sf_dealloc(table, sf); + } + } +@@ -531,7 +504,7 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev) + + mutex_init(&table->sf_state_lock); + table->dev = dev; +- xa_init(&table->port_indices); ++ xa_init(&table->function_ids); + dev->priv.sf_table = table; + refcount_set(&table->refcount, 0); + table->esw_nb.notifier_call = mlx5_sf_esw_event; +@@ -566,6 +539,16 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) + mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); + WARN_ON(refcount_read(&table->refcount)); + mutex_destroy(&table->sf_state_lock); +- WARN_ON(!xa_empty(&table->port_indices)); ++ WARN_ON(!xa_empty(&table->function_ids)); + kfree(table); + } ++ ++bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev) ++{ ++ struct mlx5_sf_table *table = dev->priv.sf_table; ++ ++ if (!table) ++ return true; ++ ++ return xa_empty(&table->function_ids); ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +index 860f9ddb7107b8..89559a37997ad6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +@@ -17,6 +17,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev); + + int mlx5_sf_table_init(struct mlx5_core_dev *dev); + void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); ++bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev); + + int mlx5_devlink_sf_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *add_attr, +@@ -61,6 +62,11 @@ static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) + { + } + ++static inline bool 
mlx5_sf_table_empty(const struct mlx5_core_dev *dev) ++{ ++ return true; ++} ++ + #endif + + #endif +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +index a9837985a483d8..bdb4f527289d2d 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +@@ -69,7 +69,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, + init_waitqueue_head(&priv->tstamp_busy_wait); + } + +-static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) ++static void dwmac4_update_caps(struct stmmac_priv *priv) + { + if (priv->plat->tx_queues_to_use > 1) + priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); +@@ -1161,7 +1161,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, + + const struct stmmac_ops dwmac4_ops = { + .core_init = dwmac4_core_init, +- .phylink_get_caps = dwmac4_phylink_get_caps, ++ .update_caps = dwmac4_update_caps, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, +@@ -1204,7 +1204,7 @@ const struct stmmac_ops dwmac4_ops = { + + const struct stmmac_ops dwmac410_ops = { + .core_init = dwmac4_core_init, +- .phylink_get_caps = dwmac4_phylink_get_caps, ++ .update_caps = dwmac4_update_caps, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, +@@ -1253,7 +1253,7 @@ const struct stmmac_ops dwmac410_ops = { + + const struct stmmac_ops dwmac510_ops = { + .core_init = dwmac4_core_init, +- .phylink_get_caps = dwmac4_phylink_get_caps, ++ .update_caps = dwmac4_update_caps, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +index 052566f5b7f361..0bcb378fa0bc91 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +@@ -47,6 +47,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, + writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); + } + ++static void dwxgmac2_update_caps(struct stmmac_priv *priv) ++{ ++ if (!priv->dma_cap.mbps_10_100) ++ priv->hw->link.caps &= ~(MAC_10 | MAC_100); ++ else if (!priv->dma_cap.half_duplex) ++ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD); ++} ++ + static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) + { + u32 tx = readl(ioaddr + XGMAC_TX_CONFIG); +@@ -1583,6 +1591,7 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg * + + const struct stmmac_ops dwxgmac210_ops = { + .core_init = dwxgmac2_core_init, ++ .update_caps = dwxgmac2_update_caps, + .set_mac = dwxgmac2_set_mac, + .rx_ipc = dwxgmac2_rx_ipc, + .rx_queue_enable = dwxgmac2_rx_queue_enable, +@@ -1705,8 +1714,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | +- MAC_1000FD | MAC_2500FD | MAC_5000FD | +- MAC_10000FD; ++ MAC_10 | MAC_100 | MAC_1000FD | ++ MAC_2500FD | MAC_5000FD | MAC_10000FD; + mac->link.duplex = 0; + mac->link.speed10 = XGMAC_CONFIG_SS_10_MII; + mac->link.speed100 = XGMAC_CONFIG_SS_100_MII; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +index 05ea74e9379399..b2c03cb65c7cc8 100644 +--- 
a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +@@ -203,10 +203,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr, + } + + writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); +- +- /* Enable MTL RX overflow */ +- value = readl(ioaddr + XGMAC_MTL_QINTEN(channel)); +- writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel)); + } + + static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr, +@@ -386,8 +382,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, + static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) + { ++ struct stmmac_priv *priv; + u32 hw_cap; + ++ priv = container_of(dma_cap, struct stmmac_priv, dma_cap); ++ + /* MAC HW feature 0 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); + dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31; +@@ -410,6 +409,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, + dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4; + dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3; + dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; ++ if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20) ++ dma_cap->mbps_10_100 = 1; + + /* MAC HW feature 1 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h +index 47fb8e1646c2e9..ee9a7d98648b01 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h +@@ -300,8 +300,8 @@ struct stmmac_est; + struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); +- /* Get phylink capabilities */ +- void (*phylink_get_caps)(struct stmmac_priv *priv); ++ /* Update MAC capabilities */ ++ void (*update_caps)(struct stmmac_priv *priv); + /* Enable the MAC RX/TX */ + void (*set_mac)(void __iomem *ioaddr, bool enable); + /* Enable and verify that the IPC module is supported */ +@@ -423,8 +423,8 @@ struct stmmac_ops { + + #define stmmac_core_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, core_init, __args) +-#define stmmac_mac_phylink_get_caps(__priv) \ +- stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv) ++#define stmmac_mac_update_caps(__priv) \ ++ stmmac_do_void_callback(__priv, mac, update_caps, __priv) + #define stmmac_mac_set(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mac, __args) + #define stmmac_rx_ipc(__priv, __args...) 
\ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 615d25a0e46be5..ff5389a8efc33a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -1230,8 +1230,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) + xpcs_get_interfaces(priv->hw->xpcs, + priv->phylink_config.supported_interfaces); + +- /* Get the MAC specific capabilities */ +- stmmac_mac_phylink_get_caps(priv); ++ /* Refresh the MAC-specific capabilities */ ++ stmmac_mac_update_caps(priv); + + priv->phylink_config.mac_capabilities = priv->hw->link.caps; + +@@ -2426,6 +2426,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) + struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; ++ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; + struct xsk_buff_pool *pool = tx_q->xsk_pool; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc = NULL; +@@ -2496,7 +2497,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) + } + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, +- true, priv->mode, true, true, ++ csum, priv->mode, true, true, + xdp_desc.len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); +@@ -4789,6 +4790,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, + { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; ++ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc; + dma_addr_t dma_addr; +@@ -4833,7 +4835,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, +- true, priv->mode, true, true, ++ csum, priv->mode, true, true, + xdpf->len); + + tx_q->tx_count_frames++; +@@ -7232,7 +7234,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) + priv->rss.table[i] = ethtool_rxfh_indir_default(i, + rx_cnt); + +- stmmac_mac_phylink_get_caps(priv); ++ stmmac_mac_update_caps(priv); + + priv->phylink_config.mac_capabilities = priv->hw->link.caps; + +diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h +index cdb343779a8fb5..4ba6e32cf6d8d1 100644 +--- a/drivers/net/phy/mscc/mscc.h ++++ b/drivers/net/phy/mscc/mscc.h +@@ -476,6 +476,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev) + void vsc85xx_link_change_notify(struct phy_device *phydev); + void vsc8584_config_ts_intr(struct phy_device *phydev); + int vsc8584_ptp_init(struct phy_device *phydev); ++void vsc8584_ptp_deinit(struct phy_device *phydev); + int vsc8584_ptp_probe_once(struct phy_device *phydev); + int vsc8584_ptp_probe(struct phy_device *phydev); + irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev); +@@ -490,6 +491,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev) + { + return 0; + } ++static inline void vsc8584_ptp_deinit(struct phy_device *phydev) ++{ ++} + static inline int vsc8584_ptp_probe_once(struct phy_device *phydev) + { + return 0; +diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c +index 3de72d9cc22bd0..3a932b30f4358f 100644 +--- 
a/drivers/net/phy/mscc/mscc_main.c ++++ b/drivers/net/phy/mscc/mscc_main.c +@@ -2337,9 +2337,7 @@ static int vsc85xx_probe(struct phy_device *phydev) + + static void vsc85xx_remove(struct phy_device *phydev) + { +- struct vsc8531_private *priv = phydev->priv; +- +- skb_queue_purge(&priv->rx_skbs_list); ++ vsc8584_ptp_deinit(phydev); + } + + /* Microsemi VSC85xx PHYs */ +diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c +index add1a9ee721afa..1f6237705b44b7 100644 +--- a/drivers/net/phy/mscc/mscc_ptp.c ++++ b/drivers/net/phy/mscc/mscc_ptp.c +@@ -1297,7 +1297,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev) + + static int __vsc8584_init_ptp(struct phy_device *phydev) + { +- struct vsc8531_private *vsc8531 = phydev->priv; + static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 }; + static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 }; + u32 val; +@@ -1514,17 +1513,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev) + + vsc85xx_ts_eth_cmp1_sig(phydev); + +- vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp; +- vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp; +- vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp; +- vsc8531->mii_ts.ts_info = vsc85xx_ts_info; +- phydev->mii_ts = &vsc8531->mii_ts; +- +- memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps)); +- +- vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps, +- &phydev->mdio.dev); +- return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock); ++ return 0; + } + + void vsc8584_config_ts_intr(struct phy_device *phydev) +@@ -1551,6 +1540,16 @@ int vsc8584_ptp_init(struct phy_device *phydev) + return 0; + } + ++void vsc8584_ptp_deinit(struct phy_device *phydev) ++{ ++ struct vsc8531_private *vsc8531 = phydev->priv; ++ ++ if (vsc8531->ptp->ptp_clock) { ++ ptp_clock_unregister(vsc8531->ptp->ptp_clock); ++ skb_queue_purge(&vsc8531->rx_skbs_list); ++ } ++} ++ + irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev) + { + struct vsc8531_private *priv = phydev->priv; +@@ -1608,7 +1607,16 @@ int vsc8584_ptp_probe(struct phy_device *phydev) + + vsc8531->ptp->phydev = phydev; + +- return 0; ++ vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp; ++ vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp; ++ vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp; ++ vsc8531->mii_ts.ts_info = vsc85xx_ts_info; ++ phydev->mii_ts = &vsc8531->mii_ts; ++ ++ memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps)); ++ vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps, ++ &phydev->mdio.dev); ++ return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock); + } + + int vsc8584_ptp_probe_once(struct phy_device *phydev) +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 3976bc4295dd19..eba755b584a459 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1363,6 +1363,9 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c 
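/* A minimal sketch of the lifecycle pairing the mscc_ptp hunk above adopts:
 * ptp_clock_register() moves into the probe path and vsc8584_ptp_deinit()
 * undoes it on remove, so the clock and the queued timestamp skbs are torn
 * down exactly once. The example_* names are illustrative only, not part
 * of the driver.
 */
static int example_ptp_probe(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	priv->ptp->ptp_clock = ptp_clock_register(&priv->ptp->caps,
						  &phydev->mdio.dev);
	return PTR_ERR_OR_ZERO(priv->ptp->ptp_clock);
}

static void example_ptp_remove(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	/* only tear down what probe actually registered */
	if (priv->ptp->ptp_clock) {
		ptp_clock_unregister(priv->ptp->ptp_clock);
		skb_queue_purge(&priv->rx_skbs_list);
	}
}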
+index 4d57a4e3410546..18393800546c11 100644 +--- a/drivers/of/dynamic.c ++++ b/drivers/of/dynamic.c +@@ -306,15 +306,20 @@ int of_detach_node(struct device_node *np) + } + EXPORT_SYMBOL_GPL(of_detach_node); + ++void __of_prop_free(struct property *prop) ++{ ++ kfree(prop->name); ++ kfree(prop->value); ++ kfree(prop); ++} ++ + static void property_list_free(struct property *prop_list) + { + struct property *prop, *next; + + for (prop = prop_list; prop != NULL; prop = next) { + next = prop->next; +- kfree(prop->name); +- kfree(prop->value); +- kfree(prop); ++ __of_prop_free(prop); + } + } + +@@ -427,9 +432,7 @@ struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags) + return new; + + err_free: +- kfree(new->name); +- kfree(new->value); +- kfree(new); ++ __of_prop_free(new); + return NULL; + } + +@@ -471,9 +474,7 @@ struct device_node *__of_node_dup(const struct device_node *np, + if (!new_pp) + goto err_prop; + if (__of_add_property(node, new_pp)) { +- kfree(new_pp->name); +- kfree(new_pp->value); +- kfree(new_pp); ++ __of_prop_free(new_pp); + goto err_prop; + } + } +@@ -934,12 +935,14 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs, + + ret = of_changeset_add_property(ocs, np, new_pp); + if (ret) { +- kfree(new_pp->name); +- kfree(new_pp->value); +- kfree(new_pp); ++ __of_prop_free(new_pp); ++ return ret; + } + +- return ret; ++ new_pp->next = np->deadprops; ++ np->deadprops = new_pp; ++ ++ return 0; + } + + /** +diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h +index 21f8f5e80917d1..73b55f4f84a3cc 100644 +--- a/drivers/of/of_private.h ++++ b/drivers/of/of_private.h +@@ -123,6 +123,7 @@ extern void *__unflatten_device_tree(const void *blob, + * own the devtree lock or work on detached trees only. 
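 * __of_prop_free() below is the common helper for unwinding these
 * duplications: it releases the property's name, its value buffer and the
 * struct property itself in one call. Typical error-path shape, as in
 * __of_node_dup():
 *
 *	new_pp = __of_prop_dup(pp, GFP_KERNEL);
 *	if (!new_pp)
 *		goto err_prop;
 *	if (__of_add_property(node, new_pp)) {
 *		__of_prop_free(new_pp);
 *		goto err_prop;
 *	}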
+ */ + struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags); ++void __of_prop_free(struct property *prop); + struct device_node *__of_node_dup(const struct device_node *np, + const char *full_name); + +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c +index a9a292d6d59b26..dc13299586414f 100644 +--- a/drivers/of/overlay.c ++++ b/drivers/of/overlay.c +@@ -262,9 +262,7 @@ static struct property *dup_and_fixup_symbol_prop( + return new_prop; + + err_free_new_prop: +- kfree(new_prop->name); +- kfree(new_prop->value); +- kfree(new_prop); ++ __of_prop_free(new_prop); + err_free_target_path: + kfree(target_path); + +@@ -361,11 +359,8 @@ static int add_changeset_property(struct overlay_changeset *ovcs, + pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", + target->np, new_prop->name); + +- if (ret) { +- kfree(new_prop->name); +- kfree(new_prop->value); +- kfree(new_prop); +- } ++ if (ret) ++ __of_prop_free(new_prop); + return ret; + } + +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c +index 3b22c36bfb0b7c..5bfec440b4fd70 100644 +--- a/drivers/of/unittest.c ++++ b/drivers/of/unittest.c +@@ -800,15 +800,11 @@ static void __init of_unittest_property_copy(void) + + new = __of_prop_dup(&p1, GFP_KERNEL); + unittest(new && propcmp(&p1, new), "empty property didn't copy correctly\n"); +- kfree(new->value); +- kfree(new->name); +- kfree(new); ++ __of_prop_free(new); + + new = __of_prop_dup(&p2, GFP_KERNEL); + unittest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n"); +- kfree(new->value); +- kfree(new->name); +- kfree(new); ++ __of_prop_free(new); + #endif + } + +@@ -3665,9 +3661,7 @@ static __init void of_unittest_overlay_high_level(void) + goto err_unlock; + } + if (__of_add_property(of_symbols, new_prop)) { +- kfree(new_prop->name); +- kfree(new_prop->value); +- kfree(new_prop); ++ __of_prop_free(new_prop); + /* "name" auto-generated by unflatten */ + if (!strcmp(prop->name, "name")) + continue; +diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig +index 7dfb7190580efa..ab3908a923e3a0 100644 +--- a/drivers/pinctrl/Kconfig ++++ b/drivers/pinctrl/Kconfig +@@ -438,6 +438,7 @@ config PINCTRL_STMFX + tristate "STMicroelectronics STMFX GPIO expander pinctrl driver" + depends on I2C + depends on OF_GPIO ++ depends on HAS_IOMEM + select GENERIC_PINCONF + select GPIOLIB_IRQCHIP + select MFD_STMFX +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 24f6eefb68030d..df37ac81620e62 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr, + return show_shost_mode(supported_mode, buf); + } + +-static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); ++static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL); + + static ssize_t + show_shost_active_mode(struct device *dev, +@@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev, + return show_shost_mode(shost->active_mode, buf); + } + +-static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); ++static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL); + + static int check_reset_type(const char *str) + { +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index f2ed7167c84809..5ad237d77a9a9f 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -96,6 +96,7 @@ struct vhost_net_ubuf_ref { + atomic_t refcount; + 
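/* the final put on refcount wakes 'wait'; freeing is deferred via 'rcu'
 * (kfree_rcu() in vhost_net_ubuf_put_wait_and_free()) so the thread doing
 * the wakeup in vhost_net_ubuf_put() can still dereference the structure
 * safely under rcu_read_lock().
 */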
wait_queue_head_t wait; + struct vhost_virtqueue *vq; ++ struct rcu_head rcu; + }; + + #define VHOST_NET_BATCH 64 +@@ -249,9 +250,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) + + static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) + { +- int r = atomic_sub_return(1, &ubufs->refcount); ++ int r; ++ ++ rcu_read_lock(); ++ r = atomic_sub_return(1, &ubufs->refcount); + if (unlikely(!r)) + wake_up(&ubufs->wait); ++ rcu_read_unlock(); + return r; + } + +@@ -264,7 +269,7 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) + static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) + { + vhost_net_ubuf_put_and_wait(ubufs); +- kfree(ubufs); ++ kfree_rcu(ubufs, rcu); + } + + static void vhost_net_clear_ubuf_info(struct vhost_net *n) +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index 586c5709dfb554..34438981ddd805 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -90,6 +90,10 @@ static int efivarfs_d_compare(const struct dentry *dentry, + { + int guid = len - EFI_VARIABLE_GUID_LEN; + ++ /* Parallel lookups may produce a temporary invalid filename */ ++ if (guid <= 0) ++ return 1; ++ + if (name->len != len) + return 1; + +diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c +index d852b43ac43e32..c1f802ecc47b2c 100644 +--- a/fs/erofs/zdata.c ++++ b/fs/erofs/zdata.c +@@ -1401,6 +1401,16 @@ static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work) + } + #endif + ++/* Use (kthread_)work in atomic contexts to minimize scheduling overhead */ ++static inline bool z_erofs_in_atomic(void) ++{ ++ if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth()) ++ return true; ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return true; ++ return !preemptible(); ++} ++ + static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, + int bios) + { +@@ -1415,8 +1425,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, + + if (atomic_add_return(bios, &io->pending_bios)) + return; +- /* Use (kthread_)work and sync decompression for atomic contexts only */ +- if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) { ++ if (z_erofs_in_atomic()) { + #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD + struct kthread_worker *worker; + +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 040b6b79c75e59..0ea3916ed1dcb1 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -206,83 +206,6 @@ nfs_page_group_lock_head(struct nfs_page *req) + return head; + } + +-/* +- * nfs_unroll_locks - unlock all newly locked reqs and wait on @req +- * @head: head request of page group, must be holding head lock +- * @req: request that couldn't lock and needs to wait on the req bit lock +- * +- * This is a helper function for nfs_lock_and_join_requests +- * returns 0 on success, < 0 on error. +- */ +-static void +-nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req) +-{ +- struct nfs_page *tmp; +- +- /* relinquish all the locks successfully grabbed this run */ +- for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { +- if (!kref_read(&tmp->wb_kref)) +- continue; +- nfs_unlock_and_release_request(tmp); +- } +-} +- +-/* +- * nfs_page_group_lock_subreq - try to lock a subrequest +- * @head: head request of page group +- * @subreq: request to lock +- * +- * This is a helper function for nfs_lock_and_join_requests which +- * must be called with the head request and page group both locked. +- * On error, it returns with the page group unlocked. 
+- */ +-static int +-nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq) +-{ +- int ret; +- +- if (!kref_get_unless_zero(&subreq->wb_kref)) +- return 0; +- while (!nfs_lock_request(subreq)) { +- nfs_page_group_unlock(head); +- ret = nfs_wait_on_request(subreq); +- if (!ret) +- ret = nfs_page_group_lock(head); +- if (ret < 0) { +- nfs_unroll_locks(head, subreq); +- nfs_release_request(subreq); +- return ret; +- } +- } +- return 0; +-} +- +-/* +- * nfs_page_group_lock_subrequests - try to lock the subrequests +- * @head: head request of page group +- * +- * This is a helper function for nfs_lock_and_join_requests which +- * must be called with the head request locked. +- */ +-int nfs_page_group_lock_subrequests(struct nfs_page *head) +-{ +- struct nfs_page *subreq; +- int ret; +- +- ret = nfs_page_group_lock(head); +- if (ret < 0) +- return ret; +- /* lock each request in the page group */ +- for (subreq = head->wb_this_page; subreq != head; +- subreq = subreq->wb_this_page) { +- ret = nfs_page_group_lock_subreq(head, subreq); +- if (ret < 0) +- return ret; +- } +- nfs_page_group_unlock(head); +- return 0; +-} +- + /* + * nfs_page_set_headlock - set the request PG_HEADLOCK + * @req: request that is to be locked +@@ -349,13 +272,14 @@ nfs_page_group_unlock(struct nfs_page *req) + nfs_page_clear_headlock(req); + } + +-/* +- * nfs_page_group_sync_on_bit_locked ++/** ++ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set ++ * @req: request in page group ++ * @bit: PG_* bit that is used to sync page group + * + * must be called with page group lock held + */ +-static bool +-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit) ++bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit) + { + struct nfs_page *head = req->wb_head; + struct nfs_page *tmp; +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 7d03811f44a4bb..cb1e9996fcc8ec 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -156,20 +156,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) + } + } + +-static int +-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) ++static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) + { +- int ret; +- +- if (!test_bit(PG_REMOVE, &req->wb_flags)) +- return 0; +- ret = nfs_page_group_lock(req); +- if (ret) +- return ret; + if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) + nfs_page_set_inode_ref(req, inode); +- nfs_page_group_unlock(req); +- return 0; + } + + static struct nfs_page *nfs_folio_private_request(struct folio *folio) +@@ -238,36 +228,6 @@ static struct nfs_page *nfs_folio_find_head_request(struct folio *folio) + return req; + } + +-static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio) +-{ +- struct inode *inode = folio_file_mapping(folio)->host; +- struct nfs_page *req, *head; +- int ret; +- +- for (;;) { +- req = nfs_folio_find_head_request(folio); +- if (!req) +- return req; +- head = nfs_page_group_lock_head(req); +- if (head != req) +- nfs_release_request(req); +- if (IS_ERR(head)) +- return head; +- ret = nfs_cancel_remove_inode(head, inode); +- if (ret < 0) { +- nfs_unlock_and_release_request(head); +- return ERR_PTR(ret); +- } +- /* Ensure that nobody removed the request before we locked it */ +- if (head == nfs_folio_private_request(folio)) +- break; +- if (folio_test_swapcache(folio)) +- break; +- nfs_unlock_and_release_request(head); +- } +- return head; +-} +- + /* Adjust the file length if we're writing 
beyond the end */ + static void nfs_grow_file(struct folio *folio, unsigned int offset, + unsigned int count) +@@ -548,6 +508,57 @@ void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, + nfs_destroy_unlinked_subrequests(destroy_list, head, inode); + } + ++/* ++ * nfs_unroll_locks - unlock all newly locked reqs and wait on @req ++ * @head: head request of page group, must be holding head lock ++ * @req: request that couldn't lock and needs to wait on the req bit lock ++ * ++ * This is a helper function for nfs_lock_and_join_requests ++ * returns 0 on success, < 0 on error. ++ */ ++static void ++nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req) ++{ ++ struct nfs_page *tmp; ++ ++ /* relinquish all the locks successfully grabbed this run */ ++ for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { ++ if (!kref_read(&tmp->wb_kref)) ++ continue; ++ nfs_unlock_and_release_request(tmp); ++ } ++} ++ ++/* ++ * nfs_page_group_lock_subreq - try to lock a subrequest ++ * @head: head request of page group ++ * @subreq: request to lock ++ * ++ * This is a helper function for nfs_lock_and_join_requests which ++ * must be called with the head request and page group both locked. ++ * On error, it returns with the page group unlocked. ++ */ ++static int ++nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq) ++{ ++ int ret; ++ ++ if (!kref_get_unless_zero(&subreq->wb_kref)) ++ return 0; ++ while (!nfs_lock_request(subreq)) { ++ nfs_page_group_unlock(head); ++ ret = nfs_wait_on_request(subreq); ++ if (!ret) ++ ret = nfs_page_group_lock(head); ++ if (ret < 0) { ++ nfs_unroll_locks(head, subreq); ++ nfs_release_request(subreq); ++ return ret; ++ } ++ } ++ return 0; ++} ++ + /* + * nfs_lock_and_join_requests - join all subreqs to the head req + * @folio: the folio used to lookup the "page group" of nfs_page structures +@@ -566,30 +577,59 @@ void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, + static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio) + { + struct inode *inode = folio_file_mapping(folio)->host; +- struct nfs_page *head; ++ struct nfs_page *head, *subreq; + struct nfs_commit_info cinfo; + int ret; + +- nfs_init_cinfo_from_inode(&cinfo, inode); + /* + * A reference is taken only on the head request which acts as a + * reference to the whole page group - the group will not be destroyed + * until the head reference is released. 
+ */ +- head = nfs_folio_find_and_lock_request(folio); +- if (IS_ERR_OR_NULL(head)) +- return head; ++retry: ++ head = nfs_folio_find_head_request(folio); ++ if (!head) ++ return NULL; + +- /* lock each request in the page group */ +- ret = nfs_page_group_lock_subrequests(head); +- if (ret < 0) { ++ while (!nfs_lock_request(head)) { ++ ret = nfs_wait_on_request(head); ++ if (ret < 0) { ++ nfs_release_request(head); ++ return ERR_PTR(ret); ++ } ++ } ++ ++ ret = nfs_page_group_lock(head); ++ if (ret < 0) ++ goto out_unlock; ++ ++ /* Ensure that nobody removed the request before we locked it */ ++ if (head != folio->private && !folio_test_swapcache(folio)) { ++ nfs_page_group_unlock(head); + nfs_unlock_and_release_request(head); +- return ERR_PTR(ret); ++ goto retry; + } + +- nfs_join_page_group(head, &cinfo, inode); ++ nfs_cancel_remove_inode(head, inode); ++ ++ /* lock each request in the page group */ ++ for (subreq = head->wb_this_page; ++ subreq != head; ++ subreq = subreq->wb_this_page) { ++ ret = nfs_page_group_lock_subreq(head, subreq); ++ if (ret < 0) ++ goto out_unlock; ++ } + ++ nfs_page_group_unlock(head); ++ ++ nfs_init_cinfo_from_inode(&cinfo, inode); ++ nfs_join_page_group(head, &cinfo, inode); + return head; ++ ++out_unlock: ++ nfs_unlock_and_release_request(head); ++ return ERR_PTR(ret); + } + + static void nfs_write_error(struct nfs_page *req, int error) +@@ -792,7 +832,8 @@ static void nfs_inode_remove_request(struct nfs_page *req) + { + struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req)); + +- if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { ++ nfs_page_group_lock(req); ++ if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) { + struct folio *folio = nfs_page_to_folio(req->wb_head); + struct address_space *mapping = folio_file_mapping(folio); + +@@ -804,6 +845,7 @@ static void nfs_inode_remove_request(struct nfs_page *req) + } + spin_unlock(&mapping->private_lock); + } ++ nfs_page_group_unlock(req); + + if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { + atomic_long_dec(&nfsi->nrequests); +diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c +index a1ab95f382d566..2744d5580d195f 100644 +--- a/fs/smb/client/cifsfs.c ++++ b/fs/smb/client/cifsfs.c +@@ -1371,6 +1371,20 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off, + netfs_resize_file(&target_cifsi->netfs, new_size); + fscache_resize_cookie(cifs_inode_cookie(target_inode), + new_size); ++ } else if (rc == -EOPNOTSUPP) { ++ /* ++ * copy_file_range syscall man page indicates EINVAL ++ * is returned e.g when "fd_in and fd_out refer to the ++ * same file and the source and target ranges overlap." 
++ * Test generic/157 was what showed these cases where ++ * we need to remap EOPNOTSUPP to EINVAL ++ */ ++ if (off >= src_inode->i_size) { ++ rc = -EINVAL; ++ } else if (src_inode == target_inode) { ++ if (off + len > destoff) ++ rc = -EINVAL; ++ } + } + } + +diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c +index d93ebd58ecae16..6c16c4f34d8824 100644 +--- a/fs/smb/client/inode.c ++++ b/fs/smb/client/inode.c +@@ -1856,15 +1856,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + struct tcon_link *tlink; + struct cifs_tcon *tcon; ++ __u32 dosattr = 0, origattr = 0; + struct TCP_Server_Info *server; + struct iattr *attrs = NULL; +- __u32 dosattr = 0, origattr = 0; ++ bool rehash = false; + + cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry); + + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + ++ /* Unhash dentry in advance to prevent any concurrent opens */ ++ spin_lock(&dentry->d_lock); ++ if (!d_unhashed(dentry)) { ++ __d_drop(dentry); ++ rehash = true; ++ } ++ spin_unlock(&dentry->d_lock); ++ + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) + return PTR_ERR(tlink); +@@ -1915,7 +1924,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) + cifs_drop_nlink(inode); + } + } else if (rc == -ENOENT) { +- d_drop(dentry); ++ if (simple_positive(dentry)) ++ d_delete(dentry); + } else if (rc == -EBUSY) { + if (server->ops->rename_pending_delete) { + rc = server->ops->rename_pending_delete(full_path, +@@ -1968,6 +1978,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) + kfree(attrs); + free_xid(xid); + cifs_put_tlink(tlink); ++ if (rehash) ++ d_rehash(dentry); + return rc; + } + +@@ -2367,6 +2379,7 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, + struct cifs_sb_info *cifs_sb; + struct tcon_link *tlink; + struct cifs_tcon *tcon; ++ bool rehash = false; + unsigned int xid; + int rc, tmprc; + int retry_count = 0; +@@ -2382,6 +2395,17 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + ++ /* ++ * Prevent any concurrent opens on the target by unhashing the dentry. ++ * VFS already unhashes the target when renaming directories. ++ */ ++ if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) { ++ if (!d_unhashed(target_dentry)) { ++ d_drop(target_dentry); ++ rehash = true; ++ } ++ } ++ + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) + return PTR_ERR(tlink); +@@ -2421,6 +2445,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, + } + } + ++ if (!rc) ++ rehash = false; + /* + * No-replace is the natural behavior for CIFS, so skip unlink hacks. 
+ */ +@@ -2479,12 +2505,16 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, + goto cifs_rename_exit; + rc = cifs_do_rename(xid, source_dentry, from_name, + target_dentry, to_name); ++ if (!rc) ++ rehash = false; + } + + /* force revalidate to go get info when needed */ + CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; + + cifs_rename_exit: ++ if (rehash) ++ d_rehash(target_dentry); + kfree(info_buf_source); + free_dentry_path(page2); + free_dentry_path(page1); +diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c +index e1078a1decdfa3..0cc80f472432ad 100644 +--- a/fs/smb/client/smb2inode.c ++++ b/fs/smb/client/smb2inode.c +@@ -206,8 +206,10 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, + server = cifs_pick_channel(ses); + + vars = kzalloc(sizeof(*vars), GFP_ATOMIC); +- if (vars == NULL) +- return -ENOMEM; ++ if (vars == NULL) { ++ rc = -ENOMEM; ++ goto out; ++ } + rqst = &vars->rqst[0]; + rsp_iov = &vars->rsp_iov[0]; + +@@ -828,6 +830,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, + smb2_should_replay(tcon, &retries, &cur_sleep)) + goto replay_again; + ++out: + if (cfile) + cifsFileInfo_put(cfile); + +diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c +index 54de405cbab5ac..4d369876487bde 100644 +--- a/fs/xfs/libxfs/xfs_attr_remote.c ++++ b/fs/xfs/libxfs/xfs_attr_remote.c +@@ -418,6 +418,13 @@ xfs_attr_rmtval_get( + dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt, + 0, &bp, &xfs_attr3_rmt_buf_ops); ++ /* ++ * ENODATA from disk implies a disk medium failure; ++ * ENODATA for xattrs means attribute not found, so ++ * disambiguate that here. ++ */ ++ if (error == -ENODATA) ++ error = -EIO; + if (error) + return error; + +diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c +index 28bbfc31039c0e..1efd45076ee2ae 100644 +--- a/fs/xfs/libxfs/xfs_da_btree.c ++++ b/fs/xfs/libxfs/xfs_da_btree.c +@@ -2649,6 +2649,12 @@ xfs_da_read_buf( + + error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0, + &bp, ops); ++ /* ++ * ENODATA from disk implies a disk medium failure; ENODATA for ++ * xattrs means attribute not found, so disambiguate that here. 
++ */ ++ if (error == -ENODATA && whichfork == XFS_ATTR_FORK) ++ error = -EIO; + if (error) + goto out_free; + +diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h +index 45f2f278b50a8a..70807c679f1abc 100644 +--- a/include/linux/atmdev.h ++++ b/include/linux/atmdev.h +@@ -185,6 +185,7 @@ struct atmdev_ops { /* only send is required */ + int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, + void __user *arg); + #endif ++ int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb); + int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 9106771bb92f01..4913d364e97747 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1731,7 +1731,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { + u8 reserved_at_328[0x2]; + u8 relaxed_ordering_read[0x1]; + u8 log_max_pd[0x5]; +- u8 reserved_at_330[0x6]; ++ u8 reserved_at_330[0x5]; ++ u8 pcie_reset_using_hotreset_method[0x1]; + u8 pci_sync_for_fw_update_with_driver_unload[0x1]; + u8 vnic_env_cnt_steering_fail[0x1]; + u8 vport_counter_local_loopback[0x1]; +@@ -10824,6 +10825,11 @@ struct mlx5_ifc_mcda_reg_bits { + u8 data[][0x20]; + }; + ++enum { ++ MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0, ++ MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1, ++}; ++ + enum { + MLX5_MFRL_REG_RESET_STATE_IDLE = 0, + MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1, +@@ -10851,7 +10857,8 @@ struct mlx5_ifc_mfrl_reg_bits { + u8 pci_sync_for_fw_update_start[0x1]; + u8 pci_sync_for_fw_update_resp[0x2]; + u8 rst_type_sel[0x3]; +- u8 reserved_at_28[0x4]; ++ u8 pci_reset_req_method[0x3]; ++ u8 reserved_at_2b[0x1]; + u8 reset_state[0x4]; + u8 reset_type[0x8]; + u8 reset_level[0x8]; +diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h +index 1c315f854ea801..6a46069c5a3689 100644 +--- a/include/linux/nfs_page.h ++++ b/include/linux/nfs_page.h +@@ -156,13 +156,13 @@ extern int nfs_wait_on_request(struct nfs_page *); + extern void nfs_unlock_request(struct nfs_page *req); + extern void nfs_unlock_and_release_request(struct nfs_page *); + extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); +-extern int nfs_page_group_lock_subrequests(struct nfs_page *head); + extern void nfs_join_page_group(struct nfs_page *head, + struct nfs_commit_info *cinfo, + struct inode *inode); + extern int nfs_page_group_lock(struct nfs_page *); + extern void nfs_page_group_unlock(struct nfs_page *); + extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); ++extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int); + extern int nfs_page_set_headlock(struct nfs_page *req); + extern void nfs_page_clear_headlock(struct nfs_page *req); + extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); +diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h +index 3cb2d10cac930b..e2e588b08fe90a 100644 +--- a/include/net/bluetooth/hci_sync.h ++++ b/include/net/bluetooth/hci_sync.h +@@ -72,7 +72,7 @@ int hci_update_class_sync(struct hci_dev *hdev); + + int hci_update_eir_sync(struct hci_dev *hdev); + int hci_update_class_sync(struct hci_dev *hdev); +-int hci_update_name_sync(struct hci_dev *hdev); ++int hci_update_name_sync(struct hci_dev *hdev, const u8 *name); + int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode); + + int hci_get_random_address(struct hci_dev 
*hdev, bool require_privacy,
+diff --git a/include/net/rose.h b/include/net/rose.h
+index 23267b4efcfa32..2b5491bbf39ab5 100644
+--- a/include/net/rose.h
++++ b/include/net/rose.h
+@@ -8,6 +8,7 @@
+ #ifndef _ROSE_H
+ #define _ROSE_H
+ 
++#include <linux/refcount.h>
+ #include <linux/rose.h>
+ #include <net/sock.h>
+ #include <net/ax25.h>
+@@ -96,7 +97,7 @@ struct rose_neigh {
+ ax25_cb *ax25;
+ struct net_device *dev;
+ unsigned short count;
+- unsigned short use;
++ refcount_t use;
+ unsigned int number;
+ char restarted;
+ char dce_mode;
+@@ -151,6 +152,21 @@ struct rose_sock {
+ 
+ #define rose_sk(sk) ((struct rose_sock *)(sk))
+ 
++static inline void rose_neigh_hold(struct rose_neigh *rose_neigh)
++{
++ refcount_inc(&rose_neigh->use);
++}
++
++static inline void rose_neigh_put(struct rose_neigh *rose_neigh)
++{
++ if (refcount_dec_and_test(&rose_neigh->use)) {
++ if (rose_neigh->ax25)
++ ax25_cb_put(rose_neigh->ax25);
++ kfree(rose_neigh->digipeat);
++ kfree(rose_neigh);
++ }
++}
++
+ /* af_rose.c */
+ extern ax25_address rose_callsign;
+ extern int sysctl_rose_restart_request_timeout;
+diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
+index b481c48a31a630..6b0be9598a973f 100644
+--- a/kernel/dma/pool.c
++++ b/kernel/dma/pool.c
+@@ -102,8 +102,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
+ 
+ #ifdef CONFIG_DMA_DIRECT_REMAP
+ addr = dma_common_contiguous_remap(page, pool_size,
+- pgprot_dmacoherent(PAGE_KERNEL),
+- __builtin_return_address(0));
++ pgprot_decrypted(pgprot_dmacoherent(PAGE_KERNEL)),
++ __builtin_return_address(0));
+ if (!addr)
+ goto free_page;
+ #else
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 907e45361939be..a32c8637503d14 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -10162,10 +10162,10 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+ ret = print_trace_line(&iter);
+ if (ret != TRACE_TYPE_NO_CONSUME)
+ trace_consume(&iter);
++
++ trace_printk_seq(&iter.seq);
+ }
+ touch_nmi_watchdog();
+-
+- trace_printk_seq(&iter.seq);
+ }
+ 
+ if (!cnt)
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 9cc82acbc73588..48bb3f66a3f2ab 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -635,18 +635,27 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
+ 
+ skb->dev = NULL; /* for paths shared with net_device interfaces */
+ if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
+- atm_return_tx(vcc, skb);
+- kfree_skb(skb);
+ error = -EFAULT;
+- goto out;
++ goto free_skb;
+ }
+ if (eff != size)
+ memset(skb->data + size, 0, eff-size);
++
++ if (vcc->dev->ops->pre_send) {
++ error = vcc->dev->ops->pre_send(vcc, skb);
++ if (error)
++ goto free_skb;
++ }
++
+ error = vcc->dev->ops->send(vcc, skb);
+ error = error ?
error : size; + out: + release_sock(sk); + return error; ++free_skb: ++ atm_return_tx(vcc, skb); ++ kfree_skb(skb); ++ goto out; + } + + __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait) +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index c06010c0d88293..5eed23b8d6c332 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -2692,7 +2692,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) + if (!conn) + goto unlock; + +- if (status) { ++ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) { + mgmt_disconnect_failed(hdev, &conn->dst, conn->type, + conn->dst_type, status); + +@@ -2707,6 +2707,12 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) + goto done; + } + ++ /* During suspend, mark connection as closed immediately ++ * since we might not receive HCI_EV_DISCONN_COMPLETE ++ */ ++ if (hdev->suspended) ++ conn->state = BT_CLOSED; ++ + mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); + + if (conn->type == ACL_LINK) { +@@ -4386,7 +4392,17 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, + if (!conn) + continue; + +- conn->sent -= count; ++ /* Check if there is really enough packets outstanding before ++ * attempting to decrease the sent counter otherwise it could ++ * underflow.. ++ */ ++ if (conn->sent >= count) { ++ conn->sent -= count; ++ } else { ++ bt_dev_warn(hdev, "hcon %p sent %u < count %u", ++ conn, conn->sent, count); ++ conn->sent = 0; ++ } + + switch (conn->type) { + case ACL_LINK: +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index 01aca077071174..020f1809fc9946 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -3491,13 +3491,13 @@ int hci_update_scan_sync(struct hci_dev *hdev) + return hci_write_scan_enable_sync(hdev, scan); + } + +-int hci_update_name_sync(struct hci_dev *hdev) ++int hci_update_name_sync(struct hci_dev *hdev, const u8 *name) + { + struct hci_cp_write_local_name cp; + + memset(&cp, 0, sizeof(cp)); + +- memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); ++ memcpy(cp.name, name, sizeof(cp.name)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, + sizeof(cp), &cp, +@@ -3550,7 +3550,7 @@ int hci_powered_update_sync(struct hci_dev *hdev) + hci_write_fast_connectable_sync(hdev, false); + hci_update_scan_sync(hdev); + hci_update_class_sync(hdev); +- hci_update_name_sync(hdev); ++ hci_update_name_sync(hdev, hdev->dev_name); + hci_update_eir_sync(hdev); + } + +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 82fa8c28438f25..9b01eaaa0eb2d6 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -3819,8 +3819,11 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err) + + static int set_name_sync(struct hci_dev *hdev, void *data) + { ++ struct mgmt_pending_cmd *cmd = data; ++ struct mgmt_cp_set_local_name *cp = cmd->param; ++ + if (lmp_bredr_capable(hdev)) { +- hci_update_name_sync(hdev); ++ hci_update_name_sync(hdev, cp->name); + hci_update_eir_sync(hdev); + } + +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 8672ebbace980b..20f5c8307443d3 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2547,12 +2547,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res, + !netif_is_l3_master(dev_out)) + return ERR_PTR(-EINVAL); + +- if (ipv4_is_lbcast(fl4->daddr)) ++ if (ipv4_is_lbcast(fl4->daddr)) { + type = RTN_BROADCAST; +- else if (ipv4_is_multicast(fl4->daddr)) ++ ++ /* reset fi to prevent 
gateway resolution */ ++ fi = NULL; ++ } else if (ipv4_is_multicast(fl4->daddr)) { + type = RTN_MULTICAST; +- else if (ipv4_is_zeronet(fl4->daddr)) ++ } else if (ipv4_is_zeronet(fl4->daddr)) { + return ERR_PTR(-EINVAL); ++ } + + if (dev_out->flags & IFF_LOOPBACK) + flags |= RTCF_LOCAL; +diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c +index 66e9ceaaa43a14..614695444b6acb 100644 +--- a/net/rose/af_rose.c ++++ b/net/rose/af_rose.c +@@ -170,7 +170,7 @@ void rose_kill_by_neigh(struct rose_neigh *neigh) + + if (rose->neighbour == neigh) { + rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + rose->neighbour = NULL; + } + } +@@ -212,7 +212,7 @@ static void rose_kill_by_device(struct net_device *dev) + if (rose->device == dev) { + rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); + if (rose->neighbour) +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + netdev_put(rose->device, &rose->dev_tracker); + rose->device = NULL; + } +@@ -655,7 +655,7 @@ static int rose_release(struct socket *sock) + break; + + case ROSE_STATE_2: +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + release_sock(sk); + rose_disconnect(sk, 0, -1, -1); + lock_sock(sk); +@@ -823,6 +823,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le + rose->lci = rose_new_lci(rose->neighbour); + if (!rose->lci) { + err = -ENETUNREACH; ++ rose_neigh_put(rose->neighbour); + goto out_release; + } + +@@ -834,12 +835,14 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le + dev = rose_dev_first(); + if (!dev) { + err = -ENETUNREACH; ++ rose_neigh_put(rose->neighbour); + goto out_release; + } + + user = ax25_findbyuid(current_euid()); + if (!user) { + err = -EINVAL; ++ rose_neigh_put(rose->neighbour); + dev_put(dev); + goto out_release; + } +@@ -874,8 +877,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le + + rose->state = ROSE_STATE_1; + +- rose->neighbour->use++; +- + rose_write_internal(sk, ROSE_CALL_REQUEST); + rose_start_heartbeat(sk); + rose_start_t1timer(sk); +@@ -1077,7 +1078,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros + GFP_ATOMIC); + make_rose->facilities = facilities; + +- make_rose->neighbour->use++; ++ rose_neigh_hold(make_rose->neighbour); + + if (rose_sk(sk)->defer) { + make_rose->state = ROSE_STATE_5; +diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c +index 4d67f36dce1b49..7caae93937ee9b 100644 +--- a/net/rose/rose_in.c ++++ b/net/rose/rose_in.c +@@ -56,7 +56,7 @@ static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int framety + case ROSE_CLEAR_REQUEST: + rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); + rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + break; + + default: +@@ -79,12 +79,12 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety + case ROSE_CLEAR_REQUEST: + rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); + rose_disconnect(sk, 0, skb->data[3], skb->data[4]); +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + break; + + case ROSE_CLEAR_CONFIRMATION: + rose_disconnect(sk, 0, -1, -1); +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + break; + + default: +@@ -120,7 +120,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety + case ROSE_CLEAR_REQUEST: + rose_write_internal(sk, 
ROSE_CLEAR_CONFIRMATION); + rose_disconnect(sk, 0, skb->data[3], skb->data[4]); +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + break; + + case ROSE_RR: +@@ -233,7 +233,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety + case ROSE_CLEAR_REQUEST: + rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); + rose_disconnect(sk, 0, skb->data[3], skb->data[4]); +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + break; + + default: +@@ -253,7 +253,7 @@ static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int framety + if (frametype == ROSE_CLEAR_REQUEST) { + rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); + rose_disconnect(sk, 0, skb->data[3], skb->data[4]); +- rose_sk(sk)->neighbour->use--; ++ rose_neigh_put(rose_sk(sk)->neighbour); + } + + return 0; +diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c +index a7054546f52dfa..28746ae5a25828 100644 +--- a/net/rose/rose_route.c ++++ b/net/rose/rose_route.c +@@ -93,11 +93,11 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route, + rose_neigh->ax25 = NULL; + rose_neigh->dev = dev; + rose_neigh->count = 0; +- rose_neigh->use = 0; + rose_neigh->dce_mode = 0; + rose_neigh->loopback = 0; + rose_neigh->number = rose_neigh_no++; + rose_neigh->restarted = 0; ++ refcount_set(&rose_neigh->use, 1); + + skb_queue_head_init(&rose_neigh->queue); + +@@ -178,6 +178,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route, + } + } + rose_neigh->count++; ++ rose_neigh_hold(rose_neigh); + + goto out; + } +@@ -187,6 +188,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route, + rose_node->neighbour[rose_node->count] = rose_neigh; + rose_node->count++; + rose_neigh->count++; ++ rose_neigh_hold(rose_neigh); + } + + out: +@@ -234,20 +236,12 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) + + if ((s = rose_neigh_list) == rose_neigh) { + rose_neigh_list = rose_neigh->next; +- if (rose_neigh->ax25) +- ax25_cb_put(rose_neigh->ax25); +- kfree(rose_neigh->digipeat); +- kfree(rose_neigh); + return; + } + + while (s != NULL && s->next != NULL) { + if (s->next == rose_neigh) { + s->next = rose_neigh->next; +- if (rose_neigh->ax25) +- ax25_cb_put(rose_neigh->ax25); +- kfree(rose_neigh->digipeat); +- kfree(rose_neigh); + return; + } + +@@ -263,10 +257,10 @@ static void rose_remove_route(struct rose_route *rose_route) + struct rose_route *s; + + if (rose_route->neigh1 != NULL) +- rose_route->neigh1->use--; ++ rose_neigh_put(rose_route->neigh1); + + if (rose_route->neigh2 != NULL) +- rose_route->neigh2->use--; ++ rose_neigh_put(rose_route->neigh2); + + if ((s = rose_route_list) == rose_route) { + rose_route_list = rose_route->next; +@@ -330,9 +324,12 @@ static int rose_del_node(struct rose_route_struct *rose_route, + for (i = 0; i < rose_node->count; i++) { + if (rose_node->neighbour[i] == rose_neigh) { + rose_neigh->count--; ++ rose_neigh_put(rose_neigh); + +- if (rose_neigh->count == 0 && rose_neigh->use == 0) ++ if (rose_neigh->count == 0) { + rose_remove_neigh(rose_neigh); ++ rose_neigh_put(rose_neigh); ++ } + + rose_node->count--; + +@@ -381,11 +378,11 @@ void rose_add_loopback_neigh(void) + sn->ax25 = NULL; + sn->dev = NULL; + sn->count = 0; +- sn->use = 0; + sn->dce_mode = 1; + sn->loopback = 1; + sn->number = rose_neigh_no++; + sn->restarted = 1; ++ refcount_set(&sn->use, 1); + + skb_queue_head_init(&sn->queue); + +@@ -436,6 +433,7 @@ int rose_add_loopback_node(const rose_address *address) + rose_node_list 
= rose_node; + + rose_loopback_neigh->count++; ++ rose_neigh_hold(rose_loopback_neigh); + + out: + spin_unlock_bh(&rose_node_list_lock); +@@ -467,6 +465,7 @@ void rose_del_loopback_node(const rose_address *address) + rose_remove_node(rose_node); + + rose_loopback_neigh->count--; ++ rose_neigh_put(rose_loopback_neigh); + + out: + spin_unlock_bh(&rose_node_list_lock); +@@ -506,6 +505,7 @@ void rose_rt_device_down(struct net_device *dev) + memmove(&t->neighbour[i], &t->neighbour[i + 1], + sizeof(t->neighbour[0]) * + (t->count - i)); ++ rose_neigh_put(s); + } + + if (t->count <= 0) +@@ -513,6 +513,7 @@ void rose_rt_device_down(struct net_device *dev) + } + + rose_remove_neigh(s); ++ rose_neigh_put(s); + } + spin_unlock_bh(&rose_neigh_list_lock); + spin_unlock_bh(&rose_node_list_lock); +@@ -548,6 +549,7 @@ static int rose_clear_routes(void) + { + struct rose_neigh *s, *rose_neigh; + struct rose_node *t, *rose_node; ++ int i; + + spin_lock_bh(&rose_node_list_lock); + spin_lock_bh(&rose_neigh_list_lock); +@@ -558,17 +560,21 @@ static int rose_clear_routes(void) + while (rose_node != NULL) { + t = rose_node; + rose_node = rose_node->next; +- if (!t->loopback) ++ ++ if (!t->loopback) { ++ for (i = 0; i < t->count; i++) ++ rose_neigh_put(t->neighbour[i]); + rose_remove_node(t); ++ } + } + + while (rose_neigh != NULL) { + s = rose_neigh; + rose_neigh = rose_neigh->next; + +- if (s->use == 0 && !s->loopback) { +- s->count = 0; ++ if (!s->loopback) { + rose_remove_neigh(s); ++ rose_neigh_put(s); + } + } + +@@ -684,6 +690,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, + for (i = 0; i < node->count; i++) { + if (node->neighbour[i]->restarted) { + res = node->neighbour[i]; ++ rose_neigh_hold(node->neighbour[i]); + goto out; + } + } +@@ -695,6 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, + for (i = 0; i < node->count; i++) { + if (!rose_ftimer_running(node->neighbour[i])) { + res = node->neighbour[i]; ++ rose_neigh_hold(node->neighbour[i]); + goto out; + } + failed = 1; +@@ -784,13 +792,13 @@ static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh) + } + + if (rose_route->neigh1 == rose_neigh) { +- rose_route->neigh1->use--; ++ rose_neigh_put(rose_route->neigh1); + rose_route->neigh1 = NULL; + rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0); + } + + if (rose_route->neigh2 == rose_neigh) { +- rose_route->neigh2->use--; ++ rose_neigh_put(rose_route->neigh2); + rose_route->neigh2 = NULL; + rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0); + } +@@ -919,7 +927,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) + rose_clear_queues(sk); + rose->cause = ROSE_NETWORK_CONGESTION; + rose->diagnostic = 0; +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + rose->neighbour = NULL; + rose->lci = 0; + rose->state = ROSE_STATE_0; +@@ -1044,12 +1052,12 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) + + if ((new_lci = rose_new_lci(new_neigh)) == 0) { + rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71); +- goto out; ++ goto put_neigh; + } + + if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) { + rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120); +- goto out; ++ goto put_neigh; + } + + rose_route->lci1 = lci; +@@ -1062,8 +1070,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) + rose_route->lci2 = new_lci; + rose_route->neigh2 = new_neigh; + +- 
rose_route->neigh1->use++; +- rose_route->neigh2->use++; ++ rose_neigh_hold(rose_route->neigh1); ++ rose_neigh_hold(rose_route->neigh2); + + rose_route->next = rose_route_list; + rose_route_list = rose_route; +@@ -1075,6 +1083,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) + rose_transmit_link(skb, rose_route->neigh2); + res = 1; + ++put_neigh: ++ rose_neigh_put(new_neigh); + out: + spin_unlock_bh(&rose_route_list_lock); + spin_unlock_bh(&rose_neigh_list_lock); +@@ -1190,7 +1200,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v) + (rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign), + rose_neigh->dev ? rose_neigh->dev->name : "???", + rose_neigh->count, +- rose_neigh->use, ++ refcount_read(&rose_neigh->use) - rose_neigh->count - 1, + (rose_neigh->dce_mode) ? "DCE" : "DTE", + (rose_neigh->restarted) ? "yes" : "no", + ax25_display_timer(&rose_neigh->t0timer) / HZ, +@@ -1295,18 +1305,22 @@ void __exit rose_rt_free(void) + struct rose_neigh *s, *rose_neigh = rose_neigh_list; + struct rose_node *t, *rose_node = rose_node_list; + struct rose_route *u, *rose_route = rose_route_list; ++ int i; + + while (rose_neigh != NULL) { + s = rose_neigh; + rose_neigh = rose_neigh->next; + + rose_remove_neigh(s); ++ rose_neigh_put(s); + } + + while (rose_node != NULL) { + t = rose_node; + rose_node = rose_node->next; + ++ for (i = 0; i < t->count; i++) ++ rose_neigh_put(t->neighbour[i]); + rose_remove_node(t); + } + +diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c +index 1525773e94aa17..c52d7d20c5199b 100644 +--- a/net/rose/rose_timer.c ++++ b/net/rose/rose_timer.c +@@ -180,7 +180,7 @@ static void rose_timer_expiry(struct timer_list *t) + break; + + case ROSE_STATE_2: /* T3 */ +- rose->neighbour->use--; ++ rose_neigh_put(rose->neighbour); + rose_disconnect(sk, ETIMEDOUT, -1, -1); + break; + +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 717828e531621a..0673857cb3d8b1 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -547,7 +547,9 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) + { + addr->v6.sin6_family = AF_INET6; + addr->v6.sin6_port = 0; ++ addr->v6.sin6_flowinfo = 0; + addr->v6.sin6_addr = sk->sk_v6_rcv_saddr; ++ addr->v6.sin6_scope_id = 0; + } + + /* Initialize sk->sk_rcv_saddr from sctp_addr. */ +diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c +index ebddfa74ce0a07..150ed10c8377ab 100644 +--- a/sound/soc/codecs/lpass-tx-macro.c ++++ b/sound/soc/codecs/lpass-tx-macro.c +@@ -1940,7 +1940,7 @@ static int tx_macro_register_mclk_output(struct tx_macro *tx) + } + + static const struct snd_soc_component_driver tx_macro_component_drv = { +- .name = "RX-MACRO", ++ .name = "TX-MACRO", + .probe = tx_macro_component_probe, + .controls = tx_macro_snd_controls, + .num_controls = ARRAY_SIZE(tx_macro_snd_controls), diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.104-105.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.104-105.patch new file mode 100644 index 0000000000..6394a1e504 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.104-105.patch @@ -0,0 +1,5454 @@ +diff --git a/Documentation/userspace-api/netlink/specs.rst b/Documentation/userspace-api/netlink/specs.rst +index cc4e2430997ef8..a8218284e67a42 100644 +--- a/Documentation/userspace-api/netlink/specs.rst ++++ b/Documentation/userspace-api/netlink/specs.rst +@@ -408,10 +408,21 @@ This section describes the attribute types supported by the ``genetlink`` + compatibility level. 
Refer to documentation of different levels for additional + attribute types. + +-Scalar integer types ++Common integer types + -------------------- + +-Fixed-width integer types: ++``sint`` and ``uint`` represent signed and unsigned 64 bit integers. ++If the value can fit on 32 bits only 32 bits are carried in netlink ++messages, otherwise full 64 bits are carried. Note that the payload ++is only aligned to 4B, so the full 64 bit value may be unaligned! ++ ++Common integer types should be preferred over fix-width types in majority ++of cases. ++ ++Fix-width integer types ++----------------------- ++ ++Fixed-width integer types include: + ``u8``, ``u16``, ``u32``, ``u64``, ``s8``, ``s16``, ``s32``, ``s64``. + + Note that types smaller than 32 bit should be avoided as using them +@@ -421,6 +432,9 @@ See :ref:`pad_type` for padding of 64 bit attributes. + The payload of the attribute is the integer in host order unless ``byte-order`` + specifies otherwise. + ++64 bit values are usually aligned by the kernel but it is recommended ++that the user space is able to deal with unaligned values. ++ + .. _pad_type: + + pad +diff --git a/Makefile b/Makefile +index ae57f816375ebd..2b7f67d7b641ce 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 104 ++SUBLEVEL = 105 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts +index cd44bf83745cae..678ecc9f81dbb7 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts ++++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts +@@ -442,6 +442,7 @@ &usdhc2 { + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; ++ vqmmc-supply = <&ldo5>; + bus-width = <4>; + status = "okay"; + }; +diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi +index eae39c1cb98568..2e93d922c86111 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi ++++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi +@@ -571,6 +571,7 @@ &usdhc2 { + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; ++ vqmmc-supply = <&ldo5>; + bus-width = <4>; + status = "okay"; + }; +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +index f5e124b235c83c..fb3012a6c9fc30 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +@@ -967,6 +967,7 @@ spiflash: flash@0 { + reg = <0>; + m25p,fast-read; + spi-max-frequency = <10000000>; ++ vcc-supply = <&vcc_3v0>; + }; + }; + +diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h +index bfa6638b4c930c..8f7ac23c404d99 100644 +--- a/arch/arm64/include/asm/module.h ++++ b/arch/arm64/include/asm/module.h +@@ -19,6 +19,7 @@ struct mod_arch_specific { + + /* for CONFIG_DYNAMIC_FTRACE */ + struct plt_entry *ftrace_trampolines; ++ struct plt_entry *init_ftrace_trampolines; + }; + + u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, +diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h +index b9ae8349e35dbb..fb944b46846dae 100644 +--- a/arch/arm64/include/asm/module.lds.h ++++ 
b/arch/arm64/include/asm/module.lds.h +@@ -2,6 +2,7 @@ SECTIONS { + .plt 0 : { BYTE(0) } + .init.plt 0 : { BYTE(0) } + .text.ftrace_trampoline 0 : { BYTE(0) } ++ .init.text.ftrace_trampoline 0 : { BYTE(0) } + + #ifdef CONFIG_KASAN_SW_TAGS + /* +diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c +index a650f5e11fc5d8..b657f058bf4d50 100644 +--- a/arch/arm64/kernel/ftrace.c ++++ b/arch/arm64/kernel/ftrace.c +@@ -195,10 +195,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func) + return ftrace_modify_code(pc, 0, new, false); + } + +-static struct plt_entry *get_ftrace_plt(struct module *mod) ++static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) + { + #ifdef CONFIG_MODULES +- struct plt_entry *plt = mod->arch.ftrace_trampolines; ++ struct plt_entry *plt = NULL; ++ ++ if (within_module_mem_type(addr, mod, MOD_INIT_TEXT)) ++ plt = mod->arch.init_ftrace_trampolines; ++ else if (within_module_mem_type(addr, mod, MOD_TEXT)) ++ plt = mod->arch.ftrace_trampolines; ++ else ++ return NULL; + + return &plt[FTRACE_PLT_IDX]; + #else +@@ -270,7 +277,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, + if (WARN_ON(!mod)) + return false; + +- plt = get_ftrace_plt(mod); ++ plt = get_ftrace_plt(mod, pc); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); + return false; +diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c +index 79200f21e12393..e4ddb1642ee22d 100644 +--- a/arch/arm64/kernel/module-plts.c ++++ b/arch/arm64/kernel/module-plts.c +@@ -284,7 +284,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + unsigned long core_plts = 0; + unsigned long init_plts = 0; + Elf64_Sym *syms = NULL; +- Elf_Shdr *pltsec, *tramp = NULL; ++ Elf_Shdr *pltsec, *tramp = NULL, *init_tramp = NULL; + int i; + + /* +@@ -299,6 +299,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + else if (!strcmp(secstrings + sechdrs[i].sh_name, + ".text.ftrace_trampoline")) + tramp = sechdrs + i; ++ else if (!strcmp(secstrings + sechdrs[i].sh_name, ++ ".init.text.ftrace_trampoline")) ++ init_tramp = sechdrs + i; + else if (sechdrs[i].sh_type == SHT_SYMTAB) + syms = (Elf64_Sym *)sechdrs[i].sh_addr; + } +@@ -364,5 +367,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry); + } + ++ if (init_tramp) { ++ init_tramp->sh_type = SHT_NOBITS; ++ init_tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; ++ init_tramp->sh_addralign = __alignof__(struct plt_entry); ++ init_tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry); ++ } ++ + return 0; + } +diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c +index dd851297596e5e..adaf2920773b37 100644 +--- a/arch/arm64/kernel/module.c ++++ b/arch/arm64/kernel/module.c +@@ -579,6 +579,17 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr, + __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR); + + mod->arch.ftrace_trampolines = plts; ++ ++ s = find_section(hdr, sechdrs, ".init.text.ftrace_trampoline"); ++ if (!s) ++ return -ENOEXEC; ++ ++ plts = (void *)s->sh_addr; ++ ++ __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR); ++ ++ mod->arch.init_ftrace_trampolines = plts; ++ + #endif + return 0; + } +diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c +index 4a3686d1334949..0e90cd2df0ea3a 100644 +--- a/arch/loongarch/kernel/signal.c ++++ b/arch/loongarch/kernel/signal.c +@@ -697,6 +697,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct 
sigcontext __user *sc, + for (i = 1; i < 32; i++) + err |= __put_user(regs->regs[i], &sc->sc_regs[i]); + ++#ifdef CONFIG_CPU_HAS_LBT ++ if (extctx->lbt.addr) ++ err |= protected_save_lbt_context(extctx); ++#endif ++ + if (extctx->lasx.addr) + err |= protected_save_lasx_context(extctx); + else if (extctx->lsx.addr) +@@ -704,11 +709,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, + else if (extctx->fpu.addr) + err |= protected_save_fpu_context(extctx); + +-#ifdef CONFIG_CPU_HAS_LBT +- if (extctx->lbt.addr) +- err |= protected_save_lbt_context(extctx); +-#endif +- + /* Set the "end" magic */ + info = (struct sctx_info *)extctx->end.addr; + err |= __put_user(0, &info->magic); +diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h +index b5b84c6be01e16..da818b39a24cc4 100644 +--- a/arch/riscv/include/asm/asm.h ++++ b/arch/riscv/include/asm/asm.h +@@ -90,7 +90,7 @@ + #endif + + .macro asm_per_cpu dst sym tmp +- REG_L \tmp, TASK_TI_CPU_NUM(tp) ++ lw \tmp, TASK_TI_CPU_NUM(tp) + slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT + la \dst, __per_cpu_offset + add \dst, \dst, \tmp +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h +index 35c416f061552b..ea95303ab15b81 100644 +--- a/arch/x86/include/asm/pgtable_64_types.h ++++ b/arch/x86/include/asm/pgtable_64_types.h +@@ -41,6 +41,9 @@ static inline bool pgtable_l5_enabled(void) + #define pgtable_l5_enabled() 0 + #endif /* CONFIG_X86_5LEVEL */ + ++#define ARCH_PAGE_TABLE_SYNC_MASK \ ++ (pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED) ++ + extern unsigned int pgdir_shift; + extern unsigned int ptrs_per_p4d; + +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index 11eb93e13ce175..cf080c96b7dd66 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -223,6 +223,24 @@ static void sync_global_pgds(unsigned long start, unsigned long end) + sync_global_pgds_l4(start, end); + } + ++/* ++ * Make kernel mappings visible in all page tables in the system. ++ * This is necessary except when the init task populates kernel mappings ++ * during the boot process. In that case, all processes originating from ++ * the init task copies the kernel mappings, so there is no issue. ++ * Otherwise, missing synchronization could lead to kernel crashes due ++ * to missing page table entries for certain kernel mappings. ++ * ++ * Synchronization is performed at the top level, which is the PGD in ++ * 5-level paging systems. But in 4-level paging systems, however, ++ * pgd_populate() is a no-op, so synchronization is done at the P4D level. ++ * sync_global_pgds() handles this difference between paging levels. ++ */ ++void arch_sync_kernel_mappings(unsigned long start, unsigned long end) ++{ ++ sync_global_pgds(start, end); ++} ++ + /* + * NOTE: This function is marked __ref because it calls __init function + * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. 
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c +index 1a31106a14e446..6ee8382a32302e 100644 +--- a/drivers/acpi/arm64/iort.c ++++ b/drivers/acpi/arm64/iort.c +@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start, + + new_sids = krealloc_array(sids, count + new_count, + sizeof(*new_sids), GFP_KERNEL); +- if (!new_sids) ++ if (!new_sids) { ++ kfree(sids); + return NULL; ++ } + + for (i = count; i < total_count; i++) + new_sids[i] = id_start++; +diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c +index 4bfc78f9781ede..0935045051699f 100644 +--- a/drivers/bluetooth/hci_vhci.c ++++ b/drivers/bluetooth/hci_vhci.c +@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = { + .write = force_devcd_write, + }; + ++static void vhci_debugfs_init(struct vhci_data *data) ++{ ++ struct hci_dev *hdev = data->hdev; ++ ++ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, ++ &force_suspend_fops); ++ ++ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, ++ &force_wakeup_fops); ++ ++ if (IS_ENABLED(CONFIG_BT_MSFTEXT)) ++ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, ++ &msft_opcode_fops); ++ ++ if (IS_ENABLED(CONFIG_BT_AOSPEXT)) ++ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, ++ &aosp_capable_fops); ++ ++ debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, ++ &force_devcoredump_fops); ++} ++ + static int __vhci_create_device(struct vhci_data *data, __u8 opcode) + { + struct hci_dev *hdev; +@@ -435,22 +457,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) + return -EBUSY; + } + +- debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, +- &force_suspend_fops); +- +- debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, +- &force_wakeup_fops); +- +- if (IS_ENABLED(CONFIG_BT_MSFTEXT)) +- debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, +- &msft_opcode_fops); +- +- if (IS_ENABLED(CONFIG_BT_AOSPEXT)) +- debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, +- &aosp_capable_fops); +- +- debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, +- &force_devcoredump_fops); ++ if (!IS_ERR_OR_NULL(hdev->debugfs)) ++ vhci_debugfs_init(data); + + hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; + +@@ -652,6 +660,21 @@ static int vhci_open(struct inode *inode, struct file *file) + return 0; + } + ++static void vhci_debugfs_remove(struct hci_dev *hdev) ++{ ++ debugfs_lookup_and_remove("force_suspend", hdev->debugfs); ++ ++ debugfs_lookup_and_remove("force_wakeup", hdev->debugfs); ++ ++ if (IS_ENABLED(CONFIG_BT_MSFTEXT)) ++ debugfs_lookup_and_remove("msft_opcode", hdev->debugfs); ++ ++ if (IS_ENABLED(CONFIG_BT_AOSPEXT)) ++ debugfs_lookup_and_remove("aosp_capable", hdev->debugfs); ++ ++ debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs); ++} ++ + static int vhci_release(struct inode *inode, struct file *file) + { + struct vhci_data *data = file->private_data; +@@ -663,6 +686,8 @@ static int vhci_release(struct inode *inode, struct file *file) + hdev = data->hdev; + + if (hdev) { ++ if (!IS_ERR_OR_NULL(hdev->debugfs)) ++ vhci_debugfs_remove(hdev); + hci_unregister_dev(hdev); + hci_free_dev(hdev); + } +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 4f1206ff0a10e9..ed782c0b48af25 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -172,7 +172,6 @@ struct vid_data { + * based on the MSR_IA32_MISC_ENABLE value 
and whether or + * not the maximum reported turbo P-state is different from + * the maximum reported non-turbo one. +- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. + * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo + * P-state capacity. + * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo +@@ -181,7 +180,6 @@ struct vid_data { + struct global_params { + bool no_turbo; + bool turbo_disabled; +- bool turbo_disabled_mf; + int max_perf_pct; + int min_perf_pct; + }; +@@ -592,16 +590,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) + cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq); + } + +-static inline void update_turbo_state(void) ++static bool turbo_is_disabled(void) + { + u64 misc_en; +- struct cpudata *cpu; + +- cpu = all_cpu_data[0]; ++ if (!cpu_feature_enabled(X86_FEATURE_IDA)) ++ return true; ++ + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); +- global.turbo_disabled = +- (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || +- cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); ++ ++ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); + } + + static int min_perf_pct_min(void) +@@ -1156,40 +1154,16 @@ static void intel_pstate_update_policies(void) + static void __intel_pstate_update_max_freq(struct cpudata *cpudata, + struct cpufreq_policy *policy) + { +- policy->cpuinfo.max_freq = global.turbo_disabled_mf ? ++ policy->cpuinfo.max_freq = global.turbo_disabled ? + cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; + refresh_frequency_limits(policy); + } + +-static void intel_pstate_update_max_freq(unsigned int cpu) +-{ +- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); +- +- if (!policy) +- return; +- +- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); +- +- cpufreq_cpu_release(policy); +-} +- + static void intel_pstate_update_limits(unsigned int cpu) + { + mutex_lock(&intel_pstate_driver_lock); + +- update_turbo_state(); +- /* +- * If turbo has been turned on or off globally, policy limits for +- * all CPUs need to be updated to reflect that. 
+- */ +- if (global.turbo_disabled_mf != global.turbo_disabled) { +- global.turbo_disabled_mf = global.turbo_disabled; +- arch_set_max_freq_ratio(global.turbo_disabled); +- for_each_possible_cpu(cpu) +- intel_pstate_update_max_freq(cpu); +- } else { +- cpufreq_update_policy(cpu); +- } ++ cpufreq_update_policy(cpu); + + mutex_unlock(&intel_pstate_driver_lock); + } +@@ -1289,11 +1263,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, + return -EAGAIN; + } + +- update_turbo_state(); +- if (global.turbo_disabled) +- ret = sprintf(buf, "%u\n", global.turbo_disabled); +- else +- ret = sprintf(buf, "%u\n", global.no_turbo); ++ ret = sprintf(buf, "%u\n", global.no_turbo); + + mutex_unlock(&intel_pstate_driver_lock); + +@@ -1304,32 +1274,39 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, + const char *buf, size_t count) + { + unsigned int input; +- int ret; ++ bool no_turbo; + +- ret = sscanf(buf, "%u", &input); +- if (ret != 1) ++ if (sscanf(buf, "%u", &input) != 1) + return -EINVAL; + + mutex_lock(&intel_pstate_driver_lock); + + if (!intel_pstate_driver) { +- mutex_unlock(&intel_pstate_driver_lock); +- return -EAGAIN; ++ count = -EAGAIN; ++ goto unlock_driver; + } + +- mutex_lock(&intel_pstate_limits_lock); ++ no_turbo = !!clamp_t(int, input, 0, 1); + +- update_turbo_state(); +- if (global.turbo_disabled) { +- pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); +- mutex_unlock(&intel_pstate_limits_lock); +- mutex_unlock(&intel_pstate_driver_lock); +- return -EPERM; ++ WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); ++ if (global.turbo_disabled && !no_turbo) { ++ pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); ++ count = -EPERM; ++ if (global.no_turbo) ++ goto unlock_driver; ++ else ++ no_turbo = 1; + } + +- global.no_turbo = clamp_t(int, input, 0, 1); ++ if (no_turbo == global.no_turbo) { ++ goto unlock_driver; ++ } + +- if (global.no_turbo) { ++ WRITE_ONCE(global.no_turbo, no_turbo); ++ ++ mutex_lock(&intel_pstate_limits_lock); ++ ++ if (no_turbo) { + struct cpudata *cpu = all_cpu_data[0]; + int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; + +@@ -1341,8 +1318,9 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, + mutex_unlock(&intel_pstate_limits_lock); + + intel_pstate_update_policies(); +- arch_set_max_freq_ratio(global.no_turbo); ++ arch_set_max_freq_ratio(no_turbo); + ++unlock_driver: + mutex_unlock(&intel_pstate_driver_lock); + + return count; +@@ -1793,7 +1771,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) + u32 vid; + + val = (u64)pstate << 8; +- if (global.no_turbo && !global.turbo_disabled) ++ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) + val |= (u64)1 << 32; + + vid_fp = cpudata->vid.min + mul_fp( +@@ -1958,7 +1936,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) + u64 val; + + val = (u64)pstate << 8; +- if (global.no_turbo && !global.turbo_disabled) ++ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) + val |= (u64)1 << 32; + + return val; +@@ -2031,14 +2009,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); + } + +-static void intel_pstate_max_within_limits(struct cpudata *cpu) +-{ +- int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); +- +- update_turbo_state(); +- intel_pstate_set_pstate(cpu, pstate); +-} +- + static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) + { + int perf_ctl_max_phys 
= pstate_funcs.get_max_physical(cpu->cpu); +@@ -2264,7 +2234,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) + + sample->busy_scaled = busy_frac * 100; + +- target = global.no_turbo || global.turbo_disabled ? ++ target = READ_ONCE(global.no_turbo) ? + cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; + target += target >> 2; + target = mul_fp(target, busy_frac); +@@ -2308,8 +2278,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu) + struct sample *sample; + int target_pstate; + +- update_turbo_state(); +- + target_pstate = get_target_pstate(cpu); + target_pstate = intel_pstate_prepare_request(cpu, target_pstate); + trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); +@@ -2527,7 +2495,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) + + static int intel_pstate_get_max_freq(struct cpudata *cpu) + { +- return global.turbo_disabled || global.no_turbo ? ++ return READ_ONCE(global.no_turbo) ? + cpu->pstate.max_freq : cpu->pstate.turbo_freq; + } + +@@ -2612,12 +2580,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); + + if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { ++ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); ++ + /* + * NOHZ_FULL CPUs need this as the governor callback may not + * be invoked on them. + */ + intel_pstate_clear_update_util_hook(policy->cpu); +- intel_pstate_max_within_limits(cpu); ++ intel_pstate_set_pstate(cpu, pstate); + } else { + intel_pstate_set_update_util_hook(policy->cpu); + } +@@ -2660,10 +2630,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, + { + int max_freq; + +- update_turbo_state(); + if (hwp_active) { + intel_pstate_get_hwp_cap(cpu); +- max_freq = global.no_turbo || global.turbo_disabled ? ++ max_freq = READ_ONCE(global.no_turbo) ? + cpu->pstate.max_freq : cpu->pstate.turbo_freq; + } else { + max_freq = intel_pstate_get_max_freq(cpu); +@@ -2757,8 +2726,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) + + /* cpuinfo and default policy values */ + policy->cpuinfo.min_freq = cpu->pstate.min_freq; +- update_turbo_state(); +- global.turbo_disabled_mf = global.turbo_disabled; + policy->cpuinfo.max_freq = global.turbo_disabled ? + cpu->pstate.max_freq : cpu->pstate.turbo_freq; + +@@ -2924,8 +2891,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, + struct cpufreq_freqs freqs; + int target_pstate; + +- update_turbo_state(); +- + freqs.old = policy->cur; + freqs.new = target_freq; + +@@ -2947,8 +2912,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, + struct cpudata *cpu = all_cpu_data[policy->cpu]; + int target_pstate; + +- update_turbo_state(); +- + target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq); + + target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); +@@ -2966,7 +2929,6 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, + int old_pstate = cpu->pstate.current_pstate; + int cap_pstate, min_pstate, max_pstate, target_pstate; + +- update_turbo_state(); + cap_pstate = global.turbo_disabled ? 
HWP_GUARANTEED_PERF(hwp_cap) : + HWP_HIGHEST_PERF(hwp_cap); + +@@ -3156,6 +3118,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) + + memset(&global, 0, sizeof(global)); + global.max_perf_pct = 100; ++ global.turbo_disabled = turbo_is_disabled(); ++ global.no_turbo = global.turbo_disabled; ++ ++ arch_set_max_freq_ratio(global.turbo_disabled); + + intel_pstate_driver = driver; + ret = cpufreq_register_driver(intel_pstate_driver); +diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c +index 324b7387b1b922..525bb92ced8f82 100644 +--- a/drivers/dma/mediatek/mtk-cqdma.c ++++ b/drivers/dma/mediatek/mtk-cqdma.c +@@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, + { + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + struct virt_dma_desc *vd; +- unsigned long flags; + +- spin_lock_irqsave(&cvc->pc->lock, flags); + list_for_each_entry(vd, &cvc->pc->queue, node) + if (vd->tx.cookie == cookie) { +- spin_unlock_irqrestore(&cvc->pc->lock, flags); + return vd; + } +- spin_unlock_irqrestore(&cvc->pc->lock, flags); + + list_for_each_entry(vd, &cvc->vc.desc_issued, node) + if (vd->tx.cookie == cookie) +@@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, + if (ret == DMA_COMPLETE || !txstate) + return ret; + +- spin_lock_irqsave(&cvc->vc.lock, flags); ++ spin_lock_irqsave(&cvc->pc->lock, flags); ++ spin_lock(&cvc->vc.lock); + vd = mtk_cqdma_find_active_desc(c, cookie); +- spin_unlock_irqrestore(&cvc->vc.lock, flags); ++ spin_unlock(&cvc->vc.lock); ++ spin_unlock_irqrestore(&cvc->pc->lock, flags); + + if (vd) { + cvd = to_cqdma_vdesc(vd); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +index ffa5e72a84ebcb..c83445c2e37f3d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +@@ -291,21 +291,22 @@ static int psp_memory_training_init(struct psp_context *psp) + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { +- DRM_DEBUG("memory training is not supported!\n"); ++ dev_dbg(psp->adev->dev, "memory training is not supported!\n"); + return 0; + } + + ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); + if (ctx->sys_cache == NULL) { +- DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); ++ dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); + ret = -ENOMEM; + goto Err_out; + } + +- DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", +- ctx->train_data_size, +- ctx->p2c_train_data_offset, +- ctx->c2p_train_data_offset); ++ dev_dbg(psp->adev->dev, ++ "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", ++ ctx->train_data_size, ++ ctx->p2c_train_data_offset, ++ ctx->c2p_train_data_offset); + ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; + return 0; + +@@ -407,8 +408,8 @@ static int psp_sw_init(void *handle) + + psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!psp->cmd) { +- DRM_ERROR("Failed to allocate memory to command buffer!\n"); +- ret = -ENOMEM; ++ dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); ++ return -ENOMEM; + } + + adev->psp.xgmi_context.supports_extended_data = +@@ -454,13 +455,13 @@ static int psp_sw_init(void *handle) + if (mem_training_ctx->enable_mem_training) { + ret = psp_memory_training_init(psp); + if (ret) { +- DRM_ERROR("Failed to initialize memory training!\n"); ++ 
dev_err(adev->dev, "Failed to initialize memory training!\n"); + return ret; + } + + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); + if (ret) { +- DRM_ERROR("Failed to process memory training!\n"); ++ dev_err(adev->dev, "Failed to process memory training!\n"); + return ret; + } + } +@@ -674,9 +675,11 @@ psp_cmd_submit_buf(struct psp_context *psp, + */ + if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { + if (ucode) +- DRM_WARN("failed to load ucode %s(0x%X) ", +- amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); +- DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n", ++ dev_warn(psp->adev->dev, ++ "failed to load ucode %s(0x%X) ", ++ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); ++ dev_warn(psp->adev->dev, ++ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", + psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, + psp->cmd_buf_mem->resp.status); + /* If any firmware (including CAP) load fails under SRIOV, it should +@@ -806,7 +809,7 @@ static int psp_tmr_init(struct psp_context *psp) + psp->fw_pri_buf) { + ret = psp_load_toc(psp, &tmr_size); + if (ret) { +- DRM_ERROR("Failed to load toc\n"); ++ dev_err(psp->adev->dev, "Failed to load toc\n"); + return ret; + } + } +@@ -854,7 +857,7 @@ static int psp_tmr_load(struct psp_context *psp) + + psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); + if (psp->tmr_bo) +- DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", ++ dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", + amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, +@@ -1112,7 +1115,7 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, + psp_prep_reg_prog_cmd_buf(cmd, reg, value); + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) +- DRM_ERROR("PSP failed to program reg id %d", reg); ++ dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); + + release_psp_cmd_buf(psp); + +@@ -1492,22 +1495,22 @@ static void psp_ras_ta_check_status(struct psp_context *psp) + switch (ras_cmd->ras_status) { + case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: + dev_warn(psp->adev->dev, +- "RAS WARNING: cmd failed due to unsupported ip\n"); ++ "RAS WARNING: cmd failed due to unsupported ip\n"); + break; + case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: + dev_warn(psp->adev->dev, +- "RAS WARNING: cmd failed due to unsupported error injection\n"); ++ "RAS WARNING: cmd failed due to unsupported error injection\n"); + break; + case TA_RAS_STATUS__SUCCESS: + break; + case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: + if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) + dev_warn(psp->adev->dev, +- "RAS WARNING: Inject error to critical region is not allowed\n"); ++ "RAS WARNING: Inject error to critical region is not allowed\n"); + break; + default: + dev_warn(psp->adev->dev, +- "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); ++ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); + break; + } + } +@@ -1531,7 +1534,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) + return ret; + + if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { +- DRM_WARN("RAS: Unsupported Interface"); ++ dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); + return -EINVAL; + } + +@@ -1681,7 +1684,7 @@ int psp_ras_initialize(struct psp_context *psp) + psp->ras_context.context.initialized = true; + else { + if (ras_cmd->ras_status) +- dev_warn(psp->adev->dev, "RAS Init Status: 
0x%X\n", ras_cmd->ras_status); ++ dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); + + /* fail to load RAS TA */ + psp->ras_context.context.initialized = false; +@@ -2101,7 +2104,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_kdb != NULL)) { + ret = psp_bootloader_load_kdb(psp); + if (ret) { +- DRM_ERROR("PSP load kdb failed!\n"); ++ dev_err(adev->dev, "PSP load kdb failed!\n"); + return ret; + } + } +@@ -2110,7 +2113,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_spl != NULL)) { + ret = psp_bootloader_load_spl(psp); + if (ret) { +- DRM_ERROR("PSP load spl failed!\n"); ++ dev_err(adev->dev, "PSP load spl failed!\n"); + return ret; + } + } +@@ -2119,7 +2122,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_sysdrv != NULL)) { + ret = psp_bootloader_load_sysdrv(psp); + if (ret) { +- DRM_ERROR("PSP load sys drv failed!\n"); ++ dev_err(adev->dev, "PSP load sys drv failed!\n"); + return ret; + } + } +@@ -2128,7 +2131,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_soc_drv != NULL)) { + ret = psp_bootloader_load_soc_drv(psp); + if (ret) { +- DRM_ERROR("PSP load soc drv failed!\n"); ++ dev_err(adev->dev, "PSP load soc drv failed!\n"); + return ret; + } + } +@@ -2137,7 +2140,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_intf_drv != NULL)) { + ret = psp_bootloader_load_intf_drv(psp); + if (ret) { +- DRM_ERROR("PSP load intf drv failed!\n"); ++ dev_err(adev->dev, "PSP load intf drv failed!\n"); + return ret; + } + } +@@ -2146,7 +2149,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_dbg_drv != NULL)) { + ret = psp_bootloader_load_dbg_drv(psp); + if (ret) { +- DRM_ERROR("PSP load dbg drv failed!\n"); ++ dev_err(adev->dev, "PSP load dbg drv failed!\n"); + return ret; + } + } +@@ -2155,7 +2158,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_ras_drv != NULL)) { + ret = psp_bootloader_load_ras_drv(psp); + if (ret) { +- DRM_ERROR("PSP load ras_drv failed!\n"); ++ dev_err(adev->dev, "PSP load ras_drv failed!\n"); + return ret; + } + } +@@ -2164,7 +2167,7 @@ static int psp_hw_start(struct psp_context *psp) + (psp->funcs->bootloader_load_sos != NULL)) { + ret = psp_bootloader_load_sos(psp); + if (ret) { +- DRM_ERROR("PSP load sos failed!\n"); ++ dev_err(adev->dev, "PSP load sos failed!\n"); + return ret; + } + } +@@ -2172,7 +2175,7 @@ static int psp_hw_start(struct psp_context *psp) + + ret = psp_ring_create(psp, PSP_RING_TYPE__KM); + if (ret) { +- DRM_ERROR("PSP create ring failed!\n"); ++ dev_err(adev->dev, "PSP create ring failed!\n"); + return ret; + } + +@@ -2182,7 +2185,7 @@ static int psp_hw_start(struct psp_context *psp) + if (!psp_boottime_tmr(psp)) { + ret = psp_tmr_init(psp); + if (ret) { +- DRM_ERROR("PSP tmr init failed!\n"); ++ dev_err(adev->dev, "PSP tmr init failed!\n"); + return ret; + } + } +@@ -2201,7 +2204,7 @@ static int psp_hw_start(struct psp_context *psp) + + ret = psp_tmr_load(psp); + if (ret) { +- DRM_ERROR("PSP load tmr failed!\n"); ++ dev_err(adev->dev, "PSP load tmr failed!\n"); + return ret; + } + +@@ -2448,7 +2451,8 @@ static void psp_print_fw_hdr(struct psp_context *psp, + } + } + +-static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, ++static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, ++ struct amdgpu_firmware_info *ucode, + struct psp_gfx_cmd_resp *cmd) + { + int 
ret; +@@ -2461,7 +2465,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, + + ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); + if (ret) +- DRM_ERROR("Unknown firmware type\n"); ++ dev_err(psp->adev->dev, "Unknown firmware type\n"); + + return ret; + } +@@ -2472,7 +2476,7 @@ int psp_execute_ip_fw_load(struct psp_context *psp, + int ret = 0; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + +- ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd); ++ ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); + if (!ret) { + ret = psp_cmd_submit_buf(psp, ucode, cmd, + psp->fence_buf_mc_addr); +@@ -2507,13 +2511,13 @@ static int psp_load_smu_fw(struct psp_context *psp) + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) { + ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); + if (ret) +- DRM_WARN("Failed to set MP1 state prepare for reload\n"); ++ dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); + } + + ret = psp_execute_ip_fw_load(psp, ucode); + + if (ret) +- DRM_ERROR("PSP load smu failed!\n"); ++ dev_err(adev->dev, "PSP load smu failed!\n"); + + return ret; + } +@@ -2609,7 +2613,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp) + adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { + ret = psp_rlc_autoload_start(psp); + if (ret) { +- DRM_ERROR("Failed to start rlc autoload\n"); ++ dev_err(adev->dev, "Failed to start rlc autoload\n"); + return ret; + } + } +@@ -2631,7 +2635,7 @@ static int psp_load_fw(struct amdgpu_device *adev) + + ret = psp_ring_init(psp, PSP_RING_TYPE__KM); + if (ret) { +- DRM_ERROR("PSP ring init failed!\n"); ++ dev_err(adev->dev, "PSP ring init failed!\n"); + goto failed; + } + } +@@ -2646,13 +2650,13 @@ static int psp_load_fw(struct amdgpu_device *adev) + + ret = psp_asd_initialize(psp); + if (ret) { +- DRM_ERROR("PSP load asd failed!\n"); ++ dev_err(adev->dev, "PSP load asd failed!\n"); + goto failed1; + } + + ret = psp_rl_load(adev); + if (ret) { +- DRM_ERROR("PSP load RL failed!\n"); ++ dev_err(adev->dev, "PSP load RL failed!\n"); + goto failed1; + } + +@@ -2672,7 +2676,7 @@ static int psp_load_fw(struct amdgpu_device *adev) + ret = psp_ras_initialize(psp); + if (ret) + dev_err(psp->adev->dev, +- "RAS: Failed to initialize RAS\n"); ++ "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) +@@ -2725,7 +2729,7 @@ static int psp_hw_init(void *handle) + + ret = psp_load_fw(adev); + if (ret) { +- DRM_ERROR("PSP firmware loading failed\n"); ++ dev_err(adev->dev, "PSP firmware loading failed\n"); + goto failed; + } + +@@ -2772,7 +2776,7 @@ static int psp_suspend(void *handle) + psp->xgmi_context.context.initialized) { + ret = psp_xgmi_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate xgmi ta\n"); ++ dev_err(adev->dev, "Failed to terminate xgmi ta\n"); + goto out; + } + } +@@ -2780,46 +2784,46 @@ static int psp_suspend(void *handle) + if (psp->ta_fw) { + ret = psp_ras_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate ras ta\n"); ++ dev_err(adev->dev, "Failed to terminate ras ta\n"); + goto out; + } + ret = psp_hdcp_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate hdcp ta\n"); ++ dev_err(adev->dev, "Failed to terminate hdcp ta\n"); + goto out; + } + ret = psp_dtm_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate dtm ta\n"); ++ dev_err(adev->dev, "Failed to terminate dtm ta\n"); + goto out; + } + ret = psp_rap_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate rap ta\n"); ++ dev_err(adev->dev, 
"Failed to terminate rap ta\n"); + goto out; + } + ret = psp_securedisplay_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate securedisplay ta\n"); ++ dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); + goto out; + } + } + + ret = psp_asd_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate asd\n"); ++ dev_err(adev->dev, "Failed to terminate asd\n"); + goto out; + } + + ret = psp_tmr_terminate(psp); + if (ret) { +- DRM_ERROR("Failed to terminate tmr\n"); ++ dev_err(adev->dev, "Failed to terminate tmr\n"); + goto out; + } + + ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); + if (ret) +- DRM_ERROR("PSP ring stop failed\n"); ++ dev_err(adev->dev, "PSP ring stop failed\n"); + + out: + return ret; +@@ -2831,12 +2835,12 @@ static int psp_resume(void *handle) + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; + +- DRM_INFO("PSP is resuming...\n"); ++ dev_info(adev->dev, "PSP is resuming...\n"); + + if (psp->mem_train_ctx.enable_mem_training) { + ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); + if (ret) { +- DRM_ERROR("Failed to process memory training!\n"); ++ dev_err(adev->dev, "Failed to process memory training!\n"); + return ret; + } + } +@@ -2853,7 +2857,7 @@ static int psp_resume(void *handle) + + ret = psp_asd_initialize(psp); + if (ret) { +- DRM_ERROR("PSP load asd failed!\n"); ++ dev_err(adev->dev, "PSP load asd failed!\n"); + goto failed; + } + +@@ -2877,7 +2881,7 @@ static int psp_resume(void *handle) + ret = psp_ras_initialize(psp); + if (ret) + dev_err(psp->adev->dev, +- "RAS: Failed to initialize RAS\n"); ++ "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) +@@ -2905,7 +2909,7 @@ static int psp_resume(void *handle) + return 0; + + failed: +- DRM_ERROR("PSP resume failed\n"); ++ dev_err(adev->dev, "PSP resume failed\n"); + mutex_unlock(&adev->firmware.mutex); + return ret; + } +@@ -2966,9 +2970,11 @@ int psp_ring_cmd_submit(struct psp_context *psp, + write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); + /* Check invalid write_frame ptr address */ + if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { +- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", +- ring_buffer_start, ring_buffer_end, write_frame); +- DRM_ERROR("write_frame is pointing to address out of bounds\n"); ++ dev_err(adev->dev, ++ "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", ++ ring_buffer_start, ring_buffer_end, write_frame); ++ dev_err(adev->dev, ++ "write_frame is pointing to address out of bounds\n"); + return -EINVAL; + } + +@@ -3495,7 +3501,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, + int ret; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { +- DRM_INFO("PSP block is not ready yet."); ++ dev_info(adev->dev, "PSP block is not ready yet\n."); + return -EBUSY; + } + +@@ -3504,7 +3510,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, + mutex_unlock(&adev->psp.mutex); + + if (ret) { +- DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); ++ dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); + return ret; + } + +@@ -3526,7 +3532,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, + void *fw_pri_cpu_addr; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { +- DRM_INFO("PSP block is not ready yet."); ++ dev_err(adev->dev, "PSP block is not ready yet."); + return -EBUSY; + } + +@@ -3559,7 
+3565,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, + release_firmware(usbc_pd_fw); + fail: + if (ret) { +- DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); ++ dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); + count = ret; + } + +@@ -3606,7 +3612,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, + + /* Safeguard against memory drain */ + if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { +- dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B); ++ dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); + kvfree(adev->psp.vbflash_tmp_buf); + adev->psp.vbflash_tmp_buf = NULL; + adev->psp.vbflash_image_size = 0; +@@ -3625,7 +3631,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, + adev->psp.vbflash_image_size += count; + mutex_unlock(&adev->psp.mutex); + +- dev_dbg(adev->dev, "IFWI staged for update"); ++ dev_dbg(adev->dev, "IFWI staged for update\n"); + + return count; + } +@@ -3645,7 +3651,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, + if (adev->psp.vbflash_image_size == 0) + return -EINVAL; + +- dev_dbg(adev->dev, "PSP IFWI flash process initiated"); ++ dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); + + ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, + AMDGPU_GPU_PAGE_SIZE, +@@ -3670,11 +3676,11 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, + adev->psp.vbflash_image_size = 0; + + if (ret) { +- dev_err(adev->dev, "Failed to load IFWI, err = %d", ret); ++ dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret); + return ret; + } + +- dev_dbg(adev->dev, "PSP IFWI flash process done"); ++ dev_dbg(adev->dev, "PSP IFWI flash process done\n"); + return 0; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +index fded8902346f5d..2992ce494e000c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +@@ -2125,11 +2125,13 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, + */ + long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) + { +- timeout = drm_sched_entity_flush(&vm->immediate, timeout); ++ timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, ++ DMA_RESV_USAGE_BOOKKEEP, ++ true, timeout); + if (timeout <= 0) + return timeout; + +- return drm_sched_entity_flush(&vm->delayed, timeout); ++ return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); + } + + /** +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +index 584cd5277f9272..e2dd7d4361cf31 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +@@ -1459,17 +1459,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev) + + static void dce_v10_0_audio_fini(struct amdgpu_device *adev) + { +- int i; +- + if (!amdgpu_audio) + return; + + if (!adev->mode_info.audio.enabled) + return; + +- for (i = 0; i < adev->mode_info.audio.num_pins; i++) +- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); +- + adev->mode_info.audio.enabled = false; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +index c14b70350a51ae..7ce89654e12b42 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +@@ -1508,17 +1508,12 @@ static int 
dce_v11_0_audio_init(struct amdgpu_device *adev) + + static void dce_v11_0_audio_fini(struct amdgpu_device *adev) + { +- int i; +- + if (!amdgpu_audio) + return; + + if (!adev->mode_info.audio.enabled) + return; + +- for (i = 0; i < adev->mode_info.audio.num_pins; i++) +- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); +- + adev->mode_info.audio.enabled = false; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +index 7f85ba5b726f68..c3d05ab7b12ff1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +@@ -1377,17 +1377,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev) + + static void dce_v6_0_audio_fini(struct amdgpu_device *adev) + { +- int i; +- + if (!amdgpu_audio) + return; + + if (!adev->mode_info.audio.enabled) + return; + +- for (i = 0; i < adev->mode_info.audio.num_pins; i++) +- dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); +- + adev->mode_info.audio.enabled = false; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +index f2b3cb5ed6bec2..ce2300c3c36b40 100644 +--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +@@ -1426,17 +1426,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev) + + static void dce_v8_0_audio_fini(struct amdgpu_device *adev) + { +- int i; +- + if (!amdgpu_audio) + return; + + if (!adev->mode_info.audio.enabled) + return; + +- for (i = 0; i < adev->mode_info.audio.num_pins; i++) +- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); +- + adev->mode_info.audio.enabled = false; + } + +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +index 136bd93c3b6554..0a33f8f117e921 100644 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +@@ -896,13 +896,13 @@ void dce110_link_encoder_construct( + enc110->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ +- if (BP_RESULT_OK == result) { ++ if (result == BP_RESULT_OK) { + enc110->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc110->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; +- } else { ++ } else if (result != BP_RESULT_NORECORD) { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); +@@ -1795,13 +1795,13 @@ void dce60_link_encoder_construct( + enc110->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ +- if (BP_RESULT_OK == result) { ++ if (result == BP_RESULT_OK) { + enc110->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc110->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; +- } else { ++ } else if (result != BP_RESULT_NORECORD) { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); +diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +index 59cbff209acd6e..560935f2e8cbe1 100644 +--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c ++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c +@@ -375,6 +375,17 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) + + 
gpiod_set_value_cansleep(pdata->enable_gpio, 1); + ++ /* ++ * After EN is deasserted and an external clock is detected, the bridge ++ * will sample GPIO3:1 to determine its frequency. The driver will ++ * overwrite this setting in ti_sn_bridge_set_refclk_freq(). But this is ++ * racy. Thus we have to wait a couple of us. According to the datasheet ++ * the GPIO lines have to be stable at least 5 us (td5) but it seems that ++ * is not enough and the refclk frequency value is still lost or ++ * overwritten by the bridge itself. Waiting for 20us seems to work. ++ */ ++ usleep_range(20, 30); ++ + /* + * If we have a reference clock we can enable communication w/ the + * panel (including the aux channel) w/out any need for an input clock +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +index ef4fa70119de1a..bfa1070a5f08e2 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +@@ -352,6 +352,7 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) + { + struct mtk_drm_private *drm_priv = dev_get_drvdata(dev); + struct mtk_drm_private *all_drm_priv[MAX_CRTC]; ++ struct mtk_drm_private *temp_drm_priv; + struct device_node *phandle = dev->parent->of_node; + const struct of_device_id *of_id; + struct device_node *node; +@@ -364,24 +365,41 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) + + of_id = of_match_node(mtk_drm_of_ids, node); + if (!of_id) +- continue; ++ goto next_put_node; + + pdev = of_find_device_by_node(node); + if (!pdev) +- continue; ++ goto next_put_node; + + drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match); +- if (!drm_dev || !dev_get_drvdata(drm_dev)) +- continue; ++ if (!drm_dev) ++ goto next_put_device_pdev_dev; ++ ++ temp_drm_priv = dev_get_drvdata(drm_dev); ++ if (!temp_drm_priv) ++ goto next_put_device_drm_dev; ++ ++ if (temp_drm_priv->data->main_len) ++ all_drm_priv[CRTC_MAIN] = temp_drm_priv; ++ else if (temp_drm_priv->data->ext_len) ++ all_drm_priv[CRTC_EXT] = temp_drm_priv; ++ else if (temp_drm_priv->data->third_len) ++ all_drm_priv[CRTC_THIRD] = temp_drm_priv; + +- all_drm_priv[cnt] = dev_get_drvdata(drm_dev); +- if (all_drm_priv[cnt] && all_drm_priv[cnt]->mtk_drm_bound) ++ if (temp_drm_priv->mtk_drm_bound) + cnt++; + +- if (cnt == MAX_CRTC) { +- of_node_put(node); ++next_put_device_drm_dev: ++ put_device(drm_dev); ++ ++next_put_device_pdev_dev: ++ put_device(&pdev->dev); ++ ++next_put_node: ++ of_node_put(node); ++ ++ if (cnt == MAX_CRTC) + break; +- } + } + + if (drm_priv->data->mmsys_dev_num == cnt) { +@@ -475,21 +493,21 @@ static int mtk_drm_kms_init(struct drm_device *drm) + for (j = 0; j < private->data->mmsys_dev_num; j++) { + priv_n = private->all_drm_private[j]; + +- if (i == 0 && priv_n->data->main_len) { ++ if (i == CRTC_MAIN && priv_n->data->main_len) { + ret = mtk_drm_crtc_create(drm, priv_n->data->main_path, + priv_n->data->main_len, j); + if (ret) + goto err_component_unbind; + + continue; +- } else if (i == 1 && priv_n->data->ext_len) { ++ } else if (i == CRTC_EXT && priv_n->data->ext_len) { + ret = mtk_drm_crtc_create(drm, priv_n->data->ext_path, + priv_n->data->ext_len, j); + if (ret) + goto err_component_unbind; + + continue; +- } else if (i == 2 && priv_n->data->third_len) { ++ } else if (i == CRTC_THIRD && priv_n->data->third_len) { + ret = mtk_drm_crtc_create(drm, priv_n->data->third_path, + priv_n->data->third_len, j); + if (ret) +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h +index 
eb2fd45941f09d..f4de8bb2768503 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h +@@ -9,11 +9,17 @@ + #include + #include "mtk_drm_ddp_comp.h" + +-#define MAX_CRTC 3 + #define MAX_CONNECTOR 2 + #define DDP_COMPONENT_DRM_OVL_ADAPTOR (DDP_COMPONENT_ID_MAX + 1) + #define DDP_COMPONENT_DRM_ID_MAX (DDP_COMPONENT_DRM_OVL_ADAPTOR + 1) + ++enum mtk_drm_crtc_path { ++ CRTC_MAIN, ++ CRTC_EXT, ++ CRTC_THIRD, ++ MAX_CRTC, ++}; ++ + struct device; + struct device_node; + struct drm_crtc; +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +index 5db37247dc29b2..572c54a3709139 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +@@ -348,6 +348,8 @@ nvkm_fifo_dtor(struct nvkm_engine *engine) + nvkm_chid_unref(&fifo->chid); + + nvkm_event_fini(&fifo->nonstall.event); ++ if (fifo->func->nonstall_dtor) ++ fifo->func->nonstall_dtor(fifo); + mutex_destroy(&fifo->mutex); + return fifo; + } +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c +index c56d2a839efbaf..686a2c9fec46d8 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c +@@ -516,19 +516,11 @@ ga100_fifo_nonstall_intr(struct nvkm_inth *inth) + static void + ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index) + { +- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event); +- struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0); +- +- nvkm_inth_block(&runl->nonstall.inth); + } + + static void + ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index) + { +- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event); +- struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0); +- +- nvkm_inth_allow(&runl->nonstall.inth); + } + + const struct nvkm_event_func +@@ -559,12 +551,26 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo) + if (ret) + return ret; + ++ nvkm_inth_allow(&runl->nonstall.inth); ++ + nr = max(nr, runl->id + 1); + } + + return nr; + } + ++void ++ga100_fifo_nonstall_dtor(struct nvkm_fifo *fifo) ++{ ++ struct nvkm_runl *runl; ++ ++ nvkm_runl_foreach(runl, fifo) { ++ if (runl->nonstall.vector < 0) ++ continue; ++ nvkm_inth_block(&runl->nonstall.inth); ++ } ++} ++ + int + ga100_fifo_runl_ctor(struct nvkm_fifo *fifo) + { +@@ -594,6 +600,7 @@ ga100_fifo = { + .runl_ctor = ga100_fifo_runl_ctor, + .mmu_fault = &tu102_fifo_mmu_fault, + .nonstall_ctor = ga100_fifo_nonstall_ctor, ++ .nonstall_dtor = ga100_fifo_nonstall_dtor, + .nonstall = &ga100_fifo_nonstall, + .runl = &ga100_runl, + .runq = &ga100_runq, +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c +index 2cdf5da339b60b..dccf38101fd9e7 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c +@@ -28,6 +28,7 @@ ga102_fifo = { + .runl_ctor = ga100_fifo_runl_ctor, + .mmu_fault = &tu102_fifo_mmu_fault, + .nonstall_ctor = ga100_fifo_nonstall_ctor, ++ .nonstall_dtor = ga100_fifo_nonstall_dtor, + .nonstall = &ga100_fifo_nonstall, + .runl = &ga100_runl, + .runq = &ga100_runq, +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +index 4d448be19224a8..b4ccf6b8bd21a1 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h ++++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +@@ -38,6 +38,7 @@ struct nvkm_fifo_func { + void (*start)(struct nvkm_fifo *, unsigned long *); + + int (*nonstall_ctor)(struct nvkm_fifo *); ++ void (*nonstall_dtor)(struct nvkm_fifo *); + const struct nvkm_event_func *nonstall; + + const struct nvkm_runl_func *runl; +@@ -194,6 +195,7 @@ extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault; + + int ga100_fifo_runl_ctor(struct nvkm_fifo *); + int ga100_fifo_nonstall_ctor(struct nvkm_fifo *); ++void ga100_fifo_nonstall_dtor(struct nvkm_fifo *); + extern const struct nvkm_event_func ga100_fifo_nonstall; + extern const struct nvkm_runl_func ga100_runl; + extern const struct nvkm_runq_func ga100_runq; +diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c +index a5f89aab3fb4d2..c25a54d5b39ad5 100644 +--- a/drivers/hwmon/mlxreg-fan.c ++++ b/drivers/hwmon/mlxreg-fan.c +@@ -561,15 +561,14 @@ static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan) + if (!pwm->connected) + continue; + pwm->fan = fan; ++ /* Set minimal PWM speed. */ ++ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY); + pwm->cdev = devm_thermal_of_cooling_device_register(dev, NULL, mlxreg_fan_name[i], + pwm, &mlxreg_fan_cooling_ops); + if (IS_ERR(pwm->cdev)) { + dev_err(dev, "Failed to register cooling device\n"); + return PTR_ERR(pwm->cdev); + } +- +- /* Set minimal PWM speed. */ +- pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY); + } + + return 0; +diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c +index e9857d93b307e4..70c92cbfc9f141 100644 +--- a/drivers/iio/chemical/pms7003.c ++++ b/drivers/iio/chemical/pms7003.c +@@ -5,7 +5,6 @@ + * Copyright (c) Tomasz Duszynski + */ + +-#include + #include + #include + #include +@@ -19,6 +18,8 @@ + #include + #include + #include ++#include ++#include + + #define PMS7003_DRIVER_NAME "pms7003" + +@@ -76,7 +77,7 @@ struct pms7003_state { + /* Used to construct scan to push to the IIO buffer */ + struct { + u16 data[3]; /* PM1, PM2P5, PM10 */ +- s64 ts; ++ aligned_s64 ts; + } scan; + }; + +diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +index d4f9b5d8d28d6d..ace3ce4faea73a 100644 +--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c ++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +@@ -52,7 +52,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) + u16 fifo_count; + u32 fifo_period; + s64 timestamp; +- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE]; ++ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8); + int int_status; + size_t i, nb; + +diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c +index dc529cbe3805e2..25a45c4251fbd0 100644 +--- a/drivers/iio/light/opt3001.c ++++ b/drivers/iio/light/opt3001.c +@@ -692,8 +692,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) + struct opt3001 *opt = iio_priv(iio); + int ret; + bool wake_result_ready_queue = false; ++ bool ok_to_ignore_lock = opt->ok_to_ignore_lock; + +- if (!opt->ok_to_ignore_lock) ++ if (!ok_to_ignore_lock) + mutex_lock(&opt->lock); + + ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION); +@@ -730,7 +731,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) + } + + out: +- if (!opt->ok_to_ignore_lock) ++ if (!ok_to_ignore_lock) + mutex_unlock(&opt->lock); + + if (wake_result_ready_queue) +diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c +index e3f0de020a40c9..829c472812e49b 100644 +--- 
a/drivers/iio/pressure/mprls0025pa.c ++++ b/drivers/iio/pressure/mprls0025pa.c +@@ -87,11 +87,6 @@ static const struct mpr_func_spec mpr_func_spec[] = { + [MPR_FUNCTION_C] = {.output_min = 3355443, .output_max = 13421773}, + }; + +-struct mpr_chan { +- s32 pres; /* pressure value */ +- s64 ts; /* timestamp */ +-}; +- + struct mpr_data { + struct i2c_client *client; + struct mutex lock; /* +@@ -120,7 +115,10 @@ struct mpr_data { + * loop until data is ready + */ + struct completion completion; /* handshake from irq to read */ +- struct mpr_chan chan; /* ++ struct { ++ s32 pres; /* pressure value */ ++ aligned_s64 ts; /* timestamp */ ++ } chan; /* + * channel values for buffered + * mode + */ +diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c +index 0b3f29195330ac..0cd216e28f0090 100644 +--- a/drivers/isdn/mISDN/dsp_hwec.c ++++ b/drivers/isdn/mISDN/dsp_hwec.c +@@ -51,14 +51,14 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) + goto _do; + + { +- char *dup, *tok, *name, *val; ++ char *dup, *next, *tok, *name, *val; + int tmp; + +- dup = kstrdup(arg, GFP_ATOMIC); ++ dup = next = kstrdup(arg, GFP_ATOMIC); + if (!dup) + return; + +- while ((tok = strsep(&dup, ","))) { ++ while ((tok = strsep(&next, ","))) { + if (!strlen(tok)) + continue; + name = strsep(&tok, "="); +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index 534e7f7bca4c2f..b836ab2a649a2b 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -1234,11 +1234,12 @@ static int macb_tx_complete(struct macb_queue *queue, int budget) + { + struct macb *bp = queue->bp; + u16 queue_index = queue - bp->queues; ++ unsigned long flags; + unsigned int tail; + unsigned int head; + int packets = 0; + +- spin_lock(&queue->tx_ptr_lock); ++ spin_lock_irqsave(&queue->tx_ptr_lock, flags); + head = queue->tx_head; + for (tail = queue->tx_tail; tail != head && packets < budget; tail++) { + struct macb_tx_skb *tx_skb; +@@ -1297,7 +1298,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget) + CIRC_CNT(queue->tx_head, queue->tx_tail, + bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) + netif_wake_subqueue(bp->dev, queue_index); +- spin_unlock(&queue->tx_ptr_lock); ++ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); + + return packets; + } +@@ -1713,8 +1714,9 @@ static void macb_tx_restart(struct macb_queue *queue) + { + struct macb *bp = queue->bp; + unsigned int head_idx, tbqp; ++ unsigned long flags; + +- spin_lock(&queue->tx_ptr_lock); ++ spin_lock_irqsave(&queue->tx_ptr_lock, flags); + + if (queue->tx_head == queue->tx_tail) + goto out_tx_ptr_unlock; +@@ -1726,19 +1728,20 @@ static void macb_tx_restart(struct macb_queue *queue) + if (tbqp == head_idx) + goto out_tx_ptr_unlock; + +- spin_lock_irq(&bp->lock); ++ spin_lock(&bp->lock); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); +- spin_unlock_irq(&bp->lock); ++ spin_unlock(&bp->lock); + + out_tx_ptr_unlock: +- spin_unlock(&queue->tx_ptr_lock); ++ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); + } + + static bool macb_tx_complete_pending(struct macb_queue *queue) + { + bool retval = false; ++ unsigned long flags; + +- spin_lock(&queue->tx_ptr_lock); ++ spin_lock_irqsave(&queue->tx_ptr_lock, flags); + if (queue->tx_head != queue->tx_tail) { + /* Make hw descriptor updates visible to CPU */ + rmb(); +@@ -1746,7 +1749,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue) + if (macb_tx_desc(queue, queue->tx_tail)->ctrl & 
MACB_BIT(TX_USED)) + retval = true; + } +- spin_unlock(&queue->tx_ptr_lock); ++ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); + return retval; + } + +@@ -2314,6 +2317,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) + struct macb_queue *queue = &bp->queues[queue_index]; + unsigned int desc_cnt, nr_frags, frag_size, f; + unsigned int hdrlen; ++ unsigned long flags; + bool is_lso; + netdev_tx_t ret = NETDEV_TX_OK; + +@@ -2374,7 +2378,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) + desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); + } + +- spin_lock_bh(&queue->tx_ptr_lock); ++ spin_lock_irqsave(&queue->tx_ptr_lock, flags); + + /* This is a hard error, log it. */ + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, +@@ -2396,15 +2400,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) + wmb(); + skb_tx_timestamp(skb); + +- spin_lock_irq(&bp->lock); ++ spin_lock(&bp->lock); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); +- spin_unlock_irq(&bp->lock); ++ spin_unlock(&bp->lock); + + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) + netif_stop_subqueue(dev, queue_index); + + unlock: +- spin_unlock_bh(&queue->tx_ptr_lock); ++ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); + + return ret; + } +diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +index 087d4c2b3efd1a..a423a938821156 100644 +--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c ++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +@@ -1491,13 +1491,17 @@ static int bgx_init_of_phy(struct bgx *bgx) + * this cortina phy, for which there is no driver + * support, ignore it. + */ +- if (phy_np && +- !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) { +- /* Wait until the phy drivers are available */ +- pd = of_phy_find_device(phy_np); +- if (!pd) +- goto defer; +- bgx->lmac[lmac].phydev = pd; ++ if (phy_np) { ++ if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) { ++ /* Wait until the phy drivers are available */ ++ pd = of_phy_find_device(phy_np); ++ if (!pd) { ++ of_node_put(phy_np); ++ goto defer; ++ } ++ bgx->lmac[lmac].phydev = pd; ++ } ++ of_node_put(phy_np); + } + + lmac++; +@@ -1513,11 +1517,11 @@ static int bgx_init_of_phy(struct bgx *bgx) + * for phy devices we may have already found. 
+ */ + while (lmac) { ++ lmac--; + if (bgx->lmac[lmac].phydev) { + put_device(&bgx->lmac[lmac].phydev->mdio.dev); + bgx->lmac[lmac].phydev = NULL; + } +- lmac--; + } + of_node_put(node); + return -EPROBE_DEFER; +diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c +index fc0f98ea61332f..a1abc51584a183 100644 +--- a/drivers/net/ethernet/intel/e1000e/ethtool.c ++++ b/drivers/net/ethernet/intel/e1000e/ethtool.c +@@ -567,12 +567,12 @@ static int e1000_set_eeprom(struct net_device *netdev, + { + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; ++ size_t total_len, max_len; + u16 *eeprom_buff; +- void *ptr; +- int max_len; ++ int ret_val = 0; + int first_word; + int last_word; +- int ret_val = 0; ++ void *ptr; + u16 i; + + if (eeprom->len == 0) +@@ -587,6 +587,10 @@ static int e1000_set_eeprom(struct net_device *netdev, + + max_len = hw->nvm.word_size * 2; + ++ if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) || ++ total_len > max_len) ++ return -EFBIG; ++ + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); +diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c +index 306758428aefd7..a569d2fcc90af4 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_client.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c +@@ -361,8 +361,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf) + if (i40e_client_get_params(vsi, &cdev->lan_info.params)) + goto free_cdev; + +- mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list, +- struct netdev_hw_addr, list); ++ mac = list_first_entry_or_null(&cdev->lan_info.netdev->dev_addrs.list, ++ struct netdev_hw_addr, list); + if (mac) + ether_addr_copy(cdev->lan_info.lanmac, mac->addr); + else +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index cb8efc952dfda9..aefe2af6f01d41 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1586,6 +1586,13 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) + bool gso = false; + int tx_num; + ++ if (skb_vlan_tag_present(skb) && ++ !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) { ++ skb = __vlan_hwaccel_push_inside(skb); ++ if (!skb) ++ goto dropped; ++ } ++ + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running on the same ring so we need to lock + * the ring access +@@ -1631,8 +1638,9 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) + + drop: + spin_unlock(ð->page_lock); +- stats->tx_dropped++; + dev_kfree_skb_any(skb); ++dropped: ++ stats->tx_dropped++; + return NETDEV_TX_OK; + } + +diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c +index 9f505cf02d9651..2dc1cfcd7ce99b 100644 +--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c ++++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c +@@ -1578,7 +1578,7 @@ do_reset(struct net_device *dev, int full) + msleep(40); /* wait 40 msec to let it complete */ + } + if (full_duplex) +- PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex)); ++ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex); + } else { /* No MII */ + SelectPage(0); + value = GetByte(XIRCREG_ESR); /* read the ESR */ +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 
767053d6c6b6f9..af6cc3e90ef7ce 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -1840,7 +1840,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) + + if (tb_sa[MACSEC_SA_ATTR_PN]) { + spin_lock_bh(&rx_sa->lock); +- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); ++ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&rx_sa->lock); + } + +@@ -2082,7 +2082,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) + } + + spin_lock_bh(&tx_sa->lock); +- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); ++ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&tx_sa->lock); + + if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) +@@ -2394,7 +2394,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) + + spin_lock_bh(&tx_sa->lock); + prev_pn = tx_sa->next_pn_halves; +- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); ++ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&tx_sa->lock); + } + +@@ -2492,7 +2492,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) + + spin_lock_bh(&rx_sa->lock); + prev_pn = rx_sa->next_pn_halves; +- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); ++ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&rx_sa->lock); + } + +diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c +index 97139c07130fc9..b65682b8b6cd9a 100644 +--- a/drivers/net/pcs/pcs-rzn1-miic.c ++++ b/drivers/net/pcs/pcs-rzn1-miic.c +@@ -19,7 +19,7 @@ + #define MIIC_PRCMD 0x0 + #define MIIC_ESID_CODE 0x4 + +-#define MIIC_MODCTRL 0x20 ++#define MIIC_MODCTRL 0x8 + #define MIIC_MODCTRL_SW_MODE GENMASK(4, 0) + + #define MIIC_CONVCTRL(port) (0x100 + (port) * 4) +diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c +index 1f6237705b44b7..939a8a17595ef9 100644 +--- a/drivers/net/phy/mscc/mscc_ptp.c ++++ b/drivers/net/phy/mscc/mscc_ptp.c +@@ -455,12 +455,12 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp) + *p++ = (reg >> 24) & 0xff; + } + +- len = skb_queue_len(&ptp->tx_queue); ++ len = skb_queue_len_lockless(&ptp->tx_queue); + if (len < 1) + return; + + while (len--) { +- skb = __skb_dequeue(&ptp->tx_queue); ++ skb = skb_dequeue(&ptp->tx_queue); + if (!skb) + return; + +@@ -485,7 +485,7 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp) + * packet in the FIFO right now, reschedule it for later + * packets. 
+ */ +- __skb_queue_tail(&ptp->tx_queue, skb); ++ skb_queue_tail(&ptp->tx_queue, skb); + } + } + +@@ -1067,6 +1067,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) + case HWTSTAMP_TX_ON: + break; + case HWTSTAMP_TX_OFF: ++ skb_queue_purge(&vsc8531->ptp->tx_queue); + break; + default: + return -ERANGE; +@@ -1091,9 +1092,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) + + mutex_lock(&vsc8531->ts_lock); + +- __skb_queue_purge(&vsc8531->ptp->tx_queue); +- __skb_queue_head_init(&vsc8531->ptp->tx_queue); +- + /* Disable predictor while configuring the 1588 block */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_PREDICTOR); +@@ -1179,9 +1177,7 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts, + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + +- mutex_lock(&vsc8531->ts_lock); +- __skb_queue_tail(&vsc8531->ptp->tx_queue, skb); +- mutex_unlock(&vsc8531->ts_lock); ++ skb_queue_tail(&vsc8531->ptp->tx_queue, skb); + return; + + out: +@@ -1547,6 +1543,7 @@ void vsc8584_ptp_deinit(struct phy_device *phydev) + if (vsc8531->ptp->ptp_clock) { + ptp_clock_unregister(vsc8531->ptp->ptp_clock); + skb_queue_purge(&vsc8531->rx_skbs_list); ++ skb_queue_purge(&vsc8531->ptp->tx_queue); + } + } + +@@ -1570,7 +1567,7 @@ irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev) + if (rc & VSC85XX_1588_INT_FIFO_ADD) { + vsc85xx_get_tx_ts(priv->ptp); + } else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) { +- __skb_queue_purge(&priv->ptp->tx_queue); ++ skb_queue_purge(&priv->ptp->tx_queue); + vsc85xx_ts_reset_fifo(phydev); + } + +@@ -1590,6 +1587,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev) + mutex_init(&vsc8531->phc_lock); + mutex_init(&vsc8531->ts_lock); + skb_queue_head_init(&vsc8531->rx_skbs_list); ++ skb_queue_head_init(&vsc8531->ptp->tx_queue); + + /* Retrieve the shared load/save GPIO. Request it as non exclusive as + * the same GPIO can be requested by all the PHYs of the same package. 
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 28b894bcd7a93d..46ac51217114bd 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -1753,7 +1753,6 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) + */ + if (net_ratelimit()) + netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); +- kfree_skb(skb); + consume_skb(new_skb); + new_skb = NULL; + } +@@ -1855,9 +1854,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) + "down - pkt dropped.\n"); + goto drop; + } +- skb = pad_compress_skb(ppp, skb); +- if (!skb) ++ new_skb = pad_compress_skb(ppp, skb); ++ if (!new_skb) + goto drop; ++ skb = new_skb; + } + + /* +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index d9792fd515a904..22554daaf6ff17 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -2043,6 +2043,13 @@ static const struct usb_device_id cdc_devs[] = { + .driver_info = (unsigned long)&wwan_info, + }, + ++ /* Intel modem (label from OEM reads Fibocom L850-GL) */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a, ++ USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), ++ .driver_info = (unsigned long)&wwan_info, ++ }, ++ + /* DisplayLink docking stations */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c +index afd78324f3aa39..6e4023791b4761 100644 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c +@@ -3483,8 +3483,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int err = 0; + +- netdev->mtu = new_mtu; +- + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. 
+@@ -3498,6 +3496,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) + + /* we need to re-create the rx queue based on the new mtu */ + vmxnet3_rq_destroy_all(adapter); ++ netdev->mtu = new_mtu; + vmxnet3_adjust_rx_ring_size(adapter); + err = vmxnet3_rq_create_all(adapter); + if (err) { +@@ -3514,6 +3513,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) + "Closing it\n", err); + goto out; + } ++ } else { ++ netdev->mtu = new_mtu; + } + + out: +diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h +index 812a174f74c0b3..4bb36dc6ae08ba 100644 +--- a/drivers/net/wireless/ath/ath11k/core.h ++++ b/drivers/net/wireless/ath/ath11k/core.h +@@ -365,6 +365,8 @@ struct ath11k_vif { + struct ieee80211_chanctx_conf chanctx; + struct ath11k_arp_ns_offload arp_ns_offload; + struct ath11k_rekey_data rekey_data; ++ u32 num_stations; ++ bool reinstall_group_keys; + + #ifdef CONFIG_ATH11K_DEBUGFS + struct dentry *debugfs_twt; +@@ -1234,6 +1236,11 @@ static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif) + return (struct ath11k_vif *)vif->drv_priv; + } + ++static inline struct ath11k_sta *ath11k_sta_to_arsta(struct ieee80211_sta *sta) ++{ ++ return (struct ath11k_sta *)sta->drv_priv; ++} ++ + static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab, + int mac_id) + { +diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c +index 50bc17127e68a3..4304fed44d5839 100644 +--- a/drivers/net/wireless/ath/ath11k/debugfs.c ++++ b/drivers/net/wireless/ath/ath11k/debugfs.c +@@ -1452,7 +1452,7 @@ static void ath11k_reset_peer_ps_duration(void *data, + struct ieee80211_sta *sta) + { + struct ath11k *ar = data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + + spin_lock_bh(&ar->data_lock); + arsta->ps_total_duration = 0; +@@ -1503,7 +1503,7 @@ static void ath11k_peer_ps_state_disable(void *data, + struct ieee80211_sta *sta) + { + struct ath11k *ar = data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + + spin_lock_bh(&ar->data_lock); + arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; +diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c +index 168879a380cb2d..f56a24b6c8da21 100644 +--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c ++++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c +@@ -137,7 +137,7 @@ static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + struct ath11k_htt_data_stats *stats; + static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail", +@@ -244,7 +244,7 @@ static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; + int len = 0, i, retval = 0; +@@ -341,7 +341,7 @@ static int + ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file) + { + struct ieee80211_sta *sta = 
inode->i_private; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + struct debug_htt_stats_req *stats_req; + int type = ar->debug.htt_stats.type; +@@ -377,7 +377,7 @@ static int + ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file) + { + struct ieee80211_sta *sta = inode->i_private; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + + mutex_lock(&ar->conf_mutex); +@@ -414,7 +414,7 @@ static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + int ret, enable; + +@@ -454,7 +454,7 @@ static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + char buf[32] = {0}; + int len; +@@ -481,7 +481,7 @@ static ssize_t ath11k_dbg_sta_write_delba(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + u32 tid, initiator, reason; + int ret; +@@ -532,7 +532,7 @@ static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + u32 tid, status; + int ret; +@@ -582,7 +582,7 @@ static ssize_t ath11k_dbg_sta_write_addba(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + u32 tid, buf_size; + int ret; +@@ -633,7 +633,7 @@ static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + char buf[64]; + int len = 0; +@@ -653,7 +653,7 @@ static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + u32 aggr_mode; + int ret; +@@ -698,7 +698,7 @@ ath11k_write_htt_peer_stats_reset(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + int ret; +@@ -757,7 
+757,7 @@ static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + char buf[20]; + int len; +@@ -784,7 +784,7 @@ static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file, + loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + u64 time_since_station_in_power_save; + char buf[20]; +@@ -818,7 +818,7 @@ static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file, + size_t count, loff_t *ppos) + { + struct ieee80211_sta *sta = file->private_data; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k *ar = arsta->arvif->ar; + char buf[20]; + u64 power_save_duration; +diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c +index 33b9764eaa9167..8cc51ab699de78 100644 +--- a/drivers/net/wireless/ath/ath11k/dp_rx.c ++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c +@@ -1100,7 +1100,7 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar, + struct ieee80211_ampdu_params *params) + { + struct ath11k_base *ab = ar->ab; +- struct ath11k_sta *arsta = (void *)params->sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); + int vdev_id = arsta->arvif->vdev_id; + int ret; + +@@ -1118,7 +1118,7 @@ int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, + { + struct ath11k_base *ab = ar->ab; + struct ath11k_peer *peer; +- struct ath11k_sta *arsta = (void *)params->sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); + int vdev_id = arsta->arvif->vdev_id; + dma_addr_t paddr; + bool active; +@@ -1460,7 +1460,7 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, + } + + sta = peer->sta; +- arsta = (struct ath11k_sta *)sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(sta); + + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + +@@ -5269,7 +5269,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, + goto next_skb; + } + +- arsta = (struct ath11k_sta *)peer->sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(peer->sta); + ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); + + if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) +diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c +index 7dd1ee58980177..c1072e66e3e8fd 100644 +--- a/drivers/net/wireless/ath/ath11k/dp_tx.c ++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c +@@ -467,7 +467,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts) + } + + sta = peer->sta; +- arsta = (struct ath11k_sta *)sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(sta); + + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, +@@ -627,7 +627,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, + ieee80211_free_txskb(ar->hw, msdu); + return; + } +- arsta = (struct ath11k_sta *)peer->sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(peer->sta); + status.sta = peer->sta; + status.skb = msdu; + status.info = info; +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c +index 
9df3f6449f7689..2921be9bd530cf 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -254,9 +254,6 @@ static const u32 ath11k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+ };
+ 
+-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+- struct ieee80211_vif *vif);
+-
+ enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
+ {
+ enum nl80211_he_ru_alloc ret;
+@@ -2828,7 +2825,7 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
+ 
+ lockdep_assert_held(&ar->conf_mutex);
+ 
+- arsta = (struct ath11k_sta *)sta->drv_priv;
++ arsta = ath11k_sta_to_arsta(sta);
+ 
+ memset(arg, 0, sizeof(*arg));
+ 
+@@ -4208,6 +4205,40 @@ static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ return first_errno;
+ }
+ 
++static int ath11k_set_group_keys(struct ath11k_vif *arvif)
++{
++ struct ath11k *ar = arvif->ar;
++ struct ath11k_base *ab = ar->ab;
++ const u8 *addr = arvif->bssid;
++ int i, ret, first_errno = 0;
++ struct ath11k_peer *peer;
++
++ spin_lock_bh(&ab->base_lock);
++ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
++ spin_unlock_bh(&ab->base_lock);
++
++ if (!peer)
++ return -ENOENT;
++
++ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
++ struct ieee80211_key_conf *key = peer->keys[i];
++
++ if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++ continue;
++
++ ret = ath11k_install_key(arvif, key, SET_KEY, addr,
++ WMI_KEY_GROUP);
++ if (ret < 0 && first_errno == 0)
++ first_errno = ret;
++
++ if (ret < 0)
++ ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n",
++ i, arvif->vdev_id, ret);
++ }
++
++ return first_errno;
++}
++
+ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+@@ -4217,6 +4248,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
++ bool is_ap_with_no_sta;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+@@ -4277,16 +4309,57 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ else
+ flags |= WMI_KEY_GROUP;
+ 
+- ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+- if (ret) {
+- ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+- goto exit;
+- }
++ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
++ "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
++ cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id,
++ flags, arvif->vdev_type, arvif->num_stations);
++
++ /* Allow group key clearing only in AP mode when no stations are
++ * associated. There is a known race condition in firmware where
++ * group addressed packets may be dropped if the key is cleared
++ * and immediately set again during rekey.
++ *
++ * During GTK rekey, mac80211 issues a clear key (if the old key
++ * exists) followed by an install key operation for the same key
++ * index. This causes ath11k to send two WMI commands in quick
++ * succession: one to clear the old key and another to install the
++ * new key in the same slot.
++ *
++ * Under certain conditions, especially under high load or in
++ * time-sensitive scenarios, firmware may process these commands
++ * asynchronously in a way that firmware assumes the key is
++ * cleared whereas hardware has a valid key. This inconsistency
++ * between hardware and firmware leads to group addressed packet
++ * drops after rekey.
++ * Only setting the same key again can restore a valid key in
++ * firmware and allow packets to be transmitted.
++ *
++ * There is a use case where an AP can transition from secure mode
++ * to open mode without a vdev restart by just deleting all
++ * associated peers and clearing the key. Hence allow clearing the
++ * key for that case alone. Mark arvif->reinstall_group_keys in
++ * such cases and reinstall the same key when the first peer is
++ * added, allowing firmware to recover from the race if it had occurred.
++ */
+ 
+- ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+- if (ret) {
+- ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+- goto exit;
++ is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
++ !arvif->num_stations);
++ if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
++ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
++ if (ret) {
++ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
++ goto exit;
++ }
++
++ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
++ if (ret) {
++ ath11k_warn(ab, "failed to offload PN replay detection %d\n",
++ ret);
++ goto exit;
++ }
++
++ if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
++ arvif->reinstall_group_keys = true;
+ }
+ 
+ spin_lock_bh(&ab->base_lock);
+@@ -4311,7 +4384,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+ 
+ if (sta) {
+- arsta = (struct ath11k_sta *)sta->drv_priv;
++ arsta = ath11k_sta_to_arsta(sta);
+ 
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+@@ -4879,6 +4952,7 @@ static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ return -ENOBUFS;
+ 
+ ar->num_stations++;
++ arvif->num_stations++;
+ 
+ return 0;
+ }
+@@ -4894,100 +4968,7 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ return;
+ 
+ ar->num_stations--;
+-}
+-
+-static int ath11k_mac_station_add(struct ath11k *ar,
+- struct ieee80211_vif *vif,
+- struct ieee80211_sta *sta)
+-{
+- struct ath11k_base *ab = ar->ab;
+- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+- struct peer_create_params peer_param;
+- int ret;
+-
+- lockdep_assert_held(&ar->conf_mutex);
+-
+- ret = ath11k_mac_inc_num_stations(arvif, sta);
+- if (ret) {
+- ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+- ar->max_num_stations);
+- goto exit;
+- }
+-
+- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+- if (!arsta->rx_stats) {
+- ret = -ENOMEM;
+- goto dec_num_station;
+- }
+-
+- peer_param.vdev_id = arvif->vdev_id;
+- peer_param.peer_addr = sta->addr;
+- peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+-
+- ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+- if (ret) {
+- ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+- sta->addr, arvif->vdev_id);
+- goto free_rx_stats;
+- }
+-
+- ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+- sta->addr, arvif->vdev_id);
+-
+- if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+- if (!arsta->tx_stats) {
+- ret = -ENOMEM;
+- goto free_peer;
+- }
+- }
+-
+- if (ieee80211_vif_is_mesh(vif)) {
+- ath11k_dbg(ab, ATH11K_DBG_MAC,
+- "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+- ret = ath11k_wmi_set_peer_param(ar, sta->addr, +- arvif->vdev_id, +- WMI_PEER_USE_4ADDR, 1); +- if (ret) { +- ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n", +- sta->addr, ret); +- goto free_tx_stats; +- } +- } +- +- ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); +- if (ret) { +- ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", +- sta->addr, arvif->vdev_id, ret); +- goto free_tx_stats; +- } +- +- if (ab->hw_params.vdev_start_delay && +- !arvif->is_started && +- arvif->vdev_type != WMI_VDEV_TYPE_AP) { +- ret = ath11k_start_vdev_delay(ar->hw, vif); +- if (ret) { +- ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); +- goto free_tx_stats; +- } +- } +- +- ewma_avg_rssi_init(&arsta->avg_rssi); +- return 0; +- +-free_tx_stats: +- kfree(arsta->tx_stats); +- arsta->tx_stats = NULL; +-free_peer: +- ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); +-free_rx_stats: +- kfree(arsta->rx_stats); +- arsta->rx_stats = NULL; +-dec_num_station: +- ath11k_mac_dec_num_stations(arvif, sta); +-exit: +- return ret; ++ arvif->num_stations--; + } + + static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar, +@@ -5018,140 +4999,6 @@ static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar, + return bw; + } + +-static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, +- struct ieee80211_vif *vif, +- struct ieee80211_sta *sta, +- enum ieee80211_sta_state old_state, +- enum ieee80211_sta_state new_state) +-{ +- struct ath11k *ar = hw->priv; +- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; +- struct ath11k_peer *peer; +- int ret = 0; +- +- /* cancel must be done outside the mutex to avoid deadlock */ +- if ((old_state == IEEE80211_STA_NONE && +- new_state == IEEE80211_STA_NOTEXIST)) { +- cancel_work_sync(&arsta->update_wk); +- cancel_work_sync(&arsta->set_4addr_wk); +- } +- +- mutex_lock(&ar->conf_mutex); +- +- if (old_state == IEEE80211_STA_NOTEXIST && +- new_state == IEEE80211_STA_NONE) { +- memset(arsta, 0, sizeof(*arsta)); +- arsta->arvif = arvif; +- arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; +- INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); +- INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); +- +- ret = ath11k_mac_station_add(ar, vif, sta); +- if (ret) +- ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", +- sta->addr, arvif->vdev_id); +- } else if ((old_state == IEEE80211_STA_NONE && +- new_state == IEEE80211_STA_NOTEXIST)) { +- bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay && +- vif->type == NL80211_IFTYPE_STATION; +- +- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); +- +- if (!skip_peer_delete) { +- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); +- if (ret) +- ath11k_warn(ar->ab, +- "Failed to delete peer: %pM for VDEV: %d\n", +- sta->addr, arvif->vdev_id); +- else +- ath11k_dbg(ar->ab, +- ATH11K_DBG_MAC, +- "Removed peer: %pM for VDEV: %d\n", +- sta->addr, arvif->vdev_id); +- } +- +- ath11k_mac_dec_num_stations(arvif, sta); +- mutex_lock(&ar->ab->tbl_mtx_lock); +- spin_lock_bh(&ar->ab->base_lock); +- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); +- if (skip_peer_delete && peer) { +- peer->sta = NULL; +- } else if (peer && peer->sta == sta) { +- ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", +- vif->addr, arvif->vdev_id); +- ath11k_peer_rhash_delete(ar->ab, peer); +- peer->sta = NULL; +- list_del(&peer->list); +- kfree(peer); +- 
ar->num_peers--; +- } +- spin_unlock_bh(&ar->ab->base_lock); +- mutex_unlock(&ar->ab->tbl_mtx_lock); +- +- kfree(arsta->tx_stats); +- arsta->tx_stats = NULL; +- +- kfree(arsta->rx_stats); +- arsta->rx_stats = NULL; +- } else if (old_state == IEEE80211_STA_AUTH && +- new_state == IEEE80211_STA_ASSOC && +- (vif->type == NL80211_IFTYPE_AP || +- vif->type == NL80211_IFTYPE_MESH_POINT || +- vif->type == NL80211_IFTYPE_ADHOC)) { +- ret = ath11k_station_assoc(ar, vif, sta, false); +- if (ret) +- ath11k_warn(ar->ab, "Failed to associate station: %pM\n", +- sta->addr); +- +- spin_lock_bh(&ar->data_lock); +- /* Set arsta bw and prev bw */ +- arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); +- arsta->bw_prev = arsta->bw; +- spin_unlock_bh(&ar->data_lock); +- } else if (old_state == IEEE80211_STA_ASSOC && +- new_state == IEEE80211_STA_AUTHORIZED) { +- spin_lock_bh(&ar->ab->base_lock); +- +- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); +- if (peer) +- peer->is_authorized = true; +- +- spin_unlock_bh(&ar->ab->base_lock); +- +- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { +- ret = ath11k_wmi_set_peer_param(ar, sta->addr, +- arvif->vdev_id, +- WMI_PEER_AUTHORIZE, +- 1); +- if (ret) +- ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", +- sta->addr, arvif->vdev_id, ret); +- } +- } else if (old_state == IEEE80211_STA_AUTHORIZED && +- new_state == IEEE80211_STA_ASSOC) { +- spin_lock_bh(&ar->ab->base_lock); +- +- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); +- if (peer) +- peer->is_authorized = false; +- +- spin_unlock_bh(&ar->ab->base_lock); +- } else if (old_state == IEEE80211_STA_ASSOC && +- new_state == IEEE80211_STA_AUTH && +- (vif->type == NL80211_IFTYPE_AP || +- vif->type == NL80211_IFTYPE_MESH_POINT || +- vif->type == NL80211_IFTYPE_ADHOC)) { +- ret = ath11k_station_disassoc(ar, vif, sta); +- if (ret) +- ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n", +- sta->addr); +- } +- +- mutex_unlock(&ar->conf_mutex); +- return ret; +-} +- + static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +@@ -5192,7 +5039,7 @@ static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool enabled) + { + struct ath11k *ar = hw->priv; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + + if (enabled && !arsta->use_4addr_set) { + ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk); +@@ -5206,7 +5053,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, + u32 changed) + { + struct ath11k *ar = hw->priv; +- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k_peer *peer; + u32 bw, smps; +@@ -6204,7 +6051,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, + } + + if (control->sta) +- arsta = (struct ath11k_sta *)control->sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(control->sta); + + ret = ath11k_dp_tx(ar, arvif, arsta, skb); + if (unlikely(ret)) { +@@ -7546,8 +7393,8 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw, + mutex_unlock(&ar->conf_mutex); + } + +-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, +- struct ieee80211_vif *vif) ++static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw, ++ struct ieee80211_vif *vif) + { + struct ath11k *ar = hw->priv; + struct ath11k_base *ab 
= ar->ab;
+@@ -8228,7 +8075,7 @@ static void ath11k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+ {
+ struct ath11k_vif *arvif = data;
+- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arvif->ar;
+ 
+ spin_lock_bh(&ar->data_lock);
+@@ -8632,7 +8479,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+ {
+- struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ s8 signal;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+@@ -9099,6 +8946,249 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ return 0;
+ }
+ 
++static int ath11k_mac_station_add(struct ath11k *ar,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta)
++{
++ struct ath11k_base *ab = ar->ab;
++ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
++ struct peer_create_params peer_param;
++ int ret;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ ret = ath11k_mac_inc_num_stations(arvif, sta);
++ if (ret) {
++ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
++ ar->max_num_stations);
++ goto exit;
++ }
++
++ /* The driver allows the DEL KEY followed by SET KEY sequence for
++ * group keys only when there are no clients associated. If
++ * firmware has entered the race at all during that window,
++ * reinstalling the same key when the first sta connects will allow
++ * firmware to recover from the race.
++ */
++ if (arvif->num_stations == 1 && arvif->reinstall_group_keys) {
++ ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n",
++ arvif->vdev_id);
++ ret = ath11k_set_group_keys(arvif);
++ if (ret)
++ goto dec_num_station;
++ arvif->reinstall_group_keys = false;
++ }
++
++ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
++ if (!arsta->rx_stats) {
++ ret = -ENOMEM;
++ goto dec_num_station;
++ }
++
++ peer_param.vdev_id = arvif->vdev_id;
++ peer_param.peer_addr = sta->addr;
++ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
++
++ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
++ if (ret) {
++ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
++ sta->addr, arvif->vdev_id);
++ goto free_rx_stats;
++ }
++
++ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
++ sta->addr, arvif->vdev_id);
++
++ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
++ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
++ if (!arsta->tx_stats) {
++ ret = -ENOMEM;
++ goto free_peer;
++ }
++ }
++
++ if (ieee80211_vif_is_mesh(vif)) {
++ ath11k_dbg(ab, ATH11K_DBG_MAC,
++ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
++ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
++ arvif->vdev_id,
++ WMI_PEER_USE_4ADDR, 1);
++ if (ret) {
++ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
++ sta->addr, ret);
++ goto free_tx_stats;
++ }
++ }
++
++ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
++ if (ret) {
++ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
++ sta->addr, arvif->vdev_id, ret);
++ goto free_tx_stats;
++ }
++
++ if (ab->hw_params.vdev_start_delay &&
++ !arvif->is_started &&
++ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
++ ret = ath11k_mac_start_vdev_delay(ar->hw, vif);
++ if (ret) {
++ 
ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); ++ goto free_tx_stats; ++ } ++ } ++ ++ ewma_avg_rssi_init(&arsta->avg_rssi); ++ return 0; ++ ++free_tx_stats: ++ kfree(arsta->tx_stats); ++ arsta->tx_stats = NULL; ++free_peer: ++ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); ++free_rx_stats: ++ kfree(arsta->rx_stats); ++ arsta->rx_stats = NULL; ++dec_num_station: ++ ath11k_mac_dec_num_stations(arvif, sta); ++exit: ++ return ret; ++} ++ ++static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ++ struct ieee80211_vif *vif, ++ struct ieee80211_sta *sta, ++ enum ieee80211_sta_state old_state, ++ enum ieee80211_sta_state new_state) ++{ ++ struct ath11k *ar = hw->priv; ++ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); ++ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); ++ struct ath11k_peer *peer; ++ int ret = 0; ++ ++ /* cancel must be done outside the mutex to avoid deadlock */ ++ if ((old_state == IEEE80211_STA_NONE && ++ new_state == IEEE80211_STA_NOTEXIST)) { ++ cancel_work_sync(&arsta->update_wk); ++ cancel_work_sync(&arsta->set_4addr_wk); ++ } ++ ++ mutex_lock(&ar->conf_mutex); ++ ++ if (old_state == IEEE80211_STA_NOTEXIST && ++ new_state == IEEE80211_STA_NONE) { ++ memset(arsta, 0, sizeof(*arsta)); ++ arsta->arvif = arvif; ++ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; ++ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); ++ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); ++ ++ ret = ath11k_mac_station_add(ar, vif, sta); ++ if (ret) ++ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", ++ sta->addr, arvif->vdev_id); ++ } else if ((old_state == IEEE80211_STA_NONE && ++ new_state == IEEE80211_STA_NOTEXIST)) { ++ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay && ++ vif->type == NL80211_IFTYPE_STATION; ++ ++ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); ++ ++ if (!skip_peer_delete) { ++ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); ++ if (ret) ++ ath11k_warn(ar->ab, ++ "Failed to delete peer: %pM for VDEV: %d\n", ++ sta->addr, arvif->vdev_id); ++ else ++ ath11k_dbg(ar->ab, ++ ATH11K_DBG_MAC, ++ "Removed peer: %pM for VDEV: %d\n", ++ sta->addr, arvif->vdev_id); ++ } ++ ++ ath11k_mac_dec_num_stations(arvif, sta); ++ mutex_lock(&ar->ab->tbl_mtx_lock); ++ spin_lock_bh(&ar->ab->base_lock); ++ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); ++ if (skip_peer_delete && peer) { ++ peer->sta = NULL; ++ } else if (peer && peer->sta == sta) { ++ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", ++ vif->addr, arvif->vdev_id); ++ ath11k_peer_rhash_delete(ar->ab, peer); ++ peer->sta = NULL; ++ list_del(&peer->list); ++ kfree(peer); ++ ar->num_peers--; ++ } ++ spin_unlock_bh(&ar->ab->base_lock); ++ mutex_unlock(&ar->ab->tbl_mtx_lock); ++ ++ kfree(arsta->tx_stats); ++ arsta->tx_stats = NULL; ++ ++ kfree(arsta->rx_stats); ++ arsta->rx_stats = NULL; ++ } else if (old_state == IEEE80211_STA_AUTH && ++ new_state == IEEE80211_STA_ASSOC && ++ (vif->type == NL80211_IFTYPE_AP || ++ vif->type == NL80211_IFTYPE_MESH_POINT || ++ vif->type == NL80211_IFTYPE_ADHOC)) { ++ ret = ath11k_station_assoc(ar, vif, sta, false); ++ if (ret) ++ ath11k_warn(ar->ab, "Failed to associate station: %pM\n", ++ sta->addr); ++ ++ spin_lock_bh(&ar->data_lock); ++ /* Set arsta bw and prev bw */ ++ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); ++ arsta->bw_prev = arsta->bw; ++ spin_unlock_bh(&ar->data_lock); ++ } else if (old_state == IEEE80211_STA_ASSOC && ++ new_state == 
IEEE80211_STA_AUTHORIZED) { ++ spin_lock_bh(&ar->ab->base_lock); ++ ++ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); ++ if (peer) ++ peer->is_authorized = true; ++ ++ spin_unlock_bh(&ar->ab->base_lock); ++ ++ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { ++ ret = ath11k_wmi_set_peer_param(ar, sta->addr, ++ arvif->vdev_id, ++ WMI_PEER_AUTHORIZE, ++ 1); ++ if (ret) ++ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", ++ sta->addr, arvif->vdev_id, ret); ++ } ++ } else if (old_state == IEEE80211_STA_AUTHORIZED && ++ new_state == IEEE80211_STA_ASSOC) { ++ spin_lock_bh(&ar->ab->base_lock); ++ ++ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); ++ if (peer) ++ peer->is_authorized = false; ++ ++ spin_unlock_bh(&ar->ab->base_lock); ++ } else if (old_state == IEEE80211_STA_ASSOC && ++ new_state == IEEE80211_STA_AUTH && ++ (vif->type == NL80211_IFTYPE_AP || ++ vif->type == NL80211_IFTYPE_MESH_POINT || ++ vif->type == NL80211_IFTYPE_ADHOC)) { ++ ret = ath11k_station_disassoc(ar, vif, sta); ++ if (ret) ++ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n", ++ sta->addr); ++ } ++ ++ mutex_unlock(&ar->conf_mutex); ++ return ret; ++} ++ + static const struct ieee80211_ops ath11k_ops = { + .tx = ath11k_mac_op_tx, + .wake_tx_queue = ieee80211_handle_wake_tx_queue, +diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c +index ca719eb3f7f829..6d0126c3930185 100644 +--- a/drivers/net/wireless/ath/ath11k/peer.c ++++ b/drivers/net/wireless/ath/ath11k/peer.c +@@ -446,7 +446,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, + peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; + + if (sta) { +- arsta = (struct ath11k_sta *)sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(sta); + arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) | + FIELD_PREP(HTT_TCL_META_DATA_PEER_ID, + peer->peer_id); +diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c +index 9a829b8282420a..31dbabc9eaf330 100644 +--- a/drivers/net/wireless/ath/ath11k/wmi.c ++++ b/drivers/net/wireless/ath/ath11k/wmi.c +@@ -6452,7 +6452,7 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, + goto exit; + } + +- arsta = (struct ath11k_sta *)sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(sta); + + BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > + ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); +@@ -6540,7 +6540,7 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, + arvif->bssid, + NULL); + if (sta) { +- arsta = (struct ath11k_sta *)sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(sta); + arsta->rssi_beacon = src->beacon_snr; + ath11k_dbg(ab, ATH11K_DBG_WMI, + "stats vdev id %d snr %d\n", +@@ -7469,7 +7469,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, + goto exit; + } + +- arsta = (struct ath11k_sta *)sta->drv_priv; ++ arsta = ath11k_sta_to_arsta(sta); + + spin_lock_bh(&ar->data_lock); + +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +index 00794086cc7c97..bf80675667ba38 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +@@ -392,10 +392,8 @@ void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg) + if (!cfg->btcoex) + return; + +- if (cfg->btcoex->timer_on) { +- cfg->btcoex->timer_on = false; +- timer_shutdown_sync(&cfg->btcoex->timer); +- } ++ 
timer_shutdown_sync(&cfg->btcoex->timer); ++ cfg->btcoex->timer_on = false; + + cancel_work_sync(&cfg->btcoex->work); + +diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c +index b700c213d10c4f..38ad49033d0bad 100644 +--- a/drivers/net/wireless/marvell/libertas/cfg.c ++++ b/drivers/net/wireless/marvell/libertas/cfg.c +@@ -1150,10 +1150,13 @@ static int lbs_associate(struct lbs_private *priv, + /* add SSID TLV */ + rcu_read_lock(); + ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); +- if (ssid_eid) +- pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]); +- else ++ if (ssid_eid) { ++ u32 ssid_len = min(ssid_eid[1], IEEE80211_MAX_SSID_LEN); ++ ++ pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_len); ++ } else { + lbs_deb_assoc("no SSID\n"); ++ } + rcu_read_unlock(); + + /* add DS param TLV */ +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index b7ead0cd004508..69eea0628e670e 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -4316,8 +4316,9 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter) + * additional active scan request for hidden SSIDs on passive channels. + */ + adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a); +- adapter->chan_stats = vmalloc(array_size(sizeof(*adapter->chan_stats), +- adapter->num_in_chan_stats)); ++ adapter->chan_stats = kcalloc(adapter->num_in_chan_stats, ++ sizeof(*adapter->chan_stats), ++ GFP_KERNEL); + + if (!adapter->chan_stats) + return -ENOMEM; +diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c +index 6c60a4c21a3128..685dcab11a488f 100644 +--- a/drivers/net/wireless/marvell/mwifiex/main.c ++++ b/drivers/net/wireless/marvell/mwifiex/main.c +@@ -664,7 +664,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) + goto done; + + err_add_intf: +- vfree(adapter->chan_stats); ++ kfree(adapter->chan_stats); + err_init_chan_scan: + wiphy_unregister(adapter->wiphy); + wiphy_free(adapter->wiphy); +@@ -1481,7 +1481,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) + wiphy_free(adapter->wiphy); + adapter->wiphy = NULL; + +- vfree(adapter->chan_stats); ++ kfree(adapter->chan_stats); + mwifiex_free_cmd_buffers(adapter); + } + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +index 65a5f24e53136b..8ab55fc705f076 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +@@ -1616,8 +1616,8 @@ mt7996_mcu_get_mmps_mode(enum ieee80211_smps_mode smps) + int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, + void *data, u16 version) + { ++ struct uni_header hdr = {}; + struct ra_fixed_rate *req; +- struct uni_header hdr; + struct sk_buff *skb; + struct tlv *tlv; + int len; +@@ -2638,7 +2638,7 @@ int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans) + { + struct { + u8 __rsv[4]; +- } __packed hdr; ++ } __packed hdr = {}; + struct hdr_trans_blacklist *req_blacklist; + struct hdr_trans_en *req_en; + struct sk_buff *skb; +diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c +index 8ef1d06b9bbddb..121d810c8839e5 100644 +--- a/drivers/net/wireless/st/cw1200/sta.c ++++ b/drivers/net/wireless/st/cw1200/sta.c +@@ -1290,7 +1290,7 @@ static void cw1200_do_join(struct cw1200_common *priv) + 
rcu_read_lock(); + ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); + if (ssidie) { +- join.ssid_len = ssidie[1]; ++ join.ssid_len = min(ssidie[1], IEEE80211_MAX_SSID_LEN); + memcpy(join.ssid, &ssidie[2], join.ssid_len); + } + rcu_read_unlock(); +diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c +index 053bb9fac6e3e1..b638731aa5ff2f 100644 +--- a/drivers/pci/msi/msi.c ++++ b/drivers/pci/msi/msi.c +@@ -610,6 +610,9 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc) + if (desc->pci.msi_attrib.can_mask) { + void __iomem *addr = pci_msix_desc_addr(desc); + ++ /* Workaround for SUN NIU insanity, which requires write before read */ ++ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST) ++ writel(0, addr + PCI_MSIX_ENTRY_DATA); + desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); + } + } +diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c +index e613818dc0bc90..25382612e48acb 100644 +--- a/drivers/pcmcia/omap_cf.c ++++ b/drivers/pcmcia/omap_cf.c +@@ -215,6 +215,8 @@ static int __init omap_cf_probe(struct platform_device *pdev) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ return -EINVAL; + + cf = kzalloc(sizeof *cf, GFP_KERNEL); + if (!cf) +diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c +index b04b16496b0c4b..2677b577c1f858 100644 +--- a/drivers/pcmcia/rsrc_iodyn.c ++++ b/drivers/pcmcia/rsrc_iodyn.c +@@ -62,6 +62,9 @@ static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s, + unsigned long min = base; + int ret; + ++ if (!res) ++ return NULL; ++ + data.mask = align - 1; + data.offset = base & data.mask; + +diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c +index bf9d070a44966d..da494fe451baf0 100644 +--- a/drivers/pcmcia/rsrc_nonstatic.c ++++ b/drivers/pcmcia/rsrc_nonstatic.c +@@ -375,7 +375,9 @@ static int do_validate_mem(struct pcmcia_socket *s, + + if (validate && !s->fake_cis) { + /* move it to the validated data set */ +- add_interval(&s_data->mem_db_valid, base, size); ++ ret = add_interval(&s_data->mem_db_valid, base, size); ++ if (ret) ++ return ret; + sub_interval(&s_data->mem_db, base, size); + } + +diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c +index 04686ae1e976bd..6f5437d210a617 100644 +--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c ++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c +@@ -242,6 +242,20 @@ static const struct dmi_system_id fwbug_list[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"), + } + }, ++ { ++ .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10", ++ .driver_data = &quirk_spurious_8042, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"), ++ } ++ }, ++ { ++ .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10", ++ .driver_data = &quirk_spurious_8042, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"), ++ } ++ }, + {} + }; + +diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c +index d41fea53e41e90..502be061cc658c 100644 +--- a/drivers/scsi/lpfc/lpfc_nvmet.c ++++ b/drivers/scsi/lpfc/lpfc_nvmet.c +@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_async_xchg_ctx *ctxp = + container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); +- struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; ++ struct rqb_dmabuf *nvmebuf; + struct lpfc_hba *phba = ctxp->phba; + unsigned long iflag; + +@@ -1251,13 +1251,18 @@ 
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, + lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", + ctxp->oxid, ctxp->size, raw_smp_processor_id()); + ++ spin_lock_irqsave(&ctxp->ctxlock, iflag); ++ nvmebuf = ctxp->rqb_buffer; + if (!nvmebuf) { ++ spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6425 Defer rcv: no buffer oxid x%x: " + "flg %x ste %x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + return; + } ++ ctxp->rqb_buffer = NULL; ++ spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + tgtp = phba->targetport->private; + if (tgtp) +@@ -1265,9 +1270,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, + + /* Free the nvmebuf since a new buffer already replaced it */ + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); +- spin_lock_irqsave(&ctxp->ctxlock, iflag); +- ctxp->rqb_buffer = NULL; +- spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + } + + /** +diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c +index a6773075bfe3ef..0afecda0bfaa38 100644 +--- a/drivers/soc/qcom/mdt_loader.c ++++ b/drivers/soc/qcom/mdt_loader.c +@@ -38,12 +38,14 @@ static bool mdt_header_valid(const struct firmware *fw) + if (phend > fw->size) + return false; + +- if (ehdr->e_shentsize != sizeof(struct elf32_shdr)) +- return false; ++ if (ehdr->e_shentsize || ehdr->e_shnum) { ++ if (ehdr->e_shentsize != sizeof(struct elf32_shdr)) ++ return false; + +- shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff); +- if (shend > fw->size) +- return false; ++ shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff); ++ if (shend > fw->size) ++ return false; ++ } + + return true; + } +diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c +index 7c17b8c0425e3c..bf9b816637d02e 100644 +--- a/drivers/spi/spi-cadence-quadspi.c ++++ b/drivers/spi/spi-cadence-quadspi.c +@@ -1868,8 +1868,6 @@ static int cqspi_probe(struct platform_device *pdev) + goto probe_setup_failed; + } + +- pm_runtime_enable(dev); +- + ret = spi_register_controller(host); + if (ret) { + dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret); +@@ -1879,7 +1877,6 @@ static int cqspi_probe(struct platform_device *pdev) + return 0; + probe_setup_failed: + cqspi_controller_enable(cqspi, 0); +- pm_runtime_disable(dev); + probe_reset_failed: + if (cqspi->is_jh7110) + cqspi_jh7110_disable_clk(pdev, cqspi); +@@ -1901,8 +1898,7 @@ static void cqspi_remove(struct platform_device *pdev) + if (cqspi->rx_chan) + dma_release_channel(cqspi->rx_chan); + +- if (pm_runtime_get_sync(&pdev->dev) >= 0) +- clk_disable(cqspi->clk); ++ clk_disable_unprepare(cqspi->clk); + + if (cqspi->is_jh7110) + cqspi_jh7110_disable_clk(pdev, cqspi); +diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c +index fa899ab2014c6a..8ef82a11ebb0fa 100644 +--- a/drivers/spi/spi-fsl-lpspi.c ++++ b/drivers/spi/spi-fsl-lpspi.c +@@ -3,8 +3,9 @@ + // Freescale i.MX7ULP LPSPI driver + // + // Copyright 2016 Freescale Semiconductor, Inc. 
+-// Copyright 2018 NXP Semiconductors ++// Copyright 2018, 2023, 2025 NXP + ++#include + #include + #include + #include +@@ -70,7 +71,7 @@ + #define DER_TDDE BIT(0) + #define CFGR1_PCSCFG BIT(27) + #define CFGR1_PINCFG (BIT(24)|BIT(25)) +-#define CFGR1_PCSPOL BIT(8) ++#define CFGR1_PCSPOL_MASK GENMASK(11, 8) + #define CFGR1_NOSTALL BIT(3) + #define CFGR1_HOST BIT(0) + #define FSR_TXCOUNT (0xFF) +@@ -82,6 +83,8 @@ + #define TCR_RXMSK BIT(19) + #define TCR_TXMSK BIT(18) + ++#define SR_CLEAR_MASK GENMASK(13, 8) ++ + struct fsl_lpspi_devtype_data { + u8 prescale_max; + }; +@@ -420,7 +423,9 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) + else + temp = CFGR1_PINCFG; + if (fsl_lpspi->config.mode & SPI_CS_HIGH) +- temp |= CFGR1_PCSPOL; ++ temp |= FIELD_PREP(CFGR1_PCSPOL_MASK, ++ BIT(fsl_lpspi->config.chip_select)); ++ + writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1); + + temp = readl(fsl_lpspi->base + IMX7ULP_CR); +@@ -529,14 +534,13 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi) + fsl_lpspi_intctrl(fsl_lpspi, 0); + } + +- /* W1C for all flags in SR */ +- temp = 0x3F << 8; +- writel(temp, fsl_lpspi->base + IMX7ULP_SR); +- + /* Clear FIFO and disable module */ + temp = CR_RRF | CR_RTF; + writel(temp, fsl_lpspi->base + IMX7ULP_CR); + ++ /* W1C for all flags in SR */ ++ writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR); ++ + return 0; + } + +@@ -727,12 +731,10 @@ static int fsl_lpspi_pio_transfer(struct spi_controller *controller, + fsl_lpspi_write_tx_fifo(fsl_lpspi); + + ret = fsl_lpspi_wait_for_completion(controller); +- if (ret) +- return ret; + + fsl_lpspi_reset(fsl_lpspi); + +- return 0; ++ return ret; + } + + static int fsl_lpspi_transfer_one(struct spi_controller *controller, +@@ -780,7 +782,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id) + if (temp_SR & SR_MBF || + readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) { + writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); +- fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); ++ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE | (temp_IER & IER_TDIE)); + return IRQ_HANDLED; + } + +diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c +index 79bac30e79af64..21e357966d2a22 100644 +--- a/drivers/spi/spi-fsl-qspi.c ++++ b/drivers/spi/spi-fsl-qspi.c +@@ -839,6 +839,19 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { + .get_name = fsl_qspi_get_name, + }; + ++static void fsl_qspi_cleanup(void *data) ++{ ++ struct fsl_qspi *q = data; ++ ++ /* disable the hardware */ ++ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); ++ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); ++ ++ fsl_qspi_clk_disable_unprep(q); ++ ++ mutex_destroy(&q->lock); ++} ++ + static int fsl_qspi_probe(struct platform_device *pdev) + { + struct spi_controller *ctlr; +@@ -928,15 +941,16 @@ static int fsl_qspi_probe(struct platform_device *pdev) + + ctlr->dev.of_node = np; + ++ ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); ++ if (ret) ++ goto err_put_ctrl; ++ + ret = devm_spi_register_controller(dev, ctlr); + if (ret) +- goto err_destroy_mutex; ++ goto err_put_ctrl; + + return 0; + +-err_destroy_mutex: +- mutex_destroy(&q->lock); +- + err_disable_clk: + fsl_qspi_clk_disable_unprep(q); + +@@ -947,19 +961,6 @@ static int fsl_qspi_probe(struct platform_device *pdev) + return ret; + } + +-static void fsl_qspi_remove(struct platform_device *pdev) +-{ +- struct fsl_qspi *q = platform_get_drvdata(pdev); +- +- /* disable the hardware */ +- qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); +- 
qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); +- +- fsl_qspi_clk_disable_unprep(q); +- +- mutex_destroy(&q->lock); +-} +- + static int fsl_qspi_suspend(struct device *dev) + { + return 0; +@@ -997,7 +998,6 @@ static struct platform_driver fsl_qspi_driver = { + .pm = &fsl_qspi_pm_ops, + }, + .probe = fsl_qspi_probe, +- .remove_new = fsl_qspi_remove, + }; + module_platform_driver(fsl_qspi_driver); + +diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c +index b8ba360e863edf..927c3d7947f9cf 100644 +--- a/drivers/tee/optee/ffa_abi.c ++++ b/drivers/tee/optee/ffa_abi.c +@@ -653,7 +653,7 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx, + * with a matching configuration. + */ + +-static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev, ++static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev, + const struct ffa_ops *ops) + { + const struct ffa_msg_ops *msg_ops = ops->msg_ops; +@@ -804,7 +804,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) + + ffa_ops = ffa_dev->ops; + +- if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops)) ++ if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops)) + return -EINVAL; + + if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps, +diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c +index 673cf035949483..426b818f2dd795 100644 +--- a/drivers/tee/tee_shm.c ++++ b/drivers/tee/tee_shm.c +@@ -489,9 +489,13 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id); + */ + void tee_shm_put(struct tee_shm *shm) + { +- struct tee_device *teedev = shm->ctx->teedev; ++ struct tee_device *teedev; + bool do_release = false; + ++ if (!shm || !shm->ctx || !shm->ctx->teedev) ++ return; ++ ++ teedev = shm->ctx->teedev; + mutex_lock(&teedev->mutex); + if (refcount_dec_and_test(&shm->refcount)) { + /* +diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c +index 8d0ccf494ba224..603b37ce1eb8e6 100644 +--- a/drivers/thermal/mediatek/lvts_thermal.c ++++ b/drivers/thermal/mediatek/lvts_thermal.c +@@ -67,10 +67,14 @@ + #define LVTS_CALSCALE_CONF 0x300 + #define LVTS_MONINT_CONF 0x8300318C + +-#define LVTS_MONINT_OFFSET_SENSOR0 0xC +-#define LVTS_MONINT_OFFSET_SENSOR1 0x180 +-#define LVTS_MONINT_OFFSET_SENSOR2 0x3000 +-#define LVTS_MONINT_OFFSET_SENSOR3 0x3000000 ++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0 BIT(3) ++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1 BIT(8) ++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2 BIT(13) ++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3 BIT(25) ++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0 BIT(2) ++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1 BIT(7) ++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2 BIT(12) ++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3 BIT(24) + + #define LVTS_INT_SENSOR0 0x0009001F + #define LVTS_INT_SENSOR1 0x001203E0 +@@ -308,23 +312,41 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp) + + static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl) + { +- u32 masks[] = { +- LVTS_MONINT_OFFSET_SENSOR0, +- LVTS_MONINT_OFFSET_SENSOR1, +- LVTS_MONINT_OFFSET_SENSOR2, +- LVTS_MONINT_OFFSET_SENSOR3, ++ static const u32 high_offset_inten_masks[] = { ++ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0, ++ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1, ++ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2, ++ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3, ++ }; ++ static const u32 low_offset_inten_masks[] = { ++ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0, ++ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1, ++ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2, ++ 
LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3, + }; + u32 value = 0; + int i; + + value = readl(LVTS_MONINT(lvts_ctrl->base)); + +- for (i = 0; i < ARRAY_SIZE(masks); i++) { ++ for (i = 0; i < ARRAY_SIZE(high_offset_inten_masks); i++) { + if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh +- && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh) +- value |= masks[i]; +- else +- value &= ~masks[i]; ++ && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh) { ++ /* ++ * The minimum threshold needs to be configured in the ++ * OFFSETL register to get working interrupts, but we ++ * don't actually want to generate interrupts when ++ * crossing it. ++ */ ++ if (lvts_ctrl->low_thresh == -INT_MAX) { ++ value &= ~low_offset_inten_masks[i]; ++ value |= high_offset_inten_masks[i]; ++ } else { ++ value |= low_offset_inten_masks[i] | high_offset_inten_masks[i]; ++ } ++ } else { ++ value &= ~(low_offset_inten_masks[i] | high_offset_inten_masks[i]); ++ } + } + + writel(value, LVTS_MONINT(lvts_ctrl->base)); +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index c4968efc3fc464..a2e471d51a8f0b 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -179,7 +179,7 @@ struct btrfs_inode { + u64 new_delalloc_bytes; + /* + * The offset of the last dir index key that was logged. +- * This is used only for directories. ++ * This is used only for directories. Protected by 'log_mutex'. + */ + u64 last_dir_index_offset; + }; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index ed08d8e5639f59..48b06459bc485a 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -1742,7 +1742,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) + subpage->bitmaps)) { + spin_unlock_irqrestore(&subpage->lock, flags); + spin_unlock(&page->mapping->private_lock); +- bit_start++; ++ bit_start += sectors_per_node; + continue; + } + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 4502a474a81dab..ee5ffeab85bb78 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -8525,6 +8525,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) + ei->last_sub_trans = 0; + ei->logged_trans = 0; + ei->delalloc_bytes = 0; ++ /* new_delalloc_bytes and last_dir_index_offset are in a union. */ + ei->new_delalloc_bytes = 0; + ei->defrag_bytes = 0; + ei->disk_i_size = 0; +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 9439abf415ae36..e5d6bc1bb5e5da 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -3356,6 +3356,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, + return 0; + } + ++static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans, ++ struct btrfs_inode *inode) ++{ ++ bool ret = false; ++ ++ /* ++ * Do this only if ->logged_trans is still 0 to prevent races with ++ * concurrent logging as we may see the inode not logged when ++ * inode_logged() is called but it gets logged after inode_logged() did ++ * not find it in the log tree and we end up setting ->logged_trans to a ++ * value less than trans->transid after the concurrent logging task has ++ * set it to trans->transid. As a consequence, subsequent rename, unlink ++ * and link operations may end up not logging new names and removing old ++ * names from the log. 
++ */
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == 0)
++ inode->logged_trans = trans->transid - 1;
++ else if (inode->logged_trans == trans->transid)
++ ret = true;
++ spin_unlock(&inode->lock);
++
++ return ret;
++}
++
+ /*
+ * Check if an inode was logged in the current transaction. This correctly deals
+ * with the case where the inode was logged but has a logged_trans of 0, which
+@@ -3373,15 +3398,32 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ struct btrfs_key key;
+ int ret;
+ 
+- if (inode->logged_trans == trans->transid)
++ /*
++ * Quick lockless call, since once ->logged_trans is set to the current
++ * transaction, we never set it to a lower value anywhere else.
++ */
++ if (data_race(inode->logged_trans) == trans->transid)
+ return 1;
+ 
+ /*
+- * If logged_trans is not 0, then we know the inode logged was not logged
+- * in this transaction, so we can return false right away.
++ * If logged_trans is not 0 and not trans->transid, then we know the
++ * inode was not logged in this transaction, so we can return false
++ * right away. We take the lock to avoid a race caused by load/store
++ * tearing with a concurrent btrfs_log_inode() call or a concurrent task
++ * in this function further below - an update to trans->transid can be
++ * torn into two 32-bit updates, for example, in which case we could
++ * see a positive value that is not trans->transid and assume the inode
++ * was not logged when it was.
+ */
+- if (inode->logged_trans > 0)
++ spin_lock(&inode->lock);
++ if (inode->logged_trans == trans->transid) {
++ spin_unlock(&inode->lock);
++ return 1;
++ } else if (inode->logged_trans > 0) {
++ spin_unlock(&inode->lock);
+ return 0;
++ }
++ spin_unlock(&inode->lock);
+ 
+ /*
+ * If no log tree was created for this root in this transaction, then
+@@ -3390,10 +3432,8 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * transaction's ID, to avoid the search below in a future call in case
+ * a log tree gets created after this.
+ */
+- if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
+- inode->logged_trans = trans->transid - 1;
+- return 0;
+- }
++ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
++ return mark_inode_as_not_logged(trans, inode);
+ 
+ /*
+ * We have a log tree and the inode's logged_trans is 0. We can't tell
+@@ -3447,8 +3487,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * Set logged_trans to a value greater than 0 and less then the
+ * current transaction to avoid doing the search in future calls.
+ */
+- inode->logged_trans = trans->transid - 1;
+- return 0;
++ return mark_inode_as_not_logged(trans, inode);
+ }
+ 
+ /*
+@@ -3456,20 +3495,9 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ * the current transacion's ID, to avoid future tree searches as long as
+ * the inode is not evicted again.
+ */
++ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
+-
+- /*
+- * If it's a directory, then we must set last_dir_index_offset to the
+- * maximum possible value, so that the next attempt to log the inode does
+- * not skip checking if dir index keys found in modified subvolume tree
+- * leaves have been logged before, otherwise it would result in attempts
+- * to insert duplicate dir index keys in the log tree. This must be done
+- * because last_dir_index_offset is an in-memory only field, not persisted
+- * in the inode item or any other on-disk structure, so its value is lost
+- * once the inode is evicted. 
+- */ +- if (S_ISDIR(inode->vfs_inode.i_mode)) +- inode->last_dir_index_offset = (u64)-1; ++ spin_unlock(&inode->lock); + + return 1; + } +@@ -4041,7 +4069,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, + + /* + * If the inode was logged before and it was evicted, then its +- * last_dir_index_offset is (u64)-1, so we don't the value of the last index ++ * last_dir_index_offset is 0, so we don't know the value of the last index + * key offset. If that's the case, search for it and update the inode. This + * is to avoid lookups in the log tree every time we try to insert a dir index + * key from a leaf changed in the current transaction, and to allow us to always +@@ -4057,7 +4085,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode, + + lockdep_assert_held(&inode->log_mutex); + +- if (inode->last_dir_index_offset != (u64)-1) ++ if (inode->last_dir_index_offset != 0) + return 0; + + if (!ctx->logged_before) { +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index 0a498bc60f5573..ed110568d6127f 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -2536,10 +2536,6 @@ void __mark_inode_dirty(struct inode *inode, int flags) + wakeup_bdi = inode_io_list_move_locked(inode, wb, + dirty_list); + +- spin_unlock(&wb->list_lock); +- spin_unlock(&inode->i_lock); +- trace_writeback_dirty_inode_enqueue(inode); +- + /* + * If this is the first dirty inode for this bdi, + * we have to wake-up the corresponding bdi thread +@@ -2549,6 +2545,11 @@ void __mark_inode_dirty(struct inode *inode, int flags) + if (wakeup_bdi && + (wb->bdi->capabilities & BDI_CAP_WRITEBACK)) + wb_wakeup_delayed(wb); ++ ++ spin_unlock(&wb->list_lock); ++ spin_unlock(&inode->i_lock); ++ trace_writeback_dirty_inode_enqueue(inode); ++ + return; + } + } +diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c +index 999111bfc27178..c561a8a6493e7c 100644 +--- a/fs/ocfs2/inode.c ++++ b/fs/ocfs2/inode.c +@@ -1205,6 +1205,9 @@ static void ocfs2_clear_inode(struct inode *inode) + * the journal is flushed before journal shutdown. Thus it is safe to + * have inodes get cleaned up after journal shutdown. 
+ */ ++ if (!osb->journal) ++ return; ++ + jbd2_journal_release_jbd_inode(osb->journal->j_journal, + &oi->ip_jinode); + } +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index 2187d9ca351ced..db3f2c6abc162a 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -362,6 +362,25 @@ static const struct inode_operations proc_dir_inode_operations = { + .setattr = proc_notify_change, + }; + ++static void pde_set_flags(struct proc_dir_entry *pde) ++{ ++ const struct proc_ops *proc_ops = pde->proc_ops; ++ ++ if (!proc_ops) ++ return; ++ ++ if (proc_ops->proc_flags & PROC_ENTRY_PERMANENT) ++ pde->flags |= PROC_ENTRY_PERMANENT; ++ if (proc_ops->proc_read_iter) ++ pde->flags |= PROC_ENTRY_proc_read_iter; ++#ifdef CONFIG_COMPAT ++ if (proc_ops->proc_compat_ioctl) ++ pde->flags |= PROC_ENTRY_proc_compat_ioctl; ++#endif ++ if (proc_ops->proc_lseek) ++ pde->flags |= PROC_ENTRY_proc_lseek; ++} ++ + /* returns the registered entry, or frees dp and returns NULL on failure */ + struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, + struct proc_dir_entry *dp) +@@ -369,6 +388,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, + if (proc_alloc_inum(&dp->low_ino)) + goto out_free_entry; + ++ pde_set_flags(dp); ++ + write_lock(&proc_subdir_lock); + dp->parent = dir; + if (pde_subdir_insert(dir, dp) == false) { +@@ -557,20 +578,6 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, + return p; + } + +-static void pde_set_flags(struct proc_dir_entry *pde) +-{ +- if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) +- pde->flags |= PROC_ENTRY_PERMANENT; +- if (pde->proc_ops->proc_read_iter) +- pde->flags |= PROC_ENTRY_proc_read_iter; +-#ifdef CONFIG_COMPAT +- if (pde->proc_ops->proc_compat_ioctl) +- pde->flags |= PROC_ENTRY_proc_compat_ioctl; +-#endif +- if (pde->proc_ops->proc_lseek) +- pde->flags |= PROC_ENTRY_proc_lseek; +-} +- + struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, + struct proc_dir_entry *parent, + const struct proc_ops *proc_ops, void *data) +@@ -581,7 +588,6 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, + if (!p) + return NULL; + p->proc_ops = proc_ops; +- pde_set_flags(p); + return proc_register(parent, p); + } + EXPORT_SYMBOL(proc_create_data); +@@ -632,7 +638,6 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, + p->proc_ops = &proc_seq_ops; + p->seq_ops = ops; + p->state_size = state_size; +- pde_set_flags(p); + return proc_register(parent, p); + } + EXPORT_SYMBOL(proc_create_seq_private); +@@ -663,7 +668,6 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, + return NULL; + p->proc_ops = &proc_single_ops; + p->single_show = show; +- pde_set_flags(p); + return proc_register(parent, p); + } + EXPORT_SYMBOL(proc_create_single_data); +diff --git a/fs/smb/client/cifs_unicode.c b/fs/smb/client/cifs_unicode.c +index 4cc6e0896fad37..f8659d36793f17 100644 +--- a/fs/smb/client/cifs_unicode.c ++++ b/fs/smb/client/cifs_unicode.c +@@ -629,6 +629,9 @@ cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len, + int len; + __le16 *dst; + ++ if (!src) ++ return NULL; ++ + len = cifs_local_to_utf16_bytes(src, maxlen, cp); + len += 2; /* NULL */ + dst = kmalloc(len, GFP_KERNEL); +diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h +index 2331cd8174fe3f..684c4822f76a3f 100644 +--- a/include/linux/bpf-cgroup.h ++++ b/include/linux/bpf-cgroup.h +@@ -72,9 +72,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type 
attach_type) + extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE]; + #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype]) + +-#define for_each_cgroup_storage_type(stype) \ +- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) +- + struct bpf_cgroup_storage_map; + + struct bpf_storage_buffer { +@@ -500,8 +497,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, + #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) ({ 0; }) + +-#define for_each_cgroup_storage_type(stype) for (; false; ) +- + #endif /* CONFIG_CGROUP_BPF */ + + #endif /* _BPF_CGROUP_H */ +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index 17de12a98f858a..83da9c81fa86ad 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -194,6 +194,20 @@ enum btf_field_type { + BPF_REFCOUNT = (1 << 8), + }; + ++enum bpf_cgroup_storage_type { ++ BPF_CGROUP_STORAGE_SHARED, ++ BPF_CGROUP_STORAGE_PERCPU, ++ __BPF_CGROUP_STORAGE_MAX ++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX ++}; ++ ++#ifdef CONFIG_CGROUP_BPF ++# define for_each_cgroup_storage_type(stype) \ ++ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) ++#else ++# define for_each_cgroup_storage_type(stype) for (; false; ) ++#endif /* CONFIG_CGROUP_BPF */ ++ + typedef void (*btf_dtor_kfunc_t)(void *); + + struct btf_field_kptr { +@@ -244,6 +258,19 @@ struct bpf_list_node_kern { + void *owner; + } __attribute__((aligned(8))); + ++/* 'Ownership' of program-containing map is claimed by the first program ++ * that is going to use this map or by the first program which FD is ++ * stored in the map to make sure that all callers and callees have the ++ * same prog type, JITed flag and xdp_has_frags flag. ++ */ ++struct bpf_map_owner { ++ enum bpf_prog_type type; ++ bool jited; ++ bool xdp_has_frags; ++ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE]; ++ const struct btf_type *attach_func_proto; ++}; ++ + struct bpf_map { + /* The first two cachelines with read-mostly members of which some + * are also accessed in fast-path (e.g. ops, max_entries). +@@ -282,24 +309,15 @@ struct bpf_map { + }; + struct mutex freeze_mutex; + atomic64_t writecnt; +- /* 'Ownership' of program-containing map is claimed by the first program +- * that is going to use this map or by the first program which FD is +- * stored in the map to make sure that all callers and callees have the +- * same prog type, JITed flag and xdp_has_frags flag. +- */ +- struct { +- const struct btf_type *attach_func_proto; +- spinlock_t lock; +- enum bpf_prog_type type; +- bool jited; +- bool xdp_has_frags; +- } owner; ++ spinlock_t owner_lock; ++ struct bpf_map_owner *owner; + bool bypass_spec_v1; + bool frozen; /* write-once; write-protected by freeze_mutex */ + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + atomic64_t sleepable_refcnt; + s64 __percpu *elem_count; ++ u64 cookie; /* write-once */ + }; + + static inline const char *btf_field_type_name(enum btf_field_type type) +@@ -994,14 +1012,6 @@ struct bpf_prog_offload { + u32 jited_len; + }; + +-enum bpf_cgroup_storage_type { +- BPF_CGROUP_STORAGE_SHARED, +- BPF_CGROUP_STORAGE_PERCPU, +- __BPF_CGROUP_STORAGE_MAX +-}; +- +-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX +- + /* The longest tracepoint has 12 args. 
+ * See include/trace/bpf_probe.h + */ +@@ -1811,6 +1821,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags) + (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); + } + ++static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map) ++{ ++ return kzalloc(sizeof(*map->owner), GFP_ATOMIC); ++} ++ ++static inline void bpf_map_owner_free(struct bpf_map *map) ++{ ++ kfree(map->owner); ++} ++ + struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; +diff --git a/include/linux/pci.h b/include/linux/pci.h +index ac5bd1718af241..0511f6f9a4e6ad 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -245,6 +245,8 @@ enum pci_dev_flags { + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11), + /* Device does honor MSI masking despite saying otherwise */ + PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12), ++ /* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */ ++ PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13), + }; + + enum pci_irq_reroute_variant { +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h +index e2c9a0c259df3b..e42388b6998b17 100644 +--- a/include/linux/pgtable.h ++++ b/include/linux/pgtable.h +@@ -1465,6 +1465,22 @@ static inline int pmd_protnone(pmd_t pmd) + } + #endif /* CONFIG_NUMA_BALANCING */ + ++/* ++ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values ++ * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() ++ * needs to be called. ++ */ ++#ifndef ARCH_PAGE_TABLE_SYNC_MASK ++#define ARCH_PAGE_TABLE_SYNC_MASK 0 ++#endif ++ ++/* ++ * There is no default implementation for arch_sync_kernel_mappings(). It is ++ * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK ++ * is 0. ++ */ ++void arch_sync_kernel_mappings(unsigned long start, unsigned long end); ++ + #endif /* CONFIG_MMU */ + + #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h +index c720be70c8ddde..897f2109f6ada8 100644 +--- a/include/linux/vmalloc.h ++++ b/include/linux/vmalloc.h +@@ -173,22 +173,6 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, + extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, + unsigned long pgoff); + +-/* +- * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values +- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() +- * needs to be called. +- */ +-#ifndef ARCH_PAGE_TABLE_SYNC_MASK +-#define ARCH_PAGE_TABLE_SYNC_MASK 0 +-#endif +- +-/* +- * There is no default implementation for arch_sync_kernel_mappings(). It is +- * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK +- * is 0. +- */ +-void arch_sync_kernel_mappings(unsigned long start, unsigned long end); +- + /* + * Lowlevel-APIs (not for driver use!) 
+ */ +diff --git a/include/net/netlink.h b/include/net/netlink.h +index 8a7cd1170e1f7b..aba2b162a2260b 100644 +--- a/include/net/netlink.h ++++ b/include/net/netlink.h +@@ -128,6 +128,8 @@ + * nla_len(nla) length of attribute payload + * + * Attribute Payload Access for Basic Types: ++ * nla_get_uint(nla) get payload for a uint attribute ++ * nla_get_sint(nla) get payload for a sint attribute + * nla_get_u8(nla) get payload for a u8 attribute + * nla_get_u16(nla) get payload for a u16 attribute + * nla_get_u32(nla) get payload for a u32 attribute +@@ -183,6 +185,8 @@ enum { + NLA_REJECT, + NLA_BE16, + NLA_BE32, ++ NLA_SINT, ++ NLA_UINT, + __NLA_TYPE_MAX, + }; + +@@ -229,6 +233,7 @@ enum nla_policy_validation { + * nested header (or empty); len field is used if + * nested_policy is also used, for the max attr + * number in the nested policy. ++ * NLA_SINT, NLA_UINT, + * NLA_U8, NLA_U16, + * NLA_U32, NLA_U64, + * NLA_S8, NLA_S16, +@@ -260,12 +265,14 @@ enum nla_policy_validation { + * while an array has the nested attributes at another + * level down and the attribute types directly in the + * nesting don't matter. ++ * NLA_UINT, + * NLA_U8, + * NLA_U16, + * NLA_U32, + * NLA_U64, + * NLA_BE16, + * NLA_BE32, ++ * NLA_SINT, + * NLA_S8, + * NLA_S16, + * NLA_S32, +@@ -280,6 +287,7 @@ enum nla_policy_validation { + * or NLA_POLICY_FULL_RANGE_SIGNED() macros instead. + * Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and + * NLA_POLICY_RANGE() macros. ++ * NLA_UINT, + * NLA_U8, + * NLA_U16, + * NLA_U32, +@@ -288,6 +296,7 @@ enum nla_policy_validation { + * to a struct netlink_range_validation that indicates + * the min/max values. + * Use NLA_POLICY_FULL_RANGE(). ++ * NLA_SINT, + * NLA_S8, + * NLA_S16, + * NLA_S32, +@@ -377,9 +386,11 @@ struct nla_policy { + + #define __NLA_IS_UINT_TYPE(tp) \ + (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \ +- tp == NLA_U64 || tp == NLA_BE16 || tp == NLA_BE32) ++ tp == NLA_U64 || tp == NLA_UINT || \ ++ tp == NLA_BE16 || tp == NLA_BE32) + #define __NLA_IS_SINT_TYPE(tp) \ +- (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64) ++ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64 || \ ++ tp == NLA_SINT) + + #define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) + #define NLA_ENSURE_UINT_TYPE(tp) \ +@@ -1357,6 +1368,22 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) + return nla_put(skb, attrtype, sizeof(u32), &tmp); + } + ++/** ++ * nla_put_uint - Add a variable-size unsigned int to a socket buffer ++ * @skb: socket buffer to add attribute to ++ * @attrtype: attribute type ++ * @value: numeric value ++ */ ++static inline int nla_put_uint(struct sk_buff *skb, int attrtype, u64 value) ++{ ++ u64 tmp64 = value; ++ u32 tmp32 = value; ++ ++ if (tmp64 == tmp32) ++ return nla_put_u32(skb, attrtype, tmp32); ++ return nla_put(skb, attrtype, sizeof(u64), &tmp64); ++} ++ + /** + * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to +@@ -1511,6 +1538,22 @@ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, + return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr); + } + ++/** ++ * nla_put_sint - Add a variable-size signed int to a socket buffer ++ * @skb: socket buffer to add attribute to ++ * @attrtype: attribute type ++ * @value: numeric value ++ */ ++static inline int nla_put_sint(struct sk_buff *skb, int attrtype, s64 value) ++{ ++ s64 tmp64 = value; ++ s32 tmp32 = value; ++ ++ if (tmp64 == tmp32) ++ return 
nla_put_s32(skb, attrtype, tmp32); ++ return nla_put(skb, attrtype, sizeof(s64), &tmp64); ++} ++ + /** + * nla_put_string - Add a string netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to +@@ -1667,6 +1710,17 @@ static inline u64 nla_get_u64(const struct nlattr *nla) + return tmp; + } + ++/** ++ * nla_get_uint - return payload of uint attribute ++ * @nla: uint netlink attribute ++ */ ++static inline u64 nla_get_uint(const struct nlattr *nla) ++{ ++ if (nla_len(nla) == sizeof(u32)) ++ return nla_get_u32(nla); ++ return nla_get_u64(nla); ++} ++ + /** + * nla_get_be64 - return payload of __be64 attribute + * @nla: __be64 netlink attribute +@@ -1729,6 +1783,17 @@ static inline s64 nla_get_s64(const struct nlattr *nla) + return tmp; + } + ++/** ++ * nla_get_sint - return payload of uint attribute ++ * @nla: uint netlink attribute ++ */ ++static inline s64 nla_get_sint(const struct nlattr *nla) ++{ ++ if (nla_len(nla) == sizeof(s32)) ++ return nla_get_s32(nla); ++ return nla_get_s64(nla); ++} ++ + /** + * nla_get_flag - return payload of flag attribute + * @nla: flag netlink attribute +diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h +index e2ae82e3f9f718..f87aaf28a6491d 100644 +--- a/include/uapi/linux/netlink.h ++++ b/include/uapi/linux/netlink.h +@@ -298,6 +298,8 @@ struct nla_bitfield32 { + * entry has attributes again, the policy for those inner ones + * and the corresponding maxtype may be specified. + * @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute ++ * @NL_ATTR_TYPE_SINT: 32-bit or 64-bit signed attribute, aligned to 4B ++ * @NL_ATTR_TYPE_UINT: 32-bit or 64-bit unsigned attribute, aligned to 4B + */ + enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID, +@@ -322,6 +324,9 @@ enum netlink_attribute_type { + NL_ATTR_TYPE_NESTED_ARRAY, + + NL_ATTR_TYPE_BITFIELD32, ++ ++ NL_ATTR_TYPE_SINT, ++ NL_ATTR_TYPE_UINT, + }; + + /** +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index 5eaaf95048abc1..3618be05fc3527 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -2262,28 +2262,44 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map, + const struct bpf_prog *fp) + { + enum bpf_prog_type prog_type = resolve_prog_type(fp); +- bool ret; + struct bpf_prog_aux *aux = fp->aux; ++ enum bpf_cgroup_storage_type i; ++ bool ret = false; ++ u64 cookie; + + if (fp->kprobe_override) +- return false; ++ return ret; + +- spin_lock(&map->owner.lock); +- if (!map->owner.type) { +- /* There's no owner yet where we could check for +- * compatibility. +- */ +- map->owner.type = prog_type; +- map->owner.jited = fp->jited; +- map->owner.xdp_has_frags = aux->xdp_has_frags; +- map->owner.attach_func_proto = aux->attach_func_proto; ++ spin_lock(&map->owner_lock); ++ /* There's no owner yet where we could check for compatibility. */ ++ if (!map->owner) { ++ map->owner = bpf_map_owner_alloc(map); ++ if (!map->owner) ++ goto err; ++ map->owner->type = prog_type; ++ map->owner->jited = fp->jited; ++ map->owner->xdp_has_frags = aux->xdp_has_frags; ++ map->owner->attach_func_proto = aux->attach_func_proto; ++ for_each_cgroup_storage_type(i) { ++ map->owner->storage_cookie[i] = ++ aux->cgroup_storage[i] ? 
++ aux->cgroup_storage[i]->cookie : 0; ++ } + ret = true; + } else { +- ret = map->owner.type == prog_type && +- map->owner.jited == fp->jited && +- map->owner.xdp_has_frags == aux->xdp_has_frags; ++ ret = map->owner->type == prog_type && ++ map->owner->jited == fp->jited && ++ map->owner->xdp_has_frags == aux->xdp_has_frags; ++ for_each_cgroup_storage_type(i) { ++ if (!ret) ++ break; ++ cookie = aux->cgroup_storage[i] ? ++ aux->cgroup_storage[i]->cookie : 0; ++ ret = map->owner->storage_cookie[i] == cookie || ++ !cookie; ++ } + if (ret && +- map->owner.attach_func_proto != aux->attach_func_proto) { ++ map->owner->attach_func_proto != aux->attach_func_proto) { + switch (prog_type) { + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: +@@ -2296,8 +2312,8 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map, + } + } + } +- spin_unlock(&map->owner.lock); +- ++err: ++ spin_unlock(&map->owner_lock); + return ret; + } + +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index b66349f892f25e..98f3f206d112e1 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -50,6 +51,7 @@ + #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) + + DEFINE_PER_CPU(int, bpf_prog_active); ++DEFINE_COOKIE(bpf_map_cookie); + static DEFINE_IDR(prog_idr); + static DEFINE_SPINLOCK(prog_idr_lock); + static DEFINE_IDR(map_idr); +@@ -696,6 +698,7 @@ static void bpf_map_free_deferred(struct work_struct *work) + + security_bpf_map_free(map); + bpf_map_release_memcg(map); ++ bpf_map_owner_free(map); + /* implementation dependent freeing */ + map->ops->map_free(map); + /* Delay freeing of btf_record for maps, as map_free +@@ -713,7 +716,6 @@ static void bpf_map_free_deferred(struct work_struct *work) + */ + btf_put(btf); + } +- + static void bpf_map_put_uref(struct bpf_map *map) + { + if (atomic64_dec_and_test(&map->usercnt)) { +@@ -805,12 +807,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) + struct bpf_map *map = filp->private_data; + u32 type = 0, jited = 0; + +- if (map_type_contains_progs(map)) { +- spin_lock(&map->owner.lock); +- type = map->owner.type; +- jited = map->owner.jited; +- spin_unlock(&map->owner.lock); ++ spin_lock(&map->owner_lock); ++ if (map->owner) { ++ type = map->owner->type; ++ jited = map->owner->jited; + } ++ spin_unlock(&map->owner_lock); + + seq_printf(m, + "map_type:\t%u\n" +@@ -1253,10 +1255,14 @@ static int map_create(union bpf_attr *attr) + if (err < 0) + goto free_map; + ++ preempt_disable(); ++ map->cookie = gen_cookie_next(&bpf_map_cookie); ++ preempt_enable(); ++ + atomic64_set(&map->refcnt, 1); + atomic64_set(&map->usercnt, 1); + mutex_init(&map->freeze_mutex); +- spin_lock_init(&map->owner.lock); ++ spin_lock_init(&map->owner_lock); + + if (attr->btf_key_type_id || attr->btf_value_type_id || + /* Even the map's value is a kernel's struct, +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index c61698cff0f3a8..b87426b74eec28 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -2140,6 +2140,8 @@ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) + goto unlock; + + hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp); ++ if (!hop_masks) ++ goto unlock; + hop = hop_masks - k.masks; + + ret = hop ? 
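The __bpf_prog_map_compatible() rework above replaces the owner fields embedded in struct bpf_map with a separately allocated bpf_map_owner that is claimed, under map->owner_lock, by the first program to use the map; later programs are only compared against the recorded properties (including the new cgroup-storage cookies). A minimal standalone sketch of that claim-or-compare pattern follows — a pthread mutex and a pared-down owner_info stand in for the kernel's spinlock and struct bpf_map_owner, and every name in it is illustrative rather than the kernel API:

/*
 * Illustrative sketch only: pthread_mutex_t stands in for the kernel
 * spinlock, and owner_info is a pared-down stand-in for bpf_map_owner.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

struct owner_info {
	int prog_type;
	bool jited;
};

struct map_state {
	pthread_mutex_t lock;     /* plays the role of map->owner_lock */
	struct owner_info *owner; /* NULL until the first user claims it */
};

static bool map_compatible(struct map_state *map, int prog_type, bool jited)
{
	bool ret = false;

	pthread_mutex_lock(&map->lock);
	if (!map->owner) {
		/* First user: allocate the owner record and claim the map. */
		map->owner = calloc(1, sizeof(*map->owner));
		if (!map->owner)
			goto out;	/* allocation failed: reject */
		map->owner->prog_type = prog_type;
		map->owner->jited = jited;
		ret = true;
	} else {
		/* Later users must match what the first user recorded. */
		ret = map->owner->prog_type == prog_type &&
		      map->owner->jited == jited;
	}
out:
	pthread_mutex_unlock(&map->lock);
	return ret;
}

In the kernel version the allocation is GFP_ATOMIC because it happens under a spinlock, and the per-type storage cookies extend the same comparison to the programs' cgroup storage maps.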
+diff --git a/lib/nlattr.c b/lib/nlattr.c +index ba698a097fc810..0319e811bb10a3 100644 +--- a/lib/nlattr.c ++++ b/lib/nlattr.c +@@ -138,6 +138,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt, + range->max = U32_MAX; + break; + case NLA_U64: ++ case NLA_UINT: + case NLA_MSECS: + range->max = U64_MAX; + break; +@@ -187,6 +188,9 @@ static int nla_validate_range_unsigned(const struct nla_policy *pt, + case NLA_U64: + value = nla_get_u64(nla); + break; ++ case NLA_UINT: ++ value = nla_get_uint(nla); ++ break; + case NLA_MSECS: + value = nla_get_u64(nla); + break; +@@ -252,6 +256,7 @@ void nla_get_range_signed(const struct nla_policy *pt, + range->max = S32_MAX; + break; + case NLA_S64: ++ case NLA_SINT: + range->min = S64_MIN; + range->max = S64_MAX; + break; +@@ -299,6 +304,9 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt, + case NLA_S64: + value = nla_get_s64(nla); + break; ++ case NLA_SINT: ++ value = nla_get_sint(nla); ++ break; + default: + return -EINVAL; + } +@@ -324,6 +332,7 @@ static int nla_validate_int_range(const struct nla_policy *pt, + case NLA_U16: + case NLA_U32: + case NLA_U64: ++ case NLA_UINT: + case NLA_MSECS: + case NLA_BINARY: + case NLA_BE16: +@@ -333,6 +342,7 @@ static int nla_validate_int_range(const struct nla_policy *pt, + case NLA_S16: + case NLA_S32: + case NLA_S64: ++ case NLA_SINT: + return nla_validate_int_range_signed(pt, nla, extack); + default: + WARN_ON(1); +@@ -359,6 +369,9 @@ static int nla_validate_mask(const struct nla_policy *pt, + case NLA_U64: + value = nla_get_u64(nla); + break; ++ case NLA_UINT: ++ value = nla_get_uint(nla); ++ break; + case NLA_BE16: + value = ntohs(nla_get_be16(nla)); + break; +@@ -437,6 +450,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype, + goto out_err; + break; + ++ case NLA_SINT: ++ case NLA_UINT: ++ if (attrlen != sizeof(u32) && attrlen != sizeof(u64)) { ++ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, ++ "invalid attribute length"); ++ return -EINVAL; ++ } ++ break; ++ + case NLA_BITFIELD32: + if (attrlen != sizeof(struct nla_bitfield32)) + goto out_err; +diff --git a/mm/slub.c b/mm/slub.c +index d2544c88a5c43c..400563c45266e6 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -771,19 +771,19 @@ static struct track *get_track(struct kmem_cache *s, void *object, + } + + #ifdef CONFIG_STACKDEPOT +-static noinline depot_stack_handle_t set_track_prepare(void) ++static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) + { + depot_stack_handle_t handle; + unsigned long entries[TRACK_ADDRS_COUNT]; + unsigned int nr_entries; + + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); +- handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); ++ handle = stack_depot_save(entries, nr_entries, gfp_flags); + + return handle; + } + #else +-static inline depot_stack_handle_t set_track_prepare(void) ++static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) + { + return 0; + } +@@ -805,9 +805,9 @@ static void set_track_update(struct kmem_cache *s, void *object, + } + + static __always_inline void set_track(struct kmem_cache *s, void *object, +- enum track_item alloc, unsigned long addr) ++ enum track_item alloc, unsigned long addr, gfp_t gfp_flags) + { +- depot_stack_handle_t handle = set_track_prepare(); ++ depot_stack_handle_t handle = set_track_prepare(gfp_flags); + + set_track_update(s, object, alloc, addr, handle); + } +@@ -988,7 +988,12 @@ static void object_err(struct kmem_cache *s, struct slab *slab, + return; + + slab_bug(s, "%s", reason); +- 
print_trailer(s, slab, object); ++ if (!object || !check_valid_pointer(s, slab, object)) { ++ print_slab_info(slab); ++ pr_err("Invalid pointer 0x%p\n", object); ++ } else { ++ print_trailer(s, slab, object); ++ } + add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); + } + +@@ -1733,9 +1738,9 @@ static inline bool free_debug_processing(struct kmem_cache *s, + static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} + static inline int check_object(struct kmem_cache *s, struct slab *slab, + void *object, u8 val) { return 1; } +-static inline depot_stack_handle_t set_track_prepare(void) { return 0; } ++static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; } + static inline void set_track(struct kmem_cache *s, void *object, +- enum track_item alloc, unsigned long addr) {} ++ enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {} + static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, + struct slab *slab) {} + static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, +@@ -3223,8 +3228,26 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + pc.slab = &slab; + pc.orig_size = orig_size; + freelist = get_partial(s, node, &pc); +- if (freelist) +- goto check_new_slab; ++ if (freelist) { ++ if (kmem_cache_debug(s)) { ++ /* ++ * For debug caches here we had to go through ++ * alloc_single_from_partial() so just store the ++ * tracking info and return the object. ++ * ++ * Due to disabled preemption we need to disallow ++ * blocking. The flags are further adjusted by ++ * gfp_nested_mask() in stack_depot itself. ++ */ ++ if (s->flags & SLAB_STORE_USER) ++ set_track(s, freelist, TRACK_ALLOC, addr, ++ gfpflags & ~(__GFP_DIRECT_RECLAIM)); ++ ++ return freelist; ++ } ++ ++ goto retry_load_slab; ++ } + + slub_put_cpu_ptr(s->cpu_slab); + slab = new_slab(s, gfpflags, node); +@@ -3244,7 +3267,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + goto new_objects; + + if (s->flags & SLAB_STORE_USER) +- set_track(s, freelist, TRACK_ALLOC, addr); ++ set_track(s, freelist, TRACK_ALLOC, addr, ++ gfpflags & ~(__GFP_DIRECT_RECLAIM)); + + return freelist; + } +@@ -3260,20 +3284,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + + inc_slabs_node(s, slab_nid(slab), slab->objects); + +-check_new_slab: +- +- if (kmem_cache_debug(s)) { +- /* +- * For debug caches here we had to go through +- * alloc_single_from_partial() so just store the tracking info +- * and return the object +- */ +- if (s->flags & SLAB_STORE_USER) +- set_track(s, freelist, TRACK_ALLOC, addr); +- +- return freelist; +- } +- + if (unlikely(!pfmemalloc_match(slab, gfpflags))) { + /* + * For !pfmemalloc_match() case we don't load freelist so that +@@ -3546,8 +3556,12 @@ static noinline void free_to_partial_list( + unsigned long flags; + depot_stack_handle_t handle = 0; + ++ /* ++ * We cannot use GFP_NOWAIT as there are callsites where waking up ++ * kswapd could deadlock ++ */ + if (s->flags & SLAB_STORE_USER) +- handle = set_track_prepare(); ++ handle = set_track_prepare(__GFP_NOWARN); + + spin_lock_irqsave(&n->list_lock, flags); + +diff --git a/net/atm/resources.c b/net/atm/resources.c +index b19d851e1f4439..7c6fdedbcf4e5c 100644 +--- a/net/atm/resources.c ++++ b/net/atm/resources.c +@@ -112,7 +112,9 @@ struct atm_dev *atm_dev_register(const char *type, struct device *parent, + + if (atm_proc_dev_register(dev) < 0) { + pr_err("atm_proc_dev_register failed for dev 
%s\n", type); +- goto out_fail; ++ mutex_unlock(&atm_dev_mutex); ++ kfree(dev); ++ return NULL; + } + + if (atm_register_sysfs(dev, parent) < 0) { +@@ -128,7 +130,7 @@ struct atm_dev *atm_dev_register(const char *type, struct device *parent, + return dev; + + out_fail: +- kfree(dev); ++ put_device(&dev->class_dev); + dev = NULL; + goto out; + } +diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c +index 1cac25aca63784..f2d66af8635957 100644 +--- a/net/ax25/ax25_in.c ++++ b/net/ax25/ax25_in.c +@@ -433,6 +433,10 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, + int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) + { ++ skb = skb_share_check(skb, GFP_ATOMIC); ++ if (!skb) ++ return NET_RX_DROP; ++ + skb_orphan(skb); + + if (!net_eq(dev_net(dev), &init_net)) { +diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c +index 71ebd0284f95d2..0adc783fb83ca2 100644 +--- a/net/batman-adv/network-coding.c ++++ b/net/batman-adv/network-coding.c +@@ -1687,7 +1687,12 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, + + coding_len = ntohs(coded_packet_tmp.coded_len); + +- if (coding_len > skb->len) ++ /* ensure dst buffer is large enough (payload only) */ ++ if (coding_len + h_size > skb->len) ++ return NULL; ++ ++ /* ensure src buffer is large enough (payload only) */ ++ if (coding_len + h_size > nc_packet->skb->len) + return NULL; + + /* Here the magic is reversed: +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index 020f1809fc9946..7f3f700faebc24 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -3354,7 +3354,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev) + * advertising data. This also applies to the case + * where BR/EDR was toggled during the AUTO_OFF phase. 
+ */ +- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || ++ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) { + err = hci_setup_ext_adv_instance_sync(hdev, 0x00); +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c +index 9a906977c8723c..59630dbeda20d6 100644 +--- a/net/bluetooth/l2cap_sock.c ++++ b/net/bluetooth/l2cap_sock.c +@@ -1406,7 +1406,10 @@ static int l2cap_sock_release(struct socket *sock) + if (!sk) + return 0; + ++ lock_sock_nested(sk, L2CAP_NESTING_PARENT); + l2cap_sock_cleanup_listen(sk); ++ release_sock(sk); ++ + bt_sock_unlink(&l2cap_sk_list, sk); + + err = l2cap_sock_shutdown(sock, SHUT_RDWR); +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 2a4958e995f2d9..e6962d693359b3 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -648,9 +648,6 @@ static unsigned int br_nf_local_in(void *priv, + break; + } + +- ct = container_of(nfct, struct nf_conn, ct_general); +- WARN_ON_ONCE(!nf_ct_is_confirmed(ct)); +- + return ret; + } + #endif +diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c +index ea100bd25939b4..0a16c04c4bfc49 100644 +--- a/net/dsa/tag_ksz.c ++++ b/net/dsa/tag_ksz.c +@@ -139,7 +139,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) + + static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev) + { +- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; ++ u8 *tag; ++ ++ if (skb_linearize(skb)) ++ return NULL; ++ ++ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + + return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN); + } +@@ -176,8 +181,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME); + + #define KSZ9477_INGRESS_TAG_LEN 2 + #define KSZ9477_PTP_TAG_LEN 4 +-#define KSZ9477_PTP_TAG_INDICATION 0x80 ++#define KSZ9477_PTP_TAG_INDICATION BIT(7) + ++#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0) + #define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7) + #define KSZ9477_TAIL_TAG_OVERRIDE BIT(9) + #define KSZ9477_TAIL_TAG_LOOKUP BIT(10) +@@ -300,10 +306,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, + + static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev) + { +- /* Tag decoding */ +- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; +- unsigned int port = tag[0] & 7; + unsigned int len = KSZ_EGRESS_TAG_LEN; ++ unsigned int port; ++ u8 *tag; ++ ++ if (skb_linearize(skb)) ++ return NULL; ++ ++ /* Tag decoding */ ++ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; ++ port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M; + + /* Extra 4-bytes PTP timestamp */ + if (tag[0] & KSZ9477_PTP_TAG_INDICATION) { +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index c33b1ecc591e4e..798497c8b1923e 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -336,14 +336,13 @@ static void inetdev_destroy(struct in_device *in_dev) + + static int __init inet_blackhole_dev_init(void) + { +- int err = 0; ++ struct in_device *in_dev; + + rtnl_lock(); +- if (!inetdev_init(blackhole_netdev)) +- err = -ENOMEM; ++ in_dev = inetdev_init(blackhole_netdev); + rtnl_unlock(); + +- return err; ++ return PTR_ERR_OR_ZERO(in_dev); + } + late_initcall(inet_blackhole_dev_init); + +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 94501bb30c431b..b17549c4e5de8a 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -801,11 +801,12 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) + struct 
sk_buff *cloned_skb = NULL; + struct ip_options opts = { 0 }; + enum ip_conntrack_info ctinfo; ++ enum ip_conntrack_dir dir; + struct nf_conn *ct; + __be32 orig_ip; + + ct = nf_ct_get(skb_in, &ctinfo); +- if (!ct || !(ct->status & IPS_SRC_NAT)) { ++ if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) { + __icmp_send(skb_in, type, code, info, &opts); + return; + } +@@ -820,7 +821,8 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) + goto out; + + orig_ip = ip_hdr(skb_in)->saddr; +- ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; ++ dir = CTINFO2DIR(ctinfo); ++ ip_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.ip; + __icmp_send(skb_in, type, code, info, &opts); + ip_hdr(skb_in)->saddr = orig_ip; + out: +diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c +index 9e3574880cb03e..233914b63bdb82 100644 +--- a/net/ipv6/ip6_icmp.c ++++ b/net/ipv6/ip6_icmp.c +@@ -54,11 +54,12 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) + struct inet6_skb_parm parm = { 0 }; + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; ++ enum ip_conntrack_dir dir; + struct in6_addr orig_ip; + struct nf_conn *ct; + + ct = nf_ct_get(skb_in, &ctinfo); +- if (!ct || !(ct->status & IPS_SRC_NAT)) { ++ if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) { + __icmpv6_send(skb_in, type, code, info, &parm); + return; + } +@@ -73,7 +74,8 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) + goto out; + + orig_ip = ipv6_hdr(skb_in)->saddr; +- ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; ++ dir = CTINFO2DIR(ctinfo); ++ ipv6_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.in6; + __icmpv6_send(skb_in, type, code, info, &parm); + ipv6_hdr(skb_in)->saddr = orig_ip; + out: +diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c +index 5f9592fb57add2..805f7376cebe3f 100644 +--- a/net/mctp/af_mctp.c ++++ b/net/mctp/af_mctp.c +@@ -346,7 +346,7 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname, + return 0; + } + +- return -EINVAL; ++ return -ENOPROTOOPT; + } + + static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg) +diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c +index f22691f8385363..10f72b5b4e1ad7 100644 +--- a/net/netfilter/nf_conntrack_helper.c ++++ b/net/netfilter/nf_conntrack_helper.c +@@ -373,7 +373,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) + (cur->tuple.src.l3num == NFPROTO_UNSPEC || + cur->tuple.src.l3num == me->tuple.src.l3num) && + cur->tuple.dst.protonum == me->tuple.dst.protonum) { +- ret = -EEXIST; ++ ret = -EBUSY; + goto out; + } + } +@@ -384,7 +384,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) + hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { + if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, + &mask)) { +- ret = -EEXIST; ++ ret = -EBUSY; + goto out; + } + } +diff --git a/net/netlink/policy.c b/net/netlink/policy.c +index 87e3de0fde8963..ef542a142b9800 100644 +--- a/net/netlink/policy.c ++++ b/net/netlink/policy.c +@@ -229,6 +229,8 @@ int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt) + case NLA_S16: + case NLA_S32: + case NLA_S64: ++ case NLA_SINT: ++ case NLA_UINT: + /* maximum is common, u64 min/max with padding */ + return common + + 2 * (nla_attr_size(0) + nla_attr_size(sizeof(u64))); +@@ -287,6 +289,7 @@ __netlink_policy_dump_write_attr(struct netlink_policy_dump_state *state, + case NLA_U16: + case NLA_U32: + case 
NLA_U64: ++ case NLA_UINT: + case NLA_MSECS: { + struct netlink_range_validation range; + +@@ -296,8 +299,10 @@ __netlink_policy_dump_write_attr(struct netlink_policy_dump_state *state, + type = NL_ATTR_TYPE_U16; + else if (pt->type == NLA_U32) + type = NL_ATTR_TYPE_U32; +- else ++ else if (pt->type == NLA_U64) + type = NL_ATTR_TYPE_U64; ++ else ++ type = NL_ATTR_TYPE_UINT; + + if (pt->validation_type == NLA_VALIDATE_MASK) { + if (nla_put_u64_64bit(skb, NL_POLICY_TYPE_ATTR_MASK, +@@ -319,7 +324,8 @@ __netlink_policy_dump_write_attr(struct netlink_policy_dump_state *state, + case NLA_S8: + case NLA_S16: + case NLA_S32: +- case NLA_S64: { ++ case NLA_S64: ++ case NLA_SINT: { + struct netlink_range_validation_signed range; + + if (pt->type == NLA_S8) +@@ -328,8 +334,10 @@ __netlink_policy_dump_write_attr(struct netlink_policy_dump_state *state, + type = NL_ATTR_TYPE_S16; + else if (pt->type == NLA_S32) + type = NL_ATTR_TYPE_S32; +- else ++ else if (pt->type == NLA_S64) + type = NL_ATTR_TYPE_S64; ++ else ++ type = NL_ATTR_TYPE_SINT; + + nla_get_range_signed(pt, &range); + +diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c +index dbce904c03cf73..4f485b9b31b288 100644 +--- a/net/smc/smc_clc.c ++++ b/net/smc/smc_clc.c +@@ -426,8 +426,6 @@ smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc) + { + struct smc_clc_msg_hdr *hdr = &dclc->hdr; + +- if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D) +- return false; + if (hdr->version == SMC_V1) { + if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline)) + return false; +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c +index 598ac9ead64b72..6df543e083fb34 100644 +--- a/net/smc/smc_ib.c ++++ b/net/smc/smc_ib.c +@@ -743,6 +743,9 @@ bool smc_ib_is_sg_need_sync(struct smc_link *lnk, + unsigned int i; + bool ret = false; + ++ if (!lnk->smcibdev->ibdev->dma_device) ++ return ret; ++ + /* for now there is just one DMA address */ + for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, + buf_slot->sgt[lnk->link_idx].nents, i) { +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 6db8c9a2a7a2b8..c1d64e25045484 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -1807,7 +1807,8 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, + */ + + f = rcu_access_pointer(new->pub.beacon_ies); +- kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head); ++ if (!new->pub.hidden_beacon_bss) ++ kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head); + return false; + } + +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index 70881782c25c6c..5904c869085c80 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -915,13 +915,16 @@ void __cfg80211_connect_result(struct net_device *dev, + if (!wdev->u.client.ssid_len) { + rcu_read_lock(); + for_each_valid_link(cr, link) { ++ u32 ssid_len; ++ + ssid = ieee80211_bss_get_elem(cr->links[link].bss, + WLAN_EID_SSID); + + if (!ssid || !ssid->datalen) + continue; + +- memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen); ++ ssid_len = min(ssid->datalen, IEEE80211_MAX_SSID_LEN); ++ memcpy(wdev->u.client.ssid, ssid->data, ssid_len); + wdev->u.client.ssid_len = ssid->datalen; + break; + } +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index f2c03fbf892f1b..80c015af09efde 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -1991,6 +1991,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) + static const struct snd_pci_quirk force_connect_list[] = { + SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1), + 
SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1), ++ SND_PCI_QUIRK(0x103c, 0x845a, "HP EliteDesk 800 G4 DM 65W", 1), + SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index d4bc80780a1f91..6aae06223f2664 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10249,6 +10249,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x8e1d, "HP ZBook X Gli 16 G12", ALC236_FIXUP_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x8e3a, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), ++ SND_PCI_QUIRK(0x103c, 0x8e3b, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), +@@ -10632,6 +10635,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1d05, 0x1409, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1d05, 0x300f, "TongFang X6AR5xxY", ALC2XX_FIXUP_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1d05, 0x3019, "TongFang X6FR5xxY", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), + SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c +index f2cce15be4e271..68c82e344d3baf 100644 +--- a/sound/usb/mixer_quirks.c ++++ b/sound/usb/mixer_quirks.c +@@ -3631,9 +3631,11 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, + snd_dragonfly_quirk_db_scale(mixer, cval, kctl); + break; + /* lowest playback value is muted on some devices */ ++ case USB_ID(0x0572, 0x1b09): /* Conexant Systems (Rockwell), Inc. */ + case USB_ID(0x0d8c, 0x000c): /* C-Media */ + case USB_ID(0x0d8c, 0x0014): /* C-Media */ + case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */ ++ case USB_ID(0x2d99, 0x0026): /* HECATE G2 GAMING HEADSET */ + if (strstr(kctl->id.name, "Playback")) + cval->min_mute = 1; + break; +diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile +index d29c9c49e2512a..342e056c8c665a 100644 +--- a/tools/gpio/Makefile ++++ b/tools/gpio/Makefile +@@ -77,8 +77,8 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN) + + clean: + rm -f $(ALL_PROGRAMS) +- rm -f $(OUTPUT)include/linux/gpio.h +- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete ++ rm -rf $(OUTPUT)include ++ find $(or $(OUTPUT),.) 
-name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete + + install: $(ALL_PROGRAMS) + install -d -m 755 $(DESTDIR)$(bindir); \ +diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c +index b94b4f16a60a54..1573d6b6478d28 100644 +--- a/tools/perf/util/bpf-event.c ++++ b/tools/perf/util/bpf-event.c +@@ -289,9 +289,15 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session, + + info_node->info_linear = info_linear; + if (!perf_env__insert_bpf_prog_info(env, info_node)) { +- free(info_linear); ++ /* ++ * Insert failed, likely because of a duplicate event ++ * made by the sideband thread. Ignore synthesizing the ++ * metadata. ++ */ + free(info_node); ++ goto out; + } ++ /* info_linear is now owned by info_node and shouldn't be freed below. */ + info_linear = NULL; + + /* +@@ -447,18 +453,18 @@ int perf_event__synthesize_bpf_events(struct perf_session *session, + return err; + } + +-static void perf_env__add_bpf_info(struct perf_env *env, u32 id) ++static int perf_env__add_bpf_info(struct perf_env *env, u32 id) + { + struct bpf_prog_info_node *info_node; + struct perf_bpil *info_linear; + struct btf *btf = NULL; + u64 arrays; + u32 btf_id; +- int fd; ++ int fd, err = 0; + + fd = bpf_prog_get_fd_by_id(id); + if (fd < 0) +- return; ++ return -EINVAL; + + arrays = 1UL << PERF_BPIL_JITED_KSYMS; + arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS; +@@ -471,6 +477,7 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id) + info_linear = get_bpf_prog_info_linear(fd, arrays); + if (IS_ERR_OR_NULL(info_linear)) { + pr_debug("%s: failed to get BPF program info. aborting\n", __func__); ++ err = PTR_ERR(info_linear); + goto out; + } + +@@ -480,38 +487,46 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id) + if (info_node) { + info_node->info_linear = info_linear; + if (!perf_env__insert_bpf_prog_info(env, info_node)) { ++ pr_debug("%s: duplicate add bpf info request for id %u\n", ++ __func__, btf_id); + free(info_linear); + free(info_node); ++ goto out; + } +- } else ++ } else { + free(info_linear); ++ err = -ENOMEM; ++ goto out; ++ } + + if (btf_id == 0) + goto out; + + btf = btf__load_from_kernel_by_id(btf_id); +- if (libbpf_get_error(btf)) { +- pr_debug("%s: failed to get BTF of id %u, aborting\n", +- __func__, btf_id); +- goto out; ++ if (!btf) { ++ err = -errno; ++ pr_debug("%s: failed to get BTF of id %u %d\n", __func__, btf_id, err); ++ } else { ++ perf_env__fetch_btf(env, btf_id, btf); + } +- perf_env__fetch_btf(env, btf_id, btf); + + out: + btf__free(btf); + close(fd); ++ return err; + } + + static int bpf_event__sb_cb(union perf_event *event, void *data) + { + struct perf_env *env = data; ++ int ret = 0; + + if (event->header.type != PERF_RECORD_BPF_EVENT) + return -1; + + switch (event->bpf.type) { + case PERF_BPF_EVENT_PROG_LOAD: +- perf_env__add_bpf_info(env, event->bpf.id); ++ ret = perf_env__add_bpf_info(env, event->bpf.id); + + case PERF_BPF_EVENT_PROG_UNLOAD: + /* +@@ -525,7 +540,7 @@ static int bpf_event__sb_cb(union perf_event *event, void *data) + break; + } + +- return 0; ++ return ret; + } + + int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env) +diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c +index 0677b58374abf1..59ace394cf3ef9 100644 +--- a/tools/power/cpupower/utils/cpupower-set.c ++++ b/tools/power/cpupower/utils/cpupower-set.c +@@ -62,8 +62,8 @@ int cmd_set(int argc, char **argv) + + params.params = 0; + /* parameter parsing */ +- 
while ((ret = getopt_long(argc, argv, "b:e:m:", +- set_opts, NULL)) != -1) { ++ while ((ret = getopt_long(argc, argv, "b:e:m:t:", ++ set_opts, NULL)) != -1) { + switch (ret) { + case 'b': + if (params.perf_bias) +diff --git a/tools/testing/selftests/net/bind_bhash.c b/tools/testing/selftests/net/bind_bhash.c +index 57ff67a3751eb3..da04b0b19b73ca 100644 +--- a/tools/testing/selftests/net/bind_bhash.c ++++ b/tools/testing/selftests/net/bind_bhash.c +@@ -75,7 +75,7 @@ static void *setup(void *arg) + int *array = (int *)arg; + + for (i = 0; i < MAX_CONNECTIONS; i++) { +- sock_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, setup_addr); ++ sock_fd = bind_socket(SO_REUSEPORT, setup_addr); + if (sock_fd < 0) { + ret = sock_fd; + pthread_exit(&ret); +@@ -103,7 +103,7 @@ int main(int argc, const char *argv[]) + + setup_addr = use_v6 ? setup_addr_v6 : setup_addr_v4; + +- listener_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, setup_addr); ++ listener_fd = bind_socket(SO_REUSEPORT, setup_addr); + if (listen(listener_fd, 100) < 0) { + perror("listen failed"); + return -1; diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.105-106.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.105-106.patch new file mode 100644 index 0000000000..8695b9de35 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.105-106.patch @@ -0,0 +1,744 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 868ec736a9d235..cfa393c51f4d61 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -528,6 +528,7 @@ What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsa + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort ++ /sys/devices/system/cpu/vulnerabilities/vmscape + Date: January 2018 + Contact: Linux kernel mailing list + Description: Information about CPU vulnerabilities +diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst +index d2caa390395e5b..5d6c001b8a988c 100644 +--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -23,3 +23,4 @@ are configurable at compile, boot or run time. + gather_data_sampling + reg-file-data-sampling + indirect-target-selection ++ vmscape +diff --git a/Documentation/admin-guide/hw-vuln/vmscape.rst b/Documentation/admin-guide/hw-vuln/vmscape.rst +new file mode 100644 +index 00000000000000..d9b9a2b6c114c0 +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/vmscape.rst +@@ -0,0 +1,110 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++VMSCAPE ++======= ++ ++VMSCAPE is a vulnerability that may allow a guest to influence the branch ++prediction in host userspace. It particularly affects hypervisors like QEMU. ++ ++Even if a hypervisor may not have any sensitive data like disk encryption keys, ++guest-userspace may be able to attack the guest-kernel using the hypervisor as ++a confused deputy. ++ ++Affected processors ++------------------- ++ ++The following CPU families are affected by VMSCAPE: ++ ++**Intel processors:** ++ - Skylake generation (Parts without Enhanced-IBRS) ++ - Cascade Lake generation - (Parts affected by ITS guest/host separation) ++ - Alder Lake and newer (Parts affected by BHI) ++ ++Note that, BHI affected parts that use BHB clearing software mitigation e.g. ++Icelake are not vulnerable to VMSCAPE. 
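To check whether a given machine falls into one of the affected families listed here and what the kernel decided to do about it, the sysfs vulnerability file documented in the "System information and options" section below can be read directly. A minimal C sketch — the path comes from this document, everything else is illustrative:

#include <stdio.h>

int main(void)
{
	/* Path documented in the "System information and options" section. */
	const char *path = "/sys/devices/system/cpu/vulnerabilities/vmscape";
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout); /* e.g. "Mitigation: IBPB before exit to userspace" */
	fclose(f);
	return 0;
}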
++ ++**AMD processors:** ++ - Zen series (families 0x17, 0x19, 0x1a) ++ ++** Hygon processors:** ++ - Family 0x18 ++ ++Mitigation ++---------- ++ ++Conditional IBPB ++---------------- ++ ++Kernel tracks when a CPU has run a potentially malicious guest and issues an ++IBPB before the first exit to userspace after VM-exit. If userspace did not run ++between VM-exit and the next VM-entry, no IBPB is issued. ++ ++Note that the existing userspace mitigation against Spectre-v2 is effective in ++protecting the userspace. They are insufficient to protect the userspace VMMs ++from a malicious guest. This is because Spectre-v2 mitigations are applied at ++context switch time, while the userspace VMM can run after a VM-exit without a ++context switch. ++ ++Vulnerability enumeration and mitigation is not applied inside a guest. This is ++because nested hypervisors should already be deploying IBPB to isolate ++themselves from nested guests. ++ ++SMT considerations ++------------------ ++ ++When Simultaneous Multi-Threading (SMT) is enabled, hypervisors can be ++vulnerable to cross-thread attacks. For complete protection against VMSCAPE ++attacks in SMT environments, STIBP should be enabled. ++ ++The kernel will issue a warning if SMT is enabled without adequate STIBP ++protection. Warning is not issued when: ++ ++- SMT is disabled ++- STIBP is enabled system-wide ++- Intel eIBRS is enabled (which implies STIBP protection) ++ ++System information and options ++------------------------------ ++ ++The sysfs file showing VMSCAPE mitigation status is: ++ ++ /sys/devices/system/cpu/vulnerabilities/vmscape ++ ++The possible values in this file are: ++ ++ * 'Not affected': ++ ++ The processor is not vulnerable to VMSCAPE attacks. ++ ++ * 'Vulnerable': ++ ++ The processor is vulnerable and no mitigation has been applied. ++ ++ * 'Mitigation: IBPB before exit to userspace': ++ ++ Conditional IBPB mitigation is enabled. The kernel tracks when a CPU has ++ run a potentially malicious guest and issues an IBPB before the first ++ exit to userspace after VM-exit. ++ ++ * 'Mitigation: IBPB on VMEXIT': ++ ++ IBPB is issued on every VM-exit. This occurs when other mitigations like ++ RETBLEED or SRSO are already issuing IBPB on VM-exit. ++ ++Mitigation control on the kernel command line ++---------------------------------------------- ++ ++The mitigation can be controlled via the ``vmscape=`` command line parameter: ++ ++ * ``vmscape=off``: ++ ++ Disable the VMSCAPE mitigation. ++ ++ * ``vmscape=ibpb``: ++ ++ Enable conditional IBPB mitigation (default when CONFIG_MITIGATION_VMSCAPE=y). ++ ++ * ``vmscape=force``: ++ ++ Force vulnerability detection and mitigation even on processors that are ++ not known to be affected. +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index bcfa49019c3f16..60d48ebbc2cb00 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3368,6 +3368,7 @@ + srbds=off [X86,INTEL] + ssbd=force-off [ARM64] + tsx_async_abort=off [X86] ++ vmscape=off [X86] + + Exceptions: + This does not have any effect on +@@ -7074,6 +7075,16 @@ + vmpoff= [KNL,S390] Perform z/VM CP command after power off. + Format: + ++ vmscape= [X86] Controls mitigation for VMscape attacks. ++ VMscape attacks can leak information from a userspace ++ hypervisor to a guest via speculative side-channels. 
++ ++ off - disable the mitigation ++ ibpb - use Indirect Branch Prediction Barrier ++ (IBPB) mitigation (default) ++ force - force vulnerability detection even on ++ unaffected processors ++ + vsyscall= [X86-64] + Controls the behavior of vsyscalls (i.e. calls to + fixed addresses of 0xffffffffff600x00 from legacy +diff --git a/Makefile b/Makefile +index 2b7f67d7b641ce..b934846659eed0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 105 ++SUBLEVEL = 106 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 2b5b7d9a24e98c..37e22efbd1e1e2 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2630,6 +2630,15 @@ config MITIGATION_TSA + security vulnerability on AMD CPUs which can lead to forwarding of + invalid info to subsequent instructions and thus can affect their + timing and thereby cause a leakage. ++ ++config MITIGATION_VMSCAPE ++ bool "Mitigate VMSCAPE" ++ depends on KVM ++ default y ++ help ++ Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security ++ vulnerability on Intel and AMD CPUs that may allow a guest to do ++ Spectre v2 style attacks on userspace hypervisor. + endif + + config ARCH_HAS_ADD_PAGES +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 199441d11fbbab..ae4ea1f9594f71 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -475,6 +475,7 @@ + #define X86_FEATURE_TSA_SQ_NO (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */ + #define X86_FEATURE_TSA_L1_NO (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */ + #define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */ ++#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */ + + /* + * BUG word(s) +@@ -528,4 +529,5 @@ + #define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */ + #define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */ + #define X86_BUG_TSA X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ ++#define X86_BUG_VMSCAPE X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h +index fb2809b20b0ac4..bb0a5ecc807fe7 100644 +--- a/arch/x86/include/asm/entry-common.h ++++ b/arch/x86/include/asm/entry-common.h +@@ -83,6 +83,13 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + * 8 (ia32) bits. 
+ */ + choose_random_kstack_offset(rdtsc()); ++ ++ /* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */ ++ if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) && ++ this_cpu_read(x86_ibpb_exit_to_user)) { ++ indirect_branch_prediction_barrier(); ++ this_cpu_write(x86_ibpb_exit_to_user, false); ++ } + } + #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare + +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 04f5a41c3a04ed..fb469ace38393f 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -559,6 +559,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) + + extern u64 x86_pred_cmd; + ++DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user); ++ + static inline void indirect_branch_prediction_barrier(void) + { + alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 332c6f24280dde..315926ccea0fa3 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -51,6 +51,7 @@ static void __init srso_select_mitigation(void); + static void __init gds_select_mitigation(void); + static void __init its_select_mitigation(void); + static void __init tsa_select_mitigation(void); ++static void __init vmscape_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR without task-specific bits set */ + u64 x86_spec_ctrl_base; +@@ -60,6 +61,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); + DEFINE_PER_CPU(u64, x86_spec_ctrl_current); + EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); + ++/* ++ * Set when the CPU has run a potentially malicious guest. An IBPB will ++ * be needed to before running userspace. That IBPB will flush the branch ++ * predictor content. 
++ */ ++DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user); ++EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user); ++ + u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; + EXPORT_SYMBOL_GPL(x86_pred_cmd); + +@@ -186,6 +195,7 @@ void __init cpu_select_mitigations(void) + gds_select_mitigation(); + its_select_mitigation(); + tsa_select_mitigation(); ++ vmscape_select_mitigation(); + } + + /* +@@ -2182,80 +2192,6 @@ static void __init tsa_select_mitigation(void) + pr_info("%s\n", tsa_strings[tsa_mitigation]); + } + +-void cpu_bugs_smt_update(void) +-{ +- mutex_lock(&spec_ctrl_mutex); +- +- if (sched_smt_active() && unprivileged_ebpf_enabled() && +- spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) +- pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); +- +- switch (spectre_v2_user_stibp) { +- case SPECTRE_V2_USER_NONE: +- break; +- case SPECTRE_V2_USER_STRICT: +- case SPECTRE_V2_USER_STRICT_PREFERRED: +- update_stibp_strict(); +- break; +- case SPECTRE_V2_USER_PRCTL: +- case SPECTRE_V2_USER_SECCOMP: +- update_indir_branch_cond(); +- break; +- } +- +- switch (mds_mitigation) { +- case MDS_MITIGATION_FULL: +- case MDS_MITIGATION_VMWERV: +- if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) +- pr_warn_once(MDS_MSG_SMT); +- update_mds_branch_idle(); +- break; +- case MDS_MITIGATION_OFF: +- break; +- } +- +- switch (taa_mitigation) { +- case TAA_MITIGATION_VERW: +- case TAA_MITIGATION_UCODE_NEEDED: +- if (sched_smt_active()) +- pr_warn_once(TAA_MSG_SMT); +- break; +- case TAA_MITIGATION_TSX_DISABLED: +- case TAA_MITIGATION_OFF: +- break; +- } +- +- switch (mmio_mitigation) { +- case MMIO_MITIGATION_VERW: +- case MMIO_MITIGATION_UCODE_NEEDED: +- if (sched_smt_active()) +- pr_warn_once(MMIO_MSG_SMT); +- break; +- case MMIO_MITIGATION_OFF: +- break; +- } +- +- switch (tsa_mitigation) { +- case TSA_MITIGATION_USER_KERNEL: +- case TSA_MITIGATION_VM: +- case TSA_MITIGATION_FULL: +- case TSA_MITIGATION_UCODE_NEEDED: +- /* +- * TSA-SQ can potentially lead to info leakage between +- * SMT threads. +- */ +- if (sched_smt_active()) +- static_branch_enable(&cpu_buf_idle_clear); +- else +- static_branch_disable(&cpu_buf_idle_clear); +- break; +- case TSA_MITIGATION_NONE: +- break; +- } +- +- mutex_unlock(&spec_ctrl_mutex); +-} +- + #undef pr_fmt + #define pr_fmt(fmt) "Speculative Store Bypass: " fmt + +@@ -2941,9 +2877,169 @@ static void __init srso_select_mitigation(void) + x86_pred_cmd = PRED_CMD_SBPB; + } + ++#undef pr_fmt ++#define pr_fmt(fmt) "VMSCAPE: " fmt ++ ++enum vmscape_mitigations { ++ VMSCAPE_MITIGATION_NONE, ++ VMSCAPE_MITIGATION_AUTO, ++ VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER, ++ VMSCAPE_MITIGATION_IBPB_ON_VMEXIT, ++}; ++ ++static const char * const vmscape_strings[] = { ++ [VMSCAPE_MITIGATION_NONE] = "Vulnerable", ++ /* [VMSCAPE_MITIGATION_AUTO] */ ++ [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace", ++ [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT", ++}; ++ ++static enum vmscape_mitigations vmscape_mitigation __ro_after_init = ++ IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? 
VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE; ++ ++static int __init vmscape_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) { ++ vmscape_mitigation = VMSCAPE_MITIGATION_NONE; ++ } else if (!strcmp(str, "ibpb")) { ++ vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; ++ } else if (!strcmp(str, "force")) { ++ setup_force_cpu_bug(X86_BUG_VMSCAPE); ++ vmscape_mitigation = VMSCAPE_MITIGATION_AUTO; ++ } else { ++ pr_err("Ignoring unknown vmscape=%s option.\n", str); ++ } ++ ++ return 0; ++} ++early_param("vmscape", vmscape_parse_cmdline); ++ ++static void __init vmscape_select_mitigation(void) ++{ ++ if (cpu_mitigations_off() || ++ !boot_cpu_has_bug(X86_BUG_VMSCAPE) || ++ !boot_cpu_has(X86_FEATURE_IBPB)) { ++ vmscape_mitigation = VMSCAPE_MITIGATION_NONE; ++ return; ++ } ++ ++ if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) ++ vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; ++ ++ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB || ++ srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT) ++ vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT; ++ ++ if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER) ++ setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER); ++ ++ pr_info("%s\n", vmscape_strings[vmscape_mitigation]); ++} ++ + #undef pr_fmt + #define pr_fmt(fmt) fmt + ++#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n" ++ ++void cpu_bugs_smt_update(void) ++{ ++ mutex_lock(&spec_ctrl_mutex); ++ ++ if (sched_smt_active() && unprivileged_ebpf_enabled() && ++ spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) ++ pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); ++ ++ switch (spectre_v2_user_stibp) { ++ case SPECTRE_V2_USER_NONE: ++ break; ++ case SPECTRE_V2_USER_STRICT: ++ case SPECTRE_V2_USER_STRICT_PREFERRED: ++ update_stibp_strict(); ++ break; ++ case SPECTRE_V2_USER_PRCTL: ++ case SPECTRE_V2_USER_SECCOMP: ++ update_indir_branch_cond(); ++ break; ++ } ++ ++ switch (mds_mitigation) { ++ case MDS_MITIGATION_FULL: ++ case MDS_MITIGATION_VMWERV: ++ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ pr_warn_once(MDS_MSG_SMT); ++ update_mds_branch_idle(); ++ break; ++ case MDS_MITIGATION_OFF: ++ break; ++ } ++ ++ switch (taa_mitigation) { ++ case TAA_MITIGATION_VERW: ++ case TAA_MITIGATION_UCODE_NEEDED: ++ if (sched_smt_active()) ++ pr_warn_once(TAA_MSG_SMT); ++ break; ++ case TAA_MITIGATION_TSX_DISABLED: ++ case TAA_MITIGATION_OFF: ++ break; ++ } ++ ++ switch (mmio_mitigation) { ++ case MMIO_MITIGATION_VERW: ++ case MMIO_MITIGATION_UCODE_NEEDED: ++ if (sched_smt_active()) ++ pr_warn_once(MMIO_MSG_SMT); ++ break; ++ case MMIO_MITIGATION_OFF: ++ break; ++ } ++ ++ switch (tsa_mitigation) { ++ case TSA_MITIGATION_USER_KERNEL: ++ case TSA_MITIGATION_VM: ++ case TSA_MITIGATION_FULL: ++ case TSA_MITIGATION_UCODE_NEEDED: ++ /* ++ * TSA-SQ can potentially lead to info leakage between ++ * SMT threads. 
++ */ ++ if (sched_smt_active()) ++ static_branch_enable(&cpu_buf_idle_clear); ++ else ++ static_branch_disable(&cpu_buf_idle_clear); ++ break; ++ case TSA_MITIGATION_NONE: ++ break; ++ } ++ ++ switch (vmscape_mitigation) { ++ case VMSCAPE_MITIGATION_NONE: ++ case VMSCAPE_MITIGATION_AUTO: ++ break; ++ case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT: ++ case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER: ++ /* ++ * Hypervisors can be attacked across-threads, warn for SMT when ++ * STIBP is not already enabled system-wide. ++ * ++ * Intel eIBRS (!AUTOIBRS) implies STIBP on. ++ */ ++ if (!sched_smt_active() || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || ++ (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && ++ !boot_cpu_has(X86_FEATURE_AUTOIBRS))) ++ break; ++ pr_warn_once(VMSCAPE_MSG_SMT); ++ break; ++ } ++ ++ mutex_unlock(&spec_ctrl_mutex); ++} ++ + #ifdef CONFIG_SYSFS + + #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" +@@ -3187,6 +3283,11 @@ static ssize_t tsa_show_state(char *buf) + return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); + } + ++static ssize_t vmscape_show_state(char *buf) ++{ ++ return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -3251,6 +3352,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + case X86_BUG_TSA: + return tsa_show_state(buf); + ++ case X86_BUG_VMSCAPE: ++ return vmscape_show_state(buf); ++ + default: + break; + } +@@ -3340,4 +3444,9 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *bu + { + return cpu_show_common(dev, attr, buf, X86_BUG_TSA); + } ++ ++ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE); ++} + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index f66c71bffa6d93..cf455968f27b69 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1279,54 +1279,68 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + #define ITS_NATIVE_ONLY BIT(9) + /* CPU is affected by Transient Scheduler Attacks */ + #define TSA BIT(10) ++/* CPU is affected by VMSCAPE */ ++#define VMSCAPE BIT(11) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { +- VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), +- VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO), +- VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), +- VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | 
RETBLEED | GDS | SRBDS | ITS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS), +- VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), ++ VULNBL_INTEL_STEPPINGS(SANDYBRIDGE_X, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(SANDYBRIDGE, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(IVYBRIDGE_X, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED | VMSCAPE), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY), +- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), +- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS), +- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE), + VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), +- VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS), +- VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS), +- VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS), +- VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS), +- 
VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS), +- VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS), ++ VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS | VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(METEORLAKE_L, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(ARROWLAKE_H, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(ARROWLAKE, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(ARROWLAKE_U, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(LUNARLAKE_M, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(SAPPHIRERAPIDS_X,X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPING_ANY, VMSCAPE), ++ VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS | VMSCAPE), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS), + VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS), + VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS), + VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS), ++ VULNBL_INTEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPING_ANY, VMSCAPE), + + VULNBL_AMD(0x15, RETBLEED), + VULNBL_AMD(0x16, RETBLEED), +- VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), +- VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), +- VULNBL_AMD(0x19, SRSO | TSA), ++ VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE), ++ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE), ++ VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE), + {} + }; + +@@ -1541,6 +1555,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + } + } + ++ /* ++ * Set the bug only on bare-metal. A nested hypervisor should already be ++ * deploying IBPB to isolate itself from nested guests. ++ */ ++ if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) && ++ !boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ setup_force_cpu_bug(X86_BUG_VMSCAPE); ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 5088065ac704be..7238686a49bb5a 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -10864,6 +10864,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + if (vcpu->arch.guest_fpu.xfd_err) + wrmsrl(MSR_IA32_XFD_ERR, 0); + ++ /* ++ * Mark this CPU as needing a branch predictor flush before running ++ * userspace. Must be done before enabling preemption to ensure it gets ++ * set for the CPU that actually ran the guest, and not the CPU that it ++ * may migrate to. ++ */ ++ if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER)) ++ this_cpu_write(x86_ibpb_exit_to_user, true); ++ + /* + * Consume any pending interrupts, including the possible source of + * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 
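The hunks that follow (drivers/base/cpu.c and include/linux/cpu.h) wire the new X86_BUG_VMSCAPE bit into the standard sysfs vulnerabilities directory, so the state strings defined in vmscape_strings[] above become readable from user space once the patch is applied. For reference, a minimal user-space check might look like the sketch below; it is illustrative only and not part of the patch. The path follows the existing cpu_root_vulnerabilities_attrs convention, and the file reports either one of the vmscape_strings[] values ("Vulnerable", "Mitigation: IBPB before exit to userspace", "Mitigation: IBPB on VMEXIT") or "Not affected" via the CPU_SHOW_VULN_FALLBACK() default.

/*
 * Illustrative sketch, not part of the patch: read the "vmscape"
 * entry that the hunks below add under the CPU vulnerabilities
 * sysfs directory and print the reported mitigation state.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/vulnerabilities/vmscape";
	char state[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		/* Kernel predates this patch, or sysfs is unavailable. */
		perror(path);
		return 1;
	}
	if (!fgets(state, sizeof(state), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* The sysfs string already carries a trailing newline. */
	printf("VMSCAPE status: %s", state);
	return 0;
}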
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index a3aea3c1431aa9..801a4b4f90260b 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -568,6 +568,7 @@ CPU_SHOW_VULN_FALLBACK(gds); + CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); + CPU_SHOW_VULN_FALLBACK(indirect_target_selection); + CPU_SHOW_VULN_FALLBACK(tsa); ++CPU_SHOW_VULN_FALLBACK(vmscape); + + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +@@ -585,6 +586,7 @@ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); + static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); + static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); + static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); ++static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -603,6 +605,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_reg_file_data_sampling.attr, + &dev_attr_indirect_target_selection.attr, + &dev_attr_tsa.attr, ++ &dev_attr_vmscape.attr, + NULL + }; + +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 6b4f9f16968821..1e3d1b2ac4c1da 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -80,6 +80,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev, + extern ssize_t cpu_show_indirect_target_selection(struct device *dev, + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.106-107.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.106-107.patch new file mode 100644 index 0000000000..4e1e6afea7 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.106-107.patch @@ -0,0 +1,3614 @@ +diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml +index 89c462653e2d33..8cc848ae11cb73 100644 +--- a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml ++++ b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml +@@ -41,7 +41,7 @@ properties: + - const: dma_intr2 + + clocks: +- minItems: 1 ++ maxItems: 1 + + clock-names: + const: sw_baud +diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst +index d7e1ada905b2d3..3bdd1558381057 100644 +--- a/Documentation/networking/can.rst ++++ b/Documentation/networking/can.rst +@@ -740,7 +740,7 @@ The broadcast manager sends responses to user space in the same form: + struct timeval ival1, ival2; /* count and subsequent interval */ + canid_t can_id; /* unique can_id for task */ + __u32 nframes; /* number of can_frames following */ +- struct can_frame frames[0]; ++ struct can_frame frames[]; + }; + + The aligned payload 'frames' uses the same basic CAN frame structure defined +diff --git a/Makefile b/Makefile +index b934846659eed0..9c9e272f48b879 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 106 ++SUBLEVEL = 107 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/riscv/include/asm/compat.h 
b/arch/riscv/include/asm/compat.h +index 2ac955b51148f4..6b79287baecc00 100644 +--- a/arch/riscv/include/asm/compat.h ++++ b/arch/riscv/include/asm/compat.h +@@ -9,7 +9,6 @@ + */ + #include + #include +-#include + #include + + static inline int is_compat_task(void) +diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c +index 65a66df5bb865e..771e1cb17540db 100644 +--- a/arch/s390/kernel/perf_cpum_cf.c ++++ b/arch/s390/kernel/perf_cpum_cf.c +@@ -757,8 +757,6 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) + break; + + case PERF_TYPE_HARDWARE: +- if (is_sampling_event(event)) /* No sampling support */ +- return -ENOENT; + ev = attr->config; + if (!attr->exclude_user && attr->exclude_kernel) { + /* +@@ -856,6 +854,8 @@ static int cpumf_pmu_event_init(struct perf_event *event) + unsigned int type = event->attr.type; + int err; + ++ if (is_sampling_event(event)) /* No sampling support */ ++ return err; + if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW) + err = __hw_perf_event_init(event, type); + else if (event->pmu->type == type) +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index c57d5df1abc603..0929d7fe7e2740 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -500,10 +500,18 @@ SECTIONS + PROVIDE(__ref_stack_chk_guard = __stack_chk_guard); + + /* +- * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: ++ * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause ++ * this. Let's assume that nobody will be running a COMPILE_TEST kernel and ++ * let's assert that fuller build coverage is more valuable than being able to ++ * run a COMPILE_TEST kernel. ++ */ ++#ifndef CONFIG_COMPILE_TEST ++/* ++ * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility: + */ + . 
= ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); ++#endif + + #ifdef CONFIG_X86_64 + /* +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 288db351677222..2c0bc6a93ec3e8 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -791,10 +791,15 @@ void kvm_set_cpu_caps(void) + F(PERFMON_V2) + ); + ++ kvm_cpu_cap_check_and_set(X86_FEATURE_VERW_CLEAR); ++ + kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX, + F(TSA_SQ_NO) | F(TSA_L1_NO) + ); + ++ kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO); ++ kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO); ++ + /* + * Synthesize "LFENCE is serializing" into the AMD-defined entry in + * KVM's supported CPUID if the feature is reported as supported by the +diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c +index 4fb8508419dbd8..deadf135681b67 100644 +--- a/drivers/dma/dw/rzn1-dmamux.c ++++ b/drivers/dma/dw/rzn1-dmamux.c +@@ -48,12 +48,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec, + u32 mask; + int ret; + +- if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) +- return ERR_PTR(-EINVAL); ++ if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) { ++ ret = -EINVAL; ++ goto put_device; ++ } + + map = kzalloc(sizeof(*map), GFP_KERNEL); +- if (!map) +- return ERR_PTR(-ENOMEM); ++ if (!map) { ++ ret = -ENOMEM; ++ goto put_device; ++ } + + chan = dma_spec->args[0]; + map->req_idx = dma_spec->args[4]; +@@ -94,12 +98,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec, + if (ret) + goto clear_bitmap; + ++ put_device(&pdev->dev); + return map; + + clear_bitmap: + clear_bit(map->req_idx, dmamux->used_chans); + free_map: + kfree(map); ++put_device: ++ put_device(&pdev->dev); + + return ERR_PTR(ret); + } +diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c +index 92e86ae9db29d7..4b999c5802f4b2 100644 +--- a/drivers/dma/idxd/init.c ++++ b/drivers/dma/idxd/init.c +@@ -179,27 +179,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); + if (!idxd->wq_enable_map) { + rc = -ENOMEM; +- goto err_bitmap; ++ goto err_free_wqs; + } + + for (i = 0; i < idxd->max_wqs; i++) { + wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev)); + if (!wq) { + rc = -ENOMEM; +- goto err; ++ goto err_unwind; + } + + idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ); + conf_dev = wq_confdev(wq); + wq->id = i; + wq->idxd = idxd; +- device_initialize(wq_confdev(wq)); ++ device_initialize(conf_dev); + conf_dev->parent = idxd_confdev(idxd); + conf_dev->bus = &dsa_bus_type; + conf_dev->type = &idxd_wq_device_type; + rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); +- if (rc < 0) +- goto err; ++ if (rc < 0) { ++ put_device(conf_dev); ++ kfree(wq); ++ goto err_unwind; ++ } + + mutex_init(&wq->wq_lock); + init_waitqueue_head(&wq->err_queue); +@@ -210,15 +213,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; + wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); + if (!wq->wqcfg) { ++ put_device(conf_dev); ++ kfree(wq); + rc = -ENOMEM; +- goto err; ++ goto err_unwind; + } + + if (idxd->hw.wq_cap.op_config) { + wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); + if (!wq->opcap_bmap) { ++ kfree(wq->wqcfg); ++ put_device(conf_dev); ++ kfree(wq); + rc = -ENOMEM; +- goto err_opcap_bmap; ++ goto err_unwind; + } + bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, 
IDXD_MAX_OPCAP_BITS); + } +@@ -229,13 +237,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + + return 0; + +-err_opcap_bmap: +- kfree(wq->wqcfg); +- +-err: +- put_device(conf_dev); +- kfree(wq); +- ++err_unwind: + while (--i >= 0) { + wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) +@@ -244,11 +246,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd) + conf_dev = wq_confdev(wq); + put_device(conf_dev); + kfree(wq); +- + } + bitmap_free(idxd->wq_enable_map); + +-err_bitmap: ++err_free_wqs: + kfree(idxd->wqs); + + return rc; +@@ -904,10 +905,12 @@ static void idxd_remove(struct pci_dev *pdev) + device_unregister(idxd_confdev(idxd)); + idxd_shutdown(pdev); + idxd_device_remove_debugfs(idxd); +- idxd_cleanup(idxd); ++ perfmon_pmu_remove(idxd); ++ idxd_cleanup_interrupts(idxd); ++ if (device_pasid_enabled(idxd)) ++ idxd_disable_system_pasid(idxd); + pci_iounmap(pdev, idxd->reg_base); + put_device(idxd_confdev(idxd)); +- idxd_free(idxd); + pci_disable_device(pdev); + } + +diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c +index 4c3eb972039d60..d5882c8537e52b 100644 +--- a/drivers/dma/qcom/bam_dma.c ++++ b/drivers/dma/qcom/bam_dma.c +@@ -1283,13 +1283,17 @@ static int bam_dma_probe(struct platform_device *pdev) + if (!bdev->bamclk) { + ret = of_property_read_u32(pdev->dev.of_node, "num-channels", + &bdev->num_channels); +- if (ret) ++ if (ret) { + dev_err(bdev->dev, "num-channels unspecified in dt\n"); ++ return ret; ++ } + + ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees", + &bdev->num_ees); +- if (ret) ++ if (ret) { + dev_err(bdev->dev, "num-ees unspecified in dt\n"); ++ return ret; ++ } + } + + ret = clk_prepare_enable(bdev->bamclk); +diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c +index c0fa5413246756..f7ddf588b7f9b7 100644 +--- a/drivers/dma/ti/edma.c ++++ b/drivers/dma/ti/edma.c +@@ -2063,8 +2063,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, + * priority. So Q0 is the highest priority queue and the last queue has + * the lowest priority. 
+ */ +- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), +- GFP_KERNEL); ++ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, ++ sizeof(*queue_priority_map), GFP_KERNEL); + if (!queue_priority_map) + return -ENOMEM; + +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c +index 54ec894150939a..233e58278943e0 100644 +--- a/drivers/edac/altera_edac.c ++++ b/drivers/edac/altera_edac.c +@@ -127,7 +127,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file, + + ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL); + if (!ptemp) { +- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle); + edac_printk(KERN_ERR, EDAC_MC, + "Inject: Buffer Allocation error\n"); + return -ENOMEM; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +index f44b303ae287a7..eebac2e1a6c75b 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +@@ -396,9 +396,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) + dma_fence_put(ring->vmid_wait); + ring->vmid_wait = NULL; + ring->me = 0; +- +- if (!ring->is_mes_queue) +- ring->adev->rings[ring->idx] = NULL; + } + + /** +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +index a61ecefdafc512..710328f12194d3 100644 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +@@ -1765,15 +1765,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, + struct amdgpu_job *job) + { + struct drm_gpu_scheduler **scheds; +- +- /* The create msg must be in the first IB submitted */ +- if (atomic_read(&job->base.entity->fence_seq)) +- return -EINVAL; ++ struct dma_fence *fence; + + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + ++ /* wait for all jobs to finish before switching to instance 0 */ ++ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull); ++ if (fence) { ++ dma_fence_wait(fence, false); ++ dma_fence_put(fence); ++ } ++ + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] + [AMDGPU_RING_PRIO_DEFAULT].sched; + drm_sched_entity_modify_sched(job->base.entity, scheds, 1); +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +index 29164289c5f3e1..43249e9f66d74d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +@@ -1644,15 +1644,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p, + struct amdgpu_job *job) + { + struct drm_gpu_scheduler **scheds; +- +- /* The create msg must be in the first IB submitted */ +- if (atomic_read(&job->base.entity->fence_seq)) +- return -EINVAL; ++ struct dma_fence *fence; + + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + ++ /* wait for all jobs to finish before switching to instance 0 */ ++ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull); ++ if (fence) { ++ dma_fence_wait(fence, false); ++ dma_fence_put(fence); ++ } ++ + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] + [AMDGPU_RING_PRIO_0].sched; + drm_sched_entity_modify_sched(job->base.entity, scheds, 1); +@@ -1743,22 +1747,16 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, + + #define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) + #define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) +- + #define RADEON_VCN_ENGINE_INFO (0x30000001) +-#define 
RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 +- + #define RENCODE_ENCODE_STANDARD_AV1 2 + #define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 +-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 + +-/* return the offset in ib if id is found, -1 otherwise +- * to speed up the searching we only search upto max_offset +- */ +-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset) ++/* return the offset in ib if id is found, -1 otherwise */ ++static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start) + { + int i; + +- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { ++ for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) { + if (ib->ptr[i + 1] == id) + return i; + } +@@ -1773,33 +1771,29 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, + struct amdgpu_vcn_decode_buffer *decode_buffer; + uint64_t addr; + uint32_t val; +- int idx; ++ int idx = 0, sidx; + + /* The first instance can decode anything */ + if (!ring->me) + return 0; + +- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */ +- idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, +- RADEON_VCN_ENGINE_INFO_MAX_OFFSET); +- if (idx < 0) /* engine info is missing */ +- return 0; +- +- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ +- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { +- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; +- +- if (!(decode_buffer->valid_buf_flag & 0x1)) +- return 0; +- +- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | +- decode_buffer->msg_buffer_address_lo; +- return vcn_v4_0_dec_msg(p, job, addr); +- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { +- idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, +- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); +- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) +- return vcn_v4_0_limit_sched(p, job); ++ while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) { ++ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ ++ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { ++ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; ++ ++ if (!(decode_buffer->valid_buf_flag & 0x1)) ++ return 0; ++ ++ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | ++ decode_buffer->msg_buffer_address_lo; ++ return vcn_v4_0_dec_msg(p, job, addr); ++ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { ++ sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx); ++ if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1) ++ return vcn_v4_0_limit_sched(p, job); ++ } ++ idx += ib->ptr[idx] / 4; + } + return 0; + } +diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c +index 9e01054c243001..8beeda3439818e 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_power.c ++++ b/drivers/gpu/drm/i915/display/intel_display_power.c +@@ -1170,7 +1170,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) + if (DISPLAY_VER(dev_priv) == 12) + abox_regs |= BIT(0); + +- for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) ++ for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs)) + intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); + } + +@@ -1623,11 +1623,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) + if (table[config].page_mask == 0) { + drm_dbg(&dev_priv->drm, + "Unknown memory configuration; 
disabling address buddy logic.\n"); +- for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) ++ for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) + intel_de_write(dev_priv, BW_BUDDY_CTL(i), + BW_BUDDY_DISABLE); + } else { +- for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { ++ for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) { + intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), + table[config].page_mask); + +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +index bfa1070a5f08e2..f1f73c1e7b5cbf 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +@@ -365,11 +365,11 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) + + of_id = of_match_node(mtk_drm_of_ids, node); + if (!of_id) +- goto next_put_node; ++ continue; + + pdev = of_find_device_by_node(node); + if (!pdev) +- goto next_put_node; ++ continue; + + drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match); + if (!drm_dev) +@@ -395,11 +395,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) + next_put_device_pdev_dev: + put_device(&pdev->dev); + +-next_put_node: +- of_node_put(node); +- +- if (cnt == MAX_CRTC) ++ if (cnt == MAX_CRTC) { ++ of_node_put(node); + break; ++ } + } + + if (drm_priv->data->mmsys_dev_num == cnt) { +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index 18c04f5e41d9c5..89fdc75cdcfa54 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -1051,7 +1051,7 @@ static const struct pci_device_id i801_ids[] = { + { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, +- { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, ++ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, +diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c +index ce7e977cc8a7a1..eb0e0b37eb41b6 100644 +--- a/drivers/input/misc/iqs7222.c ++++ b/drivers/input/misc/iqs7222.c +@@ -2430,6 +2430,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, + if (error) + return error; + ++ if (!iqs7222->kp_type[chan_index][i]) ++ continue; ++ + if (!dev_desc->event_offset) + continue; + +diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h +index 8813db7eec3978..630cdd5a132831 100644 +--- a/drivers/input/serio/i8042-acpipnpio.h ++++ b/drivers/input/serio/i8042-acpipnpio.h +@@ -1155,6 +1155,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"), ++ }, ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) ++ }, + /* + * A lot of modern Clevo barebones have 
touchpad and/or keyboard issues + * after suspend fixable with the forcenorestore quirk. +diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c +index 2f9c8582f9401a..db40008f31cf1b 100644 +--- a/drivers/media/i2c/imx214.c ++++ b/drivers/media/i2c/imx214.c +@@ -20,7 +20,9 @@ + #include + + #define IMX214_DEFAULT_CLK_FREQ 24000000 +-#define IMX214_DEFAULT_LINK_FREQ 480000000 ++#define IMX214_DEFAULT_LINK_FREQ 600000000 ++/* Keep wrong link frequency for backward compatibility */ ++#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000 + #define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10) + #define IMX214_FPS 30 + #define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10 +@@ -892,17 +894,26 @@ static int imx214_parse_fwnode(struct device *dev) + goto done; + } + +- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) ++ if (bus_cfg.nr_of_link_frequencies != 1) ++ dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n"); ++ ++ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) { + if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ) + break; +- +- if (i == bus_cfg.nr_of_link_frequencies) { +- dev_err(dev, "link-frequencies %d not supported, Please review your DT\n", +- IMX214_DEFAULT_LINK_FREQ); +- ret = -EINVAL; +- goto done; ++ if (bus_cfg.link_frequencies[i] == ++ IMX214_DEFAULT_LINK_FREQ_LEGACY) { ++ dev_warn(dev, ++ "link-frequencies %d not supported, please review your DT. Continuing anyway\n", ++ IMX214_DEFAULT_LINK_FREQ); ++ break; ++ } + } + ++ if (i == bus_cfg.nr_of_link_frequencies) ++ ret = dev_err_probe(dev, -EINVAL, ++ "link-frequencies %d not supported, please review your DT\n", ++ IMX214_DEFAULT_LINK_FREQ); ++ + done: + v4l2_fwnode_endpoint_free(&bus_cfg); + fwnode_handle_put(endpoint); +diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c +index d4fd1302008ebd..c5aff27ec4a895 100644 +--- a/drivers/mtd/nand/raw/atmel/nand-controller.c ++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c +@@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, + return ret; + + /* +- * The write cycle timing is directly matching tWC, but is also ++ * Read setup timing depends on the operation done on the NAND: ++ * ++ * NRD_SETUP = max(tAR, tCLR) ++ */ ++ timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min); ++ ncycles = DIV_ROUND_UP(timeps, mckperiodps); ++ totalcycles += ncycles; ++ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles); ++ if (ret) ++ return ret; ++ ++ /* ++ * The read cycle timing is directly matching tRC, but is also + * dependent on the setup and hold timings we calculated earlier, + * which gives: + * +- * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD) +- * +- * NRD_SETUP is always 0. 
++ * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD) + */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps); + ncycles = max(totalcycles, ncycles); +diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c +index 88811139aaf5b9..c7956298397173 100644 +--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c ++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c +@@ -263,6 +263,7 @@ struct stm32_fmc2_nfc { + struct sg_table dma_data_sg; + struct sg_table dma_ecc_sg; + u8 *ecc_buf; ++ dma_addr_t dma_ecc_addr; + int dma_ecc_len; + + struct completion complete; +@@ -885,17 +886,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + + if (!write_data && !raw) { + /* Configure DMA ECC status */ +- p = nfc->ecc_buf; + for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) { +- sg_set_buf(sg, p, nfc->dma_ecc_len); +- p += nfc->dma_ecc_len; +- } +- +- ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl, +- eccsteps, dma_data_dir); +- if (!ret) { +- ret = -EIO; +- goto err_unmap_data; ++ sg_dma_address(sg) = nfc->dma_ecc_addr + ++ s * nfc->dma_ecc_len; ++ sg_dma_len(sg) = nfc->dma_ecc_len; + } + + desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch, +@@ -904,7 +898,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + DMA_PREP_INTERRUPT); + if (!desc_ecc) { + ret = -ENOMEM; +- goto err_unmap_ecc; ++ goto err_unmap_data; + } + + reinit_completion(&nfc->dma_ecc_complete); +@@ -912,7 +906,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + desc_ecc->callback_param = &nfc->dma_ecc_complete; + ret = dma_submit_error(dmaengine_submit(desc_ecc)); + if (ret) +- goto err_unmap_ecc; ++ goto err_unmap_data; + + dma_async_issue_pending(nfc->dma_ecc_ch); + } +@@ -932,7 +926,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + if (!write_data && !raw) + dmaengine_terminate_all(nfc->dma_ecc_ch); + ret = -ETIMEDOUT; +- goto err_unmap_ecc; ++ goto err_unmap_data; + } + + /* Wait DMA data transfer completion */ +@@ -952,11 +946,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + } + } + +-err_unmap_ecc: +- if (!write_data && !raw) +- dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl, +- eccsteps, dma_data_dir); +- + err_unmap_data: + dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir); + +@@ -979,9 +968,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf, + + /* Write oob */ + if (oob_required) { +- ret = nand_change_write_column_op(chip, mtd->writesize, +- chip->oob_poi, mtd->oobsize, +- false); ++ unsigned int offset_in_page = mtd->writesize; ++ const void *buf = chip->oob_poi; ++ unsigned int len = mtd->oobsize; ++ ++ if (!raw) { ++ struct mtd_oob_region oob_free; ++ ++ mtd_ooblayout_free(mtd, 0, &oob_free); ++ offset_in_page += oob_free.offset; ++ buf += oob_free.offset; ++ len = oob_free.length; ++ } ++ ++ ret = nand_change_write_column_op(chip, offset_in_page, ++ buf, len, false); + if (ret) + return ret; + } +@@ -1582,7 +1583,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc) + return ret; + + /* Allocate a buffer to store ECC status registers */ +- nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL); ++ nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN, ++ &nfc->dma_ecc_addr, GFP_KERNEL); + if (!nfc->ecc_buf) + return -ENOMEM; + +diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c +index abe58f10304336..57d1209134f11b 100644 +--- 
a/drivers/net/can/xilinx_can.c ++++ b/drivers/net/can/xilinx_can.c +@@ -628,14 +628,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb, + dlc |= XCAN_DLCR_EDL_MASK; + } + +- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) && +- (priv->devtype.flags & XCAN_FLAG_TXFEMP)) +- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0); +- else +- can_put_echo_skb(skb, ndev, 0, 0); +- +- priv->tx_head++; +- + priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); + /* If the CAN frame is RTR frame this write triggers transmission + * (not on CAN FD) +@@ -668,6 +660,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb, + data[1]); + } + } ++ ++ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) && ++ (priv->devtype.flags & XCAN_FLAG_TXFEMP)) ++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0); ++ else ++ can_put_echo_skb(skb, ndev, 0, 0); ++ ++ priv->tx_head++; + } + + /** +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 2a8b5429df5957..8352d9b6469f2a 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -2300,7 +2300,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) + */ + phy_dev = of_phy_find_device(fep->phy_node); + phy_reset_after_clk_enable(phy_dev); +- put_device(&phy_dev->mdio.dev); ++ if (phy_dev) ++ put_device(&phy_dev->mdio.dev); + } + } + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index b749aa3e783ffe..72869336e3a9a9 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -4210,7 +4210,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) + irq_num = pf->msix_entries[base + vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); +- free_irq(irq_num, &vsi->q_vectors[vector]); ++ free_irq(irq_num, vsi->q_vectors[vector]); + } + return err; + } +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index 92b2be06a6e930..f11cba65e5d85e 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev, + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + +- /* PHY is powered down when interface is down */ +- if (if_running && igb_link_test(adapter, &data[TEST_LINK])) ++ if (igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; +- else +- data[TEST_LINK] = 0; + + /* Online tests aren't run; pass by default */ + data[TEST_REG] = 0; +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index e02706b7cc1ed6..f1fac89721ed93 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -99,6 +99,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev) + if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) + return -EINVAL; + ++ gpiod_put(mdiodev->reset_gpio); + reset_control_put(mdiodev->reset_ctrl); + + mdiodev->bus->mdio_map[mdiodev->addr] = NULL; +@@ -775,9 +776,6 @@ void mdiobus_unregister(struct mii_bus *bus) + if (!mdiodev) + continue; + +- if (mdiodev->reset_gpio) +- gpiod_put(mdiodev->reset_gpio); +- + mdiodev->device_remove(mdiodev); + mdiodev->device_free(mdiodev); + } +diff --git a/drivers/phy/tegra/xusb-tegra210.c 
b/drivers/phy/tegra/xusb-tegra210.c +index ebc8a7e21a3181..3409924498e9cf 100644 +--- a/drivers/phy/tegra/xusb-tegra210.c ++++ b/drivers/phy/tegra/xusb-tegra210.c +@@ -3164,18 +3164,22 @@ tegra210_xusb_padctl_probe(struct device *dev, + } + + pdev = of_find_device_by_node(np); ++ of_node_put(np); + if (!pdev) { + dev_warn(dev, "PMC device is not available\n"); + goto out; + } + +- if (!platform_get_drvdata(pdev)) ++ if (!platform_get_drvdata(pdev)) { ++ put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); ++ } + + padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk"); + if (!padctl->regmap) + dev_info(dev, "failed to find PMC regmap\n"); + ++ put_device(&pdev->dev); + out: + return &padctl->base; + } +diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c +index 507e1552db5e83..3127f3702c3ae3 100644 +--- a/drivers/phy/ti/phy-ti-pipe3.c ++++ b/drivers/phy/ti/phy-ti-pipe3.c +@@ -666,12 +666,20 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy) + return 0; + } + ++static void ti_pipe3_put_device(void *_dev) ++{ ++ struct device *dev = _dev; ++ ++ put_device(dev); ++} ++ + static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy) + { + struct device *dev = phy->dev; + struct device_node *node = dev->of_node; + struct device_node *control_node; + struct platform_device *control_pdev; ++ int ret; + + phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node, + "syscon-phy-power"); +@@ -703,6 +711,11 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy) + } + + phy->control_dev = &control_pdev->dev; ++ ++ ret = devm_add_action_or_reset(dev, ti_pipe3_put_device, ++ phy->control_dev); ++ if (ret) ++ return ret; + } + + if (phy->mode == PIPE3_MODE_PCIE) { +diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c +index d1e7ba1fb3e1af..27e3d939b7bb9e 100644 +--- a/drivers/regulator/sy7636a-regulator.c ++++ b/drivers/regulator/sy7636a-regulator.c +@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev) + if (!regmap) + return -EPROBE_DEFER; + +- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN); ++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent); ++ ++ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN); + if (IS_ERR(gdp)) { +- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp)); ++ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp)); + return PTR_ERR(gdp); + } + +@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev) + } + + config.dev = &pdev->dev; +- config.dev->of_node = pdev->dev.parent->of_node; + config.regmap = regmap; + + rdev = devm_regulator_register(&pdev->dev, &desc, &config); +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c +index 959fae54ca394b..4c252bbbf8e276 100644 +--- a/drivers/tty/hvc/hvc_console.c ++++ b/drivers/tty/hvc/hvc_console.c +@@ -543,10 +543,10 @@ static ssize_t hvc_write(struct tty_struct *tty, const u8 *buf, size_t count) + } + + /* +- * Racy, but harmless, kick thread if there is still pending data. ++ * Kick thread to flush if there's still pending data ++ * or to wakeup the write queue. 
+ */ +- if (hp->n_outbuf) +- hvc_kick(); ++ hvc_kick(); + + return written; + } +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c +index 8a2ce2ca6b394a..66a88bba8f15b8 100644 +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -1163,17 +1163,6 @@ static int sc16is7xx_startup(struct uart_port *port) + sc16is7xx_port_write(port, SC16IS7XX_FCR_REG, + SC16IS7XX_FCR_FIFO_BIT); + +- /* Enable EFR */ +- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, +- SC16IS7XX_LCR_CONF_MODE_B); +- +- regcache_cache_bypass(one->regmap, true); +- +- /* Enable write access to enhanced features and internal clock div */ +- sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, +- SC16IS7XX_EFR_ENABLE_BIT, +- SC16IS7XX_EFR_ENABLE_BIT); +- + /* Enable TCR/TLR */ + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, + SC16IS7XX_MCR_TCRTLR_BIT, +@@ -1185,7 +1174,8 @@ static int sc16is7xx_startup(struct uart_port *port) + SC16IS7XX_TCR_RX_RESUME(24) | + SC16IS7XX_TCR_RX_HALT(48)); + +- regcache_cache_bypass(one->regmap, false); ++ /* Disable TCR/TLR access */ ++ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_TCRTLR_BIT, 0); + + /* Now, initialize the UART */ + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8); +diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c +index 90536f47906c33..d7ed50ff380cf4 100644 +--- a/drivers/usb/gadget/function/f_midi2.c ++++ b/drivers/usb/gadget/function/f_midi2.c +@@ -1601,6 +1601,7 @@ static int f_midi2_create_card(struct f_midi2 *midi2) + strscpy(fb->info.name, ump_fb_name(b), + sizeof(fb->info.name)); + } ++ snd_ump_update_group_attrs(ump); + } + + for (i = 0; i < midi2->num_eps; i++) { +@@ -1738,9 +1739,12 @@ static int f_midi2_create_usb_configs(struct f_midi2 *midi2, + case USB_SPEED_HIGH: + midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(512); + midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(512); +- for (i = 0; i < midi2->num_eps; i++) ++ for (i = 0; i < midi2->num_eps; i++) { + midi2_midi2_ep_out_desc[i].wMaxPacketSize = + cpu_to_le16(512); ++ midi2_midi2_ep_in_desc[i].wMaxPacketSize = ++ cpu_to_le16(512); ++ } + fallthrough; + case USB_SPEED_FULL: + midi1_in_eps = midi2_midi1_ep_in_descs; +@@ -1749,9 +1753,12 @@ static int f_midi2_create_usb_configs(struct f_midi2 *midi2, + case USB_SPEED_SUPER: + midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(1024); + midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(1024); +- for (i = 0; i < midi2->num_eps; i++) ++ for (i = 0; i < midi2->num_eps; i++) { + midi2_midi2_ep_out_desc[i].wMaxPacketSize = + cpu_to_le16(1024); ++ midi2_midi2_ep_in_desc[i].wMaxPacketSize = ++ cpu_to_le16(1024); ++ } + midi1_in_eps = midi2_midi1_ep_in_ss_descs; + midi1_out_eps = midi2_midi1_ep_out_ss_descs; + break; +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c +index d5d89fadde433f..a06f56c08e19d0 100644 +--- a/drivers/usb/gadget/udc/dummy_hcd.c ++++ b/drivers/usb/gadget/udc/dummy_hcd.c +@@ -764,8 +764,7 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req) + if (!dum->driver) + return -ESHUTDOWN; + +- local_irq_save(flags); +- spin_lock(&dum->lock); ++ spin_lock_irqsave(&dum->lock, flags); + list_for_each_entry(iter, &ep->queue, queue) { + if (&iter->req != _req) + continue; +@@ -775,15 +774,16 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req) + retval = 0; + break; + } +- spin_unlock(&dum->lock); + + if (retval == 0) { + dev_dbg(udc_dev(dum), + 
"dequeued req %p from %s, len %d buf %p\n", + req, _ep->name, _req->length, _req->buf); ++ spin_unlock(&dum->lock); + usb_gadget_giveback_request(_ep, _req); ++ spin_lock(&dum->lock); + } +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&dum->lock, flags); + return retval; + } + +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 04718048b74bd9..621f12c11cbc2b 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -945,7 +945,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i + out: + /* we are now at a leaf device */ + xhci_debugfs_remove_slot(xhci, slot_id); +- xhci_free_virt_device(xhci, vdev, slot_id); ++ xhci_free_virt_device(xhci, xhci->devs[slot_id], slot_id); + } + + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index ac72b04c997bfb..ef546f660b9927 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1322,7 +1322,18 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ + .driver_info = NCTRL(0) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1034, 0xff), /* Telit LE910C4-WWX (rmnet) */ ++ .driver_info = RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */ ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1036, 0xff) }, /* Telit LE910C4-WWX */ ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1037, 0xff), /* Telit LE910C4-WWX (rmnet) */ ++ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1038, 0xff), /* Telit LE910C4-WWX (rmnet) */ ++ .driver_info = NCTRL(0) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103b, 0xff), /* Telit LE910C4-WWX */ ++ .driver_info = NCTRL(0) | NCTRL(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103c, 0xff), /* Telit LE910C4-WWX */ ++ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), + .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), +@@ -1369,6 +1380,12 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */ + .driver_info = RSVD(0) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1077, 0xff), /* Telit FN990A (rmnet + audio) */ ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1078, 0xff), /* Telit FN990A (MBIM + audio) */ ++ .driver_info = NCTRL(0) | RSVD(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1079, 0xff), /* Telit FN990A (RNDIS + audio) */ ++ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */ +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 48b06459bc485a..ccf94c5fbfdfd6 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -103,6 +103,25 @@ struct btrfs_bio_ctrl { + blk_opf_t opf; + btrfs_bio_end_io_t end_io_func; + struct writeback_control *wbc; ++ struct readahead_control *ractl; ++ ++ /* ++ * The start offset of the last used extent 
map by a read operation. ++ * ++ * This is for proper compressed read merge. ++ * U64_MAX means we are starting the read and have made no progress yet. ++ * ++ * The current btrfs_bio_is_contig() only uses disk_bytenr as ++ * the condition to check if the read can be merged with previous ++ * bio, which is not correct. E.g. two file extents pointing to the ++ * same extent but with different offset. ++ * ++ * So here we need to do extra checks to only merge reads that are ++ * covered by the same extent map. ++ * Just extent_map::start will be enough, as they are unique ++ * inside the same inode. ++ */ ++ u64 last_em_start; + }; + + static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) +@@ -952,6 +971,23 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, + } + return em; + } ++ ++static void btrfs_readahead_expand(struct readahead_control *ractl, ++ const struct extent_map *em) ++{ ++ const u64 ra_pos = readahead_pos(ractl); ++ const u64 ra_end = ra_pos + readahead_length(ractl); ++ const u64 em_end = em->start + em->ram_bytes; ++ ++ /* No expansion for holes and inline extents. */ ++ if (em->block_start > EXTENT_MAP_LAST_BYTE) ++ return; ++ ++ ASSERT(em_end >= ra_pos); ++ if (em_end > ra_end) ++ readahead_expand(ractl, ra_pos, em_end - ra_pos); ++} ++ + /* + * basic readpage implementation. Locked extent state structs are inserted + * into the tree that are removed when the IO is done (by the end_io +@@ -960,7 +996,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, + * return 0 on success, otherwise return error + */ + static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, +- struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start) ++ struct btrfs_bio_ctrl *bio_ctrl) + { + struct inode *inode = page->mapping->host; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); +@@ -1023,6 +1059,17 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, + + iosize = min(extent_map_end(em) - cur, end - cur + 1); + iosize = ALIGN(iosize, blocksize); ++ ++ /* ++ * Only expand readahead for extents which are already creating ++ * the pages anyway in add_ra_bio_pages, which is compressed ++ * extents in the non subpage case. ++ */ ++ if (bio_ctrl->ractl && ++ !btrfs_is_subpage(fs_info, page) && ++ compress_type != BTRFS_COMPRESS_NONE) ++ btrfs_readahead_expand(bio_ctrl->ractl, em); ++ + if (compress_type != BTRFS_COMPRESS_NONE) + disk_bytenr = em->block_start; + else +@@ -1066,12 +1113,11 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, + * non-optimal behavior (submitting 2 bios for the same extent). 
+ */ + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && +- prev_em_start && *prev_em_start != (u64)-1 && +- *prev_em_start != em->start) ++ bio_ctrl->last_em_start != (u64)-1 && ++ bio_ctrl->last_em_start != em->start) + force_bio_submit = true; + +- if (prev_em_start) +- *prev_em_start = em->start; ++ bio_ctrl->last_em_start = em->start; + + free_extent_map(em); + em = NULL; +@@ -1117,12 +1163,15 @@ int btrfs_read_folio(struct file *file, struct folio *folio) + struct btrfs_inode *inode = BTRFS_I(page->mapping->host); + u64 start = page_offset(page); + u64 end = start + PAGE_SIZE - 1; +- struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ }; ++ struct btrfs_bio_ctrl bio_ctrl = { ++ .opf = REQ_OP_READ, ++ .last_em_start = (u64)-1, ++ }; + int ret; + + btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); + +- ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL); ++ ret = btrfs_do_readpage(page, NULL, &bio_ctrl); + /* + * If btrfs_do_readpage() failed we will want to submit the assembled + * bio to do the cleanup. +@@ -1134,8 +1183,7 @@ int btrfs_read_folio(struct file *file, struct folio *folio) + static inline void contiguous_readpages(struct page *pages[], int nr_pages, + u64 start, u64 end, + struct extent_map **em_cached, +- struct btrfs_bio_ctrl *bio_ctrl, +- u64 *prev_em_start) ++ struct btrfs_bio_ctrl *bio_ctrl) + { + struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); + int index; +@@ -1143,8 +1191,7 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages, + btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); + + for (index = 0; index < nr_pages; index++) { +- btrfs_do_readpage(pages[index], em_cached, bio_ctrl, +- prev_em_start); ++ btrfs_do_readpage(pages[index], em_cached, bio_ctrl); + put_page(pages[index]); + } + } +@@ -2224,10 +2271,13 @@ int extent_writepages(struct address_space *mapping, + + void extent_readahead(struct readahead_control *rac) + { +- struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD }; ++ struct btrfs_bio_ctrl bio_ctrl = { ++ .opf = REQ_OP_READ | REQ_RAHEAD, ++ .ractl = rac, ++ .last_em_start = (u64)-1, ++ }; + struct page *pagepool[16]; + struct extent_map *em_cached = NULL; +- u64 prev_em_start = (u64)-1; + int nr; + + while ((nr = readahead_page_batch(rac, pagepool))) { +@@ -2235,7 +2285,7 @@ void extent_readahead(struct readahead_control *rac) + u64 contig_end = contig_start + readahead_batch_length(rac) - 1; + + contiguous_readpages(pagepool, nr, contig_start, contig_end, +- &em_cached, &bio_ctrl, &prev_em_start); ++ &em_cached, &bio_ctrl); + } + + if (em_cached) +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 3e4c3fcb588ba8..952c99fcb636dc 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -3106,7 +3106,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, + .nodeid_out = ff_out->nodeid, + .fh_out = ff_out->fh, + .off_out = pos_out, +- .len = len, ++ .len = min_t(size_t, len, UINT_MAX & PAGE_MASK), + .flags = flags + }; + struct fuse_write_out outarg; +@@ -3172,6 +3172,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, + fc->no_copy_file_range = 1; + err = -EOPNOTSUPP; + } ++ if (!err && outarg.size > len) ++ err = -EIO; ++ + if (err) + goto out; + +diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c +index 6b90fea6cca209..257ba5398387be 100644 +--- a/fs/kernfs/file.c ++++ b/fs/kernfs/file.c +@@ -70,6 +70,24 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of) + !list_empty(&of->list)); + } + ++/* Get 
active reference to kernfs node for an open file */ ++static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of) ++{ ++ /* Skip if file was already released */ ++ if (unlikely(of->released)) ++ return NULL; ++ ++ if (!kernfs_get_active(of->kn)) ++ return NULL; ++ ++ return of; ++} ++ ++static void kernfs_put_active_of(struct kernfs_open_file *of) ++{ ++ return kernfs_put_active(of->kn); ++} ++ + /** + * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn + * +@@ -139,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v) + + if (ops->seq_stop) + ops->seq_stop(sf, v); +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + } + + static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos) +@@ -152,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos) + * the ops aren't called concurrently for the same open file. + */ + mutex_lock(&of->mutex); +- if (!kernfs_get_active(of->kn)) ++ if (!kernfs_get_active_of(of)) + return ERR_PTR(-ENODEV); + + ops = kernfs_ops(of->kn); +@@ -238,7 +256,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) + * the ops aren't called concurrently for the same open file. + */ + mutex_lock(&of->mutex); +- if (!kernfs_get_active(of->kn)) { ++ if (!kernfs_get_active_of(of)) { + len = -ENODEV; + mutex_unlock(&of->mutex); + goto out_free; +@@ -252,7 +270,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) + else + len = -EINVAL; + +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + mutex_unlock(&of->mutex); + + if (len < 0) +@@ -323,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter) + * the ops aren't called concurrently for the same open file. 
+ */ + mutex_lock(&of->mutex); +- if (!kernfs_get_active(of->kn)) { ++ if (!kernfs_get_active_of(of)) { + mutex_unlock(&of->mutex); + len = -ENODEV; + goto out_free; +@@ -335,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter) + else + len = -EINVAL; + +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + mutex_unlock(&of->mutex); + + if (len > 0) +@@ -357,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma) + if (!of->vm_ops) + return; + +- if (!kernfs_get_active(of->kn)) ++ if (!kernfs_get_active_of(of)) + return; + + if (of->vm_ops->open) + of->vm_ops->open(vma); + +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + } + + static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf) +@@ -375,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf) + if (!of->vm_ops) + return VM_FAULT_SIGBUS; + +- if (!kernfs_get_active(of->kn)) ++ if (!kernfs_get_active_of(of)) + return VM_FAULT_SIGBUS; + + ret = VM_FAULT_SIGBUS; + if (of->vm_ops->fault) + ret = of->vm_ops->fault(vmf); + +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + return ret; + } + +@@ -395,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf) + if (!of->vm_ops) + return VM_FAULT_SIGBUS; + +- if (!kernfs_get_active(of->kn)) ++ if (!kernfs_get_active_of(of)) + return VM_FAULT_SIGBUS; + + ret = 0; +@@ -404,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf) + else + file_update_time(file); + +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + return ret; + } + +@@ -418,14 +436,14 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, + if (!of->vm_ops) + return -EINVAL; + +- if (!kernfs_get_active(of->kn)) ++ if (!kernfs_get_active_of(of)) + return -EINVAL; + + ret = -EINVAL; + if (of->vm_ops->access) + ret = of->vm_ops->access(vma, addr, buf, len, write); + +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + return ret; + } + +@@ -504,7 +522,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma) + mutex_lock(&of->mutex); + + rc = -ENODEV; +- if (!kernfs_get_active(of->kn)) ++ if (!kernfs_get_active_of(of)) + goto out_unlock; + + ops = kernfs_ops(of->kn); +@@ -539,7 +557,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma) + } + vma->vm_ops = &kernfs_vm_ops; + out_put: +- kernfs_put_active(of->kn); ++ kernfs_put_active_of(of); + out_unlock: + mutex_unlock(&of->mutex); + +@@ -894,7 +912,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait) + struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); + __poll_t ret; + +- if (!kernfs_get_active(kn)) ++ if (!kernfs_get_active_of(of)) + return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; + + if (kn->attr.ops->poll) +@@ -902,7 +920,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait) + else + ret = kernfs_generic_poll(of, wait); + +- kernfs_put_active(kn); ++ kernfs_put_active_of(of); + return ret; + } + +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index cc764da581c43c..1bcdaee7e856f0 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -873,6 +873,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, + + if (fsinfo->xattr_support) + server->caps |= NFS_CAP_XATTR; ++ else ++ server->caps &= ~NFS_CAP_XATTR; + #endif + } + +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index a1ff4a4f5380eb..4e53708dfcf434 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -469,8 +469,16 @@ ssize_t 
nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, + if (user_backed_iter(iter)) + dreq->flags = NFS_ODIRECT_SHOULD_DIRTY; + +- if (!swap) +- nfs_start_io_direct(inode); ++ if (!swap) { ++ result = nfs_start_io_direct(inode); ++ if (result) { ++ /* release the reference that would usually be ++ * consumed by nfs_direct_read_schedule_iovec() ++ */ ++ nfs_direct_req_release(dreq); ++ goto out_release; ++ } ++ } + + NFS_I(inode)->read_io += count; + requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos); +@@ -1023,7 +1031,14 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_STABLE); + } else { +- nfs_start_io_direct(inode); ++ result = nfs_start_io_direct(inode); ++ if (result) { ++ /* release the reference that would usually be ++ * consumed by nfs_direct_write_schedule_iovec() ++ */ ++ nfs_direct_req_release(dreq); ++ goto out_release; ++ } + + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_COND_STABLE); +diff --git a/fs/nfs/file.c b/fs/nfs/file.c +index 003dda0018403d..2f4db026f8d678 100644 +--- a/fs/nfs/file.c ++++ b/fs/nfs/file.c +@@ -167,7 +167,10 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) + iocb->ki_filp, + iov_iter_count(to), (unsigned long) iocb->ki_pos); + +- nfs_start_io_read(inode); ++ result = nfs_start_io_read(inode); ++ if (result) ++ return result; ++ + result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); + if (!result) { + result = generic_file_read_iter(iocb, to); +@@ -188,7 +191,10 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe + + dprintk("NFS: splice_read(%pD2, %zu@%llu)\n", in, len, *ppos); + +- nfs_start_io_read(inode); ++ result = nfs_start_io_read(inode); ++ if (result) ++ return result; ++ + result = nfs_revalidate_mapping(inode, in->f_mapping); + if (!result) { + result = filemap_splice_read(in, ppos, pipe, len, flags); +@@ -668,7 +674,9 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) + nfs_clear_invalid_mapping(file->f_mapping); + + since = filemap_sample_wb_err(file->f_mapping); +- nfs_start_io_write(inode); ++ error = nfs_start_io_write(inode); ++ if (error) ++ return error; + result = generic_write_checks(iocb, from); + if (result > 0) + result = generic_perform_write(iocb, from); +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 7354b6b1047833..42c73c647a27fe 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -276,7 +276,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1, + struct pnfs_layout_segment *l2) + { + const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1); +- const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1); ++ const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2); + u32 i; + + if (fl1->mirror_array_cnt != fl2->mirror_array_cnt) +@@ -756,8 +756,11 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg, + continue; + + if (check_device && +- nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) ++ nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) { ++ // reinitialize the error state in case if this is the last iteration ++ ds = ERR_PTR(-EINVAL); + continue; ++ } + + *best_idx = idx; + break; +@@ -787,7 +790,7 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg, + struct nfs4_pnfs_ds *ds; + + ds = ff_layout_choose_valid_ds_for_read(lseg, 
start_idx, best_idx); +- if (ds) ++ if (!IS_ERR(ds)) + return ds; + return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx); + } +@@ -801,7 +804,7 @@ ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, + + ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx, + best_idx); +- if (ds || !pgio->pg_mirror_idx) ++ if (!IS_ERR(ds) || !pgio->pg_mirror_idx) + return ds; + return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx); + } +@@ -859,7 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, + req->wb_nio = 0; + + ds = ff_layout_get_ds_for_read(pgio, &ds_idx); +- if (!ds) { ++ if (IS_ERR(ds)) { + if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) + goto out_mds; + pnfs_generic_pg_cleanup(pgio); +@@ -1063,11 +1066,13 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr) + { + u32 idx = hdr->pgio_mirror_idx + 1; + u32 new_idx = 0; ++ struct nfs4_pnfs_ds *ds; + +- if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx)) +- ff_layout_send_layouterror(hdr->lseg); +- else ++ ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx); ++ if (IS_ERR(ds)) + pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); ++ else ++ ff_layout_send_layouterror(hdr->lseg); + pnfs_read_resend_pnfs(hdr, new_idx); + } + +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 7e7dd2aab449dd..5cd5e4226db364 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -645,8 +645,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + trace_nfs_setattr_enter(inode); + + /* Write all dirty data */ +- if (S_ISREG(inode->i_mode)) ++ if (S_ISREG(inode->i_mode)) { ++ nfs_file_block_o_direct(NFS_I(inode)); + nfs_sync_inode(inode); ++ } + + fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); + if (fattr == NULL) { +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h +index 4eea91d054b241..bde81e0abf0ae1 100644 +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -6,6 +6,7 @@ + #include "nfs4_fs.h" + #include + #include ++#include + #include + #include + #include +@@ -461,11 +462,11 @@ extern const struct netfs_request_ops nfs_netfs_ops; + #endif + + /* io.c */ +-extern void nfs_start_io_read(struct inode *inode); ++extern __must_check int nfs_start_io_read(struct inode *inode); + extern void nfs_end_io_read(struct inode *inode); +-extern void nfs_start_io_write(struct inode *inode); ++extern __must_check int nfs_start_io_write(struct inode *inode); + extern void nfs_end_io_write(struct inode *inode); +-extern void nfs_start_io_direct(struct inode *inode); ++extern __must_check int nfs_start_io_direct(struct inode *inode); + extern void nfs_end_io_direct(struct inode *inode); + + static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi) +@@ -473,6 +474,16 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi) + return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0; + } + ++/* Must be called with exclusively locked inode->i_rwsem */ ++static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi) ++{ ++ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) { ++ clear_bit(NFS_INO_ODIRECT, &nfsi->flags); ++ inode_dio_wait(&nfsi->vfs_inode); ++ } ++} ++ ++ + /* namespace.c */ + #define NFS_PATH_CANONICAL 1 + extern char *nfs_path(char **p, struct dentry *dentry, +diff --git a/fs/nfs/io.c b/fs/nfs/io.c +index b5551ed8f648bc..d275b0a250bf3b 100644 +--- a/fs/nfs/io.c ++++ b/fs/nfs/io.c +@@ -14,15 +14,6 @@ + + #include "internal.h" + +-/* Call with exclusively locked inode->i_rwsem */ +-static void nfs_block_o_direct(struct 
nfs_inode *nfsi, struct inode *inode) +-{ +- if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) { +- clear_bit(NFS_INO_ODIRECT, &nfsi->flags); +- inode_dio_wait(inode); +- } +-} +- + /** + * nfs_start_io_read - declare the file is being used for buffered reads + * @inode: file inode +@@ -39,19 +30,28 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode) + * Note that buffered writes and truncates both take a write lock on + * inode->i_rwsem, meaning that those are serialised w.r.t. the reads. + */ +-void ++int + nfs_start_io_read(struct inode *inode) + { + struct nfs_inode *nfsi = NFS_I(inode); ++ int err; ++ + /* Be an optimist! */ +- down_read(&inode->i_rwsem); ++ err = down_read_killable(&inode->i_rwsem); ++ if (err) ++ return err; + if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0) +- return; ++ return 0; + up_read(&inode->i_rwsem); ++ + /* Slow path.... */ +- down_write(&inode->i_rwsem); +- nfs_block_o_direct(nfsi, inode); ++ err = down_write_killable(&inode->i_rwsem); ++ if (err) ++ return err; ++ nfs_file_block_o_direct(nfsi); + downgrade_write(&inode->i_rwsem); ++ ++ return 0; + } + + /** +@@ -74,11 +74,15 @@ nfs_end_io_read(struct inode *inode) + * Declare that a buffered read operation is about to start, and ensure + * that we block all direct I/O. + */ +-void ++int + nfs_start_io_write(struct inode *inode) + { +- down_write(&inode->i_rwsem); +- nfs_block_o_direct(NFS_I(inode), inode); ++ int err; ++ ++ err = down_write_killable(&inode->i_rwsem); ++ if (!err) ++ nfs_file_block_o_direct(NFS_I(inode)); ++ return err; + } + + /** +@@ -119,19 +123,28 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode) + * Note that buffered writes and truncates both take a write lock on + * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT. + */ +-void ++int + nfs_start_io_direct(struct inode *inode) + { + struct nfs_inode *nfsi = NFS_I(inode); ++ int err; ++ + /* Be an optimist! */ +- down_read(&inode->i_rwsem); ++ err = down_read_killable(&inode->i_rwsem); ++ if (err) ++ return err; + if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0) +- return; ++ return 0; + up_read(&inode->i_rwsem); ++ + /* Slow path.... 
*/ +- down_write(&inode->i_rwsem); ++ err = down_write_killable(&inode->i_rwsem); ++ if (err) ++ return err; + nfs_block_buffered(nfsi, inode); + downgrade_write(&inode->i_rwsem); ++ ++ return 0; + } + + /** +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c +index 9f0d69e6526443..582cf8a469560b 100644 +--- a/fs/nfs/nfs42proc.c ++++ b/fs/nfs/nfs42proc.c +@@ -112,6 +112,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, + exception.inode = inode; + exception.state = lock->open_context->state; + ++ nfs_file_block_o_direct(NFS_I(inode)); + err = nfs_sync_inode(inode); + if (err) + goto out; +@@ -355,6 +356,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, + return status; + } + ++ nfs_file_block_o_direct(NFS_I(dst_inode)); + status = nfs_sync_inode(dst_inode); + if (status) + return status; +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c +index 02788c3c85e5bb..befdb0f4e6dc3c 100644 +--- a/fs/nfs/nfs4file.c ++++ b/fs/nfs/nfs4file.c +@@ -282,9 +282,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off, + + /* flush all pending writes on both src and dst so that server + * has the latest data */ ++ nfs_file_block_o_direct(NFS_I(src_inode)); + ret = nfs_sync_inode(src_inode); + if (ret) + goto out_unlock; ++ nfs_file_block_o_direct(NFS_I(dst_inode)); + ret = nfs_sync_inode(dst_inode); + if (ret) + goto out_unlock; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 6debcfc63222d2..124b9cee6fed7d 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -3882,8 +3882,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f + res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; + } + memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); +- server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | +- NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); ++ server->caps &= ++ ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | ++ NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS); + server->fattr_valid = NFS_ATTR_FATTR_V4; + if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && + res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) +@@ -3951,7 +3952,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) + }; + int err; + +- nfs_server_set_init_caps(server); + do { + err = nfs4_handle_exception(server, + _nfs4_server_capabilities(server, fhandle), +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index b2bbf3d6d177e8..cd78b7ecbd4325 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -1131,6 +1131,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + }; + struct inode *inode; + __be32 status = nfs_ok; ++ bool save_no_wcc; + int err; + + if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { +@@ -1156,8 +1157,11 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + + if (status) + goto out; ++ save_no_wcc = cstate->current_fh.fh_no_wcc; ++ cstate->current_fh.fh_no_wcc = true; + status = nfsd_setattr(rqstp, &cstate->current_fh, &attrs, + 0, (time64_t)0); ++ cstate->current_fh.fh_no_wcc = save_no_wcc; + if (!status) + status = nfserrno(attrs.na_labelerr); + if (!status) +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index b3e51d88faff46..5ee7149ceaa5a7 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -480,7 +480,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + int accmode = NFSD_MAY_SATTR; + umode_t ftype = 0; + __be32 err; +- int host_err; ++ int host_err = 0; + bool get_write_count; + bool size_change = 
(iap->ia_valid & ATTR_SIZE); + int retries; +@@ -538,6 +538,9 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + } + + inode_lock(inode); ++ err = fh_fill_pre_attrs(fhp); ++ if (err) ++ goto out_unlock; + for (retries = 1;;) { + struct iattr attrs; + +@@ -565,13 +568,15 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + attr->na_aclerr = set_posix_acl(&nop_mnt_idmap, + dentry, ACL_TYPE_DEFAULT, + attr->na_dpacl); ++ fh_fill_post_attrs(fhp); ++out_unlock: + inode_unlock(inode); + if (size_change) + put_write_access(inode); + out: + if (!host_err) + host_err = commit_metadata(fhp); +- return nfserrno(host_err); ++ return err != 0 ? err : nfserrno(host_err); + } + + #if defined(CONFIG_NFSD_V4) +@@ -1965,11 +1970,9 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, + err = nfserr_file_open; + else + err = nfserr_acces; +- } else { +- err = nfserrno(host_err); + } + out: +- return err; ++ return err != nfs_ok ? err : nfserrno(host_err); + out_unlock: + inode_unlock(dirp); + goto out_drop_write; +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c +index f7672472fa8279..5e86c7e2c82125 100644 +--- a/fs/ocfs2/extent_map.c ++++ b/fs/ocfs2/extent_map.c +@@ -696,6 +696,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, + * it not only handles the fiemap for inlined files, but also deals + * with the fast symlink, cause they have no difference for extent + * mapping per se. ++ * ++ * Must be called with ip_alloc_sem semaphore held. + */ + static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, + struct fiemap_extent_info *fieinfo, +@@ -707,6 +709,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, + u64 phys; + u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST; + struct ocfs2_inode_info *oi = OCFS2_I(inode); ++ lockdep_assert_held_read(&oi->ip_alloc_sem); + + di = (struct ocfs2_dinode *)di_bh->b_data; + if (ocfs2_inode_is_fast_symlink(inode)) +@@ -722,8 +725,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, + phys += offsetof(struct ocfs2_dinode, + id2.i_data.id_data); + ++ /* Release the ip_alloc_sem to prevent deadlock on page fault */ ++ up_read(&OCFS2_I(inode)->ip_alloc_sem); + ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count, + flags); ++ down_read(&OCFS2_I(inode)->ip_alloc_sem); + if (ret < 0) + return ret; + } +@@ -792,9 +798,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits; + phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits; + virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits; +- ++ /* Release the ip_alloc_sem to prevent deadlock on page fault */ ++ up_read(&OCFS2_I(inode)->ip_alloc_sem); + ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes, + len_bytes, fe_flags); ++ down_read(&OCFS2_I(inode)->ip_alloc_sem); + if (ret) + break; + +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index db3f2c6abc162a..4cadd2fd23d8f8 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -388,7 +388,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, + if (proc_alloc_inum(&dp->low_ino)) + goto out_free_entry; + +- pde_set_flags(dp); ++ if (!S_ISDIR(dp->mode)) ++ pde_set_flags(dp); + + write_lock(&proc_subdir_lock); + dp->parent = dir; +diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c +index 7a2b81fbd9cfd2..1058066913dd60 100644 +--- 
a/fs/smb/client/file.c ++++ b/fs/smb/client/file.c +@@ -2884,17 +2884,21 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping, + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile); + if (rc) { + cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc); ++ folio_unlock(folio); + goto err_xid; + } + + rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize, + &wsize, credits); +- if (rc != 0) ++ if (rc != 0) { ++ folio_unlock(folio); + goto err_close; ++ } + + wdata = cifs_writedata_alloc(cifs_writev_complete); + if (!wdata) { + rc = -ENOMEM; ++ folio_unlock(folio); + goto err_uncredit; + } + +@@ -3041,17 +3045,22 @@ static ssize_t cifs_writepages_begin(struct address_space *mapping, + lock_again: + if (wbc->sync_mode != WB_SYNC_NONE) { + ret = folio_lock_killable(folio); +- if (ret < 0) ++ if (ret < 0) { ++ folio_put(folio); + return ret; ++ } + } else { +- if (!folio_trylock(folio)) ++ if (!folio_trylock(folio)) { ++ folio_put(folio); + goto search_again; ++ } + } + + if (folio->mapping != mapping || + !folio_test_dirty(folio)) { + start += folio_size(folio); + folio_unlock(folio); ++ folio_put(folio); + goto search_again; + } + +@@ -3081,6 +3090,7 @@ static ssize_t cifs_writepages_begin(struct address_space *mapping, + out: + if (ret > 0) + *_start = start + ret; ++ folio_put(folio); + return ret; + } + +diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h +index 29ba91fc54076c..45421269ddd881 100644 +--- a/fs/smb/server/connection.h ++++ b/fs/smb/server/connection.h +@@ -27,6 +27,7 @@ enum { + KSMBD_SESS_EXITING, + KSMBD_SESS_NEED_RECONNECT, + KSMBD_SESS_NEED_NEGOTIATE, ++ KSMBD_SESS_NEED_SETUP, + KSMBD_SESS_RELEASING + }; + +@@ -195,6 +196,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn) + return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE; + } + ++static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn) ++{ ++ return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP; ++} ++ + static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn) + { + return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT; +@@ -225,6 +231,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn) + WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE); + } + ++static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn) ++{ ++ WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP); ++} ++ + static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn) + { + WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT); +diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c +index 82dcc86a32c57a..408f47220c07b7 100644 +--- a/fs/smb/server/mgmt/user_session.c ++++ b/fs/smb/server/mgmt/user_session.c +@@ -373,12 +373,12 @@ void destroy_previous_session(struct ksmbd_conn *conn, + ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT); + err = ksmbd_conn_wait_idle_sess_id(conn, id); + if (err) { +- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE); ++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP); + goto out; + } + ksmbd_destroy_file_table(&prev_sess->file_table); + prev_sess->state = SMB2_SESSION_EXPIRED; +- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE); ++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP); + out: + up_write(&conn->session_lock); + up_write(&sessions_table_lock); +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 85e7bc3a2bd33c..ae47450dc40f82 100644 +--- 
a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1252,7 +1252,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work) + } + + conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode); +- ksmbd_conn_set_need_negotiate(conn); ++ ksmbd_conn_set_need_setup(conn); + + err_out: + if (rc) +@@ -1273,6 +1273,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess, + if (sess->Preauth_HashValue) + return 0; + ++ if (!conn->preauth_info) ++ return -ENOMEM; ++ + sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue, + PREAUTH_HASHVALUE_SIZE, GFP_KERNEL); + if (!sess->Preauth_HashValue) +@@ -1688,6 +1691,11 @@ int smb2_sess_setup(struct ksmbd_work *work) + + ksmbd_debug(SMB, "Received request for session setup\n"); + ++ if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) { ++ work->send_no_response = 1; ++ return rc; ++ } ++ + WORK_BUFFERS(work, req, rsp); + + rsp->StructureSize = cpu_to_le16(9); +@@ -1919,7 +1927,7 @@ int smb2_sess_setup(struct ksmbd_work *work) + if (try_delay) { + ksmbd_conn_set_need_reconnect(conn); + ssleep(5); +- ksmbd_conn_set_need_negotiate(conn); ++ ksmbd_conn_set_need_setup(conn); + } + } + smb2_set_err_rsp(work); +@@ -2249,7 +2257,7 @@ int smb2_session_logoff(struct ksmbd_work *work) + ksmbd_free_user(sess->user); + sess->user = NULL; + } +- ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE); ++ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP); + + rsp->StructureSize = cpu_to_le16(4); + err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp)); +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h +index 9b673fefcef8a6..f9de53fff3acc4 100644 +--- a/include/linux/compiler-clang.h ++++ b/include/linux/compiler-clang.h +@@ -23,23 +23,42 @@ + #define KASAN_ABI_VERSION 5 + + /* ++ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually ++ * dropping __has_feature support for sanitizers: ++ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c ++ * Create these macros for older versions of clang so that it is easy to clean ++ * up once the minimum supported version of LLVM for building the kernel always ++ * creates these macros. ++ * + * Note: Checking __has_feature(*_sanitizer) is only true if the feature is + * enabled. Therefore it is not required to additionally check defined(CONFIG_*) + * to avoid adding redundant attributes in other configurations. + */ ++#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__) ++#define __SANITIZE_ADDRESS__ ++#endif ++#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__) ++#define __SANITIZE_HWADDRESS__ ++#endif ++#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__) ++#define __SANITIZE_THREAD__ ++#endif + +-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) +-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */ ++/* ++ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel. 
++ */ ++#ifdef __SANITIZE_HWADDRESS__ + #define __SANITIZE_ADDRESS__ ++#endif ++ ++#ifdef __SANITIZE_ADDRESS__ + #define __no_sanitize_address \ + __attribute__((no_sanitize("address", "hwaddress"))) + #else + #define __no_sanitize_address + #endif + +-#if __has_feature(thread_sanitizer) +-/* emulate gcc's __SANITIZE_THREAD__ flag */ +-#define __SANITIZE_THREAD__ ++#ifdef __SANITIZE_THREAD__ + #define __no_sanitize_thread \ + __attribute__((no_sanitize("thread"))) + #else +diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h +new file mode 100644 +index 00000000000000..9174fa59bbc54d +--- /dev/null ++++ b/include/linux/pgalloc.h +@@ -0,0 +1,29 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _LINUX_PGALLOC_H ++#define _LINUX_PGALLOC_H ++ ++#include ++#include ++ ++/* ++ * {pgd,p4d}_populate_kernel() are defined as macros to allow ++ * compile-time optimization based on the configured page table levels. ++ * Without this, linking may fail because callers (e.g., KASAN) may rely ++ * on calls to these functions being optimized away when passing symbols ++ * that exist only for certain page table levels. ++ */ ++#define pgd_populate_kernel(addr, pgd, p4d) \ ++ do { \ ++ pgd_populate(&init_mm, pgd, p4d); \ ++ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED) \ ++ arch_sync_kernel_mappings(addr, addr); \ ++ } while (0) ++ ++#define p4d_populate_kernel(addr, p4d, pud) \ ++ do { \ ++ p4d_populate(&init_mm, p4d, pud); \ ++ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED) \ ++ arch_sync_kernel_mappings(addr, addr); \ ++ } while (0) ++ ++#endif /* _LINUX_PGALLOC_H */ +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h +index e42388b6998b17..78a518129e8f06 100644 +--- a/include/linux/pgtable.h ++++ b/include/linux/pgtable.h +@@ -1467,8 +1467,8 @@ static inline int pmd_protnone(pmd_t pmd) + + /* + * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values +- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() +- * needs to be called. ++ * and let generic vmalloc, ioremap and page table update code know when ++ * arch_sync_kernel_mappings() needs to be called. + */ + #ifndef ARCH_PAGE_TABLE_SYNC_MASK + #define ARCH_PAGE_TABLE_SYNC_MASK 0 +@@ -1601,10 +1601,11 @@ static inline bool arch_has_pfn_modify_check(void) + /* + * Page Table Modification bits for pgtbl_mod_mask. + * +- * These are used by the p?d_alloc_track*() set of functions an in the generic +- * vmalloc/ioremap code to track at which page-table levels entries have been +- * modified. Based on that the code can better decide when vmalloc and ioremap +- * mapping changes need to be synchronized to other page-tables in the system. ++ * These are used by the p?d_alloc_track*() and p*d_populate_kernel() ++ * functions in the generic vmalloc, ioremap and page table update code ++ * to track at which page-table levels entries have been modified. ++ * Based on that the code can better decide when page table changes need ++ * to be synchronized to other page-tables in the system. 
+ */ + #define __PGTBL_PGD_MODIFIED 0 + #define __PGTBL_P4D_MODIFIED 1 +diff --git a/include/net/sock.h b/include/net/sock.h +index b5f7208a9ec383..f8e029cc48ccce 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -353,6 +353,8 @@ struct sk_filter; + * @sk_txtime_unused: unused txtime flags + * @ns_tracker: tracker for netns reference + * @sk_bind2_node: bind node in the bhash2 table ++ * @sk_owner: reference to the real owner of the socket that calls ++ * sock_lock_init_class_and_name(). + */ + struct sock { + /* +@@ -545,6 +547,10 @@ struct sock { + struct rcu_head sk_rcu; + netns_tracker ns_tracker; + struct hlist_node sk_bind2_node; ++ ++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES) ++ struct module *sk_owner; ++#endif + }; + + enum sk_pacing { +@@ -1699,6 +1705,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) + sk_mem_reclaim(sk); + } + ++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES) ++static inline void sk_owner_set(struct sock *sk, struct module *owner) ++{ ++ __module_get(owner); ++ sk->sk_owner = owner; ++} ++ ++static inline void sk_owner_clear(struct sock *sk) ++{ ++ sk->sk_owner = NULL; ++} ++ ++static inline void sk_owner_put(struct sock *sk) ++{ ++ module_put(sk->sk_owner); ++} ++#else ++static inline void sk_owner_set(struct sock *sk, struct module *owner) ++{ ++} ++ ++static inline void sk_owner_clear(struct sock *sk) ++{ ++} ++ ++static inline void sk_owner_put(struct sock *sk) ++{ ++} ++#endif + /* + * Macro so as to not evaluate some arguments when + * lockdep is not enabled. +@@ -1708,13 +1743,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) + */ + #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ + do { \ ++ sk_owner_set(sk, THIS_MODULE); \ + sk->sk_lock.owned = 0; \ + init_waitqueue_head(&sk->sk_lock.wq); \ + spin_lock_init(&(sk)->sk_lock.slock); \ + debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ +- sizeof((sk)->sk_lock)); \ ++ sizeof((sk)->sk_lock)); \ + lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ +- (skey), (sname)); \ ++ (skey), (sname)); \ + lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ + } while (0) + +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c +index 4b20a72ab8cffe..90c281e1379eeb 100644 +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -1204,8 +1204,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u + goto out; + } + +- /* allocate hrtimer via map_kmalloc to use memcg accounting */ +- cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); ++ /* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until ++ * kmalloc_nolock() is available, avoid locking issues by using ++ * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM). ++ */ ++ cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node); + if (!cb) { + ret = -ENOMEM; + goto out; +diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h +index 3fcd9f92d38612..9189ccd4fee472 100644 +--- a/kernel/rcu/tasks.h ++++ b/kernel/rcu/tasks.h +@@ -150,8 +150,6 @@ static struct rcu_tasks rt_name = \ + } + + #ifdef CONFIG_TASKS_RCU +-/* Track exiting tasks in order to allow them to be waited for. */ +-DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); + + /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). 
*/ + static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); +@@ -879,10 +877,12 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) + // number of voluntary context switches, and add that task to the + // holdout list. + // rcu_tasks_postscan(): +-// Invoke synchronize_srcu() to ensure that all tasks that were +-// in the process of exiting (and which thus might not know to +-// synchronize with this RCU Tasks grace period) have completed +-// exiting. ++// Gather per-CPU lists of tasks in do_exit() to ensure that all ++// tasks that were in the process of exiting (and which thus might ++// not know to synchronize with this RCU Tasks grace period) have ++// completed exiting. The synchronize_rcu() in rcu_tasks_postgp() ++// will take care of any tasks stuck in the non-preemptible region ++// of do_exit() following its call to exit_tasks_rcu_stop(). + // check_all_holdout_tasks(), repeatedly until holdout list is empty: + // Scans the holdout list, attempting to identify a quiescent state + // for each task on the list. If there is a quiescent state, the +@@ -895,8 +895,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) + // with interrupts disabled. + // + // For each exiting task, the exit_tasks_rcu_start() and +-// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU +-// read-side critical sections waited for by rcu_tasks_postscan(). ++// exit_tasks_rcu_finish() functions add and remove, respectively, the ++// current task to a per-CPU list of tasks that rcu_tasks_postscan() must ++// wait on. This is necessary because rcu_tasks_postscan() must wait on ++// tasks that have already been removed from the global list of tasks. + // + // Pre-grace-period update-side code is ordered before the grace + // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code +@@ -960,9 +962,13 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) + } + } + ++void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); ++DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); ++ + /* Processing between scanning taskslist and draining the holdout list. */ + static void rcu_tasks_postscan(struct list_head *hop) + { ++ int cpu; + int rtsi = READ_ONCE(rcu_task_stall_info); + + if (!IS_ENABLED(CONFIG_TINY_RCU)) { +@@ -976,9 +982,9 @@ static void rcu_tasks_postscan(struct list_head *hop) + * this, divide the fragile exit path part in two intersecting + * read side critical sections: + * +- * 1) An _SRCU_ read side starting before calling exit_notify(), +- * which may remove the task from the tasklist, and ending after +- * the final preempt_disable() call in do_exit(). ++ * 1) A task_struct list addition before calling exit_notify(), ++ * which may remove the task from the tasklist, with the ++ * removal after the final preempt_disable() call in do_exit(). + * + * 2) An _RCU_ read side starting with the final preempt_disable() + * call in do_exit() and ending with the final call to schedule() +@@ -987,7 +993,37 @@ static void rcu_tasks_postscan(struct list_head *hop) + * This handles the part 1). And postgp will handle part 2) with a + * call to synchronize_rcu(). 
+ */ +- synchronize_srcu(&tasks_rcu_exit_srcu); ++ ++ for_each_possible_cpu(cpu) { ++ unsigned long j = jiffies + 1; ++ struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu); ++ struct task_struct *t; ++ struct task_struct *t1; ++ struct list_head tmp; ++ ++ raw_spin_lock_irq_rcu_node(rtpcp); ++ list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) { ++ if (list_empty(&t->rcu_tasks_holdout_list)) ++ rcu_tasks_pertask(t, hop); ++ ++ // RT kernels need frequent pauses, otherwise ++ // pause at least once per pair of jiffies. ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j)) ++ continue; ++ ++ // Keep our place in the list while pausing. ++ // Nothing else traverses this list, so adding a ++ // bare list_head is OK. ++ list_add(&tmp, &t->rcu_tasks_exit_list); ++ raw_spin_unlock_irq_rcu_node(rtpcp); ++ cond_resched(); // For CONFIG_PREEMPT=n kernels ++ raw_spin_lock_irq_rcu_node(rtpcp); ++ t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list); ++ list_del(&tmp); ++ j = jiffies + 1; ++ } ++ raw_spin_unlock_irq_rcu_node(rtpcp); ++ } + + if (!IS_ENABLED(CONFIG_TINY_RCU)) + del_timer_sync(&tasks_rcu_exit_srcu_stall_timer); +@@ -1055,7 +1091,6 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp) + * + * In addition, this synchronize_rcu() waits for exiting tasks + * to complete their final preempt_disable() region of execution, +- * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu), + * enforcing the whole region before tasklist removal until + * the final schedule() with TASK_DEAD state to be an RCU TASKS + * read side critical section. +@@ -1063,9 +1098,6 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp) + synchronize_rcu(); + } + +-void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); +-DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); +- + static void tasks_rcu_exit_srcu_stall(struct timer_list *unused) + { + #ifndef CONFIG_TINY_RCU +@@ -1175,25 +1207,48 @@ struct task_struct *get_rcu_tasks_gp_kthread(void) + EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); + + /* +- * Contribute to protect against tasklist scan blind spot while the +- * task is exiting and may be removed from the tasklist. See +- * corresponding synchronize_srcu() for further details. ++ * Protect against tasklist scan blind spot while the task is exiting and ++ * may be removed from the tasklist. Do this by adding the task to yet ++ * another list. ++ * ++ * Note that the task will remove itself from this list, so there is no ++ * need for get_task_struct(), except in the case where rcu_tasks_pertask() ++ * adds it to the holdout list, in which case rcu_tasks_pertask() supplies ++ * the needed get_task_struct(). 
+ */ +-void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) ++void exit_tasks_rcu_start(void) + { +- current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); ++ unsigned long flags; ++ struct rcu_tasks_percpu *rtpcp; ++ struct task_struct *t = current; ++ ++ WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list)); ++ preempt_disable(); ++ rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu); ++ t->rcu_tasks_exit_cpu = smp_processor_id(); ++ raw_spin_lock_irqsave_rcu_node(rtpcp, flags); ++ if (!rtpcp->rtp_exit_list.next) ++ INIT_LIST_HEAD(&rtpcp->rtp_exit_list); ++ list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list); ++ raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); ++ preempt_enable(); + } + + /* +- * Contribute to protect against tasklist scan blind spot while the +- * task is exiting and may be removed from the tasklist. See +- * corresponding synchronize_srcu() for further details. ++ * Remove the task from the "yet another list" because do_exit() is now ++ * non-preemptible, allowing synchronize_rcu() to wait beyond this point. + */ +-void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu) ++void exit_tasks_rcu_stop(void) + { ++ unsigned long flags; ++ struct rcu_tasks_percpu *rtpcp; + struct task_struct *t = current; + +- __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); ++ WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list)); ++ rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu); ++ raw_spin_lock_irqsave_rcu_node(rtpcp, flags); ++ list_del_init(&t->rcu_tasks_exit_list); ++ raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + } + + /* +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 6d9da768604d68..ccea52adcba672 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -671,17 +671,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) + /* + * Is the high resolution mode active ? + */ +-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base) ++static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base) + { + return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? + cpu_base->hres_active : 0; + } + +-static inline int hrtimer_hres_active(void) +-{ +- return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases)); +-} +- + static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base, + struct hrtimer *next_timer, + ktime_t expires_next) +@@ -705,7 +700,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base, + * set. So we'd effectively block all timers until the T2 event + * fires. + */ +- if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected) ++ if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected) + return; + + tick_program_event(expires_next, 1); +@@ -813,13 +808,13 @@ static void retrigger_next_event(void *arg) + * of the next expiring timer is enough. The return from the SMP + * function call will take care of the reprogramming in case the + * CPU was in a NOHZ idle sleep. ++ * ++ * In periodic low resolution mode, the next softirq expiration ++ * must also be updated. 
+ */ +- if (!__hrtimer_hres_active(base) && !tick_nohz_active) +- return; +- + raw_spin_lock(&base->lock); + hrtimer_update_base(base); +- if (__hrtimer_hres_active(base)) ++ if (hrtimer_hres_active(base)) + hrtimer_force_reprogram(base, 0); + else + hrtimer_update_next_event(base); +@@ -976,7 +971,7 @@ void clock_was_set(unsigned int bases) + cpumask_var_t mask; + int cpu; + +- if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active) ++ if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active) + goto out_timerfd; + + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { +@@ -1554,7 +1549,7 @@ u64 hrtimer_get_next_event(void) + + raw_spin_lock_irqsave(&cpu_base->lock, flags); + +- if (!__hrtimer_hres_active(cpu_base)) ++ if (!hrtimer_hres_active(cpu_base)) + expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); + + raw_spin_unlock_irqrestore(&cpu_base->lock, flags); +@@ -1577,7 +1572,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude) + + raw_spin_lock_irqsave(&cpu_base->lock, flags); + +- if (__hrtimer_hres_active(cpu_base)) { ++ if (hrtimer_hres_active(cpu_base)) { + unsigned int active; + + if (!cpu_base->softirq_activated) { +@@ -1938,25 +1933,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) + tick_program_event(expires_next, 1); + pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); + } +- +-/* called with interrupts disabled */ +-static inline void __hrtimer_peek_ahead_timers(void) +-{ +- struct tick_device *td; +- +- if (!hrtimer_hres_active()) +- return; +- +- td = this_cpu_ptr(&tick_cpu_device); +- if (td && td->evtdev) +- hrtimer_interrupt(td->evtdev); +-} +- +-#else /* CONFIG_HIGH_RES_TIMERS */ +- +-static inline void __hrtimer_peek_ahead_timers(void) { } +- +-#endif /* !CONFIG_HIGH_RES_TIMERS */ ++#endif /* !CONFIG_HIGH_RES_TIMERS */ + + /* + * Called from run_local_timers in hardirq context every jiffy +@@ -1967,7 +1944,7 @@ void hrtimer_run_queues(void) + unsigned long flags; + ktime_t now; + +- if (__hrtimer_hres_active(cpu_base)) ++ if (hrtimer_hres_active(cpu_base)) + return; + + /* +@@ -2312,11 +2289,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu) + &new_base->clock_base[i]); + } + +- /* +- * The migration might have changed the first expiring softirq +- * timer on this CPU. Update it. 
+- */ +- __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT); + /* Tell the other CPU to retrigger the next event */ + smp_call_function_single(ncpu, retrigger_next_event, NULL, 0); + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index a32c8637503d14..a111be83c36939 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -750,7 +750,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, + /* copy the current bits to the new max */ + ret = trace_pid_list_first(filtered_pids, &pid); + while (!ret) { +- trace_pid_list_set(pid_list, pid); ++ ret = trace_pid_list_set(pid_list, pid); ++ if (ret < 0) ++ goto out; ++ + ret = trace_pid_list_next(filtered_pids, pid + 1, &pid); + nr_pids++; + } +@@ -787,6 +790,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, + trace_parser_clear(&parser); + ret = 0; + } ++ out: + trace_parser_put(&parser); + + if (ret < 0) { +@@ -7226,7 +7230,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, + entry = ring_buffer_event_data(event); + entry->ip = _THIS_IP_; + +- len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); ++ len = copy_from_user_nofault(&entry->buf, ubuf, cnt); + if (len) { + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); + cnt = FAULTED_SIZE; +@@ -7301,7 +7305,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf, + + entry = ring_buffer_event_data(event); + +- len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); ++ len = copy_from_user_nofault(&entry->id, ubuf, cnt); + if (len) { + entry->id = -1; + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); +diff --git a/mm/Kconfig b/mm/Kconfig +index c11cd01169e8d1..046c32686fc4d8 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -280,7 +280,7 @@ config SLAB + + config SLUB_TINY + bool "Configure SLUB for minimal memory footprint" +- depends on SLUB && EXPERT ++ depends on SLUB && EXPERT && !COMPILE_TEST + select SLAB_MERGE_DEFAULT + help + Configures the SLUB allocator in a way to achieve minimal memory +diff --git a/mm/damon/core.c b/mm/damon/core.c +index 43e4fe7ef17eb4..48747236c21ca8 100644 +--- a/mm/damon/core.c ++++ b/mm/damon/core.c +@@ -1043,6 +1043,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) + if (!quota->ms && !quota->sz) + return; + ++ /* First charge window */ ++ if (!quota->total_charged_sz && !quota->charged_from) ++ quota->charged_from = jiffies; ++ + /* New charge window starts */ + if (time_after_eq(jiffies, quota->charged_from + + msecs_to_jiffies(quota->reset_interval))) { +diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c +index 3de2916a65c38c..b4032538b22cf7 100644 +--- a/mm/damon/lru_sort.c ++++ b/mm/damon/lru_sort.c +@@ -203,6 +203,9 @@ static int damon_lru_sort_apply_parameters(void) + unsigned int hot_thres, cold_thres; + int err = 0; + ++ if (!damon_lru_sort_mon_attrs.sample_interval) ++ return -EINVAL; ++ + err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs); + if (err) + return err; +diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c +index 66e190f0374ac8..586daa2cefe4fe 100644 +--- a/mm/damon/reclaim.c ++++ b/mm/damon/reclaim.c +@@ -167,6 +167,9 @@ static int damon_reclaim_apply_parameters(void) + struct damos_filter *filter; + int err = 0; + ++ if (!damon_reclaim_mon_attrs.aggr_interval) ++ return -EINVAL; ++ + err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs); + if (err) + return err; +diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c +index b317f51dcc9876..91893543d47ca3 100644 +--- a/mm/damon/sysfs.c ++++ b/mm/damon/sysfs.c +@@ -1055,14 +1055,18 @@ 
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, + { + struct damon_sysfs_kdamond *kdamond = container_of(kobj, + struct damon_sysfs_kdamond, kobj); +- struct damon_ctx *ctx = kdamond->damon_ctx; +- bool running; ++ struct damon_ctx *ctx; ++ bool running = false; + +- if (!ctx) +- running = false; +- else ++ if (!mutex_trylock(&damon_sysfs_lock)) ++ return -EBUSY; ++ ++ ctx = kdamond->damon_ctx; ++ if (ctx) + running = damon_sysfs_ctx_running(ctx); + ++ mutex_unlock(&damon_sysfs_lock); ++ + return sysfs_emit(buf, "%s\n", running ? + damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] : + damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]); +diff --git a/mm/kasan/init.c b/mm/kasan/init.c +index 89895f38f72242..afecc04b486a63 100644 +--- a/mm/kasan/init.c ++++ b/mm/kasan/init.c +@@ -13,9 +13,9 @@ + #include + #include + #include ++#include + + #include +-#include + + #include "kasan.h" + +@@ -197,7 +197,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr, + pud_t *pud; + pmd_t *pmd; + +- p4d_populate(&init_mm, p4d, ++ p4d_populate_kernel(addr, p4d, + lm_alias(kasan_early_shadow_pud)); + pud = pud_offset(p4d, addr); + pud_populate(&init_mm, pud, +@@ -218,7 +218,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr, + } else { + p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); + pud_init(p); +- p4d_populate(&init_mm, p4d, p); ++ p4d_populate_kernel(addr, p4d, p); + } + } + zero_pud_populate(p4d, addr, next); +@@ -257,10 +257,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start, + * puds,pmds, so pgd_populate(), pud_populate() + * is noops. + */ +- pgd_populate(&init_mm, pgd, ++ pgd_populate_kernel(addr, pgd, + lm_alias(kasan_early_shadow_p4d)); + p4d = p4d_offset(pgd, addr); +- p4d_populate(&init_mm, p4d, ++ p4d_populate_kernel(addr, p4d, + lm_alias(kasan_early_shadow_pud)); + pud = pud_offset(p4d, addr); + pud_populate(&init_mm, pud, +@@ -279,7 +279,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start, + if (!p) + return -ENOMEM; + } else { +- pgd_populate(&init_mm, pgd, ++ pgd_populate_kernel(addr, pgd, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + } +diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c +index ecf9f5aa352005..9ca21d1e10ac19 100644 +--- a/mm/kasan/kasan_test.c ++++ b/mm/kasan/kasan_test.c +@@ -1053,6 +1053,7 @@ static void kasan_strings(struct kunit *test) + + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); ++ OPTIMIZER_HIDE_VAR(ptr); + + kfree(ptr); + +diff --git a/mm/khugepaged.c b/mm/khugepaged.c +index f227b39ae4cf74..16d29ee602c70a 100644 +--- a/mm/khugepaged.c ++++ b/mm/khugepaged.c +@@ -1240,6 +1240,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, + int result = SCAN_FAIL, referenced = 0; + int none_or_zero = 0, shared = 0; + struct page *page = NULL; ++ struct folio *folio = NULL; + unsigned long _address; + spinlock_t *ptl; + int node = NUMA_NO_NODE, unmapped = 0; +@@ -1326,29 +1327,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, + } + } + +- page = compound_head(page); +- ++ folio = page_folio(page); + /* + * Record which node the original page is from and save this + * information to cc->node_load[]. + * Khugepaged will allocate hugepage from the node has the max + * hit record. 
+ */ +- node = page_to_nid(page); ++ node = folio_nid(folio); + if (hpage_collapse_scan_abort(node, cc)) { + result = SCAN_SCAN_ABORT; + goto out_unmap; + } + cc->node_load[node]++; +- if (!PageLRU(page)) { ++ if (!folio_test_lru(folio)) { + result = SCAN_PAGE_LRU; + goto out_unmap; + } +- if (PageLocked(page)) { ++ if (folio_test_locked(folio)) { + result = SCAN_PAGE_LOCK; + goto out_unmap; + } +- if (!PageAnon(page)) { ++ if (!folio_test_anon(folio)) { + result = SCAN_PAGE_ANON; + goto out_unmap; + } +@@ -1363,7 +1363,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, + * has excessive GUP pins (i.e. 512). Anyway the same check + * will be done again later the risk seems low. + */ +- if (!is_refcount_suitable(page)) { ++ if (!is_refcount_suitable(&folio->page)) { + result = SCAN_PAGE_COUNT; + goto out_unmap; + } +@@ -1373,9 +1373,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, + * enough young pte to justify collapsing the page + */ + if (cc->is_khugepaged && +- (pte_young(pteval) || page_is_young(page) || +- PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, +- address))) ++ (pte_young(pteval) || folio_test_young(folio) || ++ folio_test_referenced(folio) || ++ mmu_notifier_test_young(vma->vm_mm, _address))) + referenced++; + } + if (!writable) { +@@ -1396,7 +1396,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, + *mmap_locked = false; + } + out: +- trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, ++ trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced, + none_or_zero, result, unmapped); + return result; + } +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index dae5e60d64e2fd..8bedcd288a0cc9 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -2535,10 +2535,9 @@ int unpoison_memory(unsigned long pfn) + static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + +- if (!pfn_valid(pfn)) +- return -ENXIO; +- +- p = pfn_to_page(pfn); ++ p = pfn_to_online_page(pfn); ++ if (!p) ++ return -EIO; + folio = page_folio(p); + + mutex_lock(&mf_mutex); +diff --git a/mm/percpu.c b/mm/percpu.c +index d287cebd58caa3..38d5121c2b652a 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -3157,7 +3157,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, + #endif /* BUILD_EMBED_FIRST_CHUNK */ + + #ifdef BUILD_PAGE_FIRST_CHUNK +-#include ++#include + + #ifndef P4D_TABLE_SIZE + #define P4D_TABLE_SIZE PAGE_SIZE +@@ -3185,7 +3185,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr) + p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE); + if (!p4d) + goto err_alloc; +- pgd_populate(&init_mm, pgd, p4d); ++ pgd_populate_kernel(addr, pgd, p4d); + } + + p4d = p4d_offset(pgd, addr); +@@ -3193,7 +3193,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr) + pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); + if (!pud) + goto err_alloc; +- p4d_populate(&init_mm, p4d, pud); ++ p4d_populate_kernel(addr, p4d, pud); + } + + pud = pud_offset(p4d, addr); +diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c +index a2cbe44c48e10f..589d6a262b6dad 100644 +--- a/mm/sparse-vmemmap.c ++++ b/mm/sparse-vmemmap.c +@@ -27,9 +27,9 @@ + #include + #include + #include ++#include + + #include +-#include + + /* + * Allocate a block of memory to be used to back the virtual memory map +@@ -225,7 +225,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node) + if (!p) + return NULL; + pud_init(p); +- p4d_populate(&init_mm, p4d, p); ++ 
p4d_populate_kernel(addr, p4d, p); + } + return p4d; + } +@@ -237,7 +237,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) + void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); + if (!p) + return NULL; +- pgd_populate(&init_mm, pgd, p); ++ pgd_populate_kernel(addr, pgd, p); + } + return pgd; + } +diff --git a/net/bridge/br.c b/net/bridge/br.c +index a6e94ceb7c9a08..a45db67197226b 100644 +--- a/net/bridge/br.c ++++ b/net/bridge/br.c +@@ -312,6 +312,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br, + int err = 0; + int opt_id; + ++ opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX); ++ if (opt_id != BITS_PER_LONG) { ++ NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d", ++ opt_id); ++ return -EINVAL; ++ } ++ + for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) { + bool on = !!(bm->optval & BIT(opt_id)); + +diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c +index 48668790160211..e0b966c2517cf1 100644 +--- a/net/can/j1939/bus.c ++++ b/net/can/j1939/bus.c +@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa) + if (!ecu) + ecu = j1939_ecu_create_locked(priv, name); + err = PTR_ERR_OR_ZERO(ecu); +- if (err) ++ if (err) { ++ if (j1939_address_is_unicast(sa)) ++ priv->ents[sa].nusers--; + goto done; ++ } + + ecu->nusers++; + /* TODO: do we care if ecu->addr != sa? */ +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c +index cf9a12d8da6f90..7bf4d4fb96735a 100644 +--- a/net/can/j1939/socket.c ++++ b/net/can/j1939/socket.c +@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) + ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa); + if (ret) { + j1939_netdev_stop(priv); ++ jsk->priv = NULL; ++ synchronize_rcu(); ++ j1939_priv_put(priv); + goto out_release_sock; + } + +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 3c8b78d9c4d1ce..8add91385375e1 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -1524,7 +1524,7 @@ static void con_fault_finish(struct ceph_connection *con) + * in case we faulted due to authentication, invalidate our + * current tickets so that we can get new ones. + */ +- if (con->v1.auth_retry) { ++ if (!ceph_msgr2(from_msgr(con->msgr)) && con->v1.auth_retry) { + dout("auth_retry %d, invalidating\n", con->v1.auth_retry); + if (con->ops->invalidate_authorizer) + con->ops->invalidate_authorizer(con); +@@ -1714,9 +1714,10 @@ static void clear_standby(struct ceph_connection *con) + { + /* come back from STANDBY? 
*/ + if (con->state == CEPH_CON_S_STANDBY) { +- dout("clear_standby %p and ++connect_seq\n", con); ++ dout("clear_standby %p\n", con); + con->state = CEPH_CON_S_PREOPEN; +- con->v1.connect_seq++; ++ if (!ceph_msgr2(from_msgr(con->msgr))) ++ con->v1.connect_seq++; + WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)); + WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)); + } +diff --git a/net/core/sock.c b/net/core/sock.c +index b74bc8175937e2..9918a9a337b616 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2029,6 +2029,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname, + */ + static inline void sock_lock_init(struct sock *sk) + { ++ sk_owner_clear(sk); ++ + if (sk->sk_kern_sock) + sock_lock_init_class_and_name( + sk, +@@ -2124,6 +2126,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) + cgroup_sk_free(&sk->sk_cgrp_data); + mem_cgroup_sk_free(sk); + security_sk_free(sk); ++ ++ sk_owner_put(sk); ++ + if (slab != NULL) + kmem_cache_free(slab, sk); + else +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c +index 906c38b9d66ff3..5514b5bedc9298 100644 +--- a/net/hsr/hsr_device.c ++++ b/net/hsr/hsr_device.c +@@ -59,7 +59,7 @@ static bool hsr_check_carrier(struct hsr_port *master) + + ASSERT_RTNL(); + +- hsr_for_each_port(master->hsr, port) { ++ hsr_for_each_port_rtnl(master->hsr, port) { + if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) { + netif_carrier_on(master->dev); + return true; +@@ -109,7 +109,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr) + struct hsr_port *port; + + mtu_max = ETH_DATA_LEN; +- hsr_for_each_port(hsr, port) ++ hsr_for_each_port_rtnl(hsr, port) + if (port->type != HSR_PT_MASTER) + mtu_max = min(port->dev->mtu, mtu_max); + +@@ -144,7 +144,7 @@ static int hsr_dev_open(struct net_device *dev) + hsr = netdev_priv(dev); + designation = '\0'; + +- hsr_for_each_port(hsr, port) { ++ hsr_for_each_port_rtnl(hsr, port) { + if (port->type == HSR_PT_MASTER) + continue; + switch (port->type) { +@@ -170,7 +170,24 @@ static int hsr_dev_open(struct net_device *dev) + + static int hsr_dev_close(struct net_device *dev) + { +- /* Nothing to do here. */ ++ struct hsr_port *port; ++ struct hsr_priv *hsr; ++ ++ hsr = netdev_priv(dev); ++ hsr_for_each_port_rtnl(hsr, port) { ++ if (port->type == HSR_PT_MASTER) ++ continue; ++ switch (port->type) { ++ case HSR_PT_SLAVE_A: ++ case HSR_PT_SLAVE_B: ++ dev_uc_unsync(port->dev, dev); ++ dev_mc_unsync(port->dev, dev); ++ break; ++ default: ++ break; ++ } ++ } ++ + return 0; + } + +@@ -190,7 +207,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr, + * may become enabled. 
+ */ + features &= ~NETIF_F_ONE_FOR_ALL; +- hsr_for_each_port(hsr, port) ++ hsr_for_each_port_rtnl(hsr, port) + features = netdev_increment_features(features, + port->dev->features, + mask); +@@ -211,6 +228,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) + struct hsr_priv *hsr = netdev_priv(dev); + struct hsr_port *master; + ++ rcu_read_lock(); + master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); + if (master) { + skb->dev = master->dev; +@@ -223,6 +241,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) + dev_core_stats_tx_dropped_inc(dev); + dev_kfree_skb_any(skb); + } ++ rcu_read_unlock(); ++ + return NETDEV_TX_OK; + } + +@@ -401,12 +421,133 @@ void hsr_del_ports(struct hsr_priv *hsr) + hsr_del_port(port); + } + ++static void hsr_set_rx_mode(struct net_device *dev) ++{ ++ struct hsr_port *port; ++ struct hsr_priv *hsr; ++ ++ hsr = netdev_priv(dev); ++ ++ hsr_for_each_port_rtnl(hsr, port) { ++ if (port->type == HSR_PT_MASTER) ++ continue; ++ switch (port->type) { ++ case HSR_PT_SLAVE_A: ++ case HSR_PT_SLAVE_B: ++ dev_mc_sync_multiple(port->dev, dev); ++ dev_uc_sync_multiple(port->dev, dev); ++ break; ++ default: ++ break; ++ } ++ } ++} ++ ++static void hsr_change_rx_flags(struct net_device *dev, int change) ++{ ++ struct hsr_port *port; ++ struct hsr_priv *hsr; ++ ++ hsr = netdev_priv(dev); ++ ++ hsr_for_each_port_rtnl(hsr, port) { ++ if (port->type == HSR_PT_MASTER) ++ continue; ++ switch (port->type) { ++ case HSR_PT_SLAVE_A: ++ case HSR_PT_SLAVE_B: ++ if (change & IFF_ALLMULTI) ++ dev_set_allmulti(port->dev, ++ dev->flags & ++ IFF_ALLMULTI ? 1 : -1); ++ break; ++ default: ++ break; ++ } ++ } ++} ++ ++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev, ++ __be16 proto, u16 vid) ++{ ++ bool is_slave_a_added = false; ++ bool is_slave_b_added = false; ++ struct hsr_port *port; ++ struct hsr_priv *hsr; ++ int ret = 0; ++ ++ hsr = netdev_priv(dev); ++ ++ hsr_for_each_port_rtnl(hsr, port) { ++ if (port->type == HSR_PT_MASTER || ++ port->type == HSR_PT_INTERLINK) ++ continue; ++ ++ ret = vlan_vid_add(port->dev, proto, vid); ++ switch (port->type) { ++ case HSR_PT_SLAVE_A: ++ if (ret) { ++ /* clean up Slave-B */ ++ netdev_err(dev, "add vid failed for Slave-A\n"); ++ if (is_slave_b_added) ++ vlan_vid_del(port->dev, proto, vid); ++ return ret; ++ } ++ ++ is_slave_a_added = true; ++ break; ++ ++ case HSR_PT_SLAVE_B: ++ if (ret) { ++ /* clean up Slave-A */ ++ netdev_err(dev, "add vid failed for Slave-B\n"); ++ if (is_slave_a_added) ++ vlan_vid_del(port->dev, proto, vid); ++ return ret; ++ } ++ ++ is_slave_b_added = true; ++ break; ++ default: ++ break; ++ } ++ } ++ ++ return 0; ++} ++ ++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev, ++ __be16 proto, u16 vid) ++{ ++ struct hsr_port *port; ++ struct hsr_priv *hsr; ++ ++ hsr = netdev_priv(dev); ++ ++ hsr_for_each_port_rtnl(hsr, port) { ++ switch (port->type) { ++ case HSR_PT_SLAVE_A: ++ case HSR_PT_SLAVE_B: ++ vlan_vid_del(port->dev, proto, vid); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ return 0; ++} ++ + static const struct net_device_ops hsr_device_ops = { + .ndo_change_mtu = hsr_dev_change_mtu, + .ndo_open = hsr_dev_open, + .ndo_stop = hsr_dev_close, + .ndo_start_xmit = hsr_dev_xmit, ++ .ndo_change_rx_flags = hsr_change_rx_flags, + .ndo_fix_features = hsr_fix_features, ++ .ndo_set_rx_mode = hsr_set_rx_mode, ++ .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid, ++ .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid, + }; + + static struct device_type hsr_type = { +@@ 
-447,7 +588,8 @@ void hsr_dev_setup(struct net_device *dev) + + dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | + NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | +- NETIF_F_HW_VLAN_CTAG_TX; ++ NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_HW_VLAN_CTAG_FILTER; + + dev->features = dev->hw_features; + +@@ -533,6 +675,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], + (slave[1]->features & NETIF_F_HW_HSR_FWD)) + hsr->fwd_offloaded = true; + ++ if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) && ++ (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER)) ++ hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ++ + res = register_netdevice(hsr_dev); + if (res) + goto err_unregister; +diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c +index 257b50124cee5e..76a1958609e291 100644 +--- a/net/hsr/hsr_main.c ++++ b/net/hsr/hsr_main.c +@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr) + { + struct hsr_port *port; + +- hsr_for_each_port(hsr, port) ++ hsr_for_each_port_rtnl(hsr, port) + if (port->type != HSR_PT_MASTER) + return false; + return true; +@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt) + { + struct hsr_port *port; + +- hsr_for_each_port(hsr, port) ++ hsr_for_each_port_rtnl(hsr, port) + if (port->type == pt) + return port; + return NULL; +diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h +index 18e01791ad799d..2fcabe39e61f4f 100644 +--- a/net/hsr/hsr_main.h ++++ b/net/hsr/hsr_main.h +@@ -221,6 +221,9 @@ struct hsr_priv { + #define hsr_for_each_port(hsr, port) \ + list_for_each_entry_rcu((port), &(hsr)->ports, port_list) + ++#define hsr_for_each_port_rtnl(hsr, port) \ ++ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held()) ++ + struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt); + + /* Caller must ensure skb is a valid HSR frame */ +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c +index deb08cab44640d..75e3d7501752df 100644 +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) + return -EINVAL; + ++ if (skb_is_gso(skb)) ++ skb_gso_reset(skb); ++ + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); +@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu) + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) + return -EINVAL; + ++ if (skb_is_gso(skb)) ++ skb_gso_reset(skb); ++ + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index 5312237e804093..7518d2af630880 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -408,8 +408,11 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, + if (!psock->cork) { + psock->cork = kzalloc(sizeof(*psock->cork), + GFP_ATOMIC | __GFP_NOWARN); +- if (!psock->cork) ++ if (!psock->cork) { ++ sk_msg_free(sk, msg); ++ *copied = 0; + return -ENOMEM; ++ } + } + memcpy(psock->cork, msg, sizeof(*msg)); + return 0; +diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c +index 31f6899ef71aac..b31ba9f905e30c 100644 +--- a/net/mptcp/sockopt.c ++++ b/net/mptcp/sockopt.c +@@ -1471,13 +1471,12 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) + { + 
static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK; + struct sock *sk = (struct sock *)msk; ++ bool keep_open; + +- if (ssk->sk_prot->keepalive) { +- if (sock_flag(sk, SOCK_KEEPOPEN)) +- ssk->sk_prot->keepalive(ssk, 1); +- else +- ssk->sk_prot->keepalive(ssk, 0); +- } ++ keep_open = sock_flag(sk, SOCK_KEEPOPEN); ++ if (ssk->sk_prot->keepalive) ++ ssk->sk_prot->keepalive(ssk, keep_open); ++ sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open); + + ssk->sk_priority = sk->sk_priority; + ssk->sk_bound_dev_if = sk->sk_bound_dev_if; +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 73bc39281ef5f5..9b45fbdc90cabe 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); + + static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) + { +- if (unlikely(current->flags & PF_EXITING)) +- return -EINTR; + schedule(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index 8b27a21f3b42d8..3660ef26471129 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -407,9 +407,9 @@ xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags) + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1, + alert_kvec.iov_len); + ret = sock_recvmsg(sock, &msg, flags); +- if (ret > 0 && +- tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) { +- iov_iter_revert(&msg.msg_iter, ret); ++ if (ret > 0) { ++ if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) ++ iov_iter_revert(&msg.msg_iter, ret); + ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg, + -EAGAIN); + } +diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c +index e2a6a69352dfb7..b40f85e3806fcb 100644 +--- a/samples/ftrace/ftrace-direct-modify.c ++++ b/samples/ftrace/ftrace-direct-modify.c +@@ -40,8 +40,8 @@ asm ( + CALL_DEPTH_ACCOUNT + " call my_direct_func1\n" + " leave\n" +-" .size my_tramp1, .-my_tramp1\n" + ASM_RET ++" .size my_tramp1, .-my_tramp1\n" + + " .type my_tramp2, @function\n" + " .globl my_tramp2\n" +diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan +index 390658a2d5b746..a57c24c129720f 100644 +--- a/scripts/Makefile.kasan ++++ b/scripts/Makefile.kasan +@@ -68,10 +68,14 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \ + $(call cc-param,hwasan-inline-all-checks=0) \ + $(instrumentation_flags) + +-# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*(). +-ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y) +-CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1) +-endif ++# Instrument memcpy/memset/memmove calls by using instrumented __(hw)asan_mem*(). 
++ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX ++ ifdef CONFIG_CC_IS_GCC ++ CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1) ++ else ++ CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1) ++ endif ++endif # CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX + + endif # CONFIG_KASAN_SW_TAGS + +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 068edb0d79f736..3b734a4dfcbe4e 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -128,16 +128,22 @@ static void ima_rdwr_violation_check(struct file *file, + if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { + if (!iint) + iint = integrity_iint_find(inode); ++ + /* IMA_MEASURE is set from reader side */ +- if (iint && test_bit(IMA_MUST_MEASURE, +- &iint->atomic_flags)) ++ if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU, ++ &iint->atomic_flags)) + send_tomtou = true; + } + } else { + if (must_measure) +- set_bit(IMA_MUST_MEASURE, &iint->atomic_flags); +- if (inode_is_open_for_write(inode) && must_measure) +- send_writers = true; ++ set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags); ++ ++ /* Limit number of open_writers violations */ ++ if (inode_is_open_for_write(inode) && must_measure) { ++ if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS, ++ &iint->atomic_flags)) ++ send_writers = true; ++ } + } + + if (!send_tomtou && !send_writers) +diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h +index ad20ff7f5dfaa4..a007edae938aeb 100644 +--- a/security/integrity/integrity.h ++++ b/security/integrity/integrity.h +@@ -74,7 +74,8 @@ + #define IMA_UPDATE_XATTR 1 + #define IMA_CHANGE_ATTR 2 + #define IMA_DIGSIG 3 +-#define IMA_MUST_MEASURE 4 ++#define IMA_MAY_EMIT_TOMTOU 4 ++#define IMA_EMITTED_OPENWRITERS 5 + + enum evm_ima_xattr_type { + IMA_XATTR_DIGEST = 0x01, diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.107-108.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.107-108.patch new file mode 100644 index 0000000000..bd6af10b4a --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.107-108.patch @@ -0,0 +1,2572 @@ +diff --git a/Makefile b/Makefile +index 9c9e272f48b879..1e3fb36bb71d78 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 107 ++SUBLEVEL = 108 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig +index 25aa993abebcea..9f56c15ed22d0c 100644 +--- a/arch/loongarch/Kconfig ++++ b/arch/loongarch/Kconfig +@@ -503,10 +503,14 @@ config ARCH_STRICT_ALIGN + -mstrict-align build parameter to prevent unaligned accesses. + + CPUs with h/w unaligned access support: +- Loongson-2K2000/2K3000/3A5000/3C5000/3D5000. ++ Loongson-2K2000/2K3000 and all of Loongson-3 series processors ++ based on LoongArch. + + CPUs without h/w unaligned access support: +- Loongson-2K500/2K1000. ++ Loongson-2K0300/2K0500/2K1000. ++ ++ If you want to make sure whether to support unaligned memory access ++ on your hardware, please read the bit 20 (UAL) of CPUCFG1 register. + + This option is enabled by default to make the kernel be able to run + on all LoongArch systems. 
But you can disable it manually if you want +diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h +index 52f298f7293bab..483c955f2ae50d 100644 +--- a/arch/loongarch/include/asm/acenv.h ++++ b/arch/loongarch/include/asm/acenv.h +@@ -10,9 +10,8 @@ + #ifndef _ASM_LOONGARCH_ACENV_H + #define _ASM_LOONGARCH_ACENV_H + +-/* +- * This header is required by ACPI core, but we have nothing to fill in +- * right now. Will be updated later when needed. +- */ ++#ifdef CONFIG_ARCH_STRICT_ALIGN ++#define ACPI_MISALIGNMENT_NOT_SUPPORTED ++#endif /* CONFIG_ARCH_STRICT_ALIGN */ + + #endif /* _ASM_LOONGARCH_ACENV_H */ +diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c +index 6b3bfb0092e60b..2178b35896af19 100644 +--- a/arch/loongarch/kernel/env.c ++++ b/arch/loongarch/kernel/env.c +@@ -72,6 +72,8 @@ static int __init boardinfo_init(void) + struct kobject *loongson_kobj; + + loongson_kobj = kobject_create_and_add("loongson", firmware_kobj); ++ if (!loongson_kobj) ++ return -ENOMEM; + + return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr); + } +diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c +index 8adca2000e519f..d790acfc2c674d 100644 +--- a/arch/um/drivers/virtio_uml.c ++++ b/arch/um/drivers/virtio_uml.c +@@ -1229,10 +1229,12 @@ static int virtio_uml_probe(struct platform_device *pdev) + device_set_wakeup_capable(&vu_dev->vdev.dev, true); + + rc = register_virtio_device(&vu_dev->vdev); +- if (rc) ++ if (rc) { + put_device(&vu_dev->vdev.dev); ++ return rc; ++ } + vu_dev->registered = 1; +- return rc; ++ return 0; + + error_init: + os_close_file(vu_dev->sock); +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index abff6d45ae3334..29566e457ec4b5 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -4029,8 +4029,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) + struct vcpu_svm *svm = to_svm(vcpu); + u64 cr8; + +- if (nested_svm_virtualize_tpr(vcpu) || +- kvm_vcpu_apicv_active(vcpu)) ++ if (nested_svm_virtualize_tpr(vcpu)) + return; + + cr8 = kvm_get_cr8(vcpu); +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index b18f5a71e679e2..98aa303ad0546f 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -107,7 +107,7 @@ static inline void pgd_list_del(pgd_t *pgd) + #define UNSHARED_PTRS_PER_PGD \ + (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) + #define MAX_UNSHARED_PTRS_PER_PGD \ +- max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) ++ MAX_T(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) + + + static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 68cc9290cabe9a..886eccb97b041d 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -969,6 +969,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + } + + lock_sock(sk); ++ if (ctx->write) { ++ release_sock(sk); ++ return -EBUSY; ++ } ++ ctx->write = true; ++ + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; +@@ -1018,6 +1024,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + continue; + } + ++ ctx->merge = 0; ++ + if (!af_alg_writable(sk)) { + err = af_alg_wait_for_wmem(sk, msg->msg_flags); + if (err) +@@ -1057,7 +1065,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + ctx->used += plen; + copied += plen; + size -= plen; +- ctx->merge = 0; + } else { + do { + struct page *pg; +@@ -1103,6 +1110,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + + unlock: + af_alg_data_wakeup(sk); ++ ctx->write = false; + release_sock(sk); + + return copied ?: err; +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index ed004e1610dd1f..455e2a2b149f4b 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -1472,36 +1472,19 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) + return error; + } + +-static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode, +- struct block_device *bdev, unsigned long arg) ++static int loop_set_block_size(struct loop_device *lo, unsigned long arg) + { + int err = 0; + +- /* +- * If we don't hold exclusive handle for the device, upgrade to it +- * here to avoid changing device under exclusive owner. 
+- */ +- if (!(mode & BLK_OPEN_EXCL)) { +- err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL); +- if (err) +- return err; +- } +- +- err = mutex_lock_killable(&lo->lo_mutex); +- if (err) +- goto abort_claim; +- +- if (lo->lo_state != Lo_bound) { +- err = -ENXIO; +- goto unlock; +- } ++ if (lo->lo_state != Lo_bound) ++ return -ENXIO; + + err = blk_validate_block_size(arg); + if (err) + return err; + + if (lo->lo_queue->limits.logical_block_size == arg) +- goto unlock; ++ return 0; + + sync_blockdev(lo->lo_device); + invalidate_bdev(lo->lo_device); +@@ -1513,11 +1496,6 @@ static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode, + loop_update_dio(lo); + blk_mq_unfreeze_queue(lo->lo_queue); + +-unlock: +- mutex_unlock(&lo->lo_mutex); +-abort_claim: +- if (!(mode & BLK_OPEN_EXCL)) +- bd_abort_claiming(bdev, loop_set_block_size); + return err; + } + +@@ -1536,6 +1514,9 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, + case LOOP_SET_DIRECT_IO: + err = loop_set_dio(lo, arg); + break; ++ case LOOP_SET_BLOCK_SIZE: ++ err = loop_set_block_size(lo, arg); ++ break; + default: + err = -EINVAL; + } +@@ -1590,12 +1571,9 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, + break; + case LOOP_GET_STATUS64: + return loop_get_status64(lo, argp); +- case LOOP_SET_BLOCK_SIZE: +- if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) +- return -EPERM; +- return loop_set_block_size(lo, mode, bdev, arg); + case LOOP_SET_CAPACITY: + case LOOP_SET_DIRECT_IO: ++ case LOOP_SET_BLOCK_SIZE: + if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + fallthrough; +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c +index 0c779a0326b628..6c3a40e6f4c63a 100644 +--- a/drivers/edac/sb_edac.c ++++ b/drivers/edac/sb_edac.c +@@ -109,8 +109,8 @@ static const u32 knl_interleave_list[] = { + 0x104, 0x10c, 0x114, 0x11c, /* 20-23 */ + }; + #define MAX_INTERLEAVE \ +- (max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \ +- max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \ ++ (MAX_T(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \ ++ MAX_T(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \ + ARRAY_SIZE(knl_interleave_list)))) + + struct interleave_pkg { +diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c +index ddf944651c55a9..08885a5ba826ef 100644 +--- a/drivers/gpu/drm/bridge/analogix/anx7625.c ++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c +@@ -2705,7 +2705,7 @@ static int anx7625_i2c_probe(struct i2c_client *client) + ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq, + NULL, anx7625_intr_hpd_isr, + IRQF_TRIGGER_FALLING | +- IRQF_ONESHOT, ++ IRQF_ONESHOT | IRQF_NO_AUTOEN, + "anx7625-intp", platform); + if (ret) { + DRM_DEV_ERROR(dev, "fail to request irq\n"); +@@ -2775,8 +2775,10 @@ static int anx7625_i2c_probe(struct i2c_client *client) + } + + /* Add work function */ +- if (platform->pdata.intp_irq) ++ if (platform->pdata.intp_irq) { ++ enable_irq(platform->pdata.intp_irq); + queue_work(platform->workqueue, &platform->work); ++ } + + if (platform->pdata.audio_en) + anx7625_register_audio(dev, platform); +diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +index 858f5b6508491f..bdb9fc00c776b3 100644 +--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c ++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +@@ -2057,8 +2057,10 @@ static void cdns_mhdp_atomic_enable(struct 
drm_bridge *bridge, + mhdp_state = to_cdns_mhdp_bridge_state(new_state); + + mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); +- if (!mhdp_state->current_mode) +- return; ++ if (!mhdp_state->current_mode) { ++ ret = -EINVAL; ++ goto out; ++ } + + drm_mode_set_name(mhdp_state->current_mode); + +diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c +index d021497841b846..3969dc548cff60 100644 +--- a/drivers/gpu/drm/drm_color_mgmt.c ++++ b/drivers/gpu/drm/drm_color_mgmt.c +@@ -532,7 +532,7 @@ int drm_plane_create_color_properties(struct drm_plane *plane, + { + struct drm_device *dev = plane->dev; + struct drm_property *prop; +- struct drm_prop_enum_list enum_list[max_t(int, DRM_COLOR_ENCODING_MAX, ++ struct drm_prop_enum_list enum_list[MAX_T(int, DRM_COLOR_ENCODING_MAX, + DRM_COLOR_RANGE_MAX)]; + int i, len; + +diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h +index 7dc30c2b56b302..d872054b874fa5 100644 +--- a/drivers/iommu/amd/amd_iommu_types.h ++++ b/drivers/iommu/amd/amd_iommu_types.h +@@ -540,6 +540,7 @@ struct amd_irte_ops; + container_of((x), struct amd_io_pgtable, pgtbl_cfg) + + struct amd_io_pgtable { ++ seqcount_t seqcount; /* Protects root/mode update */ + struct io_pgtable_cfg pgtbl_cfg; + struct io_pgtable iop; + int mode; +diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c +index 2892aa1b4dc1db..b785d823999835 100644 +--- a/drivers/iommu/amd/io_pgtable.c ++++ b/drivers/iommu/amd/io_pgtable.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include + +@@ -171,8 +172,11 @@ static bool increase_address_space(struct protection_domain *domain, + + *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root)); + ++ write_seqcount_begin(&domain->iop.seqcount); + domain->iop.root = pte; + domain->iop.mode += 1; ++ write_seqcount_end(&domain->iop.seqcount); ++ + amd_iommu_update_and_flush_device_table(domain); + amd_iommu_domain_flush_complete(domain); + +@@ -199,6 +203,7 @@ static u64 *alloc_pte(struct protection_domain *domain, + gfp_t gfp, + bool *updated) + { ++ unsigned int seqcount; + int level, end_lvl; + u64 *pte, *page; + +@@ -214,8 +219,14 @@ static u64 *alloc_pte(struct protection_domain *domain, + } + + +- level = domain->iop.mode - 1; +- pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)]; ++ do { ++ seqcount = read_seqcount_begin(&domain->iop.seqcount); ++ ++ level = domain->iop.mode - 1; ++ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)]; ++ } while (read_seqcount_retry(&domain->iop.seqcount, seqcount)); ++ ++ + address = PAGE_SIZE_ALIGN(address, page_size); + end_lvl = PAGE_SIZE_LEVEL(page_size); + +@@ -292,6 +303,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable, + unsigned long *page_size) + { + int level; ++ unsigned int seqcount; + u64 *pte; + + *page_size = 0; +@@ -299,8 +311,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable, + if (address > PM_LEVEL_SIZE(pgtable->mode)) + return NULL; + +- level = pgtable->mode - 1; +- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; ++ do { ++ seqcount = read_seqcount_begin(&pgtable->seqcount); ++ level = pgtable->mode - 1; ++ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; ++ } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); ++ + *page_size = PTE_LEVEL_PAGE_SIZE(level); + + while (level > 0) { +@@ -524,6 +540,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo + cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE, + cfg->tlb = 
&v1_flush_ops; + ++ seqcount_init(&pgtable->seqcount); ++ + pgtable->iop.ops.map_pages = iommu_v1_map_pages; + pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages; + pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index 6a745616d85a4b..88bccdbb0bed08 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -2168,6 +2168,10 @@ static void switch_to_super_page(struct dmar_domain *domain, + struct dma_pte *pte = NULL; + unsigned long i; + ++ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) || ++ !IS_ALIGNED(end_pfn + 1, lvl_pages))) ++ return; ++ + while (start_pfn <= end_pfn) { + if (!pte) + pte = pfn_to_dma_pte(domain, start_pfn, &level, +@@ -2241,7 +2245,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + unsigned long pages_to_remove; + + pteval |= DMA_PTE_LARGE_PAGE; +- pages_to_remove = min_t(unsigned long, nr_pages, ++ pages_to_remove = min_t(unsigned long, ++ round_down(nr_pages, lvl_pages), + nr_pte_to_next_page(pte) * lvl_pages); + end_pfn = iov_pfn + pages_to_remove - 1; + switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl); +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c +index 1e27a5bce2d942..0bd76f8d4dc6b8 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -1794,7 +1794,7 @@ static void integrity_metadata(struct work_struct *w) + struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); + char *checksums; + unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; +- char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; ++ char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; + sector_t sector; + unsigned int sectors_to_process; + +@@ -2073,7 +2073,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, + } while (++s < ic->sectors_per_block); + #ifdef INTERNAL_VERIFY + if (ic->internal_hash) { +- char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; ++ char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; + + integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); + if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { +@@ -2638,7 +2638,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start + unlikely(from_replay) && + #endif + ic->internal_hash) { +- char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; ++ char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; + + integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), + (char *)access_journal_data(ic, i, l), test_tag); +diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c +index ca01b7d204ba66..955df7f370a789 100644 +--- a/drivers/mmc/host/mvsdio.c ++++ b/drivers/mmc/host/mvsdio.c +@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, + host->pio_ptr = NULL; + host->pio_size = 0; + } else { +- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, ++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index cd5691ed9f171e..f7ed129fc8110a 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -2042,6 +2042,7 @@ int bond_enslave(struct 
net_device *bond_dev, struct net_device *slave_dev, + memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); + } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW && + BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && ++ bond_has_slaves(bond) && + memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) { + /* Set slave to random address to avoid duplicate mac + * address in later fail over. +@@ -3260,7 +3261,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave) + /* Find out through which dev should the packet go */ + memset(&fl6, 0, sizeof(struct flowi6)); + fl6.daddr = targets[i]; +- fl6.flowi6_oif = bond->dev->ifindex; + + dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); + if (dst->error) { +diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c +index 7926aaef8f0c52..ad2745c07c1ae7 100644 +--- a/drivers/net/ethernet/broadcom/cnic.c ++++ b/drivers/net/ethernet/broadcom/cnic.c +@@ -4220,8 +4220,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) + + cnic_bnx2x_delete_wait(dev, 0); + +- cancel_delayed_work(&cp->delete_task); +- flush_workqueue(cnic_wq); ++ cancel_delayed_work_sync(&cp->delete_task); + + if (atomic_read(&cp->iscsi_conn) != 0) + netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", +diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c +index de8a6ce86ad7e2..12105ffb5dac6d 100644 +--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c ++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c +@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, + oct->io_qmask.iq |= BIT_ULL(iq_no); + + /* Set the 32B/64B mode for each input queue */ +- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); ++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no); + iq->iqcmd_64B = (conf->instr_type == 64); + + oct->fn_list.setup_iq_regs(oct, iq_no); +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +index 76795bb0b564b7..cdab37e9634d4f 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +@@ -2700,7 +2700,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) + dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); + goto err_get_attr; + } +- ethsw->bpid = dpbp_attrs.id; ++ ethsw->bpid = dpbp_attrs.bpid; + + return 0; + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +index c962987d8b51bb..6a9b47b005d29b 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +@@ -950,9 +950,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + if (!eop_desc) + break; + +- /* prevent any other reads prior to eop_desc */ +- smp_rmb(); +- + i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +index 3a72b0793d4a73..82725923555c51 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +@@ -476,7 +476,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf) + if (!ptp) + return; + +- cancel_delayed_work(&pfvf->ptp->synctstamp_work); ++ 
cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work); + + ptp_clock_unregister(ptp->ptp_clock); + kfree(ptp); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index d378aa55f22f90..09ba60b2e744b1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -109,8 +109,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv) + if (up) { + netdev_info(priv->netdev, "Link up\n"); + netif_carrier_on(priv->netdev); +- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu, +- NULL, NULL, NULL); + } else { + netdev_info(priv->netdev, "Link down\n"); + netif_carrier_off(priv->netdev); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 851c499faa7954..656a7b65f4c7bb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1448,12 +1448,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { + static int + mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) + { +- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev)); + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); ++ struct net_device *netdev; ++ struct mlx5e_priv *priv; ++ int err; ++ ++ netdev = mlx5_uplink_netdev_get(dev); ++ if (!netdev) ++ return 0; + ++ priv = netdev_priv(netdev); + rpriv->netdev = priv->netdev; +- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, +- rpriv); ++ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, ++ rpriv); ++ mlx5_uplink_netdev_put(dev, netdev); ++ return err; + } + + static void +@@ -1565,8 +1574,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) + { + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct net_device *netdev = rpriv->netdev; +- struct mlx5e_priv *priv = netdev_priv(netdev); +- void *ppriv = priv->ppriv; ++ struct mlx5e_priv *priv; ++ void *ppriv; ++ ++ if (!netdev) { ++ ppriv = rpriv; ++ goto free_ppriv; ++ } ++ ++ priv = netdev_priv(netdev); ++ ppriv = priv->ppriv; + + if (rep->vport == MLX5_VPORT_UPLINK) { + mlx5e_vport_uplink_rep_unload(rpriv); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +index cc0f2be21a265a..05fbd2098b268a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +@@ -2,6 +2,7 @@ + /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + + #include "eswitch.h" ++#include "lib/mlx5.h" + #include "esw/qos.h" + #include "en/port.h" + #define CREATE_TRACE_POINTS +@@ -712,6 +713,71 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo + return err; + } + ++static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) ++{ ++ struct ethtool_link_ksettings lksettings; ++ struct net_device *slave, *master; ++ u32 speed = SPEED_UNKNOWN; ++ ++ /* Lock ensures a stable reference to master and slave netdevice ++ * while port speed of master is queried. 
++ */ ++ ASSERT_RTNL(); ++ ++ slave = mlx5_uplink_netdev_get(mdev); ++ if (!slave) ++ goto out; ++ ++ master = netdev_master_upper_dev_get(slave); ++ if (master && !__ethtool_get_link_ksettings(master, &lksettings)) ++ speed = lksettings.base.speed; ++ ++out: ++ mlx5_uplink_netdev_put(mdev, slave); ++ return speed; ++} ++ ++static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max, ++ bool hold_rtnl_lock, struct netlink_ext_ack *extack) ++{ ++ int err; ++ ++ if (!mlx5_lag_is_active(mdev)) ++ goto skip_lag; ++ ++ if (hold_rtnl_lock) ++ rtnl_lock(); ++ ++ *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev); ++ ++ if (hold_rtnl_lock) ++ rtnl_unlock(); ++ ++ if (*link_speed_max != (u32)SPEED_UNKNOWN) ++ return 0; ++ ++skip_lag: ++ err = mlx5_port_max_linkspeed(mdev, link_speed_max); ++ if (err) ++ NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); ++ ++ return err; ++} ++ ++static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, ++ const char *name, u32 link_speed_max, ++ u64 value, struct netlink_ext_ack *extack) ++{ ++ if (value > link_speed_max) { ++ pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n", ++ name, value, link_speed_max); ++ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) + { + u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; +@@ -755,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char * + u64 value; + int err; + +- err = mlx5_port_max_linkspeed(mdev, &link_speed_max); +- if (err) { +- NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); +- return err; +- } +- + value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder); + if (remainder) { + pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n", +@@ -769,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char * + return -EINVAL; + } + +- if (value > link_speed_max) { +- pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n", +- name, value, link_speed_max); +- NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed"); +- return -EINVAL; +- } ++ err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack); ++ if (err) ++ return err; ++ ++ err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack); ++ if (err) ++ return err; + + *rate = value; + return 0; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +index 2b5826a785c4f7..adcc2bc9c8c870 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +@@ -52,6 +52,19 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) + + static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev) + { +- return mdev->mlx5e_res.uplink_netdev; ++ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res; ++ struct net_device *netdev; ++ ++ mutex_lock(&mlx5e_res->uplink_netdev_lock); ++ netdev = mlx5e_res->uplink_netdev; ++ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL); ++ mutex_unlock(&mlx5e_res->uplink_netdev_lock); ++ return netdev; ++} ++ ++static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev, ++ struct net_device *netdev) ++{ ++ netdev_put(netdev, &mdev->mlx5e_res.tracker); + } + #endif +diff --git 
a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c +index 998586872599b3..c692d2e878b2e3 100644 +--- a/drivers/net/ethernet/natsemi/ns83820.c ++++ b/drivers/net/ethernet/natsemi/ns83820.c +@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev) + struct ns83820 *dev = PRIV(ndev); + struct rx_info *info = &dev->rx_info; + unsigned next_rx; +- int rx_rc, len; ++ int len; + u32 cmdsts; + __le32 *desc; + unsigned long flags; +@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev) + if (likely(CMDSTS_OK & cmdsts)) { + #endif + skb_put(skb, len); +- if (unlikely(!skb)) ++ if (unlikely(!skb)) { ++ ndev->stats.rx_dropped++; + goto netdev_mangle_me_harder_failed; ++ } + if (cmdsts & CMDSTS_DEST_MULTI) + ndev->stats.multicast++; + ndev->stats.rx_packets++; +@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag); + } + #endif +- rx_rc = netif_rx(skb); +- if (NET_RX_DROP == rx_rc) { +-netdev_mangle_me_harder_failed: +- ndev->stats.rx_dropped++; +- } ++ netif_rx(skb); + } else { + dev_kfree_skb_irq(skb); + } + ++netdev_mangle_me_harder_failed: + nr++; + next_rx = info->next_rx; + desc = info->descs + (DESC_SIZE * next_rx); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c +index cdcead614e9fa8..ae421c2707785f 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c +@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, + goto out; + } + +- /* Add override window info to buffer */ ++ /* Add override window info to buffer, preventing buffer overflow */ + override_window_dwords = +- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * +- PROTECTION_OVERRIDE_ELEMENT_DWORDS; ++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * ++ PROTECTION_OVERRIDE_ELEMENT_DWORDS, ++ PROTECTION_OVERRIDE_DEPTH_DWORDS); + if (override_window_dwords) { + addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); + offset += qed_grc_dump_addr_range(p_hwfn, +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index ff5389a8efc33a..f3155d69a013c6 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2841,7 +2841,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) + u32 channels_to_check = tx_channel_count > rx_channel_count ? + tx_channel_count : rx_channel_count; + u32 chan; +- int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; ++ int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; + + /* Make sure we never check beyond our status buffer. 
*/ + if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c +index 6e4023791b4761..68b8e458a88f6a 100644 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c +@@ -1981,6 +1981,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, + + rq->comp_ring.gen = VMXNET3_INIT_GEN; + rq->comp_ring.next2proc = 0; ++ ++ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) ++ xdp_rxq_info_unreg(&rq->xdp_rxq); ++ page_pool_destroy(rq->page_pool); ++ rq->page_pool = NULL; + } + + +@@ -2021,11 +2026,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + } + } + +- if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) +- xdp_rxq_info_unreg(&rq->xdp_rxq); +- page_pool_destroy(rq->page_pool); +- rq->page_pool = NULL; +- + if (rq->data_ring.base) { + dma_free_coherent(&adapter->pdev->dev, + rq->rx_ring[0].size * rq->data_ring.desc_size, +diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c +index 131388886acbfa..cfabd5aebb5400 100644 +--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c ++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c +@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = { + }; + + static const struct wilc_cfg_str g_cfg_str[] = { +- {WID_FIRMWARE_VERSION, NULL}, +- {WID_MAC_ADDR, NULL}, +- {WID_ASSOC_RES_INFO, NULL}, +- {WID_NIL, NULL} ++ {WID_FIRMWARE_VERSION, 0, NULL}, ++ {WID_MAC_ADDR, 0, NULL}, ++ {WID_ASSOC_RES_INFO, 0, NULL}, ++ {WID_NIL, 0, NULL} + }; + + #define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R' +@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size) + + switch (FIELD_GET(WILC_WID_TYPE, wid)) { + case WID_CHAR: ++ len = 3; ++ if (len + 2 > size) ++ return; ++ + while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid) + i++; + + if (cfg->b[i].id == wid) + cfg->b[i].val = info[4]; + +- len = 3; + break; + + case WID_SHORT: ++ len = 4; ++ if (len + 2 > size) ++ return; ++ + while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid) + i++; + + if (cfg->hw[i].id == wid) + cfg->hw[i].val = get_unaligned_le16(&info[4]); + +- len = 4; + break; + + case WID_INT: ++ len = 6; ++ if (len + 2 > size) ++ return; ++ + while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid) + i++; + + if (cfg->w[i].id == wid) + cfg->w[i].val = get_unaligned_le32(&info[4]); + +- len = 6; + break; + + case WID_STR: ++ len = 2 + get_unaligned_le16(&info[2]); ++ + while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid) + i++; + +- if (cfg->s[i].id == wid) ++ if (cfg->s[i].id == wid) { ++ if (len > cfg->s[i].len || (len + 2 > size)) ++ return; ++ + memcpy(cfg->s[i].str, &info[2], +- get_unaligned_le16(&info[2]) + 2); ++ len); ++ } + +- len = 2 + get_unaligned_le16(&info[2]); + break; + + default: +@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl) + /* store the string cfg parameters */ + wl->cfg.s[i].id = WID_FIRMWARE_VERSION; + wl->cfg.s[i].str = str_vals->firmware_version; ++ wl->cfg.s[i].len = sizeof(str_vals->firmware_version); + i++; + wl->cfg.s[i].id = WID_MAC_ADDR; + wl->cfg.s[i].str = str_vals->mac_address; ++ wl->cfg.s[i].len = sizeof(str_vals->mac_address); + i++; + wl->cfg.s[i].id = WID_ASSOC_RES_INFO; + wl->cfg.s[i].str = str_vals->assoc_rsp; ++ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp); + i++; + wl->cfg.s[i].id = WID_NIL; + wl->cfg.s[i].str = NULL; +diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h 
b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h +index 7038b74f8e8ff6..5ae74bced7d748 100644 +--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h ++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h +@@ -24,12 +24,13 @@ struct wilc_cfg_word { + + struct wilc_cfg_str { + u16 id; ++ u16 len; + u8 *str; + }; + + struct wilc_cfg_str_vals { +- u8 mac_address[7]; +- u8 firmware_version[129]; ++ u8 mac_address[8]; ++ u8 firmware_version[130]; + u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE]; + }; + +diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c +index 25382612e48acb..a8e0dd5d30c436 100644 +--- a/drivers/pcmcia/omap_cf.c ++++ b/drivers/pcmcia/omap_cf.c +@@ -305,7 +305,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev) + return 0; + } + +-static struct platform_driver omap_cf_driver = { ++/* ++ * omap_cf_remove() lives in .exit.text. For drivers registered via ++ * platform_driver_probe() this is ok because they cannot get unbound at ++ * runtime. So mark the driver struct with __refdata to prevent modpost ++ * triggering a section mismatch warning. ++ */ ++static struct platform_driver omap_cf_driver __refdata = { + .driver = { + .name = driver_name, + }, +diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c +index 69584b685edbb7..2c8b1b7dda5bdc 100644 +--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c ++++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c +@@ -16,10 +16,11 @@ + #include + #include + #include ++#include + #include +-#include + #include + #include ++#include + #include + + #define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f +@@ -189,7 +190,6 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, + static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev) + { + struct device *dev = &mdiodev->dev; +- const struct of_device_id *of_id; + struct phy_provider *phy_provider; + struct device_node *syscon_np; + struct bcm_ns_usb3 *usb3; +@@ -203,10 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev) + usb3->dev = dev; + usb3->mdiodev = mdiodev; + +- of_id = of_match_device(bcm_ns_usb3_id_table, dev); +- if (!of_id) +- return -EINVAL; +- usb3->family = (uintptr_t)of_id->data; ++ usb3->family = (enum bcm_ns_family)device_get_match_data(dev); + + syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0); + err = of_address_to_resource(syscon_np, 0, &res); +diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c +index 78ef6ae72a9a74..f26bf630da2c9f 100644 +--- a/drivers/phy/marvell/phy-berlin-usb.c ++++ b/drivers/phy/marvell/phy-berlin-usb.c +@@ -8,9 +8,10 @@ + + #include + #include +-#include ++#include + #include + #include ++#include + #include + + #define USB_PHY_PLL 0x04 +@@ -162,8 +163,6 @@ MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match); + + static int phy_berlin_usb_probe(struct platform_device *pdev) + { +- const struct of_device_id *match = +- of_match_device(phy_berlin_usb_of_match, &pdev->dev); + struct phy_berlin_usb_priv *priv; + struct phy *phy; + struct phy_provider *phy_provider; +@@ -180,7 +179,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev) + if (IS_ERR(priv->rst_ctrl)) + return PTR_ERR(priv->rst_ctrl); + +- priv->pll_divider = *((u32 *)match->data); ++ priv->pll_divider = *((u32 *)device_get_match_data(&pdev->dev)); + + phy = devm_phy_create(&pdev->dev, NULL, &phy_berlin_usb_ops); + if (IS_ERR(phy)) { +diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c +index 
2bd8ad2e76eda0..41bce5290e9220 100644 +--- a/drivers/phy/ralink/phy-ralink-usb.c ++++ b/drivers/phy/ralink/phy-ralink-usb.c +@@ -13,9 +13,10 @@ + #include + #include + #include +-#include ++#include + #include + #include ++#include + #include + #include + +@@ -171,18 +172,13 @@ static int ralink_usb_phy_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; +- const struct of_device_id *match; + struct ralink_usb_phy *phy; + +- match = of_match_device(ralink_usb_phy_of_match, &pdev->dev); +- if (!match) +- return -ENODEV; +- + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + +- phy->clk = (uintptr_t)match->data; ++ phy->clk = (uintptr_t)device_get_match_data(&pdev->dev); + phy->base = NULL; + + phy->sysctl = syscon_regmap_lookup_by_phandle(dev->of_node, "ralink,sysctl"); +diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c +index cbf3c140a138ee..4669e87b32435a 100644 +--- a/drivers/phy/rockchip/phy-rockchip-pcie.c ++++ b/drivers/phy/rockchip/phy-rockchip-pcie.c +@@ -12,10 +12,9 @@ + #include + #include + #include +-#include +-#include + #include + #include ++#include + #include + #include + +@@ -62,7 +61,7 @@ struct rockchip_pcie_data { + }; + + struct rockchip_pcie_phy { +- struct rockchip_pcie_data *phy_data; ++ const struct rockchip_pcie_data *phy_data; + struct regmap *reg_base; + struct phy_pcie_instance { + struct phy *phy; +@@ -349,7 +348,6 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev) + struct rockchip_pcie_phy *rk_phy; + struct phy_provider *phy_provider; + struct regmap *grf; +- const struct of_device_id *of_id; + int i; + u32 phy_num; + +@@ -363,11 +361,10 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev) + if (!rk_phy) + return -ENOMEM; + +- of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev); +- if (!of_id) ++ rk_phy->phy_data = device_get_match_data(&pdev->dev); ++ if (!rk_phy->phy_data) + return -EINVAL; + +- rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data; + rk_phy->reg_base = grf; + + mutex_init(&rk_phy->pcie_mutex); +diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c +index 8454285977ebc1..666a896c8f0a08 100644 +--- a/drivers/phy/rockchip/phy-rockchip-usb.c ++++ b/drivers/phy/rockchip/phy-rockchip-usb.c +@@ -13,10 +13,9 @@ + #include + #include + #include +-#include +-#include + #include + #include ++#include + #include + #include + #include +@@ -458,7 +457,6 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct rockchip_usb_phy_base *phy_base; + struct phy_provider *phy_provider; +- const struct of_device_id *match; + struct device_node *child; + int err; + +@@ -466,14 +464,12 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) + if (!phy_base) + return -ENOMEM; + +- match = of_match_device(dev->driver->of_match_table, dev); +- if (!match || !match->data) { ++ phy_base->pdata = device_get_match_data(dev); ++ if (!phy_base->pdata) { + dev_err(dev, "missing phy data\n"); + return -EINVAL; + } + +- phy_base->pdata = match->data; +- + phy_base->dev = dev; + phy_base->reg_base = ERR_PTR(-ENODEV); + if (dev->parent && dev->parent->of_node) +diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c +index 76c5595f0859cb..2fdb8f4241c742 100644 +--- a/drivers/phy/ti/phy-omap-control.c ++++ b/drivers/phy/ti/phy-omap-control.c +@@ -8,9 +8,9 @@ + + 
#include + #include ++#include + #include + #include +-#include + #include + #include + #include +@@ -268,20 +268,15 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table); + + static int omap_control_phy_probe(struct platform_device *pdev) + { +- const struct of_device_id *of_id; + struct omap_control_phy *control_phy; + +- of_id = of_match_device(omap_control_phy_id_table, &pdev->dev); +- if (!of_id) +- return -EINVAL; +- + control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy), + GFP_KERNEL); + if (!control_phy) + return -ENOMEM; + + control_phy->dev = &pdev->dev; +- control_phy->type = *(enum omap_control_phy_type *)of_id->data; ++ control_phy->type = *(enum omap_control_phy_type *)device_get_match_data(&pdev->dev); + + if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) { + control_phy->otghs_control = +diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c +index 6bd3c749233068..0fea766a98d756 100644 +--- a/drivers/phy/ti/phy-omap-usb2.c ++++ b/drivers/phy/ti/phy-omap-usb2.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -362,6 +363,13 @@ static void omap_usb2_init_errata(struct omap_usb *phy) + phy->flags |= OMAP_USB2_DISABLE_CHRG_DET; + } + ++static void omap_usb2_put_device(void *_dev) ++{ ++ struct device *dev = _dev; ++ ++ put_device(dev); ++} ++ + static int omap_usb2_probe(struct platform_device *pdev) + { + struct omap_usb *phy; +@@ -371,16 +379,13 @@ static int omap_usb2_probe(struct platform_device *pdev) + struct device_node *node = pdev->dev.of_node; + struct device_node *control_node; + struct platform_device *control_pdev; +- const struct of_device_id *of_id; +- struct usb_phy_data *phy_data; +- +- of_id = of_match_device(omap_usb2_id_table, &pdev->dev); ++ const struct usb_phy_data *phy_data; ++ int ret; + +- if (!of_id) ++ phy_data = device_get_match_data(&pdev->dev); ++ if (!phy_data) + return -EINVAL; + +- phy_data = (struct usb_phy_data *)of_id->data; +- + phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; +@@ -426,6 +431,11 @@ static int omap_usb2_probe(struct platform_device *pdev) + return -EINVAL; + } + phy->control_dev = &control_pdev->dev; ++ ++ ret = devm_add_action_or_reset(&pdev->dev, omap_usb2_put_device, ++ phy->control_dev); ++ if (ret) ++ return ret; + } else { + if (of_property_read_u32_index(node, + "syscon-phy-power", 1, +diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c +index 3127f3702c3ae3..8e94d2c6e266a0 100644 +--- a/drivers/phy/ti/phy-ti-pipe3.c ++++ b/drivers/phy/ti/phy-ti-pipe3.c +@@ -8,6 +8,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -791,23 +792,16 @@ static int ti_pipe3_probe(struct platform_device *pdev) + struct phy_provider *phy_provider; + struct device *dev = &pdev->dev; + int ret; +- const struct of_device_id *match; +- struct pipe3_data *data; ++ const struct pipe3_data *data; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + +- match = of_match_device(ti_pipe3_id_table, dev); +- if (!match) ++ data = device_get_match_data(dev); ++ if (!data) + return -EINVAL; + +- data = (struct pipe3_data *)match->data; +- if (!data) { +- dev_err(dev, "no driver data\n"); +- return -EINVAL; +- } +- + phy->dev = dev; + phy->mode = data->mode; + phy->dpll_map = data->dpll_map; +diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c +index e51fa2c694bc6d..1f06dee4b8b4e3 100644 +--- 
a/drivers/power/supply/bq27xxx_battery.c ++++ b/drivers/power/supply/bq27xxx_battery.c +@@ -1872,8 +1872,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) + bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; + + cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); +- if ((cache.flags & 0xff) == 0xff) +- cache.flags = -1; /* read error */ ++ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff) ++ cache.flags = -ENODEV; /* bq27000 hdq read error */ + if (cache.flags >= 0) { + cache.temperature = bq27xxx_battery_read_temperature(di); + if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c +index fc079b9dcf7192..502571f0c203fa 100644 +--- a/drivers/rtc/rtc-pcf2127.c ++++ b/drivers/rtc/rtc-pcf2127.c +@@ -1383,11 +1383,6 @@ static int pcf2127_i2c_probe(struct i2c_client *client) + variant = &pcf21xx_cfg[type]; + } + +- if (variant->type == PCF2131) { +- config.read_flag_mask = 0x0; +- config.write_flag_mask = 0x0; +- } +- + config.max_register = variant->max_register, + + regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap, +@@ -1461,6 +1456,11 @@ static int pcf2127_spi_probe(struct spi_device *spi) + variant = &pcf21xx_cfg[type]; + } + ++ if (variant->type == PCF2131) { ++ config.read_flag_mask = 0x0; ++ config.write_flag_mask = 0x0; ++ } ++ + config.max_register = variant->max_register; + + regmap = devm_regmap_init_spi(spi, &config); +diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c +index 2cd8c757c65342..764657070883c1 100644 +--- a/drivers/usb/host/xhci-dbgcap.c ++++ b/drivers/usb/host/xhci-dbgcap.c +@@ -86,13 +86,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings) + return string_length; + } + ++static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc) ++{ ++ struct xhci_ep_ctx *ep_ctx; ++ unsigned int max_burst; ++ dma_addr_t deq; ++ ++ max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control)); ++ ++ /* Populate bulk out endpoint context: */ ++ ep_ctx = dbc_bulkout_ctx(dbc); ++ deq = dbc_bulkout_enq(dbc); ++ ep_ctx->ep_info = 0; ++ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst); ++ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state); ++ ++ /* Populate bulk in endpoint context: */ ++ ep_ctx = dbc_bulkin_ctx(dbc); ++ deq = dbc_bulkin_enq(dbc); ++ ep_ctx->ep_info = 0; ++ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst); ++ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state); ++} ++ + static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) + { + struct dbc_info_context *info; +- struct xhci_ep_ctx *ep_ctx; + u32 dev_info; +- dma_addr_t deq, dma; +- unsigned int max_burst; ++ dma_addr_t dma; + + if (!dbc) + return; +@@ -106,20 +127,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) + info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3); + info->length = cpu_to_le32(string_length); + +- /* Populate bulk out endpoint context: */ +- ep_ctx = dbc_bulkout_ctx(dbc); +- max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control)); +- deq = dbc_bulkout_enq(dbc); +- ep_ctx->ep_info = 0; +- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst); +- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state); +- +- /* Populate bulk in endpoint context: */ +- ep_ctx = dbc_bulkin_ctx(dbc); +- deq = dbc_bulkin_enq(dbc); +- ep_ctx->ep_info = 0; +- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst); +- ep_ctx->deq = 
cpu_to_le64(deq | dbc->ring_in->cycle_state); ++ /* Populate bulk in and out endpoint contexts: */ ++ xhci_dbc_init_ep_contexts(dbc); + + /* Set DbC context and info registers: */ + lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp); +@@ -421,6 +430,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags) + return ctx; + } + ++static void xhci_dbc_ring_init(struct xhci_ring *ring) ++{ ++ struct xhci_segment *seg = ring->first_seg; ++ ++ /* clear all trbs on ring in case of old ring */ ++ memset(seg->trbs, 0, TRB_SEGMENT_SIZE); ++ ++ /* Only event ring does not use link TRB */ ++ if (ring->type != TYPE_EVENT) { ++ union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1]; ++ ++ trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma); ++ trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK)); ++ } ++ xhci_initialize_ring_info(ring, 1); ++} ++ ++static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc) ++{ ++ struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring; ++ struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring; ++ ++ if (!in_ring || !out_ring || !dbc->ctx) { ++ dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n"); ++ return -ENODEV; ++ } ++ ++ xhci_dbc_ring_init(in_ring); ++ xhci_dbc_ring_init(out_ring); ++ ++ /* set ep context enqueue, dequeue, and cycle to initial values */ ++ xhci_dbc_init_ep_contexts(dbc); ++ ++ return 0; ++} ++ + static struct xhci_ring * + xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags) + { +@@ -449,15 +494,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags) + + seg->dma = dma; + +- /* Only event ring does not use link TRB */ +- if (type != TYPE_EVENT) { +- union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1]; +- +- trb->link.segment_ptr = cpu_to_le64(dma); +- trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK)); +- } + INIT_LIST_HEAD(&ring->td_list); +- xhci_initialize_ring_info(ring, 1); ++ ++ xhci_dbc_ring_init(ring); ++ + return ring; + dma_fail: + kfree(seg); +@@ -850,7 +890,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) + dev_info(dbc->dev, "DbC cable unplugged\n"); + dbc->state = DS_ENABLED; + xhci_dbc_flush_requests(dbc); +- ++ xhci_dbc_reinit_ep_rings(dbc); + return EVT_DISC; + } + +@@ -860,7 +900,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) + writel(portsc, &dbc->regs->portsc); + dbc->state = DS_ENABLED; + xhci_dbc_flush_requests(dbc); +- ++ xhci_dbc_reinit_ep_rings(dbc); + return EVT_DISC; + } + +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c +index 6d16506bbdc0d9..d49ce7768f7f5b 100644 +--- a/fs/btrfs/tree-checker.c ++++ b/fs/btrfs/tree-checker.c +@@ -1717,10 +1717,10 @@ static int check_inode_ref(struct extent_buffer *leaf, + while (ptr < end) { + u16 namelen; + +- if (unlikely(ptr + sizeof(iref) > end)) { ++ if (unlikely(ptr + sizeof(*iref) > end)) { + inode_ref_err(leaf, slot, + "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", +- ptr, end, sizeof(iref)); ++ ptr, end, sizeof(*iref)); + return -EUCLEAN; + } + +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index e5d6bc1bb5e5da..4b53e19f7520fe 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -1998,7 +1998,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + + search_key.objectid = log_key.objectid; + search_key.type = BTRFS_INODE_EXTREF_KEY; +- search_key.offset = key->objectid; ++ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len); + ret = 
backref_in_log(root->log_root, &search_key, key->objectid, &name); + if (ret < 0) { + goto out; +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c +index 905c7eadf9676d..59fda8ce0790bc 100644 +--- a/fs/nilfs2/sysfs.c ++++ b/fs/nilfs2/sysfs.c +@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs) + ************************************************************************/ + + static ssize_t nilfs_feature_revision_show(struct kobject *kobj, +- struct attribute *attr, char *buf) ++ struct kobj_attribute *attr, char *buf) + { + return sysfs_emit(buf, "%d.%d\n", + NILFS_CURRENT_REV, NILFS_MINOR_REV); +@@ -1087,7 +1087,7 @@ static const char features_readme_str[] = + "(1) revision\n\tshow current revision of NILFS file system driver.\n"; + + static ssize_t nilfs_feature_README_show(struct kobject *kobj, +- struct attribute *attr, ++ struct kobj_attribute *attr, + char *buf) + { + return sysfs_emit(buf, features_readme_str); +diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h +index 78a87a016928b7..d370cd5cce3f5d 100644 +--- a/fs/nilfs2/sysfs.h ++++ b/fs/nilfs2/sysfs.h +@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups { + struct completion sg_segments_kobj_unregister; + }; + +-#define NILFS_COMMON_ATTR_STRUCT(name) \ ++#define NILFS_KOBJ_ATTR_STRUCT(name) \ + struct nilfs_##name##_attr { \ + struct attribute attr; \ +- ssize_t (*show)(struct kobject *, struct attribute *, \ ++ ssize_t (*show)(struct kobject *, struct kobj_attribute *, \ + char *); \ +- ssize_t (*store)(struct kobject *, struct attribute *, \ ++ ssize_t (*store)(struct kobject *, struct kobj_attribute *, \ + const char *, size_t); \ + } + +-NILFS_COMMON_ATTR_STRUCT(feature); ++NILFS_KOBJ_ATTR_STRUCT(feature); + + #define NILFS_DEV_ATTR_STRUCT(name) \ + struct nilfs_##name##_attr { \ +diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c +index 713bd1dcd39cce..be9be8f3633196 100644 +--- a/fs/smb/client/smbdirect.c ++++ b/fs/smb/client/smbdirect.c +@@ -1064,8 +1064,10 @@ static int smbd_negotiate(struct smbd_connection *info) + log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", + rc, response->sge.addr, + response->sge.length, response->sge.lkey); +- if (rc) ++ if (rc) { ++ put_receive_buffer(info, response); + return rc; ++ } + + init_completion(&info->negotiate_completion); + info->negotiate_done = false; +diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c +index a4ff1167c9a123..3720304d679293 100644 +--- a/fs/smb/server/transport_rdma.c ++++ b/fs/smb/server/transport_rdma.c +@@ -553,7 +553,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) + case SMB_DIRECT_MSG_DATA_TRANSFER: { + struct smb_direct_data_transfer *data_transfer = + (struct smb_direct_data_transfer *)recvmsg->packet; +- unsigned int data_length; ++ u32 remaining_data_length, data_offset, data_length; + int avail_recvmsg_count, receive_credits; + + if (wc->byte_len < +@@ -563,15 +563,25 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) + return; + } + ++ remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); + data_length = le32_to_cpu(data_transfer->data_length); +- if (data_length) { +- if (wc->byte_len < sizeof(struct smb_direct_data_transfer) + +- (u64)data_length) { +- put_recvmsg(t, recvmsg); +- smb_direct_disconnect_rdma_connection(t); +- return; +- } ++ data_offset = le32_to_cpu(data_transfer->data_offset); ++ if (wc->byte_len < data_offset || ++ wc->byte_len < (u64)data_offset + data_length) { ++ 
put_recvmsg(t, recvmsg); ++ smb_direct_disconnect_rdma_connection(t); ++ return; ++ } ++ if (remaining_data_length > t->max_fragmented_recv_size || ++ data_length > t->max_fragmented_recv_size || ++ (u64)remaining_data_length + (u64)data_length > ++ (u64)t->max_fragmented_recv_size) { ++ put_recvmsg(t, recvmsg); ++ smb_direct_disconnect_rdma_connection(t); ++ return; ++ } + ++ if (data_length) { + if (t->full_packet_received) + recvmsg->first_segment = true; + +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index 08b803a4fcde4c..7402bf3e037ee7 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -134,6 +134,7 @@ struct af_alg_async_req { + * SG? + * @enc: Cryptographic operation to be performed when + * recvmsg is invoked. ++ * @write: True if we are in the middle of a write. + * @init: True if metadata has been sent. + * @len: Length of memory allocated for this data structure. + * @inflight: Non-zero when AIO requests are in flight. +@@ -149,10 +150,11 @@ struct af_alg_ctx { + size_t used; + atomic_t rcvused; + +- bool more; +- bool merge; +- bool enc; +- bool init; ++ u32 more:1, ++ merge:1, ++ enc:1, ++ write:1, ++ init:1; + + unsigned int len; + +diff --git a/include/linux/minmax.h b/include/linux/minmax.h +index 2ec559284a9f6c..9c2848abc80496 100644 +--- a/include/linux/minmax.h ++++ b/include/linux/minmax.h +@@ -45,17 +45,20 @@ + + #define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y)) + +-#define __cmp_once(op, x, y, unique_x, unique_y) ({ \ +- typeof(x) unique_x = (x); \ +- typeof(y) unique_y = (y); \ ++#define __cmp_once_unique(op, type, x, y, ux, uy) \ ++ ({ type ux = (x); type uy = (y); __cmp(op, ux, uy); }) ++ ++#define __cmp_once(op, type, x, y) \ ++ __cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_)) ++ ++#define __careful_cmp_once(op, x, y) ({ \ + static_assert(__types_ok(x, y), \ + #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \ +- __cmp(op, unique_x, unique_y); }) ++ __cmp_once(op, __auto_type, x, y); }) + + #define __careful_cmp(op, x, y) \ + __builtin_choose_expr(__is_constexpr((x) - (y)), \ +- __cmp(op, x, y), \ +- __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y))) ++ __cmp(op, x, y), __careful_cmp_once(op, x, y)) + + #define __clamp(val, lo, hi) \ + ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val))) +@@ -158,7 +161,7 @@ + * @x: first value + * @y: second value + */ +-#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y)) ++#define min_t(type, x, y) __cmp_once(min, type, x, y) + + /** + * max_t - return maximum of two values, using the specified type +@@ -166,7 +169,7 @@ + * @x: first value + * @y: second value + */ +-#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y)) ++#define max_t(type, x, y) __cmp_once(max, type, x, y) + + /* + * Do not check the array parameter using __must_be_array(). +@@ -270,4 +273,11 @@ static inline bool in_range32(u32 val, u32 start, u32 len) + #define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + ++/* ++ * Use these carefully: no type checking, and uses the arguments ++ * multiple times. Use for obvious constants only. 
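The MIN_T()/MAX_T() comment above is worth taking literally: unlike min_t()/max_t(), which this hunk reworks to go through __cmp_once() so each argument is snapshotted into a uniquely named local of the requested type, the new macros expand their arguments twice and do no signedness checking. A standalone userspace sketch of the hazard (the next() helper and its counter are invented for illustration; this is not kernel code)::

    #include <stdio.h>

    /* Same shape as the kernel's new MIN_T(): no type check, and both
     * arguments are expanded more than once. */
    #define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    static int calls;

    static int next(void)
    {
            return ++calls;         /* side effect: visible if expanded twice */
    }

    int main(void)
    {
            /* Fine: obvious constants, folded at compile time. */
            printf("MIN_T on constants: %u\n", MIN_T(unsigned int, 16, 10));

            /* Hazard: next() runs twice because MIN_T() repeats its args. */
            calls = 0;
            int v = MIN_T(int, next(), 100);
            printf("MIN_T(int, next(), 100) = %d, next() called %d times\n",
                   v, calls);
            return 0;
    }

With genuine constants such as HUGETLB_PAGE_ORDER and MAX_ORDER, as in the pageblock_order user further below, both expansions fold at compile time, which is exactly the "obvious constants only" case the comment permits.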
++ */ ++#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b)) ++#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b)) ++ + #endif /* _LINUX_MINMAX_H */ +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index 696a2227869fb3..c0e0468b25a180 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -677,6 +677,7 @@ struct mlx5e_resources { + struct mlx5_sq_bfreg bfreg; + } hw_objs; + struct net_device *uplink_netdev; ++ netdevice_tracker tracker; + struct mutex uplink_netdev_lock; + struct mlx5_crypto_dek_priv *dek_priv; + }; +diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h +index e83c4c09504173..a4bf7f0989b211 100644 +--- a/include/linux/pageblock-flags.h ++++ b/include/linux/pageblock-flags.h +@@ -41,7 +41,7 @@ extern unsigned int pageblock_order; + * Huge pages are a constant size, but don't exceed the maximum allocation + * granularity. + */ +-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER) ++#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER) + + #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h +index ee9c49f949a2c5..00d62212167330 100644 +--- a/include/uapi/linux/mptcp.h ++++ b/include/uapi/linux/mptcp.h +@@ -81,6 +81,8 @@ enum { + + #define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1) + ++#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0) ++ + #define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) + #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) + #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) +@@ -132,13 +134,13 @@ struct mptcp_info { + + /* + * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, +- * sport, dport ++ * sport, dport, server-side, [flags] + * A new MPTCP connection has been created. It is the good time to allocate + * memory and send ADD_ADDR if needed. Depending on the traffic-patterns + * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent. + * + * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6, +- * sport, dport ++ * sport, dport, server-side, [flags] + * A MPTCP connection is established (can start new subflows). + * + * MPTCP_EVENT_CLOSED: token +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index 897f07014c0193..07a5824ad98d48 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -1459,9 +1459,10 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts) + + void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts) + { +- io_tw_lock(req->ctx, ts); +- /* req->task == current here, checking PF_EXITING is safe */ +- if (unlikely(req->task->flags & PF_EXITING)) ++ struct io_ring_ctx *ctx = req->ctx; ++ ++ io_tw_lock(ctx, ts); ++ if (unlikely(io_should_terminate_tw(ctx))) + io_req_defer_failed(req, -EFAULT); + else if (req->flags & REQ_F_FORCE_ASYNC) + io_queue_iowq(req); +diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h +index 59f5f71037ffe1..0f6b47f55c249c 100644 +--- a/io_uring/io_uring.h ++++ b/io_uring/io_uring.h +@@ -394,6 +394,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx) + ctx->submitter_task == current); + } + ++/* ++ * Terminate the request if either of these conditions are true: ++ * ++ * 1) It's being executed by the original task, but that task is marked ++ * with PF_EXITING as it's exiting. ++ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is ++ * our fallback task_work. 
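io_should_terminate_tw() folds the scattered `req->task->flags & PF_EXITING` tests that the io_uring, poll and timeout hunks below remove into one predicate, and widens it: task_work is also abandoned when it runs from the PF_KTHREAD fallback worker, or once the ring's percpu refs are dying. A small userspace model of the decision (the PF_* values are copied from include/linux/sched.h; the bool stands in for percpu_ref_is_dying())::

    #include <stdbool.h>
    #include <stdio.h>

    #define PF_EXITING      0x00000004      /* task is being shut down */
    #define PF_KTHREAD      0x00200000      /* kthread-based fallback task_work */

    static bool should_terminate_tw(unsigned int task_flags, bool ctx_refs_dying)
    {
            return (task_flags & (PF_KTHREAD | PF_EXITING)) || ctx_refs_dying;
    }

    int main(void)
    {
            printf("normal task, live ctx : %d\n", should_terminate_tw(0, false));
            printf("exiting task          : %d\n", should_terminate_tw(PF_EXITING, false));
            printf("fallback kthread      : %d\n", should_terminate_tw(PF_KTHREAD, false));
            printf("ctx refs dying        : %d\n", should_terminate_tw(0, true));
            return 0;
    }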
++ */ ++static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx) ++{ ++ return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs); ++} ++ + static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res) + { + io_req_set_res(req, res, 0); +diff --git a/io_uring/poll.c b/io_uring/poll.c +index 65935ec8de89c4..b6c8acd8625e3f 100644 +--- a/io_uring/poll.c ++++ b/io_uring/poll.c +@@ -258,8 +258,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts) + { + int v; + +- /* req->task == current here, checking PF_EXITING is safe */ +- if (unlikely(req->task->flags & PF_EXITING)) ++ if (unlikely(io_should_terminate_tw(req->ctx))) + return -ECANCELED; + + do { +diff --git a/io_uring/timeout.c b/io_uring/timeout.c +index 277e22d55c6171..be2a0f6c209b2a 100644 +--- a/io_uring/timeout.c ++++ b/io_uring/timeout.c +@@ -307,7 +307,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t + int ret = -ENOENT; + + if (prev) { +- if (!(req->task->flags & PF_EXITING)) { ++ if (!io_should_terminate_tw(req->ctx)) { + struct io_cancel_data cd = { + .ctx = req->ctx, + .data = prev->cqe.user_data, +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index e8ef062f6ca058..5135838b5899f8 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem); + * of concurrent destructions. Use a separate workqueue so that cgroup + * destruction work items don't end up filling up max_active of system_wq + * which may lead to deadlock. ++ * ++ * A cgroup destruction should enqueue work sequentially to: ++ * cgroup_offline_wq: use for css offline work ++ * cgroup_release_wq: use for css release work ++ * cgroup_free_wq: use for free work ++ * ++ * Rationale for using separate workqueues: ++ * The cgroup root free work may depend on completion of other css offline ++ * operations. If all tasks were enqueued to a single workqueue, this could ++ * create a deadlock scenario where: ++ * - Free work waits for other css offline work to complete. ++ * - But other css offline work is queued after free work in the same queue. ++ * ++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq): ++ * 1. umount net_prio ++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx) ++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx) ++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline. ++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline, ++ * which can never complete as it's behind in the same queue and ++ * workqueue's max_active is 1. 
+ */ +-static struct workqueue_struct *cgroup_destroy_wq; ++static struct workqueue_struct *cgroup_offline_wq; ++static struct workqueue_struct *cgroup_release_wq; ++static struct workqueue_struct *cgroup_free_wq; + + /* generate an array of cgroup subsystem pointers */ + #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, +@@ -5435,7 +5458,7 @@ static void css_release_work_fn(struct work_struct *work) + cgroup_unlock(); + + INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); +- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); ++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork); + } + + static void css_release(struct percpu_ref *ref) +@@ -5444,7 +5467,7 @@ static void css_release(struct percpu_ref *ref) + container_of(ref, struct cgroup_subsys_state, refcnt); + + INIT_WORK(&css->destroy_work, css_release_work_fn); +- queue_work(cgroup_destroy_wq, &css->destroy_work); ++ queue_work(cgroup_release_wq, &css->destroy_work); + } + + static void init_and_link_css(struct cgroup_subsys_state *css, +@@ -5566,7 +5589,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp, + err_free_css: + list_del_rcu(&css->rstat_css_node); + INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); +- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); ++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork); + return ERR_PTR(err); + } + +@@ -5801,7 +5824,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref) + + if (atomic_dec_and_test(&css->online_cnt)) { + INIT_WORK(&css->destroy_work, css_killed_work_fn); +- queue_work(cgroup_destroy_wq, &css->destroy_work); ++ queue_work(cgroup_offline_wq, &css->destroy_work); + } + } + +@@ -6173,8 +6196,14 @@ static int __init cgroup_wq_init(void) + * We would prefer to do this in cgroup_init() above, but that + * is called before init_workqueues(): so leave this until after. 
+ */ +- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); +- BUG_ON(!cgroup_destroy_wq); ++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1); ++ BUG_ON(!cgroup_offline_wq); ++ ++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1); ++ BUG_ON(!cgroup_release_wq); ++ ++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1); ++ BUG_ON(!cgroup_free_wq); + return 0; + } + core_initcall(cgroup_wq_init); +diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c +index a85b0aba36462d..b444767b15a5d2 100644 +--- a/net/ipv4/proc.c ++++ b/net/ipv4/proc.c +@@ -43,7 +43,7 @@ + #include + #include + +-#define TCPUDP_MIB_MAX max_t(u32, UDP_MIB_MAX, TCP_MIB_MAX) ++#define TCPUDP_MIB_MAX MAX_T(u32, UDP_MIB_MAX, TCP_MIB_MAX) + + /* + * Report socket allocation statistics [mea@utu.fi] +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index a4bbe959d1e25f..40a2f172be2cbd 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -3011,6 +3011,7 @@ int tcp_disconnect(struct sock *sk, int flags) + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + int old_state = sk->sk_state; ++ struct request_sock *req; + u32 seq; + + if (old_state != TCP_CLOSE) +@@ -3121,6 +3122,10 @@ int tcp_disconnect(struct sock *sk, int flags) + + + /* Clean up fastopen related fields */ ++ req = rcu_dereference_protected(tp->fastopen_rsk, ++ lockdep_sock_is_held(sk)); ++ if (req) ++ reqsk_fastopen_remove(sk, req, false); + tcp_free_fastopen_req(tp); + inet_clear_bit(DEFER_CONNECT, sk); + tp->fastopen_client_fail = 0; +diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c +index 6d1d9221649d52..752327b10dde74 100644 +--- a/net/ipv6/proc.c ++++ b/net/ipv6/proc.c +@@ -27,7 +27,7 @@ + #include + + #define MAX4(a, b, c, d) \ +- max_t(u32, max_t(u32, a, b), max_t(u32, c, d)) ++ MAX_T(u32, MAX_T(u32, a, b), MAX_T(u32, c, d)) + #define SNMP_MIB_MAX MAX4(UDP_MIB_MAX, TCP_MIB_MAX, \ + IPSTATS_MIB_MAX, ICMP_MIB_MAX) + +diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h +index 78aa3bc51586e2..5f8f72ca2769c4 100644 +--- a/net/mac80211/driver-ops.h ++++ b/net/mac80211/driver-ops.h +@@ -1273,7 +1273,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_ftm_responder_stats *ftm_stats) + { +- u32 ret = -EOPNOTSUPP; ++ int ret = -EOPNOTSUPP; + + if (local->ops->get_ftm_responder_stats) + ret = local->ops->get_ftm_responder_stats(&local->hw, +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index 3a6fff98748b86..80b143bde93d63 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -965,7 +965,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + int result, i; + enum nl80211_band band; + int channels, max_bitrates; +- bool supp_ht, supp_vht, supp_he, supp_eht; ++ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g; + struct cfg80211_chan_def dflt_chandef = {}; + + if (ieee80211_hw_check(hw, QUEUE_CONTROL) && +@@ -1081,6 +1081,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + supp_vht = false; + supp_he = false; + supp_eht = false; ++ supp_s1g = false; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband; + +@@ -1127,6 +1128,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + max_bitrates = sband->n_bitrates; + supp_ht = supp_ht || sband->ht_cap.ht_supported; + supp_vht = supp_vht || sband->vht_cap.vht_supported; ++ supp_s1g = supp_s1g || sband->s1g_cap.s1g; + + for (i = 0; i < sband->n_iftype_data; i++) { + const struct ieee80211_sband_iftype_data *iftd; 
+@@ -1253,6 +1255,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + local->scan_ies_len += + 2 + sizeof(struct ieee80211_vht_cap); + ++ if (supp_s1g) ++ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap); ++ + /* + * HE cap element is variable in size - set len to allow max size */ + if (supp_he) { +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 9406d2d555e74d..b245abd08c8241 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -985,13 +985,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, + return false; + } + +- if (mp_opt->deny_join_id0) +- WRITE_ONCE(msk->pm.remote_deny_join_id0, true); +- + if (unlikely(!READ_ONCE(msk->pm.server_side))) + pr_warn_once("bogus mpc option on established client sk"); + + set_fully_established: ++ if (mp_opt->deny_join_id0) ++ WRITE_ONCE(msk->pm.remote_deny_join_id0, true); ++ + mptcp_data_lock((struct sock *)msk); + __mptcp_subflow_fully_established(msk, subflow, mp_opt); + mptcp_data_unlock((struct sock *)msk); +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index e8042014bd5f4a..de24989b05a600 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -2252,6 +2252,7 @@ static int mptcp_event_created(struct sk_buff *skb, + const struct sock *ssk) + { + int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token); ++ u16 flags = 0; + + if (err) + return err; +@@ -2259,6 +2260,12 @@ static int mptcp_event_created(struct sk_buff *skb, + if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side))) + return -EMSGSIZE; + ++ if (READ_ONCE(msk->pm.remote_deny_join_id0)) ++ flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0; ++ ++ if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags)) ++ return -EMSGSIZE; ++ + return mptcp_event_add_subflow(skb, ssk); + } + +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index e3f09467b36b2f..643d64bdef2ea7 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -415,6 +415,20 @@ static void mptcp_close_wake_up(struct sock *sk) + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + } + ++static void mptcp_shutdown_subflows(struct mptcp_sock *msk) ++{ ++ struct mptcp_subflow_context *subflow; ++ ++ mptcp_for_each_subflow(msk, subflow) { ++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++ bool slow; ++ ++ slow = lock_sock_fast(ssk); ++ tcp_shutdown(ssk, SEND_SHUTDOWN); ++ unlock_sock_fast(ssk, slow); ++ } ++} ++ + static bool mptcp_pending_data_fin_ack(struct sock *sk) + { + struct mptcp_sock *msk = mptcp_sk(sk); +@@ -438,6 +452,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk) + break; + case TCP_CLOSING: + case TCP_LAST_ACK: ++ mptcp_shutdown_subflows(msk); + mptcp_set_state(sk, TCP_CLOSE); + break; + } +@@ -605,6 +620,7 @@ static bool mptcp_check_data_fin(struct sock *sk) + mptcp_set_state(sk, TCP_CLOSING); + break; + case TCP_FIN_WAIT2: ++ mptcp_shutdown_subflows(msk); + mptcp_set_state(sk, TCP_CLOSE); + break; + default: +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 0c9b9c0c277c23..dfee1890c841bb 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -863,6 +863,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, + + ctx->subflow_id = 1; + owner = mptcp_sk(ctx->conn); ++ ++ if (mp_opt.deny_join_id0) ++ WRITE_ONCE(owner->pm.remote_deny_join_id0, true); ++ + mptcp_pm_new_connection(owner, child, 1); + + /* with OoO packets we can reach here without ingress +diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c +index 28c1b00221780f..bd861191157b54 
100644 +--- a/net/rds/ib_frmr.c ++++ b/net/rds/ib_frmr.c +@@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) + + ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, + &off, PAGE_SIZE); +- if (unlikely(ret != ibmr->sg_dma_len)) +- return ret < 0 ? ret : -EINVAL; ++ if (unlikely(ret != ibmr->sg_dma_len)) { ++ ret = ret < 0 ? ret : -EINVAL; ++ goto out_inc; ++ } + +- if (cmpxchg(&frmr->fr_state, +- FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) +- return -EBUSY; ++ if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) { ++ ret = -EBUSY; ++ goto out_inc; ++ } + + atomic_inc(&ibmr->ic->i_fastreg_inuse_count); + +@@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) + /* Failure here can be because of -ENOMEM as well */ + rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE); + +- atomic_inc(&ibmr->ic->i_fastreg_wrs); + if (printk_ratelimit()) + pr_warn("RDS/IB: %s returned error(%d)\n", + __func__, ret); +- goto out; ++ goto out_inc; + } + + /* Wait for the registration to complete in order to prevent an invalid +@@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) + */ + wait_event(frmr->fr_reg_done, !frmr->fr_reg); + +-out: ++ return ret; + ++out_inc: ++ atomic_inc(&ibmr->ic->i_fastreg_wrs); + return ret; + } + +diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c +index b12edbe0ef45c1..badc71acbbdd8d 100644 +--- a/net/rfkill/rfkill-gpio.c ++++ b/net/rfkill/rfkill-gpio.c +@@ -79,10 +79,10 @@ static int rfkill_gpio_acpi_probe(struct device *dev, + static int rfkill_gpio_probe(struct platform_device *pdev) + { + struct rfkill_gpio_data *rfkill; +- struct gpio_desc *gpio; ++ const char *type_name = NULL; + const char *name_property; + const char *type_property; +- const char *type_name; ++ struct gpio_desc *gpio; + int ret; + + rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL); +diff --git a/net/tls/tls.h b/net/tls/tls.h +index 5dc61c85c076ec..a3c5c5a59fda69 100644 +--- a/net/tls/tls.h ++++ b/net/tls/tls.h +@@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx); + + int wait_on_pending_writer(struct sock *sk, long *timeo); + void tls_err_abort(struct sock *sk, int err); ++void tls_strp_abort_strp(struct tls_strparser *strp, int err); + + int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); + void tls_update_rx_zc_capable(struct tls_context *tls_ctx); +diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c +index 6ce64a6e4495ec..ae723cd6af3975 100644 +--- a/net/tls/tls_strp.c ++++ b/net/tls/tls_strp.c +@@ -12,7 +12,7 @@ + + static struct workqueue_struct *tls_strp_wq; + +-static void tls_strp_abort_strp(struct tls_strparser *strp, int err) ++void tls_strp_abort_strp(struct tls_strparser *strp, int err) + { + if (strp->stopped) + return; +@@ -210,11 +210,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb, + struct sk_buff *in_skb, unsigned int offset, + size_t in_len) + { ++ unsigned int nfrag = skb->len / PAGE_SIZE; + size_t len, chunk; + skb_frag_t *frag; + int sz; + +- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; ++ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) { ++ DEBUG_NET_WARN_ON_ONCE(1); ++ return -EMSGSIZE; ++ } ++ ++ frag = &skb_shinfo(skb)->frags[nfrag]; + + len = in_len; + /* First make sure we got the header */ +@@ -519,10 +525,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp) + tls_strp_load_anchor_with_queue(strp, inq); + if 
(!strp->stm.full_len) { + sz = tls_rx_msg_size(strp, strp->anchor); +- if (sz < 0) { +- tls_strp_abort_strp(strp, sz); ++ if (sz < 0) + return sz; +- } + + strp->stm.full_len = sz; + +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 27ce1feb79e14e..435235a351e2f4 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -2441,8 +2441,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb) + return data_len + TLS_HEADER_SIZE; + + read_failure: +- tls_err_abort(strp->sk, ret); +- ++ tls_strp_abort_strp(strp, ret); + return ret; + } + +diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c +index 88d1f4b56e4be4..a220ac0c8eb831 100644 +--- a/sound/firewire/motu/motu-hwdep.c ++++ b/sound/firewire/motu/motu-hwdep.c +@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file, + events = 0; + spin_unlock_irq(&motu->lock); + +- return events | EPOLLOUT; ++ return events; + } + + static int hwdep_get_info(struct snd_motu *motu, void __user *arg) +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 6aae06223f2664..5fe6b71d90f4f4 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10161,6 +10161,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2), ++ SND_PCI_QUIRK(0x103c, 0x89a0, "HP Laptop 15-dw4xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED), +diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c +index b9432f8b64e5bc..39d2c8e85d9dbc 100644 +--- a/sound/soc/codecs/wm8940.c ++++ b/sound/soc/codecs/wm8940.c +@@ -220,7 +220,7 @@ static const struct snd_kcontrol_new wm8940_snd_controls[] = { + SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL, + 0, 255, 0, wm8940_adc_tlv), + SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum), +- SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST, ++ SOC_SINGLE_TLV("Capture Boost Volume", WM8940_ADCBOOST, + 8, 1, 0, wm8940_capture_boost_vol_tlv), + SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL, + 0, 63, 0, wm8940_spk_vol_tlv), +@@ -693,7 +693,12 @@ static int wm8940_update_clocks(struct snd_soc_dai *dai) + f = wm8940_get_mclkdiv(priv->mclk, fs256, &mclkdiv); + if (f != priv->mclk) { + /* The PLL performs best around 90MHz */ +- fpll = wm8940_get_mclkdiv(22500000, fs256, &mclkdiv); ++ if (fs256 % 8000) ++ f = 22579200; ++ else ++ f = 24576000; ++ ++ fpll = wm8940_get_mclkdiv(f, fs256, &mclkdiv); + } + + wm8940_set_dai_pll(dai, 0, 0, priv->mclk, fpll); +diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c +index 260bac695b20ab..2aaa1cbe68b71c 100644 +--- a/sound/soc/codecs/wm8974.c ++++ b/sound/soc/codecs/wm8974.c +@@ -419,10 +419,14 @@ static int wm8974_update_clocks(struct snd_soc_dai *dai) + fs256 = 256 * priv->fs; + + f = wm8974_get_mclkdiv(priv->mclk, fs256, &mclkdiv); +- + if (f != priv->mclk) { + /* The PLL performs best around 90MHz */ +- fpll = wm8974_get_mclkdiv(22500000, fs256, &mclkdiv); ++ if (fs256 % 8000) ++ f = 22579200; ++ else ++ f = 24576000; ++ ++ fpll = 
wm8974_get_mclkdiv(f, fs256, &mclkdiv); + } + + wm8974_set_dai_pll(dai, 0, 0, priv->mclk, fpll); +diff --git a/sound/soc/qcom/qdsp6/audioreach.c b/sound/soc/qcom/qdsp6/audioreach.c +index 5974c7929dd370..3eab43e9efb324 100644 +--- a/sound/soc/qcom/qdsp6/audioreach.c ++++ b/sound/soc/qcom/qdsp6/audioreach.c +@@ -967,6 +967,7 @@ static int audioreach_i2s_set_media_format(struct q6apm_graph *graph, + param_data->param_id = PARAM_ID_I2S_INTF_CFG; + param_data->param_size = ic_sz - APM_MODULE_PARAM_DATA_SIZE; + ++ intf_cfg->cfg.lpaif_type = module->hw_interface_type; + intf_cfg->cfg.intf_idx = module->hw_interface_idx; + intf_cfg->cfg.sd_line_idx = module->sd_line_idx; + +diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c +index 6511f0a08de161..3813fe5e01819a 100644 +--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c ++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c +@@ -207,8 +207,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s + + return 0; + err: +- q6apm_graph_close(dai_data->graph[dai->id]); +- dai_data->graph[dai->id] = NULL; ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ q6apm_graph_close(dai_data->graph[dai->id]); ++ dai_data->graph[dai->id] = NULL; ++ } + return rc; + } + +@@ -254,6 +256,7 @@ static const struct snd_soc_dai_ops q6i2s_ops = { + .shutdown = q6apm_lpass_dai_shutdown, + .set_channel_map = q6dma_set_channel_map, + .hw_params = q6dma_hw_params, ++ .set_fmt = q6i2s_set_fmt, + }; + + static const struct snd_soc_dai_ops q6hdmi_ops = { +diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c +index 0b0087abcc50ed..3bb743cb167a5b 100644 +--- a/sound/soc/sof/intel/hda-stream.c ++++ b/sound/soc/sof/intel/hda-stream.c +@@ -842,7 +842,7 @@ int hda_dsp_stream_init(struct snd_sof_dev *sdev) + + if (num_capture >= SOF_HDA_CAPTURE_STREAMS) { + dev_err(sdev->dev, "error: too many capture streams %d\n", +- num_playback); ++ num_capture); + return -EINVAL; + } + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c +index c83a8b47bbdfa5..fc9eff0e89e226 100644 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c +@@ -1079,6 +1079,7 @@ int main_loop_s(int listensock) + struct pollfd polls; + socklen_t salen; + int remotesock; ++ int err = 0; + int fd = 0; + + again: +@@ -1111,7 +1112,7 @@ int main_loop_s(int listensock) + SOCK_TEST_TCPULP(remotesock, 0); + + memset(&winfo, 0, sizeof(winfo)); +- copyfd_io(fd, remotesock, 1, true, &winfo); ++ err = copyfd_io(fd, remotesock, 1, true, &winfo); + } else { + perror("accept"); + return 1; +@@ -1120,10 +1121,10 @@ int main_loop_s(int listensock) + if (cfg_input) + close(fd); + +- if (--cfg_repeat > 0) ++ if (!err && --cfg_repeat > 0) + goto again; + +- return 0; ++ return err; + } + + static void init_rng(void) +@@ -1233,7 +1234,7 @@ void xdisconnect(int fd) + else + xerror("bad family"); + +- strcpy(cmd, "ss -M | grep -q "); ++ strcpy(cmd, "ss -Mnt | grep -q "); + cmdlen = strlen(cmd); + if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen], + sizeof(cmd) - cmdlen)) +@@ -1243,7 +1244,7 @@ void xdisconnect(int fd) + + /* + * wait until the pending data is completely flushed and all +- * the MPTCP sockets reached the closed status. ++ * the sockets reached the closed status. + * disconnect will bypass/ignore/drop any pending data. 
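The wm8940_update_clocks() and wm8974_update_clocks() hunks earlier replace the fixed 22.5 MHz PLL target with one chosen per sample-rate family: fs256 = 256 * fs divides evenly by 8 kHz only for the 48 kHz family, which wants a 24.576 MHz (512 * 48000) reference, while 44.1 kHz-family rates need 22.5792 MHz (512 * 44100). A runnable sketch of just that selection (the rate list is chosen for illustration)::

    #include <stdio.h>

    /* Mirrors the selection added in wm8940/wm8974_update_clocks(): pick
     * the audio-family master clock the PLL should generate. */
    static unsigned int pll_target_hz(unsigned int fs)
    {
            unsigned int fs256 = 256 * fs;

            /* 44.1 kHz family rates are not divisible by 8 kHz. */
            return (fs256 % 8000) ? 22579200 : 24576000;
    }

    int main(void)
    {
            const unsigned int rates[] = {
                    8000, 16000, 32000, 44100, 48000, 88200, 96000
            };

            for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
                    printf("fs=%6u Hz -> PLL target %u Hz\n",
                           rates[i], pll_target_hz(rates[i]));
            return 0;
    }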
+ */ + for (i = 0; ; i += msec_sleep) { +diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c +index 926b0be87c9905..1dc2bd6ee4a50e 100644 +--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c ++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c +@@ -658,22 +658,26 @@ static void process_one_client(int fd, int pipefd) + + do_getsockopts(&s, fd, ret, ret2); + if (s.mptcpi_rcv_delta != (uint64_t)ret + 1) +- xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret); ++ xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64, ++ s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1)); + + /* be nice when running on top of older kernel */ + if (s.pkt_stats_avail) { + if (s.last_sample.mptcpi_bytes_sent != ret2) +- xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64, ++ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64 ++ ", diff %" PRId64, + s.last_sample.mptcpi_bytes_sent, ret2, + s.last_sample.mptcpi_bytes_sent - ret2); + if (s.last_sample.mptcpi_bytes_received != ret) +- xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64, ++ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64 ++ ", diff %" PRId64, + s.last_sample.mptcpi_bytes_received, ret, + s.last_sample.mptcpi_bytes_received - ret); + if (s.last_sample.mptcpi_bytes_acked != ret) +- xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64, +- s.last_sample.mptcpi_bytes_acked, ret2, +- s.last_sample.mptcpi_bytes_acked - ret2); ++ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64 ++ ", diff %" PRId64, ++ s.last_sample.mptcpi_bytes_acked, ret, ++ s.last_sample.mptcpi_bytes_acked - ret); + } + + close(fd); +diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c +index 763402dd17742f..234c267dd2aad3 100644 +--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c ++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c +@@ -194,6 +194,13 @@ static int capture_events(int fd, int event_group) + fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs)); + else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE) + fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs)); ++ else if (attrs->rta_type == MPTCP_ATTR_FLAGS) { ++ __u16 flags = *(__u16 *)RTA_DATA(attrs); ++ ++ /* only print when present, easier */ ++ if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0) ++ fprintf(stderr, ",deny_join_id0:1"); ++ } + + attrs = RTA_NEXT(attrs, msg_len); + } +diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh +index c5d7af8e8efde1..4e966a9e373835 100755 +--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh ++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh +@@ -196,6 +196,9 @@ make_connection() + is_v6="v4" + fi + ++ # set this on the client side only: will not affect the rest ++ ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0 ++ + # Capture netlink events over the two network namespaces running + # the MPTCP client and server + if [ -z "$client_evts" ]; then +@@ -227,23 +230,28 @@ make_connection() + local client_token + local client_port + local client_serverside ++ local client_nojoin + local server_token + local server_serverside ++ local server_nojoin + + client_token=$(mptcp_lib_evts_get_info token "$client_evts") + client_port=$(mptcp_lib_evts_get_info sport "$client_evts") + client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts") ++ 
client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts") + server_token=$(mptcp_lib_evts_get_info token "$server_evts") + server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts") ++ server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts") + + print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1" +- if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] && +- [ "$server_serverside" = 1 ] ++ if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] && ++ [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] && ++ [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ] + then + test_pass + print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}" + else +- test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})" ++ test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})" + mptcp_lib_result_print_all_tap + exit 1 + fi
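For userspace consumers, the new MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 bit (_BITUL(0)) travels as a u16 MPTCP_ATTR_FLAGS netlink attribute on the CREATED/ESTABLISHED events, and the pm_nl_ctl.c hunk above prints it only when set. A minimal decoding sketch (the buffer handed to print_event_flags() is illustrative, not a captured netlink message)::

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0  (1U << 0)       /* _BITUL(0) */

    /* Decode the host-endian u16 payload of an MPTCP_ATTR_FLAGS attribute. */
    static void print_event_flags(const void *payload)
    {
            uint16_t flags;

            memcpy(&flags, payload, sizeof(flags));
            if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
                    printf(",deny_join_id0:1");      /* matches pm_nl_ctl */
            printf("\n");
    }

    int main(void)
    {
            uint16_t with = MPTCP_PM_EV_FLAG_DENY_JOIN_ID0, without = 0;

            printf("server event"); print_event_flags(&with);
            printf("client event"); print_event_flags(&without);
            return 0;
    }

This is also why the shell test treats an absent attribute as 0 (`${client_nojoin:-0}`): the kernel emits MPTCP_ATTR_FLAGS only when at least one flag is set.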