4556 lines
153 KiB
Diff
4556 lines
153 KiB
Diff
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
|
|
index 38dc56a577604..ecec514b31550 100644
|
|
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
|
|
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
|
|
@@ -43,19 +43,19 @@ group emmc_nb
|
|
|
|
group pwm0
|
|
- pin 11 (GPIO1-11)
|
|
- - functions pwm, gpio
|
|
+ - functions pwm, led, gpio
|
|
|
|
group pwm1
|
|
- pin 12
|
|
- - functions pwm, gpio
|
|
+ - functions pwm, led, gpio
|
|
|
|
group pwm2
|
|
- pin 13
|
|
- - functions pwm, gpio
|
|
+ - functions pwm, led, gpio
|
|
|
|
group pwm3
|
|
- pin 14
|
|
- - functions pwm, gpio
|
|
+ - functions pwm, led, gpio
|
|
|
|
group pmic1
|
|
- pin 7
|
|
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
|
|
index 056898685d408..fc531c29a2e83 100644
|
|
--- a/Documentation/networking/ipvs-sysctl.txt
|
|
+++ b/Documentation/networking/ipvs-sysctl.txt
|
|
@@ -30,8 +30,7 @@ conn_reuse_mode - INTEGER
|
|
|
|
0: disable any special handling on port reuse. The new
|
|
connection will be delivered to the same real server that was
|
|
- servicing the previous connection. This will effectively
|
|
- disable expire_nodest_conn.
|
|
+ servicing the previous connection.
|
|
|
|
bit 1: enable rescheduling of new connections when it is safe.
|
|
That is, whenever expire_nodest_conn and for TCP sockets, when
|
|
diff --git a/Makefile b/Makefile
|
|
index e8b05f7d3b238..91d77df0128b4 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,7 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
VERSION = 5
|
|
PATCHLEVEL = 4
|
|
-SUBLEVEL = 162
|
|
+SUBLEVEL = 163
|
|
EXTRAVERSION =
|
|
NAME = Kleptomaniac Octopus
|
|
|
|
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
|
|
index 9711170649b69..05d67f9769118 100644
|
|
--- a/arch/arm/boot/dts/bcm5301x.dtsi
|
|
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
|
|
@@ -242,6 +242,8 @@
|
|
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
+ interrupt-controller;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
pcie0: pcie@12000 {
|
|
@@ -387,7 +389,7 @@
|
|
i2c0: i2c@18009000 {
|
|
compatible = "brcm,iproc-i2c";
|
|
reg = <0x18009000 0x50>;
|
|
- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
clock-frequency = <100000>;
|
|
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
|
|
index fc2608b18a0d0..18f01190dcfd4 100644
|
|
--- a/arch/arm/mach-socfpga/core.h
|
|
+++ b/arch/arm/mach-socfpga/core.h
|
|
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
|
|
u32 socfpga_sdram_self_refresh(u32 sdr_base);
|
|
extern unsigned int socfpga_sdram_self_refresh_sz;
|
|
|
|
-extern char secondary_trampoline, secondary_trampoline_end;
|
|
+extern char secondary_trampoline[], secondary_trampoline_end[];
|
|
|
|
extern unsigned long socfpga_cpu1start_addr;
|
|
|
|
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
|
|
index fbb80b883e5dd..201191cf68f32 100644
|
|
--- a/arch/arm/mach-socfpga/platsmp.c
|
|
+++ b/arch/arm/mach-socfpga/platsmp.c
|
|
@@ -20,14 +20,14 @@
|
|
|
|
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|
{
|
|
- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
|
|
+ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
|
|
|
|
if (socfpga_cpu1start_addr) {
|
|
/* This will put CPU #1 into reset. */
|
|
writel(RSTMGR_MPUMODRST_CPU1,
|
|
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
|
|
|
|
- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
|
+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
|
|
|
|
writel(__pa_symbol(secondary_startup),
|
|
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
|
|
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|
|
|
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|
{
|
|
- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
|
|
+ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
|
|
|
|
if (socfpga_cpu1start_addr) {
|
|
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
|
|
SOCFPGA_A10_RSTMGR_MODMPURST);
|
|
- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
|
+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
|
|
|
|
writel(__pa_symbol(secondary_startup),
|
|
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
|
|
index f2cc00594d64a..3e5789f372069 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
|
|
@@ -128,6 +128,9 @@
|
|
|
|
/* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
|
|
&pcie0 {
|
|
+ pinctrl-names = "default";
|
|
+ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
|
|
+ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
|
|
status = "okay";
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
|
|
index 6226e7e809807..a75bb2ea3506d 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
|
|
@@ -59,6 +59,7 @@
|
|
phys = <&comphy1 0>;
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
|
|
+ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
|
|
};
|
|
|
|
/* J6 */
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
|
|
index de0eabff29353..16e73597bb78c 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
|
|
@@ -127,10 +127,6 @@
|
|
};
|
|
};
|
|
|
|
-&pcie_reset_pins {
|
|
- function = "gpio";
|
|
-};
|
|
-
|
|
&pcie0 {
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
index c28611c1c251a..3d15e4ab3f53a 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
@@ -318,7 +318,7 @@
|
|
|
|
pcie_reset_pins: pcie-reset-pins {
|
|
groups = "pcie1";
|
|
- function = "pcie";
|
|
+ function = "gpio";
|
|
};
|
|
|
|
pcie_clkreq_pins: pcie-clkreq-pins {
|
|
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
|
|
index 9749818eed6d6..2811ecc1f3c71 100644
|
|
--- a/arch/mips/Kconfig
|
|
+++ b/arch/mips/Kconfig
|
|
@@ -3059,7 +3059,7 @@ config STACKTRACE_SUPPORT
|
|
config PGTABLE_LEVELS
|
|
int
|
|
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
|
|
- default 3 if 64BIT && !PAGE_SIZE_64KB
|
|
+ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
|
|
default 2
|
|
|
|
config MIPS_AUTO_PFN_OFFSET
|
|
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
|
|
index 164483b37d854..99cd24f2ea01b 100644
|
|
--- a/arch/parisc/kernel/vmlinux.lds.S
|
|
+++ b/arch/parisc/kernel/vmlinux.lds.S
|
|
@@ -56,8 +56,6 @@ SECTIONS
|
|
{
|
|
. = KERNEL_BINARY_TEXT_START;
|
|
|
|
- _stext = .; /* start of kernel text, includes init code & data */
|
|
-
|
|
__init_begin = .;
|
|
HEAD_TEXT_SECTION
|
|
MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
|
|
@@ -81,6 +79,7 @@ SECTIONS
|
|
/* freed after init ends here */
|
|
|
|
_text = .; /* Text and read-only data */
|
|
+ _stext = .;
|
|
MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
|
|
.text ALIGN(PAGE_SIZE) : {
|
|
TEXT_TEXT
|
|
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
|
|
index 4a91b543a8540..6d34b69729854 100644
|
|
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
|
|
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
|
|
@@ -821,6 +821,7 @@ static void flush_guest_tlb(struct kvm *kvm)
|
|
"r" (0) : "memory");
|
|
}
|
|
asm volatile("ptesync": : :"memory");
|
|
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
|
|
asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
|
|
} else {
|
|
for (set = 0; set < kvm->arch.tlb_sets; ++set) {
|
|
@@ -831,7 +832,9 @@ static void flush_guest_tlb(struct kvm *kvm)
|
|
rb += PPC_BIT(51); /* increment set number */
|
|
}
|
|
asm volatile("ptesync": : :"memory");
|
|
- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
|
|
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
|
|
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
|
|
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
|
|
}
|
|
}
|
|
|
|
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
|
|
index 9ebd01219812c..4438c00acb656 100644
|
|
--- a/arch/s390/mm/pgtable.c
|
|
+++ b/arch/s390/mm/pgtable.c
|
|
@@ -970,6 +970,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
|
|
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
|
|
unsigned long *oldpte, unsigned long *oldpgste)
|
|
{
|
|
+ struct vm_area_struct *vma;
|
|
unsigned long pgstev;
|
|
spinlock_t *ptl;
|
|
pgste_t pgste;
|
|
@@ -979,6 +980,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
|
|
WARN_ON_ONCE(orc > ESSA_MAX);
|
|
if (unlikely(orc > ESSA_MAX))
|
|
return -EINVAL;
|
|
+
|
|
+ vma = find_vma(mm, hva);
|
|
+ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
|
|
+ return -EFAULT;
|
|
ptep = get_locked_pte(mm, hva, &ptl);
|
|
if (unlikely(!ptep))
|
|
return -EFAULT;
|
|
@@ -1071,10 +1076,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
|
|
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
|
|
unsigned long bits, unsigned long value)
|
|
{
|
|
+ struct vm_area_struct *vma;
|
|
spinlock_t *ptl;
|
|
pgste_t new;
|
|
pte_t *ptep;
|
|
|
|
+ vma = find_vma(mm, hva);
|
|
+ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
|
|
+ return -EFAULT;
|
|
ptep = get_locked_pte(mm, hva, &ptl);
|
|
if (unlikely(!ptep))
|
|
return -EFAULT;
|
|
@@ -1099,9 +1108,13 @@ EXPORT_SYMBOL(set_pgste_bits);
|
|
*/
|
|
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
|
|
{
|
|
+ struct vm_area_struct *vma;
|
|
spinlock_t *ptl;
|
|
pte_t *ptep;
|
|
|
|
+ vma = find_vma(mm, hva);
|
|
+ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
|
|
+ return -EFAULT;
|
|
ptep = get_locked_pte(mm, hva, &ptl);
|
|
if (unlikely(!ptep))
|
|
return -EFAULT;
|
|
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
|
|
index 47f839bc0234f..1cdc7426bd033 100644
|
|
--- a/drivers/android/binder.c
|
|
+++ b/drivers/android/binder.c
|
|
@@ -3095,7 +3095,7 @@ static void binder_transaction(struct binder_proc *proc,
|
|
t->from = thread;
|
|
else
|
|
t->from = NULL;
|
|
- t->sender_euid = proc->cred->euid;
|
|
+ t->sender_euid = task_euid(proc->tsk);
|
|
t->to_proc = target_proc;
|
|
t->to_thread = target_thread;
|
|
t->code = tr->code;
|
|
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
|
|
index def41e1bd7364..baf10b73675e2 100644
|
|
--- a/drivers/block/xen-blkfront.c
|
|
+++ b/drivers/block/xen-blkfront.c
|
|
@@ -80,6 +80,7 @@ enum blkif_state {
|
|
BLKIF_STATE_DISCONNECTED,
|
|
BLKIF_STATE_CONNECTED,
|
|
BLKIF_STATE_SUSPENDED,
|
|
+ BLKIF_STATE_ERROR,
|
|
};
|
|
|
|
struct grant {
|
|
@@ -89,6 +90,7 @@ struct grant {
|
|
};
|
|
|
|
enum blk_req_status {
|
|
+ REQ_PROCESSING,
|
|
REQ_WAITING,
|
|
REQ_DONE,
|
|
REQ_ERROR,
|
|
@@ -533,10 +535,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
|
|
|
|
id = get_id_from_freelist(rinfo);
|
|
rinfo->shadow[id].request = req;
|
|
- rinfo->shadow[id].status = REQ_WAITING;
|
|
+ rinfo->shadow[id].status = REQ_PROCESSING;
|
|
rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
|
|
|
|
- (*ring_req)->u.rw.id = id;
|
|
+ rinfo->shadow[id].req.u.rw.id = id;
|
|
|
|
return id;
|
|
}
|
|
@@ -544,11 +546,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
|
|
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
|
|
{
|
|
struct blkfront_info *info = rinfo->dev_info;
|
|
- struct blkif_request *ring_req;
|
|
+ struct blkif_request *ring_req, *final_ring_req;
|
|
unsigned long id;
|
|
|
|
/* Fill out a communications ring structure. */
|
|
- id = blkif_ring_get_request(rinfo, req, &ring_req);
|
|
+ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
|
|
+ ring_req = &rinfo->shadow[id].req;
|
|
|
|
ring_req->operation = BLKIF_OP_DISCARD;
|
|
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
|
|
@@ -559,8 +562,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
|
|
else
|
|
ring_req->u.discard.flag = 0;
|
|
|
|
- /* Keep a private copy so we can reissue requests when recovering. */
|
|
- rinfo->shadow[id].req = *ring_req;
|
|
+ /* Copy the request to the ring page. */
|
|
+ *final_ring_req = *ring_req;
|
|
+ rinfo->shadow[id].status = REQ_WAITING;
|
|
|
|
return 0;
|
|
}
|
|
@@ -693,6 +697,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
|
|
{
|
|
struct blkfront_info *info = rinfo->dev_info;
|
|
struct blkif_request *ring_req, *extra_ring_req = NULL;
|
|
+ struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
|
|
unsigned long id, extra_id = NO_ASSOCIATED_ID;
|
|
bool require_extra_req = false;
|
|
int i;
|
|
@@ -737,7 +742,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
|
|
}
|
|
|
|
/* Fill out a communications ring structure. */
|
|
- id = blkif_ring_get_request(rinfo, req, &ring_req);
|
|
+ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
|
|
+ ring_req = &rinfo->shadow[id].req;
|
|
|
|
num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
|
|
num_grant = 0;
|
|
@@ -788,7 +794,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
|
|
ring_req->u.rw.nr_segments = num_grant;
|
|
if (unlikely(require_extra_req)) {
|
|
extra_id = blkif_ring_get_request(rinfo, req,
|
|
- &extra_ring_req);
|
|
+ &final_extra_ring_req);
|
|
+ extra_ring_req = &rinfo->shadow[extra_id].req;
|
|
+
|
|
/*
|
|
* Only the first request contains the scatter-gather
|
|
* list.
|
|
@@ -830,10 +838,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
|
|
if (setup.segments)
|
|
kunmap_atomic(setup.segments);
|
|
|
|
- /* Keep a private copy so we can reissue requests when recovering. */
|
|
- rinfo->shadow[id].req = *ring_req;
|
|
- if (unlikely(require_extra_req))
|
|
- rinfo->shadow[extra_id].req = *extra_ring_req;
|
|
+ /* Copy request(s) to the ring page. */
|
|
+ *final_ring_req = *ring_req;
|
|
+ rinfo->shadow[id].status = REQ_WAITING;
|
|
+ if (unlikely(require_extra_req)) {
|
|
+ *final_extra_ring_req = *extra_ring_req;
|
|
+ rinfo->shadow[extra_id].status = REQ_WAITING;
|
|
+ }
|
|
|
|
if (new_persistent_gnts)
|
|
gnttab_free_grant_references(setup.gref_head);
|
|
@@ -1407,8 +1418,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
|
|
static int blkif_get_final_status(enum blk_req_status s1,
|
|
enum blk_req_status s2)
|
|
{
|
|
- BUG_ON(s1 == REQ_WAITING);
|
|
- BUG_ON(s2 == REQ_WAITING);
|
|
+ BUG_ON(s1 < REQ_DONE);
|
|
+ BUG_ON(s2 < REQ_DONE);
|
|
|
|
if (s1 == REQ_ERROR || s2 == REQ_ERROR)
|
|
return BLKIF_RSP_ERROR;
|
|
@@ -1441,7 +1452,7 @@ static bool blkif_completion(unsigned long *id,
|
|
s->status = blkif_rsp_to_req_status(bret->status);
|
|
|
|
/* Wait the second response if not yet here. */
|
|
- if (s2->status == REQ_WAITING)
|
|
+ if (s2->status < REQ_DONE)
|
|
return false;
|
|
|
|
bret->status = blkif_get_final_status(s->status,
|
|
@@ -1549,7 +1560,7 @@ static bool blkif_completion(unsigned long *id,
|
|
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
|
{
|
|
struct request *req;
|
|
- struct blkif_response *bret;
|
|
+ struct blkif_response bret;
|
|
RING_IDX i, rp;
|
|
unsigned long flags;
|
|
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
|
|
@@ -1560,54 +1571,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
|
|
|
spin_lock_irqsave(&rinfo->ring_lock, flags);
|
|
again:
|
|
- rp = rinfo->ring.sring->rsp_prod;
|
|
- rmb(); /* Ensure we see queued responses up to 'rp'. */
|
|
+ rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
|
|
+ virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
|
|
+ if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
|
|
+ pr_alert("%s: illegal number of responses %u\n",
|
|
+ info->gd->disk_name, rp - rinfo->ring.rsp_cons);
|
|
+ goto err;
|
|
+ }
|
|
|
|
for (i = rinfo->ring.rsp_cons; i != rp; i++) {
|
|
unsigned long id;
|
|
+ unsigned int op;
|
|
+
|
|
+ RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
|
|
+ id = bret.id;
|
|
|
|
- bret = RING_GET_RESPONSE(&rinfo->ring, i);
|
|
- id = bret->id;
|
|
/*
|
|
* The backend has messed up and given us an id that we would
|
|
* never have given to it (we stamp it up to BLK_RING_SIZE -
|
|
* look in get_id_from_freelist.
|
|
*/
|
|
if (id >= BLK_RING_SIZE(info)) {
|
|
- WARN(1, "%s: response to %s has incorrect id (%ld)\n",
|
|
- info->gd->disk_name, op_name(bret->operation), id);
|
|
- /* We can't safely get the 'struct request' as
|
|
- * the id is busted. */
|
|
- continue;
|
|
+ pr_alert("%s: response has incorrect id (%ld)\n",
|
|
+ info->gd->disk_name, id);
|
|
+ goto err;
|
|
}
|
|
+ if (rinfo->shadow[id].status != REQ_WAITING) {
|
|
+ pr_alert("%s: response references no pending request\n",
|
|
+ info->gd->disk_name);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ rinfo->shadow[id].status = REQ_PROCESSING;
|
|
req = rinfo->shadow[id].request;
|
|
|
|
- if (bret->operation != BLKIF_OP_DISCARD) {
|
|
+ op = rinfo->shadow[id].req.operation;
|
|
+ if (op == BLKIF_OP_INDIRECT)
|
|
+ op = rinfo->shadow[id].req.u.indirect.indirect_op;
|
|
+ if (bret.operation != op) {
|
|
+ pr_alert("%s: response has wrong operation (%u instead of %u)\n",
|
|
+ info->gd->disk_name, bret.operation, op);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (bret.operation != BLKIF_OP_DISCARD) {
|
|
/*
|
|
* We may need to wait for an extra response if the
|
|
* I/O request is split in 2
|
|
*/
|
|
- if (!blkif_completion(&id, rinfo, bret))
|
|
+ if (!blkif_completion(&id, rinfo, &bret))
|
|
continue;
|
|
}
|
|
|
|
if (add_id_to_freelist(rinfo, id)) {
|
|
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
|
|
- info->gd->disk_name, op_name(bret->operation), id);
|
|
+ info->gd->disk_name, op_name(bret.operation), id);
|
|
continue;
|
|
}
|
|
|
|
- if (bret->status == BLKIF_RSP_OKAY)
|
|
+ if (bret.status == BLKIF_RSP_OKAY)
|
|
blkif_req(req)->error = BLK_STS_OK;
|
|
else
|
|
blkif_req(req)->error = BLK_STS_IOERR;
|
|
|
|
- switch (bret->operation) {
|
|
+ switch (bret.operation) {
|
|
case BLKIF_OP_DISCARD:
|
|
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
|
|
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
|
|
struct request_queue *rq = info->rq;
|
|
- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
|
|
- info->gd->disk_name, op_name(bret->operation));
|
|
+
|
|
+ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
|
|
+ info->gd->disk_name, op_name(bret.operation));
|
|
blkif_req(req)->error = BLK_STS_NOTSUPP;
|
|
info->feature_discard = 0;
|
|
info->feature_secdiscard = 0;
|
|
@@ -1617,15 +1650,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
|
break;
|
|
case BLKIF_OP_FLUSH_DISKCACHE:
|
|
case BLKIF_OP_WRITE_BARRIER:
|
|
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
|
|
- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
|
|
- info->gd->disk_name, op_name(bret->operation));
|
|
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
|
|
+ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
|
|
+ info->gd->disk_name, op_name(bret.operation));
|
|
blkif_req(req)->error = BLK_STS_NOTSUPP;
|
|
}
|
|
- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
|
|
+ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
|
|
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
|
|
- printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
|
|
- info->gd->disk_name, op_name(bret->operation));
|
|
+ pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
|
|
+ info->gd->disk_name, op_name(bret.operation));
|
|
blkif_req(req)->error = BLK_STS_NOTSUPP;
|
|
}
|
|
if (unlikely(blkif_req(req)->error)) {
|
|
@@ -1638,9 +1671,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
|
/* fall through */
|
|
case BLKIF_OP_READ:
|
|
case BLKIF_OP_WRITE:
|
|
- if (unlikely(bret->status != BLKIF_RSP_OKAY))
|
|
- dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
|
|
- "request: %x\n", bret->status);
|
|
+ if (unlikely(bret.status != BLKIF_RSP_OKAY))
|
|
+ dev_dbg_ratelimited(&info->xbdev->dev,
|
|
+ "Bad return from blkdev data request: %#x\n",
|
|
+ bret.status);
|
|
|
|
break;
|
|
default:
|
|
@@ -1665,6 +1699,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
|
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
|
|
|
|
return IRQ_HANDLED;
|
|
+
|
|
+ err:
|
|
+ info->connected = BLKIF_STATE_ERROR;
|
|
+
|
|
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
|
|
+
|
|
+ pr_alert("%s disabled for further use\n", info->gd->disk_name);
|
|
+ return IRQ_HANDLED;
|
|
}
|
|
|
|
|
|
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
|
|
index 041f8152272bf..177874adccf0d 100644
|
|
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
|
|
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
|
|
@@ -106,9 +106,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
|
|
scmi_pd_data->domains = domains;
|
|
scmi_pd_data->num_domains = num_domains;
|
|
|
|
- of_genpd_add_provider_onecell(np, scmi_pd_data);
|
|
-
|
|
- return 0;
|
|
+ return of_genpd_add_provider_onecell(np, scmi_pd_data);
|
|
}
|
|
|
|
static const struct scmi_device_id scmi_id_table[] = {
|
|
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
|
|
index 72d30d90b856c..0af246a5609ca 100644
|
|
--- a/drivers/gpu/drm/vc4/vc4_bo.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
|
|
@@ -389,7 +389,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
|
|
|
|
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
|
|
if (!bo)
|
|
- return ERR_PTR(-ENOMEM);
|
|
+ return NULL;
|
|
|
|
bo->madv = VC4_MADV_WILLNEED;
|
|
refcount_set(&bo->usecnt, 0);
|
|
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
|
|
index f6be2e70a4967..e011839f19f89 100644
|
|
--- a/drivers/hid/wacom_wac.c
|
|
+++ b/drivers/hid/wacom_wac.c
|
|
@@ -2578,6 +2578,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
|
|
return;
|
|
|
|
switch (equivalent_usage) {
|
|
+ case HID_DG_CONFIDENCE:
|
|
+ wacom_wac->hid_data.confidence = value;
|
|
+ break;
|
|
case HID_GD_X:
|
|
wacom_wac->hid_data.x = value;
|
|
break;
|
|
@@ -2610,7 +2613,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
|
|
}
|
|
|
|
if (usage->usage_index + 1 == field->report_count) {
|
|
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
|
|
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
|
|
+ wacom_wac->hid_data.confidence)
|
|
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
|
|
}
|
|
}
|
|
@@ -2625,6 +2629,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
|
|
|
|
wacom_wac->is_invalid_bt_frame = false;
|
|
|
|
+ hid_data->confidence = true;
|
|
+
|
|
for (i = 0; i < report->maxfield; i++) {
|
|
struct hid_field *field = report->field[i];
|
|
int j;
|
|
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
|
|
index e3835407e8d23..8dea7cb298e69 100644
|
|
--- a/drivers/hid/wacom_wac.h
|
|
+++ b/drivers/hid/wacom_wac.h
|
|
@@ -300,6 +300,7 @@ struct hid_data {
|
|
bool tipswitch;
|
|
bool barrelswitch;
|
|
bool barrelswitch2;
|
|
+ bool confidence;
|
|
int x;
|
|
int y;
|
|
int pressure;
|
|
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
|
|
index 06383b26712b6..56857ac0a0be2 100644
|
|
--- a/drivers/media/cec/cec-adap.c
|
|
+++ b/drivers/media/cec/cec-adap.c
|
|
@@ -1191,6 +1191,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
|
|
if (abort)
|
|
dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
|
|
msg->flags = dst->flags;
|
|
+ msg->sequence = dst->sequence;
|
|
/* Remove it from the wait_queue */
|
|
list_del_init(&data->list);
|
|
|
|
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
|
|
index cb54fa2120d72..deafcc56adee6 100644
|
|
--- a/drivers/mmc/host/sdhci.c
|
|
+++ b/drivers/mmc/host/sdhci.c
|
|
@@ -749,7 +749,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
|
|
len -= offset;
|
|
}
|
|
|
|
- BUG_ON(len > 65536);
|
|
+ /*
|
|
+ * The block layer forces a minimum segment size of PAGE_SIZE,
|
|
+ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
|
|
+ * multiple descriptors, noting that the ADMA table is sized
|
|
+ * for 4KiB chunks anyway, so it will be big enough.
|
|
+ */
|
|
+ while (len > host->max_adma) {
|
|
+ int n = 32 * 1024; /* 32KiB*/
|
|
+
|
|
+ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
|
|
+ addr += n;
|
|
+ len -= n;
|
|
+ }
|
|
|
|
/* tran, valid */
|
|
if (len)
|
|
@@ -3568,6 +3580,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
|
|
* descriptor for each segment, plus 1 for a nop end descriptor.
|
|
*/
|
|
host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
|
|
+ host->max_adma = 65536;
|
|
|
|
return host;
|
|
}
|
|
@@ -4221,10 +4234,12 @@ int sdhci_setup_host(struct sdhci_host *host)
|
|
* be larger than 64 KiB though.
|
|
*/
|
|
if (host->flags & SDHCI_USE_ADMA) {
|
|
- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
|
|
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
|
|
+ host->max_adma = 65532; /* 32-bit alignment */
|
|
mmc->max_seg_size = 65535;
|
|
- else
|
|
+ } else {
|
|
mmc->max_seg_size = 65536;
|
|
+ }
|
|
} else {
|
|
mmc->max_seg_size = mmc->max_req_size;
|
|
}
|
|
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
|
|
index 96a0a8f97f559..54f9d6720f132 100644
|
|
--- a/drivers/mmc/host/sdhci.h
|
|
+++ b/drivers/mmc/host/sdhci.h
|
|
@@ -349,7 +349,8 @@ struct sdhci_adma2_64_desc {
|
|
|
|
/*
|
|
* Maximum segments assuming a 512KiB maximum requisition size and a minimum
|
|
- * 4KiB page size.
|
|
+ * 4KiB page size. Note this also allows enough for multiple descriptors in
|
|
+ * case of PAGE_SIZE >= 64KiB.
|
|
*/
|
|
#define SDHCI_MAX_SEGS 128
|
|
|
|
@@ -547,6 +548,7 @@ struct sdhci_host {
|
|
unsigned int blocks; /* remaining PIO blocks */
|
|
|
|
int sg_count; /* Mapped sg entries */
|
|
+ int max_adma; /* Max. length in ADMA descriptor */
|
|
|
|
void *adma_table; /* ADMA descriptor table */
|
|
void *align_buffer; /* Bounce buffer */
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
|
|
index db2e9dd5681eb..ce6a4e1965e1d 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
|
|
@@ -644,9 +644,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
|
|
roundup_size = ilog2(roundup_size);
|
|
|
|
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
|
|
- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
|
|
+ tc_valid[i] = 1;
|
|
tc_size[i] = roundup_size;
|
|
- tc_offset[i] = rss_size * i;
|
|
+ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
|
|
}
|
|
|
|
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
|
|
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
|
|
index ad1e796e5544a..4e0e1b02d615e 100644
|
|
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
|
|
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
|
|
@@ -719,12 +719,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
|
|
*
|
|
* Change the ITR settings for a specific queue.
|
|
**/
|
|
-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
|
|
- struct ethtool_coalesce *ec, int queue)
|
|
+static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
|
|
+ struct ethtool_coalesce *ec, int queue)
|
|
{
|
|
struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
|
|
struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
|
|
struct iavf_q_vector *q_vector;
|
|
+ u16 itr_setting;
|
|
+
|
|
+ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
|
|
+
|
|
+ if (ec->rx_coalesce_usecs != itr_setting &&
|
|
+ ec->use_adaptive_rx_coalesce) {
|
|
+ netif_info(adapter, drv, adapter->netdev,
|
|
+ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
|
|
+
|
|
+ if (ec->tx_coalesce_usecs != itr_setting &&
|
|
+ ec->use_adaptive_tx_coalesce) {
|
|
+ netif_info(adapter, drv, adapter->netdev,
|
|
+ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
|
|
tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
|
|
@@ -747,6 +766,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
|
|
* the Tx and Rx ITR values based on the values we have entered
|
|
* into the q_vector, no need to write the values now.
|
|
*/
|
|
+ return 0;
|
|
}
|
|
|
|
/**
|
|
@@ -788,9 +808,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
|
|
*/
|
|
if (queue < 0) {
|
|
for (i = 0; i < adapter->num_active_queues; i++)
|
|
- iavf_set_itr_per_queue(adapter, ec, i);
|
|
+ if (iavf_set_itr_per_queue(adapter, ec, i))
|
|
+ return -EINVAL;
|
|
} else if (queue < adapter->num_active_queues) {
|
|
- iavf_set_itr_per_queue(adapter, ec, queue);
|
|
+ if (iavf_set_itr_per_queue(adapter, ec, queue))
|
|
+ return -EINVAL;
|
|
} else {
|
|
netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
|
|
adapter->num_active_queues - 1);
|
|
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
index 158feb0ab2739..c11244a9b7e69 100644
|
|
--- a/drivers/net/ethernet/intel/igb/igb_main.c
|
|
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
@@ -7752,7 +7752,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
|
|
if (likely(napi_complete_done(napi, work_done)))
|
|
igb_ring_irq_enable(q_vector);
|
|
|
|
- return min(work_done, budget - 1);
|
|
+ return work_done;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
|
|
index 6030c90d50ccb..bf7832b34a000 100644
|
|
--- a/drivers/net/ethernet/mscc/ocelot.c
|
|
+++ b/drivers/net/ethernet/mscc/ocelot.c
|
|
@@ -1024,12 +1024,6 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
|
|
switch (cfg.rx_filter) {
|
|
case HWTSTAMP_FILTER_NONE:
|
|
break;
|
|
- case HWTSTAMP_FILTER_ALL:
|
|
- case HWTSTAMP_FILTER_SOME:
|
|
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
|
|
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
|
|
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
|
|
- case HWTSTAMP_FILTER_NTP_ALL:
|
|
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
|
|
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
|
|
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
|
|
@@ -1189,7 +1183,10 @@ static int ocelot_get_ts_info(struct net_device *dev,
|
|
SOF_TIMESTAMPING_RAW_HARDWARE;
|
|
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
|
|
BIT(HWTSTAMP_TX_ONESTEP_SYNC);
|
|
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
|
|
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
|
|
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
|
|
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
|
|
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
|
|
index 250f510b1d212..3dcb09f17b77f 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
|
|
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
|
|
@@ -557,7 +557,6 @@ struct nfp_net_dp {
|
|
* @exn_name: Name for Exception interrupt
|
|
* @shared_handler: Handler for shared interrupts
|
|
* @shared_name: Name for shared interrupt
|
|
- * @me_freq_mhz: ME clock_freq (MHz)
|
|
* @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
|
|
* @reconfig_sync_present and HW reconfiguration request
|
|
* regs/machinery from async requests (sync must take
|
|
@@ -639,8 +638,6 @@ struct nfp_net {
|
|
irq_handler_t shared_handler;
|
|
char shared_name[IFNAMSIZ + 8];
|
|
|
|
- u32 me_freq_mhz;
|
|
-
|
|
bool link_up;
|
|
spinlock_t link_status_lock;
|
|
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
|
|
index 2354dec994184..89e578e25ff8f 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
|
|
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
|
|
@@ -1269,7 +1269,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
|
|
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
|
|
* count.
|
|
*/
|
|
- factor = nn->me_freq_mhz / 16;
|
|
+ factor = nn->tlv_caps.me_freq_mhz / 16;
|
|
|
|
/* Each pair of (usecs, max_frames) fields specifies that interrupts
|
|
* should be coalesced until
|
|
diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c
|
|
index cad820568f751..966c3b4ad59d1 100644
|
|
--- a/drivers/net/phy/mdio-aspeed.c
|
|
+++ b/drivers/net/phy/mdio-aspeed.c
|
|
@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
|
|
|
|
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
|
|
|
|
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
|
|
+ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
|
|
+ ASPEED_MDIO_INTERVAL_US,
|
|
+ ASPEED_MDIO_TIMEOUT_US);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+
|
|
rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
|
|
data & ASPEED_MDIO_DATA_IDLE,
|
|
ASPEED_MDIO_INTERVAL_US,
|
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
|
index 7d389c2cc9026..d6f44343213cc 100644
|
|
--- a/drivers/net/xen-netfront.c
|
|
+++ b/drivers/net/xen-netfront.c
|
|
@@ -121,21 +121,17 @@ struct netfront_queue {
|
|
|
|
/*
|
|
* {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
|
|
- * are linked from tx_skb_freelist through skb_entry.link.
|
|
- *
|
|
- * NB. Freelist index entries are always going to be less than
|
|
- * PAGE_OFFSET, whereas pointers to skbs will always be equal or
|
|
- * greater than PAGE_OFFSET: we use this property to distinguish
|
|
- * them.
|
|
+ * are linked from tx_skb_freelist through tx_link.
|
|
*/
|
|
- union skb_entry {
|
|
- struct sk_buff *skb;
|
|
- unsigned long link;
|
|
- } tx_skbs[NET_TX_RING_SIZE];
|
|
+ struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
|
|
+ unsigned short tx_link[NET_TX_RING_SIZE];
|
|
+#define TX_LINK_NONE 0xffff
|
|
+#define TX_PENDING 0xfffe
|
|
grant_ref_t gref_tx_head;
|
|
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
|
|
struct page *grant_tx_page[NET_TX_RING_SIZE];
|
|
unsigned tx_skb_freelist;
|
|
+ unsigned int tx_pend_queue;
|
|
|
|
spinlock_t rx_lock ____cacheline_aligned_in_smp;
|
|
struct xen_netif_rx_front_ring rx;
|
|
@@ -161,6 +157,9 @@ struct netfront_info {
|
|
struct netfront_stats __percpu *rx_stats;
|
|
struct netfront_stats __percpu *tx_stats;
|
|
|
|
+ /* Is device behaving sane? */
|
|
+ bool broken;
|
|
+
|
|
atomic_t rx_gso_checksum_fixup;
|
|
};
|
|
|
|
@@ -169,33 +168,25 @@ struct netfront_rx_info {
|
|
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
|
|
};
|
|
|
|
-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
|
|
-{
|
|
- list->link = id;
|
|
-}
|
|
-
|
|
-static int skb_entry_is_link(const union skb_entry *list)
|
|
-{
|
|
- BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
|
|
- return (unsigned long)list->skb < PAGE_OFFSET;
|
|
-}
|
|
-
|
|
/*
|
|
* Access macros for acquiring freeing slots in tx_skbs[].
|
|
*/
|
|
|
|
-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
|
|
- unsigned short id)
|
|
+static void add_id_to_list(unsigned *head, unsigned short *list,
|
|
+ unsigned short id)
|
|
{
|
|
- skb_entry_set_link(&list[id], *head);
|
|
+ list[id] = *head;
|
|
*head = id;
|
|
}
|
|
|
|
-static unsigned short get_id_from_freelist(unsigned *head,
|
|
- union skb_entry *list)
|
|
+static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
|
|
{
|
|
unsigned int id = *head;
|
|
- *head = list[id].link;
|
|
+
|
|
+ if (id != TX_LINK_NONE) {
|
|
+ *head = list[id];
|
|
+ list[id] = TX_LINK_NONE;
|
|
+ }
|
|
return id;
|
|
}
|
|
|
|
@@ -351,7 +342,7 @@ static int xennet_open(struct net_device *dev)
|
|
unsigned int i = 0;
|
|
struct netfront_queue *queue = NULL;
|
|
|
|
- if (!np->queues)
|
|
+ if (!np->queues || np->broken)
|
|
return -ENODEV;
|
|
|
|
for (i = 0; i < num_queues; ++i) {
|
|
@@ -379,27 +370,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
|
|
unsigned short id;
|
|
struct sk_buff *skb;
|
|
bool more_to_do;
|
|
+ const struct device *dev = &queue->info->netdev->dev;
|
|
|
|
BUG_ON(!netif_carrier_ok(queue->info->netdev));
|
|
|
|
do {
|
|
prod = queue->tx.sring->rsp_prod;
|
|
+ if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
|
|
+ dev_alert(dev, "Illegal number of responses %u\n",
|
|
+ prod - queue->tx.rsp_cons);
|
|
+ goto err;
|
|
+ }
|
|
rmb(); /* Ensure we see responses up to 'rp'. */
|
|
|
|
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
|
|
- struct xen_netif_tx_response *txrsp;
|
|
+ struct xen_netif_tx_response txrsp;
|
|
|
|
- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
|
|
- if (txrsp->status == XEN_NETIF_RSP_NULL)
|
|
+ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
|
|
+ if (txrsp.status == XEN_NETIF_RSP_NULL)
|
|
continue;
|
|
|
|
- id = txrsp->id;
|
|
- skb = queue->tx_skbs[id].skb;
|
|
+ id = txrsp.id;
|
|
+ if (id >= RING_SIZE(&queue->tx)) {
|
|
+ dev_alert(dev,
|
|
+ "Response has incorrect id (%u)\n",
|
|
+ id);
|
|
+ goto err;
|
|
+ }
|
|
+ if (queue->tx_link[id] != TX_PENDING) {
|
|
+ dev_alert(dev,
|
|
+ "Response for inactive request\n");
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ queue->tx_link[id] = TX_LINK_NONE;
|
|
+ skb = queue->tx_skbs[id];
|
|
+ queue->tx_skbs[id] = NULL;
|
|
if (unlikely(gnttab_query_foreign_access(
|
|
queue->grant_tx_ref[id]) != 0)) {
|
|
- pr_alert("%s: warning -- grant still in use by backend domain\n",
|
|
- __func__);
|
|
- BUG();
|
|
+ dev_alert(dev,
|
|
+ "Grant still in use by backend domain\n");
|
|
+ goto err;
|
|
}
|
|
gnttab_end_foreign_access_ref(
|
|
queue->grant_tx_ref[id], GNTMAP_readonly);
|
|
@@ -407,7 +418,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
|
|
&queue->gref_tx_head, queue->grant_tx_ref[id]);
|
|
queue->grant_tx_ref[id] = GRANT_INVALID_REF;
|
|
queue->grant_tx_page[id] = NULL;
|
|
- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
|
|
+ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
|
|
dev_kfree_skb_irq(skb);
|
|
}
|
|
|
|
@@ -417,13 +428,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
|
|
} while (more_to_do);
|
|
|
|
xennet_maybe_wake_tx(queue);
|
|
+
|
|
+ return;
|
|
+
|
|
+ err:
|
|
+ queue->info->broken = true;
|
|
+ dev_alert(dev, "Disabled for further use\n");
|
|
}
|
|
|
|
struct xennet_gnttab_make_txreq {
|
|
struct netfront_queue *queue;
|
|
struct sk_buff *skb;
|
|
struct page *page;
|
|
- struct xen_netif_tx_request *tx; /* Last request */
|
|
+ struct xen_netif_tx_request *tx; /* Last request on ring page */
|
|
+ struct xen_netif_tx_request tx_local; /* Last request local copy*/
|
|
unsigned int size;
|
|
};
|
|
|
|
@@ -439,7 +457,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
|
|
struct netfront_queue *queue = info->queue;
|
|
struct sk_buff *skb = info->skb;
|
|
|
|
- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
|
|
+ id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
|
|
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
|
|
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
|
|
WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
|
|
@@ -447,34 +465,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
|
|
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
|
|
gfn, GNTMAP_readonly);
|
|
|
|
- queue->tx_skbs[id].skb = skb;
|
|
+ queue->tx_skbs[id] = skb;
|
|
queue->grant_tx_page[id] = page;
|
|
queue->grant_tx_ref[id] = ref;
|
|
|
|
- tx->id = id;
|
|
- tx->gref = ref;
|
|
- tx->offset = offset;
|
|
- tx->size = len;
|
|
- tx->flags = 0;
|
|
+ info->tx_local.id = id;
|
|
+ info->tx_local.gref = ref;
|
|
+ info->tx_local.offset = offset;
|
|
+ info->tx_local.size = len;
|
|
+ info->tx_local.flags = 0;
|
|
+
|
|
+ *tx = info->tx_local;
|
|
+
|
|
+ /*
|
|
+ * Put the request in the pending queue, it will be set to be pending
|
|
+ * when the producer index is about to be raised.
|
|
+ */
|
|
+ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
|
|
|
|
info->tx = tx;
|
|
- info->size += tx->size;
|
|
+ info->size += info->tx_local.size;
|
|
}
|
|
|
|
static struct xen_netif_tx_request *xennet_make_first_txreq(
|
|
- struct netfront_queue *queue, struct sk_buff *skb,
|
|
- struct page *page, unsigned int offset, unsigned int len)
|
|
+ struct xennet_gnttab_make_txreq *info,
|
|
+ unsigned int offset, unsigned int len)
|
|
{
|
|
- struct xennet_gnttab_make_txreq info = {
|
|
- .queue = queue,
|
|
- .skb = skb,
|
|
- .page = page,
|
|
- .size = 0,
|
|
- };
|
|
+ info->size = 0;
|
|
|
|
- gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
|
|
+ gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
|
|
|
|
- return info.tx;
|
|
+ return info->tx;
|
|
}
|
|
|
|
static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
|
|
@@ -487,35 +508,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
|
|
xennet_tx_setup_grant(gfn, offset, len, data);
|
|
}
|
|
|
|
-static struct xen_netif_tx_request *xennet_make_txreqs(
|
|
- struct netfront_queue *queue, struct xen_netif_tx_request *tx,
|
|
- struct sk_buff *skb, struct page *page,
|
|
+static void xennet_make_txreqs(
|
|
+ struct xennet_gnttab_make_txreq *info,
|
|
+ struct page *page,
|
|
unsigned int offset, unsigned int len)
|
|
{
|
|
- struct xennet_gnttab_make_txreq info = {
|
|
- .queue = queue,
|
|
- .skb = skb,
|
|
- .tx = tx,
|
|
- };
|
|
-
|
|
/* Skip unused frames from start of page */
|
|
page += offset >> PAGE_SHIFT;
|
|
offset &= ~PAGE_MASK;
|
|
|
|
while (len) {
|
|
- info.page = page;
|
|
- info.size = 0;
|
|
+ info->page = page;
|
|
+ info->size = 0;
|
|
|
|
gnttab_foreach_grant_in_range(page, offset, len,
|
|
xennet_make_one_txreq,
|
|
- &info);
|
|
+ info);
|
|
|
|
page++;
|
|
offset = 0;
|
|
- len -= info.size;
|
|
+ len -= info->size;
|
|
}
|
|
-
|
|
- return info.tx;
|
|
}
|
|
|
|
/*
|
|
@@ -562,13 +575,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
|
|
return queue_idx;
|
|
}
|
|
|
|
+static void xennet_mark_tx_pending(struct netfront_queue *queue)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
|
|
+ TX_LINK_NONE)
|
|
+ queue->tx_link[i] = TX_PENDING;
|
|
+}
|
|
+
|
|
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
|
|
|
|
static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
{
|
|
struct netfront_info *np = netdev_priv(dev);
|
|
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
|
|
- struct xen_netif_tx_request *tx, *first_tx;
|
|
+ struct xen_netif_tx_request *first_tx;
|
|
unsigned int i;
|
|
int notify;
|
|
int slots;
|
|
@@ -577,6 +599,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
unsigned int len;
|
|
unsigned long flags;
|
|
struct netfront_queue *queue = NULL;
|
|
+ struct xennet_gnttab_make_txreq info = { };
|
|
unsigned int num_queues = dev->real_num_tx_queues;
|
|
u16 queue_index;
|
|
struct sk_buff *nskb;
|
|
@@ -584,6 +607,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
/* Drop the packet if no queues are set up */
|
|
if (num_queues < 1)
|
|
goto drop;
|
|
+ if (unlikely(np->broken))
|
|
+ goto drop;
|
|
/* Determine which queue to transmit this SKB on */
|
|
queue_index = skb_get_queue_mapping(skb);
|
|
queue = &np->queues[queue_index];
|
|
@@ -634,21 +659,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
}
|
|
|
|
/* First request for the linear area. */
|
|
- first_tx = tx = xennet_make_first_txreq(queue, skb,
|
|
- page, offset, len);
|
|
- offset += tx->size;
|
|
+ info.queue = queue;
|
|
+ info.skb = skb;
|
|
+ info.page = page;
|
|
+ first_tx = xennet_make_first_txreq(&info, offset, len);
|
|
+ offset += info.tx_local.size;
|
|
if (offset == PAGE_SIZE) {
|
|
page++;
|
|
offset = 0;
|
|
}
|
|
- len -= tx->size;
|
|
+ len -= info.tx_local.size;
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL)
|
|
/* local packet? */
|
|
- tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
|
|
+ first_tx->flags |= XEN_NETTXF_csum_blank |
|
|
+ XEN_NETTXF_data_validated;
|
|
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
|
|
/* remote but checksummed. */
|
|
- tx->flags |= XEN_NETTXF_data_validated;
|
|
+ first_tx->flags |= XEN_NETTXF_data_validated;
|
|
|
|
/* Optional extra info after the first request. */
|
|
if (skb_shinfo(skb)->gso_size) {
|
|
@@ -657,7 +685,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
gso = (struct xen_netif_extra_info *)
|
|
RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
|
|
|
|
- tx->flags |= XEN_NETTXF_extra_info;
|
|
+ first_tx->flags |= XEN_NETTXF_extra_info;
|
|
|
|
gso->u.gso.size = skb_shinfo(skb)->gso_size;
|
|
gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
|
|
@@ -671,12 +699,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
}
|
|
|
|
/* Requests for the rest of the linear area. */
|
|
- tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
|
|
+ xennet_make_txreqs(&info, page, offset, len);
|
|
|
|
/* Requests for all the frags. */
|
|
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
|
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
|
- tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
|
|
+ xennet_make_txreqs(&info, skb_frag_page(frag),
|
|
skb_frag_off(frag),
|
|
skb_frag_size(frag));
|
|
}
|
|
@@ -684,6 +712,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
/* First request has the packet length. */
|
|
first_tx->size = skb->len;
|
|
|
|
+ xennet_mark_tx_pending(queue);
|
|
+
|
|
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
|
|
if (notify)
|
|
notify_remote_via_irq(queue->tx_irq);
|
|
@@ -741,7 +771,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
|
|
RING_IDX rp)
|
|
|
|
{
|
|
- struct xen_netif_extra_info *extra;
|
|
+ struct xen_netif_extra_info extra;
|
|
struct device *dev = &queue->info->netdev->dev;
|
|
RING_IDX cons = queue->rx.rsp_cons;
|
|
int err = 0;
|
|
@@ -757,24 +787,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
|
|
break;
|
|
}
|
|
|
|
- extra = (struct xen_netif_extra_info *)
|
|
- RING_GET_RESPONSE(&queue->rx, ++cons);
|
|
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
|
|
|
|
- if (unlikely(!extra->type ||
|
|
- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
|
|
+ if (unlikely(!extra.type ||
|
|
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
|
|
if (net_ratelimit())
|
|
dev_warn(dev, "Invalid extra type: %d\n",
|
|
- extra->type);
|
|
+ extra.type);
|
|
err = -EINVAL;
|
|
} else {
|
|
- memcpy(&extras[extra->type - 1], extra,
|
|
- sizeof(*extra));
|
|
+ extras[extra.type - 1] = extra;
|
|
}
|
|
|
|
skb = xennet_get_rx_skb(queue, cons);
|
|
ref = xennet_get_rx_ref(queue, cons);
|
|
xennet_move_rx_slot(queue, skb, ref);
|
|
- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
|
|
+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
|
|
|
|
queue->rx.rsp_cons = cons;
|
|
return err;
|
|
@@ -784,7 +812,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
|
|
struct netfront_rx_info *rinfo, RING_IDX rp,
|
|
struct sk_buff_head *list)
|
|
{
|
|
- struct xen_netif_rx_response *rx = &rinfo->rx;
|
|
+ struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
|
|
struct xen_netif_extra_info *extras = rinfo->extras;
|
|
struct device *dev = &queue->info->netdev->dev;
|
|
RING_IDX cons = queue->rx.rsp_cons;
|
|
@@ -842,7 +870,8 @@ next:
|
|
break;
|
|
}
|
|
|
|
- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
|
|
+ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
|
|
+ rx = &rx_local;
|
|
skb = xennet_get_rx_skb(queue, cons + slots);
|
|
ref = xennet_get_rx_ref(queue, cons + slots);
|
|
slots++;
|
|
@@ -897,10 +926,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
|
|
struct sk_buff *nskb;
|
|
|
|
while ((nskb = __skb_dequeue(list))) {
|
|
- struct xen_netif_rx_response *rx =
|
|
- RING_GET_RESPONSE(&queue->rx, ++cons);
|
|
+ struct xen_netif_rx_response rx;
|
|
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
|
|
|
|
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
|
|
+
|
|
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
|
|
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
|
|
|
|
@@ -915,7 +945,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
|
|
|
|
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
|
|
skb_frag_page(nfrag),
|
|
- rx->offset, rx->status, PAGE_SIZE);
|
|
+ rx.offset, rx.status, PAGE_SIZE);
|
|
|
|
skb_shinfo(nskb)->nr_frags = 0;
|
|
kfree_skb(nskb);
|
|
@@ -1008,12 +1038,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
|
|
skb_queue_head_init(&tmpq);
|
|
|
|
rp = queue->rx.sring->rsp_prod;
|
|
+ if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
|
|
+ dev_alert(&dev->dev, "Illegal number of responses %u\n",
|
|
+ rp - queue->rx.rsp_cons);
|
|
+ queue->info->broken = true;
|
|
+ spin_unlock(&queue->rx_lock);
|
|
+ return 0;
|
|
+ }
|
|
rmb(); /* Ensure we see queued responses up to 'rp'. */
|
|
|
|
i = queue->rx.rsp_cons;
|
|
work_done = 0;
|
|
while ((i != rp) && (work_done < budget)) {
|
|
- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
|
|
+ RING_COPY_RESPONSE(&queue->rx, i, rx);
|
|
memset(extras, 0, sizeof(rinfo.extras));
|
|
|
|
err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
|
|
@@ -1135,17 +1172,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
|
|
|
|
for (i = 0; i < NET_TX_RING_SIZE; i++) {
|
|
/* Skip over entries which are actually freelist references */
|
|
- if (skb_entry_is_link(&queue->tx_skbs[i]))
|
|
+ if (!queue->tx_skbs[i])
|
|
continue;
|
|
|
|
- skb = queue->tx_skbs[i].skb;
|
|
+ skb = queue->tx_skbs[i];
|
|
+ queue->tx_skbs[i] = NULL;
|
|
get_page(queue->grant_tx_page[i]);
|
|
gnttab_end_foreign_access(queue->grant_tx_ref[i],
|
|
GNTMAP_readonly,
|
|
(unsigned long)page_address(queue->grant_tx_page[i]));
|
|
queue->grant_tx_page[i] = NULL;
|
|
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
|
|
- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
|
|
+ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
|
|
dev_kfree_skb_irq(skb);
|
|
}
|
|
}
|
|
@@ -1225,6 +1263,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
|
|
struct netfront_queue *queue = dev_id;
|
|
unsigned long flags;
|
|
|
|
+ if (queue->info->broken)
|
|
+ return IRQ_HANDLED;
|
|
+
|
|
spin_lock_irqsave(&queue->tx_lock, flags);
|
|
xennet_tx_buf_gc(queue);
|
|
spin_unlock_irqrestore(&queue->tx_lock, flags);
|
|
@@ -1237,6 +1278,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
|
|
struct netfront_queue *queue = dev_id;
|
|
struct net_device *dev = queue->info->netdev;
|
|
|
|
+ if (queue->info->broken)
|
|
+ return IRQ_HANDLED;
|
|
+
|
|
if (likely(netif_carrier_ok(dev) &&
|
|
RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
|
|
napi_schedule(&queue->napi);
|
|
@@ -1258,6 +1302,10 @@ static void xennet_poll_controller(struct net_device *dev)
|
|
struct netfront_info *info = netdev_priv(dev);
|
|
unsigned int num_queues = dev->real_num_tx_queues;
|
|
unsigned int i;
|
|
+
|
|
+ if (info->broken)
|
|
+ return;
|
|
+
|
|
for (i = 0; i < num_queues; ++i)
|
|
xennet_interrupt(0, &info->queues[i]);
|
|
}
|
|
@@ -1627,13 +1675,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
|
|
snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
|
|
devid, queue->id);
|
|
|
|
- /* Initialise tx_skbs as a free chain containing every entry. */
|
|
+ /* Initialise tx_skb_freelist as a free chain containing every entry. */
|
|
queue->tx_skb_freelist = 0;
|
|
+ queue->tx_pend_queue = TX_LINK_NONE;
|
|
for (i = 0; i < NET_TX_RING_SIZE; i++) {
|
|
- skb_entry_set_link(&queue->tx_skbs[i], i+1);
|
|
+ queue->tx_link[i] = i + 1;
|
|
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
|
|
queue->grant_tx_page[i] = NULL;
|
|
}
|
|
+ queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
|
|
|
|
/* Clear out rx_skbs */
|
|
for (i = 0; i < NET_RX_RING_SIZE; i++) {
|
|
@@ -1838,6 +1888,9 @@ static int talk_to_netback(struct xenbus_device *dev,
|
|
if (info->queues)
|
|
xennet_destroy_queues(info);
|
|
|
|
+ /* For the case of a reconnect reset the "broken" indicator. */
|
|
+ info->broken = false;
|
|
+
|
|
err = xennet_create_queues(info, &num_queues);
|
|
if (err < 0) {
|
|
xenbus_dev_fatal(dev, err, "creating queues");
|
|
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
|
|
index 6ca17a0babae2..1c8d16b0245b1 100644
|
|
--- a/drivers/nvme/target/io-cmd-file.c
|
|
+++ b/drivers/nvme/target/io-cmd-file.c
|
|
@@ -8,6 +8,7 @@
|
|
#include <linux/uio.h>
|
|
#include <linux/falloc.h>
|
|
#include <linux/file.h>
|
|
+#include <linux/fs.h>
|
|
#include "nvmet.h"
|
|
|
|
#define NVMET_MAX_MPOOL_BVEC 16
|
|
@@ -254,7 +255,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
|
|
|
|
if (req->ns->buffered_io) {
|
|
if (likely(!req->f.mpool_alloc) &&
|
|
- nvmet_file_execute_io(req, IOCB_NOWAIT))
|
|
+ (req->ns->file->f_mode & FMODE_NOWAIT) &&
|
|
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
|
|
return;
|
|
nvmet_file_submit_buffered_io(req);
|
|
} else
|
|
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
|
|
index fac1985870765..4341c72446628 100644
|
|
--- a/drivers/nvme/target/tcp.c
|
|
+++ b/drivers/nvme/target/tcp.c
|
|
@@ -631,10 +631,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
|
|
static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
|
|
{
|
|
struct nvmet_tcp_queue *queue = cmd->queue;
|
|
+ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
|
|
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
|
|
struct kvec iov = {
|
|
.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
|
|
- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
|
|
+ .iov_len = left
|
|
};
|
|
int ret;
|
|
|
|
@@ -643,6 +644,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
|
|
return ret;
|
|
|
|
cmd->offset += ret;
|
|
+ left -= ret;
|
|
+
|
|
+ if (left)
|
|
+ return -EAGAIN;
|
|
|
|
if (queue->nvme_sq.sqhd_disabled) {
|
|
cmd->queue->snd_cmd = NULL;
|
|
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
|
|
index 45794ba643d40..9e208294946cd 100644
|
|
--- a/drivers/pci/controller/pci-aardvark.c
|
|
+++ b/drivers/pci/controller/pci-aardvark.c
|
|
@@ -9,6 +9,7 @@
|
|
*/
|
|
|
|
#include <linux/delay.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/irq.h>
|
|
#include <linux/irqdomain.h>
|
|
@@ -17,6 +18,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/of_address.h>
|
|
+#include <linux/of_gpio.h>
|
|
#include <linux/of_pci.h>
|
|
|
|
#include "../pci.h"
|
|
@@ -25,21 +27,8 @@
|
|
/* PCIe core registers */
|
|
#define PCIE_CORE_DEV_ID_REG 0x0
|
|
#define PCIE_CORE_CMD_STATUS_REG 0x4
|
|
-#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
|
|
-#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
|
|
-#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
|
|
#define PCIE_CORE_DEV_REV_REG 0x8
|
|
#define PCIE_CORE_PCIEXP_CAP 0xc0
|
|
-#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
|
|
-#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
|
|
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
|
|
-#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
|
|
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
|
|
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
|
|
-#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
|
|
-#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
|
|
-#define PCIE_CORE_LINK_TRAINING BIT(5)
|
|
-#define PCIE_CORE_LINK_WIDTH_SHIFT 20
|
|
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
|
|
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
|
|
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
|
|
@@ -122,6 +111,46 @@
|
|
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
|
|
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
|
|
|
|
+/* PCIe window configuration */
|
|
+#define OB_WIN_BASE_ADDR 0x4c00
|
|
+#define OB_WIN_BLOCK_SIZE 0x20
|
|
+#define OB_WIN_COUNT 8
|
|
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
|
|
+ OB_WIN_BLOCK_SIZE * (win) + \
|
|
+ (offset))
|
|
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
|
|
+#define OB_WIN_ENABLE BIT(0)
|
|
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
|
|
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
|
|
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
|
|
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
|
|
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
|
|
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
|
|
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
|
|
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
|
|
+#define OB_WIN_FUNC_NUM_SHIFT 24
|
|
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
|
|
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
|
|
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
|
|
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
|
|
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
|
|
+#define OB_WIN_MSG_CODE_SHIFT 14
|
|
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
|
|
+#define OB_WIN_ATTR_ENABLE BIT(11)
|
|
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
|
|
+#define OB_WIN_ATTR_TC_SHIFT 8
|
|
+#define OB_WIN_ATTR_RELAXED BIT(7)
|
|
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
|
|
+#define OB_WIN_ATTR_POISON BIT(5)
|
|
+#define OB_WIN_ATTR_IDO BIT(4)
|
|
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
|
|
+#define OB_WIN_TYPE_SHIFT 0
|
|
+#define OB_WIN_TYPE_MEM 0x0
|
|
+#define OB_WIN_TYPE_IO 0x4
|
|
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
|
|
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
|
|
+#define OB_WIN_TYPE_MSG 0xc
|
|
+
|
|
/* LMI registers base address and register offsets */
|
|
#define LMI_BASE_ADDR 0x6000
|
|
#define CFG_REG (LMI_BASE_ADDR + 0x0)
|
|
@@ -237,6 +266,13 @@ struct advk_pcie {
|
|
struct platform_device *pdev;
|
|
void __iomem *base;
|
|
struct list_head resources;
|
|
+ struct {
|
|
+ phys_addr_t match;
|
|
+ phys_addr_t remap;
|
|
+ phys_addr_t mask;
|
|
+ u32 actions;
|
|
+ } wins[OB_WIN_COUNT];
|
|
+ u8 wins_count;
|
|
struct irq_domain *irq_domain;
|
|
struct irq_chip irq_chip;
|
|
raw_spinlock_t irq_lock;
|
|
@@ -249,7 +285,9 @@ struct advk_pcie {
|
|
struct mutex msi_used_lock;
|
|
u16 msi_msg;
|
|
int root_bus_nr;
|
|
+ int link_gen;
|
|
struct pci_bridge_emul bridge;
|
|
+ struct gpio_desc *reset_gpio;
|
|
};
|
|
|
|
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
|
|
@@ -309,20 +347,16 @@ static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
|
|
|
|
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
|
|
{
|
|
- struct device *dev = &pcie->pdev->dev;
|
|
int retries;
|
|
|
|
/* check if the link is up or not */
|
|
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
|
|
- if (advk_pcie_link_up(pcie)) {
|
|
- dev_info(dev, "link up\n");
|
|
+ if (advk_pcie_link_up(pcie))
|
|
return 0;
|
|
- }
|
|
|
|
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
|
|
}
|
|
|
|
- dev_err(dev, "link never came up\n");
|
|
return -ETIMEDOUT;
|
|
}
|
|
|
|
@@ -337,9 +371,115 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
|
|
}
|
|
}
|
|
|
|
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
|
|
+{
|
|
+ if (!pcie->reset_gpio)
|
|
+ return;
|
|
+
|
|
+ /* 10ms delay is needed for some cards */
|
|
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
|
|
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
|
|
+ usleep_range(10000, 11000);
|
|
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
|
|
+}
|
|
+
|
|
+static void advk_pcie_train_link(struct advk_pcie *pcie)
|
|
+{
|
|
+ struct device *dev = &pcie->pdev->dev;
|
|
+ u32 reg;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * Setup PCIe rev / gen compliance based on device tree property
|
|
+ * 'max-link-speed' which also forces maximal link speed.
|
|
+ */
|
|
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
|
|
+ reg &= ~PCIE_GEN_SEL_MSK;
|
|
+ if (pcie->link_gen == 3)
|
|
+ reg |= SPEED_GEN_3;
|
|
+ else if (pcie->link_gen == 2)
|
|
+ reg |= SPEED_GEN_2;
|
|
+ else
|
|
+ reg |= SPEED_GEN_1;
|
|
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
|
|
+
|
|
+ /*
|
|
+ * Set maximal link speed value also into PCIe Link Control 2 register.
|
|
+ * Armada 3700 Functional Specification says that default value is based
|
|
+ * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
|
|
+ */
|
|
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
|
|
+ reg &= ~PCI_EXP_LNKCTL2_TLS;
|
|
+ if (pcie->link_gen == 3)
|
|
+ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
|
|
+ else if (pcie->link_gen == 2)
|
|
+ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
|
|
+ else
|
|
+ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
|
|
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
|
|
+
|
|
+ /* Enable link training after selecting PCIe generation */
|
|
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
|
|
+ reg |= LINK_TRAINING_EN;
|
|
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
|
|
+
|
|
+ /*
|
|
+ * Reset PCIe card via PERST# signal. Some cards are not detected
|
|
+ * during link training when they are in some non-initial state.
|
|
+ */
|
|
+ advk_pcie_issue_perst(pcie);
|
|
+
|
|
+ /*
|
|
+ * PERST# signal could have been asserted by pinctrl subsystem before
|
|
+ * probe() callback has been called or issued explicitly by reset gpio
|
|
+ * function advk_pcie_issue_perst(), making the endpoint going into
|
|
+ * fundamental reset. As required by PCI Express spec (PCI Express
|
|
+ * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
|
|
+ * Conventional Reset) a delay for at least 100ms after such a reset
|
|
+ * before sending a Configuration Request to the device is needed.
|
|
+ * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
|
|
+ * waits for link at least 900ms.
|
|
+ */
|
|
+ ret = advk_pcie_wait_for_link(pcie);
|
|
+ if (ret < 0)
|
|
+ dev_err(dev, "link never came up\n");
|
|
+ else
|
|
+ dev_info(dev, "link up\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Set PCIe address window register which could be used for memory
|
|
+ * mapping.
|
|
+ */
|
|
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
|
|
+ phys_addr_t match, phys_addr_t remap,
|
|
+ phys_addr_t mask, u32 actions)
|
|
+{
|
|
+ advk_writel(pcie, OB_WIN_ENABLE |
|
|
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
|
|
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
|
|
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
|
|
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
|
|
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
|
|
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
|
|
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
|
|
+}
|
|
+
|
|
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
|
|
+{
|
|
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
|
|
+}
|
|
+
|
|
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
{
|
|
u32 reg;
|
|
+ int i;
|
|
|
|
/* Set to Direct mode */
|
|
reg = advk_readl(pcie, CTRL_CONFIG_REG);
|
|
@@ -362,6 +502,31 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
|
|
advk_writel(pcie, reg, VENDOR_ID_REG);
|
|
|
|
+ /*
|
|
+ * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
|
|
+ * because the default value is Mass storage controller (0x010400).
|
|
+ *
|
|
+ * Note that this Aardvark PCI Bridge does not have compliant Type 1
|
|
+ * Configuration Space and it even cannot be accessed via Aardvark's
|
|
+ * PCI config space access method. Something like config space is
|
|
+ * available in internal Aardvark registers starting at offset 0x0
|
|
+ * and is reported as Type 0. In range 0x10 - 0x34 it has totally
|
|
+ * different registers.
|
|
+ *
|
|
+ * Therefore driver uses emulation of PCI Bridge which emulates
|
|
+ * access to configuration space via internal Aardvark registers or
|
|
+ * emulated configuration buffer.
|
|
+ */
|
|
+ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
|
|
+ reg &= ~0xffffff00;
|
|
+ reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
|
|
+ advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
|
|
+
|
|
+ /* Disable Root Bridge I/O space, memory space and bus mastering */
|
|
+ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
|
|
+ reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
|
|
+ advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
|
|
+
|
|
/* Set Advanced Error Capabilities and Control PF0 register */
|
|
reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
|
|
PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
|
|
@@ -369,36 +534,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
|
|
advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
|
|
|
|
- /* Set PCIe Device Control and Status 1 PF0 register */
|
|
- reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
|
|
- (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
|
|
- PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
|
|
- (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
|
|
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
|
|
- advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
|
|
+ /* Set PCIe Device Control register */
|
|
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
|
|
+ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
|
|
+ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
|
|
+ reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
|
|
+ reg &= ~PCI_EXP_DEVCTL_READRQ;
|
|
+ reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
|
|
+ reg |= PCI_EXP_DEVCTL_READRQ_512B;
|
|
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
|
|
|
|
/* Program PCIe Control 2 to disable strict ordering */
|
|
reg = PCIE_CORE_CTRL2_RESERVED |
|
|
PCIE_CORE_CTRL2_TD_ENABLE;
|
|
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
|
|
|
|
- /* Set GEN2 */
|
|
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
|
|
- reg &= ~PCIE_GEN_SEL_MSK;
|
|
- reg |= SPEED_GEN_2;
|
|
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
|
|
-
|
|
/* Set lane X1 */
|
|
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
|
|
reg &= ~LANE_CNT_MSK;
|
|
reg |= LANE_COUNT_1;
|
|
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
|
|
|
|
- /* Enable link training */
|
|
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
|
|
- reg |= LINK_TRAINING_EN;
|
|
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
|
|
-
|
|
/* Enable MSI */
|
|
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
|
|
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
|
|
@@ -423,27 +579,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
|
|
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
|
|
|
|
+ /*
|
|
+ * Enable AXI address window location generation:
|
|
+ * When it is enabled, the default outbound window
|
|
+ * configurations (Default User Field: 0xD0074CFC)
|
|
+ * are used to transparent address translation for
|
|
+ * the outbound transactions. Thus, PCIe address
|
|
+ * windows are not required for transparent memory
|
|
+ * access when default outbound window configuration
|
|
+ * is set for memory access.
|
|
+ */
|
|
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
|
|
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
|
|
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
|
|
|
|
- /* Bypass the address window mapping for PIO */
|
|
+ /*
|
|
+ * Set memory access in Default User Field so it
|
|
+ * is not required to configure PCIe address for
|
|
+ * transparent memory access.
|
|
+ */
|
|
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
|
|
+
|
|
+ /*
|
|
+ * Bypass the address window mapping for PIO:
|
|
+ * Since PIO access already contains all required
|
|
+ * info over AXI interface by PIO registers, the
|
|
+ * address window is not required.
|
|
+ */
|
|
reg = advk_readl(pcie, PIO_CTRL);
|
|
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
|
|
advk_writel(pcie, reg, PIO_CTRL);
|
|
|
|
- /* Start link training */
|
|
- reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
|
|
- reg |= PCIE_CORE_LINK_TRAINING;
|
|
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
|
|
+ /*
|
|
+ * Configure PCIe address windows for non-memory or
|
|
+ * non-transparent access as by default PCIe uses
|
|
+ * transparent memory access.
|
|
+ */
|
|
+ for (i = 0; i < pcie->wins_count; i++)
|
|
+ advk_pcie_set_ob_win(pcie, i,
|
|
+ pcie->wins[i].match, pcie->wins[i].remap,
|
|
+ pcie->wins[i].mask, pcie->wins[i].actions);
|
|
|
|
- advk_pcie_wait_for_link(pcie);
|
|
+ /* Disable remaining PCIe outbound windows */
|
|
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
|
|
+ advk_pcie_disable_ob_win(pcie, i);
|
|
|
|
- reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
|
|
- reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
|
|
- PCIE_CORE_CMD_IO_ACCESS_EN |
|
|
- PCIE_CORE_CMD_MEM_IO_REQ_EN;
|
|
- advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
|
|
+ advk_pcie_train_link(pcie);
|
|
}
|
|
|
|
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
|
|
@@ -452,6 +633,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
|
u32 reg;
|
|
unsigned int status;
|
|
char *strcomp_status, *str_posted;
|
|
+ int ret;
|
|
|
|
reg = advk_readl(pcie, PIO_STAT);
|
|
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
|
|
@@ -476,6 +658,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
|
case PIO_COMPLETION_STATUS_OK:
|
|
if (reg & PIO_ERR_STATUS) {
|
|
strcomp_status = "COMP_ERR";
|
|
+ ret = -EFAULT;
|
|
break;
|
|
}
|
|
/* Get the read result */
|
|
@@ -483,9 +666,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
|
*val = advk_readl(pcie, PIO_RD_DATA);
|
|
/* No error */
|
|
strcomp_status = NULL;
|
|
+ ret = 0;
|
|
break;
|
|
case PIO_COMPLETION_STATUS_UR:
|
|
strcomp_status = "UR";
|
|
+ ret = -EOPNOTSUPP;
|
|
break;
|
|
case PIO_COMPLETION_STATUS_CRS:
|
|
if (allow_crs && val) {
|
|
@@ -503,6 +688,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
|
*/
|
|
*val = CFG_RD_CRS_VAL;
|
|
strcomp_status = NULL;
|
|
+ ret = 0;
|
|
break;
|
|
}
|
|
/* PCIe r4.0, sec 2.3.2, says:
|
|
@@ -518,21 +704,24 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
|
* Request and taking appropriate action, e.g., complete the
|
|
* Request to the host as a failed transaction.
|
|
*
|
|
- * To simplify implementation do not re-issue the Configuration
|
|
- * Request and complete the Request as a failed transaction.
|
|
+ * So return -EAGAIN and caller (pci-aardvark.c driver) will
|
|
+ * re-issue request again up to the PIO_RETRY_CNT retries.
|
|
*/
|
|
strcomp_status = "CRS";
|
|
+ ret = -EAGAIN;
|
|
break;
|
|
case PIO_COMPLETION_STATUS_CA:
|
|
strcomp_status = "CA";
|
|
+ ret = -ECANCELED;
|
|
break;
|
|
default:
|
|
strcomp_status = "Unknown";
|
|
+ ret = -EINVAL;
|
|
break;
|
|
}
|
|
|
|
if (!strcomp_status)
|
|
- return 0;
|
|
+ return ret;
|
|
|
|
if (reg & PIO_NON_POSTED_REQ)
|
|
str_posted = "Non-posted";
|
|
@@ -542,7 +731,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
|
dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
|
|
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
|
|
|
|
- return -EFAULT;
|
|
+ return ret;
|
|
}
|
|
|
|
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
|
|
@@ -550,13 +739,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
|
|
struct device *dev = &pcie->pdev->dev;
|
|
int i;
|
|
|
|
- for (i = 0; i < PIO_RETRY_CNT; i++) {
|
|
+ for (i = 1; i <= PIO_RETRY_CNT; i++) {
|
|
u32 start, isr;
|
|
|
|
start = advk_readl(pcie, PIO_START);
|
|
isr = advk_readl(pcie, PIO_ISR);
|
|
if (!start && isr)
|
|
- return 0;
|
|
+ return i;
|
|
udelay(PIO_RETRY_DELAY);
|
|
}
|
|
|
|
@@ -564,6 +753,64 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
|
|
return -ETIMEDOUT;
|
|
}
|
|
|
|
+static pci_bridge_emul_read_status_t
|
|
+advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
|
|
+ int reg, u32 *value)
|
|
+{
|
|
+ struct advk_pcie *pcie = bridge->data;
|
|
+
|
|
+ switch (reg) {
|
|
+ case PCI_COMMAND:
|
|
+ *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
|
|
+ return PCI_BRIDGE_EMUL_HANDLED;
|
|
+
|
|
+ case PCI_INTERRUPT_LINE: {
|
|
+ /*
|
|
+ * From the whole 32bit register we support reading from HW only
|
|
+ * one bit: PCI_BRIDGE_CTL_BUS_RESET.
|
|
+ * Other bits are retrieved only from emulated config buffer.
|
|
+ */
|
|
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
|
|
+ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
|
|
+ if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
|
|
+ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
|
|
+ else
|
|
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
|
|
+ *value = val;
|
|
+ return PCI_BRIDGE_EMUL_HANDLED;
|
|
+ }
|
|
+
|
|
+ default:
|
|
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void
|
|
+advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
|
|
+ int reg, u32 old, u32 new, u32 mask)
|
|
+{
|
|
+ struct advk_pcie *pcie = bridge->data;
|
|
+
|
|
+ switch (reg) {
|
|
+ case PCI_COMMAND:
|
|
+ advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
|
|
+ break;
|
|
+
|
|
+ case PCI_INTERRUPT_LINE:
|
|
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
|
|
+ u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
|
|
+ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
|
|
+ val |= HOT_RESET_GEN;
|
|
+ else
|
|
+ val &= ~HOT_RESET_GEN;
|
|
+ advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+}
|
|
|
|
static pci_bridge_emul_read_status_t
|
|
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
|
|
@@ -665,6 +912,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
|
|
}
|
|
|
|
static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
|
|
+ .read_base = advk_pci_bridge_emul_base_conf_read,
|
|
+ .write_base = advk_pci_bridge_emul_base_conf_write,
|
|
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
|
|
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
|
|
};
|
|
@@ -676,37 +925,33 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
|
|
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
|
|
{
|
|
struct pci_bridge_emul *bridge = &pcie->bridge;
|
|
- int ret;
|
|
|
|
- bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
|
|
- bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
|
|
+ bridge->conf.vendor =
|
|
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
|
|
+ bridge->conf.device =
|
|
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
|
|
bridge->conf.class_revision =
|
|
- advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
|
|
+ cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
|
|
|
|
/* Support 32 bits I/O addressing */
|
|
bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
|
|
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
|
|
|
|
/* Support 64 bits memory pref */
|
|
- bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
|
|
- bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
|
|
+ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
|
|
+ bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
|
|
|
|
/* Support interrupt A for MSI feature */
|
|
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
|
|
|
|
+ /* Indicates supports for Completion Retry Status */
|
|
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
|
|
+
|
|
bridge->has_pcie = true;
|
|
bridge->data = pcie;
|
|
bridge->ops = &advk_pci_bridge_emul_ops;
|
|
|
|
- /* PCIe config space can be initialized after pci_bridge_emul_init() */
|
|
- ret = pci_bridge_emul_init(bridge, 0);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
-
|
|
- /* Indicates supports for Completion Retry Status */
|
|
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
|
|
-
|
|
- return 0;
|
|
+ return pci_bridge_emul_init(bridge, 0);
|
|
}
|
|
|
|
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
|
|
@@ -715,6 +960,13 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
|
|
if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
|
|
return false;
|
|
|
|
+ /*
|
|
+ * If the link goes down after we check for link-up, nothing bad
|
|
+ * happens but the config access times out.
|
|
+ */
|
|
+ if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
|
|
+ return false;
|
|
+
|
|
return true;
|
|
}
|
|
|
|
@@ -751,6 +1003,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
|
int where, int size, u32 *val)
|
|
{
|
|
struct advk_pcie *pcie = bus->sysdata;
|
|
+ int retry_count;
|
|
bool allow_crs;
|
|
u32 reg;
|
|
int ret;
|
|
@@ -773,18 +1026,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
|
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
|
|
PCI_EXP_RTCTL_CRSSVE);
|
|
|
|
- if (advk_pcie_pio_is_running(pcie)) {
|
|
- /*
|
|
- * If it is possible return Completion Retry Status so caller
|
|
- * tries to issue the request again instead of failing.
|
|
- */
|
|
- if (allow_crs) {
|
|
- *val = CFG_RD_CRS_VAL;
|
|
- return PCIBIOS_SUCCESSFUL;
|
|
- }
|
|
- *val = 0xffffffff;
|
|
- return PCIBIOS_SET_FAILED;
|
|
- }
|
|
+ if (advk_pcie_pio_is_running(pcie))
|
|
+ goto try_crs;
|
|
|
|
/* Program the control register */
|
|
reg = advk_readl(pcie, PIO_CTRL);
|
|
@@ -803,30 +1046,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
|
/* Program the data strobe */
|
|
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
|
|
|
|
- /* Clear PIO DONE ISR and start the transfer */
|
|
- advk_writel(pcie, 1, PIO_ISR);
|
|
- advk_writel(pcie, 1, PIO_START);
|
|
+ retry_count = 0;
|
|
+ do {
|
|
+ /* Clear PIO DONE ISR and start the transfer */
|
|
+ advk_writel(pcie, 1, PIO_ISR);
|
|
+ advk_writel(pcie, 1, PIO_START);
|
|
|
|
- ret = advk_pcie_wait_pio(pcie);
|
|
- if (ret < 0) {
|
|
- /*
|
|
- * If it is possible return Completion Retry Status so caller
|
|
- * tries to issue the request again instead of failing.
|
|
- */
|
|
- if (allow_crs) {
|
|
- *val = CFG_RD_CRS_VAL;
|
|
- return PCIBIOS_SUCCESSFUL;
|
|
- }
|
|
- *val = 0xffffffff;
|
|
- return PCIBIOS_SET_FAILED;
|
|
- }
|
|
+ ret = advk_pcie_wait_pio(pcie);
|
|
+ if (ret < 0)
|
|
+ goto try_crs;
|
|
|
|
- /* Check PIO status and get the read result */
|
|
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
|
|
- if (ret < 0) {
|
|
- *val = 0xffffffff;
|
|
- return PCIBIOS_SET_FAILED;
|
|
- }
|
|
+ retry_count += ret;
|
|
+
|
|
+ /* Check PIO status and get the read result */
|
|
+ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
|
|
+ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
|
|
+
|
|
+ if (ret < 0)
|
|
+ goto fail;
|
|
|
|
if (size == 1)
|
|
*val = (*val >> (8 * (where & 3))) & 0xff;
|
|
@@ -834,6 +1071,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
|
*val = (*val >> (8 * (where & 3))) & 0xffff;
|
|
|
|
return PCIBIOS_SUCCESSFUL;
|
|
+
|
|
+try_crs:
|
|
+ /*
|
|
+ * If it is possible, return Completion Retry Status so that caller
|
|
+ * tries to issue the request again instead of failing.
|
|
+ */
|
|
+ if (allow_crs) {
|
|
+ *val = CFG_RD_CRS_VAL;
|
|
+ return PCIBIOS_SUCCESSFUL;
|
|
+ }
|
|
+
|
|
+fail:
|
|
+ *val = 0xffffffff;
|
|
+ return PCIBIOS_SET_FAILED;
|
|
}
|
|
|
|
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
|
|
@@ -842,6 +1093,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
|
|
struct advk_pcie *pcie = bus->sysdata;
|
|
u32 reg;
|
|
u32 data_strobe = 0x0;
|
|
+ int retry_count;
|
|
int offset;
|
|
int ret;
|
|
|
|
@@ -883,19 +1135,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
|
|
/* Program the data strobe */
|
|
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
|
|
|
|
- /* Clear PIO DONE ISR and start the transfer */
|
|
- advk_writel(pcie, 1, PIO_ISR);
|
|
- advk_writel(pcie, 1, PIO_START);
|
|
+ retry_count = 0;
|
|
+ do {
|
|
+ /* Clear PIO DONE ISR and start the transfer */
|
|
+ advk_writel(pcie, 1, PIO_ISR);
|
|
+ advk_writel(pcie, 1, PIO_START);
|
|
|
|
- ret = advk_pcie_wait_pio(pcie);
|
|
- if (ret < 0)
|
|
- return PCIBIOS_SET_FAILED;
|
|
+ ret = advk_pcie_wait_pio(pcie);
|
|
+ if (ret < 0)
|
|
+ return PCIBIOS_SET_FAILED;
|
|
|
|
- ret = advk_pcie_check_pio_status(pcie, false, NULL);
|
|
- if (ret < 0)
|
|
- return PCIBIOS_SET_FAILED;
|
|
+ retry_count += ret;
|
|
|
|
- return PCIBIOS_SUCCESSFUL;
|
|
+ ret = advk_pcie_check_pio_status(pcie, false, NULL);
|
|
+ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
|
|
+
|
|
+ return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
|
|
}
|
|
|
|
static struct pci_ops advk_pcie_ops = {
|
|
@@ -1244,6 +1499,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
|
|
struct advk_pcie *pcie;
|
|
struct resource *res;
|
|
struct pci_host_bridge *bridge;
|
|
+ struct resource_entry *entry;
|
|
int ret, irq;
|
|
|
|
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
|
|
@@ -1273,6 +1529,102 @@ static int advk_pcie_probe(struct platform_device *pdev)
|
|
return ret;
|
|
}
|
|
|
|
+ resource_list_for_each_entry(entry, &pcie->resources) {
|
|
+ resource_size_t start = entry->res->start;
|
|
+ resource_size_t size = resource_size(entry->res);
|
|
+ unsigned long type = resource_type(entry->res);
|
|
+ u64 win_size;
|
|
+
|
|
+ /*
|
|
+ * Aardvark hardware allows to configure also PCIe window
|
|
+ * for config type 0 and type 1 mapping, but driver uses
|
|
+ * only PIO for issuing configuration transfers which does
|
|
+ * not use PCIe window configuration.
|
|
+ */
|
|
+ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
|
|
+ type != IORESOURCE_IO)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Skip transparent memory resources. Default outbound access
|
|
+ * configuration is set to transparent memory access so it
|
|
+ * does not need window configuration.
|
|
+ */
|
|
+ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
|
|
+ entry->offset == 0)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * The n-th PCIe window is configured by tuple (match, remap, mask)
|
|
+ * and an access to address A uses this window if A matches the
|
|
+ * match with given mask.
|
|
+ * So every PCIe window size must be a power of two and every start
|
|
+ * address must be aligned to window size. Minimal size is 64 KiB
|
|
+ * because lower 16 bits of mask must be zero. Remapped address
|
|
+ * may have set only bits from the mask.
|
|
+ */
|
|
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
|
|
+ /* Calculate the largest aligned window size */
|
|
+ win_size = (1ULL << (fls64(size)-1)) |
|
|
+ (start ? (1ULL << __ffs64(start)) : 0);
|
|
+ win_size = 1ULL << __ffs64(win_size);
|
|
+ if (win_size < 0x10000)
|
|
+ break;
|
|
+
|
|
+ dev_dbg(dev,
|
|
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
|
|
+ pcie->wins_count, (unsigned long long)start,
|
|
+ (unsigned long long)start + win_size, type);
|
|
+
|
|
+ if (type == IORESOURCE_IO) {
|
|
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
|
|
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
|
|
+ } else {
|
|
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
|
|
+ pcie->wins[pcie->wins_count].match = start;
|
|
+ }
|
|
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
|
|
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
|
|
+
|
|
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
|
|
+ break;
|
|
+
|
|
+ start += win_size;
|
|
+ size -= win_size;
|
|
+ pcie->wins_count++;
|
|
+ }
|
|
+
|
|
+ if (size > 0) {
|
|
+ dev_err(&pcie->pdev->dev,
|
|
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
|
|
+ (unsigned long long)entry->res->start,
|
|
+ (unsigned long long)entry->res->end + 1);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
|
|
+ "reset-gpios", 0,
|
|
+ GPIOD_OUT_LOW,
|
|
+ "pcie1-reset");
|
|
+ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
|
|
+ if (ret) {
|
|
+ if (ret == -ENOENT) {
|
|
+ pcie->reset_gpio = NULL;
|
|
+ } else {
|
|
+ if (ret != -EPROBE_DEFER)
|
|
+ dev_err(dev, "Failed to get reset-gpio: %i\n",
|
|
+ ret);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = of_pci_get_max_link_speed(dev->of_node);
|
|
+ if (ret <= 0 || ret > 3)
|
|
+ pcie->link_gen = 3;
|
|
+ else
|
|
+ pcie->link_gen = ret;
|
|
+
|
|
advk_pcie_setup_hw(pcie);
|
|
|
|
ret = advk_sw_pci_bridge_init(pcie);
|
|
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index b3d63e319bb39..3026346ccb18c 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -21,8 +21,9 @@
#include "pci-bridge-emul.h"

#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
+#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)

struct pci_bridge_reg_behavior {
/* Read-only bits */
@@ -38,7 +39,8 @@ struct pci_bridge_reg_behavior {
u32 rsvd;
};

-static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+static const
+struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
[PCI_VENDOR_ID / 4] = { .ro = ~0 },
[PCI_COMMAND / 4] = {
.rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -173,7 +175,8 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
},
};

-static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+static const
+struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = {
[PCI_CAP_LIST_ID / 4] = {
/*
* Capability ID, Next Capability Pointer and
@@ -270,6 +273,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
unsigned int flags)
{
+ BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
+
bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 83e585c5a6132..f56add78d58ce 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -166,10 +166,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
+ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
+ "pwm", "led"),
+ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
+ "pwm", "led"),
+ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
+ "pwm", "led"),
+ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
+ "pwm", "led"),
PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
@@ -183,11 +187,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
18, 2, "gpio", "uart"),
- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
-
};

static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 3654cfc4376fa..97c1f242ef0a3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3387,7 +3387,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)

shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 16432d42a50aa..6faf1d6451b0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -796,7 +796,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,

mutex_lock(&sdev->state_mutex);
if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
- ret = count;
+ ret = 0;
} else {
ret = scsi_device_set_state(sdev, state);
if (ret == 0 && state == SDEV_RUNNING)
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index cf263a58a1489..6fd549a424d53 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -187,7 +187,6 @@ static struct fbtft_display display = {
},
};

-#ifdef CONFIG_FB_BACKLIGHT
static int update_onboard_backlight(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-static void register_onboard_backlight(struct fbtft_par *par) { };
-#endif

FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);

diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
|
|
index bc53d68bfcaa3..771697508cec8 100644
|
|
--- a/drivers/staging/fbtft/fbtft-core.c
|
|
+++ b/drivers/staging/fbtft/fbtft-core.c
|
|
@@ -136,7 +136,6 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
|
|
}
|
|
#endif
|
|
|
|
-#ifdef CONFIG_FB_BACKLIGHT
|
|
static int fbtft_backlight_update_status(struct backlight_device *bd)
|
|
{
|
|
struct fbtft_par *par = bl_get_data(bd);
|
|
@@ -169,6 +168,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
|
|
par->info->bl_dev = NULL;
|
|
}
|
|
}
|
|
+EXPORT_SYMBOL(fbtft_unregister_backlight);
|
|
|
|
static const struct backlight_ops fbtft_bl_ops = {
|
|
.get_brightness = fbtft_backlight_get_brightness,
|
|
@@ -206,12 +206,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
|
|
if (!par->fbtftops.unregister_backlight)
|
|
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
|
|
}
|
|
-#else
|
|
-void fbtft_register_backlight(struct fbtft_par *par) { };
|
|
-void fbtft_unregister_backlight(struct fbtft_par *par) { };
|
|
-#endif
|
|
EXPORT_SYMBOL(fbtft_register_backlight);
|
|
-EXPORT_SYMBOL(fbtft_unregister_backlight);
|
|
|
|
static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
|
|
int ye)
|
|
@@ -860,13 +855,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
|
|
fb_info->fix.smem_len >> 10, text1,
|
|
HZ / fb_info->fbdefio->delay, text2);
|
|
|
|
-#ifdef CONFIG_FB_BACKLIGHT
|
|
/* Turn on backlight if available */
|
|
if (fb_info->bl_dev) {
|
|
fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
|
|
fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
|
|
}
|
|
-#endif
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index c702ee9691b1d..bcbf0c8cd4209 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2559,13 +2559,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
free_irq(dev->irq, dev);
priv->irq = 0;
}
- free_rtllib(dev);

if (dev->mem_start != 0) {
iounmap((void __iomem *)dev->mem_start);
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
}
+
+ free_rtllib(dev);
} else {
priv = rtllib_priv(dev);
}
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
|
|
index 5ef08905fe05c..15da02aeee948 100644
|
|
--- a/drivers/tty/hvc/hvc_xen.c
|
|
+++ b/drivers/tty/hvc/hvc_xen.c
|
|
@@ -86,7 +86,11 @@ static int __write_console(struct xencons_info *xencons,
|
|
cons = intf->out_cons;
|
|
prod = intf->out_prod;
|
|
mb(); /* update queue values before going on */
|
|
- BUG_ON((prod - cons) > sizeof(intf->out));
|
|
+
|
|
+ if ((prod - cons) > sizeof(intf->out)) {
|
|
+ pr_err_once("xencons: Illegal ring page indices");
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
|
|
intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
|
|
@@ -114,7 +118,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
|
|
*/
|
|
while (len) {
|
|
int sent = __write_console(cons, data, len);
|
|
-
|
|
+
|
|
+ if (sent < 0)
|
|
+ return sent;
|
|
+
|
|
data += sent;
|
|
len -= sent;
|
|
|
|
@@ -138,7 +145,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
|
|
cons = intf->in_cons;
|
|
prod = intf->in_prod;
|
|
mb(); /* get pointers before reading ring */
|
|
- BUG_ON((prod - cons) > sizeof(intf->in));
|
|
+
|
|
+ if ((prod - cons) > sizeof(intf->in)) {
|
|
+ pr_err_once("xencons: Illegal ring page indices");
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
while (cons != prod && recv < len)
|
|
buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
|
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
|
index 303e8b3c1bdae..d7ab2e88631a0 100644
|
|
--- a/drivers/usb/core/hub.c
|
|
+++ b/drivers/usb/core/hub.c
|
|
@@ -4609,8 +4609,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
|
|
if (oldspeed == USB_SPEED_LOW)
|
|
delay = HUB_LONG_RESET_TIME;
|
|
|
|
- mutex_lock(hcd->address0_mutex);
|
|
-
|
|
/* Reset the device; full speed may morph to high speed */
|
|
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
|
|
retval = hub_port_reset(hub, port1, udev, delay, false);
|
|
@@ -4925,7 +4923,6 @@ fail:
|
|
hub_port_disable(hub, port1, 0);
|
|
update_devnum(udev, devnum); /* for disconnect processing */
|
|
}
|
|
- mutex_unlock(hcd->address0_mutex);
|
|
return retval;
|
|
}
|
|
|
|
@@ -5015,6 +5012,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
|
|
struct usb_port *port_dev = hub->ports[port1 - 1];
|
|
struct usb_device *udev = port_dev->child;
|
|
static int unreliable_port = -1;
|
|
+ bool retry_locked;
|
|
|
|
/* Disconnect any existing devices under this port */
|
|
if (udev) {
|
|
@@ -5070,7 +5068,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
|
|
unit_load = 100;
|
|
|
|
status = 0;
|
|
+
|
|
for (i = 0; i < SET_CONFIG_TRIES; i++) {
|
|
+ usb_lock_port(port_dev);
|
|
+ mutex_lock(hcd->address0_mutex);
|
|
+ retry_locked = true;
|
|
|
|
/* reallocate for each attempt, since references
|
|
* to the previous one can escape in various ways
|
|
@@ -5079,6 +5081,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
|
|
if (!udev) {
|
|
dev_err(&port_dev->dev,
|
|
"couldn't allocate usb_device\n");
|
|
+ mutex_unlock(hcd->address0_mutex);
|
|
+ usb_unlock_port(port_dev);
|
|
goto done;
|
|
}
|
|
|
|
@@ -5100,12 +5104,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
|
|
}
|
|
|
|
/* reset (non-USB 3.0 devices) and get descriptor */
|
|
- usb_lock_port(port_dev);
|
|
status = hub_port_init(hub, udev, port1, i);
|
|
- usb_unlock_port(port_dev);
|
|
if (status < 0)
|
|
goto loop;
|
|
|
|
+ mutex_unlock(hcd->address0_mutex);
|
|
+ usb_unlock_port(port_dev);
|
|
+ retry_locked = false;
|
|
+
|
|
if (udev->quirks & USB_QUIRK_DELAY_INIT)
|
|
msleep(2000);
|
|
|
|
@@ -5198,6 +5204,10 @@ loop:
|
|
usb_ep0_reinit(udev);
|
|
release_devnum(udev);
|
|
hub_free_dev(udev);
|
|
+ if (retry_locked) {
|
|
+ mutex_unlock(hcd->address0_mutex);
|
|
+ usb_unlock_port(port_dev);
|
|
+ }
|
|
usb_put_dev(udev);
|
|
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
|
|
break;
|
|
@@ -5794,6 +5804,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
|
|
bos = udev->bos;
|
|
udev->bos = NULL;
|
|
|
|
+ mutex_lock(hcd->address0_mutex);
|
|
+
|
|
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
|
|
|
|
/* ep0 maxpacket size may change; let the HCD know about it.
|
|
@@ -5803,6 +5815,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
|
|
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
|
|
break;
|
|
}
|
|
+ mutex_unlock(hcd->address0_mutex);
|
|
|
|
if (ret < 0)
|
|
goto re_enumerate;
|
|
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
|
|
index e8b25dae09499..249e8e6aa9282 100644
|
|
--- a/drivers/usb/dwc2/gadget.c
|
|
+++ b/drivers/usb/dwc2/gadget.c
|
|
@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
|
|
}
|
|
ctrl |= DXEPCTL_CNAK;
|
|
} else {
|
|
+ hs_req->req.frame_number = hs_ep->target_frame;
|
|
+ hs_req->req.actual = 0;
|
|
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
|
|
return;
|
|
}
|
|
@@ -2855,9 +2857,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
|
|
|
|
do {
|
|
hs_req = get_ep_head(hs_ep);
|
|
- if (hs_req)
|
|
+ if (hs_req) {
|
|
+ hs_req->req.frame_number = hs_ep->target_frame;
|
|
+ hs_req->req.actual = 0;
|
|
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
|
|
-ENODATA);
|
|
+ }
|
|
dwc2_gadget_incr_frame_num(hs_ep);
|
|
/* Update current frame number value. */
|
|
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
|
|
@@ -2910,8 +2915,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
|
|
|
|
while (dwc2_gadget_target_frame_elapsed(ep)) {
|
|
hs_req = get_ep_head(ep);
|
|
- if (hs_req)
|
|
+ if (hs_req) {
|
|
+ hs_req->req.frame_number = ep->target_frame;
|
|
+ hs_req->req.actual = 0;
|
|
dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
|
|
+ }
|
|
|
|
dwc2_gadget_incr_frame_num(ep);
|
|
/* Update current frame number value. */
|
|
@@ -3000,8 +3008,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
|
|
|
|
while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
|
|
hs_req = get_ep_head(hs_ep);
|
|
- if (hs_req)
|
|
+ if (hs_req) {
|
|
+ hs_req->req.frame_number = hs_ep->target_frame;
|
|
+ hs_req->req.actual = 0;
|
|
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
|
|
+ }
|
|
|
|
dwc2_gadget_incr_frame_num(hs_ep);
|
|
/* Update current frame number value. */
|
|
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 68bbac64b7536..94af71e9856f2 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -59,7 +59,7 @@
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))

/* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY 1*1E6L
+#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)

/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a1e9cbe518c74..74203ed5479fa 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
|
|
index b498960ff72b5..5e661bae39972 100644
|
|
--- a/drivers/usb/typec/tcpm/fusb302.c
|
|
+++ b/drivers/usb/typec/tcpm/fusb302.c
|
|
@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
|
|
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
|
|
FUSB_REG_MASK_BC_LVL |
|
|
FUSB_REG_MASK_COMP_CHNG,
|
|
- FUSB_REG_MASK_COMP_CHNG);
|
|
+ FUSB_REG_MASK_BC_LVL);
|
|
if (ret < 0) {
|
|
fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
|
|
ret);
|
|
goto done;
|
|
}
|
|
chip->intr_comp_chng = true;
|
|
+ chip->intr_bc_lvl = false;
|
|
break;
|
|
case TYPEC_CC_RD:
|
|
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
|
|
FUSB_REG_MASK_BC_LVL |
|
|
FUSB_REG_MASK_COMP_CHNG,
|
|
- FUSB_REG_MASK_BC_LVL);
|
|
+ FUSB_REG_MASK_COMP_CHNG);
|
|
if (ret < 0) {
|
|
fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
|
|
ret);
|
|
goto done;
|
|
}
|
|
chip->intr_bc_lvl = true;
|
|
+ chip->intr_comp_chng = false;
|
|
break;
|
|
default:
|
|
break;
|
|
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f21f5bfbb78dc..2bf7cb01da9a3 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -491,7 +491,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
virtio_transport_free_pkt(pkt);

len += sizeof(pkt->hdr);
- vhost_add_used(vq, head, len);
+ vhost_add_used(vq, head, 0);
total_len += len;
added = true;
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
|
|
index 652894d619677..b911a91bce6b7 100644
|
|
--- a/drivers/xen/xenbus/xenbus_probe.c
|
|
+++ b/drivers/xen/xenbus/xenbus_probe.c
|
|
@@ -846,7 +846,7 @@ static struct notifier_block xenbus_resume_nb = {
|
|
|
|
static int __init xenbus_init(void)
|
|
{
|
|
- int err = 0;
|
|
+ int err;
|
|
uint64_t v = 0;
|
|
xen_store_domain_type = XS_UNKNOWN;
|
|
|
|
@@ -886,6 +886,29 @@ static int __init xenbus_init(void)
|
|
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
|
|
if (err)
|
|
goto out_error;
|
|
+ /*
|
|
+ * Uninitialized hvm_params are zero and return no error.
|
|
+ * Although it is theoretically possible to have
|
|
+ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
|
|
+ * not zero when valid. If zero, it means that Xenstore hasn't
|
|
+ * been properly initialized. Instead of attempting to map a
|
|
+ * wrong guest physical address return error.
|
|
+ *
|
|
+ * Also recognize all bits set as an invalid value.
|
|
+ */
|
|
+ if (!v || !~v) {
|
|
+ err = -ENOENT;
|
|
+ goto out_error;
|
|
+ }
|
|
+ /* Avoid truncation on 32-bit. */
|
|
+#if BITS_PER_LONG == 32
|
|
+ if (v > ULONG_MAX) {
|
|
+ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
|
|
+ __func__, v);
|
|
+ err = -EINVAL;
|
|
+ goto out_error;
|
|
+ }
|
|
+#endif
|
|
xen_store_gfn = (unsigned long)v;
|
|
xen_store_interface =
|
|
xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
|
|
@@ -920,8 +943,10 @@ static int __init xenbus_init(void)
|
|
*/
|
|
proc_create_mount_point("xen");
|
|
#endif
|
|
+ return 0;
|
|
|
|
out_error:
|
|
+ xen_store_domain_type = XS_UNKNOWN;
|
|
return err;
|
|
}
|
|
|
|
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
|
|
index a9746af5a44db..03c85beecec10 100644
|
|
--- a/fs/cifs/file.c
|
|
+++ b/fs/cifs/file.c
|
|
@@ -2577,12 +2577,23 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
|
|
tcon = tlink_tcon(smbfile->tlink);
|
|
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
|
|
server = tcon->ses->server;
|
|
- if (server->ops->flush)
|
|
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
|
|
- else
|
|
+ if (server->ops->flush == NULL) {
|
|
rc = -ENOSYS;
|
|
+ goto strict_fsync_exit;
|
|
+ }
|
|
+
|
|
+ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
|
|
+ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
|
|
+ if (smbfile) {
|
|
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
|
|
+ cifsFileInfo_put(smbfile);
|
|
+ } else
|
|
+ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
|
|
+ } else
|
|
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
|
|
}
|
|
|
|
+strict_fsync_exit:
|
|
free_xid(xid);
|
|
return rc;
|
|
}
|
|
@@ -2594,6 +2605,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|
struct cifs_tcon *tcon;
|
|
struct TCP_Server_Info *server;
|
|
struct cifsFileInfo *smbfile = file->private_data;
|
|
+ struct inode *inode = file_inode(file);
|
|
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
|
|
|
|
rc = file_write_and_wait_range(file, start, end);
|
|
@@ -2608,12 +2620,23 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|
tcon = tlink_tcon(smbfile->tlink);
|
|
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
|
|
server = tcon->ses->server;
|
|
- if (server->ops->flush)
|
|
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
|
|
- else
|
|
+ if (server->ops->flush == NULL) {
|
|
rc = -ENOSYS;
|
|
+ goto fsync_exit;
|
|
+ }
|
|
+
|
|
+ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
|
|
+ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
|
|
+ if (smbfile) {
|
|
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
|
|
+ cifsFileInfo_put(smbfile);
|
|
+ } else
|
|
+ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
|
|
+ } else
|
|
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
|
|
}
|
|
|
|
+fsync_exit:
|
|
free_xid(xid);
|
|
return rc;
|
|
}
|
|
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 4cb182c20eedd..0cd1d51dde06d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1385,6 +1385,7 @@ page_hit:
nid, nid_of_node(page), ino_of_node(page),
ofs_of_node(page), cpver_of_node(page),
next_blkaddr_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
err = -EINVAL;
out_err:
ClearPageUptodate(page);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index fa4d2aba5a701..64d6c8c9f1ff2 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -839,17 +839,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
goto out_put_old;
}

+ get_page(newpage);
+
+ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+ lru_cache_add_file(newpage);
+
/*
* Release while we have extra ref on stolen page. Otherwise
* anon_pipe_buf_release() might think the page can be reused.
*/
pipe_buf_release(cs->pipe, buf);

- get_page(newpage);
-
- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
- lru_cache_add_file(newpage);
-
err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index aed865a846296..2b78f7b8d5467 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -769,8 +769,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
status = decode_clone(xdr);
if (status)
goto out;
- status = decode_getfattr(xdr, res->dst_fattr, res->server);
-
+ decode_getfattr(xdr, res->dst_fattr, res->server);
out:
res->rpc_status = status;
return status;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
|
|
index 080ca9d5eccbb..b1102a31a1085 100644
|
|
--- a/fs/proc/vmcore.c
|
|
+++ b/fs/proc/vmcore.c
|
|
@@ -125,9 +125,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
|
|
nr_bytes = count;
|
|
|
|
/* If pfn is not ram, return zeros for sparse dump files */
|
|
- if (pfn_is_ram(pfn) == 0)
|
|
- memset(buf, 0, nr_bytes);
|
|
- else {
|
|
+ if (pfn_is_ram(pfn) == 0) {
|
|
+ tmp = 0;
|
|
+ if (!userbuf)
|
|
+ memset(buf, 0, nr_bytes);
|
|
+ else if (clear_user(buf, nr_bytes))
|
|
+ tmp = -EFAULT;
|
|
+ } else {
|
|
if (encrypted)
|
|
tmp = copy_oldmem_page_encrypted(pfn, buf,
|
|
nr_bytes,
|
|
@@ -136,10 +140,10 @@ ssize_t read_from_oldmem(char *buf, size_t count,
|
|
else
|
|
tmp = copy_oldmem_page(pfn, buf, nr_bytes,
|
|
offset, userbuf);
|
|
-
|
|
- if (tmp < 0)
|
|
- return tmp;
|
|
}
|
|
+ if (tmp < 0)
|
|
+ return tmp;
|
|
+
|
|
*ppos += nr_bytes;
|
|
count -= nr_bytes;
|
|
buf += nr_bytes;
|
|
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
|
|
index c309f43bde45e..f8c4d9f97819f 100644
|
|
--- a/include/linux/ipc_namespace.h
|
|
+++ b/include/linux/ipc_namespace.h
|
|
@@ -130,6 +130,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
|
|
return ns;
|
|
}
|
|
|
|
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
|
|
+{
|
|
+ if (ns) {
|
|
+ if (refcount_inc_not_zero(&ns->count))
|
|
+ return ns;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
extern void put_ipc_ns(struct ipc_namespace *ns);
|
|
#else
|
|
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
|
|
@@ -146,6 +156,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
|
|
return ns;
|
|
}
|
|
|
|
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
|
|
+{
|
|
+ return ns;
|
|
+}
|
|
+
|
|
static inline void put_ipc_ns(struct ipc_namespace *ns)
|
|
{
|
|
}
|
|
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 4b1c3b664f517..36f3011ab6013 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -157,7 +157,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index bd0f1595bdc71..05ecaefeb6322 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -451,6 +451,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);

int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 3e7d2c0e79ca1..af9e127779adf 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -47,6 +47,7 @@ struct ipv6_stub {
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
int (*ip6_del_rt)(struct net *net, struct fib6_info *rt);
void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 33979017b7824..004e49f748419 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -30,6 +30,7 @@ enum nci_flag {
NCI_UP,
NCI_DATA_EXCHANGE,
NCI_DATA_EXCHANGE_TO,
+ NCI_UNREG,
};

/* NCI device states */
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index ddcee128f5d9a..145acb8f25095 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -19,6 +19,8 @@
*
*/

+#include <linux/types.h>
+
#define NL802154_GENL_NAME "nl802154"

enum nl802154_commands {
@@ -150,10 +152,9 @@ enum nl802154_attrs {
};

enum nl802154_iftype {
- /* for backwards compatibility TODO */
- NL802154_IFTYPE_UNSPEC = -1,
+ NL802154_IFTYPE_UNSPEC = (~(__u32)0),

- NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_NODE = 0,
NL802154_IFTYPE_MONITOR,
NL802154_IFTYPE_COORD,

diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
|
|
index 3f40501fc60b1..b39cdbc522ec7 100644
|
|
--- a/include/xen/interface/io/ring.h
|
|
+++ b/include/xen/interface/io/ring.h
|
|
@@ -1,21 +1,53 @@
|
|
-/* SPDX-License-Identifier: GPL-2.0 */
|
|
/******************************************************************************
|
|
* ring.h
|
|
*
|
|
* Shared producer-consumer ring macros.
|
|
*
|
|
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
+ * of this software and associated documentation files (the "Software"), to
|
|
+ * deal in the Software without restriction, including without limitation the
|
|
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
+ * sell copies of the Software, and to permit persons to whom the Software is
|
|
+ * furnished to do so, subject to the following conditions:
|
|
+ *
|
|
+ * The above copyright notice and this permission notice shall be included in
|
|
+ * all copies or substantial portions of the Software.
|
|
+ *
|
|
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
+ * DEALINGS IN THE SOFTWARE.
|
|
+ *
|
|
* Tim Deegan and Andrew Warfield November 2004.
|
|
*/
|
|
|
|
#ifndef __XEN_PUBLIC_IO_RING_H__
|
|
#define __XEN_PUBLIC_IO_RING_H__
|
|
|
|
+/*
|
|
+ * When #include'ing this header, you need to provide the following
|
|
+ * declaration upfront:
|
|
+ * - standard integers types (uint8_t, uint16_t, etc)
|
|
+ * They are provided by stdint.h of the standard headers.
|
|
+ *
|
|
+ * In addition, if you intend to use the FLEX macros, you also need to
|
|
+ * provide the following, before invoking the FLEX macros:
|
|
+ * - size_t
|
|
+ * - memcpy
|
|
+ * - grant_ref_t
|
|
+ * These declarations are provided by string.h of the standard headers,
|
|
+ * and grant_table.h from the Xen public headers.
|
|
+ */
|
|
+
|
|
#include <xen/interface/grant_table.h>
|
|
|
|
typedef unsigned int RING_IDX;
|
|
|
|
/* Round a 32-bit unsigned constant down to the nearest power of two. */
|
|
-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
|
|
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
|
|
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
|
|
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
|
|
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
|
|
@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
|
|
* A ring contains as many entries as will fit, rounded down to the nearest
|
|
* power of two (so we can mask with (size-1) to loop around).
|
|
*/
|
|
-#define __CONST_RING_SIZE(_s, _sz) \
|
|
- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
|
|
- sizeof(((struct _s##_sring *)0)->ring[0])))
|
|
-
|
|
+#define __CONST_RING_SIZE(_s, _sz) \
|
|
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
|
|
+ sizeof(((struct _s##_sring *)0)->ring[0])))
|
|
/*
|
|
* The same for passing in an actual pointer instead of a name tag.
|
|
*/
|
|
-#define __RING_SIZE(_s, _sz) \
|
|
- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
|
|
+#define __RING_SIZE(_s, _sz) \
|
|
+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
|
|
|
|
/*
|
|
* Macros to make the correct C datatypes for a new kind of ring.
|
|
*
|
|
* To make a new ring datatype, you need to have two message structures,
|
|
- * let's say struct request, and struct response already defined.
|
|
+ * let's say request_t, and response_t already defined.
|
|
*
|
|
* In a header where you want the ring datatype declared, you then do:
|
|
*
|
|
- * DEFINE_RING_TYPES(mytag, struct request, struct response);
|
|
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
|
|
*
|
|
* These expand out to give you a set of types, as you can see below.
|
|
* The most important of these are:
|
|
*
|
|
- * struct mytag_sring - The shared ring.
|
|
- * struct mytag_front_ring - The 'front' half of the ring.
|
|
- * struct mytag_back_ring - The 'back' half of the ring.
|
|
+ * mytag_sring_t - The shared ring.
|
|
+ * mytag_front_ring_t - The 'front' half of the ring.
|
|
+ * mytag_back_ring_t - The 'back' half of the ring.
|
|
*
|
|
* To initialize a ring in your code you need to know the location and size
|
|
* of the shared memory area (PAGE_SIZE, for instance). To initialise
|
|
* the front half:
|
|
*
|
|
- * struct mytag_front_ring front_ring;
|
|
- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
|
|
- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
|
|
- * PAGE_SIZE);
|
|
+ * mytag_front_ring_t front_ring;
|
|
+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
|
|
+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
|
|
*
|
|
* Initializing the back follows similarly (note that only the front
|
|
* initializes the shared ring):
|
|
*
|
|
- * struct mytag_back_ring back_ring;
|
|
- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
|
|
- * PAGE_SIZE);
|
|
+ * mytag_back_ring_t back_ring;
|
|
+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
|
|
*/
|
|
|
|
-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
|
|
- \
|
|
-/* Shared ring entry */ \
|
|
-union __name##_sring_entry { \
|
|
- __req_t req; \
|
|
- __rsp_t rsp; \
|
|
-}; \
|
|
- \
|
|
-/* Shared ring page */ \
|
|
-struct __name##_sring { \
|
|
- RING_IDX req_prod, req_event; \
|
|
- RING_IDX rsp_prod, rsp_event; \
|
|
- uint8_t pad[48]; \
|
|
- union __name##_sring_entry ring[1]; /* variable-length */ \
|
|
-}; \
|
|
- \
|
|
-/* "Front" end's private variables */ \
|
|
-struct __name##_front_ring { \
|
|
- RING_IDX req_prod_pvt; \
|
|
- RING_IDX rsp_cons; \
|
|
- unsigned int nr_ents; \
|
|
- struct __name##_sring *sring; \
|
|
-}; \
|
|
- \
|
|
-/* "Back" end's private variables */ \
|
|
-struct __name##_back_ring { \
|
|
- RING_IDX rsp_prod_pvt; \
|
|
- RING_IDX req_cons; \
|
|
- unsigned int nr_ents; \
|
|
- struct __name##_sring *sring; \
|
|
-};
|
|
-
|
|
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
|
|
+ \
|
|
+/* Shared ring entry */ \
|
|
+union __name##_sring_entry { \
|
|
+ __req_t req; \
|
|
+ __rsp_t rsp; \
|
|
+}; \
|
|
+ \
|
|
+/* Shared ring page */ \
|
|
+struct __name##_sring { \
|
|
+ RING_IDX req_prod, req_event; \
|
|
+ RING_IDX rsp_prod, rsp_event; \
|
|
+ uint8_t __pad[48]; \
|
|
+ union __name##_sring_entry ring[1]; /* variable-length */ \
|
|
+}; \
|
|
+ \
|
|
+/* "Front" end's private variables */ \
|
|
+struct __name##_front_ring { \
|
|
+ RING_IDX req_prod_pvt; \
|
|
+ RING_IDX rsp_cons; \
|
|
+ unsigned int nr_ents; \
|
|
+ struct __name##_sring *sring; \
|
|
+}; \
|
|
+ \
|
|
+/* "Back" end's private variables */ \
|
|
+struct __name##_back_ring { \
|
|
+ RING_IDX rsp_prod_pvt; \
|
|
+ RING_IDX req_cons; \
|
|
+ unsigned int nr_ents; \
|
|
+ struct __name##_sring *sring; \
|
|
+}; \
|
|
+ \
|
|
/*
|
|
* Macros for manipulating rings.
|
|
*
|
|
@@ -119,105 +148,99 @@ struct __name##_back_ring { \
|
|
*/
|
|
|
|
/* Initialising empty rings */
|
|
-#define SHARED_RING_INIT(_s) do { \
|
|
- (_s)->req_prod = (_s)->rsp_prod = 0; \
|
|
- (_s)->req_event = (_s)->rsp_event = 1; \
|
|
- memset((_s)->pad, 0, sizeof((_s)->pad)); \
|
|
+#define SHARED_RING_INIT(_s) do { \
|
|
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
|
|
+ (_s)->req_event = (_s)->rsp_event = 1; \
|
|
+ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
|
|
} while(0)
|
|
|
|
-#define FRONT_RING_INIT(_r, _s, __size) do { \
|
|
- (_r)->req_prod_pvt = 0; \
|
|
- (_r)->rsp_cons = 0; \
|
|
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
|
|
- (_r)->sring = (_s); \
|
|
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
|
|
+ (_r)->req_prod_pvt = (_i); \
|
|
+ (_r)->rsp_cons = (_i); \
|
|
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
|
|
+ (_r)->sring = (_s); \
|
|
} while (0)
|
|
|
|
-#define BACK_RING_INIT(_r, _s, __size) do { \
|
|
- (_r)->rsp_prod_pvt = 0; \
|
|
- (_r)->req_cons = 0; \
|
|
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
|
|
- (_r)->sring = (_s); \
|
|
-} while (0)
|
|
+#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
|
|
|
|
-/* Initialize to existing shared indexes -- for recovery */
|
|
-#define FRONT_RING_ATTACH(_r, _s, __size) do { \
|
|
- (_r)->sring = (_s); \
|
|
- (_r)->req_prod_pvt = (_s)->req_prod; \
|
|
- (_r)->rsp_cons = (_s)->rsp_prod; \
|
|
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
|
|
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
|
|
+ (_r)->rsp_prod_pvt = (_i); \
|
|
+ (_r)->req_cons = (_i); \
|
|
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
|
|
+ (_r)->sring = (_s); \
|
|
} while (0)
|
|
|
|
-#define BACK_RING_ATTACH(_r, _s, __size) do { \
|
|
- (_r)->sring = (_s); \
|
|
- (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
|
|
- (_r)->req_cons = (_s)->req_prod; \
|
|
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
|
|
-} while (0)
|
|
+#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
|
|
|
|
/* How big is this ring? */
|
|
-#define RING_SIZE(_r) \
|
|
+#define RING_SIZE(_r) \
|
|
((_r)->nr_ents)
|
|
|
|
/* Number of free requests (for use on front side only). */
|
|
-#define RING_FREE_REQUESTS(_r) \
|
|
+#define RING_FREE_REQUESTS(_r) \
|
|
(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
|
|
|
|
/* Test if there is an empty slot available on the front ring.
|
|
* (This is only meaningful from the front. )
|
|
*/
|
|
-#define RING_FULL(_r) \
|
|
+#define RING_FULL(_r) \
|
|
(RING_FREE_REQUESTS(_r) == 0)
|
|
|
|
/* Test if there are outstanding messages to be processed on a ring. */
|
|
-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
|
|
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
|
|
((_r)->sring->rsp_prod - (_r)->rsp_cons)
|
|
|
|
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
|
|
- ({ \
|
|
- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
|
|
- unsigned int rsp = RING_SIZE(_r) - \
|
|
- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
|
|
- req < rsp ? req : rsp; \
|
|
- })
|
|
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
|
|
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
|
|
+ unsigned int rsp = RING_SIZE(_r) - \
|
|
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
|
|
+ req < rsp ? req : rsp; \
|
|
+})
|
|
|
|
/* Direct access to individual ring elements, by index. */
|
|
-#define RING_GET_REQUEST(_r, _idx) \
|
|
+#define RING_GET_REQUEST(_r, _idx) \
|
|
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
|
|
|
|
+#define RING_GET_RESPONSE(_r, _idx) \
|
|
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
|
|
+
|
|
/*
|
|
- * Get a local copy of a request.
|
|
+ * Get a local copy of a request/response.
|
|
*
|
|
- * Use this in preference to RING_GET_REQUEST() so all processing is
|
|
+ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
|
|
* done on a local copy that cannot be modified by the other end.
|
|
*
|
|
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
|
|
- * to be ineffective where _req is a struct which consists of only bitfields.
|
|
+ * to be ineffective where dest is a struct which consists of only bitfields.
|
|
*/
|
|
-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
|
|
- /* Use volatile to force the copy into _req. */ \
|
|
- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
|
|
+#define RING_COPY_(type, r, idx, dest) do { \
|
|
+ /* Use volatile to force the copy into dest. */ \
|
|
+ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
|
|
} while (0)
|
|
|
|
-#define RING_GET_RESPONSE(_r, _idx) \
|
|
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
|
|
+#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
|
|
+#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
|
|
|
|
/* Loop termination condition: Would the specified index overflow the ring? */
|
|
-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
|
|
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
|
|
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
|
|
|
|
/* Ill-behaved frontend determination: Can there be this many requests? */
|
|
-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
|
|
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
|
|
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
|
|
|
|
+/* Ill-behaved backend determination: Can there be this many responses? */
|
|
+#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
|
|
+ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
|
|
|
|
-#define RING_PUSH_REQUESTS(_r) do { \
|
|
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
|
|
- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
|
|
+#define RING_PUSH_REQUESTS(_r) do { \
|
|
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
|
|
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
|
|
} while (0)
|
|
|
|
-#define RING_PUSH_RESPONSES(_r) do { \
|
|
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
|
|
- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
|
|
+#define RING_PUSH_RESPONSES(_r) do { \
|
|
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
|
|
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
|
|
} while (0)
|
|
|
|
/*
|
|
@@ -250,40 +273,40 @@ struct __name##_back_ring { \
|
|
* field appropriately.
|
|
*/
|
|
|
|
-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
|
|
- RING_IDX __old = (_r)->sring->req_prod; \
|
|
- RING_IDX __new = (_r)->req_prod_pvt; \
|
|
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
|
|
- (_r)->sring->req_prod = __new; \
|
|
- virt_mb(); /* back sees new requests /before/ we check req_event */ \
|
|
- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
|
|
- (RING_IDX)(__new - __old)); \
|
|
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
|
|
+ RING_IDX __old = (_r)->sring->req_prod; \
|
|
+ RING_IDX __new = (_r)->req_prod_pvt; \
|
|
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
|
|
+ (_r)->sring->req_prod = __new; \
|
|
+ virt_mb(); /* back sees new requests /before/ we check req_event */ \
|
|
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
|
|
+ (RING_IDX)(__new - __old)); \
|
|
} while (0)
|
|
|
|
-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
|
|
- RING_IDX __old = (_r)->sring->rsp_prod; \
|
|
- RING_IDX __new = (_r)->rsp_prod_pvt; \
|
|
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
|
|
- (_r)->sring->rsp_prod = __new; \
|
|
- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
|
|
- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
|
|
- (RING_IDX)(__new - __old)); \
|
|
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
|
|
+ RING_IDX __old = (_r)->sring->rsp_prod; \
|
|
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
|
|
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
|
|
+ (_r)->sring->rsp_prod = __new; \
|
|
+ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
|
|
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
|
|
+ (RING_IDX)(__new - __old)); \
|
|
} while (0)
|
|
|
|
-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
|
|
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
|
|
- if (_work_to_do) break; \
|
|
- (_r)->sring->req_event = (_r)->req_cons + 1; \
|
|
- virt_mb(); \
|
|
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
|
|
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
|
|
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
|
|
+ if (_work_to_do) break; \
|
|
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
|
|
+ virt_mb(); \
|
|
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
|
|
} while (0)
|
|
|
|
-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
|
|
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
|
|
- if (_work_to_do) break; \
|
|
- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
|
|
- virt_mb(); \
|
|
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
|
|
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
|
|
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
|
|
+ if (_work_to_do) break; \
|
|
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
|
|
+ virt_mb(); \
|
|
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
|
|
} while (0)
|
|
|
|
|
|
diff --git a/ipc/shm.c b/ipc/shm.c
|
|
index ce1ca9f7c6e97..984addb5aeb5e 100644
|
|
--- a/ipc/shm.c
|
|
+++ b/ipc/shm.c
|
|
@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
|
|
struct pid *shm_lprid;
|
|
struct user_struct *mlock_user;
|
|
|
|
- /* The task created the shm object. NULL if the task is dead. */
|
|
+ /*
|
|
+ * The task created the shm object, for
|
|
+ * task_lock(shp->shm_creator)
|
|
+ */
|
|
struct task_struct *shm_creator;
|
|
- struct list_head shm_clist; /* list by creator */
|
|
+
|
|
+ /*
|
|
+ * List by creator. task_lock(->shm_creator) required for read/write.
|
|
+ * If list_empty(), then the creator is dead already.
|
|
+ */
|
|
+ struct list_head shm_clist;
|
|
+ struct ipc_namespace *ns;
|
|
} __randomize_layout;
|
|
|
|
/* shm_mode upper byte flags */
|
|
@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
|
|
struct shmid_kernel *shp;
|
|
|
|
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
|
|
+ WARN_ON(ns != shp->ns);
|
|
|
|
if (shp->shm_nattch) {
|
|
shp->shm_perm.mode |= SHM_DEST;
|
|
@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
|
|
kvfree(shp);
|
|
}
|
|
|
|
-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
|
|
+/*
|
|
+ * It has to be called with shp locked.
|
|
+ * It must be called before ipc_rmid()
|
|
+ */
|
|
+static inline void shm_clist_rm(struct shmid_kernel *shp)
|
|
{
|
|
- list_del(&s->shm_clist);
|
|
- ipc_rmid(&shm_ids(ns), &s->shm_perm);
|
|
+ struct task_struct *creator;
|
|
+
|
|
+ /* ensure that shm_creator does not disappear */
|
|
+ rcu_read_lock();
|
|
+
|
|
+ /*
|
|
+ * A concurrent exit_shm may do a list_del_init() as well.
|
|
+ * Just do nothing if exit_shm already did the work
|
|
+ */
|
|
+ if (!list_empty(&shp->shm_clist)) {
|
|
+ /*
|
|
+ * shp->shm_creator is guaranteed to be valid *only*
|
|
+ * if shp->shm_clist is not empty.
|
|
+ */
|
|
+ creator = shp->shm_creator;
|
|
+
|
|
+ task_lock(creator);
|
|
+ /*
|
|
+ * list_del_init() is a nop if the entry was already removed
|
|
+ * from the list.
|
|
+ */
|
|
+ list_del_init(&shp->shm_clist);
|
|
+ task_unlock(creator);
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+}
|
|
+
|
|
+static inline void shm_rmid(struct shmid_kernel *s)
|
|
+{
|
|
+ shm_clist_rm(s);
|
|
+ ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
|
|
}
|
|
|
|
|
|
@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
|
|
shm_file = shp->shm_file;
|
|
shp->shm_file = NULL;
|
|
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
- shm_rmid(ns, shp);
|
|
+ shm_rmid(shp);
|
|
shm_unlock(shp);
|
|
if (!is_file_hugepages(shm_file))
|
|
shmem_lock(shm_file, 0, shp->mlock_user);
|
|
@@ -306,10 +349,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
|
|
*
|
|
* 2) sysctl kernel.shm_rmid_forced is set to 1.
|
|
*/
|
|
-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
|
|
+static bool shm_may_destroy(struct shmid_kernel *shp)
|
|
{
|
|
return (shp->shm_nattch == 0) &&
|
|
- (ns->shm_rmid_forced ||
|
|
+ (shp->ns->shm_rmid_forced ||
|
|
(shp->shm_perm.mode & SHM_DEST));
|
|
}
|
|
|
|
@@ -340,7 +383,7 @@ static void shm_close(struct vm_area_struct *vma)
|
|
ipc_update_pid(&shp->shm_lprid, task_tgid(current));
|
|
shp->shm_dtim = ktime_get_real_seconds();
|
|
shp->shm_nattch--;
|
|
- if (shm_may_destroy(ns, shp))
|
|
+ if (shm_may_destroy(shp))
|
|
shm_destroy(ns, shp);
|
|
else
|
|
shm_unlock(shp);
|
|
@@ -361,10 +404,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
|
|
*
|
|
* As shp->* are changed under rwsem, it's safe to skip shp locking.
|
|
*/
|
|
- if (shp->shm_creator != NULL)
|
|
+ if (!list_empty(&shp->shm_clist))
|
|
return 0;
|
|
|
|
- if (shm_may_destroy(ns, shp)) {
|
|
+ if (shm_may_destroy(shp)) {
|
|
shm_lock_by_ptr(shp);
|
|
shm_destroy(ns, shp);
|
|
}
|
|
@@ -382,48 +425,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
|
|
/* Locking assumes this will only be called with task == current */
|
|
void exit_shm(struct task_struct *task)
|
|
{
|
|
- struct ipc_namespace *ns = task->nsproxy->ipc_ns;
|
|
- struct shmid_kernel *shp, *n;
|
|
+ for (;;) {
|
|
+ struct shmid_kernel *shp;
|
|
+ struct ipc_namespace *ns;
|
|
|
|
- if (list_empty(&task->sysvshm.shm_clist))
|
|
- return;
|
|
+ task_lock(task);
|
|
+
|
|
+ if (list_empty(&task->sysvshm.shm_clist)) {
|
|
+ task_unlock(task);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
|
|
+ shm_clist);
|
|
|
|
- /*
|
|
- * If kernel.shm_rmid_forced is not set then only keep track of
|
|
- * which shmids are orphaned, so that a later set of the sysctl
|
|
- * can clean them up.
|
|
- */
|
|
- if (!ns->shm_rmid_forced) {
|
|
- down_read(&shm_ids(ns).rwsem);
|
|
- list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
|
|
- shp->shm_creator = NULL;
|
|
/*
|
|
- * Only under read lock but we are only called on current
|
|
- * so no entry on the list will be shared.
|
|
+ * 1) Get pointer to the ipc namespace. It is worth to say
|
|
+ * that this pointer is guaranteed to be valid because
|
|
+ * shp lifetime is always shorter than namespace lifetime
|
|
+ * in which shp lives.
|
|
+ * We taken task_lock it means that shp won't be freed.
|
|
*/
|
|
- list_del(&task->sysvshm.shm_clist);
|
|
- up_read(&shm_ids(ns).rwsem);
|
|
- return;
|
|
- }
|
|
+ ns = shp->ns;
|
|
|
|
- /*
|
|
- * Destroy all already created segments, that were not yet mapped,
|
|
- * and mark any mapped as orphan to cover the sysctl toggling.
|
|
- * Destroy is skipped if shm_may_destroy() returns false.
|
|
- */
|
|
- down_write(&shm_ids(ns).rwsem);
|
|
- list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
|
|
- shp->shm_creator = NULL;
|
|
+ /*
|
|
+ * 2) If kernel.shm_rmid_forced is not set then only keep track of
|
|
+ * which shmids are orphaned, so that a later set of the sysctl
|
|
+ * can clean them up.
|
|
+ */
|
|
+ if (!ns->shm_rmid_forced)
|
|
+ goto unlink_continue;
|
|
|
|
- if (shm_may_destroy(ns, shp)) {
|
|
- shm_lock_by_ptr(shp);
|
|
- shm_destroy(ns, shp);
|
|
+ /*
|
|
+ * 3) get a reference to the namespace.
|
|
+ * The refcount could be already 0. If it is 0, then
|
|
+ * the shm objects will be free by free_ipc_work().
|
|
+ */
|
|
+ ns = get_ipc_ns_not_zero(ns);
|
|
+ if (!ns) {
|
|
+unlink_continue:
|
|
+ list_del_init(&shp->shm_clist);
|
|
+ task_unlock(task);
|
|
+ continue;
|
|
}
|
|
- }
|
|
|
|
- /* Remove the list head from any segments still attached. */
|
|
- list_del(&task->sysvshm.shm_clist);
|
|
- up_write(&shm_ids(ns).rwsem);
|
|
+ /*
|
|
+ * 4) get a reference to shp.
|
|
+ * This cannot fail: shm_clist_rm() is called before
|
|
+ * ipc_rmid(), thus the refcount cannot be 0.
|
|
+ */
|
|
+ WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
|
|
+
|
|
+ /*
|
|
+ * 5) unlink the shm segment from the list of segments
|
|
+ * created by current.
|
|
+ * This must be done last. After unlinking,
|
|
+ * only the refcounts obtained above prevent IPC_RMID
|
|
+ * from destroying the segment or the namespace.
|
|
+ */
|
|
+ list_del_init(&shp->shm_clist);
|
|
+
|
|
+ task_unlock(task);
|
|
+
|
|
+ /*
|
|
+ * 6) we have all references
|
|
+ * Thus lock & if needed destroy shp.
|
|
+ */
|
|
+ down_write(&shm_ids(ns).rwsem);
|
|
+ shm_lock_by_ptr(shp);
|
|
+ /*
|
|
+ * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
|
|
+ * safe to call ipc_rcu_putref here
|
|
+ */
|
|
+ ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
|
|
+
|
|
+ if (ipc_valid_object(&shp->shm_perm)) {
|
|
+ if (shm_may_destroy(shp))
|
|
+ shm_destroy(ns, shp);
|
|
+ else
|
|
+ shm_unlock(shp);
|
|
+ } else {
|
|
+ /*
|
|
+ * Someone else deleted the shp from namespace
|
|
+ * idr/kht while we have waited.
|
|
+ * Just unlock and continue.
|
|
+ */
|
|
+ shm_unlock(shp);
|
|
+ }
|
|
+
|
|
+ up_write(&shm_ids(ns).rwsem);
|
|
+ put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
|
|
+ }
|
|
}
|
|
|
|
static vm_fault_t shm_fault(struct vm_fault *vmf)
|
|
@@ -680,7 +772,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
|
|
if (error < 0)
|
|
goto no_id;
|
|
|
|
+ shp->ns = ns;
|
|
+
|
|
+ task_lock(current);
|
|
list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist);
|
|
+ task_unlock(current);
|
|
|
|
/*
|
|
* shmid gets reported as "inode#" in /proc/pid/maps.
|
|
@@ -1575,7 +1671,8 @@ out_nattch:
|
|
down_write(&shm_ids(ns).rwsem);
|
|
shp = shm_lock(ns, shmid);
|
|
shp->shm_nattch--;
|
|
- if (shm_may_destroy(ns, shp))
|
|
+
|
|
+ if (shm_may_destroy(shp))
|
|
shm_destroy(ns, shp);
|
|
else
|
|
shm_unlock(shp);
|
|
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 69c4cd472def3..6cafb2e910a11 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -676,7 +676,7 @@ static int load_image_and_restore(void)
goto Unlock;

error = swsusp_read(&flags);
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
if (!error)
hibernation_restore(flags & SF_PLATFORM_MODE);

@@ -871,7 +871,7 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
goto Unlock;
}

@@ -907,7 +907,7 @@ static int software_resume(void)
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
goto Finish;
}

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 35e9a01b54800..1d514a1a31554 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1423,14 +1423,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, entry, event);

- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
- !filter_match_preds(file->filter, entry))) {
- __trace_event_discard_commit(buffer, event);
- return true;
- }
+ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_FILTERED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+ goto discard;
+
+ if (file->flags & EVENT_FILE_FL_FILTERED &&
+ !filter_match_preds(file->filter, entry))
+ goto discard;
+
+ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
+ trace_event_ignore_this_pid(file))
+ goto discard;

return false;
+ discard:
+ __trace_event_discard_commit(buffer, event);
+ return true;
}

/**
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e31ee325dad16..4acc77e049e5f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2247,12 +2247,19 @@ static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
+ struct trace_pid_list *pid_list;
struct trace_event_file *file;

file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
return NULL;

+ pid_list = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+
+ if (pid_list)
+ file->flags |= EVENT_FILE_FL_PID_FILTER;
+
file->event_call = call;
file->tr = tr;
atomic_set(&file->sm_ref, 0);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index b515db036becc..efb51a23a14f2 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1299,6 +1299,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
return 0;

list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
if (err) {
uprobe_perf_close(call, event);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index cd7c0429cddf8..796d95797ab40 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -177,9 +177,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
if (err)
goto out_unregister_netdev;

- /* Account for reference in struct vlan_dev_priv */
- dev_hold(real_dev);
-
vlan_stacked_transfer_operstate(real_dev, dev, vlan);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 415a29d42cdf0..589615ec490bb 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -583,6 +583,9 @@ static int vlan_dev_init(struct net_device *dev)
if (!vlan->vlan_pcpu_stats)
return -ENOMEM;

+ /* Get vlan's reference to real_dev */
+ dev_hold(real_dev);
+
return 0;
}

diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 858bb10d8341e..4d69b3de980a6 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -839,15 +839,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
/* if any FIB entries reference this nexthop, any dst entries
* need to be regenerated
*/
-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
+static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
+ struct nexthop *replaced_nh)
{
struct fib6_info *f6i;
+ struct nh_group *nhg;
+ int i;

if (!list_empty(&nh->fi_list))
rt_cache_flush(net);

list_for_each_entry(f6i, &nh->f6i_list, nh_list)
ipv6_stub->fib6_update_sernum(net, f6i);
+
+ /* if an IPv6 group was replaced, we have to release all old
+ * dsts to make sure all refcounts are released
+ */
+ if (!replaced_nh->is_group)
+ return;
+
+ /* new dsts must use only the new nexthop group */
+ synchronize_net();
+
+ nhg = rtnl_dereference(replaced_nh->nh_grp);
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
+
+ if (nhi->family == AF_INET6)
+ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
+ }
}

static int replace_nexthop_grp(struct net *net, struct nexthop *old,
@@ -994,7 +1015,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
err = replace_nexthop_single(net, old, new, extack);

if (!err) {
- nh_rt_cache_flush(net, old);
+ nh_rt_cache_flush(net, old, new);

__remove_nexthop(net, new, NULL);
nexthop_put(new);
@@ -1231,11 +1252,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
/* sets nh_dev if successful */
err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
extack);
- if (err)
+ if (err) {
+ /* IPv6 is not enabled, don't call fib6_nh_release */
+ if (err == -EAFNOSUPPORT)
+ goto out;
ipv6_stub->fib6_nh_release(fib6_nh);
- else
+ } else {
nh->nh_flags = fib6_nh->fib_nh_flags;
-
+ }
+out:
return err;
}

diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ee6c38a73325d..44be7a5a13911 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -341,8 +341,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;

if (tcp_in_slow_start(tp)) {
- if (hystart && after(ack, ca->end_seq))
- bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
@@ -384,6 +382,9 @@ static void hystart_update(struct sock *sk, u32 delay)
if (ca->found & hystart_detect)
return;

+ if (after(tp->snd_una, ca->end_seq))
+ bictcp_hystart_reset(sk);
+
if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock();

diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 14ac1d9112877..942da168f18fb 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -955,6 +955,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
.fib6_nh_init = fib6_nh_init,
.fib6_nh_release = fib6_nh_release,
+ .fib6_nh_release_dsts = fib6_nh_release_dsts,
.fib6_update_sernum = fib6_update_sernum_stub,
.fib6_rt_update = fib6_rt_update,
.ip6_del_rt = ip6_del_rt,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index fc913f09606db..d847aa32628da 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -192,7 +192,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
- IPCB(skb)->flags |= IPSKB_REROUTED;
+ IP6CB(skb)->flags |= IP6SKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index daa876c6ae8db..f36db3dd97346 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3585,6 +3585,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
fib_nh_common_release(&fib6_nh->nh_common);
}

+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
+{
+ int cpu;
+
+ if (!fib6_nh->rt6i_pcpu)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info *pcpu_rt, **ppcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+ pcpu_rt = xchg(ppcpu_rt, NULL);
+ if (pcpu_rt) {
+ dst_dev_put(&pcpu_rt->dst);
+ dst_release(&pcpu_rt->dst);
+ }
+ }
+}
+
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
gfp_t gfp_flags,
struct netlink_ext_ack *extack)
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 0187e65176c05..114ef47db76d3 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -18,6 +18,8 @@
#include "internal.h"
#include "ncsi-pkt.h"

+static const int padding_bytes = 26;
+
u32 ncsi_calculate_checksum(unsigned char *data, int len)
{
u32 checksum = 0;
@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
{
struct ncsi_cmd_oem_pkt *cmd;
unsigned int len;
+ int payload;
+ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
+ * requires payload to be padded with 0 to
+ * 32-bit boundary before the checksum field.
+ * Ensure the padding bytes are accounted for in
+ * skb allocation
+ */

+ payload = ALIGN(nca->payload, 4);
len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ len += max(payload, padding_bytes);

cmd = skb_put_zero(skb, len);
memcpy(&cmd->mfr_id, nca->data, nca->payload);
@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
struct net_device *dev = nd->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
+ int payload;
int len = hlen + tlen;
struct sk_buff *skb;
struct ncsi_request *nr;
@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
return NULL;

/* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
+ * Payload needs padding so that the checksum field following payload is
+ * aligned to 32-bit boundary.
* The packet needs padding if its payload is less than 26 bytes to
* meet 64 bytes minimal ethernet frame length.
*/
len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ payload = ALIGN(nca->payload, 4);
+ len += max(payload, padding_bytes);

/* Allocate skb */
skb = alloc_skb(len, GFP_ATOMIC);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 89aa1fc334b19..ccd6af1440745 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1982,7 +1982,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, pkts;
- int conn_reuse_mode;
struct sock *sk;

/* Already marked as IPVS request or reply? */
@@ -2059,15 +2058,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, af, skb, &iph);

- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
bool old_ct = false, resched = false;

if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+ } else if (conn_reuse_mode &&
+ is_new_conn_expected(cp, conn_reuse_mode)) {
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 1d0aa9e6044bf..b8ecb002e6238 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -473,6 +473,11 @@ static int nci_open_device(struct nci_dev *ndev)

mutex_lock(&ndev->req_lock);

+ if (test_bit(NCI_UNREG, &ndev->flags)) {
+ rc = -ENODEV;
+ goto done;
+ }
+
if (test_bit(NCI_UP, &ndev->flags)) {
rc = -EALREADY;
goto done;
@@ -536,6 +541,10 @@ done:
static int nci_close_device(struct nci_dev *ndev)
{
nci_req_cancel(ndev, ENODEV);
+
+ /* This mutex needs to be held as a barrier for
+ * caller nci_unregister_device
+ */
mutex_lock(&ndev->req_lock);

if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
@@ -573,8 +582,8 @@ static int nci_close_device(struct nci_dev *ndev)
/* Flush cmd wq */
flush_workqueue(ndev->cmd_wq);

- /* Clear flags */
- ndev->flags = 0;
+ /* Clear flags except NCI_UNREG */
+ ndev->flags &= BIT(NCI_UNREG);

mutex_unlock(&ndev->req_lock);

@@ -1256,6 +1265,12 @@ void nci_unregister_device(struct nci_dev *ndev)
{
struct nci_conn_info *conn_info, *n;

+ /* This set_bit is not protected with specialized barrier,
+ * However, it is fine because the mutex_lock(&ndev->req_lock);
+ * in nci_close_device() will help to emit one.
+ */
+ set_bit(NCI_UNREG, &ndev->flags);
+
nci_close_device(ndev);

destroy_workqueue(ndev->cmd_wq);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 6b0f09c5b195f..5e1493f8deba7 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1658,8 +1658,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
static int smc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
+ bool do_shutdown = true;
struct smc_sock *smc;
int rc = -EINVAL;
+ int old_state;
int rc1 = 0;

smc = smc_sk(sk);
@@ -1686,7 +1688,11 @@ static int smc_shutdown(struct socket *sock, int how)
}
switch (how) {
case SHUT_RDWR: /* shutdown in both directions */
+ old_state = sk->sk_state;
rc = smc_close_active(smc);
+ if (old_state == SMC_ACTIVE &&
+ sk->sk_state == SMC_PEERCLOSEWAIT1)
+ do_shutdown = false;
break;
case SHUT_WR:
rc = smc_close_shutdown_write(smc);
@@ -1696,7 +1702,7 @@ static int smc_shutdown(struct socket *sock, int how)
/* nothing more to do because peer is not involved */
break;
}
- if (smc->clcsock)
+ if (do_shutdown && smc->clcsock)
rc1 = kernel_sock_shutdown(smc->clcsock, how);
/* map sock_shutdown_cmd constants to sk_shutdown value range */
sk->sk_shutdown |= how + 1;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index fc06720b53c14..2eabf39dee74d 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -218,6 +218,12 @@ again:
if (rc)
break;
sk->sk_state = SMC_PEERCLOSEWAIT1;
+
+ /* actively shutdown clcsock before peer close it,
+ * prevent peer from entering TIME_WAIT state.
+ */
+ if (smc->clcsock && smc->clcsock->sk)
+ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
} else {
/* peer event has changed the state */
goto again;
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
|
|
index d4ff377eb3a34..6d636bdcaa5a3 100644
|
|
--- a/sound/pci/ctxfi/ctamixer.c
|
|
+++ b/sound/pci/ctxfi/ctamixer.c
|
|
@@ -23,16 +23,15 @@
|
|
|
|
#define BLANK_SLOT 4094
|
|
|
|
-static int amixer_master(struct rsc *rsc)
|
|
+static void amixer_master(struct rsc *rsc)
|
|
{
|
|
rsc->conj = 0;
|
|
- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
|
|
+ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
|
|
}
|
|
|
|
-static int amixer_next_conj(struct rsc *rsc)
|
|
+static void amixer_next_conj(struct rsc *rsc)
|
|
{
|
|
rsc->conj++;
|
|
- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
|
|
}
|
|
|
|
static int amixer_index(const struct rsc *rsc)
|
|
@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
|
|
|
|
/* SUM resource management */
|
|
|
|
-static int sum_master(struct rsc *rsc)
|
|
+static void sum_master(struct rsc *rsc)
|
|
{
|
|
rsc->conj = 0;
|
|
- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
|
|
+ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
|
|
}
|
|
|
|
-static int sum_next_conj(struct rsc *rsc)
|
|
+static void sum_next_conj(struct rsc *rsc)
|
|
{
|
|
rsc->conj++;
|
|
- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
|
|
}
|
|
|
|
static int sum_index(const struct rsc *rsc)
|
|
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
|
|
index 27441d498968d..b5e1296af09ee 100644
|
|
--- a/sound/pci/ctxfi/ctdaio.c
|
|
+++ b/sound/pci/ctxfi/ctdaio.c
|
|
@@ -51,12 +51,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
|
|
[SPDIFIO] = {.left = 0x05, .right = 0x85},
|
|
};
|
|
|
|
-static int daio_master(struct rsc *rsc)
|
|
+static void daio_master(struct rsc *rsc)
|
|
{
|
|
/* Actually, this is not the resource index of DAIO.
|
|
* For DAO, it is the input mapper index. And, for DAI,
|
|
* it is the output time-slot index. */
|
|
- return rsc->conj = rsc->idx;
|
|
+ rsc->conj = rsc->idx;
|
|
}
|
|
|
|
static int daio_index(const struct rsc *rsc)
|
|
@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
|
|
return rsc->conj;
|
|
}
|
|
|
|
-static int daio_out_next_conj(struct rsc *rsc)
|
|
+static void daio_out_next_conj(struct rsc *rsc)
|
|
{
|
|
- return rsc->conj += 2;
|
|
+ rsc->conj += 2;
|
|
}
|
|
|
|
-static int daio_in_next_conj_20k1(struct rsc *rsc)
|
|
+static void daio_in_next_conj_20k1(struct rsc *rsc)
|
|
{
|
|
- return rsc->conj += 0x200;
|
|
+ rsc->conj += 0x200;
|
|
}
|
|
|
|
-static int daio_in_next_conj_20k2(struct rsc *rsc)
|
|
+static void daio_in_next_conj_20k2(struct rsc *rsc)
|
|
{
|
|
- return rsc->conj += 0x100;
|
|
+ rsc->conj += 0x100;
|
|
}
|
|
|
|
static const struct rsc_ops daio_out_rsc_ops = {
|
|
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
|
|
index 0bb5696e44b37..ec5f597b580ad 100644
|
|
--- a/sound/pci/ctxfi/ctresource.c
|
|
+++ b/sound/pci/ctxfi/ctresource.c
|
|
@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
|
|
return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
|
|
}
|
|
|
|
-static int rsc_next_conj(struct rsc *rsc)
|
|
+static void rsc_next_conj(struct rsc *rsc)
|
|
{
|
|
unsigned int i;
|
|
for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
|
|
i++;
|
|
rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
|
|
- return rsc->conj;
|
|
}
|
|
|
|
-static int rsc_master(struct rsc *rsc)
|
|
+static void rsc_master(struct rsc *rsc)
|
|
{
|
|
- return rsc->conj = rsc->idx;
|
|
+ rsc->conj = rsc->idx;
|
|
}
|
|
|
|
static const struct rsc_ops rsc_generic_ops = {
|
|
diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
|
|
index 93e47488a1c1c..92146054af582 100644
|
|
--- a/sound/pci/ctxfi/ctresource.h
|
|
+++ b/sound/pci/ctxfi/ctresource.h
|
|
@@ -39,8 +39,8 @@ struct rsc {
|
|
};
|
|
|
|
struct rsc_ops {
|
|
- int (*master)(struct rsc *rsc); /* Move to master resource */
|
|
- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
|
|
+ void (*master)(struct rsc *rsc); /* Move to master resource */
|
|
+ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
|
|
int (*index)(const struct rsc *rsc); /* Return the index of resource */
|
|
/* Return the output slot number */
|
|
int (*output_slot)(const struct rsc *rsc);
|
|
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
|
|
index 37c18ce84974a..7d2bda0c3d3de 100644
|
|
--- a/sound/pci/ctxfi/ctsrc.c
|
|
+++ b/sound/pci/ctxfi/ctsrc.c
|
|
@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
|
|
|
|
/* SRCIMP resource manager operations */
|
|
|
|
-static int srcimp_master(struct rsc *rsc)
|
|
+static void srcimp_master(struct rsc *rsc)
|
|
{
|
|
rsc->conj = 0;
|
|
- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
|
|
+ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
|
|
}
|
|
|
|
-static int srcimp_next_conj(struct rsc *rsc)
|
|
+static void srcimp_next_conj(struct rsc *rsc)
|
|
{
|
|
rsc->conj++;
|
|
- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
|
|
}
|
|
|
|
static int srcimp_index(const struct rsc *rsc)
|
|
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index 745cc9dd14f38..16f26dd2d59ed 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -443,7 +443,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
session->port_id = be_id;
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
} else {
- session->port_id = -1;
+ if (session->port_id == be_id) {
+ session->port_id = -1;
+ return 0;
+ }
+
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
}

diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index c367609433bfc..21f859e56b700 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2777,6 +2777,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
/* remove dynamic controls from the component driver */
int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
{
+ struct snd_card *card = comp->card->snd_card;
struct snd_soc_dobj *dobj, *next_dobj;
int pass = SOC_TPLG_PASS_END;

@@ -2784,6 +2785,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
while (pass >= SOC_TPLG_PASS_START) {

/* remove mixer controls */
+ down_write(&card->controls_rwsem);
list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
list) {

@@ -2827,6 +2829,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
break;
}
}
+ up_write(&card->controls_rwsem);
pass--;
}