* Address stability issues, update configuration.
* Cleanup - remove patches that are no longer needed.
* Add upstream patches.

The board survived regular stress testing, while the previous config/version hung instantly.
diff --git a/Makefile b/Makefile
index 30600e309c73..e16d2e58ed4b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 15
+SUBLEVEL = 16
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 15b75005bc34..3fa1b962dc27 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
*
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
/*
* For platforms that support on 65bit VA we limit the context bits
*/
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
index f2dfcd50a2d3..33aee7490cbb 100644
--- a/arch/powerpc/include/asm/xive-regs.h
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -39,6 +39,7 @@
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
+#define XIVE_ESB_INVALID 0xFF
/*
* Thread Management (aka "TM") registers
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index f5fadbd2533a..9651ca061828 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which, bool *state)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+ u8 pq;
switch (which) {
case IRQCHIP_STATE_ACTIVE:
- *state = !xd->stale_p &&
- (xd->saved_p ||
- !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
+ pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+ /*
+ * The esb value being all 1's means we couldn't get
+ * the PQ state of the interrupt through mmio. It may
+ * happen, for example when querying a PHB interrupt
+ * while the PHB is in an error state. We consider the
+ * interrupt to be inactive in that case.
+ */
+ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
return 0;
default:
return -EINVAL;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 2bbab0230aeb..d287837ed755 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3d4f5775a4ba..25235ef630c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -9,16 +9,16 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
return 0;
/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
|
|
index abfbac49b8e8..968d9b2705d0 100644
|
|
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
|
|
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
|
|
@@ -427,7 +427,7 @@ struct get_pages_work {
|
|
|
|
static struct sg_table *
|
|
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
|
|
- struct page **pvec, int num_pages)
|
|
+ struct page **pvec, unsigned long num_pages)
|
|
{
|
|
unsigned int max_segment = i915_sg_segment_size();
|
|
struct sg_table *st;
|
|
@@ -473,9 +473,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
|
{
|
|
struct get_pages_work *work = container_of(_work, typeof(*work), work);
|
|
struct drm_i915_gem_object *obj = work->obj;
|
|
- const int npages = obj->base.size >> PAGE_SHIFT;
|
|
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
|
|
+ unsigned long pinned;
|
|
struct page **pvec;
|
|
- int pinned, ret;
|
|
+ int ret;
|
|
|
|
ret = -ENOMEM;
|
|
pinned = 0;
|
|
@@ -578,7 +579,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
|
|
|
|
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
|
|
{
|
|
- const int num_pages = obj->base.size >> PAGE_SHIFT;
|
|
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
|
|
struct mm_struct *mm = obj->userptr.mm->mm;
|
|
struct page **pvec;
|
|
struct sg_table *pages;
|
|
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
|
|
index 9dd8c299cb2d..798e1b024406 100644
|
|
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
|
|
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
|
|
@@ -300,8 +300,8 @@ struct intel_engine_cs {
|
|
u8 class;
|
|
u8 instance;
|
|
|
|
- u8 uabi_class;
|
|
- u8 uabi_instance;
|
|
+ u16 uabi_class;
|
|
+ u16 uabi_instance;
|
|
|
|
u32 context_size;
|
|
u32 mmio_base;
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
index b1a7a8b9b46a..f614646ed3f9 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
@@ -1178,6 +1178,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
|
|
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
|
|
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
|
|
do {
|
|
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
|
|
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
|
|
|
|
iter->dma += I915_GTT_PAGE_SIZE;
|
|
@@ -1657,6 +1658,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
|
|
|
|
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
|
|
do {
|
|
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
|
|
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
|
|
|
|
iter.dma += I915_GTT_PAGE_SIZE;
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
index 1c67ac434e10..5906c80c4b2c 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
|
|
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
|
|
struct drm_file *file)
|
|
{
|
|
+ struct panfrost_file_priv *priv = file->driver_priv;
|
|
struct panfrost_gem_object *bo;
|
|
struct drm_panfrost_create_bo *args = data;
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
|
|
if (!args->size || args->pad ||
|
|
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
|
|
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
|
|
if (IS_ERR(bo))
|
|
return PTR_ERR(bo);
|
|
|
|
- args->offset = bo->node.start << PAGE_SHIFT;
|
|
+ mapping = panfrost_gem_mapping_get(bo, priv);
|
|
+ if (!mapping) {
|
|
+ drm_gem_object_put_unlocked(&bo->base.base);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
|
|
+ panfrost_gem_mapping_put(mapping);
|
|
|
|
return 0;
|
|
}
|
|
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
|
|
struct drm_panfrost_submit *args,
|
|
struct panfrost_job *job)
|
|
{
|
|
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
|
|
+ struct panfrost_gem_object *bo;
|
|
+ unsigned int i;
|
|
+ int ret;
|
|
+
|
|
job->bo_count = args->bo_handle_count;
|
|
|
|
if (!job->bo_count)
|
|
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
|
|
if (!job->implicit_fences)
|
|
return -ENOMEM;
|
|
|
|
- return drm_gem_objects_lookup(file_priv,
|
|
- (void __user *)(uintptr_t)args->bo_handles,
|
|
- job->bo_count, &job->bos);
|
|
+ ret = drm_gem_objects_lookup(file_priv,
|
|
+ (void __user *)(uintptr_t)args->bo_handles,
|
|
+ job->bo_count, &job->bos);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ job->mappings = kvmalloc_array(job->bo_count,
|
|
+ sizeof(struct panfrost_gem_mapping *),
|
|
+ GFP_KERNEL | __GFP_ZERO);
|
|
+ if (!job->mappings)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ for (i = 0; i < job->bo_count; i++) {
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
+
|
|
+ bo = to_panfrost_bo(job->bos[i]);
|
|
+ mapping = panfrost_gem_mapping_get(bo, priv);
|
|
+ if (!mapping) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ job->mappings[i] = mapping;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
@@ -320,7 +357,9 @@ out:
|
|
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
|
|
struct drm_file *file_priv)
|
|
{
|
|
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
|
|
struct drm_panfrost_get_bo_offset *args = data;
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
struct drm_gem_object *gem_obj;
|
|
struct panfrost_gem_object *bo;
|
|
|
|
@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
|
|
}
|
|
bo = to_panfrost_bo(gem_obj);
|
|
|
|
- args->offset = bo->node.start << PAGE_SHIFT;
|
|
-
|
|
+ mapping = panfrost_gem_mapping_get(bo, priv);
|
|
drm_gem_object_put_unlocked(gem_obj);
|
|
+
|
|
+ if (!mapping)
|
|
+ return -EINVAL;
|
|
+
|
|
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
|
|
+ panfrost_gem_mapping_put(mapping);
|
|
return 0;
|
|
}
|
|
|
|
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
|
|
struct drm_file *file_priv)
|
|
{
|
|
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
|
|
struct drm_panfrost_madvise *args = data;
|
|
struct panfrost_device *pfdev = dev->dev_private;
|
|
struct drm_gem_object *gem_obj;
|
|
+ struct panfrost_gem_object *bo;
|
|
+ int ret = 0;
|
|
|
|
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
|
|
if (!gem_obj) {
|
|
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
|
|
return -ENOENT;
|
|
}
|
|
|
|
+ bo = to_panfrost_bo(gem_obj);
|
|
+
|
|
mutex_lock(&pfdev->shrinker_lock);
|
|
+ mutex_lock(&bo->mappings.lock);
|
|
+ if (args->madv == PANFROST_MADV_DONTNEED) {
|
|
+ struct panfrost_gem_mapping *first;
|
|
+
|
|
+ first = list_first_entry(&bo->mappings.list,
|
|
+ struct panfrost_gem_mapping,
|
|
+ node);
|
|
+
|
|
+ /*
|
|
+ * If we want to mark the BO purgeable, there must be only one
|
|
+ * user: the caller FD.
|
|
+ * We could do something smarter and mark the BO purgeable only
|
|
+ * when all its users have marked it purgeable, but globally
|
|
+ * visible/shared BOs are likely to never be marked purgeable
|
|
+ * anyway, so let's not bother.
|
|
+ */
|
|
+ if (!list_is_singular(&bo->mappings.list) ||
|
|
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
|
|
+ ret = -EINVAL;
|
|
+ goto out_unlock_mappings;
|
|
+ }
|
|
+ }
|
|
+
|
|
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
|
|
|
|
if (args->retained) {
|
|
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
|
|
-
|
|
if (args->madv == PANFROST_MADV_DONTNEED)
|
|
list_add_tail(&bo->base.madv_list,
|
|
&pfdev->shrinker_list);
|
|
else if (args->madv == PANFROST_MADV_WILLNEED)
|
|
list_del_init(&bo->base.madv_list);
|
|
}
|
|
+
|
|
+out_unlock_mappings:
|
|
+ mutex_unlock(&bo->mappings.lock);
|
|
mutex_unlock(&pfdev->shrinker_lock);
|
|
|
|
drm_gem_object_put_unlocked(gem_obj);
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
int panfrost_unstable_ioctl_check(void)
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
|
|
index 92a95210a899..77c3a3855c68 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
|
|
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
|
|
list_del_init(&bo->base.madv_list);
|
|
mutex_unlock(&pfdev->shrinker_lock);
|
|
|
|
+ /*
|
|
+ * If we still have mappings attached to the BO, there's a problem in
|
|
+ * our refcounting.
|
|
+ */
|
|
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
|
|
+
|
|
if (bo->sgts) {
|
|
int i;
|
|
int n_sgt = bo->base.base.size / SZ_2M;
|
|
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
|
|
drm_gem_shmem_free_object(obj);
|
|
}
|
|
|
|
+struct panfrost_gem_mapping *
|
|
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
|
|
+ struct panfrost_file_priv *priv)
|
|
+{
|
|
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
|
|
+
|
|
+ mutex_lock(&bo->mappings.lock);
|
|
+ list_for_each_entry(iter, &bo->mappings.list, node) {
|
|
+ if (iter->mmu == &priv->mmu) {
|
|
+ kref_get(&iter->refcount);
|
|
+ mapping = iter;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&bo->mappings.lock);
|
|
+
|
|
+ return mapping;
|
|
+}
|
|
+
|
|
+static void
|
|
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
|
|
+{
|
|
+ struct panfrost_file_priv *priv;
|
|
+
|
|
+ if (mapping->active)
|
|
+ panfrost_mmu_unmap(mapping);
|
|
+
|
|
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
|
|
+ spin_lock(&priv->mm_lock);
|
|
+ if (drm_mm_node_allocated(&mapping->mmnode))
|
|
+ drm_mm_remove_node(&mapping->mmnode);
|
|
+ spin_unlock(&priv->mm_lock);
|
|
+}
|
|
+
|
|
+static void panfrost_gem_mapping_release(struct kref *kref)
|
|
+{
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
+
|
|
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
|
|
+
|
|
+ panfrost_gem_teardown_mapping(mapping);
|
|
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
|
|
+ kfree(mapping);
|
|
+}
|
|
+
|
|
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
|
|
+{
|
|
+ if (!mapping)
|
|
+ return;
|
|
+
|
|
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
|
|
+}
|
|
+
|
|
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
|
|
+{
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
+
|
|
+ mutex_lock(&bo->mappings.lock);
|
|
+ list_for_each_entry(mapping, &bo->mappings.list, node)
|
|
+ panfrost_gem_teardown_mapping(mapping);
|
|
+ mutex_unlock(&bo->mappings.lock);
|
|
+}
|
|
+
|
|
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
|
|
{
|
|
int ret;
|
|
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
|
|
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
|
|
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
|
|
struct panfrost_file_priv *priv = file_priv->driver_priv;
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
+
|
|
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
|
|
+ if (!mapping)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ INIT_LIST_HEAD(&mapping->node);
|
|
+ kref_init(&mapping->refcount);
|
|
+ drm_gem_object_get(obj);
|
|
+ mapping->obj = bo;
|
|
|
|
/*
|
|
* Executable buffers cannot cross a 16MB boundary as the program
|
|
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
|
|
else
|
|
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
|
|
|
|
- bo->mmu = &priv->mmu;
|
|
+ mapping->mmu = &priv->mmu;
|
|
spin_lock(&priv->mm_lock);
|
|
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
|
|
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
|
|
size >> PAGE_SHIFT, align, color, 0);
|
|
spin_unlock(&priv->mm_lock);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err;
|
|
|
|
if (!bo->is_heap) {
|
|
- ret = panfrost_mmu_map(bo);
|
|
- if (ret) {
|
|
- spin_lock(&priv->mm_lock);
|
|
- drm_mm_remove_node(&bo->node);
|
|
- spin_unlock(&priv->mm_lock);
|
|
- }
|
|
+ ret = panfrost_mmu_map(mapping);
|
|
+ if (ret)
|
|
+ goto err;
|
|
}
|
|
+
|
|
+ mutex_lock(&bo->mappings.lock);
|
|
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
|
|
+ list_add_tail(&mapping->node, &bo->mappings.list);
|
|
+ mutex_unlock(&bo->mappings.lock);
|
|
+
|
|
+err:
|
|
+ if (ret)
|
|
+ panfrost_gem_mapping_put(mapping);
|
|
return ret;
|
|
}
|
|
|
|
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
|
|
{
|
|
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
|
|
struct panfrost_file_priv *priv = file_priv->driver_priv;
|
|
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
|
|
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
|
|
|
|
- if (bo->is_mapped)
|
|
- panfrost_mmu_unmap(bo);
|
|
+ mutex_lock(&bo->mappings.lock);
|
|
+ list_for_each_entry(iter, &bo->mappings.list, node) {
|
|
+ if (iter->mmu == &priv->mmu) {
|
|
+ mapping = iter;
|
|
+ list_del(&iter->node);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&bo->mappings.lock);
|
|
|
|
- spin_lock(&priv->mm_lock);
|
|
- if (drm_mm_node_allocated(&bo->node))
|
|
- drm_mm_remove_node(&bo->node);
|
|
- spin_unlock(&priv->mm_lock);
|
|
+ panfrost_gem_mapping_put(mapping);
|
|
}
|
|
|
|
static int panfrost_gem_pin(struct drm_gem_object *obj)
|
|
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
|
|
if (!obj)
|
|
return NULL;
|
|
|
|
+ INIT_LIST_HEAD(&obj->mappings.list);
|
|
+ mutex_init(&obj->mappings.lock);
|
|
obj->base.base.funcs = &panfrost_gem_funcs;
|
|
|
|
return &obj->base.base;
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
|
|
index 4b17e7308764..ca1bc9019600 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
|
|
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
|
|
struct drm_gem_shmem_object base;
|
|
struct sg_table *sgts;
|
|
|
|
- struct panfrost_mmu *mmu;
|
|
- struct drm_mm_node node;
|
|
- bool is_mapped :1;
|
|
+ /*
|
|
+ * Use a list for now. If searching a mapping ever becomes the
|
|
+ * bottleneck, we should consider using an RB-tree, or even better,
|
|
+ * let the core store drm_gem_object_mapping entries (where we
|
|
+ * could place driver specific data) instead of drm_gem_object ones
|
|
+ * in its drm_file->object_idr table.
|
|
+ *
|
|
+ * struct drm_gem_object_mapping {
|
|
+ * struct drm_gem_object *obj;
|
|
+ * void *driver_priv;
|
|
+ * };
|
|
+ */
|
|
+ struct {
|
|
+ struct list_head list;
|
|
+ struct mutex lock;
|
|
+ } mappings;
|
|
+
|
|
bool noexec :1;
|
|
bool is_heap :1;
|
|
};
|
|
|
|
+struct panfrost_gem_mapping {
|
|
+ struct list_head node;
|
|
+ struct kref refcount;
|
|
+ struct panfrost_gem_object *obj;
|
|
+ struct drm_mm_node mmnode;
|
|
+ struct panfrost_mmu *mmu;
|
|
+ bool active :1;
|
|
+};
|
|
+
|
|
static inline
|
|
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
|
|
{
|
|
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
|
|
}
|
|
|
|
-static inline
|
|
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
|
|
+static inline struct panfrost_gem_mapping *
|
|
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
|
|
{
|
|
- return container_of(node, struct panfrost_gem_object, node);
|
|
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
|
|
}
|
|
|
|
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
|
|
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
|
|
void panfrost_gem_close(struct drm_gem_object *obj,
|
|
struct drm_file *file_priv);
|
|
|
|
+struct panfrost_gem_mapping *
|
|
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
|
|
+ struct panfrost_file_priv *priv);
|
|
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
|
|
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
|
|
+
|
|
void panfrost_gem_shrinker_init(struct drm_device *dev);
|
|
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
|
|
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
|
|
index 458f0fa68111..f5dd7b29bc95 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
|
|
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
|
|
static bool panfrost_gem_purge(struct drm_gem_object *obj)
|
|
{
|
|
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
|
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
|
|
|
|
if (!mutex_trylock(&shmem->pages_lock))
|
|
return false;
|
|
|
|
- panfrost_mmu_unmap(to_panfrost_bo(obj));
|
|
+ panfrost_gem_teardown_mappings(bo);
|
|
drm_gem_shmem_purge_locked(obj);
|
|
|
|
mutex_unlock(&shmem->pages_lock);
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
|
|
index 21f34d44aac2..bbb0c5e3ca6f 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
|
|
@@ -269,9 +269,20 @@ static void panfrost_job_cleanup(struct kref *ref)
|
|
dma_fence_put(job->done_fence);
|
|
dma_fence_put(job->render_done_fence);
|
|
|
|
- if (job->bos) {
|
|
+ if (job->mappings) {
|
|
for (i = 0; i < job->bo_count; i++)
|
|
+ panfrost_gem_mapping_put(job->mappings[i]);
|
|
+ kvfree(job->mappings);
|
|
+ }
|
|
+
|
|
+ if (job->bos) {
|
|
+ struct panfrost_gem_object *bo;
|
|
+
|
|
+ for (i = 0; i < job->bo_count; i++) {
|
|
+ bo = to_panfrost_bo(job->bos[i]);
|
|
drm_gem_object_put_unlocked(job->bos[i]);
|
|
+ }
|
|
+
|
|
kvfree(job->bos);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
|
|
index 62454128a792..bbd3ba97ff67 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
|
|
@@ -32,6 +32,7 @@ struct panfrost_job {
|
|
|
|
/* Exclusive fences we have taken from the BOs to wait for */
|
|
struct dma_fence **implicit_fences;
|
|
+ struct panfrost_gem_mapping **mappings;
|
|
struct drm_gem_object **bos;
|
|
u32 bo_count;
|
|
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
|
|
index a3ed64a1f15e..763cfca886a7 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
|
|
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
|
|
return 0;
|
|
}
|
|
|
|
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
|
|
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
|
|
{
|
|
+ struct panfrost_gem_object *bo = mapping->obj;
|
|
struct drm_gem_object *obj = &bo->base.base;
|
|
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
|
|
struct sg_table *sgt;
|
|
int prot = IOMMU_READ | IOMMU_WRITE;
|
|
|
|
- if (WARN_ON(bo->is_mapped))
|
|
+ if (WARN_ON(mapping->active))
|
|
return 0;
|
|
|
|
if (bo->noexec)
|
|
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
|
|
if (WARN_ON(IS_ERR(sgt)))
|
|
return PTR_ERR(sgt);
|
|
|
|
- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
|
|
- bo->is_mapped = true;
|
|
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
|
|
+ prot, sgt);
|
|
+ mapping->active = true;
|
|
|
|
return 0;
|
|
}
|
|
|
|
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
|
|
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
|
|
{
|
|
+ struct panfrost_gem_object *bo = mapping->obj;
|
|
struct drm_gem_object *obj = &bo->base.base;
|
|
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
|
|
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
|
|
- u64 iova = bo->node.start << PAGE_SHIFT;
|
|
- size_t len = bo->node.size << PAGE_SHIFT;
|
|
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
|
|
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
|
|
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
|
|
size_t unmapped_len = 0;
|
|
|
|
- if (WARN_ON(!bo->is_mapped))
|
|
+ if (WARN_ON(!mapping->active))
|
|
return;
|
|
|
|
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
|
|
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
|
|
+ mapping->mmu->as, iova, len);
|
|
|
|
while (unmapped_len < len) {
|
|
size_t unmapped_page;
|
|
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
|
|
unmapped_len += pgsize;
|
|
}
|
|
|
|
- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
|
|
- bo->is_mapped = false;
|
|
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
|
|
+ mapping->mmnode.start << PAGE_SHIFT, len);
|
|
+ mapping->active = false;
|
|
}
|
|
|
|
static void mmu_tlb_inv_context_s1(void *cookie)
|
|
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
|
|
free_io_pgtable_ops(mmu->pgtbl_ops);
|
|
}
|
|
|
|
-static struct panfrost_gem_object *
|
|
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
|
|
+static struct panfrost_gem_mapping *
|
|
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
|
|
{
|
|
- struct panfrost_gem_object *bo = NULL;
|
|
+ struct panfrost_gem_mapping *mapping = NULL;
|
|
struct panfrost_file_priv *priv;
|
|
struct drm_mm_node *node;
|
|
u64 offset = addr >> PAGE_SHIFT;
|
|
@@ -418,8 +423,9 @@ found_mmu:
|
|
drm_mm_for_each_node(node, &priv->mm) {
|
|
if (offset >= node->start &&
|
|
offset < (node->start + node->size)) {
|
|
- bo = drm_mm_node_to_panfrost_bo(node);
|
|
- drm_gem_object_get(&bo->base.base);
|
|
+ mapping = drm_mm_node_to_panfrost_mapping(node);
|
|
+
|
|
+ kref_get(&mapping->refcount);
|
|
break;
|
|
}
|
|
}
|
|
@@ -427,7 +433,7 @@ found_mmu:
|
|
spin_unlock(&priv->mm_lock);
|
|
out:
|
|
spin_unlock(&pfdev->as_lock);
|
|
- return bo;
|
|
+ return mapping;
|
|
}
|
|
|
|
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
|
|
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
|
|
u64 addr)
|
|
{
|
|
int ret, i;
|
|
+ struct panfrost_gem_mapping *bomapping;
|
|
struct panfrost_gem_object *bo;
|
|
struct address_space *mapping;
|
|
pgoff_t page_offset;
|
|
struct sg_table *sgt;
|
|
struct page **pages;
|
|
|
|
- bo = addr_to_drm_mm_node(pfdev, as, addr);
|
|
- if (!bo)
|
|
+ bomapping = addr_to_mapping(pfdev, as, addr);
|
|
+ if (!bomapping)
|
|
return -ENOENT;
|
|
|
|
+ bo = bomapping->obj;
|
|
if (!bo->is_heap) {
|
|
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
|
|
- bo->node.start << PAGE_SHIFT);
|
|
+ bomapping->mmnode.start << PAGE_SHIFT);
|
|
ret = -EINVAL;
|
|
goto err_bo;
|
|
}
|
|
- WARN_ON(bo->mmu->as != as);
|
|
+ WARN_ON(bomapping->mmu->as != as);
|
|
|
|
/* Assume 2MB alignment and size multiple */
|
|
addr &= ~((u64)SZ_2M - 1);
|
|
page_offset = addr >> PAGE_SHIFT;
|
|
- page_offset -= bo->node.start;
|
|
+ page_offset -= bomapping->mmnode.start;
|
|
|
|
mutex_lock(&bo->base.pages_lock);
|
|
|
|
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
|
|
goto err_map;
|
|
}
|
|
|
|
- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
|
|
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
|
|
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
|
|
|
|
- bo->is_mapped = true;
|
|
+ bomapping->active = true;
|
|
|
|
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
|
|
|
|
- drm_gem_object_put_unlocked(&bo->base.base);
|
|
+ panfrost_gem_mapping_put(bomapping);
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
|
|
index 7c5b6775ae23..44fc2edf63ce 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
|
|
@@ -4,12 +4,12 @@
|
|
#ifndef __PANFROST_MMU_H__
|
|
#define __PANFROST_MMU_H__
|
|
|
|
-struct panfrost_gem_object;
|
|
+struct panfrost_gem_mapping;
|
|
struct panfrost_file_priv;
|
|
struct panfrost_mmu;
|
|
|
|
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
|
|
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
|
|
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
|
|
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
|
|
|
|
int panfrost_mmu_init(struct panfrost_device *pfdev);
|
|
void panfrost_mmu_fini(struct panfrost_device *pfdev);
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
|
|
index 2c04e858c50a..684820448be3 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
|
|
@@ -25,7 +25,7 @@
|
|
#define V4_SHADERS_PER_COREGROUP 4
|
|
|
|
struct panfrost_perfcnt {
|
|
- struct panfrost_gem_object *bo;
|
|
+ struct panfrost_gem_mapping *mapping;
|
|
size_t bosize;
|
|
void *buf;
|
|
struct panfrost_file_priv *user;
|
|
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
|
|
int ret;
|
|
|
|
reinit_completion(&pfdev->perfcnt->dump_comp);
|
|
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
|
|
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
|
|
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
|
|
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
|
|
gpu_write(pfdev, GPU_INT_CLEAR,
|
|
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
|
|
if (IS_ERR(bo))
|
|
return PTR_ERR(bo);
|
|
|
|
- perfcnt->bo = to_panfrost_bo(&bo->base);
|
|
-
|
|
/* Map the perfcnt buf in the address space attached to file_priv. */
|
|
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
|
|
+ ret = panfrost_gem_open(&bo->base, file_priv);
|
|
if (ret)
|
|
goto err_put_bo;
|
|
|
|
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
|
|
+ user);
|
|
+ if (!perfcnt->mapping) {
|
|
+ ret = -EINVAL;
|
|
+ goto err_close_bo;
|
|
+ }
|
|
+
|
|
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
|
|
if (IS_ERR(perfcnt->buf)) {
|
|
ret = PTR_ERR(perfcnt->buf);
|
|
- goto err_close_bo;
|
|
+ goto err_put_mapping;
|
|
}
|
|
|
|
/*
|
|
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
|
|
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
|
|
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
|
|
|
|
+ /* The BO ref is retained by the mapping. */
|
|
+ drm_gem_object_put_unlocked(&bo->base);
|
|
+
|
|
return 0;
|
|
|
|
err_vunmap:
|
|
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
|
|
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
|
|
+err_put_mapping:
|
|
+ panfrost_gem_mapping_put(perfcnt->mapping);
|
|
err_close_bo:
|
|
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
|
|
+ panfrost_gem_close(&bo->base, file_priv);
|
|
err_put_bo:
|
|
drm_gem_object_put_unlocked(&bo->base);
|
|
return ret;
|
|
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
|
|
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
|
|
|
|
perfcnt->user = NULL;
|
|
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
|
|
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
|
|
perfcnt->buf = NULL;
|
|
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
|
|
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
|
|
- perfcnt->bo = NULL;
|
|
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
|
|
+ panfrost_gem_mapping_put(perfcnt->mapping);
|
|
+ perfcnt->mapping = NULL;
|
|
pm_runtime_mark_last_busy(pfdev->dev);
|
|
pm_runtime_put_autosuspend(pfdev->dev);
|
|
|
|
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
|
|
index 6c64d50c9aae..01c2eeb02aa9 100644
|
|
--- a/drivers/hwmon/adt7475.c
|
|
+++ b/drivers/hwmon/adt7475.c
|
|
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
|
|
long reg;
|
|
|
|
if (bypass_attn & (1 << channel))
|
|
- reg = (volt * 1024) / 2250;
|
|
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
|
|
else
|
|
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
|
|
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
|
|
+ (r[0] + r[1]) * 2250);
|
|
return clamp_val(reg, 0, 1023) & (0xff << 2);
|
|
}
|
|
|
|
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
|
|
index 1f3b30b085b9..d018b20089ec 100644
|
|
--- a/drivers/hwmon/hwmon.c
|
|
+++ b/drivers/hwmon/hwmon.c
|
|
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
|
|
|
|
#define to_hwmon_attr(d) \
|
|
container_of(d, struct hwmon_device_attribute, dev_attr)
|
|
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
|
|
|
|
/*
|
|
* Thermal zone information
|
|
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
|
|
* also provides the sensor index.
|
|
*/
|
|
struct hwmon_thermal_data {
|
|
- struct hwmon_device *hwdev; /* Reference to hwmon device */
|
|
+ struct device *dev; /* Reference to hwmon device */
|
|
int index; /* sensor index */
|
|
};
|
|
|
|
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
|
|
NULL
|
|
};
|
|
|
|
+static void hwmon_free_attrs(struct attribute **attrs)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; attrs[i]; i++) {
|
|
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
|
|
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
|
|
+
|
|
+ kfree(hattr);
|
|
+ }
|
|
+ kfree(attrs);
|
|
+}
|
|
+
|
|
static void hwmon_dev_release(struct device *dev)
|
|
{
|
|
- kfree(to_hwmon_device(dev));
|
|
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
|
|
+
|
|
+ if (hwdev->group.attrs)
|
|
+ hwmon_free_attrs(hwdev->group.attrs);
|
|
+ kfree(hwdev->groups);
|
|
+ kfree(hwdev);
|
|
}
|
|
|
|
static struct class hwmon_class = {
|
|
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
|
|
static int hwmon_thermal_get_temp(void *data, int *temp)
|
|
{
|
|
struct hwmon_thermal_data *tdata = data;
|
|
- struct hwmon_device *hwdev = tdata->hwdev;
|
|
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
|
|
int ret;
|
|
long t;
|
|
|
|
- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
|
|
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
|
|
tdata->index, &t);
|
|
if (ret < 0)
|
|
return ret;
|
|
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
|
|
.get_temp = hwmon_thermal_get_temp,
|
|
};
|
|
|
|
-static int hwmon_thermal_add_sensor(struct device *dev,
|
|
- struct hwmon_device *hwdev, int index)
|
|
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
|
|
{
|
|
struct hwmon_thermal_data *tdata;
|
|
struct thermal_zone_device *tzd;
|
|
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
|
|
if (!tdata)
|
|
return -ENOMEM;
|
|
|
|
- tdata->hwdev = hwdev;
|
|
+ tdata->dev = dev;
|
|
tdata->index = index;
|
|
|
|
- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
|
|
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
|
|
&hwmon_thermal_ops);
|
|
/*
|
|
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
|
|
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
|
|
return 0;
|
|
}
|
|
#else
|
|
-static int hwmon_thermal_add_sensor(struct device *dev,
|
|
- struct hwmon_device *hwdev, int index)
|
|
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
|
|
{
|
|
return 0;
|
|
}
|
|
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
|
|
(type == hwmon_fan && attr == hwmon_fan_label);
|
|
}
|
|
|
|
-static struct attribute *hwmon_genattr(struct device *dev,
|
|
- const void *drvdata,
|
|
+static struct attribute *hwmon_genattr(const void *drvdata,
|
|
enum hwmon_sensor_types type,
|
|
u32 attr,
|
|
int index,
|
|
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
|
|
if ((mode & 0222) && !ops->write)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
|
|
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
|
|
if (!hattr)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
@@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
|
|
return n;
|
|
}
|
|
|
|
-static int hwmon_genattrs(struct device *dev,
|
|
- const void *drvdata,
|
|
+static int hwmon_genattrs(const void *drvdata,
|
|
struct attribute **attrs,
|
|
const struct hwmon_ops *ops,
|
|
const struct hwmon_channel_info *info)
|
|
@@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device *dev,
|
|
attr_mask &= ~BIT(attr);
|
|
if (attr >= template_size)
|
|
return -EINVAL;
|
|
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
|
|
+ a = hwmon_genattr(drvdata, info->type, attr, i,
|
|
templates[attr], ops);
|
|
if (IS_ERR(a)) {
|
|
if (PTR_ERR(a) != -ENOENT)
|
|
@@ -533,8 +548,7 @@ static int hwmon_genattrs(struct device *dev,
|
|
}
|
|
|
|
static struct attribute **
|
|
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
|
|
- const struct hwmon_chip_info *chip)
|
|
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
|
|
{
|
|
int ret, i, aindex = 0, nattrs = 0;
|
|
struct attribute **attrs;
|
|
@@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
|
|
if (nattrs == 0)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
|
|
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
|
|
if (!attrs)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
for (i = 0; chip->info[i]; i++) {
|
|
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
|
|
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
|
|
chip->info[i]);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ hwmon_free_attrs(attrs);
|
|
return ERR_PTR(ret);
|
|
+ }
|
|
aindex += ret;
|
|
}
|
|
|
|
@@ -595,14 +611,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
|
|
for (i = 0; groups[i]; i++)
|
|
ngroups++;
|
|
|
|
- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
|
|
- GFP_KERNEL);
|
|
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
|
|
if (!hwdev->groups) {
|
|
err = -ENOMEM;
|
|
goto free_hwmon;
|
|
}
|
|
|
|
- attrs = __hwmon_create_attrs(dev, drvdata, chip);
|
|
+ attrs = __hwmon_create_attrs(drvdata, chip);
|
|
if (IS_ERR(attrs)) {
|
|
err = PTR_ERR(attrs);
|
|
goto free_hwmon;
|
|
@@ -647,8 +662,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
|
|
hwmon_temp_input, j))
|
|
continue;
|
|
if (info[i]->config[j] & HWMON_T_INPUT) {
|
|
- err = hwmon_thermal_add_sensor(dev,
|
|
- hwdev, j);
|
|
+ err = hwmon_thermal_add_sensor(hdev, j);
|
|
if (err) {
|
|
device_unregister(hdev);
|
|
/*
|
|
@@ -667,7 +681,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
|
|
return hdev;
|
|
|
|
free_hwmon:
|
|
- kfree(hwdev);
|
|
+ hwmon_dev_release(hdev);
|
|
ida_remove:
|
|
ida_simple_remove(&hwmon_ida, id);
|
|
return ERR_PTR(err);
|
|
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
|
|
index f3dd2a17bd42..2e97e56c72c7 100644
|
|
--- a/drivers/hwmon/nct7802.c
|
|
+++ b/drivers/hwmon/nct7802.c
|
|
@@ -23,8 +23,8 @@
|
|
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
|
|
|
|
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
|
|
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
|
|
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
|
|
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
|
|
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
|
|
};
|
|
|
|
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
|
|
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
|
|
struct nct7802_data {
|
|
struct regmap *regmap;
|
|
struct mutex access_lock; /* for multi-byte read and write operations */
|
|
+ u8 in_status;
|
|
+ struct mutex in_alarm_lock;
|
|
};
|
|
|
|
static ssize_t temp_type_show(struct device *dev,
|
|
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
|
|
return err ? : count;
|
|
}
|
|
|
|
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
|
|
+ struct nct7802_data *data = dev_get_drvdata(dev);
|
|
+ int volt, min, max, ret;
|
|
+ unsigned int val;
|
|
+
|
|
+ mutex_lock(&data->in_alarm_lock);
|
|
+
|
|
+ /*
|
|
+ * The SMI Voltage status register is the only register giving a status
|
|
+ * for voltages. A bit is set for each input crossing a threshold, in
|
|
+ * both direction, but the "inside" or "outside" limits info is not
|
|
+ * available. Also this register is cleared on read.
|
|
+ * Note: this is not explicitly spelled out in the datasheet, but
|
|
+ * from experiment.
|
|
+ * To deal with this we use a status cache with one validity bit and
|
|
+ * one status bit for each input. Validity is cleared at startup and
|
|
+ * each time the register reports a change, and the status is processed
|
|
+ * by software based on current input value and limits.
|
|
+ */
|
|
+ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
|
|
+ if (ret < 0)
|
|
+ goto abort;
|
|
+
|
|
+ /* invalidate cached status for all inputs crossing a threshold */
|
|
+ data->in_status &= ~((val & 0x0f) << 4);
|
|
+
|
|
+ /* if cached status for requested input is invalid, update it */
|
|
+ if (!(data->in_status & (0x10 << sattr->index))) {
|
|
+ ret = nct7802_read_voltage(data, sattr->nr, 0);
|
|
+ if (ret < 0)
|
|
+ goto abort;
|
|
+ volt = ret;
|
|
+
|
|
+ ret = nct7802_read_voltage(data, sattr->nr, 1);
|
|
+ if (ret < 0)
|
|
+ goto abort;
|
|
+ min = ret;
|
|
+
|
|
+ ret = nct7802_read_voltage(data, sattr->nr, 2);
|
|
+ if (ret < 0)
|
|
+ goto abort;
|
|
+ max = ret;
|
|
+
|
|
+ if (volt < min || volt > max)
|
|
+ data->in_status |= (1 << sattr->index);
|
|
+ else
|
|
+ data->in_status &= ~(1 << sattr->index);
|
|
+
|
|
+ data->in_status |= 0x10 << sattr->index;
|
|
+ }
|
|
+
|
|
+ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
|
|
+abort:
|
|
+ mutex_unlock(&data->in_alarm_lock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
|
|
char *buf)
|
|
{
|
|
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
|
|
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
|
|
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
|
|
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
|
|
|
|
static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
|
|
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
|
|
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
|
|
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
|
|
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
|
|
|
|
static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
|
|
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
|
|
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
|
|
|
|
static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
|
|
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
|
|
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
|
|
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
|
|
|
|
static struct attribute *nct7802_in_attrs[] = {
|
|
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
|
|
return PTR_ERR(data->regmap);
|
|
|
|
mutex_init(&data->access_lock);
|
|
+ mutex_init(&data->in_alarm_lock);
|
|
|
|
ret = nct7802_init_chip(data);
|
|
if (ret < 0)
|
|
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
|
|
index a1a035270cab..b273e421e910 100644
|
|
--- a/drivers/infiniband/ulp/isert/ib_isert.c
|
|
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
|
|
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
|
|
}
|
|
}
|
|
|
|
-static void
|
|
-isert_wait4cmds(struct iscsi_conn *conn)
|
|
-{
|
|
- isert_info("iscsi_conn %p\n", conn);
|
|
-
|
|
- if (conn->sess) {
|
|
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
|
|
- target_wait_for_sess_cmds(conn->sess->se_sess);
|
|
- }
|
|
-}
|
|
-
|
|
/**
|
|
* isert_put_unsol_pending_cmds() - Drop commands waiting for
|
|
* unsolicitate dataout
|
|
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
|
|
|
|
ib_drain_qp(isert_conn->qp);
|
|
isert_put_unsol_pending_cmds(conn);
|
|
- isert_wait4cmds(conn);
|
|
isert_wait4logout(isert_conn);
|
|
|
|
queue_work(isert_release_wq, &isert_conn->release_work);
|
|
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
|
|
index 83368f1e7c4e..4650f4a94989 100644
|
|
--- a/drivers/input/misc/keyspan_remote.c
|
|
+++ b/drivers/input/misc/keyspan_remote.c
|
|
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
|
|
int retval = 0;
|
|
|
|
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
|
|
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
|
|
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
|
|
+ USB_CTRL_SET_TIMEOUT);
|
|
if (retval) {
|
|
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
|
|
__func__, retval);
|
|
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
|
|
}
|
|
|
|
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
|
|
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
|
|
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
|
|
+ USB_CTRL_SET_TIMEOUT);
|
|
if (retval) {
|
|
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
|
|
__func__, retval);
|
|
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
|
|
}
|
|
|
|
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
|
|
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
|
|
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
|
|
+ USB_CTRL_SET_TIMEOUT);
|
|
if (retval) {
|
|
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
|
|
__func__, retval);
|
|
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
|
|
index ecd762f93732..53ad25eaf1a2 100644
|
|
--- a/drivers/input/misc/pm8xxx-vibrator.c
|
|
+++ b/drivers/input/misc/pm8xxx-vibrator.c
|
|
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
|
|
|
|
if (regs->enable_mask)
|
|
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
|
|
- on ? regs->enable_mask : 0, val);
|
|
+ regs->enable_mask, on ? ~0 : 0);
|
|
|
|
return rc;
|
|
}
|
|
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
|
|
index b313c579914f..2407ea43de59 100644
|
|
--- a/drivers/input/rmi4/rmi_smbus.c
|
|
+++ b/drivers/input/rmi4/rmi_smbus.c
|
|
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
|
|
/* prepare to write next block of bytes */
|
|
cur_len -= SMB_MAX_COUNT;
|
|
databuff += SMB_MAX_COUNT;
|
|
+ rmiaddr += SMB_MAX_COUNT;
|
|
}
|
|
exit:
|
|
mutex_unlock(&rmi_smb->page_mutex);
|
|
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
|
|
/* prepare to read next block of bytes */
|
|
cur_len -= SMB_MAX_COUNT;
|
|
databuff += SMB_MAX_COUNT;
|
|
+ rmiaddr += SMB_MAX_COUNT;
|
|
}
|
|
|
|
retval = 0;
|
|
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
|
|
index 2ca586fb914f..06d0ffef4a17 100644
|
|
--- a/drivers/input/tablet/aiptek.c
|
|
+++ b/drivers/input/tablet/aiptek.c
|
|
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
|
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
|
|
|
|
/* Verify that a device really has an endpoint */
|
|
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
|
|
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
|
|
dev_err(&intf->dev,
|
|
"interface has %d endpoints, but must have minimum 1\n",
|
|
- intf->altsetting[0].desc.bNumEndpoints);
|
|
+ intf->cur_altsetting->desc.bNumEndpoints);
|
|
err = -EINVAL;
|
|
goto fail3;
|
|
}
|
|
- endpoint = &intf->altsetting[0].endpoint[0].desc;
|
|
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
|
|
|
|
/* Go set up our URB, which is called when the tablet receives
|
|
* input.
|
|
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
|
|
index 35031228a6d0..799c94dda651 100644
|
|
--- a/drivers/input/tablet/gtco.c
|
|
+++ b/drivers/input/tablet/gtco.c
|
|
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
|
|
}
|
|
|
|
/* Sanity check that a device has an endpoint */
|
|
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
|
|
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
|
|
dev_err(&usbinterface->dev,
|
|
"Invalid number of endpoints\n");
|
|
error = -EINVAL;
|
|
goto err_free_urb;
|
|
}
|
|
|
|
- /*
|
|
- * The endpoint is always altsetting 0, we know this since we know
|
|
- * this device only has one interrupt endpoint
|
|
- */
|
|
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
|
|
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
|
|
|
|
/* Some debug */
|
|
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
|
|
@@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
|
|
input_dev->dev.parent = &usbinterface->dev;
|
|
|
|
/* Setup the URB, it will be posted later on open of input device */
|
|
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
|
|
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
|
|
|
|
usb_fill_int_urb(gtco->urbinfo,
|
|
udev,
|
|
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
|
|
index a1f3a0cb197e..38f087404f7a 100644
|
|
--- a/drivers/input/tablet/pegasus_notetaker.c
|
|
+++ b/drivers/input/tablet/pegasus_notetaker.c
|
|
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
|
|
return -ENODEV;
|
|
|
|
/* Sanity check that the device has an endpoint */
|
|
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
|
|
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
|
|
dev_err(&intf->dev, "Invalid number of endpoints\n");
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
|
|
index 0af0fe8c40d7..742a7e96c1b5 100644
|
|
--- a/drivers/input/touchscreen/sun4i-ts.c
|
|
+++ b/drivers/input/touchscreen/sun4i-ts.c
|
|
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
|
|
struct device *dev = &pdev->dev;
|
|
struct device_node *np = dev->of_node;
|
|
struct device *hwmon;
|
|
+ struct thermal_zone_device *thermal;
|
|
int error;
|
|
u32 reg;
|
|
bool ts_attached;
|
|
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
|
|
if (IS_ERR(hwmon))
|
|
return PTR_ERR(hwmon);
|
|
|
|
- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
|
|
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
|
|
+ &sun4i_ts_tz_ops);
|
|
+ if (IS_ERR(thermal))
|
|
+ return PTR_ERR(thermal);
|
|
|
|
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
|
|
|
|
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
|
|
index 3fd3e862269b..2e2ea5719c90 100644
|
|
--- a/drivers/input/touchscreen/sur40.c
|
|
+++ b/drivers/input/touchscreen/sur40.c
|
|
@@ -653,7 +653,7 @@ static int sur40_probe(struct usb_interface *interface,
|
|
int error;
|
|
|
|
/* Check if we really have the right interface. */
|
|
- iface_desc = &interface->altsetting[0];
|
|
+ iface_desc = interface->cur_altsetting;
|
|
if (iface_desc->desc.bInterfaceClass != 0xFF)
|
|
return -ENODEV;
|
|
|
|
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
|
|
index 568c52317757..483f7bc379fa 100644
|
|
--- a/drivers/iommu/amd_iommu_init.c
|
|
+++ b/drivers/iommu/amd_iommu_init.c
|
|
@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
|
|
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
|
|
{
|
|
struct pci_dev *pdev = iommu->dev;
|
|
- u64 val = 0xabcd, val2 = 0;
|
|
+ u64 val = 0xabcd, val2 = 0, save_reg = 0;
|
|
|
|
if (!iommu_feature(iommu, FEATURE_PC))
|
|
return;
|
|
|
|
amd_iommu_pc_present = true;
|
|
|
|
+ /* save the value to restore, if writable */
|
|
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
|
|
+ goto pc_false;
|
|
+
|
|
/* Check if the performance counters can be written to */
|
|
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
|
|
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
|
|
- (val != val2)) {
|
|
- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
|
|
- amd_iommu_pc_present = false;
|
|
- return;
|
|
- }
|
|
+ (val != val2))
|
|
+ goto pc_false;
|
|
+
|
|
+ /* restore */
|
|
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
|
|
+ goto pc_false;
|
|
|
|
pci_info(pdev, "IOMMU performance counters supported\n");
|
|
|
|
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
|
|
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
|
|
iommu->max_counters = (u8) ((val >> 7) & 0xf);
|
|
+
|
|
+ return;
|
|
+
|
|
+pc_false:
|
|
+ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
|
|
+ amd_iommu_pc_present = false;
|
|
+ return;
|
|
}
|
|
|
|
static ssize_t amd_iommu_show_cap(struct device *dev,
|
|
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
|
|
index e84c5dfe146f..dd5db856dcaf 100644
|
|
--- a/drivers/iommu/intel-iommu.c
|
|
+++ b/drivers/iommu/intel-iommu.c
|
|
@@ -5132,7 +5132,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
|
|
|
|
spin_lock_irqsave(&device_domain_lock, flags);
|
|
info = dev->archdata.iommu;
|
|
- if (info)
|
|
+ if (info && info != DEFER_DEVICE_DOMAIN_INFO
|
|
+ && info != DUMMY_DEVICE_DOMAIN_INFO)
|
|
__dmar_remove_one_dev_info(info);
|
|
spin_unlock_irqrestore(&device_domain_lock, flags);
|
|
}
|
|
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index a5c73f3d5f79..2bf74595610f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led led = {};
const char *state = NULL;

+ /*
+ * Acquire gpiod from DT with uninitialized label, which
+ * will be updated after LED class device is registered,
+ * Only then the final LED name is known.
+ */
led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
GPIOD_ASIS,
- led.name);
+ NULL);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
fwnode_handle_put(child);
return ERR_PTR(ret);
}
+ /* Set gpiod label to match the corresponding LED name. */
+ gpiod_set_consumer_name(led_dat->gpiod,
+ led_dat->cdev.dev->kobj.name);
priv->num_leds++;
}

diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
|
|
index 21bb96ce4cd6..58868d7129eb 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
|
|
@@ -1605,12 +1605,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
|
|
case V4L2_BUF_TYPE_VBI_CAPTURE:
|
|
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.vbi);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
|
|
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
|
|
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sliced);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
|
|
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
|
|
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
|
|
if (unlikely(!ops->vidioc_s_fmt_vid_out))
|
|
@@ -1636,22 +1636,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
|
|
case V4L2_BUF_TYPE_VBI_OUTPUT:
|
|
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.vbi);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
|
|
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
|
|
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sliced);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
|
|
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SDR_CAPTURE:
|
|
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sdr);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
|
|
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SDR_OUTPUT:
|
|
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sdr);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
|
|
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
|
|
case V4L2_BUF_TYPE_META_CAPTURE:
|
|
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
|
|
@@ -1707,12 +1707,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
|
|
case V4L2_BUF_TYPE_VBI_CAPTURE:
|
|
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.vbi);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
|
|
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
|
|
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sliced);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
|
|
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
|
|
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
|
|
if (unlikely(!ops->vidioc_try_fmt_vid_out))
|
|
@@ -1738,22 +1738,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
|
|
case V4L2_BUF_TYPE_VBI_OUTPUT:
|
|
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.vbi);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
|
|
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
|
|
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sliced);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
|
|
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SDR_CAPTURE:
|
|
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sdr);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
|
|
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
|
|
case V4L2_BUF_TYPE_SDR_OUTPUT:
|
|
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
|
|
break;
|
|
- CLEAR_AFTER_FIELD(p, fmt.sdr);
|
|
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
|
|
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
|
|
case V4L2_BUF_TYPE_META_CAPTURE:
|
|
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
|
|
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7bc950520fd9..403ac44a7378 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}

diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5f9df2dbde06..4478b94d4791 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3902,11 +3902,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * Divided Clock Mode minimum clock rate is always less than
+ * Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
|
|
index bb90757ecace..4cbb764c9822 100644
|
|
--- a/drivers/mmc/host/sdhci_am654.c
|
|
+++ b/drivers/mmc/host/sdhci_am654.c
|
|
@@ -236,6 +236,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
|
|
writeb(val, host->ioaddr + reg);
|
|
}
|
|
|
|
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
+{
|
|
+ struct sdhci_host *host = mmc_priv(mmc);
|
|
+ int err = sdhci_execute_tuning(mmc, opcode);
|
|
+
|
|
+ if (err)
|
|
+ return err;
|
|
+ /*
|
|
+ * Tuning data remains in the buffer after tuning.
|
|
+ * Do a command and data reset to get rid of it
|
|
+ */
|
|
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static struct sdhci_ops sdhci_am654_ops = {
|
|
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
|
|
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
|
|
@@ -249,8 +265,7 @@ static struct sdhci_ops sdhci_am654_ops = {
|
|
|
|
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
|
|
.ops = &sdhci_am654_ops,
|
|
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
|
|
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
|
|
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
|
|
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
|
|
};
|
|
|
|
@@ -272,8 +287,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
|
|
|
|
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
|
|
.ops = &sdhci_j721e_8bit_ops,
|
|
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
|
|
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
|
|
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
|
|
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
|
|
};
|
|
|
|
@@ -295,8 +309,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
|
|
|
|
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
|
|
.ops = &sdhci_j721e_4bit_ops,
|
|
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
|
|
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
|
|
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
|
|
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
|
|
};
|
|
|
|
@@ -480,6 +493,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
|
|
goto pm_runtime_put;
|
|
}
|
|
|
|
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
|
|
+
|
|
ret = sdhci_am654_init(host);
|
|
if (ret)
|
|
goto pm_runtime_put;
|
|
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
|
|
index 2e57122f02fb..2f5c287eac95 100644
|
|
--- a/drivers/net/can/slcan.c
|
|
+++ b/drivers/net/can/slcan.c
|
|
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
|
|
*/
|
|
static void slcan_write_wakeup(struct tty_struct *tty)
|
|
{
|
|
- struct slcan *sl = tty->disc_data;
|
|
+ struct slcan *sl;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ sl = rcu_dereference(tty->disc_data);
|
|
+ if (!sl)
|
|
+ goto out;
|
|
|
|
schedule_work(&sl->tx_work);
|
|
+out:
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
/* Send a can_frame to a TTY queue. */
|
|
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
|
|
return;
|
|
|
|
spin_lock_bh(&sl->lock);
|
|
- tty->disc_data = NULL;
|
|
+ rcu_assign_pointer(tty->disc_data, NULL);
|
|
sl->tty = NULL;
|
|
spin_unlock_bh(&sl->lock);
|
|
|
|
+ synchronize_rcu();
|
|
flush_work(&sl->tx_work);
|
|
|
|
/* Flush network side */
|
|
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
|
|
index 1de51811fcb4..8f909d57501f 100644
|
|
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
|
|
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
|
|
@@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
|
|
DMA_END_ADDR);
|
|
|
|
/* Initialize Tx NAPI */
|
|
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
|
|
- NAPI_POLL_WEIGHT);
|
|
+ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
|
|
+ NAPI_POLL_WEIGHT);
|
|
}
|
|
|
|
/* Initialize a RDMA ring */
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
|
|
index 58f89f6a040f..97ff8608f0ab 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
|
|
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
|
|
|
|
if (!is_offload(adapter))
|
|
return -EOPNOTSUPP;
|
|
+ if (!capable(CAP_NET_ADMIN))
|
|
+ return -EPERM;
|
|
if (!(adapter->flags & FULL_INIT_DONE))
|
|
return -EIO; /* need the memory controllers */
|
|
if (copy_from_user(&t, useraddr, sizeof(t)))
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
|
|
index 778dab1af8fc..f260dd96873b 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
|
|
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
|
|
|
|
struct tx_sync_info {
|
|
u64 rcd_sn;
|
|
- s32 sync_len;
|
|
+ u32 sync_len;
|
|
int nr_frags;
|
|
skb_frag_t frags[MAX_SKB_FRAGS];
|
|
};
|
|
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
|
|
|
|
static enum mlx5e_ktls_sync_retval
|
|
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
|
|
- u32 tcp_seq, struct tx_sync_info *info)
|
|
+ u32 tcp_seq, int datalen, struct tx_sync_info *info)
|
|
{
|
|
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
|
|
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
|
|
struct tls_record_info *record;
|
|
int remaining, i = 0;
|
|
unsigned long flags;
|
|
+ bool ends_before;
|
|
|
|
spin_lock_irqsave(&tx_ctx->lock, flags);
|
|
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
|
|
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
|
|
goto out;
|
|
}
|
|
|
|
- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
|
|
- ret = tls_record_is_start_marker(record) ?
|
|
- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
|
|
+ /* There are the following cases:
|
|
+ * 1. packet ends before start marker: bypass offload.
|
|
+ * 2. packet starts before start marker and ends after it: drop,
|
|
+ * not supported, breaks contract with kernel.
|
|
+ * 3. packet ends before tls record info starts: drop,
|
|
+ * this packet was already acknowledged and its record info
|
|
+ * was released.
|
|
+ */
|
|
+ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
|
|
+
|
|
+ if (unlikely(tls_record_is_start_marker(record))) {
|
|
+ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
|
|
+ goto out;
|
|
+ } else if (ends_before) {
|
|
+ ret = MLX5E_KTLS_SYNC_FAIL;
|
|
goto out;
|
|
}
|
|
|
|
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
|
|
u8 num_wqebbs;
|
|
int i = 0;
|
|
|
|
- ret = tx_sync_info_get(priv_tx, seq, &info);
|
|
+ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
|
|
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
|
|
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
|
|
stats->tls_skip_no_sync_data++;
|
|
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
|
|
goto err_out;
|
|
}
|
|
|
|
- if (unlikely(info.sync_len < 0)) {
|
|
- if (likely(datalen <= -info.sync_len))
|
|
- return MLX5E_KTLS_SYNC_DONE;
|
|
-
|
|
- stats->tls_drop_bypass_req++;
|
|
- goto err_out;
|
|
- }
|
|
-
|
|
stats->tls_ooo++;
|
|
|
|
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
|
|
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
|
|
if (unlikely(contig_wqebbs_room < num_wqebbs))
|
|
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
|
|
|
|
- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
|
|
-
|
|
for (; i < info.nr_frags; i++) {
|
|
unsigned int orig_fsz, frag_offset = 0, n = 0;
|
|
skb_frag_t *f = &info.frags[i];
|
|
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
|
|
enum mlx5e_ktls_sync_retval ret =
|
|
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
|
|
|
|
- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
|
|
+ switch (ret) {
|
|
+ case MLX5E_KTLS_SYNC_DONE:
|
|
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
|
|
- else if (ret == MLX5E_KTLS_SYNC_FAIL)
|
|
+ break;
|
|
+ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
|
|
+ if (likely(!skb->decrypted))
|
|
+ goto out;
|
|
+ WARN_ON_ONCE(1);
|
|
+ /* fall-through */
|
|
+ default: /* MLX5E_KTLS_SYNC_FAIL */
|
|
goto err_out;
|
|
- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
|
|
- goto out;
|
|
+ }
|
|
}
|
|
|
|
priv_tx->expected_seq = seq + datalen;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
index 96711e34d248..1f9107d83848 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
@@ -3951,6 +3951,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
|
|
u32 rate_mbps;
|
|
int err;
|
|
|
|
+ vport_num = rpriv->rep->vport;
|
|
+ if (vport_num >= MLX5_VPORT_ECPF) {
|
|
+ NL_SET_ERR_MSG_MOD(extack,
|
|
+ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
esw = priv->mdev->priv.eswitch;
|
|
/* rate is given in bytes/sec.
|
|
* First convert to bits/sec and then round to the nearest mbit/secs.
|
|
@@ -3959,8 +3966,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
|
|
* 1 mbit/sec.
|
|
*/
|
|
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
|
|
- vport_num = rpriv->rep->vport;
|
|
-
|
|
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
|
|
if (err)
|
|
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
|
|
index 9004a07e457a..5acfdea3a75a 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
|
|
@@ -858,7 +858,7 @@ out:
|
|
*/
|
|
#define ESW_SIZE (16 * 1024 * 1024)
|
|
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
|
|
- 64 * 1024, 4 * 1024 };
|
|
+ 64 * 1024, 128 };
|
|
|
|
static int
|
|
get_sz_from_pool(struct mlx5_eswitch *esw)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
index 051ab845b501..c96a0e501007 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
@@ -1569,6 +1569,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
|
|
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
|
|
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
|
|
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
|
|
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
|
|
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
|
|
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
|
|
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
|
|
index 51803eef13dd..c7f10d4f8f8d 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
|
|
@@ -1,6 +1,7 @@
|
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
|
/* Copyright (c) 2019 Mellanox Technologies. */
|
|
|
|
+#include <linux/smp.h>
|
|
#include "dr_types.h"
|
|
|
|
#define QUEUE_SIZE 128
|
|
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
|
|
if (!in)
|
|
goto err_cqwq;
|
|
|
|
- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
|
|
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
|
|
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
|
|
if (err) {
|
|
kvfree(in);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
|
|
index 3d587d0bdbbe..1e32e2443f73 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
|
|
@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
|
|
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
|
|
list_for_each_entry(dst, &fte->node.children, node.list) {
|
|
enum mlx5_flow_destination_type type = dst->dest_attr.type;
|
|
- u32 id;
|
|
|
|
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
|
|
err = -ENOSPC;
|
|
goto free_actions;
|
|
}
|
|
|
|
- switch (type) {
|
|
- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
|
|
- id = dst->dest_attr.counter_id;
|
|
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
|
|
+ continue;
|
|
|
|
- tmp_action =
|
|
- mlx5dr_action_create_flow_counter(id);
|
|
- if (!tmp_action) {
|
|
- err = -ENOMEM;
|
|
- goto free_actions;
|
|
- }
|
|
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
|
|
- actions[num_actions++] = tmp_action;
|
|
- break;
|
|
+ switch (type) {
|
|
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
|
|
tmp_action = create_ft_action(dev, dst);
|
|
if (!tmp_action) {
|
|
@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
|
|
}
|
|
}
|
|
|
|
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
|
|
+ list_for_each_entry(dst, &fte->node.children, node.list) {
|
|
+ u32 id;
|
|
+
|
|
+ if (dst->dest_attr.type !=
|
|
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
|
|
+ continue;
|
|
+
|
|
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
|
|
+ err = -ENOSPC;
|
|
+ goto free_actions;
|
|
+ }
|
|
+
|
|
+ id = dst->dest_attr.counter_id;
|
|
+ tmp_action =
|
|
+ mlx5dr_action_create_flow_counter(id);
|
|
+ if (!tmp_action) {
|
|
+ err = -ENOMEM;
|
|
+ goto free_actions;
|
|
+ }
|
|
+
|
|
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
|
|
+ actions[num_actions++] = tmp_action;
|
|
+ }
|
|
+ }
|
|
+
|
|
params.match_sz = match_sz;
|
|
params.match_buf = (u64 *)fte->val;
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
|
|
index 150b3a144b83..3d3cca596116 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
|
|
@@ -8,6 +8,7 @@
|
|
#include <linux/string.h>
|
|
#include <linux/rhashtable.h>
|
|
#include <linux/netdevice.h>
|
|
+#include <linux/mutex.h>
|
|
#include <net/net_namespace.h>
|
|
#include <net/tc_act/tc_vlan.h>
|
|
|
|
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
|
|
struct mlxsw_sp_fid *dummy_fid;
|
|
struct rhashtable ruleset_ht;
|
|
struct list_head rules;
|
|
+ struct mutex rules_lock; /* Protects rules list */
|
|
struct {
|
|
struct delayed_work dw;
|
|
unsigned long interval; /* ms */
|
|
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
|
|
goto err_ruleset_block_bind;
|
|
}
|
|
|
|
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
|
|
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
|
|
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
|
|
block->rule_count++;
|
|
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
|
|
return 0;
|
|
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
|
|
|
|
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
|
|
ruleset->ht_key.block->rule_count--;
|
|
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
|
|
list_del(&rule->list);
|
|
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
|
|
if (!ruleset->ht_key.chain_index &&
|
|
mlxsw_sp_acl_ruleset_is_singular(ruleset))
|
|
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
|
|
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
|
|
struct mlxsw_sp_acl_rule *rule;
|
|
int err;
|
|
|
|
- /* Protect internal structures from changes */
|
|
- rtnl_lock();
|
|
+ mutex_lock(&acl->rules_lock);
|
|
list_for_each_entry(rule, &acl->rules, list) {
|
|
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
|
|
rule);
|
|
if (err)
|
|
goto err_rule_update;
|
|
}
|
|
- rtnl_unlock();
|
|
+ mutex_unlock(&acl->rules_lock);
|
|
return 0;
|
|
|
|
err_rule_update:
|
|
- rtnl_unlock();
|
|
+ mutex_unlock(&acl->rules_lock);
|
|
return err;
|
|
}
|
|
|
|
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
|
|
acl->dummy_fid = fid;
|
|
|
|
INIT_LIST_HEAD(&acl->rules);
|
|
+ mutex_init(&acl->rules_lock);
|
|
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
|
|
if (err)
|
|
goto err_acl_ops_init;
|
|
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
|
|
return 0;
|
|
|
|
err_acl_ops_init:
|
|
+ mutex_destroy(&acl->rules_lock);
|
|
mlxsw_sp_fid_put(fid);
|
|
err_fid_get:
|
|
rhashtable_destroy(&acl->ruleset_ht);
|
|
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
|
|
|
|
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
|
|
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
|
|
+ mutex_destroy(&acl->rules_lock);
|
|
WARN_ON(!list_empty(&acl->rules));
|
|
mlxsw_sp_fid_put(acl->dummy_fid);
|
|
rhashtable_destroy(&acl->ruleset_ht);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
|
|
index 1c14c051ee52..63e7a058b7c6 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
|
|
@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
|
|
u64 len;
|
|
int err;
|
|
|
|
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
|
|
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
|
|
+ dev_kfree_skb_any(skb);
|
|
+ return NETDEV_TX_OK;
|
|
+ }
|
|
+
|
|
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
|
|
|
|
if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
|
|
return NETDEV_TX_BUSY;
|
|
|
|
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
|
|
- struct sk_buff *skb_orig = skb;
|
|
-
|
|
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
|
|
- if (!skb) {
|
|
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
|
|
- dev_kfree_skb_any(skb_orig);
|
|
- return NETDEV_TX_OK;
|
|
- }
|
|
- dev_consume_skb_any(skb_orig);
|
|
- }
|
|
mlxsw_sx_txhdr_construct(skb, &tx_info);
|
|
/* TX header is consumed by HW on the way so we shouldn't count its
|
|
* bytes as being sent.
|
|
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
|
|
index b339125b2f09..05e760444a92 100644
|
|
--- a/drivers/net/ethernet/natsemi/sonic.c
|
|
+++ b/drivers/net/ethernet/natsemi/sonic.c
|
|
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
|
|
|
|
netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
|
|
|
|
+ spin_lock_init(&lp->lock);
|
|
+
|
|
for (i = 0; i < SONIC_NUM_RRS; i++) {
|
|
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
|
|
if (skb == NULL) {
|
|
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
|
|
return 0;
|
|
}
|
|
|
|
+/* Wait for the SONIC to become idle. */
|
|
+static void sonic_quiesce(struct net_device *dev, u16 mask)
|
|
+{
|
|
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
|
|
+ int i;
|
|
+ u16 bits;
|
|
+
|
|
+ for (i = 0; i < 1000; ++i) {
|
|
+ bits = SONIC_READ(SONIC_CMD) & mask;
|
|
+ if (!bits)
|
|
+ return;
|
|
+ if (irqs_disabled() || in_interrupt())
|
|
+ udelay(20);
|
|
+ else
|
|
+ usleep_range(100, 200);
|
|
+ }
|
|
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
|
|
+}
|
|
|
|
/*
|
|
* Close the SONIC device
|
|
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
|
|
/*
|
|
* stop the SONIC, disable interrupts
|
|
*/
|
|
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
|
|
+ sonic_quiesce(dev, SONIC_CR_ALL);
|
|
+
|
|
SONIC_WRITE(SONIC_IMR, 0);
|
|
SONIC_WRITE(SONIC_ISR, 0x7fff);
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
|
|
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
|
|
* put the Sonic into software-reset mode and
|
|
* disable all interrupts before releasing DMA buffers
|
|
*/
|
|
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
|
|
+ sonic_quiesce(dev, SONIC_CR_ALL);
|
|
+
|
|
SONIC_WRITE(SONIC_IMR, 0);
|
|
SONIC_WRITE(SONIC_ISR, 0x7fff);
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
|
|
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
|
|
* wake the tx queue
|
|
* Concurrently with all of this, the SONIC is potentially writing to
|
|
* the status flags of the TDs.
|
|
- * Until some mutual exclusion is added, this code will not work with SMP. However,
|
|
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
|
|
*/
|
|
|
|
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|
struct sonic_local *lp = netdev_priv(dev);
|
|
dma_addr_t laddr;
|
|
int length;
|
|
- int entry = lp->next_tx;
|
|
+ int entry;
|
|
+ unsigned long flags;
|
|
|
|
netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
|
|
|
|
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
+ spin_lock_irqsave(&lp->lock, flags);
|
|
+
|
|
+ entry = lp->next_tx;
|
|
+
|
|
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
|
|
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
|
|
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
|
|
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|
sonic_tda_put(dev, entry, SONIC_TD_LINK,
|
|
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
|
|
|
|
- /*
|
|
- * Must set tx_skb[entry] only after clearing status, and
|
|
- * before clearing EOL and before stopping queue
|
|
- */
|
|
wmb();
|
|
lp->tx_len[entry] = length;
|
|
lp->tx_laddr[entry] = laddr;
|
|
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
|
|
|
|
+ spin_unlock_irqrestore(&lp->lock, flags);
|
|
+
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
|
|
struct net_device *dev = dev_id;
|
|
struct sonic_local *lp = netdev_priv(dev);
|
|
int status;
|
|
+ unsigned long flags;
|
|
+
|
|
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
|
|
+ * with sonic_send_packet() so that the two functions can share state.
|
|
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
|
|
+ * by macsonic which must use two IRQs with different priority levels.
|
|
+ */
|
|
+ spin_lock_irqsave(&lp->lock, flags);
|
|
+
|
|
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
|
|
+ if (!status) {
|
|
+ spin_unlock_irqrestore(&lp->lock, flags);
|
|
|
|
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
|
|
return IRQ_NONE;
|
|
+ }
|
|
|
|
do {
|
|
+ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
|
|
+
|
|
if (status & SONIC_INT_PKTRX) {
|
|
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
|
|
sonic_rx(dev); /* got packet(s) */
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
|
|
}
|
|
|
|
if (status & SONIC_INT_TXDN) {
|
|
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
|
|
int td_status;
|
|
int freed_some = 0;
|
|
|
|
- /* At this point, cur_tx is the index of a TD that is one of:
|
|
- * unallocated/freed (status set & tx_skb[entry] clear)
|
|
- * allocated and sent (status set & tx_skb[entry] set )
|
|
- * allocated and not yet sent (status clear & tx_skb[entry] set )
|
|
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
|
|
+ /* The state of a Transmit Descriptor may be inferred
|
|
+ * from { tx_skb[entry], td_status } as follows.
|
|
+ * { clear, clear } => the TD has never been used
|
|
+ * { set, clear } => the TD was handed to SONIC
|
|
+ * { set, set } => the TD was handed back
|
|
+ * { clear, set } => the TD is available for re-use
|
|
*/
|
|
|
|
netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
|
|
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
|
|
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
|
|
break;
|
|
|
|
- if (td_status & 0x0001) {
|
|
+ if (td_status & SONIC_TCR_PTX) {
|
|
lp->stats.tx_packets++;
|
|
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
|
|
} else {
|
|
- lp->stats.tx_errors++;
|
|
- if (td_status & 0x0642)
|
|
+ if (td_status & (SONIC_TCR_EXD |
|
|
+ SONIC_TCR_EXC | SONIC_TCR_BCM))
|
|
lp->stats.tx_aborted_errors++;
|
|
- if (td_status & 0x0180)
|
|
+ if (td_status &
|
|
+ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
|
|
lp->stats.tx_carrier_errors++;
|
|
- if (td_status & 0x0020)
|
|
+ if (td_status & SONIC_TCR_OWC)
|
|
lp->stats.tx_window_errors++;
|
|
- if (td_status & 0x0004)
|
|
+ if (td_status & SONIC_TCR_FU)
|
|
lp->stats.tx_fifo_errors++;
|
|
}
|
|
|
|
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
|
|
if (freed_some || lp->tx_skb[entry] == NULL)
|
|
netif_wake_queue(dev); /* The ring is no longer full */
|
|
lp->cur_tx = entry;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
|
|
}
|
|
|
|
/*
|
|
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
|
|
if (status & SONIC_INT_RFO) {
|
|
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
|
|
__func__);
|
|
- lp->stats.rx_fifo_errors++;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
|
|
}
|
|
if (status & SONIC_INT_RDE) {
|
|
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
|
|
__func__);
|
|
- lp->stats.rx_dropped++;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
|
|
}
|
|
if (status & SONIC_INT_RBAE) {
|
|
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
|
|
__func__);
|
|
- lp->stats.rx_dropped++;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
|
|
}
|
|
|
|
/* counter overruns; all counters are 16bit wide */
|
|
- if (status & SONIC_INT_FAE) {
|
|
+ if (status & SONIC_INT_FAE)
|
|
lp->stats.rx_frame_errors += 65536;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
|
|
- }
|
|
- if (status & SONIC_INT_CRC) {
|
|
+ if (status & SONIC_INT_CRC)
|
|
lp->stats.rx_crc_errors += 65536;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
|
|
- }
|
|
- if (status & SONIC_INT_MP) {
|
|
+ if (status & SONIC_INT_MP)
|
|
lp->stats.rx_missed_errors += 65536;
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
|
|
- }
|
|
|
|
/* transmit error */
|
|
if (status & SONIC_INT_TXER) {
|
|
- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
|
|
- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
|
|
- __func__);
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
|
|
+ u16 tcr = SONIC_READ(SONIC_TCR);
|
|
+
|
|
+ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
|
|
+ __func__, tcr);
|
|
+
|
|
+ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
|
|
+ SONIC_TCR_FU | SONIC_TCR_BCM)) {
|
|
+ /* Aborted transmission. Try again. */
|
|
+ netif_stop_queue(dev);
|
|
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
|
|
+ }
|
|
}
|
|
|
|
/* bus retry */
|
|
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
|
|
/* ... to help debug DMA problems causing endless interrupts. */
|
|
/* Bounce the eth interface to turn on the interrupt again. */
|
|
SONIC_WRITE(SONIC_IMR, 0);
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
|
|
}
|
|
|
|
- /* load CAM done */
|
|
- if (status & SONIC_INT_LCD)
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
|
|
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
|
|
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
|
|
+ } while (status);
|
|
+
|
|
+ spin_unlock_irqrestore(&lp->lock, flags);
|
|
+
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
+/* Return the array index corresponding to a given Receive Buffer pointer. */
|
|
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
|
|
+ unsigned int last)
|
|
+{
|
|
+ unsigned int i = last;
|
|
+
|
|
+ do {
|
|
+ i = (i + 1) & SONIC_RRS_MASK;
|
|
+ if (addr == lp->rx_laddr[i])
|
|
+ return i;
|
|
+ } while (i != last);
|
|
+
|
|
+ return -ENOENT;
|
|
+}
|
|
+
|
|
+/* Allocate and map a new skb to be used as a receive buffer. */
|
|
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
|
|
+ struct sk_buff **new_skb, dma_addr_t *new_addr)
|
|
+{
|
|
+ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
|
|
+ if (!*new_skb)
|
|
+ return false;
|
|
+
|
|
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
|
|
+ skb_reserve(*new_skb, 2);
|
|
+
|
|
+ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
|
|
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
|
|
+ if (!*new_addr) {
|
|
+ dev_kfree_skb(*new_skb);
|
|
+ *new_skb = NULL;
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
|
|
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
|
|
+ dma_addr_t old_addr, dma_addr_t new_addr)
|
|
+{
|
|
+ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
|
|
+ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
|
|
+ u32 buf;
|
|
+
|
|
+ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
|
|
+ * scans the other resources in the RRA, those in the range [RWP, RRP).
|
|
+ */
|
|
+ do {
|
|
+ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
|
|
+ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
|
|
+
|
|
+ if (buf == old_addr)
|
|
+ break;
|
|
+
|
|
+ entry = (entry + 1) & SONIC_RRS_MASK;
|
|
+ } while (entry != end);
|
|
+
|
|
+ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
|
|
+
|
|
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
|
|
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
|
|
+
|
|
+ entry = (entry + 1) & SONIC_RRS_MASK;
|
|
+
|
|
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
|
|
+}
|
|
+
|
|
/*
|
|
* We have a good packet(s), pass it/them up the network stack.
|
|
*/
|
|
static void sonic_rx(struct net_device *dev)
|
|
{
|
|
struct sonic_local *lp = netdev_priv(dev);
|
|
- int status;
|
|
int entry = lp->cur_rx;
|
|
+ int prev_entry = lp->eol_rx;
|
|
+ bool rbe = false;
|
|
|
|
while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
|
|
- struct sk_buff *used_skb;
|
|
- struct sk_buff *new_skb;
|
|
- dma_addr_t new_laddr;
|
|
- u16 bufadr_l;
|
|
- u16 bufadr_h;
|
|
- int pkt_len;
|
|
-
|
|
- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
|
|
- if (status & SONIC_RCR_PRX) {
|
|
- /* Malloc up new buffer. */
|
|
- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
|
|
- if (new_skb == NULL) {
|
|
- lp->stats.rx_dropped++;
|
|
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
|
|
+
|
|
+ /* If the RD has LPKT set, the chip has finished with the RB */
|
|
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
|
|
+ struct sk_buff *new_skb;
|
|
+ dma_addr_t new_laddr;
|
|
+ u32 addr = (sonic_rda_get(dev, entry,
|
|
+ SONIC_RD_PKTPTR_H) << 16) |
|
|
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
|
|
+ int i = index_from_addr(lp, addr, entry);
|
|
+
|
|
+ if (i < 0) {
|
|
+ WARN_ONCE(1, "failed to find buffer!\n");
|
|
break;
|
|
}
|
|
- /* provide 16 byte IP header alignment unless DMA requires otherwise */
|
|
- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
|
|
- skb_reserve(new_skb, 2);
|
|
-
|
|
- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
|
|
- SONIC_RBSIZE, DMA_FROM_DEVICE);
|
|
- if (!new_laddr) {
|
|
- dev_kfree_skb(new_skb);
|
|
- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
|
|
+
|
|
+ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
|
|
+ struct sk_buff *used_skb = lp->rx_skb[i];
|
|
+ int pkt_len;
|
|
+
|
|
+ /* Pass the used buffer up the stack */
|
|
+ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
|
|
+ DMA_FROM_DEVICE);
|
|
+
|
|
+ pkt_len = sonic_rda_get(dev, entry,
|
|
+ SONIC_RD_PKTLEN);
|
|
+ skb_trim(used_skb, pkt_len);
|
|
+ used_skb->protocol = eth_type_trans(used_skb,
|
|
+ dev);
|
|
+ netif_rx(used_skb);
|
|
+ lp->stats.rx_packets++;
|
|
+ lp->stats.rx_bytes += pkt_len;
|
|
+
|
|
+ lp->rx_skb[i] = new_skb;
|
|
+ lp->rx_laddr[i] = new_laddr;
|
|
+ } else {
|
|
+ /* Failed to obtain a new buffer so re-use it */
|
|
+ new_laddr = addr;
|
|
lp->stats.rx_dropped++;
|
|
- break;
|
|
}
|
|
-
|
|
- /* now we have a new skb to replace it, pass the used one up the stack */
|
|
- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
|
|
- used_skb = lp->rx_skb[entry];
|
|
- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
|
|
- skb_trim(used_skb, pkt_len);
|
|
- used_skb->protocol = eth_type_trans(used_skb, dev);
|
|
- netif_rx(used_skb);
|
|
- lp->stats.rx_packets++;
|
|
- lp->stats.rx_bytes += pkt_len;
|
|
-
|
|
- /* and insert the new skb */
|
|
- lp->rx_laddr[entry] = new_laddr;
|
|
- lp->rx_skb[entry] = new_skb;
|
|
-
|
|
- bufadr_l = (unsigned long)new_laddr & 0xffff;
|
|
- bufadr_h = (unsigned long)new_laddr >> 16;
|
|
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
|
|
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
|
|
- } else {
|
|
- /* This should only happen, if we enable accepting broken packets. */
|
|
- lp->stats.rx_errors++;
|
|
- if (status & SONIC_RCR_FAER)
|
|
- lp->stats.rx_frame_errors++;
|
|
- if (status & SONIC_RCR_CRCR)
|
|
- lp->stats.rx_crc_errors++;
|
|
- }
|
|
- if (status & SONIC_RCR_LPKT) {
|
|
- /*
|
|
- * this was the last packet out of the current receive buffer
|
|
- * give the buffer back to the SONIC
|
|
+ /* If RBE is already asserted when RWP advances then
|
|
+ * it's safe to clear RBE after processing this packet.
|
|
*/
|
|
- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
|
|
- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
|
|
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
|
|
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
|
|
- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
|
|
- __func__);
|
|
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
|
|
- }
|
|
- } else
|
|
- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
|
|
- dev->name);
|
|
+ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
|
|
+ sonic_update_rra(dev, lp, addr, new_laddr);
|
|
+ }
|
|
/*
|
|
* give back the descriptor
|
|
*/
|
|
- sonic_rda_put(dev, entry, SONIC_RD_LINK,
|
|
- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
|
|
+ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
|
|
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
|
|
- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
|
|
- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
|
|
- lp->eol_rx = entry;
|
|
- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
|
|
+
|
|
+ prev_entry = entry;
|
|
+ entry = (entry + 1) & SONIC_RDS_MASK;
|
|
+ }
|
|
+
|
|
+ lp->cur_rx = entry;
|
|
+
|
|
+ if (prev_entry != lp->eol_rx) {
|
|
+ /* Advance the EOL flag to put descriptors back into service */
|
|
+ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
|
|
+ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
|
|
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
|
|
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
|
|
+ lp->eol_rx = prev_entry;
|
|
}
|
|
+
|
|
+ if (rbe)
|
|
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
|
|
/*
|
|
* If any worth-while packets have been received, netif_rx()
|
|
* has done a mark_bh(NET_BH) for us and will work on them
|
|
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
|
|
(netdev_mc_count(dev) > 15)) {
|
|
rcr |= SONIC_RCR_AMC;
|
|
} else {
|
|
+ unsigned long flags;
|
|
+
|
|
netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
|
|
netdev_mc_count(dev));
|
|
sonic_set_cam_enable(dev, 1); /* always enable our own address */
|
|
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
|
|
i++;
|
|
}
|
|
SONIC_WRITE(SONIC_CDC, 16);
|
|
- /* issue Load CAM command */
|
|
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
|
|
+
|
|
+ /* LCAM and TXP commands can't be used simultaneously */
|
|
+ spin_lock_irqsave(&lp->lock, flags);
|
|
+ sonic_quiesce(dev, SONIC_CR_TXP);
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
|
|
+ sonic_quiesce(dev, SONIC_CR_LCAM);
|
|
+ spin_unlock_irqrestore(&lp->lock, flags);
|
|
}
|
|
}
|
|
|
|
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
|
|
*/
|
|
static int sonic_init(struct net_device *dev)
|
|
{
|
|
- unsigned int cmd;
|
|
struct sonic_local *lp = netdev_priv(dev);
|
|
int i;
|
|
|
|
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
|
|
SONIC_WRITE(SONIC_ISR, 0x7fff);
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
|
|
|
|
+ /* While in reset mode, clear CAM Enable register */
|
|
+ SONIC_WRITE(SONIC_CE, 0);
|
|
+
|
|
/*
|
|
* clear software reset flag, disable receiver, clear and
|
|
* enable interrupts, then completely initialize the SONIC
|
|
*/
|
|
SONIC_WRITE(SONIC_CMD, 0);
|
|
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
|
|
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
|
|
+ sonic_quiesce(dev, SONIC_CR_ALL);
|
|
|
|
/*
|
|
* initialize the receive resource area
|
|
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
|
|
}
|
|
|
|
/* initialize all RRA registers */
|
|
- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
|
|
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
|
|
- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
|
|
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
|
|
-
|
|
- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
|
|
- SONIC_WRITE(SONIC_REA, lp->rra_end);
|
|
- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
|
|
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
|
|
+ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
|
|
+ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
|
|
+ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
|
|
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
|
|
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
|
|
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
|
|
|
|
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
|
|
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
|
|
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
|
|
- i = 0;
|
|
- while (i++ < 100) {
|
|
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
|
|
- break;
|
|
- }
|
|
-
|
|
- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
|
|
- SONIC_READ(SONIC_CMD), i);
|
|
+ sonic_quiesce(dev, SONIC_CR_RRRA);
|
|
|
|
/*
|
|
* Initialize the receive descriptors so that they
|
|
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
|
|
* load the CAM
|
|
*/
|
|
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
|
|
-
|
|
- i = 0;
|
|
- while (i++ < 100) {
|
|
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
|
|
- break;
|
|
- }
|
|
- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
|
|
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
|
|
+ sonic_quiesce(dev, SONIC_CR_LCAM);
|
|
|
|
/*
|
|
* enable receiver, disable loopback
|
|
* and enable all interrupts
|
|
*/
|
|
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
|
|
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
|
|
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
|
|
SONIC_WRITE(SONIC_ISR, 0x7fff);
|
|
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
|
|
-
|
|
- cmd = SONIC_READ(SONIC_CMD);
|
|
- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
|
|
- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
|
|
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
|
|
|
|
netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
|
|
SONIC_READ(SONIC_CMD));
|
|
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
|
|
index 2b27f7049acb..1df6d2f06cc4 100644
|
|
--- a/drivers/net/ethernet/natsemi/sonic.h
|
|
+++ b/drivers/net/ethernet/natsemi/sonic.h
|
|
@@ -110,6 +110,9 @@
|
|
#define SONIC_CR_TXP 0x0002
|
|
#define SONIC_CR_HTX 0x0001
|
|
|
|
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
|
|
+ SONIC_CR_RXEN | SONIC_CR_TXP)
|
|
+
|
|
/*
|
|
* SONIC data configuration bits
|
|
*/
|
|
@@ -175,6 +178,7 @@
|
|
#define SONIC_TCR_NCRS 0x0100
|
|
#define SONIC_TCR_CRLS 0x0080
|
|
#define SONIC_TCR_EXC 0x0040
|
|
+#define SONIC_TCR_OWC 0x0020
|
|
#define SONIC_TCR_PMB 0x0008
|
|
#define SONIC_TCR_FU 0x0004
|
|
#define SONIC_TCR_BCM 0x0002
|
|
@@ -274,8 +278,9 @@
|
|
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
|
|
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
|
|
|
|
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
|
|
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
|
|
+#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
|
|
+#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
|
|
+#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
|
|
|
|
#define SONIC_RBSIZE 1520 /* size of one resource buffer */
|
|
|
|
@@ -312,8 +317,6 @@ struct sonic_local {
|
|
u32 rda_laddr; /* logical DMA address of RDA */
|
|
dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
|
|
dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
|
|
- unsigned int rra_end;
|
|
- unsigned int cur_rwp;
|
|
unsigned int cur_rx;
|
|
unsigned int cur_tx; /* first unacked transmit packet */
|
|
unsigned int eol_rx;
|
|
@@ -322,6 +325,7 @@ struct sonic_local {
|
|
int msg_enable;
|
|
struct device *device; /* generic device */
|
|
struct net_device_stats stats;
|
|
+ spinlock_t lock;
|
|
};
|
|
|
|
#define TX_TIMEOUT (3 * HZ)
|
|
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
|
|
as far as we can tell. */
|
|
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
|
|
is a much better name. */
|
|
-static inline void sonic_buf_put(void* base, int bitmode,
|
|
+static inline void sonic_buf_put(u16 *base, int bitmode,
|
|
int offset, __u16 val)
|
|
{
|
|
if (bitmode)
|
|
#ifdef __BIG_ENDIAN
|
|
- ((__u16 *) base + (offset*2))[1] = val;
|
|
+ __raw_writew(val, base + (offset * 2) + 1);
|
|
#else
|
|
- ((__u16 *) base + (offset*2))[0] = val;
|
|
+ __raw_writew(val, base + (offset * 2) + 0);
|
|
#endif
|
|
else
|
|
- ((__u16 *) base)[offset] = val;
|
|
+ __raw_writew(val, base + (offset * 1) + 0);
|
|
}
|
|
|
|
-static inline __u16 sonic_buf_get(void* base, int bitmode,
|
|
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
|
|
int offset)
|
|
{
|
|
if (bitmode)
|
|
#ifdef __BIG_ENDIAN
|
|
- return ((volatile __u16 *) base + (offset*2))[1];
|
|
+ return __raw_readw(base + (offset * 2) + 1);
|
|
#else
|
|
- return ((volatile __u16 *) base + (offset*2))[0];
|
|
+ return __raw_readw(base + (offset * 2) + 0);
|
|
#endif
|
|
else
|
|
- return ((volatile __u16 *) base)[offset];
|
|
+ return __raw_readw(base + (offset * 1) + 0);
|
|
}
|
|
|
|
/* Inlines that you should actually use for reading/writing DMA buffers */
|
|
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
|
|
(entry * SIZEOF_SONIC_RR) + offset);
|
|
}
|
|
|
|
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
|
|
+{
|
|
+ struct sonic_local *lp = netdev_priv(dev);
|
|
+
|
|
+ return lp->rra_laddr +
|
|
+ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
|
|
+}
|
|
+
|
|
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
|
|
+{
|
|
+ struct sonic_local *lp = netdev_priv(dev);
|
|
+
|
|
+ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
|
|
+ SONIC_BUS_SCALE(lp->dma_bitmode));
|
|
+}
|
|
+
|
|
static const char version[] =
|
|
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
|
|
|
|
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
|
|
index f6222ada6818..9b3ba98726d7 100644
|
|
--- a/drivers/net/gtp.c
|
|
+++ b/drivers/net/gtp.c
|
|
@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
|
|
return NULL;
|
|
}
|
|
|
|
- if (sock->sk->sk_protocol != IPPROTO_UDP) {
|
|
+ sk = sock->sk;
|
|
+ if (sk->sk_protocol != IPPROTO_UDP ||
|
|
+ sk->sk_type != SOCK_DGRAM ||
|
|
+ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
|
|
pr_debug("socket fd=%d not UDP\n", fd);
|
|
sk = ERR_PTR(-EINVAL);
|
|
goto out_sock;
|
|
}
|
|
|
|
- lock_sock(sock->sk);
|
|
- if (sock->sk->sk_user_data) {
|
|
+ lock_sock(sk);
|
|
+ if (sk->sk_user_data) {
|
|
sk = ERR_PTR(-EBUSY);
|
|
goto out_rel_sock;
|
|
}
|
|
|
|
- sk = sock->sk;
|
|
sock_hold(sk);
|
|
|
|
tuncfg.sk_user_data = gtp;
|
|
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
|
|
index 2a91c192659f..61d7e0d1d77d 100644
|
|
--- a/drivers/net/slip/slip.c
|
|
+++ b/drivers/net/slip/slip.c
|
|
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
|
|
*/
|
|
static void slip_write_wakeup(struct tty_struct *tty)
|
|
{
|
|
- struct slip *sl = tty->disc_data;
|
|
+ struct slip *sl;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ sl = rcu_dereference(tty->disc_data);
|
|
+ if (!sl)
|
|
+ goto out;
|
|
|
|
schedule_work(&sl->tx_work);
|
|
+out:
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
static void sl_tx_timeout(struct net_device *dev)
|
|
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
|
|
return;
|
|
|
|
spin_lock_bh(&sl->lock);
|
|
- tty->disc_data = NULL;
|
|
+ rcu_assign_pointer(tty->disc_data, NULL);
|
|
sl->tty = NULL;
|
|
spin_unlock_bh(&sl->lock);
|
|
|
|
+ synchronize_rcu();
|
|
flush_work(&sl->tx_work);
|
|
|
|
/* VSV = very important to remove timers */
|
|
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
|
|
index 16564ebcde50..69f553a028ee 100644
|
|
--- a/drivers/net/tun.c
|
|
+++ b/drivers/net/tun.c
|
|
@@ -1936,6 +1936,10 @@ drop:
|
|
if (ret != XDP_PASS) {
|
|
rcu_read_unlock();
|
|
local_bh_enable();
|
|
+ if (frags) {
|
|
+ tfile->napi.skb = NULL;
|
|
+ mutex_unlock(&tfile->napi_mutex);
|
|
+ }
|
|
return total_len;
|
|
}
|
|
}
|
|
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
|
|
index c232f1612083..0170a441208a 100644
|
|
--- a/drivers/net/usb/lan78xx.c
|
|
+++ b/drivers/net/usb/lan78xx.c
|
|
@@ -20,6 +20,7 @@
|
|
#include <linux/mdio.h>
|
|
#include <linux/phy.h>
|
|
#include <net/ip6_checksum.h>
|
|
+#include <net/vxlan.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/irqdomain.h>
|
|
#include <linux/irq.h>
|
|
@@ -3668,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
|
|
tasklet_schedule(&dev->bh);
|
|
}
|
|
|
|
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
|
|
+ struct net_device *netdev,
|
|
+ netdev_features_t features)
|
|
+{
|
|
+ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
|
|
+ features &= ~NETIF_F_GSO_MASK;
|
|
+
|
|
+ features = vlan_features_check(skb, features);
|
|
+ features = vxlan_features_check(skb, features);
|
|
+
|
|
+ return features;
|
|
+}
|
|
+
|
|
static const struct net_device_ops lan78xx_netdev_ops = {
|
|
.ndo_open = lan78xx_open,
|
|
.ndo_stop = lan78xx_stop,
|
|
@@ -3681,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
|
|
.ndo_set_features = lan78xx_set_features,
|
|
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
|
|
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
|
|
+ .ndo_features_check = lan78xx_features_check,
|
|
};
|
|
|
|
static void lan78xx_stat_monitor(struct timer_list *t)
|
|
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
|
|
index f43c06569ea1..c4c8f1b62e1e 100644
|
|
--- a/drivers/net/wireless/cisco/airo.c
|
|
+++ b/drivers/net/wireless/cisco/airo.c
|
|
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
|
|
case AIROGVLIST: ridcode = RID_APLIST; break;
|
|
case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
|
|
case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
|
|
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
|
|
- /* Only super-user can read WEP keys */
|
|
- if (!capable(CAP_NET_ADMIN))
|
|
- return -EPERM;
|
|
- break;
|
|
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
|
|
- /* Only super-user can read WEP keys */
|
|
- if (!capable(CAP_NET_ADMIN))
|
|
- return -EPERM;
|
|
- break;
|
|
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
|
|
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
|
|
case AIROGSTAT: ridcode = RID_STATUS; break;
|
|
case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
|
|
case AIROGSTATSC32: ridcode = RID_STATS; break;
|
|
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
|
|
+ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
|
|
+ /* Only super-user can read WEP keys */
|
|
+ if (!capable(CAP_NET_ADMIN))
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
|
|
return -ENOMEM;
|
|
|
|
PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
|
|
index 60aff2ecec12..58df25e2fb32 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
|
|
@@ -154,5 +154,6 @@
|
|
#define IWL_MVM_D3_DEBUG false
|
|
#define IWL_MVM_USE_TWT false
|
|
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
|
|
+#define IWL_MVM_USE_NSSN_SYNC 0
|
|
|
|
#endif /* __MVM_CONSTANTS_H */
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
index d31f96c3f925..49aeab7c27a2 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
@@ -742,6 +742,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
|
return ret;
|
|
}
|
|
|
|
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
+ struct ieee80211_sta *sta)
|
|
+{
|
|
+ if (likely(sta)) {
|
|
+ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
|
|
+ return;
|
|
+ } else {
|
|
+ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ieee80211_free_txskb(mvm->hw, skb);
|
|
+}
|
|
+
|
|
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
|
|
struct ieee80211_tx_control *control,
|
|
struct sk_buff *skb)
|
|
@@ -785,14 +799,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
|
|
}
|
|
}
|
|
|
|
- if (sta) {
|
|
- if (iwl_mvm_tx_skb(mvm, skb, sta))
|
|
- goto drop;
|
|
- return;
|
|
- }
|
|
-
|
|
- if (iwl_mvm_tx_skb_non_sta(mvm, skb))
|
|
- goto drop;
|
|
+ iwl_mvm_tx_skb(mvm, skb, sta);
|
|
return;
|
|
drop:
|
|
ieee80211_free_txskb(hw, skb);
|
|
@@ -842,10 +849,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
|
|
break;
|
|
}
|
|
|
|
- if (!txq->sta)
|
|
- iwl_mvm_tx_skb_non_sta(mvm, skb);
|
|
- else
|
|
- iwl_mvm_tx_skb(mvm, skb, txq->sta);
|
|
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
|
|
}
|
|
} while (atomic_dec_return(&mvmtxq->tx_request));
|
|
rcu_read_unlock();
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
|
|
index 5ca50f39a023..5f1ecbb6fb71 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
|
|
@@ -1508,8 +1508,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
|
|
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
|
|
u16 len, const void *data,
|
|
u32 *status);
|
|
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
- struct ieee80211_sta *sta);
|
|
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
+ struct ieee80211_sta *sta);
|
|
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
|
|
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
struct iwl_tx_cmd *tx_cmd,
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
|
|
index 77b03b757193..a6e2a30eb310 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
|
|
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
|
|
|
|
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
|
|
{
|
|
- struct iwl_mvm_rss_sync_notif notif = {
|
|
- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
|
|
- .metadata.sync = 0,
|
|
- .nssn_sync.baid = baid,
|
|
- .nssn_sync.nssn = nssn,
|
|
- };
|
|
-
|
|
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif));
|
|
+ if (IWL_MVM_USE_NSSN_SYNC) {
|
|
+ struct iwl_mvm_rss_sync_notif notif = {
|
|
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
|
|
+ .metadata.sync = 0,
|
|
+ .nssn_sync.baid = baid,
|
|
+ .nssn_sync.nssn = nssn,
|
|
+ };
|
|
+
|
|
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if,
|
|
+ sizeof(notif));
|
|
+ }
|
|
}
|
|
|
|
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
|
|
index fcafa22ec6ce..8aa567d7912c 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
|
|
@@ -1220,7 +1220,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
|
|
cmd_size = sizeof(struct iwl_scan_config_v2);
|
|
else
|
|
cmd_size = sizeof(struct iwl_scan_config_v1);
|
|
- cmd_size += num_channels;
|
|
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
|
|
|
|
cfg = kzalloc(cmd_size, GFP_KERNEL);
|
|
if (!cfg)
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
|
|
index e3b2a2bf3863..d9d82f6b5e87 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
|
|
@@ -1151,7 +1151,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
|
|
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
|
|
spin_unlock(&mvmsta->lock);
|
|
- return 0;
|
|
+ return -1;
|
|
}
|
|
|
|
if (!iwl_mvm_has_new_tx_api(mvm)) {
|
|
@@ -1203,8 +1203,8 @@ drop:
|
|
return -1;
|
|
}
|
|
|
|
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
- struct ieee80211_sta *sta)
|
|
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|
+ struct ieee80211_sta *sta)
|
|
{
|
|
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
|
struct ieee80211_tx_info info;
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
|
|
index 041dd75ac72b..64c74acadb99 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
|
|
@@ -1537,13 +1537,13 @@ out:
|
|
|
|
napi = &rxq->napi;
|
|
if (napi->poll) {
|
|
+ napi_gro_flush(napi, false);
|
|
+
|
|
if (napi->rx_count) {
|
|
netif_receive_skb_list(&napi->rx_list);
|
|
INIT_LIST_HEAD(&napi->rx_list);
|
|
napi->rx_count = 0;
|
|
}
|
|
-
|
|
- napi_gro_flush(napi, false);
|
|
}
|
|
|
|
iwl_pcie_rxq_restock(trans, rxq);
|
|
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
|
|
index 57edfada0665..c9401c121a14 100644
|
|
--- a/drivers/net/wireless/marvell/libertas/cfg.c
|
|
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
|
|
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
|
|
int hw, ap, ap_max = ie[1];
|
|
u8 hw_rate;
|
|
|
|
+ if (ap_max > MAX_RATES) {
|
|
+ lbs_deb_assoc("invalid rates\n");
|
|
+ return tlv;
|
|
+ }
|
|
/* Advance past IE header */
|
|
ie += 2;
|
|
|
|
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
|
|
struct cmd_ds_802_11_ad_hoc_join cmd;
|
|
u8 preamble = RADIO_PREAMBLE_SHORT;
|
|
int ret = 0;
|
|
+ int hw, i;
|
|
+ u8 rates_max;
|
|
+ u8 *rates;
|
|
|
|
/* TODO: set preamble based on scan result */
|
|
ret = lbs_set_radio(priv, preamble, 1);
|
|
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
|
|
if (!rates_eid) {
|
|
lbs_add_rates(cmd.bss.rates);
|
|
} else {
|
|
- int hw, i;
|
|
- u8 rates_max = rates_eid[1];
|
|
- u8 *rates = cmd.bss.rates;
|
|
+ rates_max = rates_eid[1];
|
|
+ if (rates_max > MAX_RATES) {
|
|
+ lbs_deb_join("invalid rates");
|
|
+ goto out;
|
|
+ }
|
|
+ rates = cmd.bss.rates;
|
|
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
|
|
u8 hw_rate = lbs_rates[hw].bitrate / 5;
|
|
for (i = 0; i < rates_max; i++) {
|
|
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
|
|
index 308f744393eb..1593b8494ebb 100644
|
|
--- a/drivers/pci/quirks.c
|
|
+++ b/drivers/pci/quirks.c
|
|
@@ -5021,18 +5021,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
|
|
|
|
#ifdef CONFIG_PCI_ATS
|
|
/*
|
|
- * Some devices have a broken ATS implementation causing IOMMU stalls.
|
|
- * Don't use ATS for those devices.
|
|
+ * Some devices require additional driver setup to enable ATS. Don't use
|
|
+ * ATS for those devices as ATS will be enabled before the driver has had a
|
|
+ * chance to load and configure the device.
|
|
*/
|
|
-static void quirk_no_ats(struct pci_dev *pdev)
|
|
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
|
|
{
|
|
- pci_info(pdev, "disabling ATS (broken on this device)\n");
|
|
+ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
|
|
+ return;
|
|
+
|
|
+ pci_info(pdev, "disabling ATS\n");
|
|
pdev->ats_cap = 0;
|
|
}
|
|
|
|
/* AMD Stoney platform GPU */
|
|
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
|
|
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
|
|
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
|
|
+/* AMD Iceland dGPU */
|
|
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
|
|
+/* AMD Navi14 dGPU */
|
|
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
|
|
#endif /* CONFIG_PCI_ATS */
|
|
|
|
/* Freescale PCIe doesn't support MSI in RC mode */
|
|
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
|
|
index 44d7f50bbc82..d936e7aa74c4 100644
|
|
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
|
|
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
|
|
@@ -49,6 +49,7 @@
|
|
.padown_offset = SPT_PAD_OWN, \
|
|
.padcfglock_offset = SPT_PADCFGLOCK, \
|
|
.hostown_offset = SPT_HOSTSW_OWN, \
|
|
+ .is_offset = SPT_GPI_IS, \
|
|
.ie_offset = SPT_GPI_IE, \
|
|
.pin_base = (s), \
|
|
.npins = ((e) - (s) + 1), \
|
|
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
|
|
index f194ffc4699e..c070cb2a6a5b 100644
|
|
--- a/drivers/target/iscsi/iscsi_target.c
|
|
+++ b/drivers/target/iscsi/iscsi_target.c
|
|
@@ -4151,9 +4151,6 @@ int iscsit_close_connection(
|
|
iscsit_stop_nopin_response_timer(conn);
|
|
iscsit_stop_nopin_timer(conn);
|
|
|
|
- if (conn->conn_transport->iscsit_wait_conn)
|
|
- conn->conn_transport->iscsit_wait_conn(conn);
|
|
-
|
|
/*
|
|
* During Connection recovery drop unacknowledged out of order
|
|
* commands for this connection, and prepare the other commands
|
|
@@ -4239,6 +4236,9 @@ int iscsit_close_connection(
|
|
target_sess_cmd_list_set_waiting(sess->se_sess);
|
|
target_wait_for_sess_cmds(sess->se_sess);
|
|
|
|
+ if (conn->conn_transport->iscsit_wait_conn)
|
|
+ conn->conn_transport->iscsit_wait_conn(conn);
|
|
+
|
|
ahash_request_free(conn->conn_tx_hash);
|
|
if (conn->conn_rx_hash) {
|
|
struct crypto_ahash *tfm;
|
|
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
|
|
index fd5133e26a38..78ba5f932287 100644
|
|
--- a/fs/afs/cell.c
|
|
+++ b/fs/afs/cell.c
|
|
@@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
|
|
_leave(" = -ENAMETOOLONG");
|
|
return ERR_PTR(-ENAMETOOLONG);
|
|
}
|
|
- if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
|
|
+
|
|
+ /* Prohibit cell names that contain unprintable chars, '/' and '@' or
|
|
+ * that begin with a dot. This also precludes "@cell".
|
|
+ */
|
|
+ if (name[0] == '.')
|
|
return ERR_PTR(-EINVAL);
|
|
+ for (i = 0; i < namelen; i++) {
|
|
+ char ch = name[i];
|
|
+ if (!isprint(ch) || ch == '/' || ch == '@')
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
|
|
_enter("%*.*s,%s", namelen, namelen, name, addresses);
|
|
|
|
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
|
|
index a5163296d9d9..ee02a742fff5 100644
|
|
--- a/fs/ceph/mds_client.c
|
|
+++ b/fs/ceph/mds_client.c
|
|
@@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kref *kref)
|
|
/* avoid calling iput_final() in mds dispatch threads */
|
|
ceph_async_iput(req->r_inode);
|
|
}
|
|
- if (req->r_parent)
|
|
+ if (req->r_parent) {
|
|
ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
|
|
+ ceph_async_iput(req->r_parent);
|
|
+ }
|
|
ceph_async_iput(req->r_target_inode);
|
|
if (req->r_dentry)
|
|
dput(req->r_dentry);
|
|
@@ -2670,8 +2672,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
|
|
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
|
|
if (req->r_inode)
|
|
ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
|
|
- if (req->r_parent)
|
|
+ if (req->r_parent) {
|
|
ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
|
|
+ ihold(req->r_parent);
|
|
+ }
|
|
if (req->r_old_dentry_dir)
|
|
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
|
|
CEPH_CAP_PIN);
|
|
diff --git a/fs/io_uring.c b/fs/io_uring.c
|
|
index b1c9ad1fb9e1..709671faaed6 100644
|
|
--- a/fs/io_uring.c
|
|
+++ b/fs/io_uring.c
|
|
@@ -3716,12 +3716,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
|
|
wake_up(&ctx->sqo_wait);
|
|
submitted = to_submit;
|
|
} else if (to_submit) {
|
|
- if (current->mm != ctx->sqo_mm ||
|
|
- current_cred() != ctx->creds) {
|
|
- ret = -EPERM;
|
|
- goto out;
|
|
- }
|
|
-
|
|
to_submit = min(to_submit, ctx->sq_entries);
|
|
|
|
mutex_lock(&ctx->uring_lock);
|
|
diff --git a/fs/namei.c b/fs/namei.c
|
|
index 671c3c1a3425..e81521c87f98 100644
|
|
--- a/fs/namei.c
|
|
+++ b/fs/namei.c
|
|
@@ -1001,7 +1001,8 @@ static int may_linkat(struct path *link)
|
|
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
|
|
* should be allowed, or not, on files that already
|
|
* exist.
|
|
- * @dir: the sticky parent directory
|
|
+ * @dir_mode: mode bits of directory
|
|
+ * @dir_uid: owner of directory
|
|
* @inode: the inode of the file to open
|
|
*
|
|
* Block an O_CREAT open of a FIFO (or a regular file) when:
|
|
@@ -1017,18 +1018,18 @@ static int may_linkat(struct path *link)
|
|
*
|
|
* Returns 0 if the open is allowed, -ve on error.
|
|
*/
|
|
-static int may_create_in_sticky(struct dentry * const dir,
|
|
+static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
|
|
struct inode * const inode)
|
|
{
|
|
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
|
|
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
|
|
- likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
|
|
- uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
|
|
+ likely(!(dir_mode & S_ISVTX)) ||
|
|
+ uid_eq(inode->i_uid, dir_uid) ||
|
|
uid_eq(current_fsuid(), inode->i_uid))
|
|
return 0;
|
|
|
|
- if (likely(dir->d_inode->i_mode & 0002) ||
|
|
- (dir->d_inode->i_mode & 0020 &&
|
|
+ if (likely(dir_mode & 0002) ||
|
|
+ (dir_mode & 0020 &&
|
|
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
|
|
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
|
|
return -EACCES;
|
|
@@ -3248,6 +3249,8 @@ static int do_last(struct nameidata *nd,
|
|
struct file *file, const struct open_flags *op)
|
|
{
|
|
struct dentry *dir = nd->path.dentry;
|
|
+ kuid_t dir_uid = dir->d_inode->i_uid;
|
|
+ umode_t dir_mode = dir->d_inode->i_mode;
|
|
int open_flag = op->open_flag;
|
|
bool will_truncate = (open_flag & O_TRUNC) != 0;
|
|
bool got_write = false;
|
|
@@ -3383,7 +3386,7 @@ finish_open:
|
|
error = -EISDIR;
|
|
if (d_is_dir(nd->path.dentry))
|
|
goto out;
|
|
- error = may_create_in_sticky(dir,
|
|
+ error = may_create_in_sticky(dir_mode, dir_uid,
|
|
d_backing_inode(nd->path.dentry));
|
|
if (unlikely(error))
|
|
goto out;
|
|
diff --git a/fs/readdir.c b/fs/readdir.c
|
|
index d26d5ea4de7b..de2eceffdee8 100644
|
|
--- a/fs/readdir.c
|
|
+++ b/fs/readdir.c
|
|
@@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
|
|
* filename length, and the above "soft error" worry means
|
|
* that it's probably better left alone until we have that
|
|
* issue clarified.
|
|
+ *
|
|
+ * Note the PATH_MAX check - it's arbitrary but the real
|
|
+ * kernel limit on a possible path component, not NAME_MAX,
|
|
+ * which is the technical standard limit.
|
|
*/
|
|
static int verify_dirent_name(const char *name, int len)
|
|
{
|
|
- if (!len)
|
|
+ if (len <= 0 || len >= PATH_MAX)
|
|
return -EIO;
|
|
if (memchr(name, '/', len))
|
|
return -EIO;
|
|
@@ -206,7 +210,7 @@ struct linux_dirent {
|
|
struct getdents_callback {
|
|
struct dir_context ctx;
|
|
struct linux_dirent __user * current_dir;
|
|
- struct linux_dirent __user * previous;
|
|
+ int prev_reclen;
|
|
int count;
|
|
int error;
|
|
};
|
|
@@ -214,12 +218,13 @@ struct getdents_callback {
|
|
static int filldir(struct dir_context *ctx, const char *name, int namlen,
|
|
loff_t offset, u64 ino, unsigned int d_type)
|
|
{
|
|
- struct linux_dirent __user * dirent;
|
|
+ struct linux_dirent __user *dirent, *prev;
|
|
struct getdents_callback *buf =
|
|
container_of(ctx, struct getdents_callback, ctx);
|
|
unsigned long d_ino;
|
|
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
|
|
sizeof(long));
|
|
+ int prev_reclen;
|
|
|
|
buf->error = verify_dirent_name(name, namlen);
|
|
if (unlikely(buf->error))
|
|
@@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
|
|
buf->error = -EOVERFLOW;
|
|
return -EOVERFLOW;
|
|
}
|
|
- dirent = buf->previous;
|
|
- if (dirent && signal_pending(current))
|
|
+ prev_reclen = buf->prev_reclen;
|
|
+ if (prev_reclen && signal_pending(current))
|
|
return -EINTR;
|
|
-
|
|
- /*
|
|
- * Note! This range-checks 'previous' (which may be NULL).
|
|
- * The real range was checked in getdents
|
|
- */
|
|
- if (!user_access_begin(dirent, sizeof(*dirent)))
|
|
- goto efault;
|
|
- if (dirent)
|
|
- unsafe_put_user(offset, &dirent->d_off, efault_end);
|
|
dirent = buf->current_dir;
|
|
+ prev = (void __user *) dirent - prev_reclen;
|
|
+ if (!user_access_begin(prev, reclen + prev_reclen))
|
|
+ goto efault;
|
|
+
|
|
+ /* This might be 'dirent->d_off', but if so it will get overwritten */
|
|
+ unsafe_put_user(offset, &prev->d_off, efault_end);
|
|
unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
|
|
unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
|
|
unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
|
|
unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
|
|
user_access_end();
|
|
|
|
- buf->previous = dirent;
|
|
- dirent = (void __user *)dirent + reclen;
|
|
- buf->current_dir = dirent;
|
|
+ buf->current_dir = (void __user *)dirent + reclen;
|
|
+ buf->prev_reclen = reclen;
|
|
buf->count -= reclen;
|
|
return 0;
|
|
efault_end:
|
|
@@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
|
|
struct linux_dirent __user *, dirent, unsigned int, count)
|
|
{
|
|
struct fd f;
|
|
- struct linux_dirent __user * lastdirent;
|
|
struct getdents_callback buf = {
|
|
.ctx.actor = filldir,
|
|
.count = count,
|
|
@@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
|
|
error = iterate_dir(f.file, &buf.ctx);
|
|
if (error >= 0)
|
|
error = buf.error;
|
|
- lastdirent = buf.previous;
|
|
- if (lastdirent) {
|
|
+ if (buf.prev_reclen) {
|
|
+ struct linux_dirent __user * lastdirent;
|
|
+ lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
|
|
+
|
|
if (put_user(buf.ctx.pos, &lastdirent->d_off))
|
|
error = -EFAULT;
|
|
else
|
|
@@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
|
|
struct getdents_callback64 {
|
|
struct dir_context ctx;
|
|
struct linux_dirent64 __user * current_dir;
|
|
- struct linux_dirent64 __user * previous;
|
|
+ int prev_reclen;
|
|
int count;
|
|
int error;
|
|
};
|
|
@@ -307,11 +309,12 @@ struct getdents_callback64 {
|
|
static int filldir64(struct dir_context *ctx, const char *name, int namlen,
|
|
loff_t offset, u64 ino, unsigned int d_type)
|
|
{
|
|
- struct linux_dirent64 __user *dirent;
|
|
+ struct linux_dirent64 __user *dirent, *prev;
|
|
struct getdents_callback64 *buf =
|
|
container_of(ctx, struct getdents_callback64, ctx);
|
|
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
|
|
sizeof(u64));
|
|
+ int prev_reclen;
|
|
|
|
buf->error = verify_dirent_name(name, namlen);
|
|
if (unlikely(buf->error))
|
|
@@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
|
|
buf->error = -EINVAL; /* only used if we fail.. */
|
|
if (reclen > buf->count)
|
|
return -EINVAL;
|
|
- dirent = buf->previous;
|
|
- if (dirent && signal_pending(current))
|
|
+ prev_reclen = buf->prev_reclen;
|
|
+ if (prev_reclen && signal_pending(current))
|
|
return -EINTR;
|
|
-
|
|
- /*
|
|
- * Note! This range-checks 'previous' (which may be NULL).
|
|
- * The real range was checked in getdents
|
|
- */
|
|
- if (!user_access_begin(dirent, sizeof(*dirent)))
|
|
- goto efault;
|
|
- if (dirent)
|
|
- unsafe_put_user(offset, &dirent->d_off, efault_end);
|
|
dirent = buf->current_dir;
|
|
+ prev = (void __user *)dirent - prev_reclen;
|
|
+ if (!user_access_begin(prev, reclen + prev_reclen))
|
|
+ goto efault;
|
|
+
|
|
+ /* This might be 'dirent->d_off', but if so it will get overwritten */
|
|
+ unsafe_put_user(offset, &prev->d_off, efault_end);
|
|
unsafe_put_user(ino, &dirent->d_ino, efault_end);
|
|
unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
|
|
unsafe_put_user(d_type, &dirent->d_type, efault_end);
|
|
unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
|
|
user_access_end();
|
|
|
|
- buf->previous = dirent;
|
|
- dirent = (void __user *)dirent + reclen;
|
|
- buf->current_dir = dirent;
|
|
+ buf->prev_reclen = reclen;
|
|
+ buf->current_dir = (void __user *)dirent + reclen;
|
|
buf->count -= reclen;
|
|
return 0;
|
|
+
|
|
efault_end:
|
|
user_access_end();
|
|
efault:
|
|
@@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
|
|
unsigned int count)
|
|
{
|
|
struct fd f;
|
|
- struct linux_dirent64 __user * lastdirent;
|
|
struct getdents_callback64 buf = {
|
|
.ctx.actor = filldir64,
|
|
.count = count,
|
|
@@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
|
|
error = iterate_dir(f.file, &buf.ctx);
|
|
if (error >= 0)
|
|
error = buf.error;
|
|
- lastdirent = buf.previous;
|
|
- if (lastdirent) {
|
|
+ if (buf.prev_reclen) {
|
|
+ struct linux_dirent64 __user * lastdirent;
|
|
typeof(lastdirent->d_off) d_off = buf.ctx.pos;
|
|
+
|
|
+ lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
|
|
if (__put_user(d_off, &lastdirent->d_off))
|
|
error = -EFAULT;
|
|
else
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 13f09706033a..f8fde9fa479c 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -3666,6 +3666,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
|
|
int dev_get_alias(const struct net_device *, char *, size_t);
|
|
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
|
|
int __dev_set_mtu(struct net_device *, int);
|
|
+int dev_validate_mtu(struct net_device *dev, int mtu,
|
|
+ struct netlink_ext_ack *extack);
|
|
int dev_set_mtu_ext(struct net_device *dev, int mtu,
|
|
struct netlink_ext_ack *extack);
|
|
int dev_set_mtu(struct net_device *, int);
|
|
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
|
|
index 9bc255a8461b..77336f4c4b1c 100644
|
|
--- a/include/linux/netfilter/ipset/ip_set.h
|
|
+++ b/include/linux/netfilter/ipset/ip_set.h
|
|
@@ -445,13 +445,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
|
|
sizeof(*addr));
|
|
}
|
|
|
|
-/* Calculate the bytes required to store the inclusive range of a-b */
|
|
-static inline int
|
|
-bitmap_bytes(u32 a, u32 b)
|
|
-{
|
|
- return 4 * ((((b - a + 8) / 8) + 3) / 4);
|
|
-}
|
|
-
|
|
/* How often should the gc be run by default */
|
|
#define IPSET_GC_TIME (3 * 60)
|
|
|
|
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
|
|
index cf09ab37b45b..851425c3178f 100644
|
|
--- a/include/linux/netfilter/nfnetlink.h
|
|
+++ b/include/linux/netfilter/nfnetlink.h
|
|
@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
|
|
const struct nfnl_callback *cb; /* callback for individual types */
|
|
struct module *owner;
|
|
int (*commit)(struct net *net, struct sk_buff *skb);
|
|
- int (*abort)(struct net *net, struct sk_buff *skb);
|
|
+ int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
|
|
void (*cleanup)(struct net *net);
|
|
bool (*valid_genid)(struct net *net, u32 genid);
|
|
};
|
|
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
|
|
index 286fd960896f..a1a8d45adb42 100644
|
|
--- a/include/net/netns/nftables.h
|
|
+++ b/include/net/netns/nftables.h
|
|
@@ -7,6 +7,7 @@
|
|
struct netns_nftables {
|
|
struct list_head tables;
|
|
struct list_head commit_list;
|
|
+ struct list_head module_list;
|
|
struct mutex commit_mutex;
|
|
unsigned int base_seq;
|
|
u8 gencursor;
|
|
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
|
|
index 9a0e8af21310..a5ccfa67bc5c 100644
|
|
--- a/include/trace/events/xen.h
|
|
+++ b/include/trace/events/xen.h
|
|
@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
|
|
TP_PROTO(xen_mc_callback_fn_t fn, void *data),
|
|
TP_ARGS(fn, data),
|
|
TP_STRUCT__entry(
|
|
- __field(xen_mc_callback_fn_t, fn)
|
|
+ /*
|
|
+ * Use field_struct to avoid is_signed_type()
|
|
+ * comparison of a function pointer.
|
|
+ */
|
|
+ __field_struct(xen_mc_callback_fn_t, fn)
|
|
__field(void *, data)
|
|
),
|
|
TP_fast_assign(
|
|
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
|
|
index 26b9168321e7..d65f2d5ab694 100644
|
|
--- a/kernel/power/snapshot.c
|
|
+++ b/kernel/power/snapshot.c
|
|
@@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
|
|
|
|
void clear_free_pages(void)
|
|
{
|
|
-#ifdef CONFIG_PAGE_POISONING_ZERO
|
|
struct memory_bitmap *bm = free_pages_map;
|
|
unsigned long pfn;
|
|
|
|
if (WARN_ON(!(free_pages_map)))
|
|
return;
|
|
|
|
- memory_bm_position_reset(bm);
|
|
- pfn = memory_bm_next_pfn(bm);
|
|
- while (pfn != BM_END_OF_MAP) {
|
|
- if (pfn_valid(pfn))
|
|
- clear_highpage(pfn_to_page(pfn));
|
|
-
|
|
+ if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
|
|
+ memory_bm_position_reset(bm);
|
|
pfn = memory_bm_next_pfn(bm);
|
|
+ while (pfn != BM_END_OF_MAP) {
|
|
+ if (pfn_valid(pfn))
|
|
+ clear_highpage(pfn_to_page(pfn));
|
|
+
|
|
+ pfn = memory_bm_next_pfn(bm);
|
|
+ }
|
|
+ memory_bm_position_reset(bm);
|
|
+ pr_info("free pages cleared after restore\n");
|
|
}
|
|
- memory_bm_position_reset(bm);
|
|
- pr_info("free pages cleared after restore\n");
|
|
-#endif /* PAGE_POISONING_ZERO */
|
|
}
|
|
|
|
/**
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index bcb72f102613..341aab32c946 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -9270,6 +9270,11 @@ __init static int tracing_set_default_clock(void)
|
|
{
|
|
/* sched_clock_stable() is determined in late_initcall */
|
|
if (!trace_boot_clock && !sched_clock_stable()) {
|
|
+ if (security_locked_down(LOCKDOWN_TRACEFS)) {
|
|
+ pr_warn("Can not set tracing clock due to lockdown\n");
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
printk(KERN_WARNING
|
|
"Unstable clock detected, switching default tracing clock to \"global\"\n"
|
|
"If you want to keep using the local clock, then add:\n"
|
|
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
|
|
index c2783915600c..205692181e7b 100644
|
|
--- a/kernel/trace/trace_events_hist.c
|
|
+++ b/kernel/trace/trace_events_hist.c
|
|
@@ -116,6 +116,7 @@ struct hist_field {
|
|
struct ftrace_event_field *field;
|
|
unsigned long flags;
|
|
hist_field_fn_t fn;
|
|
+ unsigned int ref;
|
|
unsigned int size;
|
|
unsigned int offset;
|
|
unsigned int is_signed;
|
|
@@ -1766,11 +1767,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
|
|
struct event_trigger_data *test;
|
|
struct hist_field *hist_field;
|
|
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
hist_field = find_var_field(hist_data, var_name);
|
|
if (hist_field)
|
|
return hist_field;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
test_data = test->private_data;
|
|
hist_field = find_var_field(test_data, var_name);
|
|
@@ -1820,7 +1823,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
|
|
struct event_trigger_data *test;
|
|
struct hist_field *hist_field;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
test_data = test->private_data;
|
|
hist_field = find_var_field(test_data, var_name);
|
|
@@ -2423,8 +2428,16 @@ static int contains_operator(char *str)
|
|
return field_op;
|
|
}
|
|
|
|
+static void get_hist_field(struct hist_field *hist_field)
|
|
+{
|
|
+ hist_field->ref++;
|
|
+}
|
|
+
|
|
static void __destroy_hist_field(struct hist_field *hist_field)
|
|
{
|
|
+ if (--hist_field->ref > 1)
|
|
+ return;
|
|
+
|
|
kfree(hist_field->var.name);
|
|
kfree(hist_field->name);
|
|
kfree(hist_field->type);
|
|
@@ -2466,6 +2479,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
|
|
if (!hist_field)
|
|
return NULL;
|
|
|
|
+ hist_field->ref = 1;
|
|
+
|
|
hist_field->hist_data = hist_data;
|
|
|
|
if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
|
|
@@ -2661,6 +2676,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
|
|
{
|
|
unsigned long flags = HIST_FIELD_FL_VAR_REF;
|
|
struct hist_field *ref_field;
|
|
+ int i;
|
|
+
|
|
+ /* Check if the variable already exists */
|
|
+ for (i = 0; i < hist_data->n_var_refs; i++) {
|
|
+ ref_field = hist_data->var_refs[i];
|
|
+ if (ref_field->var.idx == var_field->var.idx &&
|
|
+ ref_field->var.hist_data == var_field->hist_data) {
|
|
+ get_hist_field(ref_field);
|
|
+ return ref_field;
|
|
+ }
|
|
+ }
|
|
|
|
ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
|
|
if (ref_field) {
|
|
@@ -3115,7 +3141,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
|
|
{
|
|
struct event_trigger_data *test;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
if (test->private_data == hist_data)
|
|
return test->filter_str;
|
|
@@ -3166,9 +3194,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
|
|
struct event_trigger_data *test;
|
|
unsigned int n_keys;
|
|
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
hist_data = test->private_data;
|
|
|
|
@@ -5528,7 +5558,7 @@ static int hist_show(struct seq_file *m, void *v)
|
|
goto out_unlock;
|
|
}
|
|
|
|
- list_for_each_entry_rcu(data, &event_file->triggers, list) {
|
|
+ list_for_each_entry(data, &event_file->triggers, list) {
|
|
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
|
|
hist_trigger_show(m, data, n++);
|
|
}
|
|
@@ -5921,7 +5951,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
|
|
if (hist_data->attrs->name && !named_data)
|
|
goto new;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
if (!hist_trigger_match(data, test, named_data, false))
|
|
continue;
|
|
@@ -6005,10 +6037,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
|
|
struct event_trigger_data *test, *named_data = NULL;
|
|
bool match = false;
|
|
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
if (hist_data->attrs->name)
|
|
named_data = find_named_trigger(hist_data->attrs->name);
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
if (hist_trigger_match(data, test, named_data, false)) {
|
|
match = true;
|
|
@@ -6026,10 +6060,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
|
|
struct hist_trigger_data *hist_data = data->private_data;
|
|
struct event_trigger_data *test, *named_data = NULL;
|
|
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
if (hist_data->attrs->name)
|
|
named_data = find_named_trigger(hist_data->attrs->name);
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
if (!hist_trigger_match(data, test, named_data, false))
|
|
continue;
|
|
@@ -6051,10 +6087,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
|
|
struct event_trigger_data *test, *named_data = NULL;
|
|
bool unregistered = false;
|
|
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
if (hist_data->attrs->name)
|
|
named_data = find_named_trigger(hist_data->attrs->name);
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
if (!hist_trigger_match(data, test, named_data, false))
|
|
continue;
|
|
@@ -6080,7 +6118,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
|
|
struct hist_trigger_data *hist_data;
|
|
struct event_trigger_data *test;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
hist_data = test->private_data;
|
|
if (check_var_refs(hist_data))
|
|
@@ -6323,7 +6363,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
|
|
struct enable_trigger_data *enable_data = data->private_data;
|
|
struct event_trigger_data *test;
|
|
|
|
- list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
|
|
+ list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
|
|
+ lockdep_is_held(&event_mutex)) {
|
|
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
|
|
if (enable_data->enable)
|
|
test->paused = false;
|
|
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
|
|
index 2cd53ca21b51..40106fff06a4 100644
|
|
--- a/kernel/trace/trace_events_trigger.c
|
|
+++ b/kernel/trace/trace_events_trigger.c
|
|
@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
|
|
struct event_trigger_data *data;
|
|
bool set_cond = false;
|
|
|
|
- list_for_each_entry_rcu(data, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(data, &file->triggers, list) {
|
|
if (data->filter || event_command_post_trigger(data->cmd_ops) ||
|
|
event_command_needs_rec(data->cmd_ops)) {
|
|
set_cond = true;
|
|
@@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
|
|
struct event_trigger_data *test;
|
|
int ret = 0;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
|
|
ret = -EEXIST;
|
|
goto out;
|
|
@@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
|
|
struct event_trigger_data *data;
|
|
bool unregistered = false;
|
|
|
|
- list_for_each_entry_rcu(data, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(data, &file->triggers, list) {
|
|
if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
|
|
unregistered = true;
|
|
list_del_rcu(&data->list);
|
|
@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
|
|
struct event_trigger_data *test;
|
|
int ret = 0;
|
|
|
|
- list_for_each_entry_rcu(test, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(test, &file->triggers, list) {
|
|
test_enable_data = test->private_data;
|
|
if (test_enable_data &&
|
|
(test->cmd_ops->trigger_type ==
|
|
@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
|
|
struct event_trigger_data *data;
|
|
bool unregistered = false;
|
|
|
|
- list_for_each_entry_rcu(data, &file->triggers, list) {
|
|
+ lockdep_assert_held(&event_mutex);
|
|
+
|
|
+ list_for_each_entry(data, &file->triggers, list) {
|
|
enable_data = data->private_data;
|
|
if (enable_data &&
|
|
(data->cmd_ops->trigger_type ==
|
|
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
|
|
index 7f890262c8a3..3e5f9c7d939c 100644
|
|
--- a/kernel/trace/trace_kprobe.c
|
|
+++ b/kernel/trace/trace_kprobe.c
|
|
@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
|
|
INIT_HLIST_NODE(&tk->rp.kp.hlist);
|
|
INIT_LIST_HEAD(&tk->rp.kp.list);
|
|
|
|
- ret = trace_probe_init(&tk->tp, event, group);
|
|
+ ret = trace_probe_init(&tk->tp, event, group, 0);
|
|
if (ret < 0)
|
|
goto error;
|
|
|
|
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
|
|
index 905b10af5d5c..bba18cf44a30 100644
|
|
--- a/kernel/trace/trace_probe.c
|
|
+++ b/kernel/trace/trace_probe.c
|
|
@@ -984,7 +984,7 @@ void trace_probe_cleanup(struct trace_probe *tp)
|
|
}
|
|
|
|
int trace_probe_init(struct trace_probe *tp, const char *event,
|
|
- const char *group)
|
|
+ const char *group, size_t event_data_size)
|
|
{
|
|
struct trace_event_call *call;
|
|
int ret = 0;
|
|
@@ -992,7 +992,8 @@ int trace_probe_init(struct trace_probe *tp, const char *event,
|
|
if (!event || !group)
|
|
return -EINVAL;
|
|
|
|
- tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
|
|
+ tp->event = kzalloc(sizeof(struct trace_probe_event) + event_data_size,
|
|
+ GFP_KERNEL);
|
|
if (!tp->event)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
|
|
index 4ee703728aec..03e4e180058d 100644
|
|
--- a/kernel/trace/trace_probe.h
|
|
+++ b/kernel/trace/trace_probe.h
|
|
@@ -230,6 +230,7 @@ struct trace_probe_event {
|
|
struct trace_event_call call;
|
|
struct list_head files;
|
|
struct list_head probes;
|
|
+ char data[0];
|
|
};
|
|
|
|
struct trace_probe {
|
|
@@ -322,7 +323,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
|
|
}
|
|
|
|
int trace_probe_init(struct trace_probe *tp, const char *event,
|
|
- const char *group);
|
|
+ const char *group, size_t event_data_size);
|
|
void trace_probe_cleanup(struct trace_probe *tp);
|
|
int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
|
|
void trace_probe_unlink(struct trace_probe *tp);
|
|
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
|
|
index 352073d36585..f66e202fec13 100644
|
|
--- a/kernel/trace/trace_uprobe.c
|
|
+++ b/kernel/trace/trace_uprobe.c
|
|
@@ -60,7 +60,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
|
|
*/
|
|
struct trace_uprobe {
|
|
struct dyn_event devent;
|
|
- struct trace_uprobe_filter filter;
|
|
struct uprobe_consumer consumer;
|
|
struct path path;
|
|
struct inode *inode;
|
|
@@ -264,6 +263,14 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
|
|
}
|
|
NOKPROBE_SYMBOL(process_fetch_insn)
|
|
|
|
+static struct trace_uprobe_filter *
|
|
+trace_uprobe_get_filter(struct trace_uprobe *tu)
|
|
+{
|
|
+ struct trace_probe_event *event = tu->tp.event;
|
|
+
|
|
+ return (struct trace_uprobe_filter *)&event->data[0];
|
|
+}
|
|
+
|
|
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
|
|
{
|
|
rwlock_init(&filter->rwlock);
|
|
@@ -351,7 +358,8 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
|
|
if (!tu)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
- ret = trace_probe_init(&tu->tp, event, group);
|
|
+ ret = trace_probe_init(&tu->tp, event, group,
|
|
+ sizeof(struct trace_uprobe_filter));
|
|
if (ret < 0)
|
|
goto error;
|
|
|
|
@@ -359,7 +367,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
|
|
tu->consumer.handler = uprobe_dispatcher;
|
|
if (is_ret)
|
|
tu->consumer.ret_handler = uretprobe_dispatcher;
|
|
- init_trace_uprobe_filter(&tu->filter);
|
|
+ init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
|
|
return tu;
|
|
|
|
error:
|
|
@@ -1067,13 +1075,14 @@ static void __probe_event_disable(struct trace_probe *tp)
|
|
struct trace_probe *pos;
|
|
struct trace_uprobe *tu;
|
|
|
|
+ tu = container_of(tp, struct trace_uprobe, tp);
|
|
+ WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
|
|
+
|
|
list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
|
|
tu = container_of(pos, struct trace_uprobe, tp);
|
|
if (!tu->inode)
|
|
continue;
|
|
|
|
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
|
|
-
|
|
uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
|
|
tu->inode = NULL;
|
|
}
|
|
@@ -1108,7 +1117,7 @@ static int probe_event_enable(struct trace_event_call *call,
|
|
}
|
|
|
|
tu = container_of(tp, struct trace_uprobe, tp);
|
|
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
|
|
+ WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
|
|
|
|
if (enabled)
|
|
return 0;
|
|
@@ -1205,39 +1214,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
|
|
}
|
|
|
|
static inline bool
|
|
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
|
|
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
|
|
+ struct perf_event *event)
|
|
{
|
|
- return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
|
|
+ return __uprobe_perf_filter(filter, event->hw.target->mm);
|
|
}
|
|
|
|
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
|
|
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
|
|
+ struct perf_event *event)
|
|
{
|
|
bool done;
|
|
|
|
- write_lock(&tu->filter.rwlock);
|
|
+ write_lock(&filter->rwlock);
|
|
if (event->hw.target) {
|
|
list_del(&event->hw.tp_list);
|
|
- done = tu->filter.nr_systemwide ||
|
|
+ done = filter->nr_systemwide ||
|
|
(event->hw.target->flags & PF_EXITING) ||
|
|
- uprobe_filter_event(tu, event);
|
|
+ trace_uprobe_filter_event(filter, event);
|
|
} else {
|
|
- tu->filter.nr_systemwide--;
|
|
- done = tu->filter.nr_systemwide;
|
|
+ filter->nr_systemwide--;
|
|
+ done = filter->nr_systemwide;
|
|
}
|
|
- write_unlock(&tu->filter.rwlock);
|
|
-
|
|
- if (!done)
|
|
- return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
|
|
+ write_unlock(&filter->rwlock);
|
|
|
|
- return 0;
|
|
+ return done;
|
|
}
|
|
|
|
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
|
|
+/* This returns true if the filter always covers target mm */
|
|
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
|
|
+ struct perf_event *event)
|
|
{
|
|
bool done;
|
|
- int err;
|
|
|
|
- write_lock(&tu->filter.rwlock);
|
|
+ write_lock(&filter->rwlock);
|
|
if (event->hw.target) {
|
|
/*
|
|
* event->parent != NULL means copy_process(), we can avoid
|
|
@@ -1247,28 +1256,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
|
|
* attr.enable_on_exec means that exec/mmap will install the
|
|
* breakpoints we need.
|
|
*/
|
|
- done = tu->filter.nr_systemwide ||
|
|
+ done = filter->nr_systemwide ||
|
|
event->parent || event->attr.enable_on_exec ||
|
|
- uprobe_filter_event(tu, event);
|
|
- list_add(&event->hw.tp_list, &tu->filter.perf_events);
|
|
+ trace_uprobe_filter_event(filter, event);
|
|
+ list_add(&event->hw.tp_list, &filter->perf_events);
|
|
} else {
|
|
- done = tu->filter.nr_systemwide;
|
|
- tu->filter.nr_systemwide++;
|
|
+ done = filter->nr_systemwide;
|
|
+ filter->nr_systemwide++;
|
|
}
|
|
- write_unlock(&tu->filter.rwlock);
|
|
+ write_unlock(&filter->rwlock);
|
|
|
|
- err = 0;
|
|
- if (!done) {
|
|
- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
|
|
- if (err)
|
|
- uprobe_perf_close(tu, event);
|
|
- }
|
|
- return err;
|
|
+ return done;
|
|
}
|
|
|
|
-static int uprobe_perf_multi_call(struct trace_event_call *call,
|
|
- struct perf_event *event,
|
|
- int (*op)(struct trace_uprobe *tu, struct perf_event *event))
|
|
+static int uprobe_perf_close(struct trace_event_call *call,
|
|
+ struct perf_event *event)
|
|
{
|
|
struct trace_probe *pos, *tp;
|
|
struct trace_uprobe *tu;
|
|
@@ -1278,25 +1280,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
|
|
if (WARN_ON_ONCE(!tp))
|
|
return -ENODEV;
|
|
|
|
+ tu = container_of(tp, struct trace_uprobe, tp);
|
|
+ if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
|
|
+ return 0;
|
|
+
|
|
list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
|
|
tu = container_of(pos, struct trace_uprobe, tp);
|
|
- ret = op(tu, event);
|
|
+ ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
|
|
if (ret)
|
|
break;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
+
|
|
+static int uprobe_perf_open(struct trace_event_call *call,
|
|
+ struct perf_event *event)
|
|
+{
|
|
+ struct trace_probe *pos, *tp;
|
|
+ struct trace_uprobe *tu;
|
|
+ int err = 0;
|
|
+
|
|
+ tp = trace_probe_primary_from_call(call);
|
|
+ if (WARN_ON_ONCE(!tp))
|
|
+ return -ENODEV;
|
|
+
|
|
+ tu = container_of(tp, struct trace_uprobe, tp);
|
|
+ if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
|
|
+ return 0;
|
|
+
|
|
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
|
|
+ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
|
|
+ if (err) {
|
|
+ uprobe_perf_close(call, event);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
|
|
enum uprobe_filter_ctx ctx, struct mm_struct *mm)
|
|
{
|
|
+ struct trace_uprobe_filter *filter;
|
|
struct trace_uprobe *tu;
|
|
int ret;
|
|
|
|
tu = container_of(uc, struct trace_uprobe, consumer);
|
|
- read_lock(&tu->filter.rwlock);
|
|
- ret = __uprobe_perf_filter(&tu->filter, mm);
|
|
- read_unlock(&tu->filter.rwlock);
|
|
+ filter = trace_uprobe_get_filter(tu);
|
|
+
|
|
+ read_lock(&filter->rwlock);
|
|
+ ret = __uprobe_perf_filter(filter, mm);
|
|
+ read_unlock(&filter->rwlock);
|
|
|
|
return ret;
|
|
}
|
|
@@ -1419,10 +1455,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
|
|
return 0;
|
|
|
|
case TRACE_REG_PERF_OPEN:
|
|
- return uprobe_perf_multi_call(event, data, uprobe_perf_open);
|
|
+ return uprobe_perf_open(event, data);
|
|
|
|
case TRACE_REG_PERF_CLOSE:
|
|
- return uprobe_perf_multi_call(event, data, uprobe_perf_close);
|
|
+ return uprobe_perf_close(event, data);
|
|
|
|
#endif
|
|
default:
|
|
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
|
|
index dccb95af6003..706020b06617 100644
|
|
--- a/lib/strncpy_from_user.c
|
|
+++ b/lib/strncpy_from_user.c
|
|
@@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
|
|
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
|
|
unsigned long res = 0;
|
|
|
|
- /*
|
|
- * Truncate 'max' to the user-specified limit, so that
|
|
- * we only have one limit we need to check in the loop
|
|
- */
|
|
- if (max > count)
|
|
- max = count;
|
|
-
|
|
if (IS_UNALIGNED(src, dst))
|
|
goto byte_at_a_time;
|
|
|
|
@@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
|
|
unsigned long max = max_addr - src_addr;
|
|
long retval;
|
|
|
|
+ /*
|
|
+ * Truncate 'max' to the user-specified limit, so that
|
|
+ * we only have one limit we need to check in the loop
|
|
+ */
|
|
+ if (max > count)
|
|
+ max = count;
|
|
+
|
|
kasan_check_write(dst, count);
|
|
check_object_size(dst, count, false);
|
|
if (user_access_begin(src, max)) {
|
|
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
|
|
index 6c0005d5dd5c..41670d4a5816 100644
|
|
--- a/lib/strnlen_user.c
|
|
+++ b/lib/strnlen_user.c
|
|
@@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
|
|
unsigned long align, res = 0;
|
|
unsigned long c;
|
|
|
|
- /*
|
|
- * Truncate 'max' to the user-specified limit, so that
|
|
- * we only have one limit we need to check in the loop
|
|
- */
|
|
- if (max > count)
|
|
- max = count;
|
|
-
|
|
/*
|
|
* Do everything aligned. But that means that we
|
|
* need to also expand the maximum..
|
|
@@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
|
|
unsigned long max = max_addr - src_addr;
|
|
long retval;
|
|
|
|
+ /*
|
|
+ * Truncate 'max' to the user-specified limit, so that
|
|
+ * we only have one limit we need to check in the loop
|
|
+ */
|
|
+ if (max > count)
|
|
+ max = count;
|
|
+
|
|
if (user_access_begin(str, max)) {
|
|
retval = do_strnlen_user(str, count, max);
|
|
user_access_end();
|
|
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
|
|
index 7df4f7f395bf..03c3f42966ce 100644
|
|
--- a/lib/test_xarray.c
|
|
+++ b/lib/test_xarray.c
|
|
@@ -2,6 +2,7 @@
|
|
/*
|
|
* test_xarray.c: Test the XArray API
|
|
* Copyright (c) 2017-2018 Microsoft Corporation
|
|
+ * Copyright (c) 2019-2020 Oracle
|
|
* Author: Matthew Wilcox <willy@infradead.org>
|
|
*/
|
|
|
|
@@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
|
|
XA_BUG_ON(xa, !xa_empty(xa));
|
|
}
|
|
|
|
-static noinline void check_multi_find(struct xarray *xa)
|
|
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
|
|
{
|
|
#ifdef CONFIG_XARRAY_MULTI
|
|
+ unsigned long multi = 3 << order;
|
|
+ unsigned long next = 4 << order;
|
|
unsigned long index;
|
|
|
|
- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
|
|
- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
|
|
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
|
|
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
|
|
+ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
|
|
|
|
index = 0;
|
|
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
|
|
- xa_mk_value(12));
|
|
- XA_BUG_ON(xa, index != 12);
|
|
- index = 13;
|
|
+ xa_mk_value(multi));
|
|
+ XA_BUG_ON(xa, index != multi);
|
|
+ index = multi + 1;
|
|
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
|
|
- xa_mk_value(12));
|
|
- XA_BUG_ON(xa, (index < 12) || (index >= 16));
|
|
+ xa_mk_value(multi));
|
|
+ XA_BUG_ON(xa, (index < multi) || (index >= next));
|
|
XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
|
|
- xa_mk_value(16));
|
|
- XA_BUG_ON(xa, index != 16);
|
|
-
|
|
- xa_erase_index(xa, 12);
|
|
- xa_erase_index(xa, 16);
|
|
+ xa_mk_value(next));
|
|
+ XA_BUG_ON(xa, index != next);
|
|
+ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
|
|
+ XA_BUG_ON(xa, index != next);
|
|
+
|
|
+ xa_erase_index(xa, multi);
|
|
+ xa_erase_index(xa, next);
|
|
+ xa_erase_index(xa, next + 1);
|
|
XA_BUG_ON(xa, !xa_empty(xa));
|
|
#endif
|
|
}
|
|
@@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
|
|
xa_destroy(xa);
|
|
}
|
|
|
|
+static noinline void check_find_4(struct xarray *xa)
|
|
+{
|
|
+ unsigned long index = 0;
|
|
+ void *entry;
|
|
+
|
|
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
|
|
+
|
|
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
|
|
+ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
|
|
+
|
|
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
|
|
+ XA_BUG_ON(xa, entry);
|
|
+
|
|
+ xa_erase_index(xa, ULONG_MAX);
|
|
+}
|
|
+
|
|
static noinline void check_find(struct xarray *xa)
|
|
{
|
|
+ unsigned i;
|
|
+
|
|
check_find_1(xa);
|
|
check_find_2(xa);
|
|
check_find_3(xa);
|
|
- check_multi_find(xa);
|
|
+ check_find_4(xa);
|
|
+
|
|
+ for (i = 2; i < 10; i++)
|
|
+ check_multi_find_1(xa, i);
|
|
check_multi_find_2(xa);
|
|
}
|
|
|
|
diff --git a/lib/xarray.c b/lib/xarray.c
|
|
index 1237c213f52b..47e17d46e5f8 100644
|
|
--- a/lib/xarray.c
|
|
+++ b/lib/xarray.c
|
|
@@ -1,7 +1,8 @@
|
|
// SPDX-License-Identifier: GPL-2.0+
|
|
/*
|
|
* XArray implementation
|
|
- * Copyright (c) 2017 Microsoft Corporation
|
|
+ * Copyright (c) 2017-2018 Microsoft Corporation
|
|
+ * Copyright (c) 2018-2020 Oracle
|
|
* Author: Matthew Wilcox <willy@infradead.org>
|
|
*/
|
|
|
|
@@ -1081,6 +1082,8 @@ void *xas_find(struct xa_state *xas, unsigned long max)
|
|
|
|
if (xas_error(xas))
|
|
return NULL;
|
|
+ if (xas->xa_index > max)
|
|
+ return set_bounds(xas);
|
|
|
|
if (!xas->xa_node) {
|
|
xas->xa_index = 1;
|
|
@@ -1150,6 +1153,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
|
|
|
|
if (xas_error(xas))
|
|
return NULL;
|
|
+ if (xas->xa_index > max)
|
|
+ goto max;
|
|
|
|
if (!xas->xa_node) {
|
|
xas->xa_index = 1;
|
|
@@ -1824,6 +1829,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
|
|
}
|
|
EXPORT_SYMBOL(xa_find);
|
|
|
|
+static bool xas_sibling(struct xa_state *xas)
|
|
+{
|
|
+ struct xa_node *node = xas->xa_node;
|
|
+ unsigned long mask;
|
|
+
|
|
+ if (!node)
|
|
+ return false;
|
|
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
|
|
+ return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
|
|
+}
|
|
+
|
|
/**
|
|
* xa_find_after() - Search the XArray for a present entry.
|
|
* @xa: XArray.
|
|
@@ -1847,21 +1863,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
|
|
XA_STATE(xas, xa, *indexp + 1);
|
|
void *entry;
|
|
|
|
+ if (xas.xa_index == 0)
|
|
+ return NULL;
|
|
+
|
|
rcu_read_lock();
|
|
for (;;) {
|
|
if ((__force unsigned int)filter < XA_MAX_MARKS)
|
|
entry = xas_find_marked(&xas, max, filter);
|
|
else
|
|
entry = xas_find(&xas, max);
|
|
- if (xas.xa_node == XAS_BOUNDS)
|
|
+
|
|
+ if (xas_invalid(&xas))
|
|
break;
|
|
- if (xas.xa_shift) {
|
|
- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
|
|
- continue;
|
|
- } else {
|
|
- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
|
|
- continue;
|
|
- }
|
|
+ if (xas_sibling(&xas))
|
|
+ continue;
|
|
if (!xas_retry(&xas, entry))
|
|
break;
|
|
}
|
|
|
index 3098c90d60e2..82325d3d1371 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -5270,9 +5270,29 @@ static void flush_all_backlogs(void)
|
|
put_online_cpus();
|
|
}
|
|
|
|
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
|
|
+static void gro_normal_list(struct napi_struct *napi)
|
|
+{
|
|
+ if (!napi->rx_count)
|
|
+ return;
|
|
+ netif_receive_skb_list_internal(&napi->rx_list);
|
|
+ INIT_LIST_HEAD(&napi->rx_list);
|
|
+ napi->rx_count = 0;
|
|
+}
|
|
+
|
|
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
|
|
+ * pass the whole batch up to the stack.
|
|
+ */
|
|
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
|
|
+{
|
|
+ list_add_tail(&skb->list, &napi->rx_list);
|
|
+ if (++napi->rx_count >= gro_normal_batch)
|
|
+ gro_normal_list(napi);
|
|
+}
|
|
+
|
|
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
|
|
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
|
|
-static int napi_gro_complete(struct sk_buff *skb)
|
|
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
|
|
{
|
|
struct packet_offload *ptype;
|
|
__be16 type = skb->protocol;
|
|
@@ -5305,7 +5325,8 @@ static int napi_gro_complete(struct sk_buff *skb)
|
|
}
|
|
|
|
out:
|
|
- return netif_receive_skb_internal(skb);
|
|
+ gro_normal_one(napi, skb);
|
|
+ return NET_RX_SUCCESS;
|
|
}
|
|
|
|
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
|
|
@@ -5318,7 +5339,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
|
|
if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
|
|
return;
|
|
skb_list_del_init(skb);
|
|
- napi_gro_complete(skb);
|
|
+ napi_gro_complete(napi, skb);
|
|
napi->gro_hash[index].count--;
|
|
}
|
|
|
|
@@ -5421,7 +5442,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
|
|
}
|
|
}
|
|
|
|
-static void gro_flush_oldest(struct list_head *head)
|
|
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
|
|
{
|
|
struct sk_buff *oldest;
|
|
|
|
@@ -5437,7 +5458,7 @@ static void gro_flush_oldest(struct list_head *head)
|
|
* SKB to the chain.
|
|
*/
|
|
skb_list_del_init(oldest);
|
|
- napi_gro_complete(oldest);
|
|
+ napi_gro_complete(napi, oldest);
|
|
}
|
|
|
|
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
|
|
@@ -5513,7 +5534,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
|
|
|
|
if (pp) {
|
|
skb_list_del_init(pp);
|
|
- napi_gro_complete(pp);
|
|
+ napi_gro_complete(napi, pp);
|
|
napi->gro_hash[hash].count--;
|
|
}
|
|
|
|
@@ -5524,7 +5545,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
|
|
goto normal;
|
|
|
|
if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
|
|
- gro_flush_oldest(gro_head);
|
|
+ gro_flush_oldest(napi, gro_head);
|
|
} else {
|
|
napi->gro_hash[hash].count++;
|
|
}
|
|
@@ -5672,26 +5693,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-	if (!napi->rx_count)
-		return;
-	netif_receive_skb_list_internal(&napi->rx_list);
-	INIT_LIST_HEAD(&napi->rx_list);
-	napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-	list_add_tail(&skb->list, &napi->rx_list);
-	if (++napi->rx_count >= gro_normal_batch)
-		gro_normal_list(napi);
-}
-
 static gro_result_t napi_frags_finish(struct napi_struct *napi,
 				      struct sk_buff *skb,
 				      gro_result_t ret)
@@ -5979,8 +5980,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 				 NAPIF_STATE_IN_BUSY_POLL)))
 		return false;
 
-	gro_normal_list(n);
-
 	if (n->gro_bitmask) {
 		unsigned long timeout = 0;
 
@@ -5996,6 +5995,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 			hrtimer_start(&n->timer, ns_to_ktime(timeout),
 				      HRTIMER_MODE_REL_PINNED);
 	}
+
+	gro_normal_list(n);
+
 	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
 		local_irq_save(flags);
@@ -6327,8 +6329,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		goto out_unlock;
 	}
 
-	gro_normal_list(n);
-
 	if (n->gro_bitmask) {
 		/* flush too old packets
 		 * If HZ < 1000, flush all packets.
@@ -6336,6 +6336,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		napi_gro_flush(n, HZ >= 1000);
 	}
 
+	gro_normal_list(n);
+
 	/* Some drivers may have called napi_schedule
 	 * prior to exhausting their budget.
 	 */
@@ -7973,6 +7975,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(__dev_set_mtu);
 
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+		     struct netlink_ext_ack *extack)
+{
+	/* MTU must be positive, and in range */
+	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+		return -EINVAL;
+	}
+
+	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /**
  *	dev_set_mtu_ext - Change maximum transfer unit
  *	@dev: device
@@ -7989,16 +8007,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
 	if (new_mtu == dev->mtu)
 		return 0;
 
-	/* MTU must be positive, and in range */
-	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
-		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
-		return -EINVAL;
-	}
-
-	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
-		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
-		return -EINVAL;
-	}
+	err = dev_validate_mtu(dev, new_mtu, extack);
+	if (err)
+		return err;
 
 	if (!netif_device_present(dev))
 		return -ENODEV;
@@ -9073,8 +9084,10 @@ int register_netdevice(struct net_device *dev)
 		goto err_uninit;
 
 	ret = netdev_register_kobject(dev);
-	if (ret)
+	if (ret) {
+		dev->reg_state = NETREG_UNREGISTERED;
 		goto err_uninit;
+	}
 	dev->reg_state = NETREG_REGISTERED;
 
 	__netdev_update_features(dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e4ec575c1fba..944acb1a9f29 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2959,8 +2959,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
 	dev->rtnl_link_ops = ops;
 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 
-	if (tb[IFLA_MTU])
-		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+	if (tb[IFLA_MTU]) {
+		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+		int err;
+
+		err = dev_validate_mtu(dev, mtu, extack);
+		if (err) {
+			free_netdev(dev);
+			return ERR_PTR(err);
+		}
+		dev->mtu = mtu;
+	}
 	if (tb[IFLA_ADDRESS]) {
 		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
 		       nla_len(tb[IFLA_ADDRESS]));
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 3866d7e20c07..ded2d5227678 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -594,8 +594,6 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-	sock_owned_by_me(sk);
-
 	sk_psock_cork_free(psock);
 	sk_psock_zap_ingress(psock);
 
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index d40de84a637f..754d84b217f0 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -191,7 +191,7 @@ void hsr_debugfs_term(struct hsr_priv *priv);
 void hsr_debugfs_create_root(void);
 void hsr_debugfs_remove_root(void);
 #else
-static inline void void hsr_debugfs_rename(struct net_device *dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
 {
 }
 static inline void hsr_debugfs_init(struct hsr_priv *priv,
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 0e4a7cf6bc87..e2e219c7854a 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
 		if (!x)
 			goto out_reset;
 
+		skb->mark = xfrm_smark_get(skb->mark, x);
+
 		sp->xvec[sp->len++] = x;
 		sp->olen++;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1ab2fb6bb37d..f12fa8da6127 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2175,6 +2175,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
 	int count = cb->args[2];
 	t_key key = cb->args[3];
 
+	/* First time here, count and key are both always 0. Count > 0
+	 * and key == 0 means the dump has wrapped around and we are done.
+	 */
+	if (count && !key)
+		return skb->len;
+
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
 		int err;
 
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 30fa771d382a..dcc79ff54b41 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
 	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
 	[FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
 	[FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
-	[FOU_ATTR_LOCAL_V6] = { .type = sizeof(struct in6_addr), },
-	[FOU_ATTR_PEER_V6] = { .type = sizeof(struct in6_addr), },
+	[FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), },
+	[FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), },
 	[FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
 	[FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
 };
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 0fe2a5d3e258..74e1d964a615 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
 	iph->version = 4;
 	iph->ihl = 5;
 
-	if (tunnel->collect_md) {
-		dev->features |= NETIF_F_NETNS_LOCAL;
+	if (tunnel->collect_md)
 		netif_keep_dst(dev);
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 84115577d3dc..3640e8563a10 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2520,6 +2520,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
 {
 	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
 
+	tcp_sk(sk)->highest_sack = NULL;
 	while (p) {
 		struct sk_buff *skb = rb_to_skb(p);
 
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index a6545ef0d27b..6c4d79baff26 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -779,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
 	 * bandwidth sample. Delivered is in packets and interval_us in uS and
 	 * ratio will be <<1 for most connections. So delivered is first scaled.
 	 */
-	bw = (u64)rs->delivered * BW_UNIT;
-	do_div(bw, rs->interval_us);
+	bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
 
 	/* If this sample is application-limited, it is likely to have a very
 	 * low delivered count that represents application behavior rather than
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 59b78ce2ce2e..6f7155d91313 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3164,6 +3164,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->retransmit_skb_hint = NULL;
 		if (unlikely(skb == tp->lost_skb_hint))
 			tp->lost_skb_hint = NULL;
+		tcp_highest_sack_replace(sk, skb, next);
 		tcp_rtx_queue_unlink_and_free(skb, sk);
 	}
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e4ba915c4bb5..660b24fe041e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3231,6 +3231,7 @@ int tcp_send_synack(struct sock *sk)
 			if (!nskb)
 				return -ENOMEM;
 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
+			tcp_highest_sack_replace(sk, skb, nskb);
 			tcp_rtx_queue_unlink_and_free(skb, sk);
 			__skb_header_release(nskb);
 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7aa4e77161f6..7ae7065758bd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (likely(partial)) {
 		up->forward_deficit += size;
 		size = up->forward_deficit;
-		if (size < (sk->sk_rcvbuf >> 2))
+		if (size < (sk->sk_rcvbuf >> 2) &&
+		    !skb_queue_empty(&up->reader_queue))
 			return;
 	} else {
 		size += up->forward_deficit;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index e31626ffccd1..fd535053245b 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
 		if (!x)
 			goto out_reset;
 
+		skb->mark = xfrm_smark_get(skb->mark, x);
+
 		sp->xvec[sp->len++] = x;
 		sp->olen++;
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 189de56f5e36..9ec05a1df5e1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 		dev->mtu -= 8;
 
 	if (tunnel->parms.collect_md) {
-		dev->features |= NETIF_F_NETNS_LOCAL;
 		netif_keep_dst(dev);
 	}
 	ip6gre_tnl_init_features(dev);
@@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
 	dev->needs_free_netdev = true;
 	dev->priv_destructor = ip6gre_dev_free;
 
-	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	netif_keep_dst(dev);
@@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
 	dev->needs_free_netdev = true;
 	dev->priv_destructor = ip6gre_dev_free;
 
-	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	netif_keep_dst(dev);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2f376dbc37d5..b5dd20c4599b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
 	if (err)
 		return err;
 	ip6_tnl_link_config(t);
-	if (t->parms.collect_md) {
-		dev->features |= NETIF_F_NETNS_LOCAL;
+	if (t->parms.collect_md)
 		netif_keep_dst(dev);
-	}
 	return 0;
 }
 
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index e70567446f28..802eebf8ac4b 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -23,6 +23,7 @@
 #include <net/addrconf.h>
 #include <net/ip6_route.h>
 #include <net/dst_cache.h>
+#include <net/ip_tunnels.h>
 #ifdef CONFIG_IPV6_SEG6_HMAC
 #include <net/seg6_hmac.h>
 #endif
@@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
 
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
-	skb->encapsulation = 0;
+	if (iptunnel_pull_offloads(skb))
+		return false;
 
 	return true;
 }
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
|
|
index e1f271a1b2c1..bfd4b42ba305 100644
|
|
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
|
|
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
|
|
@@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
|
|
|
|
if (set->extensions & IPSET_EXT_DESTROY)
|
|
mtype_ext_cleanup(set);
|
|
- memset(map->members, 0, map->memsize);
|
|
+ bitmap_zero(map->members, map->elements);
|
|
set->elements = 0;
|
|
set->ext_size = 0;
|
|
}
|
|
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
|
|
index 11ff9d4a7006..d934384f31ad 100644
|
|
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
|
|
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
|
|
@@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
|
|
|
|
/* Type structure */
|
|
struct bitmap_ip {
|
|
- void *members; /* the set members */
|
|
+ unsigned long *members; /* the set members */
|
|
u32 first_ip; /* host byte order, included in range */
|
|
u32 last_ip; /* host byte order, included in range */
|
|
u32 elements; /* number of max elements in the set */
|
|
@@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
|
|
u32 first_ip, u32 last_ip,
|
|
u32 elements, u32 hosts, u8 netmask)
|
|
{
|
|
- map->members = ip_set_alloc(map->memsize);
|
|
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
|
|
if (!map->members)
|
|
return false;
|
|
map->first_ip = first_ip;
|
|
@@ -310,7 +310,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
|
if (!map)
|
|
return -ENOMEM;
|
|
|
|
- map->memsize = bitmap_bytes(0, elements - 1);
|
|
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
|
|
set->variant = &bitmap_ip;
|
|
if (!init_map_ip(set, map, first_ip, last_ip,
|
|
elements, hosts, netmask)) {
|
|
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
|
|
index 1d4e63326e68..e8532783b43a 100644
|
|
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
|
|
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
|
|
@@ -42,7 +42,7 @@ enum {
|
|
|
|
/* Type structure */
|
|
struct bitmap_ipmac {
|
|
- void *members; /* the set members */
|
|
+ unsigned long *members; /* the set members */
|
|
u32 first_ip; /* host byte order, included in range */
|
|
u32 last_ip; /* host byte order, included in range */
|
|
u32 elements; /* number of max elements in the set */
|
|
@@ -299,7 +299,7 @@ static bool
|
|
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
|
|
u32 first_ip, u32 last_ip, u32 elements)
|
|
{
|
|
- map->members = ip_set_alloc(map->memsize);
|
|
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
|
|
if (!map->members)
|
|
return false;
|
|
map->first_ip = first_ip;
|
|
@@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
|
if (!map)
|
|
return -ENOMEM;
|
|
|
|
- map->memsize = bitmap_bytes(0, elements - 1);
|
|
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
|
|
set->variant = &bitmap_ipmac;
|
|
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
|
|
kfree(map);
|
|
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
|
|
index 704a0dda1609..e3ac914fff1a 100644
|
|
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
|
|
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
|
|
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
|
|
|
|
/* Type structure */
|
|
struct bitmap_port {
|
|
- void *members; /* the set members */
|
|
+ unsigned long *members; /* the set members */
|
|
u16 first_port; /* host byte order, included in range */
|
|
u16 last_port; /* host byte order, included in range */
|
|
u32 elements; /* number of max elements in the set */
|
|
@@ -204,7 +204,7 @@ static bool
|
|
init_map_port(struct ip_set *set, struct bitmap_port *map,
|
|
u16 first_port, u16 last_port)
|
|
{
|
|
- map->members = ip_set_alloc(map->memsize);
|
|
+ map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
|
|
if (!map->members)
|
|
return false;
|
|
map->first_port = first_port;
|
|
@@ -244,7 +244,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
|
|
return -ENOMEM;
|
|
|
|
map->elements = elements;
|
|
- map->memsize = bitmap_bytes(0, map->elements);
|
|
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
|
|
set->variant = &bitmap_port;
|
|
if (!init_map_port(set, map, first_port, last_port)) {
|
|
kfree(map);
|
|
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index 9fefd0150091..23544842b692 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -488,48 +488,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
|
|
|
|
static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
|
|
|
|
+static const struct nft_chain_type *
|
|
+__nft_chain_type_get(u8 family, enum nft_chain_types type)
|
|
+{
|
|
+ if (family >= NFPROTO_NUMPROTO ||
|
|
+ type >= NFT_CHAIN_T_MAX)
|
|
+ return NULL;
|
|
+
|
|
+ return chain_type[family][type];
|
|
+}
|
|
+
|
|
static const struct nft_chain_type *
|
|
__nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
|
|
{
|
|
+ const struct nft_chain_type *type;
|
|
int i;
|
|
|
|
for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
|
|
- if (chain_type[family][i] != NULL &&
|
|
- !nla_strcmp(nla, chain_type[family][i]->name))
|
|
- return chain_type[family][i];
|
|
+ type = __nft_chain_type_get(family, i);
|
|
+ if (!type)
|
|
+ continue;
|
|
+ if (!nla_strcmp(nla, type->name))
|
|
+ return type;
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
-/*
|
|
- * Loading a module requires dropping mutex that guards the transaction.
|
|
- * A different client might race to start a new transaction meanwhile. Zap the
|
|
- * list of pending transaction and then restore it once the mutex is grabbed
|
|
- * again. Users of this function return EAGAIN which implicitly triggers the
|
|
- * transaction abort path to clean up the list of pending transactions.
|
|
- */
|
|
+struct nft_module_request {
|
|
+ struct list_head list;
|
|
+ char module[MODULE_NAME_LEN];
|
|
+ bool done;
|
|
+};
|
|
+
|
|
#ifdef CONFIG_MODULES
|
|
-static void nft_request_module(struct net *net, const char *fmt, ...)
|
|
+static int nft_request_module(struct net *net, const char *fmt, ...)
|
|
{
|
|
char module_name[MODULE_NAME_LEN];
|
|
- LIST_HEAD(commit_list);
|
|
+ struct nft_module_request *req;
|
|
va_list args;
|
|
int ret;
|
|
|
|
- list_splice_init(&net->nft.commit_list, &commit_list);
|
|
-
|
|
va_start(args, fmt);
|
|
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
|
|
va_end(args);
|
|
if (ret >= MODULE_NAME_LEN)
|
|
- return;
|
|
+ return 0;
|
|
|
|
- mutex_unlock(&net->nft.commit_mutex);
|
|
- request_module("%s", module_name);
|
|
- mutex_lock(&net->nft.commit_mutex);
|
|
+ list_for_each_entry(req, &net->nft.module_list, list) {
|
|
+ if (!strcmp(req->module, module_name)) {
|
|
+ if (req->done)
|
|
+ return 0;
|
|
|
|
- WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
|
|
- list_splice(&commit_list, &net->nft.commit_list);
|
|
+ /* A request to load this module already exists. */
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
|
|
+ if (!req)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ req->done = false;
|
|
+ strlcpy(req->module, module_name, MODULE_NAME_LEN);
|
|
+ list_add_tail(&req->list, &net->nft.module_list);
|
|
+
|
|
+ return -EAGAIN;
|
|
}
|
|
#endif
|
|
|
|
@@ -553,10 +576,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
|
|
lockdep_nfnl_nft_mutex_not_held();
|
|
#ifdef CONFIG_MODULES
|
|
if (autoload) {
|
|
- nft_request_module(net, "nft-chain-%u-%.*s", family,
|
|
- nla_len(nla), (const char *)nla_data(nla));
|
|
- type = __nf_tables_chain_type_lookup(nla, family);
|
|
- if (type != NULL)
|
|
+ if (nft_request_module(net, "nft-chain-%u-%.*s", family,
|
|
+ nla_len(nla),
|
|
+ (const char *)nla_data(nla)) == -EAGAIN)
|
|
return ERR_PTR(-EAGAIN);
|
|
}
|
|
#endif
|
|
@@ -1095,11 +1117,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
|
|
|
|
void nft_register_chain_type(const struct nft_chain_type *ctype)
|
|
{
|
|
- if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
|
|
- return;
|
|
-
|
|
nfnl_lock(NFNL_SUBSYS_NFTABLES);
|
|
- if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
|
|
+ if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
|
|
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
|
|
return;
|
|
}
|
|
@@ -1551,7 +1570,10 @@ static int nft_chain_parse_hook(struct net *net,
|
|
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
|
|
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
|
|
|
|
- type = chain_type[family][NFT_CHAIN_T_DEFAULT];
|
|
+ type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
|
|
+ if (!type)
|
|
+ return -EOPNOTSUPP;
|
|
+
|
|
if (nla[NFTA_CHAIN_TYPE]) {
|
|
type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
|
|
family, autoload);
|
|
@@ -2060,9 +2082,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
|
|
static int nft_expr_type_request_module(struct net *net, u8 family,
|
|
struct nlattr *nla)
|
|
{
|
|
- nft_request_module(net, "nft-expr-%u-%.*s", family,
|
|
- nla_len(nla), (char *)nla_data(nla));
|
|
- if (__nft_expr_type_get(family, nla))
|
|
+ if (nft_request_module(net, "nft-expr-%u-%.*s", family,
|
|
+ nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
|
|
return -EAGAIN;
|
|
|
|
return 0;
|
|
@@ -2088,9 +2109,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
|
|
if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
|
|
return ERR_PTR(-EAGAIN);
|
|
|
|
- nft_request_module(net, "nft-expr-%.*s",
|
|
- nla_len(nla), (char *)nla_data(nla));
|
|
- if (__nft_expr_type_get(family, nla))
|
|
+ if (nft_request_module(net, "nft-expr-%.*s",
|
|
+ nla_len(nla),
|
|
+ (char *)nla_data(nla)) == -EAGAIN)
|
|
return ERR_PTR(-EAGAIN);
|
|
}
|
|
#endif
|
|
@@ -2181,9 +2202,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
|
|
err = PTR_ERR(ops);
|
|
#ifdef CONFIG_MODULES
|
|
if (err == -EAGAIN)
|
|
- nft_expr_type_request_module(ctx->net,
|
|
- ctx->family,
|
|
- tb[NFTA_EXPR_NAME]);
|
|
+ if (nft_expr_type_request_module(ctx->net,
|
|
+ ctx->family,
|
|
+ tb[NFTA_EXPR_NAME]) != -EAGAIN)
|
|
+ err = -ENOENT;
|
|
#endif
|
|
goto err1;
|
|
}
|
|
@@ -3020,8 +3042,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
|
|
lockdep_nfnl_nft_mutex_not_held();
|
|
#ifdef CONFIG_MODULES
|
|
if (list_empty(&nf_tables_set_types)) {
|
|
- nft_request_module(ctx->net, "nft-set");
|
|
- if (!list_empty(&nf_tables_set_types))
|
|
+ if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
|
|
return ERR_PTR(-EAGAIN);
|
|
}
|
|
#endif
|
|
@@ -5147,8 +5168,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
|
|
lockdep_nfnl_nft_mutex_not_held();
|
|
#ifdef CONFIG_MODULES
|
|
if (type == NULL) {
|
|
- nft_request_module(net, "nft-obj-%u", objtype);
|
|
- if (__nft_obj_type_get(objtype))
|
|
+ if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
|
|
return ERR_PTR(-EAGAIN);
|
|
}
|
|
#endif
|
|
@@ -5764,8 +5784,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
|
|
lockdep_nfnl_nft_mutex_not_held();
|
|
#ifdef CONFIG_MODULES
|
|
if (type == NULL) {
|
|
- nft_request_module(net, "nf-flowtable-%u", family);
|
|
- if (__nft_flowtable_type_get(family))
|
|
+ if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
|
|
return ERR_PTR(-EAGAIN);
|
|
}
|
|
#endif
|
|
@@ -6712,6 +6731,18 @@ static void nft_chain_del(struct nft_chain *chain)
|
|
list_del_rcu(&chain->list);
|
|
}
|
|
|
|
+static void nf_tables_module_autoload_cleanup(struct net *net)
|
|
+{
|
|
+ struct nft_module_request *req, *next;
|
|
+
|
|
+ WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
|
|
+ list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
|
|
+ WARN_ON_ONCE(!req->done);
|
|
+ list_del(&req->list);
|
|
+ kfree(req);
|
|
+ }
|
|
+}
|
|
+
|
|
static void nf_tables_commit_release(struct net *net)
|
|
{
|
|
struct nft_trans *trans;
|
|
@@ -6724,6 +6755,7 @@ static void nf_tables_commit_release(struct net *net)
|
|
* to prevent expensive synchronize_rcu() in commit phase.
|
|
*/
|
|
if (list_empty(&net->nft.commit_list)) {
|
|
+ nf_tables_module_autoload_cleanup(net);
|
|
mutex_unlock(&net->nft.commit_mutex);
|
|
return;
|
|
}
|
|
@@ -6738,6 +6770,7 @@ static void nf_tables_commit_release(struct net *net)
|
|
list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
|
|
spin_unlock(&nf_tables_destroy_list_lock);
|
|
|
|
+ nf_tables_module_autoload_cleanup(net);
|
|
mutex_unlock(&net->nft.commit_mutex);
|
|
|
|
schedule_work(&trans_destroy_work);
|
|
@@ -6929,6 +6962,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
|
return 0;
|
|
}
|
|
|
|
+static void nf_tables_module_autoload(struct net *net)
|
|
+{
|
|
+ struct nft_module_request *req, *next;
|
|
+ LIST_HEAD(module_list);
|
|
+
|
|
+ list_splice_init(&net->nft.module_list, &module_list);
|
|
+ mutex_unlock(&net->nft.commit_mutex);
|
|
+ list_for_each_entry_safe(req, next, &module_list, list) {
|
|
+ if (req->done) {
|
|
+ list_del(&req->list);
|
|
+ kfree(req);
|
|
+ } else {
|
|
+ request_module("%s", req->module);
|
|
+ req->done = true;
|
|
+ }
|
|
+ }
|
|
+ mutex_lock(&net->nft.commit_mutex);
|
|
+ list_splice(&module_list, &net->nft.module_list);
|
|
+}
|
|
+
|
|
static void nf_tables_abort_release(struct nft_trans *trans)
|
|
{
|
|
switch (trans->msg_type) {
|
|
@@ -6958,7 +7011,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
|
|
kfree(trans);
|
|
}
|
|
|
|
-static int __nf_tables_abort(struct net *net)
|
|
+static int __nf_tables_abort(struct net *net, bool autoload)
|
|
{
|
|
struct nft_trans *trans, *next;
|
|
struct nft_trans_elem *te;
|
|
@@ -7080,6 +7133,11 @@ static int __nf_tables_abort(struct net *net)
|
|
nf_tables_abort_release(trans);
|
|
}
|
|
|
|
+ if (autoload)
|
|
+ nf_tables_module_autoload(net);
|
|
+ else
|
|
+ nf_tables_module_autoload_cleanup(net);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -7088,9 +7146,9 @@ static void nf_tables_cleanup(struct net *net)
|
|
nft_validate_state_update(net, NFT_VALIDATE_SKIP);
|
|
}
|
|
|
|
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
|
|
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
|
|
{
|
|
- int ret = __nf_tables_abort(net);
|
|
+ int ret = __nf_tables_abort(net, autoload);
|
|
|
|
mutex_unlock(&net->nft.commit_mutex);
|
|
|
|
@@ -7685,6 +7743,7 @@ static int __net_init nf_tables_init_net(struct net *net)
|
|
{
|
|
INIT_LIST_HEAD(&net->nft.tables);
|
|
INIT_LIST_HEAD(&net->nft.commit_list);
|
|
+ INIT_LIST_HEAD(&net->nft.module_list);
|
|
mutex_init(&net->nft.commit_mutex);
|
|
net->nft.base_seq = 1;
|
|
net->nft.validate_state = NFT_VALIDATE_SKIP;
|
|
@@ -7696,7 +7755,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
|
|
{
|
|
mutex_lock(&net->nft.commit_mutex);
|
|
if (!list_empty(&net->nft.commit_list))
|
|
- __nf_tables_abort(net);
|
|
+ __nf_tables_abort(net, false);
|
|
__nft_release_tables(net);
|
|
mutex_unlock(&net->nft.commit_mutex);
|
|
WARN_ON_ONCE(!list_empty(&net->nft.tables));
|
|
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
|
|
index 4abbb452cf6c..99127e2d95a8 100644
|
|
--- a/net/netfilter/nfnetlink.c
|
|
+++ b/net/netfilter/nfnetlink.c
|
|
@@ -476,7 +476,7 @@ ack:
|
|
}
|
|
done:
|
|
if (status & NFNL_BATCH_REPLAY) {
|
|
- ss->abort(net, oskb);
|
|
+ ss->abort(net, oskb, true);
|
|
nfnl_err_reset(&err_list);
|
|
kfree_skb(skb);
|
|
module_put(ss->owner);
|
|
@@ -487,11 +487,11 @@ done:
|
|
status |= NFNL_BATCH_REPLAY;
|
|
goto done;
|
|
} else if (err) {
|
|
- ss->abort(net, oskb);
|
|
+ ss->abort(net, oskb, false);
|
|
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
|
|
}
|
|
} else {
|
|
- ss->abort(net, oskb);
|
|
+ ss->abort(net, oskb, false);
|
|
}
|
|
if (ss->cleanup)
|
|
ss->cleanup(net);
|
|
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
|
|
index f54d6ae15bb1..b42247aa48a9 100644
|
|
--- a/net/netfilter/nft_osf.c
|
|
+++ b/net/netfilter/nft_osf.c
|
|
@@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
|
|
int err;
|
|
u8 ttl;
|
|
|
|
+ if (!tb[NFTA_OSF_DREG])
|
|
+ return -EINVAL;
|
|
+
|
|
if (tb[NFTA_OSF_TTL]) {
|
|
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
|
|
if (ttl > 2)
|
|
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
|
|
index 76e0d122616a..c2cdd0fc2e70 100644
|
|
--- a/net/sched/cls_api.c
|
|
+++ b/net/sched/cls_api.c
|
|
@@ -2055,9 +2055,8 @@ replay:
|
|
&chain_info));
|
|
|
|
mutex_unlock(&chain->filter_chain_lock);
|
|
- tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
|
|
- protocol, prio, chain, rtnl_held,
|
|
- extack);
|
|
+ tp_new = tcf_proto_create(name, protocol, prio, chain,
|
|
+ rtnl_held, extack);
|
|
if (IS_ERR(tp_new)) {
|
|
err = PTR_ERR(tp_new);
|
|
goto errout_tp;
|
|
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
|
|
index 8f2ad706784d..d0140a92694a 100644
|
|
--- a/net/sched/ematch.c
|
|
+++ b/net/sched/ematch.c
|
|
@@ -263,12 +263,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
|
|
}
|
|
em->data = (unsigned long) v;
|
|
}
|
|
+ em->datalen = data_len;
|
|
}
|
|
}
|
|
|
|
em->matchid = em_hdr->matchid;
|
|
em->flags = em_hdr->flags;
|
|
- em->datalen = data_len;
|
|
em->net = net;
|
|
|
|
err = 0;
|
|
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
|
|
index a80920f261ca..41e9c2932b34 100644
|
|
--- a/net/tls/tls_sw.c
|
|
+++ b/net/tls/tls_sw.c
|
|
@@ -793,7 +793,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
|
|
psock = sk_psock_get(sk);
|
|
if (!psock || !policy) {
|
|
err = tls_push_record(sk, flags, record_type);
|
|
- if (err) {
|
|
+ if (err && err != -EINPROGRESS) {
|
|
*copied -= sk_msg_free(sk, msg);
|
|
tls_free_open_rec(sk);
|
|
}
|
|
@@ -819,7 +819,7 @@ more_data:
|
|
switch (psock->eval) {
|
|
case __SK_PASS:
|
|
err = tls_push_record(sk, flags, record_type);
|
|
- if (err < 0) {
|
|
+ if (err && err != -EINPROGRESS) {
|
|
*copied -= sk_msg_free(sk, msg);
|
|
tls_free_open_rec(sk);
|
|
goto out_err;
|
|
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
|
|
index 6aee9f5e8e71..256f3e97d1f3 100644
|
|
--- a/net/x25/af_x25.c
|
|
+++ b/net/x25/af_x25.c
|
|
@@ -760,6 +760,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
|
|
if (sk->sk_state == TCP_ESTABLISHED)
|
|
goto out;
|
|
|
|
+ rc = -EALREADY; /* Do nothing if call is already in progress */
|
|
+ if (sk->sk_state == TCP_SYN_SENT)
|
|
+ goto out;
|
|
+
|
|
sk->sk_state = TCP_CLOSE;
|
|
sock->state = SS_UNCONNECTED;
|
|
|
|
@@ -806,7 +810,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
|
|
/* Now the loop */
|
|
rc = -EINPROGRESS;
|
|
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
|
|
- goto out_put_neigh;
|
|
+ goto out;
|
|
|
|
rc = x25_wait_for_connection_establishment(sk);
|
|
if (rc)
|
|
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
|
|
index 612268eabef4..7225107a9aaf 100644
|
|
--- a/scripts/recordmcount.c
|
|
+++ b/scripts/recordmcount.c
|
|
@@ -38,6 +38,10 @@
|
|
#define R_AARCH64_ABS64 257
|
|
#endif
|
|
|
|
+#define R_ARM_PC24 1
|
|
+#define R_ARM_THM_CALL 10
|
|
+#define R_ARM_CALL 28
|
|
+
|
|
static int fd_map; /* File descriptor for file being modified. */
|
|
static int mmap_failed; /* Boolean flag. */
|
|
static char gpfx; /* prefix for global symbol name (sometimes '_') */
|
|
@@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done!
|
|
#define RECORD_MCOUNT_64
|
|
#include "recordmcount.h"
|
|
|
|
+static int arm_is_fake_mcount(Elf32_Rel const *rp)
|
|
+{
|
|
+ switch (ELF32_R_TYPE(w(rp->r_info))) {
|
|
+ case R_ARM_THM_CALL:
|
|
+ case R_ARM_CALL:
|
|
+ case R_ARM_PC24:
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
|
|
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
|
|
* We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
|
|
@@ -523,6 +539,7 @@ static int do_file(char const *const fname)
|
|
altmcount = "__gnu_mcount_nc";
|
|
make_nop = make_nop_arm;
|
|
rel_type_nop = R_ARM_NONE;
|
|
+ is_fake_mcount32 = arm_is_fake_mcount;
|
|
gpfx = 0;
|
|
break;
|
|
case EM_AARCH64:
|