345 lines
9.8 KiB
Diff
diff --git a/Makefile b/Makefile
|
|
index 64f73757916f..cf28e431b68a 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,6 +1,6 @@
|
|
VERSION = 4
|
|
PATCHLEVEL = 11
|
|
-SUBLEVEL = 10
|
|
+SUBLEVEL = 11
|
|
EXTRAVERSION =
|
|
NAME = Fearless Coyote
|
|
|
|
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
|
|
index 0b1ff4c1c14e..fffb2794dd89 100644
|
|
--- a/arch/x86/include/asm/pat.h
|
|
+++ b/arch/x86/include/asm/pat.h
|
|
@@ -7,6 +7,7 @@
|
|
bool pat_enabled(void);
|
|
void pat_disable(const char *reason);
|
|
extern void pat_init(void);
|
|
+extern void init_cache_modes(void);
|
|
|
|
extern int reserve_memtype(u64 start, u64 end,
|
|
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
|
|
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
|
|
index 4bf0c8926a1c..11beda5a691b 100644
|
|
--- a/arch/x86/kernel/setup.c
|
|
+++ b/arch/x86/kernel/setup.c
|
|
@@ -1080,6 +1080,13 @@ void __init setup_arch(char **cmdline_p)
|
|
max_possible_pfn = max_pfn;
|
|
|
|
/*
|
|
+ * This call is required when the CPU does not support PAT. If
|
|
+ * mtrr_bp_init() invoked it already via pat_init() the call has no
|
|
+ * effect.
|
|
+ */
|
|
+ init_cache_modes();
|
|
+
|
|
+ /*
|
|
* Define random base addresses for memory sections after max_pfn is
|
|
* defined and before each memory section base is used.
|
|
*/
|
|
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
|
|
index efc32bc6862b..0ad3f71c55f8 100644
|
|
--- a/arch/x86/mm/pat.c
|
|
+++ b/arch/x86/mm/pat.c
|
|
@@ -36,14 +36,14 @@
|
|
#undef pr_fmt
|
|
#define pr_fmt(fmt) "" fmt
|
|
|
|
-static bool boot_cpu_done;
|
|
-
|
|
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
|
|
-static void init_cache_modes(void);
|
|
+static bool __read_mostly boot_cpu_done;
|
|
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
|
|
+static bool __read_mostly pat_initialized;
|
|
+static bool __read_mostly init_cm_done;
|
|
|
|
void pat_disable(const char *reason)
|
|
{
|
|
- if (!__pat_enabled)
|
|
+ if (pat_disabled)
|
|
return;
|
|
|
|
if (boot_cpu_done) {
|
|
@@ -51,10 +51,8 @@ void pat_disable(const char *reason)
|
|
return;
|
|
}
|
|
|
|
- __pat_enabled = 0;
|
|
+ pat_disabled = true;
|
|
pr_info("x86/PAT: %s\n", reason);
|
|
-
|
|
- init_cache_modes();
|
|
}
|
|
|
|
static int __init nopat(char *str)
|
|
@@ -66,7 +64,7 @@ early_param("nopat", nopat);
|
|
|
|
bool pat_enabled(void)
|
|
{
|
|
- return !!__pat_enabled;
|
|
+ return pat_initialized;
|
|
}
|
|
EXPORT_SYMBOL_GPL(pat_enabled);
|
|
|
|
@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat)
|
|
update_cache_mode_entry(i, cache);
|
|
}
|
|
pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
|
|
+
|
|
+ init_cm_done = true;
|
|
}
|
|
|
|
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
|
|
@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat)
|
|
}
|
|
|
|
wrmsrl(MSR_IA32_CR_PAT, pat);
|
|
+ pat_initialized = true;
|
|
|
|
__init_cache_modes(pat);
|
|
}
|
|
@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat)
|
|
wrmsrl(MSR_IA32_CR_PAT, pat);
|
|
}
|
|
|
|
-static void init_cache_modes(void)
|
|
+void init_cache_modes(void)
|
|
{
|
|
u64 pat = 0;
|
|
- static int init_cm_done;
|
|
|
|
if (init_cm_done)
|
|
return;
|
|
@@ -286,8 +286,6 @@ static void init_cache_modes(void)
|
|
}
|
|
|
|
__init_cache_modes(pat);
|
|
-
|
|
- init_cm_done = 1;
|
|
}
|
|
|
|
/**
|
|
@@ -305,10 +303,8 @@ void pat_init(void)
|
|
u64 pat;
|
|
struct cpuinfo_x86 *c = &boot_cpu_data;
|
|
|
|
- if (!pat_enabled()) {
|
|
- init_cache_modes();
|
|
+ if (pat_disabled)
|
|
return;
|
|
- }
|
|
|
|
if ((c->x86_vendor == X86_VENDOR_INTEL) &&
|
|
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
|
|
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
|
|
index 8baab4307f7b..7830d304dff6 100644
|
|
--- a/crypto/rsa-pkcs1pad.c
|
|
+++ b/crypto/rsa-pkcs1pad.c
|
|
@@ -496,7 +496,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
|
|
goto done;
|
|
pos++;
|
|
|
|
- if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
|
|
+ if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
|
|
goto done;
|
|
|
|
pos += digest_info->size;
|
|
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
|
|
index 9bc80eb06934..b4e3f4ef5c05 100644
|
|
--- a/drivers/crypto/caam/caamalg.c
|
|
+++ b/drivers/crypto/caam/caamalg.c
|
|
@@ -1474,8 +1474,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
|
|
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
|
|
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
|
|
struct device *jrdev = ctx->jrdev;
|
|
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
|
|
- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
|
|
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
|
GFP_KERNEL : GFP_ATOMIC;
|
|
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
|
|
struct ablkcipher_edesc *edesc;
|
|
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
|
|
index 8deac8d9225d..13a3e505d4fc 100644
|
|
--- a/drivers/staging/comedi/comedi_fops.c
|
|
+++ b/drivers/staging/comedi/comedi_fops.c
|
|
@@ -2901,6 +2901,7 @@ static int __init comedi_init(void)
|
|
dev = comedi_alloc_board_minor(NULL);
|
|
if (IS_ERR(dev)) {
|
|
comedi_cleanup_board_minors();
|
|
+ class_destroy(comedi_class);
|
|
cdev_del(&comedi_cdev);
|
|
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
|
|
COMEDI_NUM_MINORS);
|
|
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
|
|
index 9e074e9daf4e..e2422f2aa843 100644
|
|
--- a/drivers/staging/vt6656/main_usb.c
|
|
+++ b/drivers/staging/vt6656/main_usb.c
|
|
@@ -523,6 +523,9 @@ static int vnt_start(struct ieee80211_hw *hw)
|
|
goto free_all;
|
|
}
|
|
|
|
+ if (vnt_key_init_table(priv))
|
|
+ goto free_all;
|
|
+
|
|
priv->int_interval = 1; /* bInterval is set to 1 */
|
|
|
|
vnt_int_start_interrupt(priv);
|
|
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
|
|
index 42145be5c6b4..5dc655e410b4 100644
|
|
--- a/fs/ext4/sysfs.c
|
|
+++ b/fs/ext4/sysfs.c
|
|
@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
|
|
int ret;
|
|
|
|
ret = kstrtoull(skip_spaces(buf), 0, &val);
|
|
- if (!ret || val >= clusters)
|
|
+ if (ret || val >= clusters)
|
|
return -EINVAL;
|
|
|
|
atomic64_set(&sbi->s_resv_clusters, val);
|
|
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
|
|
index c5ae09b6c726..18694598bebf 100644
|
|
--- a/fs/proc/internal.h
|
|
+++ b/fs/proc/internal.h
|
|
@@ -67,7 +67,7 @@ struct proc_inode {
|
|
struct proc_dir_entry *pde;
|
|
struct ctl_table_header *sysctl;
|
|
struct ctl_table *sysctl_entry;
|
|
- struct list_head sysctl_inodes;
|
|
+ struct hlist_node sysctl_inodes;
|
|
const struct proc_ns_operations *ns_ops;
|
|
struct inode vfs_inode;
|
|
};
|
|
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
|
|
index d04ea4349909..4353faa9d416 100644
|
|
--- a/fs/proc/proc_sysctl.c
|
|
+++ b/fs/proc/proc_sysctl.c
|
|
@@ -191,7 +191,7 @@ static void init_header(struct ctl_table_header *head,
|
|
head->set = set;
|
|
head->parent = NULL;
|
|
head->node = node;
|
|
- INIT_LIST_HEAD(&head->inodes);
|
|
+ INIT_HLIST_HEAD(&head->inodes);
|
|
if (node) {
|
|
struct ctl_table *entry;
|
|
for (entry = table; entry->procname; entry++, node++)
|
|
@@ -261,25 +261,42 @@ static void unuse_table(struct ctl_table_header *p)
|
|
complete(p->unregistering);
|
|
}
|
|
|
|
-/* called under sysctl_lock */
|
|
static void proc_sys_prune_dcache(struct ctl_table_header *head)
|
|
{
|
|
- struct inode *inode, *prev = NULL;
|
|
+ struct inode *inode;
|
|
struct proc_inode *ei;
|
|
+ struct hlist_node *node;
|
|
+ struct super_block *sb;
|
|
|
|
rcu_read_lock();
|
|
- list_for_each_entry_rcu(ei, &head->inodes, sysctl_inodes) {
|
|
- inode = igrab(&ei->vfs_inode);
|
|
- if (inode) {
|
|
- rcu_read_unlock();
|
|
- iput(prev);
|
|
- prev = inode;
|
|
- d_prune_aliases(inode);
|
|
+ for (;;) {
|
|
+ node = hlist_first_rcu(&head->inodes);
|
|
+ if (!node)
|
|
+ break;
|
|
+ ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
|
|
+ spin_lock(&sysctl_lock);
|
|
+ hlist_del_init_rcu(&ei->sysctl_inodes);
|
|
+ spin_unlock(&sysctl_lock);
|
|
+
|
|
+ inode = &ei->vfs_inode;
|
|
+ sb = inode->i_sb;
|
|
+ if (!atomic_inc_not_zero(&sb->s_active))
|
|
+ continue;
|
|
+ inode = igrab(inode);
|
|
+ rcu_read_unlock();
|
|
+ if (unlikely(!inode)) {
|
|
+ deactivate_super(sb);
|
|
rcu_read_lock();
|
|
+ continue;
|
|
}
|
|
+
|
|
+ d_prune_aliases(inode);
|
|
+ iput(inode);
|
|
+ deactivate_super(sb);
|
|
+
|
|
+ rcu_read_lock();
|
|
}
|
|
rcu_read_unlock();
|
|
- iput(prev);
|
|
}
|
|
|
|
/* called under sysctl_lock, will reacquire if has to wait */
|
|
@@ -465,7 +482,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
|
|
}
|
|
ei->sysctl = head;
|
|
ei->sysctl_entry = table;
|
|
- list_add_rcu(&ei->sysctl_inodes, &head->inodes);
|
|
+ hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
|
|
head->count++;
|
|
spin_unlock(&sysctl_lock);
|
|
|
|
@@ -493,7 +510,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
|
|
void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
|
|
{
|
|
spin_lock(&sysctl_lock);
|
|
- list_del_rcu(&PROC_I(inode)->sysctl_inodes);
|
|
+ hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
|
|
if (!--head->count)
|
|
kfree_rcu(head, rcu);
|
|
spin_unlock(&sysctl_lock);
|
|
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
|
|
index b7e82049fec7..0e5cc33b9b25 100644
|
|
--- a/include/linux/sysctl.h
|
|
+++ b/include/linux/sysctl.h
|
|
@@ -143,7 +143,7 @@ struct ctl_table_header
|
|
struct ctl_table_set *set;
|
|
struct ctl_dir *parent;
|
|
struct ctl_node *node;
|
|
- struct list_head inodes; /* head for proc_inode->sysctl_inodes */
|
|
+ struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
|
|
};
|
|
|
|
struct ctl_dir {
|
|
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
|
|
index e8d41ff57241..a6ced9e07e1c 100644
|
|
--- a/ipc/mqueue.c
|
|
+++ b/ipc/mqueue.c
|
|
@@ -1253,8 +1253,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
|
|
|
|
timeo = MAX_SCHEDULE_TIMEOUT;
|
|
ret = netlink_attachskb(sock, nc, &timeo, NULL);
|
|
- if (ret == 1)
|
|
+ if (ret == 1) {
|
|
+ sock = NULL;
|
|
goto retry;
|
|
+ }
|
|
if (ret) {
|
|
sock = NULL;
|
|
nc = NULL;
|
|
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
|
|
index c65f7989f850..20819df98125 100644
|
|
--- a/kernel/locking/rwsem-spinlock.c
|
|
+++ b/kernel/locking/rwsem-spinlock.c
|
|
@@ -231,8 +231,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
|
|
|
|
out_nolock:
|
|
list_del(&waiter.list);
|
|
- if (!list_empty(&sem->wait_list))
|
|
- __rwsem_do_wake(sem, 1);
|
|
+ if (!list_empty(&sem->wait_list) && sem->count >= 0)
|
|
+ __rwsem_do_wake(sem, 0);
|
|
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
|
|
|
|
return -EINTR;
|