diff --git a/config/kernel/linux-meson64-legacy.config b/config/kernel/linux-meson64-legacy.config index 1afeafecc5..f740d7ba39 100644 --- a/config/kernel/linux-meson64-legacy.config +++ b/config/kernel/linux-meson64-legacy.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.9.250 Kernel Configuration +# Linux/arm64 4.9.258 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y diff --git a/patch/kernel/meson64-legacy/patch-4.9.250-251.patch b/patch/kernel/meson64-legacy/patch-4.9.250-251.patch new file mode 100644 index 0000000000..4c2c636c0c --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.250-251.patch @@ -0,0 +1,1121 @@ +diff --git a/Makefile b/Makefile +index 525d7ec7249d6..8ebbb60f2078a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 250 ++SUBLEVEL = 251 + EXTRAVERSION = + NAME = Roaring Lionus + +@@ -349,7 +349,7 @@ OBJDUMP = $(CROSS_COMPILE)objdump + AWK = awk + GENKSYMS = scripts/genksyms/genksyms + INSTALLKERNEL := installkernel +-DEPMOD = /sbin/depmod ++DEPMOD = depmod + PERL = perl + PYTHON = python + CHECK = sparse +diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c +index e12ee86906c62..9436f34520491 100644 +--- a/arch/x86/kernel/cpu/mtrr/generic.c ++++ b/arch/x86/kernel/cpu/mtrr/generic.c +@@ -166,9 +166,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end, + *repeat = 0; + *uniform = 1; + +- /* Make end inclusive instead of exclusive */ +- end--; +- + prev_match = MTRR_TYPE_INVALID; + for (i = 0; i < num_var_ranges; ++i) { + unsigned short start_state, end_state, inclusive; +@@ -260,6 +257,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) + int repeat; + u64 partial_end; + ++ /* Make end inclusive instead of exclusive */ ++ end--; ++ + if (!mtrr_state_set) + return MTRR_TYPE_INVALID; + +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 08e0380414a9b..b68b102f67747 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -697,6 +697,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) + } + + free_page((unsigned long)pmd_sv); ++ ++ pgtable_pmd_page_dtor(virt_to_page(pmd)); + free_page((unsigned long)pmd); + + return 1; +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c +index 074616b39f4d5..89adb49e435ef 100644 +--- a/drivers/atm/idt77252.c ++++ b/drivers/atm/idt77252.c +@@ -3615,7 +3615,7 @@ static int idt77252_init_one(struct pci_dev *pcidev, + + if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) { + printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev)); +- return err; ++ goto err_out_disable_pdev; + } + + card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL); +diff --git a/drivers/base/core.c b/drivers/base/core.c +index ec5cc5d98a3e7..3b8487e28c84f 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -2364,7 +2364,7 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + if (!(parent && fn == parent->fwnode)) +- fn->secondary = ERR_PTR(-ENODEV); ++ fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } +diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c +index e31199f3048ca..0d3b159f4c562 100644 +--- a/drivers/net/ethernet/ethoc.c ++++ b/drivers/net/ethernet/ethoc.c +@@ -1190,7 +1190,7 @@ static int ethoc_probe(struct platform_device *pdev) + ret = mdiobus_register(priv->mdio); + if (ret) { + 
dev_err(&netdev->dev, "failed to register MDIO bus\n"); +- goto free2; ++ goto free3; + } + + ret = ethoc_mdio_probe(netdev); +@@ -1222,6 +1222,7 @@ error2: + netif_napi_del(&priv->napi); + error: + mdiobus_unregister(priv->mdio); ++free3: + mdiobus_free(priv->mdio); + free2: + if (priv->clk) +diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c +index af922bac19ae7..cdef919d5b75c 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -3939,12 +3939,12 @@ static int ucc_geth_remove(struct platform_device* ofdev) + struct device_node *np = ofdev->dev.of_node; + + unregister_netdev(dev); +- free_netdev(dev); + ucc_geth_memclean(ugeth); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(ugeth->ug_info->tbi_node); + of_node_put(ugeth->ug_info->phy_node); ++ free_netdev(dev); + + return 0; + } +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +index f38848c4f69da..62143ccfd5fb3 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +@@ -447,6 +447,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, + /* for mutl buffer*/ + new_skb = skb_copy(skb, GFP_ATOMIC); + dev_kfree_skb_any(skb); ++ if (!new_skb) { ++ netdev_err(ndev, "skb alloc failed\n"); ++ return; ++ } + skb = new_skb; + + check_ok = 0; +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index be4e56826daf6..99ca9526dd65a 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -1602,9 +1602,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) + * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be + * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. + */ +- netif_info(dev, link, dev->net, +- "network connection: %sconnected\n", +- !!event->wValue ? "" : "dis"); + usbnet_link_change(dev, !!event->wValue, 0); + break; + +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 7118b82637605..a3447f8eaf66b 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1357,14 +1357,16 @@ static int virtnet_set_channels(struct net_device *dev, + + get_online_cpus(); + err = virtnet_set_queues(vi, queue_pairs); +- if (!err) { +- netif_set_real_num_tx_queues(dev, queue_pairs); +- netif_set_real_num_rx_queues(dev, queue_pairs); +- +- virtnet_set_affinity(vi); ++ if (err) { ++ put_online_cpus(); ++ goto err; + } ++ virtnet_set_affinity(vi); + put_online_cpus(); + ++ netif_set_real_num_tx_queues(dev, queue_pairs); ++ netif_set_real_num_rx_queues(dev, queue_pairs); ++err: + return err; + } + +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c +index 35589ee0cde11..fe543099768c0 100644 +--- a/drivers/net/wan/hdlc_ppp.c ++++ b/drivers/net/wan/hdlc_ppp.c +@@ -572,6 +572,13 @@ static void ppp_timer(unsigned long arg) + unsigned long flags; + + spin_lock_irqsave(&ppp->lock, flags); ++ /* mod_timer could be called after we entered this function but ++ * before we got the lock. 
++ */ ++ if (timer_pending(&proto->timer)) { ++ spin_unlock_irqrestore(&ppp->lock, flags); ++ return; ++ } + switch (proto->state) { + case STOPPING: + case REQ_SENT: +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c +index 7ea84e6c85bb8..553bdd0983f7e 100644 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c +@@ -133,9 +133,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) + misc_pdev = of_find_device_by_node(args.np); + of_node_put(args.np); + +- if (!misc_pdev || !platform_get_drvdata(misc_pdev)) ++ if (!misc_pdev) + return ERR_PTR(-EPROBE_DEFER); + ++ if (!platform_get_drvdata(misc_pdev)) { ++ put_device(&misc_pdev->dev); ++ return ERR_PTR(-EPROBE_DEFER); ++ } + data->dev = &misc_pdev->dev; + + if (of_find_property(np, "disable-over-current", NULL)) +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 90f4a5c8012ca..a251514a0ee95 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1849,6 +1849,10 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */ + .driver_info = IGNORE_DEVICE, + }, ++ ++ { USB_DEVICE(0x04d8, 0xf58b), ++ .driver_info = IGNORE_DEVICE, ++ }, + #endif + + /*Samsung phone in firmware update mode */ +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 71c2ae4b81067..76701d6ce92c3 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -289,8 +289,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i + #define usblp_reset(usblp)\ + usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) + +-#define usblp_hp_channel_change_request(usblp, channel, buffer) \ +- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1) ++static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel) ++{ ++ u8 *buf; ++ int ret; ++ ++ buf = kzalloc(1, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, ++ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, ++ channel, buf, 1); ++ if (ret == 0) ++ *new_channel = buf[0]; ++ ++ kfree(buf); ++ ++ return ret; ++} + + /* + * See the description for usblp_select_alts() below for the usage +diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig +index f3ee80ece6828..59dc3054ad6e7 100644 +--- a/drivers/usb/gadget/Kconfig ++++ b/drivers/usb/gadget/Kconfig +@@ -258,6 +258,7 @@ config USB_CONFIGFS_NCM + depends on NET + select USB_U_ETHER + select USB_F_NCM ++ select CRC32 + help + NCM is an advanced protocol for Ethernet encapsulation, allows + grouping of several ethernet frames into one USB transfer and +@@ -307,6 +308,7 @@ config USB_CONFIGFS_EEM + depends on NET + select USB_U_ETHER + select USB_F_EEM ++ select CRC32 + help + CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM + and therefore can be supported by more hardware. 
Technically ECM and +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 5a1723d99fe51..c574bf981140a 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function) + + spin_lock_irqsave(&cdev->lock, flags); + +- if (cdev->deactivations == 0) ++ if (cdev->deactivations == 0) { ++ spin_unlock_irqrestore(&cdev->lock, flags); + status = usb_gadget_deactivate(cdev->gadget); ++ spin_lock_irqsave(&cdev->lock, flags); ++ } + if (status == 0) + cdev->deactivations++; + +@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function) + status = -EINVAL; + else { + cdev->deactivations--; +- if (cdev->deactivations == 0) ++ if (cdev->deactivations == 0) { ++ spin_unlock_irqrestore(&cdev->lock, flags); + status = usb_gadget_activate(cdev->gadget); ++ spin_lock_irqsave(&cdev->lock, flags); ++ } + } + + spin_unlock_irqrestore(&cdev->lock, flags); +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c +index 6420cae820bc2..0aa1b8fa9b47a 100644 +--- a/drivers/usb/gadget/configfs.c ++++ b/drivers/usb/gadget/configfs.c +@@ -232,9 +232,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item, + + static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page) + { +- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name; ++ struct gadget_info *gi = to_gadget_info(item); ++ char *udc_name; ++ int ret; ++ ++ mutex_lock(&gi->lock); ++ udc_name = gi->composite.gadget_driver.udc_name; ++ ret = sprintf(page, "%s\n", udc_name ?: ""); ++ mutex_unlock(&gi->lock); + +- return sprintf(page, "%s\n", udc_name ?: ""); ++ return ret; + } + + static int unregister_gadget(struct gadget_info *gi) +@@ -1214,9 +1221,9 @@ static void purge_configs_funcs(struct gadget_info *gi) + + cfg = container_of(c, struct config_usb_cfg, c); + +- list_for_each_entry_safe(f, tmp, &c->functions, list) { ++ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) { + +- list_move_tail(&f->list, &cfg->func_list); ++ list_move(&f->list, &cfg->func_list); + if (f->unbind) { + dev_dbg(&gi->cdev.gadget->dev, + "unbind function '%s'/%p\n", +@@ -1502,7 +1509,7 @@ static const struct usb_gadget_driver configfs_driver_template = { + .suspend = configfs_composite_suspend, + .resume = configfs_composite_resume, + +- .max_speed = USB_SPEED_SUPER, ++ .max_speed = USB_SPEED_SUPER_PLUS, + .driver = { + .owner = THIS_MODULE, + .name = "configfs-gadget", +@@ -1542,7 +1549,7 @@ static struct config_group *gadgets_make( + gi->composite.unbind = configfs_do_nothing; + gi->composite.suspend = NULL; + gi->composite.resume = NULL; +- gi->composite.max_speed = USB_SPEED_SUPER; ++ gi->composite.max_speed = USB_SPEED_SUPER_PLUS; + + spin_lock_init(&gi->spinlock); + mutex_init(&gi->lock); +diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c +index d89b3046dd10b..b962f24b500bf 100644 +--- a/drivers/usb/gadget/function/f_printer.c ++++ b/drivers/usb/gadget/function/f_printer.c +@@ -1120,6 +1120,7 @@ fail_tx_reqs: + printer_req_free(dev->in_ep, req); + } + ++ usb_free_all_descriptors(f); + return ret; + + } +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index a631975e050d9..af7fd0185a44e 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -766,7 +766,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = { + + 
.bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1023), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 1, + }; + +@@ -775,7 +775,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = { + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1024), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 4, + }; + +@@ -843,7 +843,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = { + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1023), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 1, + }; + +@@ -852,7 +852,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = { + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1024), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 4, + }; + +@@ -963,12 +963,28 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep) + "%s:%d Error!\n", __func__, __LINE__); + } + +-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, ++static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, + struct usb_endpoint_descriptor *ep_desc, +- unsigned int factor, bool is_playback) ++ enum usb_device_speed speed, bool is_playback) + { + int chmask, srate, ssize; +- u16 max_packet_size; ++ u16 max_size_bw, max_size_ep; ++ unsigned int factor; ++ ++ switch (speed) { ++ case USB_SPEED_FULL: ++ max_size_ep = 1023; ++ factor = 1000; ++ break; ++ ++ case USB_SPEED_HIGH: ++ max_size_ep = 1024; ++ factor = 8000; ++ break; ++ ++ default: ++ return -EINVAL; ++ } + + if (is_playback) { + chmask = uac2_opts->p_chmask; +@@ -980,10 +996,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, + ssize = uac2_opts->c_ssize; + } + +- max_packet_size = num_channels(chmask) * ssize * ++ max_size_bw = num_channels(chmask) * ssize * + DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); +- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size, +- le16_to_cpu(ep_desc->wMaxPacketSize))); ++ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, ++ max_size_ep)); ++ ++ return 0; + } + + static int +@@ -1082,10 +1100,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) + uac2->c_prm.uac2 = uac2; + + /* Calculate wMaxPacketSize according to audio bandwidth */ +- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true); +- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false); +- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true); +- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false); ++ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL, ++ true); ++ if (ret < 0) { ++ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); ++ return ret; ++ } ++ ++ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL, ++ false); ++ if (ret < 0) { ++ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); ++ return ret; ++ } ++ ++ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH, ++ true); ++ if (ret < 0) { ++ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); ++ return ret; ++ } ++ ++ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH, ++ false); ++ if (ret < 0) { ++ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); ++ return ret; ++ } + + 
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; + hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; +diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c +index c39de65a448ba..270bb88c51cb4 100644 +--- a/drivers/usb/gadget/legacy/acm_ms.c ++++ b/drivers/usb/gadget/legacy/acm_ms.c +@@ -207,8 +207,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail_string_ids; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index b27987431079e..b0e1d3da2e7ed 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -4409,19 +4409,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, + { + unsigned long long timeout_ns; + ++ if (xhci->quirks & XHCI_INTEL_HOST) ++ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); ++ else ++ timeout_ns = udev->u1_params.sel; ++ + /* Prevent U1 if service interval is shorter than U1 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { +- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { ++ if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + +- if (xhci->quirks & XHCI_INTEL_HOST) +- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); +- else +- timeout_ns = udev->u1_params.sel; +- + /* The U1 timeout is encoded in 1us intervals. + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. + */ +@@ -4473,19 +4473,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, + { + unsigned long long timeout_ns; + ++ if (xhci->quirks & XHCI_INTEL_HOST) ++ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); ++ else ++ timeout_ns = udev->u2_params.sel; ++ + /* Prevent U2 if service interval is shorter than U2 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { +- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { ++ if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + +- if (xhci->quirks & XHCI_INTEL_HOST) +- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); +- else +- timeout_ns = udev->u2_params.sel; +- + /* The U2 timeout is encoded in 256us intervals */ + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); + /* If the necessary timeout value is bigger than what we can set in the +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c +index 1be1fa1b73770..16cab1b09ad8e 100644 +--- a/drivers/usb/misc/yurex.c ++++ b/drivers/usb/misc/yurex.c +@@ -507,6 +507,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, + timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); + finish_wait(&dev->waitq, &wait); + ++ /* make sure URB is idle after timeout or (spurious) CMD_ACK */ ++ usb_kill_urb(dev->cntl_urb); ++ + mutex_unlock(&dev->io_mutex); + + if (retval < 0) { +diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c +index bdeb2b2489549..f3c0ad138c3ed 100644 +--- a/drivers/usb/serial/iuu_phoenix.c ++++ b/drivers/usb/serial/iuu_phoenix.c +@@ -553,23 +553,29 @@ static int iuu_uart_flush(struct usb_serial_port 
*port) + struct device *dev = &port->dev; + int i; + int status; +- u8 rxcmd = IUU_UART_RX; ++ u8 *rxcmd; + struct iuu_private *priv = usb_get_serial_port_data(port); + + if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0) + return -EIO; + ++ rxcmd = kmalloc(1, GFP_KERNEL); ++ if (!rxcmd) ++ return -ENOMEM; ++ ++ rxcmd[0] = IUU_UART_RX; ++ + for (i = 0; i < 2; i++) { +- status = bulk_immediate(port, &rxcmd, 1); ++ status = bulk_immediate(port, rxcmd, 1); + if (status != IUU_OPERATION_OK) { + dev_dbg(dev, "%s - uart_flush_write error\n", __func__); +- return status; ++ goto out_free; + } + + status = read_immediate(port, &priv->len, 1); + if (status != IUU_OPERATION_OK) { + dev_dbg(dev, "%s - uart_flush_read error\n", __func__); +- return status; ++ goto out_free; + } + + if (priv->len > 0) { +@@ -577,12 +583,16 @@ static int iuu_uart_flush(struct usb_serial_port *port) + status = read_immediate(port, priv->buf, priv->len); + if (status != IUU_OPERATION_OK) { + dev_dbg(dev, "%s - uart_flush_read error\n", __func__); +- return status; ++ goto out_free; + } + } + } + dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__); + iuu_led(port, 0, 0xF000, 0, 0xFF); ++ ++out_free: ++ kfree(rxcmd); ++ + return status; + } + +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c +index 93a31ea8e210c..1899190d4f5d2 100644 +--- a/drivers/usb/serial/keyspan_pda.c ++++ b/drivers/usb/serial/keyspan_pda.c +@@ -559,10 +559,8 @@ exit: + static void keyspan_pda_write_bulk_callback(struct urb *urb) + { + struct usb_serial_port *port = urb->context; +- struct keyspan_pda_private *priv; + + set_bit(0, &port->write_urbs_free); +- priv = usb_get_serial_port_data(port); + + /* queue up a wakeup at scheduler time */ + usb_serial_port_softint(port); +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 6045a8e24068c..1998b314368e0 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -2043,6 +2043,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ + .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ ++ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index 018b0663d6109..1bf15f9d4f43e 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -163,6 +163,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + ++/* Reported-by: Thinh Nguyen */ ++UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, ++ "PNY", ++ "Pro Elite SSD", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_ATA_1X), ++ + /* Reported-by: Thinh Nguyen */ + UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, + "PNY", +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index 861f43f8f9cea..66663757cd701 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -377,6 +377,7 @@ static void handle_tx(struct vhost_net *net) + size_t hdr_size; + struct socket *sock; + struct vhost_net_ubuf_ref *uninitialized_var(ubufs); ++ struct ubuf_info *ubuf; + bool zcopy, zcopy_used; + int sent_pkts = 0; + +@@ 
-444,9 +445,7 @@ static void handle_tx(struct vhost_net *net) + + /* use msg_control to pass vhost zerocopy ubuf info to skb */ + if (zcopy_used) { +- struct ubuf_info *ubuf; + ubuf = nvq->ubuf_info + nvq->upend_idx; +- + vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); + vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; + ubuf->callback = vhost_zerocopy_callback; +@@ -465,7 +464,8 @@ static void handle_tx(struct vhost_net *net) + err = sock->ops->sendmsg(sock, &msg, len); + if (unlikely(err < 0)) { + if (zcopy_used) { +- vhost_net_ubuf_put(ubufs); ++ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) ++ vhost_net_ubuf_put(ubufs); + nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) + % UIO_MAXIOV; + } +diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c +index f3938c5278832..6e680007cf6b0 100644 +--- a/drivers/video/fbdev/hyperv_fb.c ++++ b/drivers/video/fbdev/hyperv_fb.c +@@ -713,11 +713,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) + } + + /* +- * Map the VRAM cacheable for performance. This is also required for +- * VM Connect to display properly for ARM64 Linux VM, as the host also +- * maps the VRAM cacheable. ++ * Map the VRAM cacheable for performance. + */ +- fb_virt = ioremap_cache(par->mem->start, screen_fb_size); ++ fb_virt = ioremap_wc(par->mem->start, screen_fb_size); + if (!fb_virt) + goto err2; + +diff --git a/include/net/red.h b/include/net/red.h +index 3618cdfec884e..17821f66de111 100644 +--- a/include/net/red.h ++++ b/include/net/red.h +@@ -167,12 +167,14 @@ static inline void red_set_vars(struct red_vars *v) + v->qcount = -1; + } + +-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog) ++static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log) + { + if (fls(qth_min) + Wlog > 32) + return false; + if (fls(qth_max) + Wlog > 32) + return false; ++ if (Scell_log >= 32) ++ return false; + if (qth_max < qth_min) + return false; + return true; +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 00c295d3104bb..205c3131f8b05 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -3448,17 +3448,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) + * is updated and visible. + */ + if (!freezable || !workqueue_freezing) { ++ bool kick = false; ++ + pwq->max_active = wq->saved_max_active; + + while (!list_empty(&pwq->delayed_works) && +- pwq->nr_active < pwq->max_active) ++ pwq->nr_active < pwq->max_active) { + pwq_activate_first_delayed(pwq); ++ kick = true; ++ } + + /* + * Need to kick a worker after thawed or an unbound wq's +- * max_active is bumped. It's a slow path. Do it always. ++ * max_active is bumped. In realtime scenarios, always kicking a ++ * worker will cause interference on the isolated cpu cores, so ++ * let's kick iff work items were activated. + */ +- wake_up_worker(pwq->pool); ++ if (kick) ++ wake_up_worker(pwq->pool); + } else { + pwq->max_active = 0; + } +diff --git a/lib/genalloc.c b/lib/genalloc.c +index 7e85d1e37a6ea..0b8ee173cf3a6 100644 +--- a/lib/genalloc.c ++++ b/lib/genalloc.c +@@ -83,14 +83,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) + * users set the same bit, one user will return remain bits, otherwise + * return 0. 
+ */ +-static int bitmap_set_ll(unsigned long *map, int start, int nr) ++static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr) + { + unsigned long *p = map + BIT_WORD(start); +- const int size = start + nr; ++ const unsigned long size = start + nr; + int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + +- while (nr - bits_to_set >= 0) { ++ while (nr >= bits_to_set) { + if (set_bits_ll(p, mask_to_set)) + return nr; + nr -= bits_to_set; +@@ -118,14 +118,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr) + * users clear the same bit, one user will return remain bits, + * otherwise return 0. + */ +-static int bitmap_clear_ll(unsigned long *map, int start, int nr) ++static unsigned long ++bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr) + { + unsigned long *p = map + BIT_WORD(start); +- const int size = start + nr; ++ const unsigned long size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + +- while (nr - bits_to_clear >= 0) { ++ while (nr >= bits_to_clear) { + if (clear_bits_ll(p, mask_to_clear)) + return nr; + nr -= bits_to_clear; +@@ -184,8 +185,8 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy + size_t size, int nid) + { + struct gen_pool_chunk *chunk; +- int nbits = size >> pool->min_alloc_order; +- int nbytes = sizeof(struct gen_pool_chunk) + ++ unsigned long nbits = size >> pool->min_alloc_order; ++ unsigned long nbytes = sizeof(struct gen_pool_chunk) + + BITS_TO_LONGS(nbits) * sizeof(long); + + chunk = vzalloc_node(nbytes, nid); +@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool) + struct list_head *_chunk, *_next_chunk; + struct gen_pool_chunk *chunk; + int order = pool->min_alloc_order; +- int bit, end_bit; ++ unsigned long bit, end_bit; + + list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { + chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); +@@ -293,7 +294,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, + struct gen_pool_chunk *chunk; + unsigned long addr = 0; + int order = pool->min_alloc_order; +- int nbits, start_bit, end_bit, remain; ++ unsigned long nbits, start_bit, end_bit, remain; + + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +@@ -376,7 +377,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) + { + struct gen_pool_chunk *chunk; + int order = pool->min_alloc_order; +- int start_bit, nbits, remain; ++ unsigned long start_bit, nbits, remain; + + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +@@ -638,7 +639,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, + index = bitmap_find_next_zero_area(map, size, start, nr, 0); + + while (index < size) { +- int next_bit = find_next_bit(map, size, index + nr); ++ unsigned long next_bit = find_next_bit(map, size, index + nr); + if ((next_bit - index) < len) { + len = next_bit - index; + start_bit = index; +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index cbe3fdba4a2c8..aa4c045587d10 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -292,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) + .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), + .daddr = ip_hdr(skb)->saddr, +- .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), ++ .flowi4_tos = 
ip_hdr(skb)->tos & IPTOS_RT_MASK, + .flowi4_scope = scope, + .flowi4_mark = vmark ? skb->mark : 0, + }; +diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c +index 087db775b3dc7..c973d88e9ea72 100644 +--- a/net/ncsi/ncsi-rsp.c ++++ b/net/ncsi/ncsi-rsp.c +@@ -975,7 +975,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev, + int payload, i, ret; + + /* Find the NCSI device */ +- nd = ncsi_find_dev(dev); ++ nd = ncsi_find_dev(orig_dev); + ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL; + if (!ndp) + return -ENODEV; +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index d32fd6b036bfa..a4bd2d3a4821b 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -113,20 +113,6 @@ htable_size(u8 hbits) + return hsize * sizeof(struct hbucket *) + sizeof(struct htable); + } + +-/* Compute htable_bits from the user input parameter hashsize */ +-static u8 +-htable_bits(u32 hashsize) +-{ +- /* Assume that hashsize == 2^htable_bits */ +- u8 bits = fls(hashsize - 1); +- +- if (jhash_size(bits) != hashsize) +- /* Round up to the first 2^n value */ +- bits = fls(hashsize); +- +- return bits; +-} +- + #ifdef IP_SET_HASH_WITH_NETS + #if IPSET_NET_COUNT > 1 + #define __CIDR(cidr, i) (cidr[i]) +@@ -1309,7 +1295,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, + get_random_bytes(&h->initval, sizeof(h->initval)); + set->timeout = IPSET_NO_TIMEOUT; + +- hbits = htable_bits(hashsize); ++ /* Compute htable_bits from the user input parameter hashsize. ++ * Assume that hashsize == 2^htable_bits, ++ * otherwise round up to the first 2^n value. ++ */ ++ hbits = fls(hashsize - 1); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); +diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c +index 92cfae66743bc..1076b8a50d008 100644 +--- a/net/netfilter/xt_RATEEST.c ++++ b/net/netfilter/xt_RATEEST.c +@@ -106,6 +106,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) + } cfg; + int ret; + ++ if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name)) ++ return -ENAMETOOLONG; ++ + net_get_random_once(&jhash_rnd, sizeof(jhash_rnd)); + + mutex_lock(&xt_rateest_mutex); +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c +index d13e1de0b3a21..2fb79c245f3fc 100644 +--- a/net/sched/sch_choke.c ++++ b/net/sched/sch_choke.c +@@ -425,7 +425,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) + + ctl = nla_data(tb[TCA_CHOKE_PARMS]); + +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) + return -EINVAL; + + if (ctl->limit > CHOKE_MAX_QUEUE) +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c +index 729c0e4eca21d..d86a96313981b 100644 +--- a/net/sched/sch_gred.c ++++ b/net/sched/sch_gred.c +@@ -356,7 +356,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, + struct gred_sched *table = qdisc_priv(sch); + struct gred_sched_data *q = table->tab[dp]; + +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) + return -EINVAL; + + if (!q) { +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c +index 4610d44f58d3a..797895bddcfda 100644 +--- a/net/sched/sch_red.c ++++ b/net/sched/sch_red.c +@@ -184,7 +184,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) + max_P = tb[TCA_RED_MAX_P] ? 
nla_get_u32(tb[TCA_RED_MAX_P]) : 0; + + ctl = nla_data(tb[TCA_RED_PARMS]); +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) + return -EINVAL; + + if (ctl->limit > 0) { +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index 633e237a406ca..69a5fffed86c7 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -645,7 +645,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) + } + + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, +- ctl_v1->Wlog)) ++ ctl_v1->Wlog, ctl_v1->Scell_log)) + return -EINVAL; + if (ctl_v1 && ctl_v1->qth_min) { + p = kmalloc(sizeof(*p), GFP_KERNEL); +diff --git a/scripts/depmod.sh b/scripts/depmod.sh +index baedaef53ca05..b0cb89e73bc56 100755 +--- a/scripts/depmod.sh ++++ b/scripts/depmod.sh +@@ -14,6 +14,8 @@ if ! test -r System.map ; then + exit 0 + fi + ++# legacy behavior: "depmod" in /sbin, no /sbin in PATH ++PATH="$PATH:/sbin" + if [ -z $(command -v $DEPMOD) ]; then + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 + echo "This is probably in the kmod package." >&2 +diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py +index f9b92ece78343..6d2e09a2ad2f9 100644 +--- a/scripts/gdb/linux/dmesg.py ++++ b/scripts/gdb/linux/dmesg.py +@@ -12,6 +12,7 @@ + # + + import gdb ++import sys + + from linux import utils + +@@ -23,10 +24,11 @@ class LxDmesg(gdb.Command): + super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): +- log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) +- log_first_idx = int(gdb.parse_and_eval("log_first_idx")) +- log_next_idx = int(gdb.parse_and_eval("log_next_idx")) +- log_buf_len = int(gdb.parse_and_eval("log_buf_len")) ++ log_buf_addr = int(str(gdb.parse_and_eval( ++ "(void *)'printk.c'::log_buf")).split()[0], 16) ++ log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx")) ++ log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx")) ++ log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len")) + + inf = gdb.inferiors()[0] + start = log_buf_addr + log_first_idx +@@ -51,13 +53,19 @@ class LxDmesg(gdb.Command): + continue + + text_len = utils.read_u16(log_buf[pos + 10:pos + 12]) +- text = log_buf[pos + 16:pos + 16 + text_len].decode() ++ text = log_buf[pos + 16:pos + 16 + text_len].decode( ++ encoding='utf8', errors='replace') + time_stamp = utils.read_u64(log_buf[pos:pos + 8]) + + for line in text.splitlines(): +- gdb.write("[{time:12.6f}] {line}\n".format( ++ msg = u"[{time:12.6f}] {line}\n".format( + time=time_stamp / 1000000000.0, +- line=line)) ++ line=line) ++ # With python2 gdb.write will attempt to convert unicode to ++ # ascii and might fail so pass an utf8-encoded str instead. 
++ if sys.hexversion < 0x03000000: ++ msg = msg.encode(encoding='utf8', errors='replace') ++ gdb.write(msg) + + pos += length + +diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py +index 38b1f09d1cd95..822e3767bc054 100644 +--- a/scripts/gdb/linux/proc.py ++++ b/scripts/gdb/linux/proc.py +@@ -40,7 +40,7 @@ class LxVersion(gdb.Command): + + def invoke(self, arg, from_tty): + # linux_banner should contain a newline +- gdb.write(gdb.parse_and_eval("linux_banner").string()) ++ gdb.write(gdb.parse_and_eval("(char *)linux_banner").string()) + + LxVersion() + +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 1e99500dbb6c8..f7797e546e3de 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -1001,6 +1001,7 @@ static int patch_conexant_auto(struct hda_codec *codec) + static const struct hda_device_id snd_hda_id_conexant[] = { + HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), ++ HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), +diff --git a/sound/usb/midi.c b/sound/usb/midi.c +index b8d4b5b3e54a1..ea264727fdf7b 100644 +--- a/sound/usb/midi.c ++++ b/sound/usb/midi.c +@@ -1867,6 +1867,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, + ms_ep = find_usb_ms_endpoint_descriptor(hostep); + if (!ms_ep) + continue; ++ if (ms_ep->bNumEmbMIDIJack > 0x10) ++ continue; + if (usb_endpoint_dir_out(ep)) { + if (endpoints[epidx].out_ep) { + if (++epidx >= MIDI_MAX_ENDPOINTS) { +@@ -2119,6 +2121,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi, + cs_desc[1] == USB_DT_CS_INTERFACE && + cs_desc[2] == 0xf1 && + cs_desc[3] == 0x02) { ++ if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10) ++ continue; + endpoint->in_cables = (1 << cs_desc[4]) - 1; + endpoint->out_cables = (1 << cs_desc[5]) - 1; + return snd_usbmidi_detect_endpoints(umidi, endpoint, 1); diff --git a/patch/kernel/meson64-legacy/patch-4.9.251-252.patch b/patch/kernel/meson64-legacy/patch-4.9.251-252.patch new file mode 100644 index 0000000000..0c46ccb97d --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.251-252.patch @@ -0,0 +1,788 @@ +diff --git a/Makefile b/Makefile +index 8ebbb60f2078a..2213fe336705f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 251 ++SUBLEVEL = 252 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c +index f989145480c8f..bf236b7af8c1a 100644 +--- a/arch/arm/mach-omap2/omap_device.c ++++ b/arch/arm/mach-omap2/omap_device.c +@@ -224,10 +224,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb, + break; + case BUS_NOTIFY_BIND_DRIVER: + od = to_omap_device(pdev); +- if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && +- pm_runtime_status_suspended(dev)) { ++ if (od) { + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; +- pm_runtime_set_active(dev); ++ if (od->_state == OMAP_DEVICE_STATE_ENABLED && ++ pm_runtime_status_suspended(dev)) { ++ pm_runtime_set_active(dev); ++ } + } + break; + case BUS_NOTIFY_ADD_DEVICE: +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index 0ad81fa13688f..10d80456f38f1 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ 
b/arch/arm64/kvm/sys_regs.c +@@ -450,6 +450,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) + { + u64 pmcr, val; + ++ /* No PMU available, PMCR_EL0 may UNDEF... */ ++ if (!kvm_arm_support_pmu_v3()) ++ return; ++ + pmcr = read_sysreg(pmcr_el0); + /* + * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN +diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h +index 6b8b2d57fdc8c..e588028922a83 100644 +--- a/arch/powerpc/include/asm/book3s/32/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h +@@ -411,9 +411,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); + __asm__ __volatile__("\ +- stw%U0%X0 %2,%0\n\ ++ stw%X0 %2,%0\n\ + eieio\n\ +- stw%U0%X0 %L2,%1" ++ stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h +index 1263c22d60d85..330fe178c0c5e 100644 +--- a/arch/powerpc/include/asm/nohash/pgtable.h ++++ b/arch/powerpc/include/asm/nohash/pgtable.h +@@ -155,9 +155,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + flush_hash_entry(mm, ptep, addr); + #endif + __asm__ __volatile__("\ +- stw%U0%X0 %2,%0\n\ ++ stw%X0 %2,%0\n\ + eieio\n\ +- stw%U0%X0 %L2,%1" ++ stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +diff --git a/block/genhd.c b/block/genhd.c +index fcd6d4fae657c..9c1adfd768d2c 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -159,14 +159,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) + part = rcu_dereference(ptbl->part[piter->idx]); + if (!part) + continue; ++ get_device(part_to_dev(part)); ++ piter->part = part; + if (!part_nr_sects_read(part) && + !(piter->flags & DISK_PITER_INCL_EMPTY) && + !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && +- piter->idx == 0)) ++ piter->idx == 0)) { ++ put_device(part_to_dev(part)); ++ piter->part = NULL; + continue; ++ } + +- get_device(part_to_dev(part)); +- piter->part = part; + piter->idx += inc; + break; + } +diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig +index 39dd30b6ef86e..894102fd5a069 100644 +--- a/drivers/block/Kconfig ++++ b/drivers/block/Kconfig +@@ -530,6 +530,7 @@ config BLK_DEV_RBD + config BLK_DEV_RSXX + tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" + depends on PCI ++ select CRC32 + help + Device driver for IBM's high speed PCIe SSD + storage device: Flash Adapter 900GB Full Height. 
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c +index 0b5bf135b0907..59f16807921ad 100644 +--- a/drivers/cpufreq/powernow-k8.c ++++ b/drivers/cpufreq/powernow-k8.c +@@ -887,9 +887,9 @@ static int get_transition_latency(struct powernow_k8_data *data) + + /* Take a frequency, and issue the fid/vid transition command */ + static int transition_frequency_fidvid(struct powernow_k8_data *data, +- unsigned int index) ++ unsigned int index, ++ struct cpufreq_policy *policy) + { +- struct cpufreq_policy *policy; + u32 fid = 0; + u32 vid = 0; + int res; +@@ -921,9 +921,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, + freqs.old = find_khz_freq_from_fid(data->currfid); + freqs.new = find_khz_freq_from_fid(fid); + +- policy = cpufreq_cpu_get(smp_processor_id()); +- cpufreq_cpu_put(policy); +- + cpufreq_freq_transition_begin(policy, &freqs); + res = transition_fid_vid(data, fid, vid); + cpufreq_freq_transition_end(policy, &freqs, res); +@@ -978,7 +975,7 @@ static long powernowk8_target_fn(void *arg) + + powernow_k8_acpi_pst_values(data, newstate); + +- ret = transition_frequency_fidvid(data, newstate); ++ ret = transition_frequency_fidvid(data, newstate, pol); + + if (ret) { + pr_err("transition frequency failed\n"); +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c +index ef99ef0bb1ca2..f00652585ee31 100644 +--- a/drivers/dma/xilinx/xilinx_dma.c ++++ b/drivers/dma/xilinx/xilinx_dma.c +@@ -2357,7 +2357,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + has_dre = false; + + if (!has_dre) +- xdev->common.copy_align = fls(width - 1); ++ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); + + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || +@@ -2630,7 +2630,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) + } + + /* Register the DMA engine with the core */ +- dma_async_device_register(&xdev->common); ++ err = dma_async_device_register(&xdev->common); ++ if (err) { ++ dev_err(xdev->dev, "failed to register the dma device\n"); ++ goto error; ++ } + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +index 4548d89abcdc3..ff8168c60b35a 100644 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -882,7 +882,7 @@ eb_vma_misplaced(struct i915_vma *vma) + return !only_mappable_for_reloc(entry->flags); + + if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && +- (vma->node.start + vma->node.size - 1) >> 32) ++ (vma->node.start + vma->node.size + 4095) >> 32) + return true; + + return false; +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c +index ce125ec23d2a5..88ba1a65c2830 100644 +--- a/drivers/iommu/intel_irq_remapping.c ++++ b/drivers/iommu/intel_irq_remapping.c +@@ -1350,6 +1350,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_cfg = irqd_cfg(irq_data); + if (!irq_data || !irq_cfg) { ++ if (!i) ++ kfree(data); + ret = -EINVAL; + goto out_free_data; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index 8cd7227fbdfce..3dd0bc8804c1a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -930,6 +930,7 @@ err_destroy_groups: + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + kvfree(in); ++ kfree(ft->g); + + return err; + } +diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig +index 4e9fe75d70675..069f933b0add2 100644 +--- a/drivers/net/wan/Kconfig ++++ b/drivers/net/wan/Kconfig +@@ -295,6 +295,7 @@ config SLIC_DS26522 + tristate "Slic Maxim ds26522 card support" + depends on SPI + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST ++ select BITREVERSE + help + This module initializes and configures the slic maxim card + in T1 or E1 mode. +diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig +index 6dfedc8bd6a3d..7df13a684d2df 100644 +--- a/drivers/net/wireless/ath/wil6210/Kconfig ++++ b/drivers/net/wireless/ath/wil6210/Kconfig +@@ -1,6 +1,7 @@ + config WIL6210 + tristate "Wilocity 60g WiFi card wil6210 support" + select WANT_DEV_COREDUMP ++ select CRC32 + depends on CFG80211 + depends on PCI + default n +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index da3834fe5e570..9bb6a574ab2fe 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1606,7 +1606,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + return -ENODEV; + } + +- master = spi_alloc_master(dev, sizeof(struct driver_data)); ++ master = devm_spi_alloc_master(dev, sizeof(*drv_data)); + if (!master) { + dev_err(&pdev->dev, "cannot alloc spi_master\n"); + pxa_ssp_free(ssp); +@@ -1788,7 +1788,6 @@ out_error_clock_enabled: + free_irq(ssp->irq, drv_data); + + out_error_master_alloc: +- spi_master_put(master); + pxa_ssp_free(ssp); + return status; + } +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index e738b4621cbba..ecd707f74ddcb 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1736,6 +1736,10 @@ void transport_generic_request_failure(struct se_cmd *cmd, + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: ++ case TCM_TOO_MANY_TARGET_DESCS: ++ case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: ++ case TCM_TOO_MANY_SEGMENT_DESCS: ++ case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: + break; + case TCM_OUT_OF_RESOURCES: + sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +@@ -2886,6 +2890,26 @@ static const struct sense_info sense_info_table[] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ + }, ++ [TCM_TOO_MANY_TARGET_DESCS] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ ++ }, ++ [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ ++ }, ++ [TCM_TOO_MANY_SEGMENT_DESCS] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ ++ }, ++ [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ ++ }, + [TCM_PARAMETER_LIST_LENGTH_ERROR] = { + .key = ILLEGAL_REQUEST, + .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c +index 18848ba8d2ba0..84e3bf1132fd5 100644 +--- a/drivers/target/target_core_xcopy.c ++++ b/drivers/target/target_core_xcopy.c +@@ -52,64 +52,87 @@ 
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) + return 0; + } + +-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, +- bool src) ++/** ++ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers ++ * ++ * @se_dev: device being considered for match ++ * @dev_wwn: XCOPY requested NAA dev_wwn ++ * @return: 1 on match, 0 on no-match ++ */ ++static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, ++ const unsigned char *dev_wwn) + { +- struct se_device *se_dev; +- unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; ++ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + int rc; + +- if (src) +- dev_wwn = &xop->dst_tid_wwn[0]; +- else +- dev_wwn = &xop->src_tid_wwn[0]; +- +- mutex_lock(&g_device_mutex); +- list_for_each_entry(se_dev, &g_device_list, g_dev_node) { +- +- if (!se_dev->dev_attrib.emulate_3pc) +- continue; ++ if (!se_dev->dev_attrib.emulate_3pc) { ++ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); ++ return 0; ++ } + +- memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); +- target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); ++ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); ++ target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); + +- rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); +- if (rc != 0) +- continue; ++ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); ++ if (rc != 0) { ++ pr_debug("XCOPY: skip non-matching: %*ph\n", ++ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); ++ return 0; ++ } ++ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); + +- if (src) { +- xop->dst_dev = se_dev; +- pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" +- " se_dev\n", xop->dst_dev); +- } else { +- xop->src_dev = se_dev; +- pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" +- " se_dev\n", xop->src_dev); +- } ++ return 1; ++} + +- rc = target_depend_item(&se_dev->dev_group.cg_item); +- if (rc != 0) { +- pr_err("configfs_depend_item attempt failed:" +- " %d for se_dev: %p\n", rc, se_dev); +- mutex_unlock(&g_device_mutex); +- return rc; ++static int target_xcopy_locate_se_dev_e4(struct se_session *sess, ++ const unsigned char *dev_wwn, ++ struct se_device **_found_dev, ++ struct percpu_ref **_found_lun_ref) ++{ ++ struct se_dev_entry *deve; ++ struct se_node_acl *nacl; ++ struct se_lun *this_lun = NULL; ++ struct se_device *found_dev = NULL; ++ ++ /* cmd with NULL sess indicates no associated $FABRIC_MOD */ ++ if (!sess) ++ goto err_out; ++ ++ pr_debug("XCOPY 0xe4: searching for: %*ph\n", ++ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); ++ ++ nacl = sess->se_node_acl; ++ rcu_read_lock(); ++ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { ++ struct se_device *this_dev; ++ int rc; ++ ++ this_lun = rcu_dereference(deve->se_lun); ++ this_dev = rcu_dereference_raw(this_lun->lun_se_dev); ++ ++ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); ++ if (rc) { ++ if (percpu_ref_tryget_live(&this_lun->lun_ref)) ++ found_dev = this_dev; ++ break; + } +- +- pr_debug("Called configfs_depend_item for se_dev: %p" +- " se_dev->se_dev_group: %p\n", se_dev, +- &se_dev->dev_group); +- +- mutex_unlock(&g_device_mutex); +- return 0; + } +- mutex_unlock(&g_device_mutex); +- ++ rcu_read_unlock(); ++ if (found_dev == NULL) ++ goto err_out; ++ ++ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", ++ found_dev, &found_dev->dev_group); ++ *_found_dev = found_dev; ++ *_found_lun_ref = &this_lun->lun_ref; ++ 
return 0; ++err_out: + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; + } + + static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, +- unsigned char *p, bool src) ++ unsigned char *p, unsigned short cscd_index) + { + unsigned char *desc = p; + unsigned short ript; +@@ -154,7 +177,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op + return -EINVAL; + } + +- if (src) { ++ if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { ++ pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " ++ "dest\n", cscd_index); ++ return 0; ++ } ++ ++ if (cscd_index == xop->stdi) { + memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* + * Determine if the source designator matches the local device +@@ -166,10 +195,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op + pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" + " received xop\n", xop->src_dev); + } +- } else { ++ } ++ ++ if (cscd_index == xop->dtdi) { + memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* +- * Determine if the destination designator matches the local device ++ * Determine if the destination designator matches the local ++ * device. If @cscd_index corresponds to both source (stdi) and ++ * destination (dtdi), or dtdi comes after stdi, then ++ * XCOL_DEST_RECV_OP wins. + */ + if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], + XCOPY_NAA_IEEE_REGEX_LEN)) { +@@ -189,9 +223,9 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + { + struct se_device *local_dev = se_cmd->se_dev; + unsigned char *desc = p; +- int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; ++ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; ++ unsigned short cscd_index = 0; + unsigned short start = 0; +- bool src = true; + + *sense_ret = TCM_INVALID_PARAMETER_LIST; + +@@ -214,25 +248,19 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + + while (start < tdll) { + /* +- * Check target descriptor identification with 0xE4 type with +- * use VPD 0x83 WWPN matching .. ++ * Check target descriptor identification with 0xE4 type, and ++ * compare the current index with the CSCD descriptor IDs in ++ * the segment descriptor. Use VPD 0x83 WWPN matching .. + */ + switch (desc[0]) { + case 0xe4: + rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, +- &desc[0], src); ++ &desc[0], cscd_index); + if (rc != 0) + goto out; +- /* +- * Assume target descriptors are in source -> destination order.. 
+- */ +- if (src) +- src = false; +- else +- src = true; + start += XCOPY_TARGET_DESC_LEN; + desc += XCOPY_TARGET_DESC_LEN; +- ret++; ++ cscd_index++; + break; + default: + pr_err("XCOPY unsupported descriptor type code:" +@@ -241,10 +269,25 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + } + } + +- if (xop->op_origin == XCOL_SOURCE_RECV_OP) +- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); +- else +- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); ++ switch (xop->op_origin) { ++ case XCOL_SOURCE_RECV_OP: ++ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, ++ xop->dst_tid_wwn, ++ &xop->dst_dev, ++ &xop->remote_lun_ref); ++ break; ++ case XCOL_DEST_RECV_OP: ++ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, ++ xop->src_tid_wwn, ++ &xop->src_dev, ++ &xop->remote_lun_ref); ++ break; ++ default: ++ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " ++ "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); ++ rc = -EINVAL; ++ break; ++ } + /* + * If a matching IEEE NAA 0x83 descriptor for the requested device + * is not located on this node, return COPY_ABORTED with ASQ/ASQC +@@ -261,7 +304,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", + xop->dst_dev, &xop->dst_tid_wwn[0]); + +- return ret; ++ return cscd_index; + + out: + return -EINVAL; +@@ -305,17 +348,26 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op + + static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, + struct xcopy_op *xop, unsigned char *p, +- unsigned int sdll) ++ unsigned int sdll, sense_reason_t *sense_ret) + { + unsigned char *desc = p; + unsigned int start = 0; + int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; + ++ *sense_ret = TCM_INVALID_PARAMETER_LIST; ++ + if (offset != 0) { + pr_err("XCOPY segment descriptor list length is not" + " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); + return -EINVAL; + } ++ if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { ++ pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too" ++ " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); ++ /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ ++ *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; ++ return -EINVAL; ++ } + + while (start < sdll) { + /* +@@ -372,18 +424,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) + + static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) + { +- struct se_device *remote_dev; +- + if (xop->op_origin == XCOL_SOURCE_RECV_OP) +- remote_dev = xop->dst_dev; ++ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); + else +- remote_dev = xop->src_dev; ++ pr_debug("putting src lun_ref for %p\n", xop->src_dev); + +- pr_debug("Calling configfs_undepend_item for" +- " remote_dev: %p remote_dev->dev_group: %p\n", +- remote_dev, &remote_dev->dev_group.cg_item); +- +- target_undepend_item(&remote_dev->dev_group.cg_item); ++ percpu_ref_put(xop->remote_lun_ref); + } + + static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) +@@ -893,6 +939,20 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) + " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, + tdll, sdll, inline_dl); + ++ /* ++ * skip over the target descriptors until segment descriptors ++ * have been passed - CSCD ids are needed to determine src and dest. 
++ */ ++ seg_desc = &p[16] + tdll; ++ ++ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, ++ sdll, &ret); ++ if (rc <= 0) ++ goto out; ++ ++ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, ++ rc * XCOPY_SEGMENT_DESC_LEN); ++ + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); + if (rc <= 0) + goto out; +@@ -910,18 +970,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) + + pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, + rc * XCOPY_TARGET_DESC_LEN); +- seg_desc = &p[16]; +- seg_desc += (rc * XCOPY_TARGET_DESC_LEN); +- +- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); +- if (rc <= 0) { +- xcopy_pt_undepend_remotedev(xop); +- goto out; +- } + transport_kunmap_data_sg(se_cmd); + +- pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, +- rc * XCOPY_SEGMENT_DESC_LEN); + INIT_WORK(&xop->xop_work, target_xcopy_do_work); + queue_work(xcopy_wq, &xop->xop_work); + return TCM_NO_SENSE; +diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h +index 700a981c7b415..7db8d0c9223f8 100644 +--- a/drivers/target/target_core_xcopy.h ++++ b/drivers/target/target_core_xcopy.h +@@ -19,6 +19,7 @@ struct xcopy_op { + struct se_device *dst_dev; + unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; ++ struct percpu_ref *remote_lun_ref; + + sector_t src_lba; + sector_t dst_lba; +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c +index 9213a9e046ae0..99caaae01caba 100644 +--- a/fs/ubifs/io.c ++++ b/fs/ubifs/io.c +@@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) + { + uint32_t crc; + +- ubifs_assert(pad >= 0 && !(pad & 7)); ++ ubifs_assert(pad >= 0); + + if (pad >= UBIFS_PAD_NODE_SZ) { + struct ubifs_ch *ch = buf; +@@ -721,6 +721,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) + * write-buffer. + */ + memcpy(wbuf->buf + wbuf->used, buf, len); ++ if (aligned_len > len) { ++ ubifs_assert(aligned_len - len < 8); ++ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len); ++ } + + if (aligned_len == wbuf->avail) { + dbg_io("flush jhead %s wbuf to LEB %d:%d", +@@ -813,13 +817,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) + } + + spin_lock(&wbuf->lock); +- if (aligned_len) ++ if (aligned_len) { + /* + * And now we have what's left and what does not take whole + * max. write unit, so write it to the write-buffer and we are + * done. 
+ */ + memcpy(wbuf->buf, buf + written, len); ++ if (aligned_len > len) { ++ ubifs_assert(aligned_len - len < 8); ++ ubifs_pad(c, wbuf->buf + len, aligned_len - len); ++ } ++ } + + if (c->leb_size - wbuf->offs >= c->max_write_size) + wbuf->size = c->max_write_size; +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index 4fdb1d9848444..36198563fb8bc 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -462,7 +462,10 @@ + */ + #define TEXT_TEXT \ + ALIGN_FUNCTION(); \ +- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ ++ *(.text.hot .text.hot.*) \ ++ *(TEXT_MAIN .text.fixup) \ ++ *(.text.unlikely .text.unlikely.*) \ ++ *(.text.unknown .text.unknown.*) \ + *(.text..ftrace) \ + *(TEXT_CFI_MAIN) \ + *(.ref.text) \ +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 30f99ce4c6cea..8a70d38f13329 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -178,6 +178,10 @@ enum tcm_sense_reason_table { + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), + TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), ++ TCM_TOO_MANY_TARGET_DESCS = R(0x19), ++ TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a), ++ TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b), ++ TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c), + #undef R + }; + +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index a4c4234976862..026f4525063c1 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -1592,6 +1592,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) + skb->csum = csum_block_sub(skb->csum, + skb_checksum(skb, len, delta, 0), + len); ++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) { ++ int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; ++ int offset = skb_checksum_start_offset(skb) + skb->csum_offset; ++ ++ if (offset + sizeof(__sum16) > hdlen) ++ return -EINVAL; + } + return __pskb_trim(skb, len); + } +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index c37e9598262e5..3164bae4024a4 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -300,7 +300,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk + if (skb_is_gso(skb)) + return ip_finish_output_gso(net, sk, skb, mtu); + +- if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) ++ if (skb->len > mtu || IPCB(skb)->frag_max_size) + return ip_fragment(net, sk, skb, mtu, ip_finish_output2); + + return ip_finish_output2(net, sk, skb); +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index 5f2e3334cccec..26e1dbc958189 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -743,7 +743,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + goto tx_error; + } + +- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { ++ df = tnl_params->frag_off; ++ if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) ++ df |= (inner_iph->frag_off & htons(IP_DF)); ++ ++ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) { + ip_rt_put(rt); + goto tx_error; + } +@@ -771,10 +775,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + ttl = ip4_dst_hoplimit(&rt->dst); + } + +- df = tnl_params->frag_off; +- if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) +- df |= (inner_iph->frag_off&htons(IP_DF)); +- + max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); + if (max_headroom > dev->needed_headroom) diff --git a/patch/kernel/meson64-legacy/patch-4.9.252-253.patch b/patch/kernel/meson64-legacy/patch-4.9.252-253.patch new file mode 100644 index 0000000000..81d5766561 --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.252-253.patch @@ -0,0 +1,777 @@ +diff --git a/Makefile b/Makefile +index 2213fe336705f..62a07bdcfacb7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 252 ++SUBLEVEL = 253 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arc/Makefile b/arch/arc/Makefile +index fd79faab78926..5dc2d73c64994 100644 +--- a/arch/arc/Makefile ++++ b/arch/arc/Makefile +@@ -108,6 +108,7 @@ bootpImage: vmlinux + + boot_targets += uImage uImage.bin uImage.gz + ++PHONY += $(boot_targets) + $(boot_targets): vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h +index ffb5f33475f19..f0f43eb709d2f 100644 +--- a/arch/arc/include/asm/page.h ++++ b/arch/arc/include/asm/page.h +@@ -13,6 +13,7 @@ + #ifndef __ASSEMBLY__ + + #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) ++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + + struct vm_area_struct; +diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +index 533919e96eaee..f22a6b4363177 100644 +--- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi ++++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +@@ -54,18 +54,21 @@ + emac: gem@30000 { + compatible = "cadence,gem"; + reg = <0x30000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <31>; + }; + + dmac1: dmac@40000 { + compatible = "snps,dw-dmac"; + reg = <0x40000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <25>; + }; 
+ + dmac2: dmac@50000 { + compatible = "snps,dw-dmac"; + reg = <0x50000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <26>; + }; + +@@ -243,6 +246,7 @@ + axi2pico@c0000000 { + compatible = "picochip,axi2pico-pc3x2"; + reg = <0xc0000000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <13 14 15 16 17 18 19 20 21>; + }; + }; +diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c +index fdf99e9dd4c39..3a015e41b762b 100644 +--- a/arch/mips/boot/compressed/decompress.c ++++ b/arch/mips/boot/compressed/decompress.c +@@ -17,6 +17,7 @@ + #include + + #include ++#include + + /* + * These two variables specify the free mem region +@@ -124,7 +125,7 @@ void decompress_kernel(unsigned long boot_heap_start) + dtb_size = fdt_totalsize((void *)&__appended_dtb); + + /* last four bytes is always image size in little endian */ +- image_size = le32_to_cpup((void *)&__image_end - 4); ++ image_size = get_unaligned_le32((void *)&__image_end - 4); + + /* copy dtb to where the booted kernel will expect it */ + memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size, +diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c +index 1958910b75c07..b4a7c303019b0 100644 +--- a/arch/mips/kernel/relocate.c ++++ b/arch/mips/kernel/relocate.c +@@ -175,8 +175,14 @@ static int __init relocate_exception_table(long offset) + static inline __init unsigned long rotate_xor(unsigned long hash, + const void *area, size_t size) + { +- size_t i; +- unsigned long *ptr = (unsigned long *)area; ++ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash)); ++ size_t diff, i; ++ ++ diff = (void *)ptr - area; ++ if (unlikely(size < diff + sizeof(hash))) ++ return hash; ++ ++ size = ALIGN_DOWN(size - diff, sizeof(hash)); + + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. 
*/ +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index b012e94b7d9f9..eae0b278d5172 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -98,7 +98,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); + extern struct list_head acpi_bus_id_list; + + struct acpi_device_bus_id { +- char bus_id[15]; ++ const char *bus_id; + unsigned int instance_no; + struct list_head node; + }; +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 9a7f017dda47f..f49e2b7880ac2 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -485,6 +485,7 @@ static void acpi_device_del(struct acpi_device *device) + acpi_device_bus_id->instance_no--; + else { + list_del(&acpi_device_bus_id->node); ++ kfree_const(acpi_device_bus_id->bus_id); + kfree(acpi_device_bus_id); + } + break; +@@ -673,7 +674,14 @@ int acpi_device_add(struct acpi_device *device, + } + if (!found) { + acpi_device_bus_id = new_bus_id; +- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device)); ++ acpi_device_bus_id->bus_id = ++ kstrdup_const(acpi_device_hid(device), GFP_KERNEL); ++ if (!acpi_device_bus_id->bus_id) { ++ pr_err(PREFIX "Memory allocation error for bus id\n"); ++ result = -ENOMEM; ++ goto err_free_new_bus_id; ++ } ++ + acpi_device_bus_id->instance_no = 0; + list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); + } +@@ -708,6 +716,11 @@ int acpi_device_add(struct acpi_device *device, + if (device->parent) + list_del(&device->node); + list_del(&device->wakeup_list); ++ ++ err_free_new_bus_id: ++ if (!found) ++ kfree(new_bus_id); ++ + mutex_unlock(&acpi_device_lock); + + err_detach: +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +index cacb720f44a02..2896808545f43 100644 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +@@ -180,6 +180,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, + + } + usnic_uiom_free_dev_list(dev_list); ++ dev_list = NULL; + } + + if (!found) { +@@ -207,6 +208,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, + spin_unlock(&vf->lock); + if (IS_ERR_OR_NULL(qp_grp)) { + usnic_err("Failed to allocate qp_grp\n"); ++ if (usnic_ib_share_vf) ++ usnic_uiom_free_dev_list(dev_list); + return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM); + } + +diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c +index 8f2042432c851..66a46c84e28f5 100644 +--- a/drivers/input/ff-core.c ++++ b/drivers/input/ff-core.c +@@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) + EXPORT_SYMBOL_GPL(input_ff_erase); + + /* +- * flush_effects - erase all effects owned by a file handle ++ * input_ff_flush - erase all effects owned by a file handle ++ * @dev: input device to erase effect from ++ * @file: purported owner of the effects ++ * ++ * This function erases all force-feedback effects associated with ++ * the given owner from specified device. Note that @file may be %NULL, ++ * in which case all effects will be erased. 
+ */ +-static int flush_effects(struct input_dev *dev, struct file *file) ++int input_ff_flush(struct input_dev *dev, struct file *file) + { + struct ff_device *ff = dev->ff; + int i; +@@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file) + + return 0; + } ++EXPORT_SYMBOL_GPL(input_ff_flush); + + /** + * input_ff_event() - generic handler for force-feedback events +@@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects) + mutex_init(&ff->mutex); + + dev->ff = ff; +- dev->flush = flush_effects; ++ dev->flush = input_ff_flush; + dev->event = input_ff_event; + __set_bit(EV_FF, dev->evbit); + +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c +index 89d37d0d45ed5..fcd10a4708c3e 100644 +--- a/drivers/input/misc/uinput.c ++++ b/drivers/input/misc/uinput.c +@@ -231,6 +231,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) + return uinput_request_submit(udev, &request); + } + ++static int uinput_dev_flush(struct input_dev *dev, struct file *file) ++{ ++ /* ++ * If we are called with file == NULL that means we are tearing ++ * down the device, and therefore we can not handle FF erase ++ * requests: either we are handling UI_DEV_DESTROY (and holding ++ * the udev->mutex), or the file descriptor is closed and there is ++ * nobody on the other side anymore. ++ */ ++ return file ? input_ff_flush(dev, file) : 0; ++} ++ + static void uinput_destroy_device(struct uinput_device *udev) + { + const char *name, *phys; +@@ -298,6 +310,12 @@ static int uinput_create_device(struct uinput_device *udev) + dev->ff->playback = uinput_dev_playback; + dev->ff->set_gain = uinput_dev_set_gain; + dev->ff->set_autocenter = uinput_dev_set_autocenter; ++ /* ++ * The standard input_ff_flush() implementation does ++ * not quite work for uinput as we can't reasonably ++ * handle FF requests during device teardown. ++ */ ++ dev->flush = uinput_dev_flush; + } + + error = input_register_device(udev->dev); +diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig +index c0730d5c734d6..fb61181a5c4f7 100644 +--- a/drivers/isdn/mISDN/Kconfig ++++ b/drivers/isdn/mISDN/Kconfig +@@ -12,6 +12,7 @@ if MISDN != n + config MISDN_DSP + tristate "Digital Audio Processing of transparent data" + depends on MISDN ++ select BITREVERSE + help + Enable support for digital audio processing capability. 
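The mISDN hunk above is a one-line Kconfig fix, so it is easy to miss what it repairs: the dsp code builds its audio-law tables with the bitrev helpers, and in the generic case bitrev8() is a table lookup into byte_rev_table[], which is compiled from lib/bitrev.c only when CONFIG_BITREVERSE is set. Without the added "select BITREVERSE", a configuration that enables MISDN_DSP with nothing else pulling in the library fails at link time with an undefined reference to byte_rev_table. A minimal sketch of the dependency (the helper below is illustrative, not taken from the mISDN source):

#include <linux/bitrev.h>
#include <linux/types.h>

/* bitrev8() is either an architecture instruction or, generically, a
 * lookup in byte_rev_table[]; that table lives in lib/bitrev.o and so
 * only exists when CONFIG_BITREVERSE=y. */
static u8 dsp_reverse_sample(u8 s)
{
	return bitrev8(s);
}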
+ +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +index 1f015edcca227..3c6fc61597f7e 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = { + }; + + module_platform_driver(fs_enet_bb_mdio_driver); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +index a89267b94352a..fb2b0586469b2 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = { + }; + + module_platform_driver(fs_enet_fec_mdio_driver); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h +index 5da19b440a6a8..bf25e49d4fe34 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.h ++++ b/drivers/net/ethernet/freescale/ucc_geth.h +@@ -580,7 +580,14 @@ struct ucc_geth_tx_global_pram { + u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ + u32 tqptr; /* a base pointer to the Tx Queues Memory + Region */ +- u8 res2[0x80 - 0x74]; ++ u8 res2[0x78 - 0x74]; ++ u64 snums_en; ++ u32 l2l3baseptr; /* top byte consists of a few other bit fields */ ++ ++ u16 mtu[8]; ++ u8 res3[0xa8 - 0x94]; ++ u32 wrrtablebase; /* top byte is reserved */ ++ u8 res4[0xc0 - 0xac]; + } __packed; + + /* structure representing Extended Filtering Global Parameters in PRAM */ +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +index 7a0281a36c281..a5ee3d328f3d6 100644 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +@@ -586,11 +586,6 @@ static const struct net_device_ops netxen_netdev_ops = { + #endif + }; + +-static inline bool netxen_function_zero(struct pci_dev *pdev) +-{ +- return (PCI_FUNC(pdev->devfn) == 0) ? true : false; +-} +- + static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, + u32 mode) + { +@@ -686,7 +681,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter) + netxen_initialize_interrupt_registers(adapter); + netxen_set_msix_bit(pdev, 0); + +- if (netxen_function_zero(pdev)) { ++ if (adapter->portnum == 0) { + if (!netxen_setup_msi_interrupts(adapter, num_msix)) + netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); + else +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index 99ca9526dd65a..d418542924e16 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -1128,7 +1128,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + * accordingly. Otherwise, we should check here. 
+ */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) +- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); ++ delayed_ndp_size = ctx->max_ndp_size + ++ max_t(u32, ++ ctx->tx_ndp_modulus, ++ ctx->tx_modulus + ctx->tx_remainder) - 1; + else + delayed_ndp_size = 0; + +@@ -1281,7 +1284,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && + skb_out->len > ctx->min_tx_pkt) { + padding_count = ctx->tx_max - skb_out->len; +- memset(skb_put(skb_out, padding_count), 0, padding_count); ++ if (!WARN_ON(padding_count > ctx->tx_max)) ++ memset(skb_put(skb_out, padding_count), 0, padding_count); + } else if (skb_out->len < ctx->tx_max && + (skb_out->len % dev->maxpacket) == 0) { + *skb_put(skb_out, 1) = 0; /* force short packet */ +diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c +index 9ccbdf1431063..dc5b8e69f8e86 100644 +--- a/drivers/net/usb/rndis_host.c ++++ b/drivers/net/usb/rndis_host.c +@@ -398,7 +398,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) + reply_len = sizeof *phym; + retval = rndis_query(dev, intf, u.buf, + RNDIS_OID_GEN_PHYSICAL_MEDIUM, +- 0, (void **) &phym, &reply_len); ++ reply_len, (void **)&phym, &reply_len); + if (retval != 0 || !phym) { + /* OID is optional so don't fail here. */ + phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED); +diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c +index 1c57ce64abba0..e383c63689157 100644 +--- a/drivers/spi/spi-cadence.c ++++ b/drivers/spi/spi-cadence.c +@@ -118,6 +118,7 @@ struct cdns_spi { + void __iomem *regs; + struct clk *ref_clk; + struct clk *pclk; ++ unsigned int clk_rate; + u32 speed_hz; + const u8 *txbuf; + u8 *rxbuf; +@@ -253,7 +254,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, + u32 ctrl_reg, baud_rate_val; + unsigned long frequency; + +- frequency = clk_get_rate(xspi->ref_clk); ++ frequency = xspi->clk_rate; + + ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); + +@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev) + master->auto_runtime_pm = true; + master->mode_bits = SPI_CPOL | SPI_CPHA; + ++ xspi->clk_rate = clk_get_rate(xspi->ref_clk); + /* Set to default valid value */ +- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; ++ master->max_speed_hz = xspi->clk_rate / 4; + xspi->speed_hz = master->max_speed_hz; + + master->bits_per_word_mask = SPI_BPW_MASK(8); +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c +index 2a14c71739d7d..c1c7df26d62f2 100644 +--- a/drivers/usb/host/ohci-hcd.c ++++ b/drivers/usb/host/ohci-hcd.c +@@ -100,7 +100,7 @@ static void io_watchdog_func(unsigned long _ohci); + + + /* Some boards misreport power switching/overcurrent */ +-static bool distrust_firmware = true; ++static bool distrust_firmware; + module_param (distrust_firmware, bool, 0); + MODULE_PARM_DESC (distrust_firmware, + "true to distrust firmware power/overcurrent setup"); +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c +index 2016f5a5d3cf1..75fff707beb6a 100644 +--- a/fs/ext4/ioctl.c ++++ b/fs/ext4/ioctl.c +@@ -799,7 +799,10 @@ resizefs_out: + err = ext4_journal_get_write_access(handle, sbi->s_sbh); + if (err) + goto pwsalt_err_journal; ++ lock_buffer(sbi->s_sbh); + generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); ++ ext4_superblock_csum_set(sb); ++ unlock_buffer(sbi->s_sbh); + err = ext4_handle_dirty_metadata(handle, NULL, + sbi->s_sbh); + pwsalt_err_journal: +diff --git a/fs/ext4/namei.c 
b/fs/ext4/namei.c +index 8ded38ac4cdef..6224b0e6fb643 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -3421,8 +3421,6 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent, + return retval; + } + } +- brelse(ent->bh); +- ent->bh = NULL; + + return 0; + } +@@ -3635,6 +3633,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + } + } + ++ old_file_type = old.de->file_type; + if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) + ext4_handle_sync(handle); + +@@ -3662,7 +3661,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + force_reread = (new.dir->i_ino == old.dir->i_ino && + ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); + +- old_file_type = old.de->file_type; + if (whiteout) { + /* + * Do this before adding a new entry, so the old entry is sure +@@ -3734,15 +3732,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + retval = 0; + + end_rename: +- brelse(old.dir_bh); +- brelse(old.bh); +- brelse(new.bh); + if (whiteout) { +- if (retval) ++ if (retval) { ++ ext4_setent(handle, &old, ++ old.inode->i_ino, old_file_type); + drop_nlink(whiteout); ++ } + unlock_new_inode(whiteout); + iput(whiteout); ++ + } ++ brelse(old.dir_bh); ++ brelse(old.bh); ++ brelse(new.bh); + if (handle) + ext4_journal_stop(handle); + return retval; +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h +index 52ea41bce0383..e0f0397fc7ce2 100644 +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -572,12 +572,14 @@ extern int nfs4_test_session_trunk(struct rpc_clnt *, + + static inline struct inode *nfs_igrab_and_active(struct inode *inode) + { +- inode = igrab(inode); +- if (inode != NULL && !nfs_sb_active(inode->i_sb)) { +- iput(inode); +- inode = NULL; ++ struct super_block *sb = inode->i_sb; ++ ++ if (sb && nfs_sb_active(sb)) { ++ if (igrab(inode)) ++ return inode; ++ nfs_sb_deactive(sb); + } +- return inode; ++ return NULL; + } + + static inline void nfs_iput_and_deactive(struct inode *inode) +diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c +index 7e50248ca432c..93c6fb53cc0ed 100644 +--- a/fs/nfsd/nfs3xdr.c ++++ b/fs/nfsd/nfs3xdr.c +@@ -822,9 +822,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, + if (isdotent(name, namlen)) { + if (namlen == 2) { + dchild = dget_parent(dparent); +- /* filesystem root - cannot return filehandle for ".." */ ++ /* ++ * Don't return filehandle for ".." 
if we're at ++ * the filesystem or export root: ++ */ + if (dchild == dparent) + goto out; ++ if (dparent == exp->ex_path.dentry) ++ goto out; + } else + dchild = dget(dparent); + } else +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index 5670bb9788bb4..192b045cc56ec 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -734,6 +734,13 @@ static inline int acpi_device_modalias(struct device *dev, + return -ENODEV; + } + ++static inline struct platform_device * ++acpi_create_platform_device(struct acpi_device *adev, ++ struct property_entry *properties) ++{ ++ return NULL; ++} ++ + static inline bool acpi_dma_supported(struct acpi_device *adev) + { + return false; +diff --git a/include/linux/input.h b/include/linux/input.h +index a65e3b24fb183..fb5e23c7ed988 100644 +--- a/include/linux/input.h ++++ b/include/linux/input.h +@@ -529,6 +529,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, + + int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); + int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); ++int input_ff_flush(struct input_dev *dev, struct file *file); + + int input_ff_create_memless(struct input_dev *dev, void *data, + int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index b469d099dc5f6..52b5e0e026d60 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -3767,7 +3767,7 @@ retry: + * So we need to block hugepage fault by PG_hwpoison bit check. + */ + if (unlikely(PageHWPoison(page))) { +- ret = VM_FAULT_HWPOISON | ++ ret = VM_FAULT_HWPOISON_LARGE | + VM_FAULT_SET_HINDEX(hstate_index(h)); + goto backout_unlocked; + } +diff --git a/mm/slub.c b/mm/slub.c +index 51a73d2d1082e..7ccfc043c28e2 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1833,7 +1833,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, + + t = acquire_slab(s, n, page, object == NULL, &objects); + if (!t) +- break; ++ continue; /* cmpxchg raced */ + + available += objects; + if (!object) { +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 026f4525063c1..5582a4ed7c2fb 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -489,13 +489,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb); + struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + gfp_t gfp_mask) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + struct sk_buff *skb; + void *data; + + len += NET_SKB_PAD + NET_IP_ALIGN; + +- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || ++ /* If requested length is either too small or too big, ++ * we use kmalloc() for skb->head allocation. 
++ */ ++ if (len <= SKB_WITH_OVERHEAD(1024) || ++ len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); + if (!skb) +@@ -503,6 +507,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + goto skb_success; + } + ++ nc = this_cpu_ptr(&napi_alloc_cache); + len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + len = SKB_DATA_ALIGN(len); + +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c +index a1116701287fb..2dbf5a0faad32 100644 +--- a/net/dcb/dcbnl.c ++++ b/net/dcb/dcbnl.c +@@ -1726,6 +1726,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh) + fn = &reply_funcs[dcb->cmd]; + if (!fn->cb) + return -EOPNOTSUPP; ++ if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN)) ++ return -EPERM; + + if (!tb[DCB_ATTR_IFNAME]) + return -EINVAL; +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index df705303992e0..a4a5863e482cd 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -1583,8 +1583,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, + } + + #ifdef CONFIG_IPV6_SIT_6RD +- if (ipip6_netlink_6rd_parms(data, &ip6rd)) ++ if (ipip6_netlink_6rd_parms(data, &ip6rd)) { + err = ipip6_tunnel_update_6rd(nt, &ip6rd); ++ if (err < 0) ++ unregister_netdevice_queue(dev, NULL); ++ } + #endif + + return err; +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c +index 5f446cd9f3fd4..0dbd17137dee6 100644 +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -458,6 +458,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write, + { + int ret; + ++ /* module_param hashsize could have changed value */ ++ nf_conntrack_htable_size_user = nf_conntrack_htable_size; ++ + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret < 0 || !write) + return ret; +diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c +index fa475b02bdceb..16efd04d4a801 100644 +--- a/net/rxrpc/key.c ++++ b/net/rxrpc/key.c +@@ -1106,7 +1106,7 @@ static long rxrpc_read(const struct key *key, + default: /* we have a ticket we can't encode */ + pr_err("Unsupported key token type (%u)\n", + token->security_index); +- continue; ++ return -ENOPKG; + } + + _debug("token[%u]: toksize=%u", ntoks, toksize); +@@ -1226,7 +1226,9 @@ static long rxrpc_read(const struct key *key, + break; + + default: +- break; ++ pr_err("Unsupported key token type (%u)\n", ++ token->security_index); ++ return -ENOPKG; + } + + ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==, +diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c +index 8391c27855501..7404f02702a1c 100644 +--- a/net/sunrpc/addr.c ++++ b/net/sunrpc/addr.c +@@ -184,7 +184,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf, + scope_id = dev->ifindex; + dev_put(dev); + } else { +- if (kstrtou32(p, 10, &scope_id) == 0) { ++ if (kstrtou32(p, 10, &scope_id) != 0) { + kfree(p); + return 0; + } +diff --git a/net/tipc/link.c b/net/tipc/link.c +index c7406c1fdc14b..06327f78f2032 100644 +--- a/net/tipc/link.c ++++ b/net/tipc/link.c +@@ -877,9 +877,8 @@ void tipc_link_reset(struct tipc_link *l) + int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + struct sk_buff_head *xmitq) + { +- struct tipc_msg *hdr = buf_msg(skb_peek(list)); + unsigned int maxwin = l->window; +- unsigned int i, imp = msg_importance(hdr); ++ unsigned int i; + unsigned int mtu = l->mtu; + u16 ack = l->rcv_nxt - 1; + u16 seqno = l->snd_nxt; +@@ 
-888,7 +887,13 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + struct sk_buff_head *backlogq = &l->backlogq; + struct sk_buff *skb, *_skb, *bskb; + int pkt_cnt = skb_queue_len(list); ++ struct tipc_msg *hdr; ++ int imp; ++ ++ if (pkt_cnt <= 0) ++ return 0; + ++ imp = msg_importance(hdr); + /* Match msg importance against this and all higher backlog limits: */ + if (!skb_queue_empty(backlogq)) { + for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { +@@ -896,6 +901,8 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + return link_schedule_user(l, list); + } + } ++ ++ hdr = buf_msg(skb_peek(list)); + if (unlikely(msg_size(hdr) > mtu)) { + skb_queue_purge(list); + return -EMSGSIZE; +diff --git a/security/lsm_audit.c b/security/lsm_audit.c +index 44a20c2184092..cc4000dba600a 100644 +--- a/security/lsm_audit.c ++++ b/security/lsm_audit.c +@@ -277,7 +277,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, + struct inode *inode; + + audit_log_format(ab, " name="); ++ spin_lock(&a->u.dentry->d_lock); + audit_log_untrustedstring(ab, a->u.dentry->d_name.name); ++ spin_unlock(&a->u.dentry->d_lock); + + inode = d_backing_inode(a->u.dentry); + if (inode) { +@@ -295,8 +297,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, + dentry = d_find_alias(inode); + if (dentry) { + audit_log_format(ab, " name="); +- audit_log_untrustedstring(ab, +- dentry->d_name.name); ++ spin_lock(&dentry->d_lock); ++ audit_log_untrustedstring(ab, dentry->d_name.name); ++ spin_unlock(&dentry->d_lock); + dput(dentry); + } + audit_log_format(ab, " dev="); +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 41a29bb215199..11edb6f6bdafe 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -2349,6 +2349,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w) + enum snd_soc_dapm_direction dir; + + list_del(&w->list); ++ list_del(&w->dirty); + /* + * remove source and sink paths associated to this widget. + * While removing the path, remove reference to it from both diff --git a/patch/kernel/meson64-legacy/patch-4.9.253-254.patch b/patch/kernel/meson64-legacy/patch-4.9.253-254.patch new file mode 100644 index 0000000000..b812f33139 --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.253-254.patch @@ -0,0 +1,571 @@ +diff --git a/Makefile b/Makefile +index 62a07bdcfacb7..ea9ea119460d4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 253 ++SUBLEVEL = 254 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig +index 78bc97b1d0270..ac834e9e0e0a4 100644 +--- a/arch/sh/drivers/dma/Kconfig ++++ b/arch/sh/drivers/dma/Kconfig +@@ -62,8 +62,7 @@ config PVR2_DMA + + config G2_DMA + tristate "G2 Bus DMA support" +- depends on SH_DREAMCAST +- select SH_DMA_API ++ depends on SH_DREAMCAST && SH_DMA_API + help + This enables support for the DMA controller for the Dreamcast's + G2 bus. 
Drivers that want this will generally enable this on +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 89b163351e642..7be7acd6a5409 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -35,6 +35,8 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse + KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) + KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) ++# Disable relocation relaxation in case the link is not PIE. ++KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index f49e2b7880ac2..5aa4a01f698fe 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -585,6 +585,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, + if (!device) + return -EINVAL; + ++ *device = NULL; ++ + status = acpi_get_data_full(handle, acpi_scan_drop_device, + (void **)device, callback); + if (ACPI_FAILURE(status) || !*device) { +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +index 7deb81b6dbac6..4b571cc6bc70f 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) + nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", + image.base, image.type, image.size); + +- if (!shadow_fetch(bios, mthd, image.size)) { ++ if (!shadow_fetch(bios, mthd, image.base + image.size)) { + nvkm_debug(subdev, "%08x: fetch failed\n", image.base); + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +index a5783f4d972e3..c49795e779be4 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +@@ -33,7 +33,7 @@ static void + gm200_i2c_aux_fini(struct gm200_i2c_aux *aux) + { + struct nvkm_device *device = aux->base.pad->i2c->subdev.device; +- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000); ++ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000); + } + + static int +@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux) + AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl); + return -EBUSY; + } +- } while (ctrl & 0x03010000); ++ } while (ctrl & 0x07010000); + + /* set some magic, and wait up to 1ms for it to appear */ +- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq); ++ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq); + timeout = 1000; + do { + ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50)); +@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux) + gm200_i2c_aux_fini(aux); + return -EBUSY; + } +- } while ((ctrl & 0x03000000) != urep); ++ } while ((ctrl & 0x07000000) != urep); + + return 0; + } +diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c +index 3da7b673aab25..3957ce678265d 100644 +--- a/drivers/hwtracing/stm/heartbeat.c ++++ b/drivers/hwtracing/stm/heartbeat.c +@@ -72,7 +72,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data) + + static int stm_heartbeat_init(void) + { +- int i, ret = -ENOMEM; ++ int i, ret; + + if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) + return -EINVAL; +@@ -80,8 
+80,10 @@ static int stm_heartbeat_init(void) + for (i = 0; i < nr_devs; i++) { + stm_heartbeat[i].data.name = + kasprintf(GFP_KERNEL, "heartbeat.%d", i); +- if (!stm_heartbeat[i].data.name) ++ if (!stm_heartbeat[i].data.name) { ++ ret = -ENOMEM; + goto fail_unregister; ++ } + + stm_heartbeat[i].data.nr_chans = 1; + stm_heartbeat[i].data.link = stm_heartbeat_link; +diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c +index 5e63b17f935d5..e5ad3f9cd372f 100644 +--- a/drivers/i2c/busses/i2c-octeon-core.c ++++ b/drivers/i2c/busses/i2c-octeon-core.c +@@ -383,7 +383,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, + + data[i] = octeon_i2c_data_read(i2c); + if (recv_len && i == 0) { +- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) ++ if (data[i] > I2C_SMBUS_BLOCK_MAX) + return -EPROTO; + length += data[i]; + } +diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c +index 788b3d6fd1cc9..b15a02f502df9 100644 +--- a/drivers/iio/dac/ad5504.c ++++ b/drivers/iio/dac/ad5504.c +@@ -189,9 +189,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, + return ret; + + if (pwr_down) +- st->pwr_down_mask |= (1 << chan->channel); +- else + st->pwr_down_mask &= ~(1 << chan->channel); ++ else ++ st->pwr_down_mask |= (1 << chan->channel); + + ret = ad5504_spi_write(st, AD5504_ADDR_CTRL, + AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 62e3dc19b6099..5d120c3cee57d 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -396,14 +396,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + { + int r; + dev_t dev; ++ unsigned int major, minor; ++ char dummy; + struct dm_dev_internal *dd; + struct dm_table *t = ti->table; + + BUG_ON(!t); + +- dev = dm_get_dev_t(path); +- if (!dev) +- return -ENODEV; ++ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { ++ /* Extract the major/minor numbers */ ++ dev = MKDEV(major, minor); ++ if (MAJOR(dev) != major || MINOR(dev) != minor) ++ return -EOVERFLOW; ++ } else { ++ dev = dm_get_dev_t(path); ++ if (!dev) ++ return -ENODEV; ++ } + + dd = find_device(&t->devices, dev); + if (!dd) { +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index c7508d9a4c6fb..164078609f98e 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -555,11 +555,11 @@ static void can_restart(struct net_device *dev) + } + cf->can_id |= CAN_ERR_RESTARTED; + +- netif_rx_ni(skb); +- + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + ++ netif_rx_ni(skb); ++ + restart: + netdev_dbg(dev, "restarted\n"); + priv->can_stats.restarts++; +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index c387be5c926b7..5ec0042bc3840 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -946,7 +946,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, + if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) + return -EOPNOTSUPP; + +- if (vlan->vid_end > dev->num_vlans) ++ if (vlan->vid_end >= dev->num_vlans) + return -ERANGE; + + b53_enable_vlan(dev, true); +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index 6f8d4810ce979..7458b1a70e5d6 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -2411,10 +2411,10 @@ static int sh_eth_close(struct net_device *ndev) + /* Free all the skbuffs in the Rx queue and the DMA buffer. 
*/ + sh_eth_ring_free(ndev); + +- pm_runtime_put_sync(&mdp->pdev->dev); +- + mdp->is_opened = 0; + ++ pm_runtime_put(&mdp->pdev->dev); ++ + return 0; + } + +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index ad80e4223c2d3..a767d942bfca5 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -4552,19 +4552,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) + { + struct Scsi_Host *host; + struct ufs_hba *hba; +- unsigned int tag; + u32 pos; + int err; +- u8 resp = 0xF; +- struct ufshcd_lrb *lrbp; ++ u8 resp = 0xF, lun; + unsigned long flags; + + host = cmd->device->host; + hba = shost_priv(host); +- tag = cmd->request->tag; + +- lrbp = &hba->lrb[tag]; +- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); ++ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); ++ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; +@@ -4573,7 +4570,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) + + /* clear the commands that were pending for corresponding LUN */ + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { +- if (hba->lrb[pos].lun == lrbp->lun) { ++ if (hba->lrb[pos].lun == lun) { + err = ufshcd_clear_cmd(hba, pos); + if (err) + break; +diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig +index 0d7b8c9f72fda..778df4badf888 100644 +--- a/drivers/usb/gadget/udc/bdc/Kconfig ++++ b/drivers/usb/gadget/udc/bdc/Kconfig +@@ -14,7 +14,7 @@ if USB_BDC_UDC + comment "Platform Support" + config USB_BDC_PCI + tristate "BDC support for PCIe based platforms" +- depends on PCI ++ depends on PCI && BROKEN + default USB_BDC_UDC + help + Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform. 
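A few hunks up, the dm-table change lets dm_get_device() accept a literal "major:minor" string again instead of insisting on a path lookup. Both halves of the idiom matter: the trailing %c in the sscanf format rejects strings with garbage after the numbers, and re-deriving the two values from the packed dev_t catches inputs that do not survive the MKDEV() encoding. A self-contained sketch of the same pattern (the helper name is mine, not part of device-mapper):

#include <linux/kdev_t.h>
#include <linux/kernel.h>

static int parse_dev_string(const char *str, dev_t *out)
{
	unsigned int major, minor;
	char dummy;
	dev_t dev;

	/* "%u:%u%c" must match exactly two items: a third match means
	 * trailing characters, fewer means not "maj:min" at all */
	if (sscanf(str, "%u:%u%c", &major, &minor, &dummy) != 2)
		return -EINVAL;

	dev = MKDEV(major, minor);
	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EOVERFLOW;	/* value truncated by dev_t packing */

	*out = dev;
	return 0;
}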
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 99c7cf4822c33..34ad964d54d11 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -1424,10 +1424,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t n) + { + struct usb_udc *udc = container_of(dev, struct usb_udc, dev); ++ ssize_t ret; + ++ mutex_lock(&udc_lock); + if (!udc->driver) { + dev_err(dev, "soft-connect without a gadget driver\n"); +- return -EOPNOTSUPP; ++ ret = -EOPNOTSUPP; ++ goto out; + } + + if (sysfs_streq(buf, "connect")) { +@@ -1439,10 +1442,14 @@ static ssize_t usb_udc_softconn_store(struct device *dev, + usb_gadget_udc_stop(udc); + } else { + dev_err(dev, "unsupported command '%s'\n", buf); +- return -EINVAL; ++ ret = -EINVAL; ++ goto out; + } + +- return n; ++ ret = n; ++out: ++ mutex_unlock(&udc_lock); ++ return ret; + } + static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store); + +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c +index b3d6cc1a8021b..52c4176afae96 100644 +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -587,6 +587,7 @@ static int ehci_run (struct usb_hcd *hcd) + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp; + u32 hcc_params; ++ int rc; + + hcd->uses_new_polling = 1; + +@@ -642,9 +643,20 @@ static int ehci_run (struct usb_hcd *hcd) + down_write(&ehci_cf_port_reset_rwsem); + ehci->rh_state = EHCI_RH_RUNNING; + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); ++ ++ /* Wait until HC become operational */ + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + msleep(5); ++ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000); ++ + up_write(&ehci_cf_port_reset_rwsem); ++ ++ if (rc) { ++ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n", ++ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc); ++ return rc; ++ } ++ + ehci->last_periodic_enable = ktime_get_real(); + + temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c +index 3a3926ff7b857..643c427bb2773 100644 +--- a/drivers/usb/host/ehci-hub.c ++++ b/drivers/usb/host/ehci-hub.c +@@ -350,6 +350,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) + + unlink_empty_async_suspended(ehci); + ++ /* Some Synopsys controllers mistakenly leave IAA turned on */ ++ ehci_writel(ehci, STS_IAA, &ehci->regs->status); ++ + /* Any IAA cycle that started before the suspend is now invalid */ + end_iaa_cycle(ehci); + ehci_handle_start_intr_unlinks(ehci); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 1468dca77facf..6e1d65fda97e0 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -2851,6 +2851,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = cpu_to_le32(field3); ++ /* make sure TRB is fully written before giving it to the controller */ ++ wmb(); + trb->field[3] = cpu_to_le32(field4); + inc_enq(xhci, ring, more_trbs_coming); + } +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c +index 97d57a94776ac..040c43097d558 100644 +--- a/drivers/usb/host/xhci-tegra.c ++++ b/drivers/usb/host/xhci-tegra.c +@@ -579,6 +579,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, + enable); + if (err < 0) + break; ++ ++ /* ++ * 
wait 500us for LFPS detector to be disabled before ++ * sending ACK ++ */ ++ if (!enable) ++ usleep_range(500, 1000); + } + + if (err < 0) { +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h +index 61650c1830d45..d830eddacdc60 100644 +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -149,6 +149,12 @@ + + #if GCC_VERSION < 30200 + # error Sorry, your compiler is too old - please upgrade it. ++#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 && !defined(__clang__) ++/* ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 ++ * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk ++ */ ++# error Sorry, your version of GCC is too old - please use 5.1 or newer. + #endif + + #if GCC_VERSION < 30300 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 335c00209f746..78bdfbefd996c 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -1732,12 +1732,11 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, + * unsigned shift, so make the appropriate casts. + */ + if (min_val < 0 || dst_reg->min_value < 0) +- dst_reg->min_value = BPF_REGISTER_MIN_RANGE; ++ reset_reg_range_values(regs, insn->dst_reg); + else +- dst_reg->min_value = +- (u64)(dst_reg->min_value) >> min_val; ++ dst_reg->min_value = (u64)(dst_reg->min_value) >> max_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) +- dst_reg->max_value >>= max_val; ++ dst_reg->max_value >>= min_val; + break; + default: + reset_reg_range_values(regs, insn->dst_reg); +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 55f60d2edc3fb..4ec8bf634f89f 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -4289,6 +4289,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return; ++ /* prevent another thread from changing buffer sizes */ ++ mutex_lock(&buffer->mutex); + + atomic_inc(&buffer->resize_disabled); + atomic_inc(&cpu_buffer->record_disabled); +@@ -4312,6 +4314,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) + + atomic_dec(&cpu_buffer->record_disabled); + atomic_dec(&buffer->resize_disabled); ++ ++ mutex_unlock(&buffer->mutex); + } + EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); + +diff --git a/mm/slub.c b/mm/slub.c +index 7ccfc043c28e2..5c48a8a0524a2 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -5648,10 +5648,8 @@ static int sysfs_slab_add(struct kmem_cache *s) + + s->kobj.kset = cache_kset(s); + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); +- if (err) { +- kobject_put(&s->kobj); ++ if (err) + goto out; +- } + + err = sysfs_create_group(&s->kobj, &slab_attr_group); + if (err) +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 5582a4ed7c2fb..79034fb861b52 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -428,7 +428,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, + + len += NET_SKB_PAD; + +- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || ++ /* If requested length is either too small or too big, ++ * we use kmalloc() for skb->head allocation. 
++ */ ++ if (len <= SKB_WITH_OVERHEAD(1024) || ++ len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); + if (!skb) +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c +index 78cc64eddfc18..32a363465e0a4 100644 +--- a/net/ipv4/netfilter/ipt_rpfilter.c ++++ b/net/ipv4/netfilter/ipt_rpfilter.c +@@ -92,7 +92,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) + flow.saddr = rpfilter_get_saddr(iph->daddr); + flow.flowi4_oif = 0; + flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; +- flow.flowi4_tos = RT_TOS(iph->tos); ++ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK; + flow.flowi4_scope = RT_SCOPE_UNIVERSE; + + return rpfilter_lookup_reverse(par->net, &flow, par->in, info->flags) ^ invert; +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index a4c00242a90b3..6119ab33a56ea 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -2285,6 +2285,7 @@ static void addrconf_add_mroute(struct net_device *dev) + .fc_dst_len = 8, + .fc_flags = RTF_UP, + .fc_nlinfo.nl_net = dev_net(dev), ++ .fc_protocol = RTPROT_KERNEL, + }; + + ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c +index ab66e2b38e662..70ee78d8f8fb8 100644 +--- a/net/sched/cls_tcindex.c ++++ b/net/sched/cls_tcindex.c +@@ -307,9 +307,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, + if (tb[TCA_TCINDEX_MASK]) + cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); + +- if (tb[TCA_TCINDEX_SHIFT]) ++ if (tb[TCA_TCINDEX_SHIFT]) { + cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); +- ++ if (cp->shift > 16) { ++ err = -EINVAL; ++ goto errout; ++ } ++ } + if (!cp->hash) { + /* Hash not specified, use perfect hash if the upper limit + * of the hashing index is below the threshold. 
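The cls_tcindex hunk just above caps the netlink-supplied shift at 16 before it is stored. The key the classifier hashes is derived from the 16-bit skb->tc_index, so a larger shift is at best a constant zero, and an unchecked count can reach the width of the promoted operand, an undefined shift that UBSAN reports. A minimal sketch of the key computation with the same defensive bound (the helper is illustrative, not the scheduler's code):

#include <linux/bug.h>
#include <linux/types.h>

/* tcindex computes its lookup key roughly as (tc_index & mask) >> shift;
 * tc_index is a u16, so 16 is the largest shift that can ever select
 * anything, and an unvalidated value could become an undefined shift
 * after integer promotion. */
static u16 tcindex_key(u16 tc_index, u16 mask, u32 shift)
{
	if (WARN_ON_ONCE(shift > 16))
		return 0;	/* such a config is now rejected with -EINVAL */
	return (tc_index & mask) >> shift;
}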
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c +index c939459172353..247b68790a522 100644 +--- a/sound/core/seq/oss/seq_oss_synth.c ++++ b/sound/core/seq/oss/seq_oss_synth.c +@@ -624,7 +624,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in + + if (info->is_midi) { + struct midi_info minf; +- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf); ++ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf)) ++ return -ENXIO; + inf->synth_type = SYNTH_TYPE_MIDI; + inf->synth_subtype = 0; + inf->nr_voices = 16; +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c +index fc30d1e8aa76a..9dd104c308e1d 100644 +--- a/sound/pci/hda/patch_via.c ++++ b/sound/pci/hda/patch_via.c +@@ -135,6 +135,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec) + spec->codec_type = VT1708S; + spec->gen.indep_hp = 1; + spec->gen.keep_eapd_on = 1; ++ spec->gen.dac_min_mute = 1; + spec->gen.pcm_playback_hook = via_playback_pcm_hook; + spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; + codec->power_save_node = 1; +diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c +index 11d0cc2b0e390..060da95770416 100644 +--- a/sound/soc/intel/boards/haswell.c ++++ b/sound/soc/intel/boards/haswell.c +@@ -197,6 +197,7 @@ static struct platform_driver haswell_audio = { + .probe = haswell_audio_probe, + .driver = { + .name = "haswell-audio", ++ .pm = &snd_soc_pm_ops, + }, + }; + diff --git a/patch/kernel/meson64-legacy/patch-4.9.254-255.patch b/patch/kernel/meson64-legacy/patch-4.9.254-255.patch new file mode 100644 index 0000000000..57ed959453 --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.254-255.patch @@ -0,0 +1,1579 @@ +diff --git a/Makefile b/Makefile +index ea9ea119460d4..4780b5f80b2a8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 254 ++SUBLEVEL = 255 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S +index 7d84b617af481..99d2e296082c7 100644 +--- a/arch/arm/mach-imx/suspend-imx6.S ++++ b/arch/arm/mach-imx/suspend-imx6.S +@@ -73,6 +73,7 @@ + #define MX6Q_CCM_CCR 0x0 + + .align 3 ++ .arm + + .macro sync_l2_cache + +diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c +index 84ae4dd261caf..cafdaabf062fc 100644 +--- a/arch/x86/kvm/pmu_intel.c ++++ b/arch/x86/kvm/pmu_intel.c +@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { + [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, + [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, +- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, ++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, + }; + + /* mapping between fixed pmc index and intel_arch_events array */ +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 3c0f9be107e42..98fb3a7240371 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -97,6 +97,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); + + static void update_cr8_intercept(struct kvm_vcpu *vcpu); + static void process_nmi(struct kvm_vcpu *vcpu); ++static void process_smi(struct kvm_vcpu *vcpu); + static void enter_smm(struct kvm_vcpu *vcpu); + static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); + +@@ -3199,6 +3200,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, + struct kvm_vcpu_events *events) + { + 
process_nmi(vcpu); ++ ++ if (kvm_check_request(KVM_REQ_SMI, vcpu)) ++ process_smi(vcpu); ++ + events->exception.injected = + vcpu->arch.exception.pending && + !kvm_exception_is_soft(vcpu->arch.exception.nr); +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c +index 98b513d049f6a..fb610ad495f10 100644 +--- a/drivers/acpi/device_sysfs.c ++++ b/drivers/acpi/device_sysfs.c +@@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + +- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); +- if (len < 0) +- return len; +- +- env->buflen += len; +- if (!adev->data.of_compatible) +- return 0; +- +- if (len > 0 && add_uevent_var(env, "MODALIAS=")) +- return -ENOMEM; +- +- len = create_of_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); ++ if (adev->data.of_compatible) ++ len = create_of_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); ++ else ++ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c +index bb45eb22ba1f5..36bdb04f8f018 100644 +--- a/drivers/infiniband/hw/cxgb4/qp.c ++++ b/drivers/infiniband/hw/cxgb4/qp.c +@@ -1976,7 +1976,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; + init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; + init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; +- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; ++ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; + init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; + init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; + return 0; +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 977070ce4fe97..9ad5a7019abfd 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -1024,8 +1024,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + { + struct intel_iommu *iommu; + u32 ver, sts; +- int agaw = 0; +- int msagaw = 0; ++ int agaw = -1; ++ int msagaw = -1; + int err; + + if (!drhd->reg_base_addr) { +@@ -1050,17 +1050,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + err = -EINVAL; +- agaw = iommu_calculate_agaw(iommu); +- if (agaw < 0) { +- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ if (cap_sagaw(iommu->cap) == 0) { ++ pr_info("%s: No supported address widths. 
Not attempting DMA translation.\n", ++ iommu->name); ++ drhd->ignored = 1; + } +- msagaw = iommu_calculate_max_sagaw(iommu); +- if (msagaw < 0) { +- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ ++ if (!drhd->ignored) { ++ agaw = iommu_calculate_agaw(iommu); ++ if (agaw < 0) { ++ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ } ++ } ++ if (!drhd->ignored) { ++ msagaw = iommu_calculate_max_sagaw(iommu); ++ if (msagaw < 0) { ++ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ agaw = -1; ++ } + } + iommu->agaw = agaw; + iommu->msagaw = msagaw; +@@ -1087,7 +1098,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + + raw_spin_lock_init(&iommu->register_lock); + +- if (intel_iommu_enabled) { ++ if (intel_iommu_enabled && !drhd->ignored) { + iommu->iommu_dev = iommu_device_create(NULL, iommu, + intel_iommu_groups, + "%s", iommu->name); +@@ -1099,6 +1110,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + drhd->iommu = iommu; ++ iommu->drhd = drhd; + + return 0; + +@@ -1113,7 +1125,8 @@ error: + + static void free_iommu(struct intel_iommu *iommu) + { +- iommu_device_destroy(iommu->iommu_dev); ++ if (intel_iommu_enabled && !iommu->drhd->ignored) ++ iommu_device_destroy(iommu->iommu_dev); + + if (iommu->irq) { + if (iommu->pr_irq) { +diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c +index 431123b048a27..573a5a80b23c6 100644 +--- a/drivers/leds/led-triggers.c ++++ b/drivers/leds/led-triggers.c +@@ -283,14 +283,15 @@ void led_trigger_event(struct led_trigger *trig, + enum led_brightness brightness) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) + led_set_brightness(led_cdev, brightness); +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + EXPORT_SYMBOL_GPL(led_trigger_event); + +@@ -301,11 +302,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + int invert) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { + if (oneshot) + led_blink_set_oneshot(led_cdev, delay_on, delay_off, +@@ -313,7 +315,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + else + led_blink_set(led_cdev, delay_on, delay_off); + } +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + + void led_trigger_blink(struct led_trigger *trig, +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 164078609f98e..ea38b67d0b737 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -1017,7 +1017,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode cm = {.flags = priv->ctrlmode}; +- struct can_berr_counter bec; ++ struct can_berr_counter bec = { }; + enum can_state state = priv->state; + + if (priv->do_get_state) +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index a571024882d7c..1c0aec70ee5d2 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ 
-942,6 +942,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ ++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +index e1287c3421165..71edbf7a42ed4 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +@@ -1909,7 +1909,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + + while (offs < dwords) { + /* limit the time we spin here under lock to 1/2s */ +- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC); ++ unsigned long end = jiffies + HZ / 2; ++ bool resched = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, +@@ -1920,14 +1921,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + HBUS_TARG_MEM_RDAT); + offs++; + +- /* calling ktime_get is expensive so +- * do it once in 128 reads +- */ +- if (offs % 128 == 0 && ktime_after(ktime_get(), +- timeout)) ++ if (time_after(jiffies, end)) { ++ resched = true; + break; ++ } + } + iwl_trans_release_nic_access(trans, &flags); ++ ++ if (resched) ++ cond_resched(); + } else { + return -EBUSY; + } +diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c +index 56cad16e70ca6..1b68aef03fe2e 100644 +--- a/drivers/net/wireless/mediatek/mt7601u/dma.c ++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c +@@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) + + if (new_p) { + /* we have one extra ref from the allocator */ +- __free_pages(e->p, MT_RX_ORDER); +- ++ put_page(e->p); + e->p = new_p; + } + } +@@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + } + + e = &q->e[q->end]; +- e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt7601u_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); +@@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + + q->end = (q->end + 1) % q->entries; + q->used++; ++ e->skb = skb; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); +diff --git a/fs/exec.c b/fs/exec.c +index cd5da140f94cb..319a1f5732fa9 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1021,7 +1021,7 @@ static int exec_mmap(struct mm_struct *mm) + /* Notify parent that we're no longer interested in the old VM */ + tsk = current; + old_mm = current->mm; +- mm_release(tsk, old_mm); ++ exec_mm_release(tsk, old_mm); + + if (old_mm) { + sync_mm_rss(old_mm); +diff --git a/include/linux/compat.h b/include/linux/compat.h +index fab35daf87596..6b9d38a7adcaf 100644 +--- a/include/linux/compat.h ++++ b/include/linux/compat.h +@@ -311,8 +311,6 @@ struct compat_kexec_segment; + struct compat_mq_attr; + struct compat_msgbuf; + +-extern void compat_exit_robust_list(struct task_struct *curr); +- + asmlinkage long + compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +diff --git a/include/linux/futex.h 
b/include/linux/futex.h +index c015fa91e7cce..0f294ae63c78c 100644 +--- a/include/linux/futex.h ++++ b/include/linux/futex.h +@@ -1,6 +1,8 @@ + #ifndef _LINUX_FUTEX_H + #define _LINUX_FUTEX_H + ++#include ++ + #include + + struct inode; +@@ -11,9 +13,6 @@ union ktime; + long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, + u32 __user *uaddr2, u32 val2, u32 val3); + +-extern int +-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); +- + /* + * Futexes are matched on equal values of this key. + * The key type depends on whether it's a shared or private mapping. +@@ -56,19 +55,34 @@ union futex_key { + #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } + + #ifdef CONFIG_FUTEX +-extern void exit_robust_list(struct task_struct *curr); +-extern void exit_pi_state_list(struct task_struct *curr); +-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG +-#define futex_cmpxchg_enabled 1 +-#else +-extern int futex_cmpxchg_enabled; +-#endif +-#else +-static inline void exit_robust_list(struct task_struct *curr) +-{ +-} +-static inline void exit_pi_state_list(struct task_struct *curr) ++enum { ++ FUTEX_STATE_OK, ++ FUTEX_STATE_EXITING, ++ FUTEX_STATE_DEAD, ++}; ++ ++static inline void futex_init_task(struct task_struct *tsk) + { ++ tsk->robust_list = NULL; ++#ifdef CONFIG_COMPAT ++ tsk->compat_robust_list = NULL; ++#endif ++ INIT_LIST_HEAD(&tsk->pi_state_list); ++ tsk->pi_state_cache = NULL; ++ tsk->futex_state = FUTEX_STATE_OK; ++ mutex_init(&tsk->futex_exit_mutex); + } ++ ++void futex_exit_recursive(struct task_struct *tsk); ++void futex_exit_release(struct task_struct *tsk); ++void futex_exec_release(struct task_struct *tsk); ++ ++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, ++ u32 __user *uaddr2, u32 val2, u32 val3); ++#else ++static inline void futex_init_task(struct task_struct *tsk) { } ++static inline void futex_exit_recursive(struct task_struct *tsk) { } ++static inline void futex_exit_release(struct task_struct *tsk) { } ++static inline void futex_exec_release(struct task_struct *tsk) { } + #endif + #endif +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index d86ac620f0aac..188bd17689711 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -447,6 +447,8 @@ struct intel_iommu { + struct device *iommu_dev; /* IOMMU-sysfs device */ + int node; + u32 flags; /* Software defined flags */ ++ ++ struct dmar_drhd_unit *drhd; + }; + + static inline void __iommu_flush_cache( +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 1872d4e9acbe1..f094882822a63 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1815,6 +1815,8 @@ struct task_struct { + #endif + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; ++ struct mutex futex_exit_mutex; ++ unsigned int futex_state; + #endif + #ifdef CONFIG_PERF_EVENTS + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; +@@ -2276,7 +2278,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, + * Per process flags + */ + #define PF_EXITING 0x00000004 /* getting shut down */ +-#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ + #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ + #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +@@ -2955,8 +2956,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); + * succeeds. 
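
Taken together, the futex.h and sched.h hunks above replace the PF_EXITPIDONE flag with a per-task exit state machine. A condensed restatement of the new states, annotated with where each transition happens (all names come from this series):

    enum futex_exit_state {
        FUTEX_STATE_OK,      /* set by futex_init_task() at fork; waiters may attach */
        FUTEX_STATE_EXITING, /* set by futex_cleanup_begin() with futex_exit_mutex
                              * and pi_lock held; robust/PI cleanup runs here */
        FUTEX_STATE_DEAD,    /* set by futex_cleanup_end() on exit();
                              * exec() instead goes back to FUTEX_STATE_OK */
    };
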
+ */ + extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +-/* Remove the current tasks stale references to the old mm_struct */ +-extern void mm_release(struct task_struct *, struct mm_struct *); ++/* Remove the current tasks stale references to the old mm_struct on exit() */ ++extern void exit_mm_release(struct task_struct *, struct mm_struct *); ++/* Remove the current tasks stale references to the old mm_struct on exec() */ ++extern void exec_mm_release(struct task_struct *, struct mm_struct *); + + #ifdef CONFIG_HAVE_COPY_THREAD_TLS + extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, +diff --git a/kernel/Makefile b/kernel/Makefile +index 184fa9aa58027..92488cf6ad913 100644 +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -47,9 +47,6 @@ obj-$(CONFIG_PROFILING) += profile.o + obj-$(CONFIG_STACKTRACE) += stacktrace.o + obj-y += time/ + obj-$(CONFIG_FUTEX) += futex.o +-ifeq ($(CONFIG_COMPAT),y) +-obj-$(CONFIG_FUTEX) += futex_compat.o +-endif + obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o + obj-$(CONFIG_SMP) += smp.o + ifneq ($(CONFIG_SMP),y) +diff --git a/kernel/exit.c b/kernel/exit.c +index f9943ef23fa82..8716f0780fe3d 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -464,7 +464,7 @@ static void exit_mm(struct task_struct *tsk) + struct mm_struct *mm = tsk->mm; + struct core_state *core_state; + +- mm_release(tsk, mm); ++ exit_mm_release(tsk, mm); + if (!mm) + return; + sync_mm_rss(mm); +@@ -785,34 +785,15 @@ void __noreturn do_exit(long code) + */ + if (unlikely(tsk->flags & PF_EXITING)) { + pr_alert("Fixing recursive fault but reboot is needed!\n"); +- /* +- * We can do this unlocked here. The futex code uses +- * this flag just to verify whether the pi state +- * cleanup has been done or not. In the worst case it +- * loops once more. We pretend that the cleanup was +- * done as there is no way to return. Either the +- * OWNER_DIED bit is set by now or we push the blocked +- * task into the wait for ever nirwana as well. +- */ +- tsk->flags |= PF_EXITPIDONE; ++ futex_exit_recursive(tsk); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + } + + exit_signals(tsk); /* sets PF_EXITING */ + + schedtune_exit_task(tsk); + +- /* +- * Ensure that all new tsk->pi_lock acquisitions must observe +- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner(). +- */ +- smp_mb(); +- /* +- * Ensure that we must observe the pi_state in exit_mm() -> +- * mm_release() -> exit_pi_state_list(). +- */ +- raw_spin_unlock_wait(&tsk->pi_lock); + + /* sync mm's RSS info before statistics gathering */ + if (tsk->mm) +@@ -876,12 +857,6 @@ void __noreturn do_exit(long code) + * Make sure we are holding no locks: + */ + debug_check_no_locks_held(); +- /* +- * We can do this unlocked here. The futex code uses this flag +- * just to verify whether the pi state cleanup has been done +- * or not. In the worst case it loops once more. +- */ +- tsk->flags |= PF_EXITPIDONE; + + if (tsk->io_context) + exit_io_context(tsk); +diff --git a/kernel/fork.c b/kernel/fork.c +index b64efec4a6e6e..91349fd3e162d 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1082,24 +1082,8 @@ static int wait_for_vfork_done(struct task_struct *child, + * restoring the old one. . . 
+ * Eric Biederman 10 January 1998 + */ +-void mm_release(struct task_struct *tsk, struct mm_struct *mm) ++static void mm_release(struct task_struct *tsk, struct mm_struct *mm) + { +- /* Get rid of any futexes when releasing the mm */ +-#ifdef CONFIG_FUTEX +- if (unlikely(tsk->robust_list)) { +- exit_robust_list(tsk); +- tsk->robust_list = NULL; +- } +-#ifdef CONFIG_COMPAT +- if (unlikely(tsk->compat_robust_list)) { +- compat_exit_robust_list(tsk); +- tsk->compat_robust_list = NULL; +- } +-#endif +- if (unlikely(!list_empty(&tsk->pi_state_list))) +- exit_pi_state_list(tsk); +-#endif +- + uprobe_free_utask(tsk); + + /* Get rid of any cached register state */ +@@ -1132,6 +1116,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) + complete_vfork_done(tsk); + } + ++void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) ++{ ++ futex_exit_release(tsk); ++ mm_release(tsk, mm); ++} ++ ++void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) ++{ ++ futex_exec_release(tsk); ++ mm_release(tsk, mm); ++} ++ + /* + * Allocate a new mm structure and copy contents from the + * mm structure of the passed in task structure. +@@ -1706,14 +1702,8 @@ static __latent_entropy struct task_struct *copy_process( + #ifdef CONFIG_BLOCK + p->plug = NULL; + #endif +-#ifdef CONFIG_FUTEX +- p->robust_list = NULL; +-#ifdef CONFIG_COMPAT +- p->compat_robust_list = NULL; +-#endif +- INIT_LIST_HEAD(&p->pi_state_list); +- p->pi_state_cache = NULL; +-#endif ++ futex_init_task(p); ++ + /* + * sigaltstack should be cleared when sharing the same VM + */ +diff --git a/kernel/futex.c b/kernel/futex.c +index 7123d9cab4568..2ef8c5aef35d0 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -44,6 +44,7 @@ + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ ++#include + #include + #include + #include +@@ -171,8 +172,10 @@ + * double_lock_hb() and double_unlock_hb(), respectively. + */ + +-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG +-int __read_mostly futex_cmpxchg_enabled; ++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG ++#define futex_cmpxchg_enabled 1 ++#else ++static int __read_mostly futex_cmpxchg_enabled; + #endif + + /* +@@ -336,6 +339,12 @@ static inline bool should_fail_futex(bool fshared) + } + #endif /* CONFIG_FAIL_FUTEX */ + ++#ifdef CONFIG_COMPAT ++static void compat_exit_robust_list(struct task_struct *curr); ++#else ++static inline void compat_exit_robust_list(struct task_struct *curr) { } ++#endif ++ + static inline void futex_get_mm(union futex_key *key) + { + atomic_inc(&key->private.mm->mm_count); +@@ -891,7 +900,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) + * Kernel cleans up PI-state, but userspace is likely hosed. + * (Robust-futex cleanup is separate and might save the day for userspace.) + */ +-void exit_pi_state_list(struct task_struct *curr) ++static void exit_pi_state_list(struct task_struct *curr) + { + struct list_head *next, *head = &curr->pi_state_list; + struct futex_pi_state *pi_state; +@@ -1063,12 +1072,43 @@ out_state: + return 0; + } + ++/** ++ * wait_for_owner_exiting - Block until the owner has exited ++ * @exiting: Pointer to the exiting task ++ * ++ * Caller must hold a refcount on @exiting. 
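
That refcount is taken further below in attach_to_pi_owner(), which classifies an exiting owner roughly as follows (a condensed sketch, not the verbatim hunk; note the equality test against FUTEX_STATE_DEAD, and the -EBUSY return for the in-between EXITING window that lets the caller park on futex_exit_mutex):

    raw_spin_lock_irq(&p->pi_lock);
    if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
        /* DEAD: cleanup finished. Anything else: exit still in progress. */
        int ret = (p->futex_state == FUTEX_STATE_DEAD) ? -ESRCH : -EBUSY;

        raw_spin_unlock_irq(&p->pi_lock);
        if (ret == -EBUSY)
            *exiting = p;   /* keep the task ref for wait_for_owner_exiting() */
        else
            put_task_struct(p);
        return ret;
    }
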
++ */ ++static void wait_for_owner_exiting(int ret, struct task_struct *exiting) ++{ ++ if (ret != -EBUSY) { ++ WARN_ON_ONCE(exiting); ++ return; ++ } ++ ++ if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) ++ return; ++ ++ mutex_lock(&exiting->futex_exit_mutex); ++ /* ++ * No point in doing state checking here. If the waiter got here ++ * while the task was in exec()->exec_futex_release() then it can ++ * have any FUTEX_STATE_* value when the waiter has acquired the ++ * mutex. OK, if running, EXITING or DEAD if it reached exit() ++ * already. Highly unlikely and not a problem. Just one more round ++ * through the futex maze. ++ */ ++ mutex_unlock(&exiting->futex_exit_mutex); ++ ++ put_task_struct(exiting); ++} ++ + /* + * Lookup the task for the TID provided from user space and attach to + * it after doing proper sanity checks. + */ + static int attach_to_pi_owner(u32 uval, union futex_key *key, +- struct futex_pi_state **ps) ++ struct futex_pi_state **ps, ++ struct task_struct **exiting) + { + pid_t pid = uval & FUTEX_TID_MASK; + struct futex_pi_state *pi_state; +@@ -1090,22 +1130,33 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + } + + /* +- * We need to look at the task state flags to figure out, +- * whether the task is exiting. To protect against the do_exit +- * change of the task flags, we do this protected by +- * p->pi_lock: ++ * We need to look at the task state to figure out, whether the ++ * task is exiting. To protect against the change of the task state ++ * in futex_exit_release(), we do this protected by p->pi_lock: + */ + raw_spin_lock_irq(&p->pi_lock); +- if (unlikely(p->flags & PF_EXITING)) { ++ if (unlikely(p->futex_state != FUTEX_STATE_OK)) { + /* +- * The task is on the way out. When PF_EXITPIDONE is +- * set, we know that the task has finished the +- * cleanup: ++ * The task is on the way out. When the futex state is ++ * FUTEX_STATE_DEAD, we know that the task has finished ++ * the cleanup: + */ +- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; ++ int ret = (p->futex_state = FUTEX_STATE_DEAD) ? -ESRCH : -EAGAIN; + + raw_spin_unlock_irq(&p->pi_lock); +- put_task_struct(p); ++ /* ++ * If the owner task is between FUTEX_STATE_EXITING and ++ * FUTEX_STATE_DEAD then store the task pointer and keep ++ * the reference on the task struct. The calling code will ++ * drop all locks, wait for the task to reach ++ * FUTEX_STATE_DEAD and then drop the refcount. This is ++ * required to prevent a live lock when the current task ++ * preempted the exiting task between the two states. ++ */ ++ if (ret == -EBUSY) ++ *exiting = p; ++ else ++ put_task_struct(p); + return ret; + } + +@@ -1136,7 +1187,8 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + } + + static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, +- union futex_key *key, struct futex_pi_state **ps) ++ union futex_key *key, struct futex_pi_state **ps, ++ struct task_struct **exiting) + { + struct futex_q *match = futex_top_waiter(hb, key); + +@@ -1151,7 +1203,7 @@ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, + * We are the first waiter - try to look up the owner based on + * @uval and attach to it. + */ +- return attach_to_pi_owner(uval, key, ps); ++ return attach_to_pi_owner(uval, key, ps, exiting); + } + + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) +@@ -1177,6 +1229,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) + * lookup + * @task: the task to perform the atomic lock work for. 
This will + * be "current" except in the case of requeue pi. ++ * @exiting: Pointer to store the task pointer of the owner task ++ * which is in the middle of exiting + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) + * + * Return: +@@ -1185,11 +1239,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) + * <0 - error + * + * The hb->lock and futex_key refs shall be held by the caller. ++ * ++ * @exiting is only set when the return value is -EBUSY. If so, this holds ++ * a refcount on the exiting task on return and the caller needs to drop it ++ * after waiting for the exit to complete. + */ + static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + union futex_key *key, + struct futex_pi_state **ps, +- struct task_struct *task, int set_waiters) ++ struct task_struct *task, ++ struct task_struct **exiting, ++ int set_waiters) + { + u32 uval, newval, vpid = task_pid_vnr(task); + struct futex_q *match; +@@ -1259,7 +1319,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + * attach to the owner. If that fails, no harm done, we only + * set the FUTEX_WAITERS bit in the user space variable. + */ +- return attach_to_pi_owner(uval, key, ps); ++ return attach_to_pi_owner(uval, key, ps, exiting); + } + + /** +@@ -1685,6 +1745,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, + * @key1: the from futex key + * @key2: the to futex key + * @ps: address to store the pi_state pointer ++ * @exiting: Pointer to store the task pointer of the owner task ++ * which is in the middle of exiting + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) + * + * Try and get the lock on behalf of the top waiter if we can do it atomically. +@@ -1692,16 +1754,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, + * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. + * hb1 and hb2 must be held by the caller. + * ++ * @exiting is only set when the return value is -EBUSY. If so, this holds ++ * a refcount on the exiting task on return and the caller needs to drop it ++ * after waiting for the exit to complete. ++ * + * Return: + * 0 - failed to acquire the lock atomically; + * >0 - acquired the lock, return value is vpid of the top_waiter + * <0 - error + */ +-static int futex_proxy_trylock_atomic(u32 __user *pifutex, +- struct futex_hash_bucket *hb1, +- struct futex_hash_bucket *hb2, +- union futex_key *key1, union futex_key *key2, +- struct futex_pi_state **ps, int set_waiters) ++static int ++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, ++ struct futex_hash_bucket *hb2, union futex_key *key1, ++ union futex_key *key2, struct futex_pi_state **ps, ++ struct task_struct **exiting, int set_waiters) + { + struct futex_q *top_waiter = NULL; + u32 curval; +@@ -1738,7 +1804,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, + */ + vpid = task_pid_vnr(top_waiter->task); + ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, +- set_waiters); ++ exiting, set_waiters); + if (ret == 1) { + requeue_pi_wake_futex(top_waiter, key2, hb2); + return vpid; +@@ -1858,6 +1924,8 @@ retry_private: + } + + if (requeue_pi && (task_count - nr_wake < nr_requeue)) { ++ struct task_struct *exiting = NULL; ++ + /* + * Attempt to acquire uaddr2 and wake the top waiter. 
If we + * intend to requeue waiters, force setting the FUTEX_WAITERS +@@ -1865,7 +1933,8 @@ retry_private: + * faults rather in the requeue loop below. + */ + ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, +- &key2, &pi_state, nr_requeue); ++ &key2, &pi_state, ++ &exiting, nr_requeue); + + /* + * At this point the top_waiter has either taken uaddr2 or is +@@ -1892,7 +1961,8 @@ retry_private: + * If that call succeeds then we have pi_state and an + * initial refcount on it. + */ +- ret = lookup_pi_state(ret, hb2, &key2, &pi_state); ++ ret = lookup_pi_state(ret, hb2, &key2, ++ &pi_state, &exiting); + } + + switch (ret) { +@@ -1910,17 +1980,24 @@ retry_private: + if (!ret) + goto retry; + goto out; ++ case -EBUSY: + case -EAGAIN: + /* + * Two reasons for this: +- * - Owner is exiting and we just wait for the ++ * - EBUSY: Owner is exiting and we just wait for the + * exit to complete. +- * - The user space value changed. ++ * - EAGAIN: The user space value changed. + */ + double_unlock_hb(hb1, hb2); + hb_waiters_dec(hb2); + put_futex_key(&key2); + put_futex_key(&key1); ++ /* ++ * Handle the case where the owner is in the middle of ++ * exiting. Wait for the exit to complete otherwise ++ * this task might loop forever, aka. live lock. ++ */ ++ wait_for_owner_exiting(ret, exiting); + cond_resched(); + goto retry; + default: +@@ -2571,6 +2648,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + ktime_t *time, int trylock) + { + struct hrtimer_sleeper timeout, *to = NULL; ++ struct task_struct *exiting = NULL; + struct futex_hash_bucket *hb; + struct futex_q q = futex_q_init; + int res, ret; +@@ -2594,7 +2672,8 @@ retry: + retry_private: + hb = queue_lock(&q); + +- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); ++ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, ++ &exiting, 0); + if (unlikely(ret)) { + /* + * Atomic work succeeded and we got the lock, +@@ -2607,15 +2686,22 @@ retry_private: + goto out_unlock_put_key; + case -EFAULT: + goto uaddr_faulted; ++ case -EBUSY: + case -EAGAIN: + /* + * Two reasons for this: +- * - Task is exiting and we just wait for the ++ * - EBUSY: Task is exiting and we just wait for the + * exit to complete. +- * - The user space value changed. ++ * - EAGAIN: The user space value changed. + */ + queue_unlock(hb); + put_futex_key(&q.key); ++ /* ++ * Handle the case where the owner is in the middle of ++ * exiting. Wait for the exit to complete otherwise ++ * this task might loop forever, aka. live lock. ++ */ ++ wait_for_owner_exiting(ret, exiting); + cond_resched(); + goto retry; + default: +@@ -3123,7 +3209,7 @@ err_unlock: + * Process a futex-list entry, check whether it's owned by the + * dying task, and do notification if so: + */ +-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) ++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) + { + u32 uval, uninitialized_var(nval), mval; + +@@ -3198,7 +3284,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, + * + * We silently return on any sign of list-walking problem. 
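
For context on what exit_robust_list(), made static just below, actually walks: the list lives entirely in user space and is registered once per thread, normally by glibc, so the kernel can mark held robust mutexes OWNER_DIED when their owner dies. A minimal runnable registration demo (hypothetical illustration code, not part of the patch):

    #include <linux/futex.h>     /* struct robust_list_head */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        /* Empty list: next points back at the head itself. */
        static struct robust_list_head head = {
            .list            = { .next = &head.list },
            .futex_offset    = 0,
            .list_op_pending = NULL,
        };

        /* glibc normally issues this once per thread for robust mutexes. */
        if (syscall(SYS_set_robust_list, &head, sizeof(head)))
            perror("set_robust_list");

        struct robust_list_head *cur;
        size_t len;
        if (!syscall(SYS_get_robust_list, 0, &cur, &len))
            printf("robust list head registered at %p (len %zu)\n",
                   (void *)cur, len);
        return 0;
    }
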
+ */ +-void exit_robust_list(struct task_struct *curr) ++static void exit_robust_list(struct task_struct *curr) + { + struct robust_list_head __user *head = curr->robust_list; + struct robust_list __user *entry, *next_entry, *pending; +@@ -3261,6 +3347,114 @@ void exit_robust_list(struct task_struct *curr) + curr, pip); + } + ++static void futex_cleanup(struct task_struct *tsk) ++{ ++ if (unlikely(tsk->robust_list)) { ++ exit_robust_list(tsk); ++ tsk->robust_list = NULL; ++ } ++ ++#ifdef CONFIG_COMPAT ++ if (unlikely(tsk->compat_robust_list)) { ++ compat_exit_robust_list(tsk); ++ tsk->compat_robust_list = NULL; ++ } ++#endif ++ ++ if (unlikely(!list_empty(&tsk->pi_state_list))) ++ exit_pi_state_list(tsk); ++} ++ ++/** ++ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD ++ * @tsk: task to set the state on ++ * ++ * Set the futex exit state of the task lockless. The futex waiter code ++ * observes that state when a task is exiting and loops until the task has ++ * actually finished the futex cleanup. The worst case for this is that the ++ * waiter runs through the wait loop until the state becomes visible. ++ * ++ * This is called from the recursive fault handling path in do_exit(). ++ * ++ * This is best effort. Either the futex exit code has run already or ++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can ++ * take it over. If not, the problem is pushed back to user space. If the ++ * futex exit code did not run yet, then an already queued waiter might ++ * block forever, but there is nothing which can be done about that. ++ */ ++void futex_exit_recursive(struct task_struct *tsk) ++{ ++ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ ++ if (tsk->futex_state == FUTEX_STATE_EXITING) ++ mutex_unlock(&tsk->futex_exit_mutex); ++ tsk->futex_state = FUTEX_STATE_DEAD; ++} ++ ++static void futex_cleanup_begin(struct task_struct *tsk) ++{ ++ /* ++ * Prevent various race issues against a concurrent incoming waiter ++ * including live locks by forcing the waiter to block on ++ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in ++ * attach_to_pi_owner(). ++ */ ++ mutex_lock(&tsk->futex_exit_mutex); ++ ++ /* ++ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. ++ * ++ * This ensures that all subsequent checks of tsk->futex_state in ++ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with ++ * tsk->pi_lock held. ++ * ++ * It guarantees also that a pi_state which was queued right before ++ * the state change under tsk->pi_lock by a concurrent waiter must ++ * be observed in exit_pi_state_list(). ++ */ ++ raw_spin_lock_irq(&tsk->pi_lock); ++ tsk->futex_state = FUTEX_STATE_EXITING; ++ raw_spin_unlock_irq(&tsk->pi_lock); ++} ++ ++static void futex_cleanup_end(struct task_struct *tsk, int state) ++{ ++ /* ++ * Lockless store. The only side effect is that an observer might ++ * take another loop until it becomes visible. ++ */ ++ tsk->futex_state = state; ++ /* ++ * Drop the exit protection. This unblocks waiters which observed ++ * FUTEX_STATE_EXITING to reevaluate the state. ++ */ ++ mutex_unlock(&tsk->futex_exit_mutex); ++} ++ ++void futex_exec_release(struct task_struct *tsk) ++{ ++ /* ++ * The state handling is done for consistency, but in the case of ++ * exec() there is no way to prevent futher damage as the PID stays ++ * the same. But for the unlikely and arguably buggy case that a ++ * futex is held on exec(), this provides at least as much state ++ * consistency protection which is possible. 
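
The locking comments in futex_cleanup_begin() above carry the core of the scheme. Laid out against the waiter side it reads (condensed view, all names from these hunks):

    /*
     *  exiting task (futex_cleanup_begin)    waiter (attach_to_pi_owner)
     *  ----------------------------------    ---------------------------
     *  mutex_lock(&tsk->futex_exit_mutex);
     *  raw_spin_lock_irq(&tsk->pi_lock);     raw_spin_lock_irq(&p->pi_lock);
     *  tsk->futex_state = FUTEX_STATE_EXITING;
     *                                        sees futex_state != FUTEX_STATE_OK,
     *  raw_spin_unlock_irq(&tsk->pi_lock);   bails out with -EBUSY or -ESRCH
     *  futex_cleanup(tsk);                   and, on -EBUSY, blocks in
     *  futex_cleanup_end(tsk, state);        wait_for_owner_exiting()
     *
     * pi_lock totally orders the state write against the waiter's read, so a
     * pi_state queued before the switch is guaranteed to be seen by
     * exit_pi_state_list().
     */
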
++ */ ++ futex_cleanup_begin(tsk); ++ futex_cleanup(tsk); ++ /* ++ * Reset the state to FUTEX_STATE_OK. The task is alive and about ++ * exec a new binary. ++ */ ++ futex_cleanup_end(tsk, FUTEX_STATE_OK); ++} ++ ++void futex_exit_release(struct task_struct *tsk) ++{ ++ futex_cleanup_begin(tsk); ++ futex_cleanup(tsk); ++ futex_cleanup_end(tsk, FUTEX_STATE_DEAD); ++} ++ + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, + u32 __user *uaddr2, u32 val2, u32 val3) + { +@@ -3354,6 +3548,192 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); + } + ++#ifdef CONFIG_COMPAT ++/* ++ * Fetch a robust-list pointer. Bit 0 signals PI futexes: ++ */ ++static inline int ++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, ++ compat_uptr_t __user *head, unsigned int *pi) ++{ ++ if (get_user(*uentry, head)) ++ return -EFAULT; ++ ++ *entry = compat_ptr((*uentry) & ~1); ++ *pi = (unsigned int)(*uentry) & 1; ++ ++ return 0; ++} ++ ++static void __user *futex_uaddr(struct robust_list __user *entry, ++ compat_long_t futex_offset) ++{ ++ compat_uptr_t base = ptr_to_compat(entry); ++ void __user *uaddr = compat_ptr(base + futex_offset); ++ ++ return uaddr; ++} ++ ++/* ++ * Walk curr->robust_list (very carefully, it's a userspace list!) ++ * and mark any locks found there dead, and notify any waiters. ++ * ++ * We silently return on any sign of list-walking problem. ++ */ ++void compat_exit_robust_list(struct task_struct *curr) ++{ ++ struct compat_robust_list_head __user *head = curr->compat_robust_list; ++ struct robust_list __user *entry, *next_entry, *pending; ++ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; ++ unsigned int uninitialized_var(next_pi); ++ compat_uptr_t uentry, next_uentry, upending; ++ compat_long_t futex_offset; ++ int rc; ++ ++ if (!futex_cmpxchg_enabled) ++ return; ++ ++ /* ++ * Fetch the list head (which was registered earlier, via ++ * sys_set_robust_list()): ++ */ ++ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) ++ return; ++ /* ++ * Fetch the relative futex offset: ++ */ ++ if (get_user(futex_offset, &head->futex_offset)) ++ return; ++ /* ++ * Fetch any possibly pending lock-add first, and handle it ++ * if it exists: ++ */ ++ if (compat_fetch_robust_entry(&upending, &pending, ++ &head->list_op_pending, &pip)) ++ return; ++ ++ next_entry = NULL; /* avoid warning with gcc */ ++ while (entry != (struct robust_list __user *) &head->list) { ++ /* ++ * Fetch the next entry in the list before calling ++ * handle_futex_death: ++ */ ++ rc = compat_fetch_robust_entry(&next_uentry, &next_entry, ++ (compat_uptr_t __user *)&entry->next, &next_pi); ++ /* ++ * A pending lock might already be on the list, so ++ * dont process it twice: ++ */ ++ if (entry != pending) { ++ void __user *uaddr = futex_uaddr(entry, futex_offset); ++ ++ if (handle_futex_death(uaddr, curr, pi)) ++ return; ++ } ++ if (rc) ++ return; ++ uentry = next_uentry; ++ entry = next_entry; ++ pi = next_pi; ++ /* ++ * Avoid excessively long or circular lists: ++ */ ++ if (!--limit) ++ break; ++ ++ cond_resched(); ++ } ++ if (pending) { ++ void __user *uaddr = futex_uaddr(pending, futex_offset); ++ ++ handle_futex_death(uaddr, curr, pip); ++ } ++} ++ ++COMPAT_SYSCALL_DEFINE2(set_robust_list, ++ struct compat_robust_list_head __user *, head, ++ compat_size_t, len) ++{ ++ if (!futex_cmpxchg_enabled) ++ return -ENOSYS; ++ ++ if (unlikely(len != sizeof(*head))) ++ return -EINVAL; ++ ++ 
current->compat_robust_list = head; ++ ++ return 0; ++} ++ ++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, ++ compat_uptr_t __user *, head_ptr, ++ compat_size_t __user *, len_ptr) ++{ ++ struct compat_robust_list_head __user *head; ++ unsigned long ret; ++ struct task_struct *p; ++ ++ if (!futex_cmpxchg_enabled) ++ return -ENOSYS; ++ ++ rcu_read_lock(); ++ ++ ret = -ESRCH; ++ if (!pid) ++ p = current; ++ else { ++ p = find_task_by_vpid(pid); ++ if (!p) ++ goto err_unlock; ++ } ++ ++ ret = -EPERM; ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) ++ goto err_unlock; ++ ++ head = p->compat_robust_list; ++ rcu_read_unlock(); ++ ++ if (put_user(sizeof(*head), len_ptr)) ++ return -EFAULT; ++ return put_user(ptr_to_compat(head), head_ptr); ++ ++err_unlock: ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, ++ struct compat_timespec __user *, utime, u32 __user *, uaddr2, ++ u32, val3) ++{ ++ struct timespec ts; ++ ktime_t t, *tp = NULL; ++ int val2 = 0; ++ int cmd = op & FUTEX_CMD_MASK; ++ ++ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || ++ cmd == FUTEX_WAIT_BITSET || ++ cmd == FUTEX_WAIT_REQUEUE_PI)) { ++ if (compat_get_timespec(&ts, utime)) ++ return -EFAULT; ++ if (!timespec_valid(&ts)) ++ return -EINVAL; ++ ++ t = timespec_to_ktime(ts); ++ if (cmd == FUTEX_WAIT) ++ t = ktime_add_safe(ktime_get(), t); ++ tp = &t; ++ } ++ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || ++ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) ++ val2 = (int) (unsigned long) utime; ++ ++ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); ++} ++#endif /* CONFIG_COMPAT */ ++ + static void __init futex_detect_cmpxchg(void) + { + #ifndef CONFIG_HAVE_FUTEX_CMPXCHG +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +deleted file mode 100644 +index 4ae3232e7a28a..0000000000000 +--- a/kernel/futex_compat.c ++++ /dev/null +@@ -1,201 +0,0 @@ +-/* +- * linux/kernel/futex_compat.c +- * +- * Futex compatibililty routines. +- * +- * Copyright 2006, Red Hat, Inc., Ingo Molnar +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include +- +- +-/* +- * Fetch a robust-list pointer. Bit 0 signals PI futexes: +- */ +-static inline int +-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, +- compat_uptr_t __user *head, unsigned int *pi) +-{ +- if (get_user(*uentry, head)) +- return -EFAULT; +- +- *entry = compat_ptr((*uentry) & ~1); +- *pi = (unsigned int)(*uentry) & 1; +- +- return 0; +-} +- +-static void __user *futex_uaddr(struct robust_list __user *entry, +- compat_long_t futex_offset) +-{ +- compat_uptr_t base = ptr_to_compat(entry); +- void __user *uaddr = compat_ptr(base + futex_offset); +- +- return uaddr; +-} +- +-/* +- * Walk curr->robust_list (very carefully, it's a userspace list!) +- * and mark any locks found there dead, and notify any waiters. +- * +- * We silently return on any sign of list-walking problem. 
+- */ +-void compat_exit_robust_list(struct task_struct *curr) +-{ +- struct compat_robust_list_head __user *head = curr->compat_robust_list; +- struct robust_list __user *entry, *next_entry, *pending; +- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; +- unsigned int uninitialized_var(next_pi); +- compat_uptr_t uentry, next_uentry, upending; +- compat_long_t futex_offset; +- int rc; +- +- if (!futex_cmpxchg_enabled) +- return; +- +- /* +- * Fetch the list head (which was registered earlier, via +- * sys_set_robust_list()): +- */ +- if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) +- return; +- /* +- * Fetch the relative futex offset: +- */ +- if (get_user(futex_offset, &head->futex_offset)) +- return; +- /* +- * Fetch any possibly pending lock-add first, and handle it +- * if it exists: +- */ +- if (fetch_robust_entry(&upending, &pending, +- &head->list_op_pending, &pip)) +- return; +- +- next_entry = NULL; /* avoid warning with gcc */ +- while (entry != (struct robust_list __user *) &head->list) { +- /* +- * Fetch the next entry in the list before calling +- * handle_futex_death: +- */ +- rc = fetch_robust_entry(&next_uentry, &next_entry, +- (compat_uptr_t __user *)&entry->next, &next_pi); +- /* +- * A pending lock might already be on the list, so +- * dont process it twice: +- */ +- if (entry != pending) { +- void __user *uaddr = futex_uaddr(entry, futex_offset); +- +- if (handle_futex_death(uaddr, curr, pi)) +- return; +- } +- if (rc) +- return; +- uentry = next_uentry; +- entry = next_entry; +- pi = next_pi; +- /* +- * Avoid excessively long or circular lists: +- */ +- if (!--limit) +- break; +- +- cond_resched(); +- } +- if (pending) { +- void __user *uaddr = futex_uaddr(pending, futex_offset); +- +- handle_futex_death(uaddr, curr, pip); +- } +-} +- +-COMPAT_SYSCALL_DEFINE2(set_robust_list, +- struct compat_robust_list_head __user *, head, +- compat_size_t, len) +-{ +- if (!futex_cmpxchg_enabled) +- return -ENOSYS; +- +- if (unlikely(len != sizeof(*head))) +- return -EINVAL; +- +- current->compat_robust_list = head; +- +- return 0; +-} +- +-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, +- compat_uptr_t __user *, head_ptr, +- compat_size_t __user *, len_ptr) +-{ +- struct compat_robust_list_head __user *head; +- unsigned long ret; +- struct task_struct *p; +- +- if (!futex_cmpxchg_enabled) +- return -ENOSYS; +- +- rcu_read_lock(); +- +- ret = -ESRCH; +- if (!pid) +- p = current; +- else { +- p = find_task_by_vpid(pid); +- if (!p) +- goto err_unlock; +- } +- +- ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) +- goto err_unlock; +- +- head = p->compat_robust_list; +- rcu_read_unlock(); +- +- if (put_user(sizeof(*head), len_ptr)) +- return -EFAULT; +- return put_user(ptr_to_compat(head), head_ptr); +- +-err_unlock: +- rcu_read_unlock(); +- +- return ret; +-} +- +-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, +- struct compat_timespec __user *, utime, u32 __user *, uaddr2, +- u32, val3) +-{ +- struct timespec ts; +- ktime_t t, *tp = NULL; +- int val2 = 0; +- int cmd = op & FUTEX_CMD_MASK; +- +- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || +- cmd == FUTEX_WAIT_BITSET || +- cmd == FUTEX_WAIT_REQUEUE_PI)) { +- if (compat_get_timespec(&ts, utime)) +- return -EFAULT; +- if (!timespec_valid(&ts)) +- return -EINVAL; +- +- t = timespec_to_ktime(ts); +- if (cmd == FUTEX_WAIT) +- t = ktime_add_safe(ktime_get(), t); +- tp = &t; +- } +- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || +- cmd == 
FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) +- val2 = (int) (unsigned long) utime; +- +- return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); +-} +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 0b0de3030e0dc..9c20c53f6729e 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -1046,6 +1046,7 @@ enum queue_stop_reason { + IEEE80211_QUEUE_STOP_REASON_FLUSH, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, + + IEEE80211_QUEUE_STOP_REASONS, + }; +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index ad03331ee7855..7d43e0085cfc7 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -1577,6 +1577,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + if (ret) + return ret; + ++ ieee80211_stop_vif_queues(local, sdata, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); ++ synchronize_net(); ++ + ieee80211_do_stop(sdata, false); + + ieee80211_teardown_sdata(sdata); +@@ -1597,6 +1601,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + err = ieee80211_do_open(&sdata->wdev, false); + WARN(err, "type change: do_open returned %d", err); + ++ ieee80211_wake_vif_queues(local, sdata, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); + return ret; + } + +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c +index b9dd4e9604261..81adbfaffe38f 100644 +--- a/net/netfilter/nft_dynset.c ++++ b/net/netfilter/nft_dynset.c +@@ -210,8 +210,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, + nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR, + priv->expr->ops->size); + if (set->flags & NFT_SET_TIMEOUT) { +- if (timeout || set->timeout) ++ if (timeout || set->timeout) { ++ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); + nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); ++ } + } + + priv->timeout = timeout; +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index 0afae9f73ebb4..f326a6ea35fc7 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -887,6 +887,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) + + if (!dev->polling) { + device_unlock(&dev->dev); ++ nfc_put_device(dev); + return -EINVAL; + } + +diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c +index 574af981806fa..92a3cfae4de87 100644 +--- a/net/nfc/rawsock.c ++++ b/net/nfc/rawsock.c +@@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, + if (addr->target_idx > dev->target_next_idx - 1 || + addr->target_idx < dev->target_next_idx - dev->n_targets) { + rc = -EINVAL; +- goto error; ++ goto put_dev; + } + + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index 4bf0296a7c433..0e809ae17f381 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -898,8 +898,9 @@ out: + int call_commit_handler(struct net_device *dev) + { + #ifdef CONFIG_WIRELESS_EXT +- if ((netif_running(dev)) && +- (dev->wireless_handlers->standard[0] != NULL)) ++ if (netif_running(dev) && ++ dev->wireless_handlers && ++ dev->wireless_handlers->standard[0]) + /* Call the commit handler on the driver */ + return dev->wireless_handlers->standard[0](dev, NULL, + NULL, NULL); +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index 1e87639f2c270..d613bf77cc0f9 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -315,7 +315,7 @@ 
resume: + /* only the first xfrm gets the encap type */ + encap_type = 0; + +- if (async && x->repl->recheck(x, skb, seq)) { ++ if (x->repl->recheck(x, skb, seq)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); + goto drop_unlock; + } diff --git a/patch/kernel/meson64-legacy/patch-4.9.255-256.patch b/patch/kernel/meson64-legacy/patch-4.9.255-256.patch new file mode 100644 index 0000000000..d07b17210f --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.255-256.patch @@ -0,0 +1,12 @@ +diff --git a/Makefile b/Makefile +index 4780b5f80b2a8..69af44d3dcd14 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 255 ++SUBLEVEL = 256 + EXTRAVERSION = + NAME = Roaring Lionus + diff --git a/patch/kernel/meson64-legacy/patch-4.9.256-257.patch b/patch/kernel/meson64-legacy/patch-4.9.256-257.patch new file mode 100644 index 0000000000..7d4e2716d0 --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.256-257.patch @@ -0,0 +1,1823 @@ +diff --git a/Makefile b/Makefile +index 69af44d3dcd14..e53096154f816 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 256 ++SUBLEVEL = 257 + EXTRAVERSION = + NAME = Roaring Lionus + +@@ -841,12 +841,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) + # change __FILE__ to the relative path from the srctree + KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + +-# ensure -fcf-protection is disabled when using retpoline as it is +-# incompatible with -mindirect-branch=thunk-extern +-ifdef CONFIG_RETPOLINE +-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +-endif +- + # use the deterministic mode of AR if available + KBUILD_ARFLAGS := $(call ar-option,D) + +@@ -1141,7 +1135,7 @@ endef + + define filechk_version.h + (echo \#define LINUX_VERSION_CODE $(shell \ +- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ ++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \ + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) + endef + +diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c +index 96a3d73ef4bf4..fd6c9169fa78e 100644 +--- a/arch/arm/mach-footbridge/dc21285.c ++++ b/arch/arm/mach-footbridge/dc21285.c +@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("ldrb %0, [%1, %2]" ++ asm volatile("ldrb %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 2: +- asm("ldrh %0, [%1, %2]" ++ asm volatile("ldrh %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 4: +- asm("ldr %0, [%1, %2]" ++ asm volatile("ldr %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + } +@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("strb %0, [%1, %2]" ++ asm volatile("strb %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 2: +- asm("strh %0, [%1, %2]" ++ asm volatile("strh %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 4: +- asm("str %0, [%1, %2]" ++ asm volatile("str %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 940ed27a62123..a95d414663b1e 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -137,6 +137,9 @@ else + KBUILD_CFLAGS += -mno-red-zone + KBUILD_CFLAGS += 
-mcmodel=kernel + ++ # Intel CET isn't enabled in the kernel ++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) ++ + # -funit-at-a-time shrinks the kernel .text considerably + # unfortunately it makes reading oopses harder. + KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index f39fd349cef65..a6d034257b7bb 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -176,16 +176,6 @@ static inline void lapic_update_tsc_freq(void) { } + #endif /* !CONFIG_X86_LOCAL_APIC */ + + #ifdef CONFIG_X86_X2APIC +-/* +- * Make previous memory operations globally visible before +- * sending the IPI through x2apic wrmsr. We need a serializing instruction or +- * mfence for this. +- */ +-static inline void x2apic_wrmsr_fence(void) +-{ +- asm volatile("mfence" : : : "memory"); +-} +- + static inline void native_apic_msr_write(u32 reg, u32 v) + { + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index a0f450b21d676..89b75edf24af8 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -110,4 +110,22 @@ do { \ + + #include + ++/* ++ * Make previous memory operations globally visible before ++ * a WRMSR. ++ * ++ * MFENCE makes writes visible, but only affects load/store ++ * instructions. WRMSR is unfortunately not a load/store ++ * instruction and is unaffected by MFENCE. The LFENCE ensures ++ * that the WRMSR is not reordered. ++ * ++ * Most WRMSRs are full serializing instructions themselves and ++ * do not require this barrier. This is only required for the ++ * IA32_TSC_DEADLINE and X2APIC MSRs. ++ */ ++static inline void weak_wrmsr_fence(void) ++{ ++ asm volatile("mfence; lfence" : : : "memory"); ++} ++ + #endif /* _ASM_X86_BARRIER_H */ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 722a76b88bcc0..107a9eff587bf 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -476,6 +477,9 @@ static int lapic_next_deadline(unsigned long delta, + { + u64 tsc; + ++ /* This MSR is special and need a special fence: */ ++ weak_wrmsr_fence(); ++ + tsc = rdtsc(); + wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + return 0; +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c +index 200af5ae96626..ca64c150d1c53 100644 +--- a/arch/x86/kernel/apic/x2apic_cluster.c ++++ b/arch/x86/kernel/apic/x2apic_cluster.c +@@ -27,7 +27,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); + } + +@@ -40,7 +41,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long flags; + u32 dest; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + + local_irq_save(flags); + +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index ff111f05a3145..8889420ea7c6f 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -40,7 +40,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_apicid, 
cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); + } + +@@ -51,7 +52,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long this_cpu; + unsigned long flags; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + + local_irq_save(flags); + +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index 35e8fbca10ad5..c53c88b531639 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -188,6 +188,8 @@ struct acpi_thermal { + int tz_enabled; + int kelvin_offset; + struct work_struct thermal_check_work; ++ struct mutex thermal_check_lock; ++ atomic_t thermal_check_count; + }; + + /* -------------------------------------------------------------------------- +@@ -513,17 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) + return 0; + } + +-static void acpi_thermal_check(void *data) +-{ +- struct acpi_thermal *tz = data; +- +- if (!tz->tz_enabled) +- return; +- +- thermal_zone_device_update(tz->thermal_zone, +- THERMAL_EVENT_UNSPECIFIED); +-} +- + /* sys I/F for generic thermal sysfs support */ + + static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) +@@ -557,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal, + return 0; + } + ++static void acpi_thermal_check_fn(struct work_struct *work); ++ + static int thermal_set_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) + { +@@ -582,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal, + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "%s kernel ACPI thermal control\n", + tz->tz_enabled ? 
"Enable" : "Disable")); +- acpi_thermal_check(tz); ++ acpi_thermal_check_fn(&tz->thermal_check_work); + } + return 0; + } +@@ -951,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) + Driver Interface + -------------------------------------------------------------------------- */ + ++static void acpi_queue_thermal_check(struct acpi_thermal *tz) ++{ ++ if (!work_pending(&tz->thermal_check_work)) ++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); ++} ++ + static void acpi_thermal_notify(struct acpi_device *device, u32 event) + { + struct acpi_thermal *tz = acpi_driver_data(device); +@@ -961,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) + + switch (event) { + case ACPI_THERMAL_NOTIFY_TEMPERATURE: +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + break; + case ACPI_THERMAL_NOTIFY_THRESHOLDS: + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; + case ACPI_THERMAL_NOTIFY_DEVICES: + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; +@@ -1071,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work) + { + struct acpi_thermal *tz = container_of(work, struct acpi_thermal, + thermal_check_work); +- acpi_thermal_check(tz); ++ ++ if (!tz->tz_enabled) ++ return; ++ /* ++ * In general, it is not sufficient to check the pending bit, because ++ * subsequent instances of this function may be queued after one of them ++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just ++ * one of them is running, though, because it may have done the actual ++ * check some time ago, so allow at least one of them to block on the ++ * mutex while another one is running the update. 
++ */ ++ if (!atomic_add_unless(&tz->thermal_check_count, -1, 1)) ++ return; ++ ++ mutex_lock(&tz->thermal_check_lock); ++ ++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); ++ ++ atomic_inc(&tz->thermal_check_count); ++ ++ mutex_unlock(&tz->thermal_check_lock); + } + + static int acpi_thermal_add(struct acpi_device *device) +@@ -1103,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device) + if (result) + goto free_memory; + ++ atomic_set(&tz->thermal_check_count, 3); ++ mutex_init(&tz->thermal_check_lock); + INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); + + pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), +@@ -1168,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev) + tz->state.active |= tz->trips.active[i].flags.enabled; + } + +- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); ++ acpi_queue_thermal_check(tz); + + return AE_OK; + } +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 637f1347cd13d..815b69d35722c 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -232,9 +232,17 @@ static const struct xpad_device { + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, +- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, +@@ -313,6 +321,9 @@ static const struct xpad_device { + { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, ++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, +@@ -446,8 +457,12 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ ++ XPAD_XBOX360_VENDOR(0x20d6), /* 
PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ ++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + { } + }; + +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index fa07be0b4500e..2317f8d3fef6f 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, ++ }, ++ { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 593a4bfcba428..b2061cfa05ab4 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -3323,6 +3323,12 @@ static int __init init_dmars(void) + + if (!ecap_pass_through(iommu->ecap)) + hw_pass_through = 0; ++ ++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { ++ pr_info("Disable batched IOTLB flush due to virtualization"); ++ intel_iommu_strict = 1; ++ } ++ + #ifdef CONFIG_INTEL_IOMMU_SVM + if (pasid_enabled(iommu)) + intel_svm_alloc_pasid_tables(iommu); +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c +index 934c4816d78bf..14c56cf66ddc6 100644 +--- a/drivers/mmc/core/sdio_cis.c ++++ b/drivers/mmc/core/sdio_cis.c +@@ -24,6 +24,8 @@ + #include "sdio_cis.h" + #include "sdio_ops.h" + ++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ ++ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, + const unsigned char *buf, unsigned size) + { +@@ -269,6 +271,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + + do { + unsigned char tpl_code, tpl_link; ++ unsigned long timeout = jiffies + ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); + + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); + if (ret) +@@ -321,6 +325,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + prev = &this->next; + + if (ret == -ENOENT) { ++ if (time_after(jiffies, timeout)) ++ break; + /* warn about unknown tuples */ + pr_warn_ratelimited("%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 84def5819d2ec..a3742a3b413cd 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -515,15 +515,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + /* Find our integrated MDIO bus node */ + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); +- if (!priv->master_mii_bus) ++ if (!priv->master_mii_bus) { ++ of_node_put(dn); + return -EPROBE_DEFER; ++ } + + get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); +- if (!priv->slave_mii_bus) ++ if (!priv->slave_mii_bus) { ++ of_node_put(dn); + return -ENOMEM; ++ } + + priv->slave_mii_bus->priv = priv; + priv->slave_mii_bus->name = "sf2 slave mii"; +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index a8ff4f8a6b87d..f23559a2b2bd1 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -3496,6 +3496,12 @@ 
static irqreturn_t ibmvnic_interrupt(int irq, void *instance) + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvnic_next_crq(adapter)) != NULL) { ++ /* This barrier makes sure ibmvnic_next_crq()'s ++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded ++ * before ibmvnic_handle_crq()'s ++ * switch(gen_crq->first) and switch(gen_crq->cmd). ++ */ ++ dma_rmb(); + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c +index 04b3ac17531db..7865feb8e5e83 100644 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -2891,8 +2891,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); +- if (sdev->type == TYPE_DISK) ++ if (sdev->type == TYPE_DISK) { + sdev->allow_restart = 1; ++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); ++ } + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; + } +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index d0a86ef806522..59fd6101f188b 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -1585,8 +1585,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + rc = fc_exch_done_locked(ep); + WARN_ON(fc_seq_exch(sp) != ep); + spin_unlock_bh(&ep->ex_lock); +- if (!rc) ++ if (!rc) { + fc_exch_delete(ep); ++ } else { ++ FC_EXCH_DBG(ep, "ep is completed already," ++ "hence skip calling the resp\n"); ++ goto skip_resp; ++ } + } + + /* +@@ -1605,6 +1610,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); + ++skip_resp: + fc_exch_release(ep); + return; + rel: +@@ -1848,10 +1854,16 @@ static void fc_exch_reset(struct fc_exch *ep) + + fc_exch_hold(ep); + +- if (!rc) ++ if (!rc) { + fc_exch_delete(ep); ++ } else { ++ FC_EXCH_DBG(ep, "ep is completed already," ++ "hence skip calling the resp\n"); ++ goto skip_resp; ++ } + + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); ++skip_resp: + fc_seq_set_resp(sp, NULL, ep->arg); + fc_exch_release(ep); + } +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 76701d6ce92c3..582099f4f449f 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -1349,14 +1349,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) + return -EINVAL; + +- alts = usblp->protocol[protocol].alt_setting; +- if (alts < 0) +- return -EINVAL; +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts); +- if (r < 0) { +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", +- alts, usblp->ifnum); +- return r; ++ /* Don't unnecessarily set the interface if there's a single alt. 
*/ ++ if (usblp->intf->num_altsetting > 1) { ++ alts = usblp->protocol[protocol].alt_setting; ++ if (alts < 0) ++ return -EINVAL; ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts); ++ if (r < 0) { ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", ++ alts, usblp->ifnum); ++ return r; ++ } + } + + usblp->bidir = (usblp->protocol[protocol].epread != NULL); +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index 9381a108a9851..6bde5db1490a1 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -942,7 +942,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + u32 windex) + { +- struct dwc2_hsotg_ep *ep; + int dir = (windex & USB_DIR_IN) ? 1 : 0; + int idx = windex & 0x7F; + +@@ -952,12 +951,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + if (idx > hsotg->num_of_eps) + return NULL; + +- ep = index_to_ep(hsotg, idx, dir); +- +- if (idx && ep->dir_in != dir) +- return NULL; +- +- return ep; ++ return index_to_ep(hsotg, idx, dir); + } + + /** +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c +index 25a2c2e485920..3396e7193dba2 100644 +--- a/drivers/usb/gadget/legacy/ether.c ++++ b/drivers/usb/gadget/legacy/ether.c +@@ -407,8 +407,10 @@ static int eth_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail1; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 6e1d65fda97e0..69dbfffde4df0 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -692,11 +692,16 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ +- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, +- seg->bounce_len, seg->bounce_offs); +- if (len != seg->bounce_len) +- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", +- len, seg->bounce_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, ++ seg->bounce_len, seg->bounce_offs); ++ if (len != seg->bounce_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", ++ len, seg->bounce_len); ++ } else { ++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, ++ seg->bounce_len); ++ } + seg->bounce_len = 0; + seg->bounce_offs = 0; + } +@@ -3196,12 +3201,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + + /* create a max max_pkt sized bounce buffer pointed to by last trb */ + if (usb_urb_dir_out(urb)) { +- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, +- seg->bounce_buf, new_buff_len, enqd_len); +- if (len != new_buff_len) +- xhci_warn(xhci, +- "WARN Wrong bounce buffer write length: %zu != %d\n", +- len, new_buff_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, ++ seg->bounce_buf, new_buff_len, enqd_len); ++ if (len != new_buff_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", ++ len, new_buff_len); ++ } else { ++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); ++ } ++ + 
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_TO_DEVICE); + } else { +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index abf8a3cac2651..1847074b4d819 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -58,6 +58,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ +@@ -198,6 +199,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 1998b314368e0..3c536eed07541 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 + #define CINTERION_PRODUCT_CLS8 0x00b0 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 + + /* Olivetti products */ + #define OLIVETTI_VENDOR_ID 0x0b3c +@@ -1896,6 +1898,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), ++ .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), ++ .driver_info = RSVD(0)}, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = RSVD(4) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 0262c8f7e7c76..09f49dab77393 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -830,6 +830,7 @@ static int + cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + { + struct inode *inode; ++ int rc; + + if (flags & LOOKUP_RCU) + return -ECHILD; +@@ -839,8 +840,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) + CIFS_I(inode)->time = 0; /* force reval */ + +- if (cifs_revalidate_dentry(direntry)) +- return 0; ++ rc = cifs_revalidate_dentry(direntry); ++ if (rc) { ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); ++ switch (rc) { ++ case -ENOENT: ++ case -ESTALE: ++ /* ++ * Those errors mean the dentry is invalid ++ * (file was deleted or recreated) ++ */ ++ return 0; 
++ default: ++ /* ++ * Otherwise some unexpected error happened ++ * report it as-is to VFS layer ++ */ ++ return rc; ++ } ++ } + else { + /* + * If the inode wasn't known to be a dfs entry when +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 253b03451b727..ba1909b887efb 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -665,8 +665,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + ++ set_page_huge_active(page); + /* +- * page_put due to reference from alloc_huge_page() ++ * put_page() due to reference from alloc_huge_page() + * unlock_page because locked by add_to_page_cache() + */ + put_page(page); +diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h +index 698d51a0eea3f..4adf7faeaeb59 100644 +--- a/include/linux/elfcore.h ++++ b/include/linux/elfcore.h +@@ -55,6 +55,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse + } + #endif + ++#if defined(CONFIG_UM) || defined(CONFIG_IA64) + /* + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out + * extra segments containing the gate DSO contents. Dumping its +@@ -69,5 +70,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); + extern int + elf_core_write_extra_data(struct coredump_params *cprm); + extern size_t elf_core_extra_data_size(void); ++#else ++static inline Elf_Half elf_core_extra_phdrs(void) ++{ ++ return 0; ++} ++ ++static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) ++{ ++ return 1; ++} ++ ++static inline int elf_core_write_extra_data(struct coredump_params *cprm) ++{ ++ return 1; ++} ++ ++static inline size_t elf_core_extra_data_size(void) ++{ ++ return 0; ++} ++#endif + + #endif /* _LINUX_ELFCORE_H */ +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 6b8a7b654771a..4e4c35a6bfc5a 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -502,6 +502,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) + { + atomic_long_sub(l, &mm->hugetlb_usage); + } ++ ++void set_page_huge_active(struct page *page); ++ + #else /* CONFIG_HUGETLB_PAGE */ + struct hstate {}; + #define alloc_huge_page(v, a, r) NULL +diff --git a/kernel/Makefile b/kernel/Makefile +index 92488cf6ad913..6c4a28cf680e4 100644 +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -90,7 +90,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o + obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o + obj-$(CONFIG_TRACEPOINTS) += tracepoint.o + obj-$(CONFIG_LATENCYTOP) += latencytop.o +-obj-$(CONFIG_ELFCORE) += elfcore.o + obj-$(CONFIG_FUNCTION_TRACER) += trace/ + obj-$(CONFIG_TRACING) += trace/ + obj-$(CONFIG_TRACE_CLOCK) += trace/ +diff --git a/kernel/elfcore.c b/kernel/elfcore.c +deleted file mode 100644 +index a2b29b9bdfcb2..0000000000000 +--- a/kernel/elfcore.c ++++ /dev/null +@@ -1,25 +0,0 @@ +-#include +-#include +-#include +-#include +-#include +- +-Elf_Half __weak elf_core_extra_phdrs(void) +-{ +- return 0; +-} +- +-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +-{ +- return 1; +-} +- +-int __weak elf_core_write_extra_data(struct coredump_params *cprm) +-{ +- return 1; +-} +- +-size_t __weak elf_core_extra_data_size(void) +-{ +- return 0; +-} +diff --git a/kernel/futex.c b/kernel/futex.c +index 2ef8c5aef35d0..83db5787c67ef 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -837,6 +837,29 @@ static struct futex_pi_state * 
alloc_pi_state(void) + return pi_state; + } + ++static void pi_state_update_owner(struct futex_pi_state *pi_state, ++ struct task_struct *new_owner) ++{ ++ struct task_struct *old_owner = pi_state->owner; ++ ++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock); ++ ++ if (old_owner) { ++ raw_spin_lock(&old_owner->pi_lock); ++ WARN_ON(list_empty(&pi_state->list)); ++ list_del_init(&pi_state->list); ++ raw_spin_unlock(&old_owner->pi_lock); ++ } ++ ++ if (new_owner) { ++ raw_spin_lock(&new_owner->pi_lock); ++ WARN_ON(!list_empty(&pi_state->list)); ++ list_add(&pi_state->list, &new_owner->pi_state_list); ++ pi_state->owner = new_owner; ++ raw_spin_unlock(&new_owner->pi_lock); ++ } ++} ++ + /* + * Drops a reference to the pi_state object and frees or caches it + * when the last reference is gone. +@@ -856,11 +879,8 @@ static void put_pi_state(struct futex_pi_state *pi_state) + * and has cleaned up the pi_state already + */ + if (pi_state->owner) { +- raw_spin_lock_irq(&pi_state->owner->pi_lock); +- list_del_init(&pi_state->list); +- raw_spin_unlock_irq(&pi_state->owner->pi_lock); +- +- rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); ++ pi_state_update_owner(pi_state, NULL); ++ rt_mutex_proxy_unlock(&pi_state->pi_mutex); + } + + if (current->pi_state_cache) +@@ -941,7 +961,7 @@ static void exit_pi_state_list(struct task_struct *curr) + pi_state->owner = NULL; + raw_spin_unlock_irq(&curr->pi_lock); + +- rt_mutex_unlock(&pi_state->pi_mutex); ++ rt_mutex_futex_unlock(&pi_state->pi_mutex); + + spin_unlock(&hb->lock); + +@@ -997,7 +1017,8 @@ static void exit_pi_state_list(struct task_struct *curr) + * FUTEX_OWNER_DIED bit. See [4] + * + * [10] There is no transient state which leaves owner and user space +- * TID out of sync. ++ * TID out of sync. Except one error case where the kernel is denied ++ * write access to the user address, see fixup_pi_state_owner(). + */ + + /* +@@ -1394,12 +1415,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); + + /* +- * It is possible that the next waiter (the one that brought +- * this owner to the kernel) timed out and is no longer +- * waiting on the lock. ++ * When we interleave with futex_lock_pi() where it does ++ * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter, ++ * but the rt_mutex's wait_list can be empty (either still, or again, ++ * depending on which side we land). ++ * ++ * When this happens, give up our locks and try again, giving the ++ * futex_lock_pi() instance time to complete, either by waiting on the ++ * rtmutex or removing itself from the futex queue. + */ +- if (!new_owner) +- new_owner = this->task; ++ if (!new_owner) { ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return -EAGAIN; ++ } + + /* + * We pass it to the next owner. 
The WAITERS bit is always +@@ -1425,36 +1453,24 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, + else + ret = -EINVAL; + } +- if (ret) { +- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); +- return ret; +- } +- +- raw_spin_lock(&pi_state->owner->pi_lock); +- WARN_ON(list_empty(&pi_state->list)); +- list_del_init(&pi_state->list); +- raw_spin_unlock(&pi_state->owner->pi_lock); + +- raw_spin_lock(&new_owner->pi_lock); +- WARN_ON(!list_empty(&pi_state->list)); +- list_add(&pi_state->list, &new_owner->pi_state_list); +- pi_state->owner = new_owner; +- raw_spin_unlock(&new_owner->pi_lock); ++ if (!ret) { ++ /* ++ * This is a point of no return; once we modified the uval ++ * there is no going back and subsequent operations must ++ * not fail. ++ */ ++ pi_state_update_owner(pi_state, new_owner); ++ deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); ++ } + + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); +- +- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); +- +- /* +- * First unlock HB so the waiter does not spin on it once he got woken +- * up. Second wake up the waiter before the priority is adjusted. If we +- * deboost first (and lose our higher priority), then the task might get +- * scheduled away before the wake up can take place. +- */ + spin_unlock(&hb->lock); +- wake_up_q(&wake_q); +- if (deboost) ++ ++ if (deboost) { ++ wake_up_q(&wake_q); + rt_mutex_adjust_prio(current); ++ } + + return 0; + } +@@ -2257,30 +2273,32 @@ static void unqueue_me_pi(struct futex_q *q) + spin_unlock(q->lock_ptr); + } + +-/* +- * Fixup the pi_state owner with the new owner. +- * +- * Must be called with hash bucket lock held and mm->sem held for non +- * private futexes. +- */ +-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, +- struct task_struct *newowner) ++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, ++ struct task_struct *argowner) + { +- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; + struct futex_pi_state *pi_state = q->pi_state; +- struct task_struct *oldowner = pi_state->owner; +- u32 uval, uninitialized_var(curval), newval; +- int ret; ++ struct task_struct *oldowner, *newowner; ++ u32 uval, curval, newval, newtid; ++ int err = 0; ++ ++ oldowner = pi_state->owner; + + /* Owner died? */ + if (!pi_state->owner) + newtid |= FUTEX_OWNER_DIED; + + /* +- * We are here either because we stole the rtmutex from the +- * previous highest priority waiter or we are the highest priority +- * waiter but failed to get the rtmutex the first time. +- * We have to replace the newowner TID in the user space variable. ++ * We are here because either: ++ * ++ * - we stole the lock and pi_state->owner needs updating to reflect ++ * that (@argowner == current), ++ * ++ * or: ++ * ++ * - someone stole our lock and we need to fix things to point to the ++ * new owner (@argowner == NULL). ++ * ++ * Either way, we have to replace the TID in the user space variable. + * This must be atomic as we have to preserve the owner died bit here. + * + * Note: We write the user space value _before_ changing the pi_state +@@ -2294,6 +2312,39 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + * in lookup_pi_state. + */ + retry: ++ if (!argowner) { ++ if (oldowner != current) { ++ /* ++ * We raced against a concurrent self; things are ++ * already fixed up. Nothing to do. 
++ */ ++ return 0; ++ } ++ ++ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { ++ /* We got the lock after all, nothing to fix. */ ++ return 1; ++ } ++ ++ /* ++ * Since we just failed the trylock; there must be an owner. ++ */ ++ newowner = rt_mutex_owner(&pi_state->pi_mutex); ++ BUG_ON(!newowner); ++ } else { ++ WARN_ON_ONCE(argowner != current); ++ if (oldowner == current) { ++ /* ++ * We raced against a concurrent self; things are ++ * already fixed up. Nothing to do. ++ */ ++ return 1; ++ } ++ newowner = argowner; ++ } ++ ++ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; ++ + if (get_futex_value_locked(&uval, uaddr)) + goto handle_fault; + +@@ -2311,19 +2362,8 @@ retry: + * We fixed up user space. Now we need to fix the pi_state + * itself. + */ +- if (pi_state->owner != NULL) { +- raw_spin_lock_irq(&pi_state->owner->pi_lock); +- WARN_ON(list_empty(&pi_state->list)); +- list_del_init(&pi_state->list); +- raw_spin_unlock_irq(&pi_state->owner->pi_lock); +- } +- +- pi_state->owner = newowner; ++ pi_state_update_owner(pi_state, newowner); + +- raw_spin_lock_irq(&newowner->pi_lock); +- WARN_ON(!list_empty(&pi_state->list)); +- list_add(&pi_state->list, &newowner->pi_state_list); +- raw_spin_unlock_irq(&newowner->pi_lock); + return 0; + + /* +@@ -2339,7 +2379,7 @@ retry: + handle_fault: + spin_unlock(q->lock_ptr); + +- ret = fault_in_user_writeable(uaddr); ++ err = fault_in_user_writeable(uaddr); + + spin_lock(q->lock_ptr); + +@@ -2347,12 +2387,45 @@ handle_fault: + * Check if someone else fixed it for us: + */ + if (pi_state->owner != oldowner) +- return 0; ++ return argowner == current; + +- if (ret) +- return ret; ++ /* Retry if err was -EAGAIN or the fault in succeeded */ ++ if (!err) ++ goto retry; + +- goto retry; ++ /* ++ * fault_in_user_writeable() failed so user state is immutable. At ++ * best we can make the kernel state consistent but user state will ++ * be most likely hosed and any subsequent unlock operation will be ++ * rejected due to PI futex rule [10]. ++ * ++ * Ensure that the rtmutex owner is also the pi_state owner despite ++ * the user space value claiming something different. There is no ++ * point in unlocking the rtmutex if current is the owner as it ++ * would need to wait until the next waiter has taken the rtmutex ++ * to guarantee consistent state. Keep it simple. Userspace asked ++ * for this wreckaged state. ++ * ++ * The rtmutex has an owner - either current or some other ++ * task. See the EAGAIN loop above. ++ */ ++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); ++ ++ return err; ++} ++ ++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, ++ struct task_struct *argowner) ++{ ++ struct futex_pi_state *pi_state = q->pi_state; ++ int ret; ++ ++ lockdep_assert_held(q->lock_ptr); ++ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ ret = __fixup_pi_state_owner(uaddr, q, argowner); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return ret; + } + + static long futex_wait_restart(struct restart_block *restart); +@@ -2374,13 +2447,16 @@ static long futex_wait_restart(struct restart_block *restart); + */ + static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + { +- struct task_struct *owner; + int ret = 0; + + if (locked) { + /* + * Got the lock. 
We might not be the anticipated owner if we + * did a lock-steal - fix up the PI-state in that case: ++ * ++ * Speculative pi_state->owner read (we don't hold wait_lock); ++ * since we own the lock pi_state->owner == current is the ++ * stable state, anything else needs more attention. + */ + if (q->pi_state->owner != current) + ret = fixup_pi_state_owner(uaddr, q, current); +@@ -2388,43 +2464,24 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + } + + /* +- * Catch the rare case, where the lock was released when we were on the +- * way back before we locked the hash bucket. ++ * If we didn't get the lock; check if anybody stole it from us. In ++ * that case, we need to fix up the uval to point to them instead of ++ * us, otherwise bad things happen. [10] ++ * ++ * Another speculative read; pi_state->owner == current is unstable ++ * but needs our attention. + */ + if (q->pi_state->owner == current) { +- /* +- * Try to get the rt_mutex now. This might fail as some other +- * task acquired the rt_mutex after we removed ourself from the +- * rt_mutex waiters list. +- */ +- if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { +- locked = 1; +- goto out; +- } +- +- /* +- * pi_state is incorrect, some other task did a lock steal and +- * we returned due to timeout or signal without taking the +- * rt_mutex. Too late. +- */ +- raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); +- owner = rt_mutex_owner(&q->pi_state->pi_mutex); +- if (!owner) +- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); +- raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); +- ret = fixup_pi_state_owner(uaddr, q, owner); ++ ret = fixup_pi_state_owner(uaddr, q, NULL); + goto out; + } + + /* + * Paranoia check. If we did not take the lock, then we should not be +- * the owner of the rt_mutex. ++ * the owner of the rt_mutex. Warn and establish consistent state. + */ +- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) +- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " +- "pi-state %p\n", ret, +- q->pi_state->pi_mutex.owner, +- q->pi_state->owner); ++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) ++ return fixup_pi_state_owner(uaddr, q, current); + + out: + return ret ? ret : locked; +@@ -2721,7 +2778,7 @@ retry_private: + if (!trylock) { + ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to); + } else { +- ret = rt_mutex_trylock(&q.pi_state->pi_mutex); ++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); + /* Fixup the trylock return value: */ + ret = ret ? 0 : -EWOULDBLOCK; + } +@@ -2739,13 +2796,6 @@ retry_private: + if (res) + ret = (res < 0) ? res : 0; + +- /* +- * If fixup_owner() faulted and was unable to handle the fault, unlock +- * it and return the fault to userspace. +- */ +- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) +- rt_mutex_unlock(&q.pi_state->pi_mutex); +- + /* Unqueue and drop the lock */ + unqueue_me_pi(&q); + +@@ -3050,8 +3100,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (q.pi_state && (q.pi_state->owner != current)) { + spin_lock(q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); +- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) +- rt_mutex_unlock(&q.pi_state->pi_mutex); + /* + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. +@@ -3088,14 +3136,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (res) + ret = (res < 0) ? 
res : 0; + +- /* +- * If fixup_pi_state_owner() faulted and was unable to handle +- * the fault, unlock the rt_mutex and return the fault to +- * userspace. +- */ +- if (ret && rt_mutex_owner(pi_mutex) == current) +- rt_mutex_unlock(pi_mutex); +- + /* Unqueue and drop the lock. */ + unqueue_me_pi(&q); + } +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 3938e4670b89b..51867a2e537fa 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1884,6 +1884,10 @@ int register_kretprobe(struct kretprobe *rp) + int i; + void *addr; + ++ /* If only rp->kp.addr is specified, check reregistering kprobes */ ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) ++ return -EINVAL; ++ + if (kretprobe_blacklist_size) { + addr = kprobe_addr(&rp->kp); + if (IS_ERR(addr)) +diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c +index 62b6cee8ea7f9..0613c4b1d0596 100644 +--- a/kernel/locking/rtmutex-debug.c ++++ b/kernel/locking/rtmutex-debug.c +@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) + lock->name = name; + } + +-void +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) +-{ +-} +- +-void rt_mutex_deadlock_account_unlock(struct task_struct *task) +-{ +-} +- +diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h +index d0519c3432b67..b585af9a1b508 100644 +--- a/kernel/locking/rtmutex-debug.h ++++ b/kernel/locking/rtmutex-debug.h +@@ -9,9 +9,6 @@ + * This file contains macros used solely by rtmutex.c. Debug version. + */ + +-extern void +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); +-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); + extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); + extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); + extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 7615e7722258c..6ff4156b3929e 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -956,8 +956,6 @@ takeit: + */ + rt_mutex_set_owner(lock, task); + +- rt_mutex_deadlock_account_lock(lock, task); +- + return 1; + } + +@@ -1316,6 +1314,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, + return ret; + } + ++static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) ++{ ++ int ret = try_to_take_rt_mutex(lock, current, NULL); ++ ++ /* ++ * try_to_take_rt_mutex() sets the lock waiters bit ++ * unconditionally. Clean this up. ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ return ret; ++} ++ + /* + * Slow path try-lock function: + */ +@@ -1338,13 +1349,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) + */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + +- ret = try_to_take_rt_mutex(lock, current, NULL); +- +- /* +- * try_to_take_rt_mutex() sets the lock waiters bit +- * unconditionally. Clean this up. +- */ +- fixup_rt_mutex_waiters(lock); ++ ret = __rt_mutex_slowtrylock(lock); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +@@ -1365,8 +1370,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, + + debug_rt_mutex_unlock(lock); + +- rt_mutex_deadlock_account_unlock(current); +- + /* + * We must be careful here if the fast path is enabled. 
If we + * have no waiters queued we cannot set owner to NULL here +@@ -1432,11 +1435,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk)) + { +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; +- } else +- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); ++ ++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + } + + static inline int +@@ -1448,21 +1450,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, + enum rtmutex_chainwalk chwalk)) + { + if (chwalk == RT_MUTEX_MIN_CHAINWALK && +- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; +- } else +- return slowfn(lock, state, timeout, chwalk); ++ ++ return slowfn(lock, state, timeout, chwalk); + } + + static inline int + rt_mutex_fasttrylock(struct rt_mutex *lock, + int (*slowfn)(struct rt_mutex *lock)) + { +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; +- } ++ + return slowfn(lock); + } + +@@ -1472,19 +1472,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock, + struct wake_q_head *wqh)) + { + WAKE_Q(wake_q); ++ bool deboost; + +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { +- rt_mutex_deadlock_account_unlock(current); ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) ++ return; + +- } else { +- bool deboost = slowfn(lock, &wake_q); ++ deboost = slowfn(lock, &wake_q); + +- wake_up_q(&wake_q); ++ wake_up_q(&wake_q); + +- /* Undo pi boosting if necessary: */ +- if (deboost) +- rt_mutex_adjust_prio(current); +- } ++ /* Undo pi boosting if necessary: */ ++ if (deboost) ++ rt_mutex_adjust_prio(current); + } + + /** +@@ -1519,15 +1518,28 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + + /* + * Futex variant with full deadlock detection. ++ * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock(). + */ +-int rt_mutex_timed_futex_lock(struct rt_mutex *lock, ++int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout) + { + might_sleep(); + +- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, +- RT_MUTEX_FULL_CHAINWALK, +- rt_mutex_slowlock); ++ return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, ++ timeout, RT_MUTEX_FULL_CHAINWALK); ++} ++ ++/* ++ * Futex variant, must not use fastpath. ++ */ ++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) ++{ ++ return rt_mutex_slowtrylock(lock); ++} ++ ++int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) ++{ ++ return __rt_mutex_slowtrylock(lock); + } + + /** +@@ -1586,20 +1598,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock) + EXPORT_SYMBOL_GPL(rt_mutex_unlock); + + /** +- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock +- * @lock: the rt_mutex to be unlocked +- * +- * Returns: true/false indicating whether priority adjustment is +- * required or not. ++ * Futex variant, that since futex variants do not use the fast-path, can be ++ * simple and will not need to retry. 
+ */ +-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh) ++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wake_q) + { +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { +- rt_mutex_deadlock_account_unlock(current); +- return false; ++ lockdep_assert_held(&lock->wait_lock); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ return false; /* done */ ++ } ++ ++ mark_wakeup_next_waiter(wake_q, lock); ++ return true; /* deboost and wakeups */ ++} ++ ++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) ++{ ++ WAKE_Q(wake_q); ++ bool deboost; ++ ++ raw_spin_lock_irq(&lock->wait_lock); ++ deboost = __rt_mutex_futex_unlock(lock, &wake_q); ++ raw_spin_unlock_irq(&lock->wait_lock); ++ ++ if (deboost) { ++ wake_up_q(&wake_q); ++ rt_mutex_adjust_prio(current); + } +- return rt_mutex_slowunlock(lock, wqh); + } + + /** +@@ -1656,7 +1686,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + __rt_mutex_init(lock, NULL); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); +- rt_mutex_deadlock_account_lock(lock, proxy_owner); + } + + /** +@@ -1667,12 +1696,10 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + * No locking. Caller has to do serializing itself + * Special API call for PI-futex support + */ +-void rt_mutex_proxy_unlock(struct rt_mutex *lock, +- struct task_struct *proxy_owner) ++void rt_mutex_proxy_unlock(struct rt_mutex *lock) + { + debug_rt_mutex_proxy_unlock(lock); + rt_mutex_set_owner(lock, NULL); +- rt_mutex_deadlock_account_unlock(proxy_owner); + } + + /** +diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h +index c4060584c4076..6607802efa8bd 100644 +--- a/kernel/locking/rtmutex.h ++++ b/kernel/locking/rtmutex.h +@@ -11,8 +11,6 @@ + */ + + #define rt_mutex_deadlock_check(l) (0) +-#define rt_mutex_deadlock_account_lock(m, t) do { } while (0) +-#define rt_mutex_deadlock_account_unlock(l) do { } while (0) + #define debug_rt_mutex_init_waiter(w) do { } while (0) + #define debug_rt_mutex_free_waiter(w) do { } while (0) + #define debug_rt_mutex_lock(l) do { } while (0) +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h +index 14cbafed00142..bea5d677fe343 100644 +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -102,8 +102,7 @@ enum rtmutex_chainwalk { + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); +-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, +- struct task_struct *proxy_owner); ++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); + extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task); +@@ -113,8 +112,13 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter); + extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); +-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh); ++extern int rt_mutex_futex_trylock(struct rt_mutex *l); ++extern int __rt_mutex_futex_trylock(struct rt_mutex *l); ++ ++extern void rt_mutex_futex_unlock(struct rt_mutex *lock); ++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wqh); ++ + extern void 
rt_mutex_adjust_prio(struct task_struct *task); + + #ifdef CONFIG_DEBUG_RT_MUTEXES +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 0385c57a2b7af..05ca01ef97f7f 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1753,7 +1753,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + spinlock_t *ptl; + struct mm_struct *mm = vma->vm_mm; + unsigned long haddr = address & HPAGE_PMD_MASK; +- bool was_locked = false; ++ bool do_unlock_page = false; + pmd_t _pmd; + + mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); +@@ -1766,7 +1766,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + VM_BUG_ON(freeze && !page); + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); +- was_locked = true; + if (page != pmd_page(*pmd)) + goto out; + } +@@ -1775,19 +1774,29 @@ repeat: + if (pmd_trans_huge(*pmd)) { + if (!page) { + page = pmd_page(*pmd); +- if (unlikely(!trylock_page(page))) { +- get_page(page); +- _pmd = *pmd; +- spin_unlock(ptl); +- lock_page(page); +- spin_lock(ptl); +- if (unlikely(!pmd_same(*pmd, _pmd))) { +- unlock_page(page); ++ /* ++ * An anonymous page must be locked, to ensure that a ++ * concurrent reuse_swap_page() sees stable mapcount; ++ * but reuse_swap_page() is not used on shmem or file, ++ * and page lock must not be taken when zap_pmd_range() ++ * calls __split_huge_pmd() while i_mmap_lock is held. ++ */ ++ if (PageAnon(page)) { ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } + put_page(page); +- page = NULL; +- goto repeat; + } +- put_page(page); ++ do_unlock_page = true; + } + } + if (PageMlocked(page)) +@@ -1797,7 +1806,7 @@ repeat: + __split_huge_pmd_locked(vma, pmd, haddr, freeze); + out: + spin_unlock(ptl); +- if (!was_locked && page) ++ if (do_unlock_page) + unlock_page(page); + mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); + } +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 52b5e0e026d60..5a16d892c891c 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1210,12 +1210,11 @@ struct hstate *size_to_hstate(unsigned long size) + */ + bool page_huge_active(struct page *page) + { +- VM_BUG_ON_PAGE(!PageHuge(page), page); +- return PageHead(page) && PagePrivate(&page[1]); ++ return PageHeadHuge(page) && PagePrivate(&page[1]); + } + + /* never called for tail page */ +-static void set_page_huge_active(struct page *page) ++void set_page_huge_active(struct page *page) + { + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); + SetPagePrivate(&page[1]); +@@ -4657,9 +4656,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) + { + bool ret = true; + +- VM_BUG_ON_PAGE(!PageHead(page), page); + spin_lock(&hugetlb_lock); +- if (!page_huge_active(page) || !get_page_unless_zero(page)) { ++ if (!PageHeadHuge(page) || !page_huge_active(page) || ++ !get_page_unless_zero(page)) { + ret = false; + goto unlock; + } +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c +index 482c94d9d958a..d1c7dcc234486 100644 +--- a/net/lapb/lapb_out.c ++++ b/net/lapb/lapb_out.c +@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb) + skb = skb_dequeue(&lapb->write_queue); + + do { +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { ++ skbn = skb_copy(skb, GFP_ATOMIC); ++ if (!skbn) { + skb_queue_head(&lapb->write_queue, skb); + break; + } +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c 
+index f783d1377d9a8..9f0f437a09b95 100644 +--- a/net/mac80211/driver-ops.c ++++ b/net/mac80211/driver-ops.c +@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local, + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = drv_sta_add(local, sdata, &sta->sta); +- if (ret == 0) ++ if (ret == 0) { + sta->uploaded = true; ++ if (rcu_access_pointer(sta->sta.rates)) ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta); ++ } + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + drv_sta_remove(local, sdata, &sta->sta); +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index e6096dfd02105..41421609a0f02 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -892,7 +892,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, + if (old) + kfree_rcu(old, rcu_head); + +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ++ if (sta->uploaded) ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + + return 0; + } +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 9be82ed02e0e5..c38d68131d02e 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -3802,6 +3802,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) + + rcu_read_lock(); + key = rcu_dereference(sta->ptk[sta->ptk_idx]); ++ if (!key) ++ key = rcu_dereference(sdata->default_unicast_key); + if (key) { + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 912ed9b901ac9..45038c837eab4 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -393,7 +393,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, + { + struct qdisc_rate_table *rtab; + +- if (tab == NULL || r->rate == 0 || r->cell_log == 0 || ++ if (tab == NULL || r->rate == 0 || ++ r->cell_log == 0 || r->cell_log >= 32 || + nla_len(tab) != TC_RTAB_SIZE) + return NULL; + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 720de648510dc..b003cb07254a8 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -6284,7 +6284,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + ALC225_STANDARD_PINS, + {0x12, 0xb7a60130}, +- {0x13, 0xb8a60140}, ++ {0x13, 0xb8a61140}, + {0x17, 0x90170110}), + {} + }; +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c +index d84c28eac262d..0ba5bb51bd93c 100644 +--- a/tools/objtool/elf.c ++++ b/tools/objtool/elf.c +@@ -226,8 +226,11 @@ static int read_symbols(struct elf *elf) + + symtab = find_section_by_name(elf, ".symtab"); + if (!symtab) { +- WARN("missing symbol table"); +- return -1; ++ /* ++ * A missing symbol table is actually possible if it's an empty ++ * .o file. This can happen for thunk_64.o. 
++ */ ++ return 0; + } + + symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize; diff --git a/patch/kernel/meson64-legacy/patch-4.9.257-258.patch b/patch/kernel/meson64-legacy/patch-4.9.257-258.patch new file mode 100644 index 0000000000..94ef50a317 --- /dev/null +++ b/patch/kernel/meson64-legacy/patch-4.9.257-258.patch @@ -0,0 +1,2316 @@ +diff --git a/Makefile b/Makefile +index e53096154f816..e5955f122ffd3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 257 ++SUBLEVEL = 258 + EXTRAVERSION = + NAME = Roaring Lionus + +@@ -762,6 +762,13 @@ ifdef CONFIG_FUNCTION_TRACER + ifndef CC_FLAGS_FTRACE + CC_FLAGS_FTRACE := -pg + endif ++ifdef CONFIG_FTRACE_MCOUNT_RECORD ++ # gcc 5 supports generating the mcount tables directly ++ ifeq ($(call cc-option-yn,-mrecord-mcount),y) ++ CC_FLAGS_FTRACE += -mrecord-mcount ++ export CC_USING_RECORD_MCOUNT := 1 ++ endif ++endif + export CC_FLAGS_FTRACE + ifdef CONFIG_HAVE_FENTRY + CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY) +diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi +index 2802c9565b6ca..976a75a4eb2c6 100644 +--- a/arch/arm/boot/dts/lpc32xx.dtsi ++++ b/arch/arm/boot/dts/lpc32xx.dtsi +@@ -323,9 +323,6 @@ + + clocks = <&xtal_32k>, <&xtal>; + clock-names = "xtal_32k", "xtal"; +- +- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>; +- assigned-clock-rates = <208000000>; + }; + }; + +diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c +index 0ed01f2d5ee4b..02579e6569f0c 100644 +--- a/arch/arm/xen/p2m.c ++++ b/arch/arm/xen/p2m.c +@@ -93,8 +93,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + for (i = 0; i < count; i++) { + if (map_ops[i].status) + continue; +- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, +- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); ++ if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, ++ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) { ++ return -ENOMEM; ++ } + } + + return 0; +diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c +index dc2d16ce8a0d5..3e33a9844d99a 100644 +--- a/arch/h8300/kernel/asm-offsets.c ++++ b/arch/h8300/kernel/asm-offsets.c +@@ -62,6 +62,9 @@ int main(void) + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_CPU, thread_info, cpu); + OFFSET(TI_PRE, thread_info, preempt_count); ++#ifdef CONFIG_PREEMPTION ++ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); ++#endif + + return 0; + } +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index a95d414663b1e..9f0099c46c881 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -61,6 +61,9 @@ endif + KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow + KBUILD_CFLAGS += $(call cc-option,-mno-avx,) + ++# Intel CET isn't enabled in the kernel ++KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) ++ + ifeq ($(CONFIG_X86_32),y) + BITS := 32 + UTS_MACHINE := i386 +@@ -137,9 +140,6 @@ else + KBUILD_CFLAGS += -mno-red-zone + KBUILD_CFLAGS += -mcmodel=kernel + +- # Intel CET isn't enabled in the kernel +- KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +- + # -funit-at-a-time shrinks the kernel .text considerably + # unfortunately it makes reading oopses harder. 
+ KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c +index 37129db76d33e..fbf8508e558ac 100644 +--- a/arch/x86/xen/p2m.c ++++ b/arch/x86/xen/p2m.c +@@ -725,7 +725,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + unsigned long mfn, pfn; + + /* Do not add to override if the map failed. */ +- if (map_ops[i].status) ++ if (map_ops[i].status != GNTST_okay || ++ (kmap_ops && kmap_ops[i].status != GNTST_okay)) + continue; + + if (map_ops[i].flags & GNTMAP_contains_pte) { +@@ -763,17 +764,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); + unsigned long pfn = page_to_pfn(pages[i]); + +- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { ++ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) ++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); ++ else + ret = -EINVAL; +- goto out; +- } +- +- set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } + if (kunmap_ops) + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, +- kunmap_ops, count); +-out: ++ kunmap_ops, count) ?: ret; ++ + return ret; + } + EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c +index 4f643a87f9c7d..2b739ba841b1a 100644 +--- a/drivers/block/xen-blkback/blkback.c ++++ b/drivers/block/xen-blkback/blkback.c +@@ -843,8 +843,11 @@ again: + pages[i]->page = persistent_gnt->page; + pages[i]->persistent_gnt = persistent_gnt; + } else { +- if (get_free_page(ring, &pages[i]->page)) +- goto out_of_memory; ++ if (get_free_page(ring, &pages[i]->page)) { ++ put_free_pages(ring, pages_to_gnt, segs_to_map); ++ ret = -ENOMEM; ++ goto out; ++ } + addr = vaddr(pages[i]->page); + pages_to_gnt[segs_to_map] = pages[i]->page; + pages[i]->persistent_gnt = NULL; +@@ -860,10 +863,8 @@ again: + break; + } + +- if (segs_to_map) { ++ if (segs_to_map) + ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); +- BUG_ON(ret); +- } + + /* + * Now swizzle the MFN in our domain with the MFN from the other domain +@@ -878,7 +879,7 @@ again: + pr_debug("invalid buffer -- could not remap it\n"); + put_free_pages(ring, &pages[seg_idx]->page, 1); + pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; +- ret |= 1; ++ ret |= !ret; + goto next; + } + pages[seg_idx]->handle = map[new_map_idx].handle; +@@ -930,17 +931,18 @@ next: + } + segs_to_map = 0; + last_map = map_until; +- if (map_until != num) ++ if (!ret && map_until != num) + goto again; + +- return ret; +- +-out_of_memory: +- pr_alert("%s: out of memory\n", __func__); +- put_free_pages(ring, pages_to_gnt, segs_to_map); +- for (i = last_map; i < num; i++) ++out: ++ for (i = last_map; i < num; i++) { ++ /* Don't zap current batch's valid persistent grants. 
*/ ++ if(i >= last_map + segs_to_map) ++ pages[i]->persistent_gnt = NULL; + pages[i]->handle = BLKBACK_INVALID_HANDLE; +- return -ENOMEM; ++ } ++ ++ return ret; + } + + static int xen_blkbk_map_seg(struct pending_req *pending_req) +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +index f4d75ffe3d8a8..7f01fb91ea668 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +@@ -518,7 +518,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, + const size_t bufsz = sizeof(buf); + int pos = 0; + ++ mutex_lock(&mvm->mutex); + iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); ++ mutex_unlock(&mvm->mutex); ++ + do_div(curr_os, NSEC_PER_USEC); + diff = curr_os - curr_gp2; + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index 6d38eec3f9d3c..a78aaf17116e9 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -1104,6 +1104,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); ++ put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); + } +@@ -1202,7 +1203,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) + module_put(THIS_MODULE); + return; + } +- reprobe->dev = mvm->trans->dev; ++ reprobe->dev = get_device(mvm->trans->dev); + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +index 174e45d78c46a..ff564198d2cef 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +@@ -676,6 +676,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + ++ if (!txq) { ++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); ++ return; ++ } ++ + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index fd2ac6cd0c691..0024200c30ce4 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -1328,13 +1328,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) + return 0; + + gnttab_batch_copy(queue->tx_copy_ops, nr_cops); +- if (nr_mops != 0) { ++ if (nr_mops != 0) + ret = gnttab_map_refs(queue->tx_map_ops, + NULL, + queue->pages_to_map, + nr_mops); +- BUG_ON(ret); +- } + + work_done = xenvif_tx_submit(queue); + +diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c +index f152246c7dfb7..ddfb1cfa2dd94 100644 +--- a/drivers/net/xen-netback/rx.c ++++ b/drivers/net/xen-netback/rx.c +@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) + RING_IDX prod, cons; + struct sk_buff *skb; + int needed; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&queue->rx_queue.lock, flags); + + skb = skb_peek(&queue->rx_queue); +- if (!skb) ++ if (!skb) { ++ 
spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + return false; ++ } + + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); + if (skb_is_gso(skb)) +@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) + if (skb->sw_hash) + needed++; + ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags); ++ + do { + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; +diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c +index 2e0caaaa766a3..72fc33bba99c1 100644 +--- a/drivers/remoteproc/qcom_q6v5_pil.c ++++ b/drivers/remoteproc/qcom_q6v5_pil.c +@@ -193,6 +193,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw) + { + struct q6v5 *qproc = rproc->priv; + ++ /* MBA is restricted to a maximum size of 1M */ ++ if (fw->size > qproc->mba_size || fw->size > SZ_1M) { ++ dev_err(qproc->dev, "MBA firmware load failed\n"); ++ return -EINVAL; ++ } ++ + memcpy(qproc->mba_region, fw->data, fw->size); + + return 0; +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c +index 9c2c7fe612806..ba83c36b76bdf 100644 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -878,7 +878,8 @@ qla27xx_template_checksum(void *p, ulong size) + static inline int + qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) + { +- return qla27xx_template_checksum(tmp, tmp->template_size) == 0; ++ return qla27xx_template_checksum(tmp, ++ le32_to_cpu(tmp->template_size)) == 0; + } + + static inline int +@@ -894,7 +895,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha) + ulong len; + + if (qla27xx_fwdt_template_valid(tmp)) { +- len = tmp->template_size; ++ len = le32_to_cpu(tmp->template_size); + tmp = memcpy(vha->hw->fw_dump, tmp, len); + ql27xx_edit_template(vha, tmp); + qla27xx_walk_template(vha, tmp, tmp, &len); +@@ -910,7 +911,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha) + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { +- len = tmp->template_size; ++ len = le32_to_cpu(tmp->template_size); + qla27xx_walk_template(vha, tmp, NULL, &len); + } + +@@ -922,7 +923,7 @@ qla27xx_fwdt_template_size(void *p) + { + struct qla27xx_fwdt_template *tmp = p; + +- return tmp->template_size; ++ return le32_to_cpu(tmp->template_size); + } + + ulong +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h +index 141c1c5e73f42..2d3e1a8349b3b 100644 +--- a/drivers/scsi/qla2xxx/qla_tmpl.h ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h +@@ -13,7 +13,7 @@ + struct __packed qla27xx_fwdt_template { + uint32_t template_type; + uint32_t entry_offset; +- uint32_t template_size; ++ __le32 template_size; + uint32_t reserved_1; + + uint32_t entry_count; +diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c +index bd86f84f37901..3862edf59f7de 100644 +--- a/drivers/usb/dwc3/ulpi.c ++++ b/drivers/usb/dwc3/ulpi.c +@@ -10,6 +10,8 @@ + * published by the Free Software Foundation. 
+ */ + ++#include ++#include + #include + + #include "core.h" +@@ -20,12 +22,22 @@ + DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \ + DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a)) + +-static int dwc3_ulpi_busyloop(struct dwc3 *dwc) ++#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L) ++ ++static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read) + { +- unsigned count = 1000; ++ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY; ++ unsigned int count = 1000; + u32 reg; + ++ if (addr >= ULPI_EXT_VENDOR_SPECIFIC) ++ ns += DWC3_ULPI_BASE_DELAY; ++ ++ if (read) ++ ns += DWC3_ULPI_BASE_DELAY; ++ + while (count--) { ++ ndelay(ns); + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0)); + if (!(reg & DWC3_GUSB2PHYACC_BUSY)) + return 0; +@@ -44,7 +56,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr) + reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr); + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); + +- ret = dwc3_ulpi_busyloop(dwc); ++ ret = dwc3_ulpi_busyloop(dwc, addr, true); + if (ret) + return ret; + +@@ -62,7 +74,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val) + reg |= DWC3_GUSB2PHYACC_WRITE | val; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); + +- return dwc3_ulpi_busyloop(dwc); ++ return dwc3_ulpi_busyloop(dwc, addr, false); + } + + static const struct ulpi_ops dwc3_ulpi_ops = { +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c +index 910b5d40c6e9f..69d59102ff1be 100644 +--- a/drivers/xen/gntdev.c ++++ b/drivers/xen/gntdev.c +@@ -293,36 +293,47 @@ static int map_grant_pages(struct grant_map *map) + * to the kernel linear addresses of the struct pages. + * These ptes are completely different from the user ptes dealt + * with find_grant_ptes. ++ * Note that GNTMAP_device_map isn't needed here: The ++ * dev_bus_addr output field gets consumed only from ->map_ops, ++ * and by not requesting it when mapping we also avoid needing ++ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra ++ * reference to the page in the hypervisor). + */ ++ unsigned int flags = (map->flags & ~GNTMAP_device_map) | ++ GNTMAP_host_map; ++ + for (i = 0; i < map->count; i++) { + unsigned long address = (unsigned long) + pfn_to_kaddr(page_to_pfn(map->pages[i])); + BUG_ON(PageHighMem(map->pages[i])); + +- gnttab_set_map_op(&map->kmap_ops[i], address, +- map->flags | GNTMAP_host_map, ++ gnttab_set_map_op(&map->kmap_ops[i], address, flags, + map->grants[i].ref, + map->grants[i].domid); + gnttab_set_unmap_op(&map->kunmap_ops[i], address, +- map->flags | GNTMAP_host_map, -1); ++ flags, -1); + } + } + + pr_debug("map %d+%d\n", map->index, map->count); + err = gnttab_map_refs(map->map_ops, use_ptemod ? 
map->kmap_ops : NULL, + map->pages, map->count); +- if (err) +- return err; + + for (i = 0; i < map->count; i++) { +- if (map->map_ops[i].status) { ++ if (map->map_ops[i].status == GNTST_okay) ++ map->unmap_ops[i].handle = map->map_ops[i].handle; ++ else if (!err) + err = -EINVAL; +- continue; +- } + +- map->unmap_ops[i].handle = map->map_ops[i].handle; +- if (use_ptemod) +- map->kunmap_ops[i].handle = map->kmap_ops[i].handle; ++ if (map->flags & GNTMAP_device_map) ++ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; ++ ++ if (use_ptemod) { ++ if (map->kmap_ops[i].status == GNTST_okay) ++ map->kunmap_ops[i].handle = map->kmap_ops[i].handle; ++ else if (!err) ++ err = -EINVAL; ++ } + } + return err; + } +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c +index 3243d917651a3..4bba877ef5477 100644 +--- a/drivers/xen/xen-scsiback.c ++++ b/drivers/xen/xen-scsiback.c +@@ -423,12 +423,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, + return 0; + + err = gnttab_map_refs(map, NULL, pg, cnt); +- BUG_ON(err); + for (i = 0; i < cnt; i++) { + if (unlikely(map[i].status != GNTST_okay)) { + pr_err("invalid buffer -- could not remap it\n"); + map[i].handle = SCSIBACK_INVALID_HANDLE; +- err = -ENOMEM; ++ if (!err) ++ err = -ENOMEM; + } else { + get_page(pg[i]); + } +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index f978ae2bb846f..2de656ecc48bb 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -1971,7 +1971,7 @@ void wb_workfn(struct work_struct *work) + struct bdi_writeback, dwork); + long pages_written; + +- set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); ++ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); + current->flags |= PF_SWAPWRITE; + + if (likely(!current_is_workqueue_rescuer() || +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index 299dbf59f28f8..3a583aa1fafeb 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -92,6 +92,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + + if (ovl_is_private_xattr(name)) + continue; ++ ++ error = security_inode_copy_up_xattr(name); ++ if (error < 0 && error != -EOPNOTSUPP) ++ break; ++ if (error == 1) { ++ error = 0; ++ continue; /* Discard */ ++ } + retry: + size = vfs_getxattr(old, name, value, value_size); + if (size == -ERANGE) +@@ -115,13 +123,6 @@ retry: + goto retry; + } + +- error = security_inode_copy_up_xattr(name); +- if (error < 0 && error != -EOPNOTSUPP) +- break; +- if (error == 1) { +- error = 0; +- continue; /* Discard */ +- } + error = vfs_setxattr(new, name, value, size, 0); + if (error) + break; +diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c +index 8073b6532cf04..d2a806416c3ab 100644 +--- a/fs/squashfs/export.c ++++ b/fs/squashfs/export.c +@@ -54,12 +54,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) + struct squashfs_sb_info *msblk = sb->s_fs_info; + int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); + int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); +- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ u64 start; + __le64 ino; + int err; + + TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); + ++ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes) ++ return -EINVAL; ++ ++ start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ + err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); + if (err < 0) + return err; +@@ -124,7 +129,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + u64 
lookup_table_start, u64 next_table, unsigned int inodes) + { + unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); ++ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_inode_lookup_table, length %d\n", length); + +@@ -134,20 +142,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + if (inodes == 0) + return ERR_PTR(-EINVAL); + +- /* length bytes should not extend into the next table - this check +- * also traps instances where lookup_table_start is incorrectly larger +- * than the next table start ++ /* ++ * The computed size of the lookup table (length bytes) should exactly ++ * match the table start and end points + */ +- if (lookup_table_start + length > next_table) ++ if (length != (next_table - lookup_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, lookup_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first inode lookup table metadata block, +- * this should be less than lookup_table_start ++ * table0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed inode lookup blocks. Each entry should be ++ * less than the next (i.e. table[0] < table[1]), and the difference ++ * between them should be SQUASHFS_METADATA_SIZE or less. ++ * table[indexes - 1] should be less than lookup_table_start, and ++ * again the difference should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c +index d38ea3dab9515..8ccc0e3f6ea5a 100644 +--- a/fs/squashfs/id.c ++++ b/fs/squashfs/id.c +@@ -48,10 +48,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_ID_BLOCK(index); + int offset = SQUASHFS_ID_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->id_table[block]); ++ u64 start_block; + __le32 disk_id; + int err; + ++ if (index >= msblk->ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->id_table[block]); ++ + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, + sizeof(disk_id)); + if (err < 0) +@@ -69,7 +74,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + u64 id_table_start, u64 next_table, unsigned short no_ids) + { + unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); ++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_id_index_table, length %d\n", length); + +@@ -80,20 +88,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + return ERR_PTR(-EINVAL); + + /* +- * length bytes should not extend into the next table - this check +- * also traps instances where id_table_start is incorrectly larger +- * than the next table start ++ * The computed size of the index table (length bytes) should exactly ++ * match the table start and end points + */ +- if (id_table_start + length > next_table) ++ if (length != (next_table - id_table_start)) + return ERR_PTR(-EINVAL); + + 
table = squashfs_read_table(sb, id_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first id lookup table metadata block, this +- * should be less than id_table_start ++ * table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] ++ * should be less than id_table_start, and again the difference ++ * should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h +index ef69c31947bf8..5234c19a0eabc 100644 +--- a/fs/squashfs/squashfs_fs_sb.h ++++ b/fs/squashfs/squashfs_fs_sb.h +@@ -77,5 +77,6 @@ struct squashfs_sb_info { + unsigned int inodes; + unsigned int fragments; + int xattr_ids; ++ unsigned int ids; + }; + #endif +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c +index 1516bb779b8d4..5abc9d03397c1 100644 +--- a/fs/squashfs/super.c ++++ b/fs/squashfs/super.c +@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + msblk->directory_table = le64_to_cpu(sblk->directory_table_start); + msblk->inodes = le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); ++ msblk->ids = le16_to_cpu(sblk->no_ids); + flags = le16_to_cpu(sblk->flags); + + TRACE("Found valid superblock on %pg\n", sb->s_bdev); +@@ -187,7 +188,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + TRACE("Block size %d\n", msblk->block_size); + TRACE("Number of inodes %d\n", msblk->inodes); + TRACE("Number of fragments %d\n", msblk->fragments); +- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); ++ TRACE("Number of ids %d\n", msblk->ids); + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); + TRACE("sblk->fragment_table_start %llx\n", +@@ -244,8 +245,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + allocate_id_index_table: + /* Allocate and read id index table */ + msblk->id_table = squashfs_read_id_index_table(sb, +- le64_to_cpu(sblk->id_table_start), next_table, +- le16_to_cpu(sblk->no_ids)); ++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); + if (IS_ERR(msblk->id_table)) { + ERROR("unable to read id index table\n"); + err = PTR_ERR(msblk->id_table); +diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h +index afe70f815e3de..86b0a0073e51f 100644 +--- a/fs/squashfs/xattr.h ++++ b/fs/squashfs/xattr.h +@@ -30,8 +30,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, + static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, + u64 start, u64 *xattr_table_start, int *xattr_ids) + { ++ struct squashfs_xattr_id_table *id_table; ++ ++ id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ if (IS_ERR(id_table)) ++ return (__le64 *) id_table; ++ ++ 
*xattr_table_start = le64_to_cpu(id_table->xattr_table_start); ++ kfree(id_table); ++ + ERROR("Xattrs in filesystem, these will be ignored\n"); +- *xattr_table_start = start; + return ERR_PTR(-ENOTSUPP); + } + +diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c +index c89607d690c48..3a655d879600c 100644 +--- a/fs/squashfs/xattr_id.c ++++ b/fs/squashfs/xattr_id.c +@@ -44,10 +44,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_XATTR_BLOCK(index); + int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]); ++ u64 start_block; + struct squashfs_xattr_id id; + int err; + ++ if (index >= msblk->xattr_ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->xattr_id_table[block]); ++ + err = squashfs_read_metadata(sb, &id, &start_block, &offset, + sizeof(id)); + if (err < 0) +@@ -63,13 +68,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + /* + * Read uncompressed xattr id lookup table indexes from disk into memory + */ +-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, ++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start, + u64 *xattr_table_start, int *xattr_ids) + { +- unsigned int len; ++ struct squashfs_sb_info *msblk = sb->s_fs_info; ++ unsigned int len, indexes; + struct squashfs_xattr_id_table *id_table; ++ __le64 *table; ++ u64 start, end; ++ int n; + +- id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table)); + if (IS_ERR(id_table)) + return (__le64 *) id_table; + +@@ -83,13 +92,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, + if (*xattr_ids == 0) + return ERR_PTR(-EINVAL); + +- /* xattr_table should be less than start */ +- if (*xattr_table_start >= start) ++ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); ++ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids); ++ ++ /* ++ * The computed size of the index table (len bytes) should exactly ++ * match the table start and end points ++ */ ++ start = table_start + sizeof(*id_table); ++ end = msblk->bytes_used; ++ ++ if (len != (end - start)) + return ERR_PTR(-EINVAL); + +- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); ++ table = squashfs_read_table(sb, start, len); ++ if (IS_ERR(table)) ++ return table; ++ ++ /* table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed xattr id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] ++ * should be less than table_start, and again the difference ++ * shouls be SQUASHFS_METADATA_SIZE or less. ++ * ++ * Finally xattr_table_start should be less than table[0]. 
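
The same monotonicity check recurs across the squashfs export.c, id.c and xattr_id.c hunks in this patch. A minimal user-space sketch of the invariant being enforced, assuming host byte order (the kernel entries are __le64) and the usual 8K metadata block size:

/* Sketch of the shared bounds check: entries strictly increase by at
 * most one metadata block, and the last one sits just below the index
 * table itself. */
#include <stdbool.h>
#include <stdint.h>

#define SQUASHFS_METADATA_SIZE 8192

static bool index_table_sane(const uint64_t *table, int indexes,
			     uint64_t table_start)
{
	int n;

	for (n = 0; n < indexes - 1; n++) {
		if (table[n] >= table[n + 1])
			return false;	/* must strictly increase */
		if (table[n + 1] - table[n] > SQUASHFS_METADATA_SIZE)
			return false;	/* gap too large to be real */
	}
	return table[indexes - 1] < table_start &&
	       table_start - table[indexes - 1] <= SQUASHFS_METADATA_SIZE;
}
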
++ */ ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- TRACE("In read_xattr_index_table, length %d\n", len); ++ if (*xattr_table_start >= le64_to_cpu(table[0])) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- return squashfs_read_table(sb, start + sizeof(*id_table), len); ++ return table; + } +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h +index 63f17b106a4a6..57db558c9a616 100644 +--- a/include/linux/backing-dev.h ++++ b/include/linux/backing-dev.h +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -517,4 +518,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) + (1 << WB_async_congested)); + } + ++extern const char *bdi_unknown_name; ++ ++static inline const char *bdi_dev_name(struct backing_dev_info *bdi) ++{ ++ if (!bdi || !bdi->dev) ++ return bdi_unknown_name; ++ return dev_name(bdi->dev); ++} ++ + #endif /* _LINUX_BACKING_DEV_H */ +diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h +index b3d34d3e0e7ef..9b8b014d13af1 100644 +--- a/include/linux/ftrace.h ++++ b/include/linux/ftrace.h +@@ -783,7 +783,9 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + + /* for init task */ +-#define INIT_FTRACE_GRAPH .ret_stack = NULL, ++#define INIT_FTRACE_GRAPH \ ++ .ret_stack = NULL, \ ++ .tracing_graph_pause = ATOMIC_INIT(0), + + /* + * Stack of return addresses for functions +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h +index 8b35bdbdc214c..fd77f8303ab9a 100644 +--- a/include/linux/memcontrol.h ++++ b/include/linux/memcontrol.h +@@ -490,9 +490,21 @@ bool mem_cgroup_oom_synchronize(bool wait); + extern int do_swap_account; + #endif + +-void lock_page_memcg(struct page *page); ++struct mem_cgroup *lock_page_memcg(struct page *page); ++void __unlock_page_memcg(struct mem_cgroup *memcg); + void unlock_page_memcg(struct page *page); + ++static inline void __mem_cgroup_update_page_stat(struct page *page, ++ struct mem_cgroup *memcg, ++ enum mem_cgroup_stat_index idx, ++ int val) ++{ ++ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); ++ ++ if (memcg && memcg->stat) ++ this_cpu_add(memcg->stat->count[idx], val); ++} ++ + /** + * mem_cgroup_update_page_stat - update page state statistics + * @page: the page +@@ -508,13 +520,12 @@ void unlock_page_memcg(struct page *page); + * mem_cgroup_update_page_stat(page, state, -1); + * unlock_page(page) or unlock_page_memcg(page) + */ ++ + static inline void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) + { +- VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); + +- if (page->mem_cgroup) +- this_cpu_add(page->mem_cgroup->stat->count[idx], val); ++ __mem_cgroup_update_page_stat(page, page->mem_cgroup, idx, val); + } + + static inline void mem_cgroup_inc_page_stat(struct page *page, +@@ -709,7 +720,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) + { + } + +-static inline void lock_page_memcg(struct page *page) ++static inline struct mem_cgroup *lock_page_memcg(struct page *page) ++{ ++ return NULL; ++} ++ ++static 
inline void __unlock_page_memcg(struct mem_cgroup *memcg) + { + } + +@@ -745,6 +761,13 @@ static inline void mem_cgroup_update_page_stat(struct page *page, + { + } + ++static inline void __mem_cgroup_update_page_stat(struct page *page, ++ struct mem_cgroup *memcg, ++ enum mem_cgroup_stat_index idx, ++ int nr) ++{ ++} ++ + static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_stat_index idx) + { +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 4d1b1056ac972..2aacafe2bce58 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -3701,6 +3701,7 @@ static inline void netif_tx_disable(struct net_device *dev) + + local_bh_disable(); + cpu = smp_processor_id(); ++ spin_lock(&dev->tx_global_lock); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + +@@ -3708,6 +3709,7 @@ static inline void netif_tx_disable(struct net_device *dev) + netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); + } ++ spin_unlock(&dev->tx_global_lock); + local_bh_enable(); + } + +diff --git a/include/linux/string.h b/include/linux/string.h +index 42eed573ebb63..66a91f5a34499 100644 +--- a/include/linux/string.h ++++ b/include/linux/string.h +@@ -29,6 +29,10 @@ size_t strlcpy(char *, const char *, size_t); + #ifndef __HAVE_ARCH_STRSCPY + ssize_t strscpy(char *, const char *, size_t); + #endif ++ ++/* Wraps calls to strscpy()/memset(), no arch specific code required */ ++ssize_t strscpy_pad(char *dest, const char *src, size_t count); ++ + #ifndef __HAVE_ARCH_STRCAT + extern char * strcat(char *, const char *); + #endif +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h +index 56c48c884a242..bf0db32b40aa0 100644 +--- a/include/linux/sunrpc/xdr.h ++++ b/include/linux/sunrpc/xdr.h +@@ -23,8 +23,7 @@ + #define XDR_QUADLEN(l) (((l) + 3) >> 2) + + /* +- * Generic opaque `network object.' At the kernel level, this type +- * is used only by lockd. ++ * Generic opaque `network object.' + */ + #define XDR_MAX_NETOBJ 1024 + struct xdr_netobj { +diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h +index ec964a924cd2f..49a72adc7135c 100644 +--- a/include/trace/events/writeback.h ++++ b/include/trace/events/writeback.h +@@ -65,8 +65,9 @@ TRACE_EVENT(writeback_dirty_page, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : ++ NULL), 32); + __entry->ino = mapping ? mapping->host->i_ino : 0; + __entry->index = page->index; + ), +@@ -95,8 +96,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, + struct backing_dev_info *bdi = inode_to_bdi(inode); + + /* may be called for files on pseudo FSes w/ unregistered bdi */ +- strncpy(__entry->name, +- bdi->dev ? 
dev_name(bdi->dev) : "(unknown)", 32); ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->flags = flags; +@@ -175,8 +175,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- dev_name(inode_to_bdi(inode)->dev), 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(inode_to_bdi(inode)), 32); + __entry->ino = inode->i_ino; + __entry->sync_mode = wbc->sync_mode; + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); +@@ -219,8 +219,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, + __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( +- strncpy(__entry->name, +- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32); ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + __entry->nr_pages = work->nr_pages; + __entry->sb_dev = work->sb ? work->sb->s_dev : 0; + __entry->sync_mode = work->sync_mode; +@@ -273,7 +272,7 @@ DECLARE_EVENT_CLASS(writeback_class, + __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( +- strncpy(__entry->name, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), + TP_printk("bdi %s: cgroup_ino=%u", +@@ -296,7 +295,7 @@ TRACE_EVENT(writeback_bdi_register, + __array(char, name, 32) + ), + TP_fast_assign( +- strncpy(__entry->name, dev_name(bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + ), + TP_printk("bdi %s", + __entry->name +@@ -321,7 +320,7 @@ DECLARE_EVENT_CLASS(wbc_class, + ), + + TP_fast_assign( +- strncpy(__entry->name, dev_name(bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->sync_mode = wbc->sync_mode; +@@ -372,7 +371,7 @@ TRACE_EVENT(writeback_queue_io, + __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( +- strncpy(__entry->name, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + __entry->older = dirtied_before; + __entry->age = (jiffies - dirtied_before) * 1000 / HZ; + __entry->moved = moved; +@@ -457,7 +456,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, + ), + + TP_fast_assign( +- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); + __entry->write_bw = KBps(wb->write_bandwidth); + __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); + __entry->dirty_rate = KBps(dirty_rate); +@@ -522,7 +521,7 @@ TRACE_EVENT(balance_dirty_pages, + + TP_fast_assign( + unsigned long freerun = (thresh + bg_thresh) / 2; +- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); + + __entry->limit = global_wb_domain.dirty_limit; + __entry->setpoint = (global_wb_domain.dirty_limit + +@@ -582,8 +581,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- dev_name(inode_to_bdi(inode)->dev), 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(inode_to_bdi(inode)), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; +@@ -656,8 +655,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- dev_name(inode_to_bdi(inode)->dev), 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(inode_to_bdi(inode)), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = 
inode->dirtied_when; +diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h +index 34b1379f9777d..f9d8aac170fbc 100644 +--- a/include/xen/grant_table.h ++++ b/include/xen/grant_table.h +@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, + map->flags = flags; + map->ref = ref; + map->dom = domid; ++ map->status = 1; /* arbitrary positive value */ + } + + static inline void +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c +index a2a232dec2363..2fdf6f96f9762 100644 +--- a/kernel/bpf/stackmap.c ++++ b/kernel/bpf/stackmap.c +@@ -70,6 +70,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) + + /* hash table size must be power of 2 */ + n_buckets = roundup_pow_of_two(attr->max_entries); ++ if (!n_buckets) ++ return ERR_PTR(-E2BIG); + + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); + if (cost >= U32_MAX - PAGE_SIZE) +diff --git a/kernel/futex.c b/kernel/futex.c +index 83db5787c67ef..b65dbb5d60bb1 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -1019,6 +1019,39 @@ static void exit_pi_state_list(struct task_struct *curr) + * [10] There is no transient state which leaves owner and user space + * TID out of sync. Except one error case where the kernel is denied + * write access to the user address, see fixup_pi_state_owner(). ++ * ++ * ++ * Serialization and lifetime rules: ++ * ++ * hb->lock: ++ * ++ * hb -> futex_q, relation ++ * futex_q -> pi_state, relation ++ * ++ * (cannot be raw because hb can contain arbitrary amount ++ * of futex_q's) ++ * ++ * pi_mutex->wait_lock: ++ * ++ * {uval, pi_state} ++ * ++ * (and pi_mutex 'obviously') ++ * ++ * p->pi_lock: ++ * ++ * p->pi_state_list -> pi_state->list, relation ++ * ++ * pi_state->refcount: ++ * ++ * pi_state lifetime ++ * ++ * ++ * Lock order: ++ * ++ * hb->lock ++ * pi_mutex->wait_lock ++ * p->pi_lock ++ * + */ + + /* +@@ -1026,10 +1059,12 @@ static void exit_pi_state_list(struct task_struct *curr) + * the pi_state against the user space value. If correct, attach to + * it. + */ +-static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, ++static int attach_to_pi_state(u32 __user *uaddr, u32 uval, ++ struct futex_pi_state *pi_state, + struct futex_pi_state **ps) + { + pid_t pid = uval & FUTEX_TID_MASK; ++ int ret, uval2; + + /* + * Userspace might have messed up non-PI and PI futexes [3] +@@ -1037,8 +1072,33 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + if (unlikely(!pi_state)) + return -EINVAL; + ++ /* ++ * We get here with hb->lock held, and having found a ++ * futex_top_waiter(). This means that futex_lock_pi() of said futex_q ++ * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), ++ * which in turn means that futex_lock_pi() still has a reference on ++ * our pi_state. ++ */ + WARN_ON(!atomic_read(&pi_state->refcount)); + ++ /* ++ * Now that we have a pi_state, we can acquire wait_lock ++ * and do the state validation. ++ */ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ ++ /* ++ * Since {uval, pi_state} is serialized by wait_lock, and our current ++ * uval was read without holding it, it can have changed. Verify it ++ * still is what we expect it to be, otherwise retry the entire ++ * operation. ++ */ ++ if (get_futex_value_locked(&uval2, uaddr)) ++ goto out_efault; ++ ++ if (uval != uval2) ++ goto out_eagain; ++ + /* + * Handle the owner died case: + */ +@@ -1054,11 +1114,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + * is not 0. 
Inconsistent state. [5] + */ + if (pid) +- return -EINVAL; ++ goto out_einval; + /* + * Take a ref on the state and return success. [4] + */ +- goto out_state; ++ goto out_attach; + } + + /* +@@ -1070,14 +1130,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + * Take a ref on the state and return success. [6] + */ + if (!pid) +- goto out_state; ++ goto out_attach; + } else { + /* + * If the owner died bit is not set, then the pi_state + * must have an owner. [7] + */ + if (!pi_state->owner) +- return -EINVAL; ++ goto out_einval; + } + + /* +@@ -1086,11 +1146,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + * user space TID. [9/10] + */ + if (pid != task_pid_vnr(pi_state->owner)) +- return -EINVAL; +-out_state: ++ goto out_einval; ++ ++out_attach: + atomic_inc(&pi_state->refcount); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + *ps = pi_state; + return 0; ++ ++out_einval: ++ ret = -EINVAL; ++ goto out_error; ++ ++out_eagain: ++ ret = -EAGAIN; ++ goto out_error; ++ ++out_efault: ++ ret = -EFAULT; ++ goto out_error; ++ ++out_error: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return ret; + } + + /** +@@ -1123,11 +1201,67 @@ static void wait_for_owner_exiting(int ret, struct task_struct *exiting) + put_task_struct(exiting); + } + ++static int handle_exit_race(u32 __user *uaddr, u32 uval, ++ struct task_struct *tsk) ++{ ++ u32 uval2; ++ ++ /* ++ * If the futex exit state is not yet FUTEX_STATE_DEAD, wait ++ * for it to finish. ++ */ ++ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) ++ return -EAGAIN; ++ ++ /* ++ * Reread the user space value to handle the following situation: ++ * ++ * CPU0 CPU1 ++ * ++ * sys_exit() sys_futex() ++ * do_exit() futex_lock_pi() ++ * futex_lock_pi_atomic() ++ * exit_signals(tsk) No waiters: ++ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID ++ * mm_release(tsk) Set waiter bit ++ * exit_robust_list(tsk) { *uaddr = 0x80000PID; ++ * Set owner died attach_to_pi_owner() { ++ * *uaddr = 0xC0000000; tsk = get_task(PID); ++ * } if (!tsk->flags & PF_EXITING) { ++ * ... attach(); ++ * tsk->futex_state = } else { ++ * FUTEX_STATE_DEAD; if (tsk->futex_state != ++ * FUTEX_STATE_DEAD) ++ * return -EAGAIN; ++ * return -ESRCH; <--- FAIL ++ * } ++ * ++ * Returning ESRCH unconditionally is wrong here because the ++ * user space value has been changed by the exiting task. ++ * ++ * The same logic applies to the case where the exiting task is ++ * already gone. ++ */ ++ if (get_futex_value_locked(&uval2, uaddr)) ++ return -EFAULT; ++ ++ /* If the user space value has changed, try again. */ ++ if (uval2 != uval) ++ return -EAGAIN; ++ ++ /* ++ * The exiting task did not have a robust list, the robust list was ++ * corrupted or the user space value in *uaddr is simply bogus. ++ * Give up and tell user space. ++ */ ++ return -ESRCH; ++} ++ + /* + * Lookup the task for the TID provided from user space and attach to + * it after doing proper sanity checks. + */ +-static int attach_to_pi_owner(u32 uval, union futex_key *key, ++static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, + struct futex_pi_state **ps, + struct task_struct **exiting) + { +@@ -1138,12 +1272,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + /* + * We are the first waiter - try to look up the real owner and attach + * the new pi_state to it, but bail out when TID = 0 [1] ++ * ++ * The !pid check is paranoid. 
None of the call sites should end up ++ * with pid == 0, but better safe than sorry. Let the caller retry + */ + if (!pid) +- return -ESRCH; ++ return -EAGAIN; + p = futex_find_get_task(pid); + if (!p) +- return -ESRCH; ++ return handle_exit_race(uaddr, uval, NULL); + + if (unlikely(p->flags & PF_KTHREAD)) { + put_task_struct(p); +@@ -1162,7 +1299,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + * FUTEX_STATE_DEAD, we know that the task has finished + * the cleanup: + */ +- int ret = (p->futex_state = FUTEX_STATE_DEAD) ? -ESRCH : -EAGAIN; ++ int ret = handle_exit_race(uaddr, uval, p); + + raw_spin_unlock_irq(&p->pi_lock); + /* +@@ -1183,6 +1320,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + + /* + * No existing pi state. First waiter. [2] ++ * ++ * This creates pi_state, we have hb->lock held, this means nothing can ++ * observe this state, wait_lock is irrelevant. + */ + pi_state = alloc_pi_state(); + +@@ -1207,7 +1347,8 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + return 0; + } + +-static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, ++static int lookup_pi_state(u32 __user *uaddr, u32 uval, ++ struct futex_hash_bucket *hb, + union futex_key *key, struct futex_pi_state **ps, + struct task_struct **exiting) + { +@@ -1218,13 +1359,13 @@ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, + * attach to the pi_state when the validation succeeds. + */ + if (match) +- return attach_to_pi_state(uval, match->pi_state, ps); ++ return attach_to_pi_state(uaddr, uval, match->pi_state, ps); + + /* + * We are the first waiter - try to look up the owner based on + * @uval and attach to it. + */ +- return attach_to_pi_owner(uval, key, ps, exiting); ++ return attach_to_pi_owner(uaddr, uval, key, ps, exiting); + } + + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) +@@ -1237,7 +1378,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) + if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) + return -EFAULT; + +- /*If user space value changed, let the caller retry */ ++ /* If user space value changed, let the caller retry */ + return curval != uval ? -EAGAIN : 0; + } + +@@ -1301,7 +1442,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + */ + match = futex_top_waiter(hb, key); + if (match) +- return attach_to_pi_state(uval, match->pi_state, ps); ++ return attach_to_pi_state(uaddr, uval, match->pi_state, ps); + + /* + * No waiter and user TID is 0. We are here because the +@@ -1340,7 +1481,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + * attach to the owner. If that fails, no harm done, we only + * set the FUTEX_WAITERS bit in the user space variable. + */ +- return attach_to_pi_owner(uval, key, ps, exiting); ++ return attach_to_pi_owner(uaddr, newval, key, ps, exiting); + } + + /** +@@ -1441,6 +1582,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, + + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { + ret = -EFAULT; ++ + } else if (curval != uval) { + /* + * If a unconditional UNLOCK_PI operation (user space did not +@@ -1977,7 +2119,7 @@ retry_private: + * If that call succeeds then we have pi_state and an + * initial refcount on it. 
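
handle_exit_race(), introduced above, distinguishes "owner is still exiting, retry" from "owner is genuinely gone". A condensed sketch of that decision, mirroring the patch's error codes; kernel-internal types and the reread primitive are taken as given:

/* Condensed shape of the exit-race decision above. */
static int exit_race_sketch(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/* Owner still tearing down its robust list: caller must retry. */
	if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
		return -EAGAIN;

	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* The exit path rewrote the futex word: retry with a fresh value. */
	if (uval2 != uval)
		return -EAGAIN;

	/* Value unchanged and the owner is gone for good: stale TID. */
	return -ESRCH;
}
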
+ */ +- ret = lookup_pi_state(ret, hb2, &key2, ++ ret = lookup_pi_state(uaddr2, ret, hb2, &key2, + &pi_state, &exiting); + } + +@@ -2282,7 +2424,6 @@ static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + int err = 0; + + oldowner = pi_state->owner; +- + /* Owner died? */ + if (!pi_state->owner) + newtid |= FUTEX_OWNER_DIED; +@@ -2305,11 +2446,10 @@ static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + * because we can fault here. Imagine swapped out pages or a fork + * that marked all the anonymous memory readonly for cow. + * +- * Modifying pi_state _before_ the user space value would +- * leave the pi_state in an inconsistent state when we fault +- * here, because we need to drop the hash bucket lock to +- * handle the fault. This might be observed in the PID check +- * in lookup_pi_state. ++ * Modifying pi_state _before_ the user space value would leave the ++ * pi_state in an inconsistent state when we fault here, because we ++ * need to drop the locks to handle the fault. This might be observed ++ * in the PID check in lookup_pi_state. + */ + retry: + if (!argowner) { +@@ -2322,7 +2462,7 @@ retry: + } + + if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { +- /* We got the lock after all, nothing to fix. */ ++ /* We got the lock. pi_state is correct. Tell caller. */ + return 1; + } + +@@ -2364,24 +2504,29 @@ retry: + */ + pi_state_update_owner(pi_state, newowner); + +- return 0; ++ return argowner == current; + + /* +- * To handle the page fault we need to drop the hash bucket +- * lock here. That gives the other task (either the highest priority +- * waiter itself or the task which stole the rtmutex) the +- * chance to try the fixup of the pi_state. So once we are +- * back from handling the fault we need to check the pi_state +- * after reacquiring the hash bucket lock and before trying to +- * do another fixup. When the fixup has been done already we +- * simply return. ++ * To handle the page fault we need to drop the locks here. That gives ++ * the other task (either the highest priority waiter itself or the ++ * task which stole the rtmutex) the chance to try the fixup of the ++ * pi_state. So once we are back from handling the fault we need to ++ * check the pi_state after reacquiring the locks and before trying to ++ * do another fixup. When the fixup has been done already we simply ++ * return. ++ * ++ * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely ++ * drop hb->lock since the caller owns the hb -> futex_q relation. ++ * Dropping the pi_mutex->wait_lock requires the state revalidate. + */ + handle_fault: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + spin_unlock(q->lock_ptr); + + err = fault_in_user_writeable(uaddr); + + spin_lock(q->lock_ptr); ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + + /* + * Check if someone else fixed it for us: +@@ -2447,8 +2592,6 @@ static long futex_wait_restart(struct restart_block *restart); + */ + static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + { +- int ret = 0; +- + if (locked) { + /* + * Got the lock. We might not be the anticipated owner if we +@@ -2459,8 +2602,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + * stable state, anything else needs more attention. 
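
The handle_fault path above has to drop both q->lock_ptr and the rtmutex wait_lock before faulting the page in, then revalidate once they are reacquired. A schematic of that retry shape; try_update_owner() and owner_already_fixed() are hypothetical stand-ins for the surrounding logic:

/* Schematic of the fault-retry dance; helpers are hypothetical. */
static int fixup_retry_sketch(u32 __user *uaddr, struct futex_q *q,
			      struct futex_pi_state *pi_state)
{
	int err;

retry:
	err = try_update_owner(uaddr, pi_state);	/* hypothetical */
	if (err != -EFAULT)
		return err;

	/* Drop both locks: faulting may sleep. */
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

	err = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/* Revalidate: another task may have fixed things while we slept. */
	if (owner_already_fixed(pi_state))		/* hypothetical */
		return 0;
	if (!err)
		goto retry;
	return err;
}
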
+ */ + if (q->pi_state->owner != current) +- ret = fixup_pi_state_owner(uaddr, q, current); +- goto out; ++ return fixup_pi_state_owner(uaddr, q, current); ++ return 1; + } + + /* +@@ -2471,10 +2614,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + * Another speculative read; pi_state->owner == current is unstable + * but needs our attention. + */ +- if (q->pi_state->owner == current) { +- ret = fixup_pi_state_owner(uaddr, q, NULL); +- goto out; +- } ++ if (q->pi_state->owner == current) ++ return fixup_pi_state_owner(uaddr, q, NULL); + + /* + * Paranoia check. If we did not take the lock, then we should not be +@@ -2483,8 +2624,7 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) + return fixup_pi_state_owner(uaddr, q, current); + +-out: +- return ret ? ret : locked; ++ return 0; + } + + /** +@@ -3106,6 +3246,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + */ + put_pi_state(q.pi_state); + spin_unlock(q.lock_ptr); ++ /* ++ * Adjust the return value. It's either -EFAULT or ++ * success (1) but the caller expects 0 for success. ++ */ ++ ret = ret < 0 ? ret : 0; + } + } else { + struct rt_mutex *pi_mutex; +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 16ca877745f62..ce49b62b08346 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -5803,7 +5803,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) + } + + if (t->ret_stack == NULL) { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->curr_ret_stack = -1; + /* Make sure the tasks see the -1 first: */ +@@ -6015,7 +6014,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + static void + graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) + { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visible before we add the ret_stack */ +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 005929d13c7db..b87ab105fa22b 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -2090,7 +2090,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, + (entry = this_cpu_read(trace_buffered_event))) { + /* Try to use the per cpu buffer first */ + val = this_cpu_inc_return(trace_buffered_event_cnt); +- if (val == 1) { ++ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) { + trace_event_setup(entry, type, flags, pc); + entry->array[0] = len; + return entry; +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 5bf072e437c41..1499b2c2799c7 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -1105,7 +1105,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, + mutex_lock(&event_mutex); + list_for_each_entry(file, &tr->events, list) { + call = file->event_call; +- if (!trace_event_name(call) || !call->class || !call->class->reg) ++ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || ++ !trace_event_name(call) || !call->class || !call->class->reg) + continue; + + if (system && strcmp(call->class->system, system->name) != 0) +diff --git a/lib/string.c b/lib/string.c +index d099762a9bd60..8fe13371aed7a 100644 +--- a/lib/string.c ++++ b/lib/string.c +@@ -157,11 +157,9 @@ EXPORT_SYMBOL(strlcpy); + * @src: Where to copy the string from + * @count: Size of destination buffer + * +- * Copy the 
string, or as much of it as fits, into the dest buffer. +- * The routine returns the number of characters copied (not including +- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. +- * The behavior is undefined if the string buffers overlap. +- * The destination buffer is always NUL terminated, unless it's zero-sized. ++ * Copy the string, or as much of it as fits, into the dest buffer. The ++ * behavior is undefined if the string buffers overlap. The destination ++ * buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since +@@ -171,8 +169,10 @@ EXPORT_SYMBOL(strlcpy); + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be +- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() +- * with an overflow test, then just memset() the tail of the dest buffer. ++ * zeroed. If zeroing is desired please use strscpy_pad(). ++ * ++ * Return: The number of characters copied (not including the trailing ++ * %NUL) or -E2BIG if the destination buffer wasn't big enough. + */ + ssize_t strscpy(char *dest, const char *src, size_t count) + { +@@ -259,6 +259,39 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) + } + EXPORT_SYMBOL(stpcpy); + ++/** ++ * strscpy_pad() - Copy a C-string into a sized buffer ++ * @dest: Where to copy the string to ++ * @src: Where to copy the string from ++ * @count: Size of destination buffer ++ * ++ * Copy the string, or as much of it as fits, into the dest buffer. The ++ * behavior is undefined if the string buffers overlap. The destination ++ * buffer is always %NUL terminated, unless it's zero-sized. ++ * ++ * If the source string is shorter than the destination buffer, zeros ++ * the tail of the destination buffer. ++ * ++ * For full explanation of why you may want to consider using the ++ * 'strscpy' functions please see the function docstring for strscpy(). ++ * ++ * Return: The number of characters copied (not including the trailing ++ * %NUL) or -E2BIG if the destination buffer wasn't big enough. ++ */ ++ssize_t strscpy_pad(char *dest, const char *src, size_t count) ++{ ++ ssize_t written; ++ ++ written = strscpy(dest, src, count); ++ if (written < 0 || written == count - 1) ++ return written; ++ ++ memset(dest + written + 1, 0, count - written - 1); ++ ++ return written; ++} ++EXPORT_SYMBOL(strscpy_pad); ++ + #ifndef __HAVE_ARCH_STRCAT + /** + * strcat - Append one %NUL-terminated string to another +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index 113b7d3170799..aad61d0175a1c 100644 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { + EXPORT_SYMBOL_GPL(noop_backing_dev_info); + + static struct class *bdi_class; ++const char *bdi_unknown_name = "(unknown)"; + + /* + * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side +diff --git a/mm/memblock.c b/mm/memblock.c +index 42b98af6a4158..e43065b13c08c 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -186,14 +186,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, + * + * Find @size free area aligned to @align in the specified range and node. + * +- * When allocation direction is bottom-up, the @start should be greater +- * than the end of the kernel image. Otherwise, it will be trimmed. 
The +- * reason is that we want the bottom-up allocation just near the kernel +- * image so it is highly likely that the allocated memory and the kernel +- * will reside in the same node. +- * +- * If bottom-up allocation failed, will try to allocate memory top-down. +- * + * RETURNS: + * Found address on success, 0 on failure. + */ +@@ -201,8 +193,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + phys_addr_t align, phys_addr_t start, + phys_addr_t end, int nid, ulong flags) + { +- phys_addr_t kernel_end, ret; +- + /* pump up @end */ + if (end == MEMBLOCK_ALLOC_ACCESSIBLE) + end = memblock.current_limit; +@@ -210,39 +200,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + /* avoid allocating the first page */ + start = max_t(phys_addr_t, start, PAGE_SIZE); + end = max(start, end); +- kernel_end = __pa_symbol(_end); +- +- /* +- * try bottom-up allocation only when bottom-up mode +- * is set and @end is above the kernel image. +- */ +- if (memblock_bottom_up() && end > kernel_end) { +- phys_addr_t bottom_up_start; +- +- /* make sure we will allocate above the kernel */ +- bottom_up_start = max(start, kernel_end); + +- /* ok, try bottom-up allocation first */ +- ret = __memblock_find_range_bottom_up(bottom_up_start, end, +- size, align, nid, flags); +- if (ret) +- return ret; +- +- /* +- * we always limit bottom-up allocation above the kernel, +- * but top-down allocation doesn't have the limit, so +- * retrying top-down allocation may succeed when bottom-up +- * allocation failed. +- * +- * bottom-up allocation is expected to be fail very rarely, +- * so we use WARN_ONCE() here to see the stack trace if +- * fail happens. +- */ +- WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); +- } +- +- return __memblock_find_range_top_down(start, end, size, align, nid, +- flags); ++ if (memblock_bottom_up()) ++ return __memblock_find_range_bottom_up(start, end, size, align, ++ nid, flags); ++ else ++ return __memblock_find_range_top_down(start, end, size, align, ++ nid, flags); + } + + /** +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index d4232744c59f1..27b0b4f03fcdc 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -1638,9 +1638,13 @@ cleanup: + * @page: the page + * + * This function protects unlocked LRU pages from being moved to +- * another cgroup and stabilizes their page->mem_cgroup binding. ++ * another cgroup. ++ * ++ * It ensures lifetime of the returned memcg. Caller is responsible ++ * for the lifetime of the page; __unlock_page_memcg() is available ++ * when @page might get freed inside the locked section. + */ +-void lock_page_memcg(struct page *page) ++struct mem_cgroup *lock_page_memcg(struct page *page) + { + struct mem_cgroup *memcg; + unsigned long flags; +@@ -1649,18 +1653,24 @@ void lock_page_memcg(struct page *page) + * The RCU lock is held throughout the transaction. The fast + * path can get away without acquiring the memcg->move_lock + * because page moving starts with an RCU grace period. +- */ ++ * ++ * The RCU lock also protects the memcg from being freed when ++ * the page state that is going to change is the only thing ++ * preventing the page itself from being freed. E.g. writeback ++ * doesn't hold a page reference and relies on PG_writeback to ++ * keep off truncation, migration and so forth. 
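
The memblock hunk above collapses the bottom-up special-casing into a plain direction switch. A toy, self-contained model of the resulting control flow; the two scanners are trivial stand-ins, not the kernel implementations:

#include <stdint.h>

typedef uint64_t phys_addr_t;

static int bottom_up;	/* stand-in for memblock_bottom_up() */

static phys_addr_t scan_bottom_up(phys_addr_t start, phys_addr_t end,
				  phys_addr_t size)
{
	return end - start >= size ? start : 0;		/* lowest fit */
}

static phys_addr_t scan_top_down(phys_addr_t start, phys_addr_t end,
				 phys_addr_t size)
{
	return end - start >= size ? end - size : 0;	/* highest fit */
}

static phys_addr_t find_in_range(phys_addr_t size, phys_addr_t start,
				 phys_addr_t end)
{
	if (start < 4096)	/* avoid allocating the first page */
		start = 4096;
	if (end < start)
		end = start;

	return bottom_up ? scan_bottom_up(start, end, size)
			 : scan_top_down(start, end, size);
}
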
++ */ + rcu_read_lock(); + + if (mem_cgroup_disabled()) +- return; ++ return NULL; + again: + memcg = page->mem_cgroup; + if (unlikely(!memcg)) +- return; ++ return NULL; + + if (atomic_read(&memcg->moving_account) <= 0) +- return; ++ return memcg; + + spin_lock_irqsave(&memcg->move_lock, flags); + if (memcg != page->mem_cgroup) { +@@ -1676,18 +1686,18 @@ again: + memcg->move_lock_task = current; + memcg->move_lock_flags = flags; + +- return; ++ return memcg; + } + EXPORT_SYMBOL(lock_page_memcg); + + /** +- * unlock_page_memcg - unlock a page->mem_cgroup binding +- * @page: the page ++ * __unlock_page_memcg - unlock and unpin a memcg ++ * @memcg: the memcg ++ * ++ * Unlock and unpin a memcg returned by lock_page_memcg(). + */ +-void unlock_page_memcg(struct page *page) ++void __unlock_page_memcg(struct mem_cgroup *memcg) + { +- struct mem_cgroup *memcg = page->mem_cgroup; +- + if (memcg && memcg->move_lock_task == current) { + unsigned long flags = memcg->move_lock_flags; + +@@ -1699,6 +1709,15 @@ void unlock_page_memcg(struct page *page) + + rcu_read_unlock(); + } ++ ++/** ++ * unlock_page_memcg - unlock a page->mem_cgroup binding ++ * @page: the page ++ */ ++void unlock_page_memcg(struct page *page) ++{ ++ __unlock_page_memcg(page->mem_cgroup); ++} + EXPORT_SYMBOL(unlock_page_memcg); + + /* +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 462c778b9fb55..498c924f2fcd6 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -2717,9 +2717,10 @@ EXPORT_SYMBOL(clear_page_dirty_for_io); + int test_clear_page_writeback(struct page *page) + { + struct address_space *mapping = page_mapping(page); ++ struct mem_cgroup *memcg; + int ret; + +- lock_page_memcg(page); ++ memcg = lock_page_memcg(page); + if (mapping && mapping_use_writeback_tags(mapping)) { + struct inode *inode = mapping->host; + struct backing_dev_info *bdi = inode_to_bdi(inode); +@@ -2747,13 +2748,20 @@ int test_clear_page_writeback(struct page *page) + } else { + ret = TestClearPageWriteback(page); + } ++ /* ++ * NOTE: Page might be free now! Writeback doesn't hold a page ++ * reference on its own, it relies on truncation to wait for ++ * the clearing of PG_writeback. The below can only access ++ * page state that is static across allocation cycles. 
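
The surrounding writeback hunk is the motivating caller for the lock_page_memcg() API change above: pin the memcg before the page can be freed, route the stat update through the pinned pointer, and unlock via the memcg rather than the page. The caller shape, condensed from the patch:

/* Once writeback is cleared the page may be freed, so stats go through
 * the pinned memcg and the unlock takes the memcg, not the page. */
static int clear_writeback_sketch(struct page *page)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = lock_page_memcg(page);		/* pins the memcg */
	ret = TestClearPageWriteback(page);	/* page may be freed after */
	if (ret)
		__mem_cgroup_update_page_stat(page, memcg,
					      MEM_CGROUP_STAT_WRITEBACK, -1);
	__unlock_page_memcg(memcg);	/* never deref page->mem_cgroup here */
	return ret;
}
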
++ */ + if (ret) { +- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); ++ __mem_cgroup_update_page_stat(page, memcg, ++ MEM_CGROUP_STAT_WRITEBACK, -1); + dec_node_page_state(page, NR_WRITEBACK); + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); + inc_node_page_state(page, NR_WRITTEN); + } +- unlock_page_memcg(page); ++ __unlock_page_memcg(memcg); + return ret; + } + +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 76a008b1cbe5f..adc93329e6aac 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -2933,7 +2933,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) + break; + if (!aalg->pfkey_supported) + continue; +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + return sz + sizeof(struct sadb_prop); +@@ -2951,7 +2951,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!ealg->pfkey_supported) + continue; + +- if (!(ealg_tmpl_set(t, ealg) && ealg->available)) ++ if (!(ealg_tmpl_set(t, ealg))) + continue; + + for (k = 1; ; k++) { +@@ -2962,7 +2962,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!aalg->pfkey_supported) + continue; + +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + } +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index d507d0fc7858a..ddd90a3820d39 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -903,7 +903,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, + * Let nf_ct_resolve_clash() deal with this later. + */ + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, +- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) ++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) && ++ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) + continue; + + NF_CT_STAT_INC_ATOMIC(net, found); +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c +index 79d7ad621a80f..03c8bd854e56a 100644 +--- a/net/netfilter/xt_recent.c ++++ b/net/netfilter/xt_recent.c +@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e) + /* + * Drop entries with timestamps older then 'time'. + */ +-static void recent_entry_reap(struct recent_table *t, unsigned long time) ++static void recent_entry_reap(struct recent_table *t, unsigned long time, ++ struct recent_entry *working, bool update) + { + struct recent_entry *e; + +@@ -164,6 +165,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time) + */ + e = list_entry(t->lru_list.next, struct recent_entry, lru_list); + ++ /* ++ * Do not reap the entry which are going to be updated. ++ */ ++ if (e == working && update) ++ return; ++ + /* + * The last time stamp is the most recent. 
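
The xt_recent fix above threads the entry being updated into the reaper so the LRU head is not freed out from under the caller. A minimal sketch of the guard; the staleness test is paraphrased from the surrounding context lines:

/* Never reap the LRU head if it is the entry the current packet is
 * about to update. */
static void reap_sketch(struct recent_table *t, unsigned long cutoff,
			struct recent_entry *working, bool update)
{
	struct recent_entry *e;

	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);

	if (e == working && update)
		return;		/* the caller still needs this entry */

	if (time_after(cutoff, e->stamps[e->index - 1]))
		recent_entry_remove(t, e);
}
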
+ */ +@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) + + /* info->seconds must be non-zero */ + if (info->check_set & XT_RECENT_REAP) +- recent_entry_reap(t, time); ++ recent_entry_reap(t, time, e, ++ info->check_set & XT_RECENT_UPDATE && ret); + } + + if (info->check_set & XT_RECENT_SET || +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 591d378d1a188..1d00f49dfe48d 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -53,6 +53,7 @@ + #include + #include + ++#include "auth_gss_internal.h" + #include "../netns.h" + + static const struct rpc_authops authgss_ops; +@@ -147,35 +148,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, size_t len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static inline const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- dest->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(dest->data == NULL)) +- return ERR_PTR(-ENOMEM); +- dest->len = len; +- return q; +-} +- + static struct gss_cl_ctx * + gss_cred_get_ctx(struct rpc_cred *cred) + { +diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h +new file mode 100644 +index 0000000000000..f6d9631bd9d00 +--- /dev/null ++++ b/net/sunrpc/auth_gss/auth_gss_internal.h +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: BSD-3-Clause ++/* ++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h ++ * ++ * Internal definitions for RPCSEC_GSS client authentication ++ * ++ * Copyright (c) 2000 The Regents of the University of Michigan. ++ * All rights reserved. 
++ *
++ */
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/sunrpc/xdr.h>
++
++static inline const void *
++simple_get_bytes(const void *p, const void *end, void *res, size_t len)
++{
++ const void *q = (const void *)((const char *)p + len);
++ if (unlikely(q > end || q < p))
++ return ERR_PTR(-EFAULT);
++ memcpy(res, p, len);
++ return q;
++}
++
++static inline const void *
++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
++{
++ const void *q;
++ unsigned int len;
++
++ p = simple_get_bytes(p, end, &len, sizeof(len));
++ if (IS_ERR(p))
++ return p;
++ q = (const void *)((const char *)p + len);
++ if (unlikely(q > end || q < p))
++ return ERR_PTR(-EFAULT);
++ if (len) {
++ dest->data = kmemdup(p, len, GFP_NOFS);
++ if (unlikely(dest->data == NULL))
++ return ERR_PTR(-ENOMEM);
++ } else
++ dest->data = NULL;
++ dest->len = len;
++ return q;
++}
+diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
+index 60595835317af..ea2f6022b3d5d 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
+@@ -46,6 +46,8 @@
+ #include <linux/sunrpc/xdr.h>
+ #include <linux/sunrpc/gss_krb5_enctypes.h>
+
++#include "auth_gss_internal.h"
++
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ # define RPCDBG_FACILITY RPCDBG_AUTH
+ #endif
+@@ -187,35 +189,6 @@ get_gss_krb5_enctype(int etype)
+ return NULL;
+ }
+
+-static const void *
+-simple_get_bytes(const void *p, const void *end, void *res, int len)
+-{
+- const void *q = (const void *)((const char *)p + len);
+- if (unlikely(q > end || q < p))
+- return ERR_PTR(-EFAULT);
+- memcpy(res, p, len);
+- return q;
+-}
+-
+-static const void *
+-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
+-{
+- const void *q;
+- unsigned int len;
+-
+- p = simple_get_bytes(p, end, &len, sizeof(len));
+- if (IS_ERR(p))
+- return p;
+- q = (const void *)((const char *)p + len);
+- if (unlikely(q > end || q < p))
+- return ERR_PTR(-EFAULT);
+- res->data = kmemdup(p, len, GFP_NOFS);
+- if (unlikely(res->data == NULL))
+- return ERR_PTR(-ENOMEM);
+- res->len = len;
+- return q;
+-}
+-
+ static inline const void *
+ get_key(const void *p, const void *end,
+ struct krb5_ctx *ctx, struct crypto_skcipher **res)
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 3a2543b9701a9..bd3a5ef8e59b9 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -830,10 +830,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
+ */
+
+ sk = sock->sk;
++
++ lock_sock(sk);
+ if (sock->state == SS_UNCONNECTED) {
+ err = -ENOTCONN;
+ if (sk->sk_type == SOCK_STREAM)
+- return err;
++ goto out;
+ } else {
+ sock->state = SS_DISCONNECTING;
+ err = 0;
+@@ -842,10 +844,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
+ /* Receive and send shutdowns are treated alike. */
+ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
+ if (mode) {
+- lock_sock(sk);
+ sk->sk_shutdown |= mode;
+ sk->sk_state_change(sk);
+- release_sock(sk);
+
+ if (sk->sk_type == SOCK_STREAM) {
+ sock_reset_flag(sk, SOCK_DONE);
+@@ -853,6 +853,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
+ }
+ }
+
++out:
++ release_sock(sk);
+ return err;
+ }
+
+@@ -1121,7 +1123,6 @@ static void vsock_connect_timeout(struct work_struct *work)
+ {
+ struct sock *sk;
+ struct vsock_sock *vsk;
+- int cancel = 0;
+
+ vsk = container_of(work, struct vsock_sock, connect_work.work);
+ sk = sk_vsock(vsk);
+@@ -1132,11 +1133,9 @@ static void vsock_connect_timeout(struct work_struct *work)
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_error_report(sk);
+- cancel = 1;
++ vsock_transport_cancel_pkt(vsk);
+ }
+ release_sock(sk);
+- if (cancel)
+- vsock_transport_cancel_pkt(vsk);
+
+ sock_put(sk);
+ }
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index aa9d1c7780c3d..5f7bcc7da460d 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -959,10 +959,10 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+
+ vsk = vsock_sk(sk);
+
+- space_available = virtio_transport_space_update(sk, pkt);
+-
+ lock_sock(sk);
+
++ space_available = virtio_transport_space_update(sk, pkt);
++
+ /* Update CID in case it has changed after a transport reset event */
+ vsk->local_addr.svm_cid = dst.svm_cid;
+
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 6228a83156ea1..f8ee4e33a085f 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -222,6 +222,8 @@ cmd_modversions_c = \
+ endif
+
+ ifdef CONFIG_FTRACE_MCOUNT_RECORD
++ifndef CC_USING_RECORD_MCOUNT
++# compiler will not generate __mcount_loc use recordmcount or recordmcount.pl
+ ifdef BUILD_C_RECORDMCOUNT
+ ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
+ RECORDMCOUNT_FLAGS = -w
+@@ -250,6 +252,7 @@ cmd_record_mcount = \
+ "$(CC_FLAGS_FTRACE)" ]; then \
+ $(sub_cmd_record_mcount) \
+ fi;
++endif # CC_USING_RECORD_MCOUNT
+ endif
+
+ ifdef CONFIG_STACK_VALIDATION
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 5bddabb3de7c3..db859b595dba1 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -382,9 +382,8 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ */
+ kvm->mmu_notifier_count++;
+ need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
+- need_tlb_flush |= kvm->tlbs_dirty;
+ /* we've to flush the tlb before the pages can be freed */
+- if (need_tlb_flush)
++ if (need_tlb_flush || kvm->tlbs_dirty)
+ kvm_flush_remote_tlbs(kvm);
+
+ spin_unlock(&kvm->mmu_lock);