92251 lines
2.9 MiB
92251 lines
2.9 MiB
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
|
|
index 9aae5bf..06beefb 100644
|
|
--- a/Documentation/ABI/testing/configfs-usb-gadget-loopback
|
|
+++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
|
|
@@ -5,4 +5,4 @@ Description:
|
|
The attributes:
|
|
|
|
qlen - depth of loopback queue
|
|
- bulk_buflen - buffer length
|
|
+ buflen - buffer length
|
|
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
|
|
index 29477c3..bc7ff73 100644
|
|
--- a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
|
|
+++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
|
|
@@ -9,4 +9,4 @@ Description:
|
|
isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss)
|
|
isoc_mult - 0..2 (hs/ss only)
|
|
isoc_maxburst - 0..15 (ss only)
|
|
- qlen - buffer length
|
|
+ buflen - buffer length
|
|
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
|
|
index 4c3efe4..750ab97 100644
|
|
--- a/Documentation/ABI/testing/ima_policy
|
|
+++ b/Documentation/ABI/testing/ima_policy
|
|
@@ -20,16 +20,18 @@ Description:
|
|
action: measure | dont_measure | appraise | dont_appraise | audit
|
|
condition:= base | lsm [option]
|
|
base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
|
|
- [fowner]]
|
|
+ [euid=] [fowner=]]
|
|
lsm: [[subj_user=] [subj_role=] [subj_type=]
|
|
[obj_user=] [obj_role=] [obj_type=]]
|
|
option: [[appraise_type=]] [permit_directio]
|
|
|
|
base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
|
|
- mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
|
|
+ mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
|
|
+ [[^]MAY_EXEC]
|
|
fsmagic:= hex value
|
|
fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
|
|
uid:= decimal value
|
|
+ euid:= decimal value
|
|
fowner:=decimal value
|
|
lsm: are LSM specific
|
|
option: appraise_type:= [imasig]
|
|
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
|
|
index 1e61113..7dd32d3 100644
|
|
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
|
|
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
|
|
@@ -3,7 +3,8 @@
|
|
Required properties:
|
|
- compatible : should contain one of the following:
|
|
- "renesas,sata-r8a7779" for R-Car H1
|
|
- - "renesas,sata-r8a7790" for R-Car H2
|
|
+ - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
|
|
+ - "renesas,sata-r8a7790" for R-Car H2 other than ES1
|
|
- "renesas,sata-r8a7791" for R-Car M2
|
|
- reg : address and length of the SATA registers;
|
|
- interrupts : must consist of one interrupt specifier.
|
|
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
|
|
index a4873e5..e30e184 100644
|
|
--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
|
|
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
|
|
@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
|
|
80 81 68 69
|
|
70 71 72 73
|
|
74 75 76 77>;
|
|
- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
|
|
+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
|
|
"saif0", "saif1", "i2c0", "i2c1",
|
|
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
|
|
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
|
|
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
|
|
index 1486497..8a3c408 100644
|
|
--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
|
|
+++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
|
|
@@ -4,11 +4,13 @@ Specifying interrupt information for devices
|
|
1) Interrupt client nodes
|
|
-------------------------
|
|
|
|
-Nodes that describe devices which generate interrupts must contain an either an
|
|
-"interrupts" property or an "interrupts-extended" property. These properties
|
|
-contain a list of interrupt specifiers, one per output interrupt. The format of
|
|
-the interrupt specifier is determined by the interrupt controller to which the
|
|
-interrupts are routed; see section 2 below for details.
|
|
+Nodes that describe devices which generate interrupts must contain an
|
|
+"interrupts" property, an "interrupts-extended" property, or both. If both are
|
|
+present, the latter should take precedence; the former may be provided simply
|
|
+for compatibility with software that does not recognize the latter. These
|
|
+properties contain a list of interrupt specifiers, one per output interrupt. The
|
|
+format of the interrupt specifier is determined by the interrupt controller to
|
|
+which the interrupts are routed; see section 2 below for details.
|
|
|
|
Example:
|
|
interrupt-parent = <&intc1>;
|
|
@@ -28,10 +30,6 @@ should only be used when a device has multiple interrupt parents.
|
|
Example:
|
|
interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
|
|
|
|
-A device node may contain either "interrupts" or "interrupts-extended", but not
|
|
-both. If both properties are present, then the operating system should log an
|
|
-error and use only the data in "interrupts".
|
|
-
|
|
2) Interrupt controller nodes
|
|
-----------------------------
|
|
|
|
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
|
|
index 01ef408..8faff12 100644
|
|
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
|
|
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
|
|
@@ -91,5 +91,5 @@ mpp61 61 gpo, dev(wen1), uart1(txd), audio(rclk)
|
|
mpp62 62 gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
|
|
audio(mclk), uart0(cts)
|
|
mpp63 63 gpo, spi0(sck), tclk
|
|
-mpp64 64 gpio, spi0(miso), spi0-1(cs1)
|
|
-mpp65 65 gpio, spi0(mosi), spi0-1(cs2)
|
|
+mpp64 64 gpio, spi0(miso), spi0(cs1)
|
|
+mpp65 65 gpio, spi0(mosi), spi0(cs2)
|
|
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
|
|
index bfa0a2e..86dec67 100644
|
|
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
|
|
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
|
|
@@ -41,15 +41,15 @@ mpp20 20 gpio, ge0(rxd4), ge1(rxd2), lcd(d20), ptp(clk)
|
|
mpp21 21 gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
|
|
mpp22 22 gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
|
|
mpp23 23 gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
|
|
-mpp24 24 gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
|
|
-mpp25 25 gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
|
|
-mpp26 26 gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
|
|
+mpp24 24 gpio, lcd(hsync), sata1(prsnt), tdm(rst)
|
|
+mpp25 25 gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
|
|
+mpp26 26 gpio, lcd(clk), tdm(fsync)
|
|
mpp27 27 gpio, lcd(e), tdm(dtx), ptp(trig)
|
|
mpp28 28 gpio, lcd(pwm), tdm(drx), ptp(evreq)
|
|
-mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
|
|
+mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk)
|
|
mpp30 30 gpio, tdm(int1), sd0(clk)
|
|
-mpp31 31 gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
|
|
-mpp32 32 gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
|
|
+mpp31 31 gpio, tdm(int2), sd0(cmd)
|
|
+mpp32 32 gpio, tdm(int3), sd0(d0)
|
|
mpp33 33 gpio, tdm(int4), sd0(d1), mem(bat)
|
|
mpp34 34 gpio, tdm(int5), sd0(d2), sata0(prsnt)
|
|
mpp35 35 gpio, tdm(int6), sd0(d3), sata1(prsnt)
|
|
@@ -57,21 +57,18 @@ mpp36 36 gpio, spi(mosi)
|
|
mpp37 37 gpio, spi(miso)
|
|
mpp38 38 gpio, spi(sck)
|
|
mpp39 39 gpio, spi(cs0)
|
|
-mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
|
|
- pcie(clkreq0)
|
|
+mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
|
|
mpp41 41 gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
|
|
pcie(clkreq1)
|
|
-mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
|
|
- vdd(cpu0-pd)
|
|
-mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
|
|
- vdd(cpu2-3-pd){1}
|
|
+mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
|
|
+mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
|
|
mpp44 44 gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
|
|
mem(bat)
|
|
mpp45 45 gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
|
|
mpp46 46 gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
|
|
mpp47 47 gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
|
|
ref(clkout)
|
|
-mpp48 48 gpio, tclk, dev(burst/last)
|
|
+mpp48 48 gpio, dev(clkout), dev(burst/last)
|
|
|
|
* Marvell Armada XP (mv78260 and mv78460 only)
|
|
|
|
@@ -83,9 +80,9 @@ mpp51 51 gpio, dev(ad16)
|
|
mpp52 52 gpio, dev(ad17)
|
|
mpp53 53 gpio, dev(ad18)
|
|
mpp54 54 gpio, dev(ad19)
|
|
-mpp55 55 gpio, dev(ad20), vdd(cpu0-pd)
|
|
-mpp56 56 gpio, dev(ad21), vdd(cpu1-pd)
|
|
-mpp57 57 gpio, dev(ad22), vdd(cpu2-3-pd){1}
|
|
+mpp55 55 gpio, dev(ad20)
|
|
+mpp56 56 gpio, dev(ad21)
|
|
+mpp57 57 gpio, dev(ad22)
|
|
mpp58 58 gpio, dev(ad23)
|
|
mpp59 59 gpio, dev(ad24)
|
|
mpp60 60 gpio, dev(ad25)
|
|
@@ -95,6 +92,3 @@ mpp63 63 gpio, dev(ad28)
|
|
mpp64 64 gpio, dev(ad29)
|
|
mpp65 65 gpio, dev(ad30)
|
|
mpp66 66 gpio, dev(ad31)
|
|
-
|
|
-Notes:
|
|
-* {1} vdd(cpu2-3-pd) only available on mv78460.
|
|
diff --git a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
|
|
index 46f3449..4eb7997 100644
|
|
--- a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
|
|
+++ b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
|
|
@@ -1,7 +1,7 @@
|
|
ADI AXI-SPDIF controller
|
|
|
|
Required properties:
|
|
- - compatible : Must be "adi,axi-spdif-1.00.a"
|
|
+ - compatible : Must be "adi,axi-spdif-tx-1.00.a"
|
|
- reg : Must contain SPDIF core's registers location and length
|
|
- clocks : Pairs of phandle and specifier referencing the controller's clocks.
|
|
The controller expects two clocks, the clock used for the AXI interface and
|
|
diff --git a/Documentation/devicetree/bindings/spi/spi_pl022.txt b/Documentation/devicetree/bindings/spi/spi_pl022.txt
|
|
index 22ed679..4d1673c 100644
|
|
--- a/Documentation/devicetree/bindings/spi/spi_pl022.txt
|
|
+++ b/Documentation/devicetree/bindings/spi/spi_pl022.txt
|
|
@@ -4,9 +4,9 @@ Required properties:
|
|
- compatible : "arm,pl022", "arm,primecell"
|
|
- reg : Offset and length of the register set for the device
|
|
- interrupts : Should contain SPI controller interrupt
|
|
+- num-cs : total number of chipselects
|
|
|
|
Optional properties:
|
|
-- num-cs : total number of chipselects
|
|
- cs-gpios : should specify GPIOs used for chipselects.
|
|
The gpios will be referred to as reg = <index> in the SPI child nodes.
|
|
If unspecified, a single SPI device without a chip select can be used.
|
|
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
|
|
index cef2e7d..a666fd8 100644
|
|
--- a/Documentation/kernel-parameters.txt
|
|
+++ b/Documentation/kernel-parameters.txt
|
|
@@ -1175,6 +1175,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|
i8042.notimeout [HW] Ignore timeout condition signalled by controller
|
|
i8042.reset [HW] Reset the controller during init and cleanup
|
|
i8042.unlock [HW] Unlock (ignore) the keylock
|
|
+ i8042.kbdreset [HW] Reset device connected to KBD port
|
|
|
|
i810= [HW,DRM]
|
|
|
|
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
|
|
index 69b3cac..5d86756 100644
|
|
--- a/Documentation/ramoops.txt
|
|
+++ b/Documentation/ramoops.txt
|
|
@@ -14,11 +14,19 @@ survive after a restart.
|
|
|
|
1. Ramoops concepts
|
|
|
|
-Ramoops uses a predefined memory area to store the dump. The start and size of
|
|
-the memory area are set using two variables:
|
|
+Ramoops uses a predefined memory area to store the dump. The start and size
|
|
+and type of the memory area are set using three variables:
|
|
* "mem_address" for the start
|
|
* "mem_size" for the size. The memory size will be rounded down to a
|
|
power of two.
|
|
+ * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
|
|
+
|
|
+Typically the default value of mem_type=0 should be used as that sets the pstore
|
|
+mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
|
|
+pgprot_noncached, which only works on some platforms. This is because pstore
|
|
+depends on atomic operations. At least on ARM, pgprot_noncached causes the
|
|
+memory to be mapped strongly ordered, and atomic operations on strongly ordered
|
|
+memory are implementation defined, and won't work on many ARMs such as omaps.
|
|
|
|
The memory area is divided into "record_size" chunks (also rounded down to
|
|
power of two) and each oops/panic writes a "record_size" chunk of
|
|
@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
|
|
static struct ramoops_platform_data ramoops_data = {
|
|
.mem_size = <...>,
|
|
.mem_address = <...>,
|
|
+ .mem_type = <...>,
|
|
.record_size = <...>,
|
|
.dump_oops = <...>,
|
|
.ecc = <...>,
|
|
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
|
|
index b8dd0df..0fd40b1 100644
|
|
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
|
|
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
|
|
@@ -2026,8 +2026,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
|
|
-------------------
|
|
|
|
Module for sound cards based on the Asus AV66/AV100/AV200 chips,
|
|
- i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
|
|
- HDAV1.3 (Deluxe), and HDAV1.3 Slim.
|
|
+ i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
|
|
+ Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
|
|
|
|
This module supports autoprobe and multiple cards.
|
|
|
|
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
|
|
index b0714d8..8dfb6a5 100644
|
|
--- a/Documentation/stable_kernel_rules.txt
|
|
+++ b/Documentation/stable_kernel_rules.txt
|
|
@@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the
|
|
|
|
Procedure for submitting patches to the -stable tree:
|
|
|
|
+ - If the patch covers files in net/ or drivers/net please follow netdev stable
|
|
+ submission guidelines as described in
|
|
+ Documentation/networking/netdev-FAQ.txt
|
|
- Send the patch, after verifying that it follows the above rules, to
|
|
stable@vger.kernel.org. You must note the upstream commit ID in the
|
|
changelog of your submission, as well as the kernel version you wish
|
|
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
|
|
index 6cd63a9..bc6d617 100644
|
|
--- a/Documentation/virtual/kvm/api.txt
|
|
+++ b/Documentation/virtual/kvm/api.txt
|
|
@@ -2344,7 +2344,8 @@ should be created before this ioctl is invoked.
|
|
|
|
Possible features:
|
|
- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
|
|
- Depends on KVM_CAP_ARM_PSCI.
|
|
+ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
|
|
+ and execute guest code when KVM_RUN is called.
|
|
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
|
|
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
|
|
|
|
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
|
|
index 2908941..53838d9 100644
|
|
--- a/Documentation/virtual/kvm/mmu.txt
|
|
+++ b/Documentation/virtual/kvm/mmu.txt
|
|
@@ -425,6 +425,20 @@ fault through the slow path.
|
|
Since only 19 bits are used to store generation-number on mmio spte, all
|
|
pages are zapped when there is an overflow.
|
|
|
|
+Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
|
|
+times, the last one happening when the generation number is retrieved and
|
|
+stored into the MMIO spte. Thus, the MMIO spte might be created based on
|
|
+out-of-date information, but with an up-to-date generation number.
|
|
+
|
|
+To avoid this, the generation number is incremented again after synchronize_srcu
|
|
+returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
|
|
+memslot update, while some SRCU readers might be using the old copy. We do not
|
|
+want to use an MMIO sptes created with an odd generation number, and we can do
|
|
+this without losing a bit in the MMIO spte. The low bit of the generation
|
|
+is not stored in MMIO spte, and presumed zero when it is extracted out of the
|
|
+spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1,
|
|
+the next access to the spte will always be a cache miss.
|
|
+
|
|
|
|
Further reading
|
|
===============
|
|
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
|
|
index c584a51..afe68dd 100644
|
|
--- a/Documentation/x86/x86_64/mm.txt
|
|
+++ b/Documentation/x86/x86_64/mm.txt
|
|
@@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
|
|
ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
|
|
ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
|
|
... unused hole ...
|
|
+ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
|
|
+... unused hole ...
|
|
ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
|
|
ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
|
|
ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
|
|
diff --git a/Makefile b/Makefile
|
|
index 230c7f6..86d2277 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,6 +1,6 @@
|
|
VERSION = 3
|
|
PATCHLEVEL = 14
|
|
-SUBLEVEL = 14
|
|
+SUBLEVEL = 53
|
|
EXTRAVERSION =
|
|
NAME = Remembering Coco
|
|
|
|
@@ -244,7 +244,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
|
|
|
|
HOSTCC = gcc
|
|
HOSTCXX = g++
|
|
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
|
|
+HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
|
|
HOSTCXXFLAGS = -O2
|
|
|
|
# Decide whether to build built-in, modular, or both.
|
|
@@ -382,7 +382,9 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
|
|
-fno-strict-aliasing -fno-common \
|
|
-Werror-implicit-function-declaration \
|
|
-Wno-format-security \
|
|
- -fno-delete-null-pointer-checks
|
|
+ -fno-delete-null-pointer-checks \
|
|
+ -std=gnu89
|
|
+
|
|
KBUILD_AFLAGS_KERNEL :=
|
|
KBUILD_CFLAGS_KERNEL :=
|
|
KBUILD_AFLAGS := -D__ASSEMBLY__
|
|
@@ -639,6 +641,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
|
|
endif
|
|
endif
|
|
|
|
+KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
|
|
+
|
|
ifdef CONFIG_DEBUG_INFO
|
|
KBUILD_CFLAGS += -g
|
|
KBUILD_AFLAGS += -Wa,--gdwarf-2
|
|
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
|
|
index 98838a0..9d0ac09 100644
|
|
--- a/arch/alpha/mm/fault.c
|
|
+++ b/arch/alpha/mm/fault.c
|
|
@@ -156,6 +156,8 @@ retry:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
|
|
index 4f31b2e..4c169d8 100644
|
|
--- a/arch/arc/boot/dts/nsimosci.dts
|
|
+++ b/arch/arc/boot/dts/nsimosci.dts
|
|
@@ -20,7 +20,7 @@
|
|
/* this is for console on PGU */
|
|
/* bootargs = "console=tty0 consoleblank=0"; */
|
|
/* this is for console on serial */
|
|
- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
|
|
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
|
|
};
|
|
|
|
aliases {
|
|
@@ -46,9 +46,9 @@
|
|
#interrupt-cells = <1>;
|
|
};
|
|
|
|
- uart0: serial@c0000000 {
|
|
+ uart0: serial@f0000000 {
|
|
compatible = "ns8250";
|
|
- reg = <0xc0000000 0x2000>;
|
|
+ reg = <0xf0000000 0x2000>;
|
|
interrupts = <11>;
|
|
clock-frequency = <3686400>;
|
|
baud = <115200>;
|
|
@@ -57,21 +57,21 @@
|
|
no-loopback-test = <1>;
|
|
};
|
|
|
|
- pgu0: pgu@c9000000 {
|
|
+ pgu0: pgu@f9000000 {
|
|
compatible = "snps,arcpgufb";
|
|
- reg = <0xc9000000 0x400>;
|
|
+ reg = <0xf9000000 0x400>;
|
|
};
|
|
|
|
- ps2: ps2@c9001000 {
|
|
+ ps2: ps2@f9001000 {
|
|
compatible = "snps,arc_ps2";
|
|
- reg = <0xc9000400 0x14>;
|
|
+ reg = <0xf9000400 0x14>;
|
|
interrupts = <13>;
|
|
interrupt-names = "arc_ps2_irq";
|
|
};
|
|
|
|
- eth0: ethernet@c0003000 {
|
|
+ eth0: ethernet@f0003000 {
|
|
compatible = "snps,oscilan";
|
|
- reg = <0xc0003000 0x44>;
|
|
+ reg = <0xf0003000 0x44>;
|
|
interrupts = <7>, <8>;
|
|
interrupt-names = "rx", "tx";
|
|
};
|
|
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
|
|
deleted file mode 100644
|
|
index c32245c..0000000
|
|
--- a/arch/arc/include/asm/barrier.h
|
|
+++ /dev/null
|
|
@@ -1,37 +0,0 @@
|
|
-/*
|
|
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
|
- *
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License version 2 as
|
|
- * published by the Free Software Foundation.
|
|
- */
|
|
-
|
|
-#ifndef __ASM_BARRIER_H
|
|
-#define __ASM_BARRIER_H
|
|
-
|
|
-#ifndef __ASSEMBLY__
|
|
-
|
|
-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
|
|
-#define mb() __asm__ __volatile__ ("" : : : "memory")
|
|
-#define rmb() mb()
|
|
-#define wmb() mb()
|
|
-#define set_mb(var, value) do { var = value; mb(); } while (0)
|
|
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
|
|
-#define read_barrier_depends() mb()
|
|
-
|
|
-/* TODO-vineetg verify the correctness of macros here */
|
|
-#ifdef CONFIG_SMP
|
|
-#define smp_mb() mb()
|
|
-#define smp_rmb() rmb()
|
|
-#define smp_wmb() wmb()
|
|
-#else
|
|
-#define smp_mb() barrier()
|
|
-#define smp_rmb() barrier()
|
|
-#define smp_wmb() barrier()
|
|
-#endif
|
|
-
|
|
-#define smp_read_barrier_depends() do { } while (0)
|
|
-
|
|
-#endif
|
|
-
|
|
-#endif
|
|
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
|
|
index 2fd3162..c1d3d2d 100644
|
|
--- a/arch/arc/include/asm/cache.h
|
|
+++ b/arch/arc/include/asm/cache.h
|
|
@@ -55,4 +55,31 @@ extern void read_decode_cache_bcr(void);
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
+/* Instruction cache related Auxiliary registers */
|
|
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
|
|
+#define ARC_REG_IC_IVIC 0x10
|
|
+#define ARC_REG_IC_CTRL 0x11
|
|
+#define ARC_REG_IC_IVIL 0x19
|
|
+#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
|
|
+#define ARC_REG_IC_PTAG 0x1E
|
|
+#endif
|
|
+
|
|
+/* Bit val in IC_CTRL */
|
|
+#define IC_CTRL_CACHE_DISABLE 0x1
|
|
+
|
|
+/* Data cache related Auxiliary registers */
|
|
+#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
|
|
+#define ARC_REG_DC_IVDC 0x47
|
|
+#define ARC_REG_DC_CTRL 0x48
|
|
+#define ARC_REG_DC_IVDL 0x4A
|
|
+#define ARC_REG_DC_FLSH 0x4B
|
|
+#define ARC_REG_DC_FLDL 0x4C
|
|
+#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
|
|
+#define ARC_REG_DC_PTAG 0x5C
|
|
+#endif
|
|
+
|
|
+/* Bit val in DC_CTRL */
|
|
+#define DC_CTRL_INV_MODE_FLUSH 0x40
|
|
+#define DC_CTRL_FLUSH_STATUS 0x100
|
|
+
|
|
#endif /* _ASM_CACHE_H */
|
|
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
|
|
index 03cd689..90de5c5 100644
|
|
--- a/arch/arc/include/asm/cmpxchg.h
|
|
+++ b/arch/arc/include/asm/cmpxchg.h
|
|
@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
|
|
" scond %3, [%1] \n"
|
|
" bnz 1b \n"
|
|
"2: \n"
|
|
- : "=&r"(prev)
|
|
- : "r"(ptr), "ir"(expected),
|
|
- "r"(new) /* can't be "ir". scond can't take limm for "b" */
|
|
- : "cc");
|
|
+ : "=&r"(prev) /* Early clobber, to prevent reg reuse */
|
|
+ : "r"(ptr), /* Not "m": llock only supports reg direct addr mode */
|
|
+ "ir"(expected),
|
|
+ "r"(new) /* can't be "ir". scond can't take LIMM for "b" */
|
|
+ : "cc", "memory"); /* so that gcc knows memory is being written here */
|
|
|
|
return prev;
|
|
}
|
|
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
|
|
index b65fca7..fea9316 100644
|
|
--- a/arch/arc/include/asm/kgdb.h
|
|
+++ b/arch/arc/include/asm/kgdb.h
|
|
@@ -19,7 +19,7 @@
|
|
* register API yet */
|
|
#undef DBG_MAX_REG_NUM
|
|
|
|
-#define GDB_MAX_REGS 39
|
|
+#define GDB_MAX_REGS 87
|
|
|
|
#define BREAK_INSTR_SIZE 2
|
|
#define CACHE_FLUSH_IS_SAFE 1
|
|
@@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
|
|
|
|
extern void kgdb_trap(struct pt_regs *regs);
|
|
|
|
-enum arc700_linux_regnums {
|
|
+/* This is the numbering of registers according to the GDB. See GDB's
|
|
+ * arc-tdep.h for details.
|
|
+ *
|
|
+ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
|
|
+enum arc_linux_regnums {
|
|
_R0 = 0,
|
|
_R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
|
|
_R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
|
|
_R25, _R26,
|
|
- _BTA = 27,
|
|
- _LP_START = 28,
|
|
- _LP_END = 29,
|
|
- _LP_COUNT = 30,
|
|
- _STATUS32 = 31,
|
|
- _BLINK = 32,
|
|
- _FP = 33,
|
|
- __SP = 34,
|
|
- _EFA = 35,
|
|
- _RET = 36,
|
|
- _ORIG_R8 = 37,
|
|
- _STOP_PC = 38
|
|
+ _FP = 27,
|
|
+ __SP = 28,
|
|
+ _R30 = 30,
|
|
+ _BLINK = 31,
|
|
+ _LP_COUNT = 60,
|
|
+ _STOP_PC = 64,
|
|
+ _RET = 64,
|
|
+ _LP_START = 65,
|
|
+ _LP_END = 66,
|
|
+ _STATUS32 = 67,
|
|
+ _ECR = 76,
|
|
+ _BTA = 82,
|
|
};
|
|
|
|
#else
|
|
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
|
|
index 66ee552..5faad17 100644
|
|
--- a/arch/arc/include/asm/linkage.h
|
|
+++ b/arch/arc/include/asm/linkage.h
|
|
@@ -13,20 +13,6 @@
|
|
|
|
#define ASM_NL ` /* use '`' to mark new line in macro */
|
|
|
|
-/* Can't use the ENTRY macro in linux/linkage.h
|
|
- * gas considers ';' as comment vs. newline
|
|
- */
|
|
-.macro ARC_ENTRY name
|
|
- .global \name
|
|
- .align 4
|
|
- \name:
|
|
-.endm
|
|
-
|
|
-.macro ARC_EXIT name
|
|
-#define ASM_PREV_SYM_ADDR(name) .-##name
|
|
- .size \ name, ASM_PREV_SYM_ADDR(\name)
|
|
-.endm
|
|
-
|
|
/* annotation for data we want in DCCM - if enabled in .config */
|
|
.macro ARCFP_DATA nm
|
|
#ifdef CONFIG_ARC_HAS_DCCM
|
|
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
|
|
index 6b0b7f7e..7670f33 100644
|
|
--- a/arch/arc/include/asm/pgtable.h
|
|
+++ b/arch/arc/include/asm/pgtable.h
|
|
@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
|
|
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
|
|
|
|
#define pte_page(x) (mem_map + \
|
|
- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
|
|
+ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
|
|
+ PAGE_SHIFT)))
|
|
|
|
#define mk_pte(page, pgprot) \
|
|
({ \
|
|
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
|
|
index 15334ab..fb95aa8 100644
|
|
--- a/arch/arc/include/asm/processor.h
|
|
+++ b/arch/arc/include/asm/processor.h
|
|
@@ -69,18 +69,19 @@ unsigned long thread_saved_pc(struct task_struct *t);
|
|
#define release_segments(mm) do { } while (0)
|
|
|
|
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
|
|
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
|
|
|
|
/*
|
|
* Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
|
|
* Look in process.c for details of kernel stack layout
|
|
*/
|
|
-#define KSTK_ESP(tsk) (tsk->thread.ksp)
|
|
+#define TSK_K_ESP(tsk) (tsk->thread.ksp)
|
|
|
|
-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
|
|
+#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \
|
|
sizeof(struct callee_regs) + off)))
|
|
|
|
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
|
|
-#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
|
|
+#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
|
|
+#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
|
|
|
|
/*
|
|
* Do necessary setup to start up a newly executed thread.
|
|
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
|
|
index 1bfeec2..2a58af7 100644
|
|
--- a/arch/arc/include/asm/ptrace.h
|
|
+++ b/arch/arc/include/asm/ptrace.h
|
|
@@ -63,7 +63,7 @@ struct callee_regs {
|
|
long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
|
|
};
|
|
|
|
-#define instruction_pointer(regs) ((regs)->ret)
|
|
+#define instruction_pointer(regs) (unsigned long)((regs)->ret)
|
|
#define profile_pc(regs) instruction_pointer(regs)
|
|
|
|
/* return 1 if user mode or 0 if kernel mode */
|
|
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
|
|
index 65690e7..e248594 100644
|
|
--- a/arch/arc/kernel/ctx_sw_asm.S
|
|
+++ b/arch/arc/kernel/ctx_sw_asm.S
|
|
@@ -10,9 +10,9 @@
|
|
* -This is the more "natural" hand written assembler
|
|
*/
|
|
|
|
+#include <linux/linkage.h>
|
|
#include <asm/entry.h> /* For the SAVE_* macros */
|
|
#include <asm/asm-offsets.h>
|
|
-#include <asm/linkage.h>
|
|
|
|
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
|
|
|
|
@@ -62,4 +62,4 @@ __switch_to:
|
|
ld.ab blink, [sp, 4]
|
|
j [blink]
|
|
|
|
-ARC_EXIT __switch_to
|
|
+END(__switch_to)
|
|
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
|
|
index 6e8f83a..29b82ad 100644
|
|
--- a/arch/arc/kernel/entry.S
|
|
+++ b/arch/arc/kernel/entry.S
|
|
@@ -141,7 +141,7 @@ VECTOR EV_Extension ; 0x130, Extn Intruction Excp (0x26)
|
|
VECTOR reserved ; Reserved Exceptions
|
|
.endr
|
|
|
|
-#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */
|
|
+#include <linux/linkage.h> /* {EXTRY,EXIT} */
|
|
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */
|
|
#include <asm/errno.h>
|
|
#include <asm/arcregs.h>
|
|
@@ -184,7 +184,7 @@ reserved: ; processor restart
|
|
; ---------------------------------------------
|
|
; Level 2 ISR: Can interrupt a Level 1 ISR
|
|
; ---------------------------------------------
|
|
-ARC_ENTRY handle_interrupt_level2
|
|
+ENTRY(handle_interrupt_level2)
|
|
|
|
; TODO-vineetg for SMP this wont work
|
|
; free up r9 as scratchpad
|
|
@@ -225,14 +225,14 @@ ARC_ENTRY handle_interrupt_level2
|
|
|
|
b ret_from_exception
|
|
|
|
-ARC_EXIT handle_interrupt_level2
|
|
+END(handle_interrupt_level2)
|
|
|
|
#endif
|
|
|
|
; ---------------------------------------------
|
|
; Level 1 ISR
|
|
; ---------------------------------------------
|
|
-ARC_ENTRY handle_interrupt_level1
|
|
+ENTRY(handle_interrupt_level1)
|
|
|
|
/* free up r9 as scratchpad */
|
|
#ifdef CONFIG_SMP
|
|
@@ -265,7 +265,7 @@ ARC_ENTRY handle_interrupt_level1
|
|
sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
|
|
|
|
b ret_from_exception
|
|
-ARC_EXIT handle_interrupt_level1
|
|
+END(handle_interrupt_level1)
|
|
|
|
;################### Non TLB Exception Handling #############################
|
|
|
|
@@ -273,7 +273,7 @@ ARC_EXIT handle_interrupt_level1
|
|
; Instruction Error Exception Handler
|
|
; ---------------------------------------------
|
|
|
|
-ARC_ENTRY instr_service
|
|
+ENTRY(instr_service)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -284,13 +284,13 @@ ARC_ENTRY instr_service
|
|
|
|
bl do_insterror_or_kprobe
|
|
b ret_from_exception
|
|
-ARC_EXIT instr_service
|
|
+END(instr_service)
|
|
|
|
; ---------------------------------------------
|
|
; Memory Error Exception Handler
|
|
; ---------------------------------------------
|
|
|
|
-ARC_ENTRY mem_service
|
|
+ENTRY(mem_service)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -301,13 +301,13 @@ ARC_ENTRY mem_service
|
|
|
|
bl do_memory_error
|
|
b ret_from_exception
|
|
-ARC_EXIT mem_service
|
|
+END(mem_service)
|
|
|
|
; ---------------------------------------------
|
|
; Machine Check Exception Handler
|
|
; ---------------------------------------------
|
|
|
|
-ARC_ENTRY EV_MachineCheck
|
|
+ENTRY(EV_MachineCheck)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -331,13 +331,13 @@ ARC_ENTRY EV_MachineCheck
|
|
|
|
j do_machine_check_fault
|
|
|
|
-ARC_EXIT EV_MachineCheck
|
|
+END(EV_MachineCheck)
|
|
|
|
; ---------------------------------------------
|
|
; Protection Violation Exception Handler
|
|
; ---------------------------------------------
|
|
|
|
-ARC_ENTRY EV_TLBProtV
|
|
+ENTRY(EV_TLBProtV)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -385,12 +385,12 @@ ARC_ENTRY EV_TLBProtV
|
|
|
|
b ret_from_exception
|
|
|
|
-ARC_EXIT EV_TLBProtV
|
|
+END(EV_TLBProtV)
|
|
|
|
; ---------------------------------------------
|
|
; Privilege Violation Exception Handler
|
|
; ---------------------------------------------
|
|
-ARC_ENTRY EV_PrivilegeV
|
|
+ENTRY(EV_PrivilegeV)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -401,12 +401,12 @@ ARC_ENTRY EV_PrivilegeV
|
|
|
|
bl do_privilege_fault
|
|
b ret_from_exception
|
|
-ARC_EXIT EV_PrivilegeV
|
|
+END(EV_PrivilegeV)
|
|
|
|
; ---------------------------------------------
|
|
; Extension Instruction Exception Handler
|
|
; ---------------------------------------------
|
|
-ARC_ENTRY EV_Extension
|
|
+ENTRY(EV_Extension)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -417,7 +417,7 @@ ARC_ENTRY EV_Extension
|
|
|
|
bl do_extension_fault
|
|
b ret_from_exception
|
|
-ARC_EXIT EV_Extension
|
|
+END(EV_Extension)
|
|
|
|
;######################### System Call Tracing #########################
|
|
|
|
@@ -504,7 +504,7 @@ trap_with_param:
|
|
; (2) Break Points
|
|
;------------------------------------------------------------------
|
|
|
|
-ARC_ENTRY EV_Trap
|
|
+ENTRY(EV_Trap)
|
|
|
|
EXCEPTION_PROLOGUE
|
|
|
|
@@ -534,9 +534,9 @@ ARC_ENTRY EV_Trap
|
|
jl [r9] ; Entry into Sys Call Handler
|
|
|
|
; fall through to ret_from_system_call
|
|
-ARC_EXIT EV_Trap
|
|
+END(EV_Trap)
|
|
|
|
-ARC_ENTRY ret_from_system_call
|
|
+ENTRY(ret_from_system_call)
|
|
|
|
st r0, [sp, PT_r0] ; sys call return value in pt_regs
|
|
|
|
@@ -546,7 +546,7 @@ ARC_ENTRY ret_from_system_call
|
|
;
|
|
; If ret to user mode do we need to handle signals, schedule() et al.
|
|
|
|
-ARC_ENTRY ret_from_exception
|
|
+ENTRY(ret_from_exception)
|
|
|
|
; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
|
|
ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
|
|
@@ -728,9 +728,9 @@ not_level1_interrupt:
|
|
debug_marker_syscall:
|
|
rtie
|
|
|
|
-ARC_EXIT ret_from_exception
|
|
+END(ret_from_exception)
|
|
|
|
-ARC_ENTRY ret_from_fork
|
|
+ENTRY(ret_from_fork)
|
|
; when the forked child comes here from the __switch_to function
|
|
; r0 has the last task pointer.
|
|
; put last task in scheduler queue
|
|
@@ -747,11 +747,11 @@ ARC_ENTRY ret_from_fork
|
|
; special case of kernel_thread entry point returning back due to
|
|
; kernel_execve() - pretend return from syscall to ret to userland
|
|
b ret_from_exception
|
|
-ARC_EXIT ret_from_fork
|
|
+END(ret_from_fork)
|
|
|
|
;################### Special Sys Call Wrappers ##########################
|
|
|
|
-ARC_ENTRY sys_clone_wrapper
|
|
+ENTRY(sys_clone_wrapper)
|
|
SAVE_CALLEE_SAVED_USER
|
|
bl @sys_clone
|
|
DISCARD_CALLEE_SAVED_USER
|
|
@@ -761,7 +761,7 @@ ARC_ENTRY sys_clone_wrapper
|
|
bnz tracesys_exit
|
|
|
|
b ret_from_system_call
|
|
-ARC_EXIT sys_clone_wrapper
|
|
+END(sys_clone_wrapper)
|
|
|
|
#ifdef CONFIG_ARC_DW2_UNWIND
|
|
; Workaround for bug 94179 (STAR ):
|
|
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
|
|
index 9919972..07a58f2 100644
|
|
--- a/arch/arc/kernel/head.S
|
|
+++ b/arch/arc/kernel/head.S
|
|
@@ -12,10 +12,42 @@
|
|
* to skip certain things during boot on simulator
|
|
*/
|
|
|
|
+#include <linux/linkage.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/entry.h>
|
|
-#include <linux/linkage.h>
|
|
#include <asm/arcregs.h>
|
|
+#include <asm/cache.h>
|
|
+
|
|
+.macro CPU_EARLY_SETUP
|
|
+
|
|
+ ; Setting up Vectror Table (in case exception happens in early boot
|
|
+ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
|
|
+
|
|
+ ; Disable I-cache/D-cache if kernel so configured
|
|
+ lr r5, [ARC_REG_IC_BCR]
|
|
+ breq r5, 0, 1f ; I$ doesn't exist
|
|
+ lr r5, [ARC_REG_IC_CTRL]
|
|
+#ifdef CONFIG_ARC_HAS_ICACHE
|
|
+ bclr r5, r5, 0 ; 0 - Enable, 1 is Disable
|
|
+#else
|
|
+ bset r5, r5, 0 ; I$ exists, but is not used
|
|
+#endif
|
|
+ sr r5, [ARC_REG_IC_CTRL]
|
|
+
|
|
+1:
|
|
+ lr r5, [ARC_REG_DC_BCR]
|
|
+ breq r5, 0, 1f ; D$ doesn't exist
|
|
+ lr r5, [ARC_REG_DC_CTRL]
|
|
+ bclr r5, r5, 6 ; Invalidate (discard w/o wback)
|
|
+#ifdef CONFIG_ARC_HAS_DCACHE
|
|
+ bclr r5, r5, 0 ; Enable (+Inv)
|
|
+#else
|
|
+ bset r5, r5, 0 ; Disable (+Inv)
|
|
+#endif
|
|
+ sr r5, [ARC_REG_DC_CTRL]
|
|
+
|
|
+1:
|
|
+.endm
|
|
|
|
.cpu A7
|
|
|
|
@@ -24,13 +56,13 @@
|
|
.globl stext
|
|
stext:
|
|
;-------------------------------------------------------------------
|
|
- ; Don't clobber r0-r4 yet. It might have bootloader provided info
|
|
+ ; Don't clobber r0-r2 yet. It might have bootloader provided info
|
|
;-------------------------------------------------------------------
|
|
|
|
- sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
|
|
+ CPU_EARLY_SETUP
|
|
|
|
#ifdef CONFIG_SMP
|
|
- ; Only Boot (Master) proceeds. Others wait in platform dependent way
|
|
+ ; Ensure Boot (Master) proceeds. Others wait in platform dependent way
|
|
; IDENTITY Reg [ 3 2 1 0 ]
|
|
; (cpu-id) ^^^ => Zero for UP ARC700
|
|
; => #Core-ID if SMP (Master 0)
|
|
@@ -39,7 +71,8 @@ stext:
|
|
; need to make sure only boot cpu takes this path.
|
|
GET_CPU_ID r5
|
|
cmp r5, 0
|
|
- jnz arc_platform_smp_wait_to_boot
|
|
+ mov.ne r0, r5
|
|
+ jne arc_platform_smp_wait_to_boot
|
|
#endif
|
|
; Clear BSS before updating any globals
|
|
; XXX: use ZOL here
|
|
@@ -89,7 +122,7 @@ stext:
|
|
|
|
first_lines_of_secondary:
|
|
|
|
- sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
|
|
+ CPU_EARLY_SETUP
|
|
|
|
; setup per-cpu idle task as "current" on this CPU
|
|
ld r0, [@secondary_idle_tsk]
|
|
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
|
|
index 7e95e1a..a0c63fc 100644
|
|
--- a/arch/arc/kernel/signal.c
|
|
+++ b/arch/arc/kernel/signal.c
|
|
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
|
|
sigset_t *set)
|
|
{
|
|
int err;
|
|
- err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
|
|
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
|
|
sizeof(sf->uc.uc_mcontext.regs.scratch));
|
|
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
|
|
|
|
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
|
|
if (!err)
|
|
set_current_blocked(&set);
|
|
|
|
- err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
|
|
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
|
|
sizeof(sf->uc.uc_mcontext.regs.scratch));
|
|
|
|
return err;
|
|
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
|
|
/* Don't restart from sigreturn */
|
|
syscall_wont_restart(regs);
|
|
|
|
+ /*
|
|
+ * Ensure that sigreturn always returns to user mode (in case the
|
|
+ * regs saved on user stack got fudged between save and sigreturn)
|
|
+ * Otherwise it is easy to panic the kernel with a custom
|
|
+ * signal handler and/or restorer which clobberes the status32/ret
|
|
+ * to return to a bogus location in kernel mode.
|
|
+ */
|
|
+ regs->status32 |= STATUS_U_MASK;
|
|
+
|
|
return regs->r0;
|
|
|
|
badframe:
|
|
@@ -234,8 +243,11 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
|
|
|
|
/*
|
|
* handler returns using sigreturn stub provided already by userpsace
|
|
+ * If not, nuke the process right away
|
|
*/
|
|
- BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
|
|
+ if(!(ka->sa.sa_flags & SA_RESTORER))
|
|
+ return 1;
|
|
+
|
|
regs->blink = (unsigned long)ka->sa.sa_restorer;
|
|
|
|
/* User Stack for signal handler will be above the frame just carved */
|
|
@@ -302,12 +314,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
|
|
struct pt_regs *regs)
|
|
{
|
|
sigset_t *oldset = sigmask_to_save();
|
|
- int ret;
|
|
+ int failed;
|
|
|
|
/* Set up the stack frame */
|
|
- ret = setup_rt_frame(sig, ka, info, oldset, regs);
|
|
+ failed = setup_rt_frame(sig, ka, info, oldset, regs);
|
|
|
|
- if (ret)
|
|
+ if (failed)
|
|
force_sigsegv(sig, current);
|
|
else
|
|
signal_delivered(sig, info, ka, regs, 0);
|
|
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
|
|
index 9ce47cf..fb98769 100644
|
|
--- a/arch/arc/kernel/stacktrace.c
|
|
+++ b/arch/arc/kernel/stacktrace.c
|
|
@@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
|
|
|
|
frame_info->task = tsk;
|
|
|
|
- frame_info->regs.r27 = KSTK_FP(tsk);
|
|
- frame_info->regs.r28 = KSTK_ESP(tsk);
|
|
- frame_info->regs.r31 = KSTK_BLINK(tsk);
|
|
+ frame_info->regs.r27 = TSK_K_FP(tsk);
|
|
+ frame_info->regs.r28 = TSK_K_ESP(tsk);
|
|
+ frame_info->regs.r31 = TSK_K_BLINK(tsk);
|
|
frame_info->regs.r63 = (unsigned int)__switch_to;
|
|
|
|
/* In the prologue of __switch_to, first FP is saved on stack
|
|
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
|
|
index bc813d5..978bf83 100644
|
|
--- a/arch/arc/lib/memcmp.S
|
|
+++ b/arch/arc/lib/memcmp.S
|
|
@@ -6,7 +6,7 @@
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define WORD2 r2
|
|
@@ -16,7 +16,7 @@
|
|
#define SHIFT r2
|
|
#endif
|
|
|
|
-ARC_ENTRY memcmp
|
|
+ENTRY(memcmp)
|
|
or r12,r0,r1
|
|
asl_s r12,r12,30
|
|
sub r3,r2,1
|
|
@@ -121,4 +121,4 @@ ARC_ENTRY memcmp
|
|
.Lnil:
|
|
j_s.d [blink]
|
|
mov r0,0
|
|
-ARC_EXIT memcmp
|
|
+END(memcmp)
|
|
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
|
|
index b64cc10..3222573 100644
|
|
--- a/arch/arc/lib/memcpy-700.S
|
|
+++ b/arch/arc/lib/memcpy-700.S
|
|
@@ -6,9 +6,9 @@
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
-ARC_ENTRY memcpy
|
|
+ENTRY(memcpy)
|
|
or r3,r0,r1
|
|
asl_s r3,r3,30
|
|
mov_s r5,r0
|
|
@@ -63,4 +63,4 @@ ARC_ENTRY memcpy
|
|
.Lendbloop:
|
|
j_s.d [blink]
|
|
stb r12,[r5,0]
|
|
-ARC_EXIT memcpy
|
|
+END(memcpy)
|
|
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
|
|
index 9b2d88d..d36bd43 100644
|
|
--- a/arch/arc/lib/memset.S
|
|
+++ b/arch/arc/lib/memset.S
|
|
@@ -6,11 +6,11 @@
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
|
|
|
|
-ARC_ENTRY memset
|
|
+ENTRY(memset)
|
|
mov_s r4,r0
|
|
or r12,r0,r2
|
|
bmsk.f r12,r12,1
|
|
@@ -46,14 +46,14 @@ ARC_ENTRY memset
|
|
stb.ab r1,[r4,1]
|
|
.Ltiny_end:
|
|
j_s [blink]
|
|
-ARC_EXIT memset
|
|
+END(memset)
|
|
|
|
; memzero: @r0 = mem, @r1 = size_t
|
|
; memset: @r0 = mem, @r1 = char, @r2 = size_t
|
|
|
|
-ARC_ENTRY memzero
|
|
+ENTRY(memzero)
|
|
; adjust bzero args to memset args
|
|
mov r2, r1
|
|
mov r1, 0
|
|
b memset ;tail call so need to tinker with blink
|
|
-ARC_EXIT memzero
|
|
+END(memzero)
|
|
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
|
|
index 9c548c7..b725d58 100644
|
|
--- a/arch/arc/lib/strchr-700.S
|
|
+++ b/arch/arc/lib/strchr-700.S
|
|
@@ -11,9 +11,9 @@
|
|
presence of the norm instruction makes it easier to operate on whole
|
|
words branch-free. */
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
-ARC_ENTRY strchr
|
|
+ENTRY(strchr)
|
|
extb_s r1,r1
|
|
asl r5,r1,8
|
|
bmsk r2,r0,1
|
|
@@ -130,4 +130,4 @@ ARC_ENTRY strchr
|
|
j_s.d [blink]
|
|
mov.mi r0,0
|
|
#endif /* ENDIAN */
|
|
-ARC_EXIT strchr
|
|
+END(strchr)
|
|
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
|
|
index 5dc802b..3544600 100644
|
|
--- a/arch/arc/lib/strcmp.S
|
|
+++ b/arch/arc/lib/strcmp.S
|
|
@@ -13,9 +13,9 @@
|
|
source 1; however, that would increase the overhead for loop setup / finish,
|
|
and strcmp might often terminate early. */
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
-ARC_ENTRY strcmp
|
|
+ENTRY(strcmp)
|
|
or r2,r0,r1
|
|
bmsk_s r2,r2,1
|
|
brne r2,0,.Lcharloop
|
|
@@ -93,4 +93,4 @@ ARC_ENTRY strcmp
|
|
.Lcmpend:
|
|
j_s.d [blink]
|
|
sub r0,r2,r3
|
|
-ARC_EXIT strcmp
|
|
+END(strcmp)
|
|
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
|
|
index b7ca4ae..8422f38 100644
|
|
--- a/arch/arc/lib/strcpy-700.S
|
|
+++ b/arch/arc/lib/strcpy-700.S
|
|
@@ -16,9 +16,9 @@
|
|
there, but the it is not likely to be taken often, and it
|
|
would also be likey to cost an unaligned mispredict at the next call. */
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
-ARC_ENTRY strcpy
|
|
+ENTRY(strcpy)
|
|
or r2,r0,r1
|
|
bmsk_s r2,r2,1
|
|
brne.d r2,0,charloop
|
|
@@ -67,4 +67,4 @@ charloop:
|
|
brne.d r3,0,charloop
|
|
stb.ab r3,[r10,1]
|
|
j [blink]
|
|
-ARC_EXIT strcpy
|
|
+END(strcpy)
|
|
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
|
|
index 39759e0..53cfd56 100644
|
|
--- a/arch/arc/lib/strlen.S
|
|
+++ b/arch/arc/lib/strlen.S
|
|
@@ -6,9 +6,9 @@
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
-#include <asm/linkage.h>
|
|
+#include <linux/linkage.h>
|
|
|
|
-ARC_ENTRY strlen
|
|
+ENTRY(strlen)
|
|
or r3,r0,7
|
|
ld r2,[r3,-7]
|
|
ld.a r6,[r3,-3]
|
|
@@ -80,4 +80,4 @@ ARC_ENTRY strlen
|
|
.Learly_end:
|
|
b.d .Lend
|
|
sub_s.ne r1,r1,r1
|
|
-ARC_EXIT strlen
|
|
+END(strlen)
|
|
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
|
|
index 400c663..1f676c4 100644
|
|
--- a/arch/arc/mm/cache_arc700.c
|
|
+++ b/arch/arc/mm/cache_arc700.c
|
|
@@ -73,37 +73,9 @@
|
|
#include <asm/cachectl.h>
|
|
#include <asm/setup.h>
|
|
|
|
-/* Instruction cache related Auxiliary registers */
|
|
-#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
|
|
-#define ARC_REG_IC_IVIC 0x10
|
|
-#define ARC_REG_IC_CTRL 0x11
|
|
-#define ARC_REG_IC_IVIL 0x19
|
|
-#if (CONFIG_ARC_MMU_VER > 2)
|
|
-#define ARC_REG_IC_PTAG 0x1E
|
|
-#endif
|
|
-
|
|
-/* Bit val in IC_CTRL */
|
|
-#define IC_CTRL_CACHE_DISABLE 0x1
|
|
-
|
|
-/* Data cache related Auxiliary registers */
|
|
-#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
|
|
-#define ARC_REG_DC_IVDC 0x47
|
|
-#define ARC_REG_DC_CTRL 0x48
|
|
-#define ARC_REG_DC_IVDL 0x4A
|
|
-#define ARC_REG_DC_FLSH 0x4B
|
|
-#define ARC_REG_DC_FLDL 0x4C
|
|
-#if (CONFIG_ARC_MMU_VER > 2)
|
|
-#define ARC_REG_DC_PTAG 0x5C
|
|
-#endif
|
|
-
|
|
-/* Bit val in DC_CTRL */
|
|
-#define DC_CTRL_INV_MODE_FLUSH 0x40
|
|
-#define DC_CTRL_FLUSH_STATUS 0x100
|
|
-
|
|
-char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
|
|
+char *arc_cache_mumbojumbo(int c, char *buf, int len)
|
|
{
|
|
int n = 0;
|
|
- unsigned int c = smp_processor_id();
|
|
|
|
#define PR_CACHE(p, enb, str) \
|
|
{ \
|
|
@@ -169,72 +141,43 @@ void read_decode_cache_bcr(void)
|
|
*/
|
|
void arc_cache_init(void)
|
|
{
|
|
- unsigned int cpu = smp_processor_id();
|
|
- struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
|
|
- struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
|
|
- unsigned int dcache_does_alias, temp;
|
|
+ unsigned int __maybe_unused cpu = smp_processor_id();
|
|
+ struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc;
|
|
char str[256];
|
|
|
|
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
|
|
|
|
- if (!ic->ver)
|
|
- goto chk_dc;
|
|
-
|
|
-#ifdef CONFIG_ARC_HAS_ICACHE
|
|
- /* 1. Confirm some of I-cache params which Linux assumes */
|
|
- if (ic->line_len != L1_CACHE_BYTES)
|
|
- panic("Cache H/W doesn't match kernel Config");
|
|
-
|
|
- if (ic->ver != CONFIG_ARC_MMU_VER)
|
|
- panic("Cache ver doesn't match MMU ver\n");
|
|
-#endif
|
|
-
|
|
- /* Enable/disable I-Cache */
|
|
- temp = read_aux_reg(ARC_REG_IC_CTRL);
|
|
-
|
|
#ifdef CONFIG_ARC_HAS_ICACHE
|
|
- temp &= ~IC_CTRL_CACHE_DISABLE;
|
|
-#else
|
|
- temp |= IC_CTRL_CACHE_DISABLE;
|
|
+ ic = &cpuinfo_arc700[cpu].icache;
|
|
+ if (ic->ver) {
|
|
+ if (ic->line_len != L1_CACHE_BYTES)
|
|
+ panic("ICache line [%d] != kernel Config [%d]",
|
|
+ ic->line_len, L1_CACHE_BYTES);
|
|
+
|
|
+ if (ic->ver != CONFIG_ARC_MMU_VER)
|
|
+ panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
|
|
+ ic->ver, CONFIG_ARC_MMU_VER);
|
|
+ }
|
|
#endif
|
|
|
|
- write_aux_reg(ARC_REG_IC_CTRL, temp);
|
|
-
|
|
-chk_dc:
|
|
- if (!dc->ver)
|
|
- return;
|
|
-
|
|
#ifdef CONFIG_ARC_HAS_DCACHE
|
|
- if (dc->line_len != L1_CACHE_BYTES)
|
|
- panic("Cache H/W doesn't match kernel Config");
|
|
+ dc = &cpuinfo_arc700[cpu].dcache;
|
|
+ if (dc->ver) {
|
|
+ unsigned int dcache_does_alias;
|
|
|
|
- /* check for D-Cache aliasing */
|
|
- dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
|
|
+ if (dc->line_len != L1_CACHE_BYTES)
|
|
+ panic("DCache line [%d] != kernel Config [%d]",
|
|
+ dc->line_len, L1_CACHE_BYTES);
|
|
|
|
- if (dcache_does_alias && !cache_is_vipt_aliasing())
|
|
- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
|
|
- else if (!dcache_does_alias && cache_is_vipt_aliasing())
|
|
- panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
|
|
-#endif
|
|
-
|
|
- /* Set the default Invalidate Mode to "simpy discard dirty lines"
|
|
- * as this is more frequent then flush before invalidate
|
|
- * Ofcourse we toggle this default behviour when desired
|
|
- */
|
|
- temp = read_aux_reg(ARC_REG_DC_CTRL);
|
|
- temp &= ~DC_CTRL_INV_MODE_FLUSH;
|
|
+ /* check for D-Cache aliasing */
|
|
+ dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
|
|
|
|
-#ifdef CONFIG_ARC_HAS_DCACHE
|
|
- /* Enable D-Cache: Clear Bit 0 */
|
|
- write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
|
|
-#else
|
|
- /* Flush D cache */
|
|
- write_aux_reg(ARC_REG_DC_FLSH, 0x1);
|
|
- /* Disable D cache */
|
|
- write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
|
|
+ if (dcache_does_alias && !cache_is_vipt_aliasing())
|
|
+ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
|
|
+ else if (!dcache_does_alias && cache_is_vipt_aliasing())
|
|
+ panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
|
|
+ }
|
|
#endif
|
|
-
|
|
- return;
|
|
}
|
|
|
|
#define OP_INV 0x1
|
|
@@ -254,12 +197,16 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
|
|
|
|
if (cacheop == OP_INV_IC) {
|
|
aux_cmd = ARC_REG_IC_IVIL;
|
|
+#if (CONFIG_ARC_MMU_VER > 2)
|
|
aux_tag = ARC_REG_IC_PTAG;
|
|
+#endif
|
|
}
|
|
else {
|
|
/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
|
|
aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
|
|
+#if (CONFIG_ARC_MMU_VER > 2)
|
|
aux_tag = ARC_REG_DC_PTAG;
|
|
+#endif
|
|
}
|
|
|
|
/* Ensure we properly floor/ceil the non-line aligned/sized requests
|
|
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
|
|
index 9c69552..01e18b5 100644
|
|
--- a/arch/arc/mm/fault.c
|
|
+++ b/arch/arc/mm/fault.c
|
|
@@ -162,6 +162,8 @@ good_area:
|
|
/* TBD: switch to pagefault_out_of_memory() */
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
|
|
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
|
|
index 3fcfdb3..79bfc81 100644
|
|
--- a/arch/arc/mm/tlbex.S
|
|
+++ b/arch/arc/mm/tlbex.S
|
|
@@ -260,7 +260,7 @@ ARCFP_CODE ;Fast Path Code, candidate for ICCM
|
|
; I-TLB Miss Exception Handler
|
|
;-----------------------------------------------------------------------------
|
|
|
|
-ARC_ENTRY EV_TLBMissI
|
|
+ENTRY(EV_TLBMissI)
|
|
|
|
TLBMISS_FREEUP_REGS
|
|
|
|
@@ -293,13 +293,13 @@ ARC_ENTRY EV_TLBMissI
|
|
TLBMISS_RESTORE_REGS
|
|
rtie
|
|
|
|
-ARC_EXIT EV_TLBMissI
|
|
+END(EV_TLBMissI)
|
|
|
|
;-----------------------------------------------------------------------------
|
|
; D-TLB Miss Exception Handler
|
|
;-----------------------------------------------------------------------------
|
|
|
|
-ARC_ENTRY EV_TLBMissD
|
|
+ENTRY(EV_TLBMissD)
|
|
|
|
TLBMISS_FREEUP_REGS
|
|
|
|
@@ -381,6 +381,4 @@ do_slow_path_pf:
|
|
bl do_page_fault
|
|
b ret_from_exception
|
|
|
|
-ARC_EXIT EV_TLBMissD
|
|
-
|
|
-ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr
|
|
+END(EV_TLBMissD)
|
|
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
|
|
index 066b034..8017cde 100644
|
|
--- a/arch/arm/boot/compressed/head.S
|
|
+++ b/arch/arm/boot/compressed/head.S
|
|
@@ -400,8 +400,7 @@ dtb_check_done:
|
|
add sp, sp, r6
|
|
#endif
|
|
|
|
- tst r4, #1
|
|
- bleq cache_clean_flush
|
|
+ bl cache_clean_flush
|
|
|
|
adr r0, BSYM(restart)
|
|
add r0, r0, r6
|
|
@@ -1050,6 +1049,8 @@ cache_clean_flush:
|
|
b call_cache_fn
|
|
|
|
__armv4_mpu_cache_flush:
|
|
+ tst r4, #1
|
|
+ movne pc, lr
|
|
mov r2, #1
|
|
mov r3, #0
|
|
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
|
|
@@ -1067,6 +1068,8 @@ __armv4_mpu_cache_flush:
|
|
mov pc, lr
|
|
|
|
__fa526_cache_flush:
|
|
+ tst r4, #1
|
|
+ movne pc, lr
|
|
mov r1, #0
|
|
mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
|
|
mcr p15, 0, r1, c7, c5, 0 @ flush I cache
|
|
@@ -1075,13 +1078,16 @@ __fa526_cache_flush:
|
|
|
|
__armv6_mmu_cache_flush:
|
|
mov r1, #0
|
|
- mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
|
|
+ tst r4, #1
|
|
+ mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
|
|
mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
|
|
- mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
|
|
+ mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
|
|
mcr p15, 0, r1, c7, c10, 4 @ drain WB
|
|
mov pc, lr
|
|
|
|
__armv7_mmu_cache_flush:
|
|
+ tst r4, #1
|
|
+ bne iflush
|
|
mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
|
|
tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
|
|
mov r10, #0
|
|
@@ -1142,6 +1148,8 @@ iflush:
|
|
mov pc, lr
|
|
|
|
__armv5tej_mmu_cache_flush:
|
|
+ tst r4, #1
|
|
+ movne pc, lr
|
|
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
|
|
bne 1b
|
|
mcr p15, 0, r0, c7, c5, 0 @ flush I cache
|
|
@@ -1149,6 +1157,8 @@ __armv5tej_mmu_cache_flush:
|
|
mov pc, lr
|
|
|
|
__armv4_mmu_cache_flush:
|
|
+ tst r4, #1
|
|
+ movne pc, lr
|
|
mov r2, #64*1024 @ default: 32K dcache size (*2)
|
|
mov r11, #32 @ default: 32 byte line size
|
|
mrc p15, 0, r3, c0, c0, 1 @ read cache type
|
|
@@ -1182,6 +1192,8 @@ no_cache_id:
|
|
|
|
__armv3_mmu_cache_flush:
|
|
__armv3_mpu_cache_flush:
|
|
+ tst r4, #1
|
|
+ movne pc, lr
|
|
mov r1, #0
|
|
mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
|
|
mov pc, lr
|
|
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
|
|
index 2e7d932..b3eff40 100644
|
|
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
|
|
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
|
|
@@ -197,6 +197,7 @@
|
|
|
|
usb@47401000 {
|
|
status = "okay";
|
|
+ dr_mode = "peripheral";
|
|
};
|
|
|
|
usb@47401800 {
|
|
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
|
|
index c6bd4d9..8775681 100644
|
|
--- a/arch/arm/boot/dts/am4372.dtsi
|
|
+++ b/arch/arm/boot/dts/am4372.dtsi
|
|
@@ -161,9 +161,6 @@
|
|
ti,hwmods = "mailbox";
|
|
ti,mbox-num-users = <4>;
|
|
ti,mbox-num-fifos = <8>;
|
|
- ti,mbox-names = "wkup_m3";
|
|
- ti,mbox-data = <0 0 0 0>;
|
|
- status = "disabled";
|
|
};
|
|
|
|
timer1: timer@44e31000 {
|
|
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
|
|
index 651aeb5..f3188e9 100644
|
|
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
|
|
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
|
|
@@ -144,6 +144,10 @@
|
|
marvell,nand-enable-arbiter;
|
|
nand-on-flash-bbt;
|
|
|
|
+ /* Use Hardware BCH ECC */
|
|
+ nand-ecc-strength = <4>;
|
|
+ nand-ecc-step-size = <512>;
|
|
+
|
|
partition@0 {
|
|
label = "u-boot";
|
|
reg = <0x0000000 0x180000>; /* 1.5MB */
|
|
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
|
|
index 4e27587..da406c1 100644
|
|
--- a/arch/arm/boot/dts/armada-370-netgear-rn104.dts
|
|
+++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
|
|
@@ -146,6 +146,10 @@
|
|
marvell,nand-enable-arbiter;
|
|
nand-on-flash-bbt;
|
|
|
|
+ /* Use Hardware BCH ECC */
|
|
+ nand-ecc-strength = <4>;
|
|
+ nand-ecc-step-size = <512>;
|
|
+
|
|
partition@0 {
|
|
label = "u-boot";
|
|
reg = <0x0000000 0x180000>; /* 1.5MB */
|
|
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
|
|
index 0d8530c..34841fc 100644
|
|
--- a/arch/arm/boot/dts/armada-370.dtsi
|
|
+++ b/arch/arm/boot/dts/armada-370.dtsi
|
|
@@ -106,11 +106,6 @@
|
|
reg = <0x11100 0x20>;
|
|
};
|
|
|
|
- system-controller@18200 {
|
|
- compatible = "marvell,armada-370-xp-system-controller";
|
|
- reg = <0x18200 0x100>;
|
|
- };
|
|
-
|
|
pinctrl {
|
|
compatible = "marvell,mv88f6710-pinctrl";
|
|
reg = <0x18000 0x38>;
|
|
@@ -167,6 +162,11 @@
|
|
interrupts = <91>;
|
|
};
|
|
|
|
+ system-controller@18200 {
|
|
+ compatible = "marvell,armada-370-xp-system-controller";
|
|
+ reg = <0x18200 0x100>;
|
|
+ };
|
|
+
|
|
gateclk: clock-gating-control@18220 {
|
|
compatible = "marvell,armada-370-gating-clock";
|
|
reg = <0x18220 0x4>;
|
|
diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
|
|
index ff049ee..b4aba09 100644
|
|
--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
|
|
+++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
|
|
@@ -224,6 +224,10 @@
|
|
marvell,nand-enable-arbiter;
|
|
nand-on-flash-bbt;
|
|
|
|
+ /* Use Hardware BCH ECC */
|
|
+ nand-ecc-strength = <4>;
|
|
+ nand-ecc-step-size = <512>;
|
|
+
|
|
partition@0 {
|
|
label = "u-boot";
|
|
reg = <0x0000000 0x180000>; /* 1.5MB */
|
|
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
|
|
index 1c6bd83..2ade357 100644
|
|
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
|
|
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
|
|
@@ -69,6 +69,10 @@
|
|
};
|
|
|
|
internal-regs {
|
|
+ rtc@10300 {
|
|
+ /* No crystal connected to the internal RTC */
|
|
+ status = "disabled";
|
|
+ };
|
|
serial@12000 {
|
|
clock-frequency = <250000000>;
|
|
status = "okay";
|
|
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
|
|
index fece866..b8f234b 100644
|
|
--- a/arch/arm/boot/dts/at91sam9263.dtsi
|
|
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
|
|
@@ -535,6 +535,7 @@
|
|
compatible = "atmel,hsmci";
|
|
reg = <0xfff80000 0x600>;
|
|
interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>;
|
|
+ pinctrl-names = "default";
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
status = "disabled";
|
|
@@ -544,6 +545,7 @@
|
|
compatible = "atmel,hsmci";
|
|
reg = <0xfff84000 0x600>;
|
|
interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>;
|
|
+ pinctrl-names = "default";
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
status = "disabled";
|
|
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
|
|
index 187fd46..355117c 100644
|
|
--- a/arch/arm/boot/dts/dove.dtsi
|
|
+++ b/arch/arm/boot/dts/dove.dtsi
|
|
@@ -154,7 +154,7 @@
|
|
|
|
uart2: serial@12200 {
|
|
compatible = "ns16550a";
|
|
- reg = <0x12000 0x100>;
|
|
+ reg = <0x12200 0x100>;
|
|
reg-shift = <2>;
|
|
interrupts = <9>;
|
|
clocks = <&core_clk 0>;
|
|
@@ -163,7 +163,7 @@
|
|
|
|
uart3: serial@12300 {
|
|
compatible = "ns16550a";
|
|
- reg = <0x12100 0x100>;
|
|
+ reg = <0x12300 0x100>;
|
|
reg-shift = <2>;
|
|
interrupts = <10>;
|
|
clocks = <&core_clk 0>;
|
|
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
|
|
index 5babba0..9381754 100644
|
|
--- a/arch/arm/boot/dts/dra7-evm.dts
|
|
+++ b/arch/arm/boot/dts/dra7-evm.dts
|
|
@@ -50,13 +50,13 @@
|
|
|
|
mcspi1_pins: pinmux_mcspi1_pins {
|
|
pinctrl-single,pins = <
|
|
- 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */
|
|
- 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */
|
|
- 0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */
|
|
- 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
|
|
- 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */
|
|
- 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */
|
|
- 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */
|
|
+ 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */
|
|
+ 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */
|
|
+ 0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */
|
|
+ 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */
|
|
+ 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs1 */
|
|
+ 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */
|
|
+ 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */
|
|
>;
|
|
};
|
|
|
|
@@ -182,6 +182,7 @@
|
|
regulator-name = "ldo3";
|
|
regulator-min-microvolt = <1800000>;
|
|
regulator-max-microvolt = <1800000>;
|
|
+ regulator-always-on;
|
|
regulator-boot-on;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
|
|
index 1fd75aa..f60aeee 100644
|
|
--- a/arch/arm/boot/dts/dra7.dtsi
|
|
+++ b/arch/arm/boot/dts/dra7.dtsi
|
|
@@ -178,7 +178,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio2: gpio@48055000 {
|
|
@@ -189,7 +189,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio3: gpio@48057000 {
|
|
@@ -200,7 +200,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio4: gpio@48059000 {
|
|
@@ -211,7 +211,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio5: gpio@4805b000 {
|
|
@@ -222,7 +222,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio6: gpio@4805d000 {
|
|
@@ -233,7 +233,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio7: gpio@48051000 {
|
|
@@ -244,7 +244,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
gpio8: gpio@48053000 {
|
|
@@ -255,7 +255,7 @@
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <1>;
|
|
+ #interrupt-cells = <2>;
|
|
};
|
|
|
|
uart1: serial@4806a000 {
|
|
@@ -458,7 +458,7 @@
|
|
};
|
|
|
|
wdt2: wdt@4ae14000 {
|
|
- compatible = "ti,omap4-wdt";
|
|
+ compatible = "ti,omap3-wdt";
|
|
reg = <0x4ae14000 0x80>;
|
|
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
|
|
ti,hwmods = "wd_timer2";
|
|
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
|
|
index e96da9a..f2512e1 100644
|
|
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
|
|
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
|
|
@@ -243,10 +243,18 @@
|
|
ti,invert-autoidle-bit;
|
|
};
|
|
|
|
+ dpll_core_byp_mux: dpll_core_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x012c>;
|
|
+ };
|
|
+
|
|
dpll_core_ck: dpll_core_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-core-clock";
|
|
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
|
|
reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
|
|
};
|
|
|
|
@@ -309,10 +317,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_dsp_byp_mux: dpll_dsp_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x0240>;
|
|
+ };
|
|
+
|
|
dpll_dsp_ck: dpll_dsp_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
|
|
reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
|
|
};
|
|
|
|
@@ -335,10 +351,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_iva_byp_mux: dpll_iva_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x01ac>;
|
|
+ };
|
|
+
|
|
dpll_iva_ck: dpll_iva_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
|
|
reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
|
|
};
|
|
|
|
@@ -361,10 +385,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_gpu_byp_mux: dpll_gpu_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x02e4>;
|
|
+ };
|
|
+
|
|
dpll_gpu_ck: dpll_gpu_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
|
|
reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
|
|
};
|
|
|
|
@@ -398,10 +430,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_ddr_byp_mux: dpll_ddr_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x021c>;
|
|
+ };
|
|
+
|
|
dpll_ddr_ck: dpll_ddr_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
|
|
reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
|
|
};
|
|
|
|
@@ -416,10 +456,18 @@
|
|
ti,invert-autoidle-bit;
|
|
};
|
|
|
|
+ dpll_gmac_byp_mux: dpll_gmac_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x02b4>;
|
|
+ };
|
|
+
|
|
dpll_gmac_ck: dpll_gmac_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
|
|
reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
|
|
};
|
|
|
|
@@ -482,10 +530,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_eve_byp_mux: dpll_eve_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x0290>;
|
|
+ };
|
|
+
|
|
dpll_eve_ck: dpll_eve_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
|
|
reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
|
|
};
|
|
|
|
@@ -1214,10 +1270,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_per_byp_mux: dpll_per_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x014c>;
|
|
+ };
|
|
+
|
|
dpll_per_ck: dpll_per_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-clock";
|
|
- clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
|
|
reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
|
|
};
|
|
|
|
@@ -1240,10 +1304,18 @@
|
|
clock-div = <1>;
|
|
};
|
|
|
|
+ dpll_usb_byp_mux: dpll_usb_byp_mux {
|
|
+ #clock-cells = <0>;
|
|
+ compatible = "ti,mux-clock";
|
|
+ clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
|
|
+ ti,bit-shift = <23>;
|
|
+ reg = <0x018c>;
|
|
+ };
|
|
+
|
|
dpll_usb_ck: dpll_usb_ck {
|
|
#clock-cells = <0>;
|
|
compatible = "ti,omap4-dpll-j-type-clock";
|
|
- clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
|
|
+ clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
|
|
reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
|
|
index ab1116d..83a5b86 100644
|
|
--- a/arch/arm/boot/dts/hi3620.dtsi
|
|
+++ b/arch/arm/boot/dts/hi3620.dtsi
|
|
@@ -73,7 +73,7 @@
|
|
|
|
L2: l2-cache {
|
|
compatible = "arm,pl310-cache";
|
|
- reg = <0xfc10000 0x100000>;
|
|
+ reg = <0x100000 0x100000>;
|
|
interrupts = <0 15 4>;
|
|
cache-unified;
|
|
cache-level = <2>;
|
|
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
|
|
index 526bfdb..f8922fb 100644
|
|
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
|
|
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
|
|
@@ -12,6 +12,7 @@
|
|
*/
|
|
|
|
/dts-v1/;
|
|
+#include <dt-bindings/gpio/gpio.h>
|
|
#include "imx23.dtsi"
|
|
|
|
/ {
|
|
@@ -93,6 +94,7 @@
|
|
|
|
ahb@80080000 {
|
|
usb0: usb@80080000 {
|
|
+ dr_mode = "host";
|
|
vbus-supply = <®_usb0_vbus>;
|
|
status = "okay";
|
|
};
|
|
@@ -119,7 +121,7 @@
|
|
|
|
user {
|
|
label = "green";
|
|
- gpios = <&gpio2 1 1>;
|
|
+ gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
|
|
index 904416e..3fd539b 100644
|
|
--- a/arch/arm/boot/dts/imx25.dtsi
|
|
+++ b/arch/arm/boot/dts/imx25.dtsi
|
|
@@ -160,7 +160,7 @@
|
|
#size-cells = <0>;
|
|
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
|
|
reg = <0x43fa4000 0x4000>;
|
|
- clocks = <&clks 62>, <&clks 62>;
|
|
+ clocks = <&clks 78>, <&clks 78>;
|
|
clock-names = "ipg", "per";
|
|
interrupts = <14>;
|
|
status = "disabled";
|
|
@@ -354,7 +354,7 @@
|
|
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
|
|
#pwm-cells = <2>;
|
|
reg = <0x53fa0000 0x4000>;
|
|
- clocks = <&clks 106>, <&clks 36>;
|
|
+ clocks = <&clks 106>, <&clks 52>;
|
|
clock-names = "ipg", "per";
|
|
interrupts = <36>;
|
|
};
|
|
@@ -373,7 +373,7 @@
|
|
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
|
|
#pwm-cells = <2>;
|
|
reg = <0x53fa8000 0x4000>;
|
|
- clocks = <&clks 107>, <&clks 36>;
|
|
+ clocks = <&clks 107>, <&clks 52>;
|
|
clock-names = "ipg", "per";
|
|
interrupts = <41>;
|
|
};
|
|
@@ -413,8 +413,9 @@
|
|
|
|
pwm4: pwm@53fc8000 {
|
|
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
|
|
+ #pwm-cells = <2>;
|
|
reg = <0x53fc8000 0x4000>;
|
|
- clocks = <&clks 108>, <&clks 36>;
|
|
+ clocks = <&clks 108>, <&clks 52>;
|
|
clock-names = "ipg", "per";
|
|
interrupts = <42>;
|
|
};
|
|
@@ -460,7 +461,7 @@
|
|
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
|
|
#pwm-cells = <2>;
|
|
reg = <0x53fe0000 0x4000>;
|
|
- clocks = <&clks 105>, <&clks 36>;
|
|
+ clocks = <&clks 105>, <&clks 52>;
|
|
clock-names = "ipg", "per";
|
|
interrupts = <26>;
|
|
};
|
|
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
|
|
index 3f1f2c8..78be5c2 100644
|
|
--- a/arch/arm/boot/dts/imx27.dtsi
|
|
+++ b/arch/arm/boot/dts/imx27.dtsi
|
|
@@ -430,7 +430,7 @@
|
|
|
|
fec: ethernet@1002b000 {
|
|
compatible = "fsl,imx27-fec";
|
|
- reg = <0x1002b000 0x4000>;
|
|
+ reg = <0x1002b000 0x1000>;
|
|
interrupts = <50>;
|
|
clocks = <&clks 48>, <&clks 67>;
|
|
clock-names = "ipg", "ahb";
|
|
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
|
|
index f34d469..6f776ab 100644
|
|
--- a/arch/arm/boot/dts/imx28.dtsi
|
|
+++ b/arch/arm/boot/dts/imx28.dtsi
|
|
@@ -803,7 +803,7 @@
|
|
80 81 68 69
|
|
70 71 72 73
|
|
74 75 76 77>;
|
|
- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
|
|
+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
|
|
"saif0", "saif1", "i2c0", "i2c1",
|
|
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
|
|
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
|
|
diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
|
|
index 57e00f9..a25debb 100644
|
|
--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
|
|
+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
|
|
@@ -198,10 +198,6 @@
|
|
status = "okay";
|
|
};
|
|
|
|
-&pwm {
|
|
- status = "okay";
|
|
-};
|
|
-
|
|
&pinctrl0 {
|
|
gpio_leds: gpio-leds {
|
|
samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7";
|
|
diff --git a/arch/arm/boot/dts/s3c64xx.dtsi b/arch/arm/boot/dts/s3c64xx.dtsi
|
|
index 4e3be4d..4f1eff3 100644
|
|
--- a/arch/arm/boot/dts/s3c64xx.dtsi
|
|
+++ b/arch/arm/boot/dts/s3c64xx.dtsi
|
|
@@ -168,7 +168,6 @@
|
|
clocks = <&clocks PCLK_PWM>;
|
|
samsung,pwm-outputs = <0>, <1>;
|
|
#pwm-cells = <3>;
|
|
- status = "disabled";
|
|
};
|
|
|
|
pinctrl0: pinctrl@7f008000 {
|
|
diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
|
|
index a077585..eaf4145 100644
|
|
--- a/arch/arm/boot/dts/sama5d3_can.dtsi
|
|
+++ b/arch/arm/boot/dts/sama5d3_can.dtsi
|
|
@@ -40,7 +40,7 @@
|
|
atmel,clk-output-range = <0 66000000>;
|
|
};
|
|
|
|
- can1_clk: can0_clk {
|
|
+ can1_clk: can1_clk {
|
|
#clock-cells = <0>;
|
|
reg = <41>;
|
|
atmel,clk-output-range = <0 66000000>;
|
|
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
|
|
index e0853ea..75e748e 100644
|
|
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
|
|
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
|
|
@@ -985,23 +985,6 @@
|
|
status = "disabled";
|
|
};
|
|
|
|
- vmmci: regulator-gpio {
|
|
- compatible = "regulator-gpio";
|
|
-
|
|
- regulator-min-microvolt = <1800000>;
|
|
- regulator-max-microvolt = <2900000>;
|
|
- regulator-name = "mmci-reg";
|
|
- regulator-type = "voltage";
|
|
-
|
|
- startup-delay-us = <100>;
|
|
- enable-active-high;
|
|
-
|
|
- states = <1800000 0x1
|
|
- 2900000 0x0>;
|
|
-
|
|
- status = "disabled";
|
|
- };
|
|
-
|
|
mcde@a0350000 {
|
|
compatible = "stericsson,mcde";
|
|
reg = <0xa0350000 0x1000>, /* MCDE */
|
|
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
|
|
index 6cb9b68..0b668f8 100644
|
|
--- a/arch/arm/boot/dts/ste-href.dtsi
|
|
+++ b/arch/arm/boot/dts/ste-href.dtsi
|
|
@@ -111,6 +111,21 @@
|
|
pinctrl-1 = <&i2c3_sleep_mode>;
|
|
};
|
|
|
|
+ vmmci: regulator-gpio {
|
|
+ compatible = "regulator-gpio";
|
|
+
|
|
+ regulator-min-microvolt = <1800000>;
|
|
+ regulator-max-microvolt = <2900000>;
|
|
+ regulator-name = "mmci-reg";
|
|
+ regulator-type = "voltage";
|
|
+
|
|
+ startup-delay-us = <100>;
|
|
+ enable-active-high;
|
|
+
|
|
+ states = <1800000 0x1
|
|
+ 2900000 0x0>;
|
|
+ };
|
|
+
|
|
// External Micro SD slot
|
|
sdi0_per1@80126000 {
|
|
arm,primecell-periphid = <0x10480180>;
|
|
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
|
|
index 97d5d21..5deaf3c 100644
|
|
--- a/arch/arm/boot/dts/ste-snowball.dts
|
|
+++ b/arch/arm/boot/dts/ste-snowball.dts
|
|
@@ -146,8 +146,21 @@
|
|
};
|
|
|
|
vmmci: regulator-gpio {
|
|
+ compatible = "regulator-gpio";
|
|
+
|
|
gpios = <&gpio7 4 0x4>;
|
|
enable-gpio = <&gpio6 25 0x4>;
|
|
+
|
|
+ regulator-min-microvolt = <1800000>;
|
|
+ regulator-max-microvolt = <2900000>;
|
|
+ regulator-name = "mmci-reg";
|
|
+ regulator-type = "voltage";
|
|
+
|
|
+ startup-delay-us = <100>;
|
|
+ enable-active-high;
|
|
+
|
|
+ states = <1800000 0x1
|
|
+ 2900000 0x0>;
|
|
};
|
|
|
|
// External Micro SD slot
|
|
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
|
|
index 48d2a7f..ce978bc 100644
|
|
--- a/arch/arm/boot/dts/tegra20.dtsi
|
|
+++ b/arch/arm/boot/dts/tegra20.dtsi
|
|
@@ -76,9 +76,9 @@
|
|
reset-names = "2d";
|
|
};
|
|
|
|
- gr3d@54140000 {
|
|
+ gr3d@54180000 {
|
|
compatible = "nvidia,tegra20-gr3d";
|
|
- reg = <0x54140000 0x00040000>;
|
|
+ reg = <0x54180000 0x00040000>;
|
|
clocks = <&tegra_car TEGRA20_CLK_GR3D>;
|
|
resets = <&tegra_car 24>;
|
|
reset-names = "3d";
|
|
@@ -138,9 +138,9 @@
|
|
status = "disabled";
|
|
};
|
|
|
|
- dsi@542c0000 {
|
|
+ dsi@54300000 {
|
|
compatible = "nvidia,tegra20-dsi";
|
|
- reg = <0x542c0000 0x00040000>;
|
|
+ reg = <0x54300000 0x00040000>;
|
|
clocks = <&tegra_car TEGRA20_CLK_DSI>;
|
|
resets = <&tegra_car 48>;
|
|
reset-names = "dsi";
|
|
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
|
|
index ee69829..cf4823b 100644
|
|
--- a/arch/arm/configs/multi_v7_defconfig
|
|
+++ b/arch/arm/configs/multi_v7_defconfig
|
|
@@ -235,6 +235,7 @@ CONFIG_SND_SOC_TEGRA_MAX98090=y
|
|
CONFIG_USB=y
|
|
CONFIG_USB_XHCI_HCD=y
|
|
CONFIG_USB_EHCI_HCD=y
|
|
+CONFIG_USB_EHCI_EXYNOS=y
|
|
CONFIG_USB_EHCI_TEGRA=y
|
|
CONFIG_USB_EHCI_HCD_PLATFORM=y
|
|
CONFIG_USB_ISP1760_HCD=y
|
|
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
|
|
index 3003fa1..0409b8f 100644
|
|
--- a/arch/arm/crypto/aes_glue.c
|
|
+++ b/arch/arm/crypto/aes_glue.c
|
|
@@ -93,6 +93,6 @@ module_exit(aes_fini);
|
|
|
|
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("aes");
|
|
-MODULE_ALIAS("aes-asm");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes-asm");
|
|
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
|
|
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
|
|
index 71e5fc7..1d1800f 100644
|
|
--- a/arch/arm/crypto/aesbs-core.S_shipped
|
|
+++ b/arch/arm/crypto/aesbs-core.S_shipped
|
|
@@ -58,14 +58,18 @@
|
|
# define VFP_ABI_FRAME 0
|
|
# define BSAES_ASM_EXTENDED_KEY
|
|
# define XTS_CHAIN_TWEAK
|
|
-# define __ARM_ARCH__ 7
|
|
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
|
|
+# define __ARM_MAX_ARCH__ 7
|
|
#endif
|
|
|
|
#ifdef __thumb__
|
|
# define adrl adr
|
|
#endif
|
|
|
|
-#if __ARM_ARCH__>=7
|
|
+#if __ARM_MAX_ARCH__>=7
|
|
+.arch armv7-a
|
|
+.fpu neon
|
|
+
|
|
.text
|
|
.syntax unified @ ARMv7-capable assembler is expected to handle this
|
|
#ifdef __thumb2__
|
|
@@ -74,8 +78,6 @@
|
|
.code 32
|
|
#endif
|
|
|
|
-.fpu neon
|
|
-
|
|
.type _bsaes_decrypt8,%function
|
|
.align 4
|
|
_bsaes_decrypt8:
|
|
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt:
|
|
vld1.8 {q8}, [r0] @ initial tweak
|
|
adr r2, .Lxts_magic
|
|
|
|
+#ifndef XTS_CHAIN_TWEAK
|
|
tst r9, #0xf @ if not multiple of 16
|
|
it ne @ Thumb2 thing, sanity check in ARM
|
|
subne r9, #0x10 @ subtract another 16 bytes
|
|
+#endif
|
|
subs r9, #0x80
|
|
|
|
blo .Lxts_dec_short
|
|
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
|
|
index 4522366..15468fb 100644
|
|
--- a/arch/arm/crypto/aesbs-glue.c
|
|
+++ b/arch/arm/crypto/aesbs-glue.c
|
|
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
|
|
dst += AES_BLOCK_SIZE;
|
|
} while (--blocks);
|
|
}
|
|
- err = blkcipher_walk_done(desc, &walk, 0);
|
|
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
|
|
}
|
|
return err;
|
|
}
|
|
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
|
|
bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
|
|
walk.nbytes, &ctx->dec, walk.iv);
|
|
kernel_neon_end();
|
|
- err = blkcipher_walk_done(desc, &walk, 0);
|
|
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
|
|
}
|
|
while (walk.nbytes) {
|
|
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
|
|
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
|
|
dst += AES_BLOCK_SIZE;
|
|
src += AES_BLOCK_SIZE;
|
|
} while (--blocks);
|
|
- err = blkcipher_walk_done(desc, &walk, 0);
|
|
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
|
|
}
|
|
return err;
|
|
}
|
|
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
|
|
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
|
|
walk.nbytes, &ctx->enc, walk.iv);
|
|
kernel_neon_end();
|
|
- err = blkcipher_walk_done(desc, &walk, 0);
|
|
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
|
|
}
|
|
return err;
|
|
}
|
|
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
|
|
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
|
|
walk.nbytes, &ctx->dec, walk.iv);
|
|
kernel_neon_end();
|
|
- err = blkcipher_walk_done(desc, &walk, 0);
|
|
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
|
|
}
|
|
return err;
|
|
}
|
|
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
|
|
index be068db..a4d3856 100644
|
|
--- a/arch/arm/crypto/bsaes-armv7.pl
|
|
+++ b/arch/arm/crypto/bsaes-armv7.pl
|
|
@@ -701,14 +701,18 @@ $code.=<<___;
|
|
# define VFP_ABI_FRAME 0
|
|
# define BSAES_ASM_EXTENDED_KEY
|
|
# define XTS_CHAIN_TWEAK
|
|
-# define __ARM_ARCH__ 7
|
|
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
|
|
+# define __ARM_MAX_ARCH__ 7
|
|
#endif
|
|
|
|
#ifdef __thumb__
|
|
# define adrl adr
|
|
#endif
|
|
|
|
-#if __ARM_ARCH__>=7
|
|
+#if __ARM_MAX_ARCH__>=7
|
|
+.arch armv7-a
|
|
+.fpu neon
|
|
+
|
|
.text
|
|
.syntax unified @ ARMv7-capable assembler is expected to handle this
|
|
#ifdef __thumb2__
|
|
@@ -717,8 +721,6 @@ $code.=<<___;
|
|
.code 32
|
|
#endif
|
|
|
|
-.fpu neon
|
|
-
|
|
.type _bsaes_decrypt8,%function
|
|
.align 4
|
|
_bsaes_decrypt8:
|
|
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt:
|
|
vld1.8 {@XMM[8]}, [r0] @ initial tweak
|
|
adr $magic, .Lxts_magic
|
|
|
|
+#ifndef XTS_CHAIN_TWEAK
|
|
tst $len, #0xf @ if not multiple of 16
|
|
it ne @ Thumb2 thing, sanity check in ARM
|
|
subne $len, #0x10 @ subtract another 16 bytes
|
|
+#endif
|
|
subs $len, #0x80
|
|
|
|
blo .Lxts_dec_short
|
|
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
|
|
index 76cd976..ace4cd6 100644
|
|
--- a/arch/arm/crypto/sha1_glue.c
|
|
+++ b/arch/arm/crypto/sha1_glue.c
|
|
@@ -175,5 +175,5 @@ module_exit(sha1_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
|
|
-MODULE_ALIAS("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1");
|
|
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
|
|
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
|
|
index f4b46d3..051b726 100644
|
|
--- a/arch/arm/include/asm/elf.h
|
|
+++ b/arch/arm/include/asm/elf.h
|
|
@@ -114,7 +114,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
|
|
the loader. We need to make sure that it is out of the way of the program
|
|
that it will "exec", and that there is sufficient room for the brk. */
|
|
|
|
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
|
|
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
|
|
|
/* When the program starts, a1 contains a pointer to a function to be
|
|
registered with atexit, as per the SVR4 ABI. A value of 0 means we
|
|
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
|
|
index 1d3153c..816db0b 100644
|
|
--- a/arch/arm/include/asm/kvm_arm.h
|
|
+++ b/arch/arm/include/asm/kvm_arm.h
|
|
@@ -55,6 +55,7 @@
|
|
* The bits we set in HCR:
|
|
* TAC: Trap ACTLR
|
|
* TSC: Trap SMC
|
|
+ * TVM: Trap VM ops (until MMU and caches are on)
|
|
* TSW: Trap cache operations by set/way
|
|
* TWI: Trap WFI
|
|
* TWE: Trap WFE
|
|
@@ -68,8 +69,7 @@
|
|
*/
|
|
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
|
|
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
|
|
- HCR_TWE | HCR_SWIO | HCR_TIDCP)
|
|
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
|
|
+ HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
|
|
|
|
/* System Control Register (SCTLR) bits */
|
|
#define SCTLR_TE (1 << 30)
|
|
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
|
|
index 661da11..53b3c4a 100644
|
|
--- a/arch/arm/include/asm/kvm_asm.h
|
|
+++ b/arch/arm/include/asm/kvm_asm.h
|
|
@@ -48,7 +48,9 @@
|
|
#define c13_TID_URO 26 /* Thread ID, User R/O */
|
|
#define c13_TID_PRIV 27 /* Thread ID, Privileged */
|
|
#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
|
|
-#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */
|
|
+#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */
|
|
+#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */
|
|
+#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
|
|
|
|
#define ARM_EXCEPTION_RESET 0
|
|
#define ARM_EXCEPTION_UNDEFINED 1
|
|
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
|
|
index 0fa90c9..853e2be 100644
|
|
--- a/arch/arm/include/asm/kvm_emulate.h
|
|
+++ b/arch/arm/include/asm/kvm_emulate.h
|
|
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
|
|
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
|
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
|
|
|
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ vcpu->arch.hcr = HCR_GUEST_MASK;
|
|
+}
|
|
+
|
|
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
|
|
{
|
|
return 1;
|
|
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
|
|
index 098f7dd..530f56e 100644
|
|
--- a/arch/arm/include/asm/kvm_host.h
|
|
+++ b/arch/arm/include/asm/kvm_host.h
|
|
@@ -42,7 +42,7 @@
|
|
|
|
struct kvm_vcpu;
|
|
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
|
|
-int kvm_target_cpu(void);
|
|
+int __attribute_const__ kvm_target_cpu(void);
|
|
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
|
|
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
|
|
|
|
@@ -101,6 +101,12 @@ struct kvm_vcpu_arch {
|
|
/* The CPU type we expose to the VM */
|
|
u32 midr;
|
|
|
|
+ /* HYP trapping configuration */
|
|
+ u32 hcr;
|
|
+
|
|
+ /* Interrupt related fields */
|
|
+ u32 irq_lines; /* IRQ and FIQ levels */
|
|
+
|
|
/* Exception Information */
|
|
struct kvm_vcpu_fault_info fault;
|
|
|
|
@@ -128,9 +134,6 @@ struct kvm_vcpu_arch {
|
|
/* IO related fields */
|
|
struct kvm_decode mmio_decode;
|
|
|
|
- /* Interrupt related fields */
|
|
- u32 irq_lines; /* IRQ and FIQ levels */
|
|
-
|
|
/* Cache some mmu pages needed inside spinlock regions */
|
|
struct kvm_mmu_memory_cache mmu_page_cache;
|
|
|
|
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
|
|
index 2d122ad..7d35af3 100644
|
|
--- a/arch/arm/include/asm/kvm_mmu.h
|
|
+++ b/arch/arm/include/asm/kvm_mmu.h
|
|
@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
|
|
void free_boot_hyp_pgd(void);
|
|
void free_hyp_pgds(void);
|
|
|
|
+void stage2_unmap_vm(struct kvm *kvm);
|
|
int kvm_alloc_stage2_pgd(struct kvm *kvm);
|
|
void kvm_free_stage2_pgd(struct kvm *kvm);
|
|
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
|
|
@@ -78,17 +79,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
|
|
flush_pmd_entry(pte);
|
|
}
|
|
|
|
-static inline bool kvm_is_write_fault(unsigned long hsr)
|
|
-{
|
|
- unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
|
|
- if (hsr_ec == HSR_EC_IABT)
|
|
- return false;
|
|
- else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
|
|
- return false;
|
|
- else
|
|
- return true;
|
|
-}
|
|
-
|
|
static inline void kvm_clean_pgd(pgd_t *pgd)
|
|
{
|
|
clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
|
|
@@ -114,11 +104,47 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
|
|
pmd_val(*pmd) |= L_PMD_S2_RDWR;
|
|
}
|
|
|
|
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
|
|
+#define kvm_pgd_addr_end(addr, end) \
|
|
+({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
|
|
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
|
|
+})
|
|
+
|
|
+#define kvm_pud_addr_end(addr,end) (end)
|
|
+
|
|
+#define kvm_pmd_addr_end(addr, end) \
|
|
+({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
|
|
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
|
|
+})
|
|
+
|
|
+#define kvm_pgd_index(addr) pgd_index(addr)
|
|
+
|
|
+static inline bool kvm_page_empty(void *ptr)
|
|
+{
|
|
+ struct page *ptr_page = virt_to_page(ptr);
|
|
+ return page_count(ptr_page) == 1;
|
|
+}
|
|
+
|
|
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
|
|
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
|
|
+#define kvm_pud_table_empty(pudp) (0)
|
|
+
|
|
+
|
|
struct kvm;
|
|
|
|
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
|
|
- unsigned long size)
|
|
+#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
|
|
+
|
|
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
|
|
+}
|
|
+
|
|
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
|
|
+ unsigned long size)
|
|
{
|
|
+ if (!vcpu_has_cache_enabled(vcpu))
|
|
+ kvm_flush_dcache_to_poc((void *)hva, size);
|
|
+
|
|
/*
|
|
* If we are going to insert an instruction page and the icache is
|
|
* either VIPT or PIPT, there is a potential problem where the host
|
|
@@ -139,9 +165,10 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
|
|
}
|
|
}
|
|
|
|
-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
|
|
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
|
|
|
|
+void stage2_flush_vm(struct kvm *kvm);
|
|
+
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
#endif /* __ARM_KVM_MMU_H__ */
|
|
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
|
|
index 626989f..9fd61c7 100644
|
|
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
|
|
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
|
|
@@ -43,7 +43,7 @@
|
|
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
|
|
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
|
|
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
|
|
-#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
|
|
+#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
|
|
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
|
|
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
|
|
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
|
|
@@ -72,6 +72,7 @@
|
|
#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
|
|
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
|
|
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
|
|
+#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
|
|
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
|
|
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
|
|
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
|
|
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
|
|
index 85c60ad..06e0bc0 100644
|
|
--- a/arch/arm/include/asm/pgtable-3level.h
|
|
+++ b/arch/arm/include/asm/pgtable-3level.h
|
|
@@ -79,18 +79,19 @@
|
|
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
|
|
#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
|
|
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
|
|
-#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
|
|
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
|
|
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
|
|
#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
|
|
-#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
|
|
-#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
|
|
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
|
|
+#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
|
|
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
|
|
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
|
|
|
|
-#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
|
|
-#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
|
|
-#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
|
|
-#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
|
|
+#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
|
|
+#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
|
|
+#define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
|
|
+#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
|
|
+#define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
|
|
|
|
/*
|
|
* To be used in assembly code with the upper page attributes.
|
|
@@ -207,27 +208,32 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
|
#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
|
|
#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
|
|
|
|
-#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
|
|
+#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
|
|
+ : !!(pmd_val(pmd) & (val)))
|
|
+#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
|
|
+
|
|
+#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
|
|
|
|
#define __HAVE_ARCH_PMD_WRITE
|
|
-#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
|
|
+#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
|
|
+#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
|
|
|
|
#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
|
|
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
|
|
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
-#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
|
|
-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
|
|
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
|
|
+#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
|
|
#endif
|
|
|
|
#define PMD_BIT_FUNC(fn,op) \
|
|
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
|
|
|
|
-PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
|
|
+PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
|
|
PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
|
|
-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
|
|
-PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
|
|
-PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
|
|
+PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
|
|
+PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
|
|
+PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
|
|
PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
|
|
|
|
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
|
|
@@ -241,8 +247,8 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
|
|
|
|
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
|
{
|
|
- const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
|
|
- PMD_SECT_VALID | PMD_SECT_NONE;
|
|
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
|
|
+ L_PMD_SECT_VALID | L_PMD_SECT_NONE;
|
|
pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
|
|
return pmd;
|
|
}
|
|
@@ -253,8 +259,13 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
|
|
BUG_ON(addr >= TASK_SIZE);
|
|
|
|
/* create a faulting entry if PROT_NONE protected */
|
|
- if (pmd_val(pmd) & PMD_SECT_NONE)
|
|
- pmd_val(pmd) &= ~PMD_SECT_VALID;
|
|
+ if (pmd_val(pmd) & L_PMD_SECT_NONE)
|
|
+ pmd_val(pmd) &= ~L_PMD_SECT_VALID;
|
|
+
|
|
+ if (pmd_write(pmd) && pmd_dirty(pmd))
|
|
+ pmd_val(pmd) &= ~PMD_SECT_AP2;
|
|
+ else
|
|
+ pmd_val(pmd) |= PMD_SECT_AP2;
|
|
|
|
*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
|
|
flush_pmd_entry(pmdp);
|
|
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
|
|
index 7d59b52..89dba13 100644
|
|
--- a/arch/arm/include/asm/pgtable.h
|
|
+++ b/arch/arm/include/asm/pgtable.h
|
|
@@ -214,12 +214,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
|
|
|
|
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
|
|
|
|
+#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
|
|
+ : !!(pte_val(pte) & (val)))
|
|
+#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
|
|
+
|
|
#define pte_none(pte) (!pte_val(pte))
|
|
-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
|
|
-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
|
|
-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
|
|
-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
|
|
-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
|
|
+#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
|
|
+#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
|
|
+#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
|
|
+#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
|
|
+#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
|
|
#define pte_special(pte) (0)
|
|
|
|
#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
|
|
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
|
|
index 22a3b9b..4157aec 100644
|
|
--- a/arch/arm/include/asm/smp.h
|
|
+++ b/arch/arm/include/asm/smp.h
|
|
@@ -74,6 +74,7 @@ struct secondary_data {
|
|
};
|
|
extern struct secondary_data secondary_data;
|
|
extern volatile int pen_release;
|
|
+extern void secondary_startup(void);
|
|
|
|
extern int __cpu_disable(void);
|
|
|
|
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
|
|
index 71a06b2..3e635ee 100644
|
|
--- a/arch/arm/include/asm/thread_info.h
|
|
+++ b/arch/arm/include/asm/thread_info.h
|
|
@@ -43,16 +43,6 @@ struct cpu_context_save {
|
|
__u32 extra[2]; /* Xscale 'acc' register, etc */
|
|
};
|
|
|
|
-struct arm_restart_block {
|
|
- union {
|
|
- /* For user cache flushing */
|
|
- struct {
|
|
- unsigned long start;
|
|
- unsigned long end;
|
|
- } cache;
|
|
- };
|
|
-};
|
|
-
|
|
/*
|
|
* low level task data that entry.S needs immediate access to.
|
|
* __switch_to() assumes cpu_context follows immediately after cpu_domain.
|
|
@@ -78,7 +68,6 @@ struct thread_info {
|
|
unsigned long thumbee_state; /* ThumbEE Handler Base register */
|
|
#endif
|
|
struct restart_block restart_block;
|
|
- struct arm_restart_block arm_restart_block;
|
|
};
|
|
|
|
#define INIT_THREAD_INFO(tsk) \
|
|
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
|
|
index 83259b8..5f833f7 100644
|
|
--- a/arch/arm/include/asm/tls.h
|
|
+++ b/arch/arm/include/asm/tls.h
|
|
@@ -1,6 +1,9 @@
|
|
#ifndef __ASMARM_TLS_H
|
|
#define __ASMARM_TLS_H
|
|
|
|
+#include <linux/compiler.h>
|
|
+#include <asm/thread_info.h>
|
|
+
|
|
#ifdef __ASSEMBLY__
|
|
#include <asm/asm-offsets.h>
|
|
.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
|
|
@@ -50,6 +53,49 @@
|
|
#endif
|
|
|
|
#ifndef __ASSEMBLY__
|
|
+
|
|
+static inline void set_tls(unsigned long val)
|
|
+{
|
|
+ struct thread_info *thread;
|
|
+
|
|
+ thread = current_thread_info();
|
|
+
|
|
+ thread->tp_value[0] = val;
|
|
+
|
|
+ /*
|
|
+ * This code runs with preemption enabled and therefore must
|
|
+ * be reentrant with respect to switch_tls.
|
|
+ *
|
|
+ * We need to ensure ordering between the shadow state and the
|
|
+ * hardware state, so that we don't corrupt the hardware state
|
|
+ * with a stale shadow state during context switch.
|
|
+ *
|
|
+ * If we're preempted here, switch_tls will load TPIDRURO from
|
|
+ * thread_info upon resuming execution and the following mcr
|
|
+ * is merely redundant.
|
|
+ */
|
|
+ barrier();
|
|
+
|
|
+ if (!tls_emu) {
|
|
+ if (has_tls_reg) {
|
|
+ asm("mcr p15, 0, %0, c13, c0, 3"
|
|
+ : : "r" (val));
|
|
+ } else {
|
|
+#ifdef CONFIG_KUSER_HELPERS
|
|
+ /*
|
|
+ * User space must never try to access this
|
|
+ * directly. Expect your app to break
|
|
+ * eventually if you do so. The user helper
|
|
+ * at 0xffff0fe0 must be used instead. (see
|
|
+ * entry-armv.S for details)
|
|
+ */
|
|
+ *((unsigned int *)0xffff0ff0) = val;
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ }
|
|
+}
|
|
+
|
|
static inline unsigned long get_tpuser(void)
|
|
{
|
|
unsigned long reg = 0;
|
|
@@ -59,5 +105,23 @@ static inline unsigned long get_tpuser(void)
|
|
|
|
return reg;
|
|
}
|
|
+
|
|
+static inline void set_tpuser(unsigned long val)
|
|
+{
|
|
+ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
|
|
+ * we need not update thread_info.
|
|
+ */
|
|
+ if (has_tls_reg && !tls_emu) {
|
|
+ asm("mcr p15, 0, %0, c13, c0, 2"
|
|
+ : : "r" (val));
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline void flush_tls(void)
|
|
+{
|
|
+ set_tls(0);
|
|
+ set_tpuser(0);
|
|
+}
|
|
+
|
|
#endif
|
|
#endif /* __ASMARM_TLS_H */
|
|
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
|
|
index 4387624..21ca0ce 100644
|
|
--- a/arch/arm/include/asm/unistd.h
|
|
+++ b/arch/arm/include/asm/unistd.h
|
|
@@ -15,7 +15,17 @@
|
|
|
|
#include <uapi/asm/unistd.h>
|
|
|
|
+/*
|
|
+ * This may need to be greater than __NR_last_syscall+1 in order to
|
|
+ * account for the padding in the syscall table
|
|
+ */
|
|
#define __NR_syscalls (384)
|
|
+
|
|
+/*
|
|
+ * *NOTE*: This is a ghost syscall private to the kernel. Only the
|
|
+ * __kuser_cmpxchg code in entry-armv.S should be aware of its
|
|
+ * existence. Don't ever use this from user code.
|
|
+ */
|
|
#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
|
|
|
|
#define __ARCH_WANT_STAT64
|
|
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
|
|
index fb5584d..c377633 100644
|
|
--- a/arch/arm/include/uapi/asm/unistd.h
|
|
+++ b/arch/arm/include/uapi/asm/unistd.h
|
|
@@ -410,11 +410,6 @@
|
|
#define __NR_sched_getattr (__NR_SYSCALL_BASE+381)
|
|
|
|
/*
|
|
- * This may need to be greater than __NR_last_syscall+1 in order to
|
|
- * account for the padding in the syscall table
|
|
- */
|
|
-
|
|
-/*
|
|
* The following SWIs are ARM private.
|
|
*/
|
|
#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000)
|
|
@@ -425,12 +420,6 @@
|
|
#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
|
|
|
|
/*
|
|
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
|
|
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
|
|
- * existence. Don't ever use this from user code.
|
|
- */
|
|
-
|
|
-/*
|
|
* The following syscalls are obsolete and no longer available for EABI.
|
|
*/
|
|
#if !defined(__KERNEL__)
|
|
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
|
|
index ded0417..85598b5 100644
|
|
--- a/arch/arm/kernel/asm-offsets.c
|
|
+++ b/arch/arm/kernel/asm-offsets.c
|
|
@@ -174,6 +174,7 @@ int main(void)
|
|
DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
|
|
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
|
|
DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
|
|
+ DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
|
|
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
|
|
DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
|
|
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
|
|
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
|
|
index a2dcafd..98dd389 100644
|
|
--- a/arch/arm/kernel/entry-common.S
|
|
+++ b/arch/arm/kernel/entry-common.S
|
|
@@ -32,7 +32,9 @@ ret_fast_syscall:
|
|
UNWIND(.fnstart )
|
|
UNWIND(.cantunwind )
|
|
disable_irq @ disable interrupts
|
|
- ldr r1, [tsk, #TI_FLAGS]
|
|
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
|
|
+ tst r1, #_TIF_SYSCALL_WORK
|
|
+ bne __sys_trace_return
|
|
tst r1, #_TIF_WORK_MASK
|
|
bne fast_work_pending
|
|
asm_trace_hardirqs_on
|
|
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
|
|
index 797b1a6..7e666cf 100644
|
|
--- a/arch/arm/kernel/hyp-stub.S
|
|
+++ b/arch/arm/kernel/hyp-stub.S
|
|
@@ -134,9 +134,7 @@ ENTRY(__hyp_stub_install_secondary)
|
|
mcr p15, 4, r7, c1, c1, 3 @ HSTR
|
|
|
|
THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
|
|
-#ifdef CONFIG_CPU_BIG_ENDIAN
|
|
- orr r7, #(1 << 9) @ HSCTLR.EE
|
|
-#endif
|
|
+ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
|
|
mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
|
|
|
|
mrc p15, 4, r7, c1, c1, 1 @ HDCR
|
|
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
|
|
index 9723d17..1e782bd 100644
|
|
--- a/arch/arm/kernel/irq.c
|
|
+++ b/arch/arm/kernel/irq.c
|
|
@@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
|
|
c = irq_data_get_irq_chip(d);
|
|
if (!c->irq_set_affinity)
|
|
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
|
|
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
|
|
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
|
|
cpumask_copy(d->affinity, affinity);
|
|
|
|
return ret;
|
|
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
|
|
index 18a7628..380c20f 100644
|
|
--- a/arch/arm/kernel/kprobes-common.c
|
|
+++ b/arch/arm/kernel/kprobes-common.c
|
|
@@ -14,6 +14,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/kprobes.h>
|
|
#include <asm/system_info.h>
|
|
+#include <asm/opcodes.h>
|
|
|
|
#include "kprobes.h"
|
|
|
|
@@ -305,7 +306,8 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
|
|
|
|
if (handler) {
|
|
/* We can emulate the instruction in (possibly) modified form */
|
|
- asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
|
|
+ asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
|
|
+ (rn << 16) | reglist);
|
|
asi->insn_handler = handler;
|
|
return INSN_GOOD;
|
|
}
|
|
@@ -334,13 +336,14 @@ prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
|
|
#ifdef CONFIG_THUMB2_KERNEL
|
|
if (thumb) {
|
|
u16 *thumb_insn = (u16 *)asi->insn;
|
|
- thumb_insn[1] = 0x4770; /* Thumb bx lr */
|
|
- thumb_insn[2] = 0x4770; /* Thumb bx lr */
|
|
+ /* Thumb bx lr */
|
|
+ thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
|
|
+ thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
|
|
return insn;
|
|
}
|
|
- asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
|
|
+ asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
|
|
#else
|
|
- asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
|
|
+ asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
|
|
#endif
|
|
/* Make an ARM instruction unconditional */
|
|
if (insn < 0xe0000000)
|
|
@@ -360,12 +363,12 @@ set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
|
|
if (thumb) {
|
|
u16 *ip = (u16 *)asi->insn;
|
|
if (is_wide_instruction(insn))
|
|
- *ip++ = insn >> 16;
|
|
- *ip++ = insn;
|
|
+ *ip++ = __opcode_to_mem_thumb16(insn >> 16);
|
|
+ *ip++ = __opcode_to_mem_thumb16(insn);
|
|
return;
|
|
}
|
|
#endif
|
|
- asi->insn[0] = insn;
|
|
+ asi->insn[0] = __opcode_to_mem_arm(insn);
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
|
|
index 6123daf..241222c 100644
|
|
--- a/arch/arm/kernel/kprobes-thumb.c
|
|
+++ b/arch/arm/kernel/kprobes-thumb.c
|
|
@@ -11,6 +11,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/kprobes.h>
|
|
#include <linux/module.h>
|
|
+#include <asm/opcodes.h>
|
|
|
|
#include "kprobes.h"
|
|
|
|
@@ -163,9 +164,9 @@ t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
|
|
enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
|
|
|
|
/* Fixup modified instruction to have halfwords in correct order...*/
|
|
- insn = asi->insn[0];
|
|
- ((u16 *)asi->insn)[0] = insn >> 16;
|
|
- ((u16 *)asi->insn)[1] = insn & 0xffff;
|
|
+ insn = __mem_to_opcode_arm(asi->insn[0]);
|
|
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
|
|
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
|
|
|
|
return ret;
|
|
}
|
|
@@ -1153,7 +1154,7 @@ t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
|
|
{
|
|
insn &= ~0x00ff;
|
|
insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
|
|
- ((u16 *)asi->insn)[0] = insn;
|
|
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
|
|
asi->insn_handler = t16_emulate_hiregs;
|
|
return INSN_GOOD;
|
|
}
|
|
@@ -1182,8 +1183,10 @@ t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
|
|
* and call it with R9=SP and LR in the register list represented
|
|
* by R8.
|
|
*/
|
|
- ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */
|
|
- ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
|
|
+ /* 1st half STMDB R9!,{} */
|
|
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
|
|
+ /* 2nd half (register list) */
|
|
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
|
|
asi->insn_handler = t16_emulate_push;
|
|
return INSN_GOOD;
|
|
}
|
|
@@ -1232,8 +1235,10 @@ t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
|
|
* and call it with R9=SP and PC in the register list represented
|
|
* by R8.
|
|
*/
|
|
- ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */
|
|
- ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
|
|
+ /* 1st half LDMIA R9!,{} */
|
|
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
|
|
+ /* 2nd half (register list) */
|
|
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
|
|
asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
|
|
: t16_emulate_pop_nopc;
|
|
return INSN_GOOD;
|
|
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
|
|
index a7b621e..49a87b6 100644
|
|
--- a/arch/arm/kernel/kprobes.c
|
|
+++ b/arch/arm/kernel/kprobes.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/stop_machine.h>
|
|
#include <linux/stringify.h>
|
|
#include <asm/traps.h>
|
|
+#include <asm/opcodes.h>
|
|
#include <asm/cacheflush.h>
|
|
|
|
#include "kprobes.h"
|
|
@@ -62,10 +63,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
|
#ifdef CONFIG_THUMB2_KERNEL
|
|
thumb = true;
|
|
addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
|
|
- insn = ((u16 *)addr)[0];
|
|
+ insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
|
|
if (is_wide_instruction(insn)) {
|
|
- insn <<= 16;
|
|
- insn |= ((u16 *)addr)[1];
|
|
+ u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
|
|
+ insn = __opcode_thumb32_compose(insn, inst2);
|
|
decode_insn = thumb32_kprobe_decode_insn;
|
|
} else
|
|
decode_insn = thumb16_kprobe_decode_insn;
|
|
@@ -73,7 +74,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
|
thumb = false;
|
|
if (addr & 0x3)
|
|
return -EINVAL;
|
|
- insn = *p->addr;
|
|
+ insn = __mem_to_opcode_arm(*p->addr);
|
|
decode_insn = arm_kprobe_decode_insn;
|
|
#endif
|
|
|
|
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
|
|
index 1bdd78b..dd6553b 100644
|
|
--- a/arch/arm/kernel/process.c
|
|
+++ b/arch/arm/kernel/process.c
|
|
@@ -336,6 +336,8 @@ void flush_thread(void)
|
|
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
|
|
memset(&thread->fpstate, 0, sizeof(union fp_state));
|
|
|
|
+ flush_tls();
|
|
+
|
|
thread_notify(THREAD_NOTIFY_FLUSH, thread);
|
|
}
|
|
|
|
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
|
|
index 4a5e943..a4ef472 100644
|
|
--- a/arch/arm/kernel/setup.c
|
|
+++ b/arch/arm/kernel/setup.c
|
|
@@ -1034,6 +1034,15 @@ static int c_show(struct seq_file *m, void *v)
|
|
seq_printf(m, "model name\t: %s rev %d (%s)\n",
|
|
cpu_name, cpuid & 15, elf_platform);
|
|
|
|
+#if defined(CONFIG_SMP)
|
|
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
|
|
+ per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
|
|
+ (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
|
|
+#else
|
|
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
|
|
+ loops_per_jiffy / (500000/HZ),
|
|
+ (loops_per_jiffy / (5000/HZ)) % 100);
|
|
+#endif
|
|
/* dump out the processor features */
|
|
seq_puts(m, "Features\t: ");
|
|
|
|
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
|
|
index b7b4c86..8cd3724 100644
|
|
--- a/arch/arm/kernel/smp.c
|
|
+++ b/arch/arm/kernel/smp.c
|
|
@@ -388,8 +388,17 @@ asmlinkage void secondary_start_kernel(void)
|
|
|
|
void __init smp_cpus_done(unsigned int max_cpus)
|
|
{
|
|
- printk(KERN_INFO "SMP: Total of %d processors activated.\n",
|
|
- num_online_cpus());
|
|
+ int cpu;
|
|
+ unsigned long bogosum = 0;
|
|
+
|
|
+ for_each_online_cpu(cpu)
|
|
+ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
|
|
+
|
|
+ printk(KERN_INFO "SMP: Total of %d processors activated "
|
|
+ "(%lu.%02lu BogoMIPS).\n",
|
|
+ num_online_cpus(),
|
|
+ bogosum / (500000/HZ),
|
|
+ (bogosum / (5000/HZ)) % 100);
|
|
|
|
hyp_mode_check();
|
|
}
|
|
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
|
|
index 7b8403b..80f0d69 100644
|
|
--- a/arch/arm/kernel/thumbee.c
|
|
+++ b/arch/arm/kernel/thumbee.c
|
|
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
|
|
|
|
switch (cmd) {
|
|
case THREAD_NOTIFY_FLUSH:
|
|
- thread->thumbee_state = 0;
|
|
+ teehbr_write(0);
|
|
break;
|
|
case THREAD_NOTIFY_SWITCH:
|
|
current_thread_info()->thumbee_state = teehbr_read();
|
|
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
|
|
index 172ee18..3f31443 100644
|
|
--- a/arch/arm/kernel/traps.c
|
|
+++ b/arch/arm/kernel/traps.c
|
|
@@ -510,8 +510,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
|
|
return regs->ARM_r0;
|
|
}
|
|
|
|
-static long do_cache_op_restart(struct restart_block *);
|
|
-
|
|
static inline int
|
|
__do_cache_op(unsigned long start, unsigned long end)
|
|
{
|
|
@@ -520,24 +518,8 @@ __do_cache_op(unsigned long start, unsigned long end)
|
|
do {
|
|
unsigned long chunk = min(PAGE_SIZE, end - start);
|
|
|
|
- if (signal_pending(current)) {
|
|
- struct thread_info *ti = current_thread_info();
|
|
-
|
|
- ti->restart_block = (struct restart_block) {
|
|
- .fn = do_cache_op_restart,
|
|
- };
|
|
-
|
|
- ti->arm_restart_block = (struct arm_restart_block) {
|
|
- {
|
|
- .cache = {
|
|
- .start = start,
|
|
- .end = end,
|
|
- },
|
|
- },
|
|
- };
|
|
-
|
|
- return -ERESTART_RESTARTBLOCK;
|
|
- }
|
|
+ if (fatal_signal_pending(current))
|
|
+ return 0;
|
|
|
|
ret = flush_cache_user_range(start, start + chunk);
|
|
if (ret)
|
|
@@ -550,15 +532,6 @@ __do_cache_op(unsigned long start, unsigned long end)
|
|
return 0;
|
|
}
|
|
|
|
-static long do_cache_op_restart(struct restart_block *unused)
|
|
-{
|
|
- struct arm_restart_block *restart_block;
|
|
-
|
|
- restart_block = ¤t_thread_info()->arm_restart_block;
|
|
- return __do_cache_op(restart_block->cache.start,
|
|
- restart_block->cache.end);
|
|
-}
|
|
-
|
|
static inline int
|
|
do_cache_op(unsigned long start, unsigned long end, int flags)
|
|
{
|
|
@@ -578,7 +551,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
|
|
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
|
|
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
|
|
{
|
|
- struct thread_info *thread = current_thread_info();
|
|
siginfo_t info;
|
|
|
|
if ((no >> 16) != (__ARM_NR_BASE>> 16))
|
|
@@ -629,21 +601,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
|
|
return regs->ARM_r0;
|
|
|
|
case NR(set_tls):
|
|
- thread->tp_value[0] = regs->ARM_r0;
|
|
- if (tls_emu)
|
|
- return 0;
|
|
- if (has_tls_reg) {
|
|
- asm ("mcr p15, 0, %0, c13, c0, 3"
|
|
- : : "r" (regs->ARM_r0));
|
|
- } else {
|
|
- /*
|
|
- * User space must never try to access this directly.
|
|
- * Expect your app to break eventually if you do so.
|
|
- * The user helper at 0xffff0fe0 must be used instead.
|
|
- * (see entry-armv.S for details)
|
|
- */
|
|
- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
|
|
- }
|
|
+ set_tls(regs->ARM_r0);
|
|
return 0;
|
|
|
|
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
|
|
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
|
|
index bd18bb8..f6a52a2 100644
|
|
--- a/arch/arm/kvm/arm.c
|
|
+++ b/arch/arm/kvm/arm.c
|
|
@@ -82,7 +82,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
|
|
/**
|
|
* kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
|
|
*/
|
|
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
|
|
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
|
|
{
|
|
return &kvm_arm_running_vcpu;
|
|
}
|
|
@@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
|
|
return VM_FAULT_SIGBUS;
|
|
}
|
|
|
|
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
|
|
- struct kvm_memory_slot *dont)
|
|
-{
|
|
-}
|
|
-
|
|
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
|
- unsigned long npages)
|
|
-{
|
|
- return 0;
|
|
-}
|
|
|
|
/**
|
|
* kvm_arch_destroy_vm - destroy the VM data structure
|
|
@@ -224,39 +214,17 @@ long kvm_arch_dev_ioctl(struct file *filp,
|
|
return -EINVAL;
|
|
}
|
|
|
|
-void kvm_arch_memslots_updated(struct kvm *kvm)
|
|
-{
|
|
-}
|
|
-
|
|
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|
- struct kvm_memory_slot *memslot,
|
|
- struct kvm_userspace_memory_region *mem,
|
|
- enum kvm_mr_change change)
|
|
-{
|
|
- return 0;
|
|
-}
|
|
-
|
|
-void kvm_arch_commit_memory_region(struct kvm *kvm,
|
|
- struct kvm_userspace_memory_region *mem,
|
|
- const struct kvm_memory_slot *old,
|
|
- enum kvm_mr_change change)
|
|
-{
|
|
-}
|
|
-
|
|
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
|
-{
|
|
-}
|
|
-
|
|
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
|
|
- struct kvm_memory_slot *slot)
|
|
-{
|
|
-}
|
|
|
|
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
|
|
{
|
|
int err;
|
|
struct kvm_vcpu *vcpu;
|
|
|
|
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
|
|
+ err = -EBUSY;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
|
|
if (!vcpu) {
|
|
err = -ENOMEM;
|
|
@@ -464,15 +432,16 @@ static void update_vttbr(struct kvm *kvm)
|
|
|
|
/* update vttbr to be used with the new vmid */
|
|
pgd_phys = virt_to_phys(kvm->arch.pgd);
|
|
+ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
|
|
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
|
|
- kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
|
|
- kvm->arch.vttbr |= vmid;
|
|
+ kvm->arch.vttbr = pgd_phys | vmid;
|
|
|
|
spin_unlock(&kvm_vmid_lock);
|
|
}
|
|
|
|
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
|
|
{
|
|
+ struct kvm *kvm = vcpu->kvm;
|
|
int ret;
|
|
|
|
if (likely(vcpu->arch.has_run_once))
|
|
@@ -484,12 +453,20 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
|
|
* Initialize the VGIC before running a vcpu the first time on
|
|
* this VM.
|
|
*/
|
|
- if (unlikely(!vgic_initialized(vcpu->kvm))) {
|
|
- ret = kvm_vgic_init(vcpu->kvm);
|
|
+ if (unlikely(!vgic_initialized(kvm))) {
|
|
+ ret = kvm_vgic_init(kvm);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
+ /*
|
|
+ * Enable the arch timers only if we have an in-kernel VGIC
|
|
+ * and it has been properly initialized, since we cannot handle
|
|
+ * interrupts from the virtual timer with a userspace gic.
|
|
+ */
|
|
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
|
|
+ kvm_timer_enable(kvm);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -713,10 +690,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
|
|
return ret;
|
|
|
|
/*
|
|
+ * Ensure a rebooted VM will fault in RAM pages and detect if the
|
|
+ * guest MMU is turned off and flush the caches as needed.
|
|
+ */
|
|
+ if (vcpu->arch.has_run_once)
|
|
+ stage2_unmap_vm(vcpu->kvm);
|
|
+
|
|
+ vcpu_reset_hcr(vcpu);
|
|
+
|
|
+ /*
|
|
* Handle the "start in power-off" case by marking the VCPU as paused.
|
|
*/
|
|
- if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
|
|
+ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
|
|
vcpu->arch.pause = true;
|
|
+ else
|
|
+ vcpu->arch.pause = false;
|
|
|
|
return 0;
|
|
}
|
|
@@ -862,7 +850,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
|
|
switch (action) {
|
|
case CPU_STARTING:
|
|
case CPU_STARTING_FROZEN:
|
|
- cpu_init_hyp_mode(NULL);
|
|
+ if (__hyp_get_vectors() == hyp_default_vectors)
|
|
+ cpu_init_hyp_mode(NULL);
|
|
break;
|
|
}
|
|
|
|
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
|
|
index 78c0885..7c73290 100644
|
|
--- a/arch/arm/kvm/coproc.c
|
|
+++ b/arch/arm/kvm/coproc.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <asm/kvm_host.h>
|
|
#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_coproc.h>
|
|
+#include <asm/kvm_mmu.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/cputype.h>
|
|
#include <trace/events/kvm.h>
|
|
@@ -205,6 +206,44 @@ done:
|
|
}
|
|
|
|
/*
|
|
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
|
|
+ * is set.
|
|
+ */
|
|
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
|
|
+ const struct coproc_params *p,
|
|
+ const struct coproc_reg *r)
|
|
+{
|
|
+ BUG_ON(!p->is_write);
|
|
+
|
|
+ vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
|
|
+ if (p->is_64bit)
|
|
+ vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * SCTLR accessor. Only called as long as HCR_TVM is set. If the
|
|
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
|
|
+ * it in complete control of the caches.
|
|
+ *
|
|
+ * Used by the cpu-specific code.
|
|
+ */
|
|
+bool access_sctlr(struct kvm_vcpu *vcpu,
|
|
+ const struct coproc_params *p,
|
|
+ const struct coproc_reg *r)
|
|
+{
|
|
+ access_vm_reg(vcpu, p, r);
|
|
+
|
|
+ if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
|
|
+ vcpu->arch.hcr &= ~HCR_TVM;
|
|
+ stage2_flush_vm(vcpu->kvm);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
* We could trap ID_DFR0 and tell the guest we don't support performance
|
|
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
|
|
* NAKed, so it will read the PMCR anyway.
|
|
@@ -261,33 +300,36 @@ static const struct coproc_reg cp15_regs[] = {
|
|
{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
|
|
NULL, reset_val, c1_CPACR, 0x00000000 },
|
|
|
|
- /* TTBR0/TTBR1: swapped by interrupt.S. */
|
|
- { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
|
|
- { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
|
|
-
|
|
- /* TTBCR: swapped by interrupt.S. */
|
|
+ /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
|
|
+ { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
|
|
+ { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
+ access_vm_reg, reset_unknown, c2_TTBR0 },
|
|
+ { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
|
|
+ access_vm_reg, reset_unknown, c2_TTBR1 },
|
|
{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
|
|
- NULL, reset_val, c2_TTBCR, 0x00000000 },
|
|
+ access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
|
|
+ { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
|
|
+
|
|
|
|
/* DACR: swapped by interrupt.S. */
|
|
{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_unknown, c3_DACR },
|
|
+ access_vm_reg, reset_unknown, c3_DACR },
|
|
|
|
/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
|
|
{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_unknown, c5_DFSR },
|
|
+ access_vm_reg, reset_unknown, c5_DFSR },
|
|
{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
|
|
- NULL, reset_unknown, c5_IFSR },
|
|
+ access_vm_reg, reset_unknown, c5_IFSR },
|
|
{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_unknown, c5_ADFSR },
|
|
+ access_vm_reg, reset_unknown, c5_ADFSR },
|
|
{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
|
|
- NULL, reset_unknown, c5_AIFSR },
|
|
+ access_vm_reg, reset_unknown, c5_AIFSR },
|
|
|
|
/* DFAR/IFAR: swapped by interrupt.S. */
|
|
{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_unknown, c6_DFAR },
|
|
+ access_vm_reg, reset_unknown, c6_DFAR },
|
|
{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
|
|
- NULL, reset_unknown, c6_IFAR },
|
|
+ access_vm_reg, reset_unknown, c6_IFAR },
|
|
|
|
/* PAR swapped by interrupt.S */
|
|
{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
|
|
@@ -324,9 +366,15 @@ static const struct coproc_reg cp15_regs[] = {
|
|
|
|
/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
|
|
{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_unknown, c10_PRRR},
|
|
+ access_vm_reg, reset_unknown, c10_PRRR},
|
|
{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
|
|
- NULL, reset_unknown, c10_NMRR},
|
|
+ access_vm_reg, reset_unknown, c10_NMRR},
|
|
+
|
|
+ /* AMAIR0/AMAIR1: swapped by interrupt.S. */
|
|
+ { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
|
|
+ access_vm_reg, reset_unknown, c10_AMAIR0},
|
|
+ { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
|
|
+ access_vm_reg, reset_unknown, c10_AMAIR1},
|
|
|
|
/* VBAR: swapped by interrupt.S. */
|
|
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
@@ -334,7 +382,7 @@ static const struct coproc_reg cp15_regs[] = {
|
|
|
|
/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
|
|
{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
|
|
- NULL, reset_val, c13_CID, 0x00000000 },
|
|
+ access_vm_reg, reset_val, c13_CID, 0x00000000 },
|
|
{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
|
|
NULL, reset_unknown, c13_TID_URW },
|
|
{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
|
|
@@ -443,7 +491,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
{
|
|
struct coproc_params params;
|
|
|
|
- params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
|
|
+ params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
|
|
params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
|
|
params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
|
|
params.is_64bit = true;
|
|
@@ -451,7 +499,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
|
|
params.Op2 = 0;
|
|
params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
|
|
- params.CRn = 0;
|
|
+ params.CRm = 0;
|
|
|
|
return emulate_cp15(vcpu, ¶ms);
|
|
}
|
|
@@ -694,7 +742,7 @@ static bool is_valid_cache(u32 val)
|
|
u32 level, ctype;
|
|
|
|
if (val >= CSSELR_MAX)
|
|
- return -ENOENT;
|
|
+ return false;
|
|
|
|
/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
|
|
level = (val >> 1);
|
|
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
|
|
index 0461d5c..1a44bbe 100644
|
|
--- a/arch/arm/kvm/coproc.h
|
|
+++ b/arch/arm/kvm/coproc.h
|
|
@@ -58,8 +58,8 @@ static inline void print_cp_instr(const struct coproc_params *p)
|
|
{
|
|
/* Look, we even formatted it for you to paste into the table! */
|
|
if (p->is_64bit) {
|
|
- kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
|
|
- p->CRm, p->Op1, p->is_write ? "write" : "read");
|
|
+ kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
|
|
+ p->CRn, p->Op1, p->is_write ? "write" : "read");
|
|
} else {
|
|
kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
|
|
" func_%s },\n",
|
|
@@ -135,13 +135,13 @@ static inline int cmp_reg(const struct coproc_reg *i1,
|
|
return -1;
|
|
if (i1->CRn != i2->CRn)
|
|
return i1->CRn - i2->CRn;
|
|
- if (i1->is_64 != i2->is_64)
|
|
- return i2->is_64 - i1->is_64;
|
|
if (i1->CRm != i2->CRm)
|
|
return i1->CRm - i2->CRm;
|
|
if (i1->Op1 != i2->Op1)
|
|
return i1->Op1 - i2->Op1;
|
|
- return i1->Op2 - i2->Op2;
|
|
+ if (i1->Op2 != i2->Op2)
|
|
+ return i1->Op2 - i2->Op2;
|
|
+ return i2->is_64 - i1->is_64;
|
|
}
|
|
|
|
|
|
@@ -153,4 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
|
|
#define is64 .is_64 = true
|
|
#define is32 .is_64 = false
|
|
|
|
+bool access_sctlr(struct kvm_vcpu *vcpu,
|
|
+ const struct coproc_params *p,
|
|
+ const struct coproc_reg *r);
|
|
+
|
|
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
|
|
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
|
|
index bb0cac1..e6f4ae4 100644
|
|
--- a/arch/arm/kvm/coproc_a15.c
|
|
+++ b/arch/arm/kvm/coproc_a15.c
|
|
@@ -34,7 +34,7 @@
|
|
static const struct coproc_reg a15_regs[] = {
|
|
/* SCTLR: swapped by interrupt.S. */
|
|
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_val, c1_SCTLR, 0x00C50078 },
|
|
+ access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
|
|
};
|
|
|
|
static struct kvm_coproc_target_table a15_target_table = {
|
|
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
|
|
index 1df7673..17fc7cd 100644
|
|
--- a/arch/arm/kvm/coproc_a7.c
|
|
+++ b/arch/arm/kvm/coproc_a7.c
|
|
@@ -37,7 +37,7 @@
|
|
static const struct coproc_reg a7_regs[] = {
|
|
/* SCTLR: swapped by interrupt.S. */
|
|
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
|
|
- NULL, reset_val, c1_SCTLR, 0x00C50878 },
|
|
+ access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
|
|
};
|
|
|
|
static struct kvm_coproc_target_table a7_target_table = {
|
|
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
|
|
index 0de91fc..ec4fa86 100644
|
|
--- a/arch/arm/kvm/handle_exit.c
|
|
+++ b/arch/arm/kvm/handle_exit.c
|
|
@@ -89,6 +89,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
else
|
|
kvm_vcpu_block(vcpu);
|
|
|
|
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
|
+
|
|
return 1;
|
|
}
|
|
|
|
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
|
|
index 1b9844d..ee4f744 100644
|
|
--- a/arch/arm/kvm/init.S
|
|
+++ b/arch/arm/kvm/init.S
|
|
@@ -98,6 +98,10 @@ __do_hyp_init:
|
|
mrc p15, 0, r0, c10, c2, 1
|
|
mcr p15, 4, r0, c10, c2, 1
|
|
|
|
+ @ Invalidate the stale TLBs from Bootloader
|
|
+ mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
|
|
+ dsb ish
|
|
+
|
|
@ Set the HSCTLR to:
|
|
@ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
|
|
@ - Endianness: Kernel config
|
|
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
|
|
index 0d68d40..a1467e7 100644
|
|
--- a/arch/arm/kvm/interrupts.S
|
|
+++ b/arch/arm/kvm/interrupts.S
|
|
@@ -159,13 +159,9 @@ __kvm_vcpu_return:
|
|
@ Don't trap coprocessor accesses for host kernel
|
|
set_hstr vmexit
|
|
set_hdcr vmexit
|
|
- set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
|
|
+ set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
|
|
|
|
#ifdef CONFIG_VFPv3
|
|
- @ Save floating point registers we if let guest use them.
|
|
- tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
|
|
- bne after_vfp_restore
|
|
-
|
|
@ Switch VFP/NEON hardware state to the host's
|
|
add r7, vcpu, #VCPU_VFP_GUEST
|
|
store_vfp_state r7
|
|
@@ -177,6 +173,8 @@ after_vfp_restore:
|
|
@ Restore FPEXC_EN which we clobbered on entry
|
|
pop {r2}
|
|
VFPFMXR FPEXC, r2
|
|
+#else
|
|
+after_vfp_restore:
|
|
#endif
|
|
|
|
@ Reset Hyp-role
|
|
@@ -467,7 +465,7 @@ switch_to_guest_vfp:
|
|
push {r3-r7}
|
|
|
|
@ NEON/VFP used. Turn on VFP access.
|
|
- set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
|
|
+ set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
|
|
|
|
@ Switch VFP/NEON hardware state to the guest's
|
|
add r7, r0, #VCPU_VFP_HOST
|
|
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
|
|
index 6f18695..2973b2d 100644
|
|
--- a/arch/arm/kvm/interrupts_head.S
|
|
+++ b/arch/arm/kvm/interrupts_head.S
|
|
@@ -303,13 +303,17 @@ vcpu .req r0 @ vcpu pointer always in r0
|
|
|
|
mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
|
|
mrrc p15, 0, r4, r5, c7 @ PAR
|
|
+ mrc p15, 0, r6, c10, c3, 0 @ AMAIR0
|
|
+ mrc p15, 0, r7, c10, c3, 1 @ AMAIR1
|
|
|
|
.if \store_to_vcpu == 0
|
|
- push {r2,r4-r5}
|
|
+ push {r2,r4-r7}
|
|
.else
|
|
str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
|
|
add r12, vcpu, #CP15_OFFSET(c7_PAR)
|
|
strd r4, r5, [r12]
|
|
+ str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
|
|
+ str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
|
|
.endif
|
|
.endm
|
|
|
|
@@ -322,15 +326,19 @@ vcpu .req r0 @ vcpu pointer always in r0
|
|
*/
|
|
.macro write_cp15_state read_from_vcpu
|
|
.if \read_from_vcpu == 0
|
|
- pop {r2,r4-r5}
|
|
+ pop {r2,r4-r7}
|
|
.else
|
|
ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
|
|
add r12, vcpu, #CP15_OFFSET(c7_PAR)
|
|
ldrd r4, r5, [r12]
|
|
+ ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
|
|
+ ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
|
|
.endif
|
|
|
|
mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
|
|
mcrr p15, 0, r4, r5, c7 @ PAR
|
|
+ mcr p15, 0, r6, c10, c3, 0 @ AMAIR0
|
|
+ mcr p15, 0, r7, c10, c3, 1 @ AMAIR1
|
|
|
|
.if \read_from_vcpu == 0
|
|
pop {r2-r12}
|
|
@@ -570,8 +578,13 @@ vcpu .req r0 @ vcpu pointer always in r0
|
|
.endm
|
|
|
|
/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
|
|
- * (hardware reset value is 0). Keep previous value in r2. */
|
|
-.macro set_hcptr operation, mask
|
|
+ * (hardware reset value is 0). Keep previous value in r2.
|
|
+ * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
|
|
+ * VFP wasn't already enabled (always executed on vmtrap).
|
|
+ * If a label is specified with vmexit, it is branched to if VFP wasn't
|
|
+ * enabled.
|
|
+ */
|
|
+.macro set_hcptr operation, mask, label = none
|
|
mrc p15, 4, r2, c1, c1, 2
|
|
ldr r3, =\mask
|
|
.if \operation == vmentry
|
|
@@ -580,6 +593,17 @@ vcpu .req r0 @ vcpu pointer always in r0
|
|
bic r3, r2, r3 @ Don't trap defined coproc-accesses
|
|
.endif
|
|
mcr p15, 4, r3, c1, c1, 2
|
|
+ .if \operation != vmentry
|
|
+ .if \operation == vmexit
|
|
+ tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
|
|
+ beq 1f
|
|
+ .endif
|
|
+ isb
|
|
+ .if \label != none
|
|
+ b \label
|
|
+ .endif
|
|
+1:
|
|
+ .endif
|
|
.endm
|
|
|
|
/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
|
|
@@ -597,17 +621,14 @@ vcpu .req r0 @ vcpu pointer always in r0
|
|
|
|
/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
|
|
.macro configure_hyp_role operation
|
|
- mrc p15, 4, r2, c1, c1, 0 @ HCR
|
|
- bic r2, r2, #HCR_VIRT_EXCP_MASK
|
|
- ldr r3, =HCR_GUEST_MASK
|
|
.if \operation == vmentry
|
|
- orr r2, r2, r3
|
|
+ ldr r2, [vcpu, #VCPU_HCR]
|
|
ldr r3, [vcpu, #VCPU_IRQ_LINES]
|
|
orr r2, r2, r3
|
|
.else
|
|
- bic r2, r2, r3
|
|
+ mov r2, #0
|
|
.endif
|
|
- mcr p15, 4, r2, c1, c1, 0
|
|
+ mcr p15, 4, r2, c1, c1, 0 @ HCR
|
|
.endm
|
|
|
|
.macro load_vcpu
|
|
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
|
|
index 575d790..c612e37 100644
|
|
--- a/arch/arm/kvm/mmu.c
|
|
+++ b/arch/arm/kvm/mmu.c
|
|
@@ -90,103 +90,209 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
|
|
return p;
|
|
}
|
|
|
|
-static bool page_empty(void *ptr)
|
|
+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
|
|
{
|
|
- struct page *ptr_page = virt_to_page(ptr);
|
|
- return page_count(ptr_page) == 1;
|
|
+ pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
|
|
+ pgd_clear(pgd);
|
|
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ pud_free(NULL, pud_table);
|
|
+ put_page(virt_to_page(pgd));
|
|
}
|
|
|
|
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
|
|
{
|
|
- if (pud_huge(*pud)) {
|
|
- pud_clear(pud);
|
|
- kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
- } else {
|
|
- pmd_t *pmd_table = pmd_offset(pud, 0);
|
|
- pud_clear(pud);
|
|
- kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
- pmd_free(NULL, pmd_table);
|
|
- }
|
|
+ pmd_t *pmd_table = pmd_offset(pud, 0);
|
|
+ VM_BUG_ON(pud_huge(*pud));
|
|
+ pud_clear(pud);
|
|
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ pmd_free(NULL, pmd_table);
|
|
put_page(virt_to_page(pud));
|
|
}
|
|
|
|
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
|
|
{
|
|
- if (kvm_pmd_huge(*pmd)) {
|
|
- pmd_clear(pmd);
|
|
- kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
- } else {
|
|
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
|
|
- pmd_clear(pmd);
|
|
- kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
- pte_free_kernel(NULL, pte_table);
|
|
- }
|
|
+ pte_t *pte_table = pte_offset_kernel(pmd, 0);
|
|
+ VM_BUG_ON(kvm_pmd_huge(*pmd));
|
|
+ pmd_clear(pmd);
|
|
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ pte_free_kernel(NULL, pte_table);
|
|
put_page(virt_to_page(pmd));
|
|
}
|
|
|
|
-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
|
|
+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
|
|
+ phys_addr_t addr, phys_addr_t end)
|
|
{
|
|
- if (pte_present(*pte)) {
|
|
- kvm_set_pte(pte, __pte(0));
|
|
- put_page(virt_to_page(pte));
|
|
- kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ phys_addr_t start_addr = addr;
|
|
+ pte_t *pte, *start_pte;
|
|
+
|
|
+ start_pte = pte = pte_offset_kernel(pmd, addr);
|
|
+ do {
|
|
+ if (!pte_none(*pte)) {
|
|
+ kvm_set_pte(pte, __pte(0));
|
|
+ put_page(virt_to_page(pte));
|
|
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ }
|
|
+ } while (pte++, addr += PAGE_SIZE, addr != end);
|
|
+
|
|
+ if (kvm_pte_table_empty(start_pte))
|
|
+ clear_pmd_entry(kvm, pmd, start_addr);
|
|
}
|
|
+
|
|
+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
|
|
+ phys_addr_t addr, phys_addr_t end)
|
|
+{
|
|
+ phys_addr_t next, start_addr = addr;
|
|
+ pmd_t *pmd, *start_pmd;
|
|
+
|
|
+ start_pmd = pmd = pmd_offset(pud, addr);
|
|
+ do {
|
|
+ next = kvm_pmd_addr_end(addr, end);
|
|
+ if (!pmd_none(*pmd)) {
|
|
+ if (kvm_pmd_huge(*pmd)) {
|
|
+ pmd_clear(pmd);
|
|
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ put_page(virt_to_page(pmd));
|
|
+ } else {
|
|
+ unmap_ptes(kvm, pmd, addr, next);
|
|
+ }
|
|
+ }
|
|
+ } while (pmd++, addr = next, addr != end);
|
|
+
|
|
+ if (kvm_pmd_table_empty(start_pmd))
|
|
+ clear_pud_entry(kvm, pud, start_addr);
|
|
+}
|
|
+
|
|
+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
|
|
+ phys_addr_t addr, phys_addr_t end)
|
|
+{
|
|
+ phys_addr_t next, start_addr = addr;
|
|
+ pud_t *pud, *start_pud;
|
|
+
|
|
+ start_pud = pud = pud_offset(pgd, addr);
|
|
+ do {
|
|
+ next = kvm_pud_addr_end(addr, end);
|
|
+ if (!pud_none(*pud)) {
|
|
+ if (pud_huge(*pud)) {
|
|
+ pud_clear(pud);
|
|
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
|
|
+ put_page(virt_to_page(pud));
|
|
+ } else {
|
|
+ unmap_pmds(kvm, pud, addr, next);
|
|
+ }
|
|
+ }
|
|
+ } while (pud++, addr = next, addr != end);
|
|
+
|
|
+ if (kvm_pud_table_empty(start_pud))
|
|
+ clear_pgd_entry(kvm, pgd, start_addr);
|
|
}
|
|
|
|
+
|
|
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
|
|
- unsigned long long start, u64 size)
|
|
+ phys_addr_t start, u64 size)
|
|
{
|
|
pgd_t *pgd;
|
|
- pud_t *pud;
|
|
- pmd_t *pmd;
|
|
+ phys_addr_t addr = start, end = start + size;
|
|
+ phys_addr_t next;
|
|
+
|
|
+ pgd = pgdp + kvm_pgd_index(addr);
|
|
+ do {
|
|
+ next = kvm_pgd_addr_end(addr, end);
|
|
+ if (!pgd_none(*pgd))
|
|
+ unmap_puds(kvm, pgd, addr, next);
|
|
+ } while (pgd++, addr = next, addr != end);
|
|
+}
|
|
+
|
|
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
|
|
+ phys_addr_t addr, phys_addr_t end)
|
|
+{
|
|
pte_t *pte;
|
|
- unsigned long long addr = start, end = start + size;
|
|
- u64 next;
|
|
|
|
- while (addr < end) {
|
|
- pgd = pgdp + pgd_index(addr);
|
|
- pud = pud_offset(pgd, addr);
|
|
- if (pud_none(*pud)) {
|
|
- addr = pud_addr_end(addr, end);
|
|
- continue;
|
|
+ pte = pte_offset_kernel(pmd, addr);
|
|
+ do {
|
|
+ if (!pte_none(*pte)) {
|
|
+ hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
|
|
+ kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
|
|
}
|
|
+ } while (pte++, addr += PAGE_SIZE, addr != end);
|
|
+}
|
|
|
|
- if (pud_huge(*pud)) {
|
|
- /*
|
|
- * If we are dealing with a huge pud, just clear it and
|
|
- * move on.
|
|
- */
|
|
- clear_pud_entry(kvm, pud, addr);
|
|
- addr = pud_addr_end(addr, end);
|
|
- continue;
|
|
- }
|
|
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
|
|
+ phys_addr_t addr, phys_addr_t end)
|
|
+{
|
|
+ pmd_t *pmd;
|
|
+ phys_addr_t next;
|
|
|
|
- pmd = pmd_offset(pud, addr);
|
|
- if (pmd_none(*pmd)) {
|
|
- addr = pmd_addr_end(addr, end);
|
|
- continue;
|
|
+ pmd = pmd_offset(pud, addr);
|
|
+ do {
|
|
+ next = kvm_pmd_addr_end(addr, end);
|
|
+ if (!pmd_none(*pmd)) {
|
|
+ if (kvm_pmd_huge(*pmd)) {
|
|
+ hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
|
|
+ kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
|
|
+ } else {
|
|
+ stage2_flush_ptes(kvm, pmd, addr, next);
|
|
+ }
|
|
}
|
|
+ } while (pmd++, addr = next, addr != end);
|
|
+}
|
|
|
|
- if (!kvm_pmd_huge(*pmd)) {
|
|
- pte = pte_offset_kernel(pmd, addr);
|
|
- clear_pte_entry(kvm, pte, addr);
|
|
- next = addr + PAGE_SIZE;
|
|
- }
|
|
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
|
|
+ phys_addr_t addr, phys_addr_t end)
|
|
+{
|
|
+ pud_t *pud;
|
|
+ phys_addr_t next;
|
|
|
|
- /*
|
|
- * If the pmd entry is to be cleared, walk back up the ladder
|
|
- */
|
|
- if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
|
|
- clear_pmd_entry(kvm, pmd, addr);
|
|
- next = pmd_addr_end(addr, end);
|
|
- if (page_empty(pmd) && !page_empty(pud)) {
|
|
- clear_pud_entry(kvm, pud, addr);
|
|
- next = pud_addr_end(addr, end);
|
|
+ pud = pud_offset(pgd, addr);
|
|
+ do {
|
|
+ next = kvm_pud_addr_end(addr, end);
|
|
+ if (!pud_none(*pud)) {
|
|
+ if (pud_huge(*pud)) {
|
|
+ hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
|
|
+ kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
|
|
+ } else {
|
|
+ stage2_flush_pmds(kvm, pud, addr, next);
|
|
}
|
|
}
|
|
+ } while (pud++, addr = next, addr != end);
|
|
+}
|
|
|
|
- addr = next;
|
|
- }
|
|
+static void stage2_flush_memslot(struct kvm *kvm,
|
|
+ struct kvm_memory_slot *memslot)
|
|
+{
|
|
+ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
|
|
+ phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
|
|
+ phys_addr_t next;
|
|
+ pgd_t *pgd;
|
|
+
|
|
+ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
|
|
+ do {
|
|
+ next = kvm_pgd_addr_end(addr, end);
|
|
+ stage2_flush_puds(kvm, pgd, addr, next);
|
|
+ } while (pgd++, addr = next, addr != end);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
|
|
+ * @kvm: The struct kvm pointer
|
|
+ *
|
|
+ * Go through the stage 2 page tables and invalidate any cache lines
|
|
+ * backing memory already mapped to the VM.
|
|
+ */
|
|
+void stage2_flush_vm(struct kvm *kvm)
|
|
+{
|
|
+ struct kvm_memslots *slots;
|
|
+ struct kvm_memory_slot *memslot;
|
|
+ int idx;
|
|
+
|
|
+ idx = srcu_read_lock(&kvm->srcu);
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
+
|
|
+ slots = kvm_memslots(kvm);
|
|
+ kvm_for_each_memslot(memslot, slots)
|
|
+ stage2_flush_memslot(kvm, memslot);
|
|
+
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
+ srcu_read_unlock(&kvm->srcu, idx);
|
|
}
|
|
|
|
/**
|
|
@@ -450,6 +556,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
|
|
unmap_range(kvm, kvm->arch.pgd, start, size);
|
|
}
|
|
|
|
+static void stage2_unmap_memslot(struct kvm *kvm,
|
|
+ struct kvm_memory_slot *memslot)
|
|
+{
|
|
+ hva_t hva = memslot->userspace_addr;
|
|
+ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
|
|
+ phys_addr_t size = PAGE_SIZE * memslot->npages;
|
|
+ hva_t reg_end = hva + size;
|
|
+
|
|
+ /*
|
|
+ * A memory region could potentially cover multiple VMAs, and any holes
|
|
+ * between them, so iterate over all of them to find out if we should
|
|
+ * unmap any of them.
|
|
+ *
|
|
+ * +--------------------------------------------+
|
|
+ * +---------------+----------------+ +----------------+
|
|
+ * | : VMA 1 | VMA 2 | | VMA 3 : |
|
|
+ * +---------------+----------------+ +----------------+
|
|
+ * | memory region |
|
|
+ * +--------------------------------------------+
|
|
+ */
|
|
+ do {
|
|
+ struct vm_area_struct *vma = find_vma(current->mm, hva);
|
|
+ hva_t vm_start, vm_end;
|
|
+
|
|
+ if (!vma || vma->vm_start >= reg_end)
|
|
+ break;
|
|
+
|
|
+ /*
|
|
+ * Take the intersection of this VMA with the memory region
|
|
+ */
|
|
+ vm_start = max(hva, vma->vm_start);
|
|
+ vm_end = min(reg_end, vma->vm_end);
|
|
+
|
|
+ if (!(vma->vm_flags & VM_PFNMAP)) {
|
|
+ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
|
|
+ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
|
|
+ }
|
|
+ hva = vm_end;
|
|
+ } while (hva < reg_end);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
|
|
+ * @kvm: The struct kvm pointer
|
|
+ *
|
|
+ * Go through the memregions and unmap any regular RAM
|
|
+ * backing memory already mapped to the VM.
|
|
+ */
|
|
+void stage2_unmap_vm(struct kvm *kvm)
|
|
+{
|
|
+ struct kvm_memslots *slots;
|
|
+ struct kvm_memory_slot *memslot;
|
|
+ int idx;
|
|
+
|
|
+ idx = srcu_read_lock(&kvm->srcu);
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
+
|
|
+ slots = kvm_memslots(kvm);
|
|
+ kvm_for_each_memslot(memslot, slots)
|
|
+ stage2_unmap_memslot(kvm, memslot);
|
|
+
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
+ srcu_read_unlock(&kvm->srcu, idx);
|
|
+}
|
|
+
|
|
/**
|
|
* kvm_free_stage2_pgd - free all stage-2 tables
|
|
* @kvm: The KVM struct pointer for the VM.
|
|
@@ -478,7 +649,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
|
|
pud_t *pud;
|
|
pmd_t *pmd;
|
|
|
|
- pgd = kvm->arch.pgd + pgd_index(addr);
|
|
+ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
|
|
pud = pud_offset(pgd, addr);
|
|
if (pud_none(*pud)) {
|
|
if (!cache)
|
|
@@ -641,6 +812,19 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
|
|
return false;
|
|
}
|
|
|
|
+static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ if (kvm_vcpu_trap_is_iabt(vcpu))
|
|
+ return false;
|
|
+
|
|
+ return kvm_vcpu_dabt_iswrite(vcpu);
|
|
+}
|
|
+
|
|
+static bool kvm_is_device_pfn(unsigned long pfn)
|
|
+{
|
|
+ return !pfn_valid(pfn);
|
|
+}
|
|
+
|
|
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|
struct kvm_memory_slot *memslot,
|
|
unsigned long fault_status)
|
|
@@ -654,8 +838,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
|
|
struct vm_area_struct *vma;
|
|
pfn_t pfn;
|
|
+ pgprot_t mem_type = PAGE_S2;
|
|
|
|
- write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
|
|
+ write_fault = kvm_is_write_fault(vcpu);
|
|
if (fault_status == FSC_PERM && !write_fault) {
|
|
kvm_err("Unexpected L2 read permission error\n");
|
|
return -EFAULT;
|
|
@@ -664,6 +849,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|
/* Let's check if we will get back a huge page backed by hugetlbfs */
|
|
down_read(¤t->mm->mmap_sem);
|
|
vma = find_vma_intersection(current->mm, hva, hva + 1);
|
|
+ if (unlikely(!vma)) {
|
|
+ kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
|
|
+ up_read(¤t->mm->mmap_sem);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
if (is_vm_hugetlb_page(vma)) {
|
|
hugetlb = true;
|
|
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
|
|
@@ -704,6 +895,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|
if (is_error_pfn(pfn))
|
|
return -EFAULT;
|
|
|
|
+ if (kvm_is_device_pfn(pfn))
|
|
+ mem_type = PAGE_S2_DEVICE;
|
|
+
|
|
spin_lock(&kvm->mmu_lock);
|
|
if (mmu_notifier_retry(kvm, mmu_seq))
|
|
goto out_unlock;
|
|
@@ -711,22 +905,23 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
|
|
|
|
if (hugetlb) {
|
|
- pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
|
|
+ pmd_t new_pmd = pfn_pmd(pfn, mem_type);
|
|
new_pmd = pmd_mkhuge(new_pmd);
|
|
if (writable) {
|
|
kvm_set_s2pmd_writable(&new_pmd);
|
|
kvm_set_pfn_dirty(pfn);
|
|
}
|
|
- coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
|
|
+ coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
|
|
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
|
|
} else {
|
|
- pte_t new_pte = pfn_pte(pfn, PAGE_S2);
|
|
+ pte_t new_pte = pfn_pte(pfn, mem_type);
|
|
if (writable) {
|
|
kvm_set_s2pte_writable(&new_pte);
|
|
kvm_set_pfn_dirty(pfn);
|
|
}
|
|
- coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
|
|
- ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
|
|
+ coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
|
|
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
|
|
+ pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
|
|
}
|
|
|
|
|
|
@@ -802,6 +997,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
|
|
memslot = gfn_to_memslot(vcpu->kvm, gfn);
|
|
|
|
+ /* Userspace should not be able to register out-of-bounds IPAs */
|
|
+ VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
|
|
+
|
|
ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
|
|
if (ret == 0)
|
|
ret = 1;
|
|
@@ -1006,3 +1204,57 @@ out:
|
|
free_hyp_pgds();
|
|
return err;
|
|
}
|
|
+
|
|
+void kvm_arch_commit_memory_region(struct kvm *kvm,
|
|
+ struct kvm_userspace_memory_region *mem,
|
|
+ const struct kvm_memory_slot *old,
|
|
+ enum kvm_mr_change change)
|
|
+{
|
|
+ gpa_t gpa = old->base_gfn << PAGE_SHIFT;
|
|
+ phys_addr_t size = old->npages << PAGE_SHIFT;
|
|
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
+ unmap_stage2_range(kvm, gpa, size);
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|
+ struct kvm_memory_slot *memslot,
|
|
+ struct kvm_userspace_memory_region *mem,
|
|
+ enum kvm_mr_change change)
|
|
+{
|
|
+ /*
|
|
+ * Prevent userspace from creating a memory region outside of the IPA
|
|
+ * space addressable by the KVM guest IPA space.
|
|
+ */
|
|
+ if (memslot->base_gfn + memslot->npages >=
|
|
+ (KVM_PHYS_SIZE >> PAGE_SHIFT))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
|
|
+ struct kvm_memory_slot *dont)
|
|
+{
|
|
+}
|
|
+
|
|
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
|
+ unsigned long npages)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void kvm_arch_memslots_updated(struct kvm *kvm)
|
|
+{
|
|
+}
|
|
+
|
|
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
|
+{
|
|
+}
|
|
+
|
|
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
|
|
+ struct kvm_memory_slot *slot)
|
|
+{
|
|
+}
|
|
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
|
|
index 034529d..d66f102 100644
|
|
--- a/arch/arm/mach-at91/clock.c
|
|
+++ b/arch/arm/mach-at91/clock.c
|
|
@@ -962,6 +962,7 @@ static int __init at91_clock_reset(void)
|
|
}
|
|
|
|
at91_pmc_write(AT91_PMC_SCDR, scdr);
|
|
+ at91_pmc_write(AT91_PMC_PCDR, pcdr);
|
|
if (cpu_is_sama5d3())
|
|
at91_pmc_write(AT91_PMC_PCDR1, pcdr1);
|
|
|
|
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
|
|
index c5101dc..1d4df3b 100644
|
|
--- a/arch/arm/mach-at91/pm.h
|
|
+++ b/arch/arm/mach-at91/pm.h
|
|
@@ -45,7 +45,7 @@ static inline void at91rm9200_standby(void)
|
|
" mcr p15, 0, %0, c7, c0, 4\n\t"
|
|
" str %5, [%1, %2]"
|
|
:
|
|
- : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
|
|
+ : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
|
|
"r" (1), "r" (AT91RM9200_SDRAMC_SRR),
|
|
"r" (lpr));
|
|
}
|
|
diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
|
|
index 49fa9ab..7a7a09a5 100644
|
|
--- a/arch/arm/mach-dove/board-dt.c
|
|
+++ b/arch/arm/mach-dove/board-dt.c
|
|
@@ -26,7 +26,7 @@ static void __init dove_dt_init(void)
|
|
#ifdef CONFIG_CACHE_TAUROS2
|
|
tauros2_init(0);
|
|
#endif
|
|
- BUG_ON(mvebu_mbus_dt_init());
|
|
+ BUG_ON(mvebu_mbus_dt_init(false));
|
|
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
|
|
}
|
|
|
|
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
|
|
index 3c27907..fdabccd 100644
|
|
--- a/arch/arm/mach-imx/clk-imx6q.c
|
|
+++ b/arch/arm/mach-imx/clk-imx6q.c
|
|
@@ -283,8 +283,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
|
|
post_div_table[1].div = 1;
|
|
post_div_table[2].div = 1;
|
|
video_div_table[1].div = 1;
|
|
- video_div_table[2].div = 1;
|
|
- };
|
|
+ video_div_table[3].div = 1;
|
|
+ }
|
|
|
|
/* type name parent_name base div_mask */
|
|
clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f, false);
|
|
@@ -536,7 +536,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
|
|
clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
|
|
clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
|
|
clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
|
|
- clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
|
|
+ clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
|
|
clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
|
|
clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
|
|
clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
|
|
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
|
|
index 7818815..79e629d 100644
|
|
--- a/arch/arm/mach-kirkwood/board-dt.c
|
|
+++ b/arch/arm/mach-kirkwood/board-dt.c
|
|
@@ -116,7 +116,7 @@ static void __init kirkwood_dt_init(void)
|
|
*/
|
|
writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
|
|
|
|
- BUG_ON(mvebu_mbus_dt_init());
|
|
+ BUG_ON(mvebu_mbus_dt_init(false));
|
|
|
|
kirkwood_l2_init();
|
|
|
|
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
|
|
index f6c9d1d..79c3766 100644
|
|
--- a/arch/arm/mach-mvebu/armada-370-xp.c
|
|
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
|
|
@@ -41,7 +41,7 @@ static void __init armada_370_xp_timer_and_clk_init(void)
|
|
of_clk_init(NULL);
|
|
clocksource_of_init();
|
|
coherency_init();
|
|
- BUG_ON(mvebu_mbus_dt_init());
|
|
+ BUG_ON(mvebu_mbus_dt_init(coherency_available()));
|
|
#ifdef CONFIG_CACHE_L2X0
|
|
l2x0_of_init(0, ~0UL);
|
|
#endif
|
|
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
|
|
index 4e9d581..49bad4d 100644
|
|
--- a/arch/arm/mach-mvebu/coherency.c
|
|
+++ b/arch/arm/mach-mvebu/coherency.c
|
|
@@ -121,10 +121,47 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
|
|
.notifier_call = mvebu_hwcc_platform_notifier,
|
|
};
|
|
|
|
+/*
|
|
+ * Keep track of whether we have IO hardware coherency enabled or not.
|
|
+ * On Armada 370's we will not be using it for example. We need to make
|
|
+ * that available [through coherency_available()] so the mbus controller
|
|
+ * doesn't enable the IO coherency bit in the attribute bits of the
|
|
+ * chip selects.
|
|
+ */
|
|
+static int coherency_enabled;
|
|
+
|
|
+int coherency_available(void)
|
|
+{
|
|
+ return coherency_enabled;
|
|
+}
|
|
+
|
|
int __init coherency_init(void)
|
|
{
|
|
struct device_node *np;
|
|
|
|
+ /*
|
|
+ * The coherency fabric is needed:
|
|
+ * - For coherency between processors on Armada XP, so only
|
|
+ * when SMP is enabled.
|
|
+ * - For coherency between the processor and I/O devices, but
|
|
+ * this coherency requires many pre-requisites (write
|
|
+ * allocate cache policy, shareable pages, SMP bit set) that
|
|
+ * are only meant in SMP situations.
|
|
+ *
|
|
+ * Note that this means that on Armada 370, there is currently
|
|
+ * no way to use hardware I/O coherency, because even when
|
|
+ * CONFIG_SMP is enabled, is_smp() returns false due to the
|
|
+ * Armada 370 being a single-core processor. To lift this
|
|
+ * limitation, we would have to find a way to make the cache
|
|
+ * policy set to write-allocate (on all Armada SoCs), and to
|
|
+ * set the shareable attribute in page tables (on all Armada
|
|
+ * SoCs except the Armada 370). Unfortunately, such decisions
|
|
+ * are taken very early in the kernel boot process, at a point
|
|
+ * where we don't know yet on which SoC we are running.
|
|
+ */
|
|
+ if (!is_smp())
|
|
+ return 0;
|
|
+
|
|
np = of_find_matching_node(NULL, of_coherency_table);
|
|
if (np) {
|
|
struct resource res;
|
|
@@ -141,6 +178,7 @@ int __init coherency_init(void)
|
|
coherency_base = of_iomap(np, 0);
|
|
coherency_cpu_base = of_iomap(np, 1);
|
|
set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
|
|
+ coherency_enabled = 1;
|
|
of_node_put(np);
|
|
}
|
|
|
|
@@ -151,6 +189,9 @@ static int __init coherency_late_init(void)
|
|
{
|
|
struct device_node *np;
|
|
|
|
+ if (!is_smp())
|
|
+ return 0;
|
|
+
|
|
np = of_find_matching_node(NULL, of_coherency_table);
|
|
if (np) {
|
|
bus_register_notifier(&platform_bus_type,
|
|
diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
|
|
index 760226c..63e18c6 100644
|
|
--- a/arch/arm/mach-mvebu/coherency.h
|
|
+++ b/arch/arm/mach-mvebu/coherency.h
|
|
@@ -17,6 +17,7 @@
|
|
extern unsigned long coherency_phys_base;
|
|
|
|
int set_cpu_coherent(unsigned int cpu_id, int smp_group_id);
|
|
+int coherency_available(void);
|
|
int coherency_init(void);
|
|
|
|
#endif /* __MACH_370_XP_COHERENCY_H */
|
|
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
|
|
index 57d5df0..7581e03 100644
|
|
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
|
|
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
|
|
@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
|
|
.dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT,
|
|
.wkdep_srcs = l4per2_wkup_sleep_deps,
|
|
.sleepdep_srcs = l4per2_wkup_sleep_deps,
|
|
- .flags = CLKDM_CAN_HWSUP_SWSUP,
|
|
+ .flags = CLKDM_CAN_SWSUP,
|
|
};
|
|
|
|
static struct clockdomain mpu0_7xx_clkdm = {
|
|
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
|
|
index 44bb4d5..89cde07 100644
|
|
--- a/arch/arm/mach-omap2/control.c
|
|
+++ b/arch/arm/mach-omap2/control.c
|
|
@@ -314,7 +314,8 @@ void omap3_save_scratchpad_contents(void)
|
|
scratchpad_contents.public_restore_ptr =
|
|
virt_to_phys(omap3_restore_3630);
|
|
else if (omap_rev() != OMAP3430_REV_ES3_0 &&
|
|
- omap_rev() != OMAP3430_REV_ES3_1)
|
|
+ omap_rev() != OMAP3430_REV_ES3_1 &&
|
|
+ omap_rev() != OMAP3430_REV_ES3_1_2)
|
|
scratchpad_contents.public_restore_ptr =
|
|
virt_to_phys(omap3_restore);
|
|
else
|
|
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
|
|
index 66c60fe..399af1e 100644
|
|
--- a/arch/arm/mach-omap2/omap_hwmod.c
|
|
+++ b/arch/arm/mach-omap2/omap_hwmod.c
|
|
@@ -2185,6 +2185,8 @@ static int _enable(struct omap_hwmod *oh)
|
|
oh->mux->pads_dynamic))) {
|
|
omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
|
|
_reconfigure_io_chain();
|
|
+ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
|
|
+ _reconfigure_io_chain();
|
|
}
|
|
|
|
_add_initiator_dep(oh, mpu_oh);
|
|
@@ -2291,6 +2293,8 @@ static int _idle(struct omap_hwmod *oh)
|
|
if (oh->mux && oh->mux->pads_dynamic) {
|
|
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
|
|
_reconfigure_io_chain();
|
|
+ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
|
|
+ _reconfigure_io_chain();
|
|
}
|
|
|
|
oh->_state = _HWMOD_STATE_IDLE;
|
|
@@ -2448,6 +2452,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
|
|
* registers. This address is needed early so the OCP registers that
|
|
* are part of the device's address space can be ioremapped properly.
|
|
*
|
|
+ * If SYSC access is not needed, the registers will not be remapped
|
|
+ * and non-availability of MPU access is not treated as an error.
|
|
+ *
|
|
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
|
|
* -ENXIO on absent or invalid register target address space.
|
|
*/
|
|
@@ -2462,6 +2469,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
|
|
|
|
_save_mpu_port_index(oh);
|
|
|
|
+ /* if we don't need sysc access we don't need to ioremap */
|
|
+ if (!oh->class->sysc)
|
|
+ return 0;
|
|
+
|
|
+ /* we can't continue without MPU PORT if we need sysc access */
|
|
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
|
|
return -ENXIO;
|
|
|
|
@@ -2471,8 +2483,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
|
|
oh->name);
|
|
|
|
/* Extract the IO space from device tree blob */
|
|
- if (!np)
|
|
+ if (!np) {
|
|
+ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
|
|
return -ENXIO;
|
|
+ }
|
|
|
|
va_start = of_iomap(np, index + oh->mpu_rt_idx);
|
|
} else {
|
|
@@ -2531,13 +2545,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
|
|
oh->name, np->name);
|
|
}
|
|
|
|
- if (oh->class->sysc) {
|
|
- r = _init_mpu_rt_base(oh, NULL, index, np);
|
|
- if (r < 0) {
|
|
- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
|
|
- oh->name);
|
|
- return 0;
|
|
- }
|
|
+ r = _init_mpu_rt_base(oh, NULL, index, np);
|
|
+ if (r < 0) {
|
|
+ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
|
|
+ oh->name);
|
|
+ return 0;
|
|
}
|
|
|
|
r = _init_clocks(oh, NULL);
|
|
@@ -3345,6 +3357,9 @@ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
|
|
if (!ois)
|
|
return 0;
|
|
|
|
+ if (ois[0] == NULL) /* Empty list */
|
|
+ return 0;
|
|
+
|
|
if (!linkspace) {
|
|
if (_alloc_linkspace(ois)) {
|
|
pr_err("omap_hwmod: could not allocate link space\n");
|
|
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
|
|
index 810c205..d3ac4c6 100644
|
|
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
|
|
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
|
|
@@ -35,6 +35,7 @@
|
|
#include "i2c.h"
|
|
#include "mmc.h"
|
|
#include "wd_timer.h"
|
|
+#include "soc.h"
|
|
|
|
/* Base offset for all DRA7XX interrupts external to MPUSS */
|
|
#define DRA7XX_IRQ_GIC_START 32
|
|
@@ -1668,7 +1669,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = {
|
|
.class = &dra7xx_uart_hwmod_class,
|
|
.clkdm_name = "l4per_clkdm",
|
|
.main_clk = "uart3_gfclk_mux",
|
|
- .flags = HWMOD_SWSUP_SIDLE_ACT,
|
|
+ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
|
|
.prcm = {
|
|
.omap4 = {
|
|
.clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
|
|
@@ -2707,7 +2708,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
|
|
&dra7xx_l4_per3__usb_otg_ss1,
|
|
&dra7xx_l4_per3__usb_otg_ss2,
|
|
&dra7xx_l4_per3__usb_otg_ss3,
|
|
- &dra7xx_l4_per3__usb_otg_ss4,
|
|
&dra7xx_l3_main_1__vcp1,
|
|
&dra7xx_l4_per2__vcp1,
|
|
&dra7xx_l3_main_1__vcp2,
|
|
@@ -2716,8 +2716,26 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
|
|
NULL,
|
|
};
|
|
|
|
+static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = {
|
|
+ &dra7xx_l4_per3__usb_otg_ss4,
|
|
+ NULL,
|
|
+};
|
|
+
|
|
+static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = {
|
|
+ NULL,
|
|
+};
|
|
+
|
|
int __init dra7xx_hwmod_init(void)
|
|
{
|
|
+ int ret;
|
|
+
|
|
omap_hwmod_init();
|
|
- return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
|
|
+ ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
|
|
+
|
|
+ if (!ret && soc_is_dra74x())
|
|
+ return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
|
|
+ else if (!ret && soc_is_dra72x())
|
|
+ return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
|
|
+
|
|
+ return ret;
|
|
}
|
|
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
|
|
index eefb30c..2b9cff9 100644
|
|
--- a/arch/arm/mach-omap2/pm44xx.c
|
|
+++ b/arch/arm/mach-omap2/pm44xx.c
|
|
@@ -148,26 +148,6 @@ static inline int omap4_init_static_deps(void)
|
|
struct clockdomain *ducati_clkdm, *l3_2_clkdm;
|
|
int ret = 0;
|
|
|
|
- if (omap_rev() == OMAP4430_REV_ES1_0) {
|
|
- WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
|
|
- return -ENODEV;
|
|
- }
|
|
-
|
|
- pr_err("Power Management for TI OMAP4.\n");
|
|
- /*
|
|
- * OMAP4 chip PM currently works only with certain (newer)
|
|
- * versions of bootloaders. This is due to missing code in the
|
|
- * kernel to properly reset and initialize some devices.
|
|
- * http://www.spinics.net/lists/arm-kernel/msg218641.html
|
|
- */
|
|
- pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
|
|
-
|
|
- ret = pwrdm_for_each(pwrdms_setup, NULL);
|
|
- if (ret) {
|
|
- pr_err("Failed to setup powerdomains\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
/*
|
|
* The dynamic dependency between MPUSS -> MEMIF and
|
|
* MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
|
|
@@ -231,6 +211,15 @@ int __init omap4_pm_init(void)
|
|
|
|
pr_info("Power Management for TI OMAP4+ devices.\n");
|
|
|
|
+ /*
|
|
+ * OMAP4 chip PM currently works only with certain (newer)
|
|
+ * versions of bootloaders. This is due to missing code in the
|
|
+ * kernel to properly reset and initialize some devices.
|
|
+ * http://www.spinics.net/lists/arm-kernel/msg218641.html
|
|
+ */
|
|
+ if (cpu_is_omap44xx())
|
|
+ pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
|
|
+
|
|
ret = pwrdm_for_each(pwrdms_setup, NULL);
|
|
if (ret) {
|
|
pr_err("Failed to setup powerdomains.\n");
|
|
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
|
|
index 076bd90..8a9be09 100644
|
|
--- a/arch/arm/mach-omap2/soc.h
|
|
+++ b/arch/arm/mach-omap2/soc.h
|
|
@@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437)
|
|
#define soc_is_omap54xx() 0
|
|
#define soc_is_omap543x() 0
|
|
#define soc_is_dra7xx() 0
|
|
+#define soc_is_dra74x() 0
|
|
+#define soc_is_dra72x() 0
|
|
|
|
#if defined(MULTI_OMAP2)
|
|
# if defined(CONFIG_ARCH_OMAP2)
|
|
@@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430)
|
|
|
|
#if defined(CONFIG_SOC_DRA7XX)
|
|
#undef soc_is_dra7xx
|
|
+#undef soc_is_dra74x
|
|
+#undef soc_is_dra72x
|
|
#define soc_is_dra7xx() (of_machine_is_compatible("ti,dra7"))
|
|
+#define soc_is_dra74x() (of_machine_is_compatible("ti,dra74"))
|
|
+#define soc_is_dra72x() (of_machine_is_compatible("ti,dra72"))
|
|
#endif
|
|
|
|
/* Various silicon revisions for omap2 */
|
|
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
|
|
index 74044aa..73d80b8 100644
|
|
--- a/arch/arm/mach-omap2/timer.c
|
|
+++ b/arch/arm/mach-omap2/timer.c
|
|
@@ -513,11 +513,11 @@ static void __init realtime_counter_init(void)
|
|
rate = clk_get_rate(sys_clk);
|
|
/* Numerator/denumerator values refer TRM Realtime Counter section */
|
|
switch (rate) {
|
|
- case 1200000:
|
|
+ case 12000000:
|
|
num = 64;
|
|
den = 125;
|
|
break;
|
|
- case 1300000:
|
|
+ case 13000000:
|
|
num = 768;
|
|
den = 1625;
|
|
break;
|
|
@@ -529,11 +529,11 @@ static void __init realtime_counter_init(void)
|
|
num = 192;
|
|
den = 625;
|
|
break;
|
|
- case 2600000:
|
|
+ case 26000000:
|
|
num = 384;
|
|
den = 1625;
|
|
break;
|
|
- case 2700000:
|
|
+ case 27000000:
|
|
num = 256;
|
|
den = 1125;
|
|
break;
|
|
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
|
|
index f162f1b..82fd9dd 100644
|
|
--- a/arch/arm/mach-pxa/corgi.c
|
|
+++ b/arch/arm/mach-pxa/corgi.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/i2c.h>
|
|
#include <linux/i2c/pxa-i2c.h>
|
|
#include <linux/io.h>
|
|
+#include <linux/regulator/machine.h>
|
|
#include <linux/spi/spi.h>
|
|
#include <linux/spi/ads7846.h>
|
|
#include <linux/spi/corgi_lcd.h>
|
|
@@ -711,6 +712,8 @@ static void __init corgi_init(void)
|
|
sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
|
|
|
|
platform_add_devices(devices, ARRAY_SIZE(devices));
|
|
+
|
|
+ regulator_has_full_constraints();
|
|
}
|
|
|
|
static void __init fixup_corgi(struct tag *tags, char **cmdline,
|
|
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
|
|
index a7c30eb..007fd8a 100644
|
|
--- a/arch/arm/mach-pxa/hx4700.c
|
|
+++ b/arch/arm/mach-pxa/hx4700.c
|
|
@@ -892,6 +892,8 @@ static void __init hx4700_init(void)
|
|
mdelay(10);
|
|
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
|
|
mdelay(10);
|
|
+
|
|
+ regulator_has_full_constraints();
|
|
}
|
|
|
|
MACHINE_START(H4700, "HP iPAQ HX4700")
|
|
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
|
|
index aedf053..b4fff29 100644
|
|
--- a/arch/arm/mach-pxa/poodle.c
|
|
+++ b/arch/arm/mach-pxa/poodle.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <linux/gpio.h>
|
|
#include <linux/i2c.h>
|
|
#include <linux/i2c/pxa-i2c.h>
|
|
+#include <linux/regulator/machine.h>
|
|
#include <linux/spi/spi.h>
|
|
#include <linux/spi/ads7846.h>
|
|
#include <linux/spi/pxa2xx_spi.h>
|
|
@@ -454,6 +455,7 @@ static void __init poodle_init(void)
|
|
pxa_set_i2c_info(NULL);
|
|
i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
|
|
poodle_init_spi();
|
|
+ regulator_has_full_constraints();
|
|
}
|
|
|
|
static void __init fixup_poodle(struct tag *tags, char **cmdline,
|
|
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
|
|
index 2022e09..db09170 100644
|
|
--- a/arch/arm/mach-realview/include/mach/memory.h
|
|
+++ b/arch/arm/mach-realview/include/mach/memory.h
|
|
@@ -56,6 +56,8 @@
|
|
#define PAGE_OFFSET1 (PAGE_OFFSET + 0x10000000)
|
|
#define PAGE_OFFSET2 (PAGE_OFFSET + 0x30000000)
|
|
|
|
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
|
|
+
|
|
#define __phys_to_virt(phys) \
|
|
((phys) >= 0x80000000 ? (phys) - 0x80000000 + PAGE_OFFSET2 : \
|
|
(phys) >= 0x20000000 ? (phys) - 0x20000000 + PAGE_OFFSET1 : \
|
|
diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
|
|
index 7bc6668..dcbe17f 100644
|
|
--- a/arch/arm/mach-s3c64xx/crag6410.h
|
|
+++ b/arch/arm/mach-s3c64xx/crag6410.h
|
|
@@ -14,6 +14,7 @@
|
|
#include <mach/gpio-samsung.h>
|
|
|
|
#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
|
|
+#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
|
|
|
|
#define PCA935X_GPIO_BASE GPIO_BOARD_START
|
|
#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
|
|
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
|
|
index 3df3c37..66b95c4 100644
|
|
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
|
|
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
|
|
@@ -555,6 +555,7 @@ static struct wm831x_touch_pdata touch_pdata = {
|
|
|
|
static struct wm831x_pdata crag_pmic_pdata = {
|
|
.wm831x_num = 1,
|
|
+ .irq_base = BANFF_PMIC_IRQ_BASE,
|
|
.gpio_base = BANFF_PMIC_GPIO_BASE,
|
|
.soft_shutdown = true,
|
|
|
|
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
|
|
index 6645d1e..34853d5 100644
|
|
--- a/arch/arm/mach-sa1100/pm.c
|
|
+++ b/arch/arm/mach-sa1100/pm.c
|
|
@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
|
|
/*
|
|
* Ensure not to come back here if it wasn't intended
|
|
*/
|
|
+ RCSR = RCSR_SMR;
|
|
PSPR = 0;
|
|
|
|
/*
|
|
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
|
|
index f74ab53..2b73c8a 100644
|
|
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
|
|
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
|
|
@@ -617,6 +617,7 @@ static struct platform_device ipmmu_device = {
|
|
|
|
static struct renesas_intc_irqpin_config irqpin0_platform_data = {
|
|
.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
|
|
+ .control_parent = true,
|
|
};
|
|
|
|
static struct resource irqpin0_resources[] = {
|
|
@@ -678,6 +679,7 @@ static struct platform_device irqpin1_device = {
|
|
|
|
static struct renesas_intc_irqpin_config irqpin2_platform_data = {
|
|
.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
|
|
+ .control_parent = true,
|
|
};
|
|
|
|
static struct resource irqpin2_resources[] = {
|
|
@@ -708,6 +710,7 @@ static struct platform_device irqpin2_device = {
|
|
|
|
static struct renesas_intc_irqpin_config irqpin3_platform_data = {
|
|
.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
|
|
+ .control_parent = true,
|
|
};
|
|
|
|
static struct resource irqpin3_resources[] = {
|
|
diff --git a/arch/arm/mach-sunxi/Makefile b/arch/arm/mach-sunxi/Makefile
|
|
index d939720..27b168f 100644
|
|
--- a/arch/arm/mach-sunxi/Makefile
|
|
+++ b/arch/arm/mach-sunxi/Makefile
|
|
@@ -1,2 +1,2 @@
|
|
obj-$(CONFIG_ARCH_SUNXI) += sunxi.o
|
|
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
|
|
+obj-$(CONFIG_SMP) += platsmp.o
|
|
diff --git a/arch/arm/mach-sunxi/headsmp.S b/arch/arm/mach-sunxi/headsmp.S
|
|
deleted file mode 100644
|
|
index a10d494..0000000
|
|
--- a/arch/arm/mach-sunxi/headsmp.S
|
|
+++ /dev/null
|
|
@@ -1,9 +0,0 @@
|
|
-#include <linux/linkage.h>
|
|
-#include <linux/init.h>
|
|
-
|
|
- .section ".text.head", "ax"
|
|
-
|
|
-ENTRY(sun6i_secondary_startup)
|
|
- msr cpsr_fsxc, #0xd3
|
|
- b secondary_startup
|
|
-ENDPROC(sun6i_secondary_startup)
|
|
diff --git a/arch/arm/mach-sunxi/platsmp.c b/arch/arm/mach-sunxi/platsmp.c
|
|
index 7b141d8..0c7dbce 100644
|
|
--- a/arch/arm/mach-sunxi/platsmp.c
|
|
+++ b/arch/arm/mach-sunxi/platsmp.c
|
|
@@ -82,7 +82,7 @@ static int sun6i_smp_boot_secondary(unsigned int cpu,
|
|
spin_lock(&cpu_lock);
|
|
|
|
/* Set CPU boot address */
|
|
- writel(virt_to_phys(sun6i_secondary_startup),
|
|
+ writel(virt_to_phys(secondary_startup),
|
|
cpucfg_membase + CPUCFG_PRIVATE0_REG);
|
|
|
|
/* Assert the CPU core in reset */
|
|
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
|
|
index 578d4d1..6448324 100644
|
|
--- a/arch/arm/mach-tegra/reset-handler.S
|
|
+++ b/arch/arm/mach-tegra/reset-handler.S
|
|
@@ -50,6 +50,7 @@ ENTRY(tegra_resume)
|
|
THUMB( it ne )
|
|
bne cpu_resume @ no
|
|
|
|
+ tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
|
|
/* Are we on Tegra20? */
|
|
cmp r6, #TEGRA20
|
|
beq 1f @ Yes
|
|
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
|
|
index 118d691..57b9650 100644
|
|
--- a/arch/arm/mm/Kconfig
|
|
+++ b/arch/arm/mm/Kconfig
|
|
@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS
|
|
|
|
config KUSER_HELPERS
|
|
bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
|
|
+ depends on MMU
|
|
default y
|
|
help
|
|
Warning: disabling this option may break user programs.
|
|
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
|
|
index 3815a82..8c48c5c 100644
|
|
--- a/arch/arm/mm/abort-ev6.S
|
|
+++ b/arch/arm/mm/abort-ev6.S
|
|
@@ -17,12 +17,6 @@
|
|
*/
|
|
.align 5
|
|
ENTRY(v6_early_abort)
|
|
-#ifdef CONFIG_CPU_V6
|
|
- sub r1, sp, #4 @ Get unused stack location
|
|
- strex r0, r1, [r1] @ Clear the exclusive monitor
|
|
-#elif defined(CONFIG_CPU_32v6K)
|
|
- clrex
|
|
-#endif
|
|
mrc p15, 0, r1, c5, c0, 0 @ get FSR
|
|
mrc p15, 0, r0, c6, c0, 0 @ get FAR
|
|
/*
|
|
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
|
|
index 7033752..4812ad0 100644
|
|
--- a/arch/arm/mm/abort-ev7.S
|
|
+++ b/arch/arm/mm/abort-ev7.S
|
|
@@ -13,12 +13,6 @@
|
|
*/
|
|
.align 5
|
|
ENTRY(v7_early_abort)
|
|
- /*
|
|
- * The effect of data aborts on on the exclusive access monitor are
|
|
- * UNPREDICTABLE. Do a CLREX to clear the state
|
|
- */
|
|
- clrex
|
|
-
|
|
mrc p15, 0, r1, c5, c0, 0 @ get FSR
|
|
mrc p15, 0, r0, c6, c0, 0 @ get FAR
|
|
|
|
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
|
|
index 9240364..d301662 100644
|
|
--- a/arch/arm/mm/alignment.c
|
|
+++ b/arch/arm/mm/alignment.c
|
|
@@ -40,6 +40,7 @@
|
|
* This code is not portable to processors with late data abort handling.
|
|
*/
|
|
#define CODING_BITS(i) (i & 0x0e000000)
|
|
+#define COND_BITS(i) (i & 0xf0000000)
|
|
|
|
#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
|
|
#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
|
|
@@ -817,6 +818,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|
break;
|
|
|
|
case 0x04000000: /* ldr or str immediate */
|
|
+ if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
|
|
+ goto bad;
|
|
offset.un = OFFSET_BITS(instr);
|
|
handler = do_alignment_ldrstr;
|
|
break;
|
|
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
|
|
index 6eb97b3..4370933 100644
|
|
--- a/arch/arm/mm/context.c
|
|
+++ b/arch/arm/mm/context.c
|
|
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
|
|
/* Update the list of reserved ASIDs and the ASID bitmap. */
|
|
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
|
|
for_each_possible_cpu(i) {
|
|
- if (i == cpu) {
|
|
- asid = 0;
|
|
- } else {
|
|
- asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
|
|
- /*
|
|
- * If this CPU has already been through a
|
|
- * rollover, but hasn't run another task in
|
|
- * the meantime, we must preserve its reserved
|
|
- * ASID, as this is the only trace we have of
|
|
- * the process it is still running.
|
|
- */
|
|
- if (asid == 0)
|
|
- asid = per_cpu(reserved_asids, i);
|
|
- __set_bit(asid & ~ASID_MASK, asid_map);
|
|
- }
|
|
+ asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
|
|
+ /*
|
|
+ * If this CPU has already been through a
|
|
+ * rollover, but hasn't run another task in
|
|
+ * the meantime, we must preserve its reserved
|
|
+ * ASID, as this is the only trace we have of
|
|
+ * the process it is still running.
|
|
+ */
|
|
+ if (asid == 0)
|
|
+ asid = per_cpu(reserved_asids, i);
|
|
+ __set_bit(asid & ~ASID_MASK, asid_map);
|
|
per_cpu(reserved_asids, i) = asid;
|
|
}
|
|
|
|
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
|
|
index a810425..fae192b 100644
|
|
--- a/arch/arm/mm/dma-mapping.c
|
|
+++ b/arch/arm/mm/dma-mapping.c
|
|
@@ -465,12 +465,21 @@ void __init dma_contiguous_remap(void)
|
|
map.type = MT_MEMORY_DMA_READY;
|
|
|
|
/*
|
|
- * Clear previous low-memory mapping
|
|
+ * Clear previous low-memory mapping to ensure that the
|
|
+ * TLB does not see any conflicting entries, then flush
|
|
+ * the TLB of the old entries before creating new mappings.
|
|
+ *
|
|
+ * This ensures that any speculatively loaded TLB entries
|
|
+ * (even though they may be rare) can not cause any problems,
|
|
+ * and ensures that this code is architecturally compliant.
|
|
*/
|
|
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
|
|
addr += PMD_SIZE)
|
|
pmd_clear(pmd_off_k(addr));
|
|
|
|
+ flush_tlb_kernel_range(__phys_to_virt(start),
|
|
+ __phys_to_virt(end));
|
|
+
|
|
iotable_init(&map, 1);
|
|
}
|
|
}
|
|
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
|
|
index 8e0e52e..d7a0ee8 100644
|
|
--- a/arch/arm/mm/idmap.c
|
|
+++ b/arch/arm/mm/idmap.c
|
|
@@ -25,6 +25,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
|
|
pr_warning("Failed to allocate identity pmd.\n");
|
|
return;
|
|
}
|
|
+ /*
|
|
+ * Copy the original PMD to ensure that the PMD entries for
|
|
+ * the kernel image are preserved.
|
|
+ */
|
|
+ if (!pud_none(*pud))
|
|
+ memcpy(pmd, pmd_offset(pud, 0),
|
|
+ PTRS_PER_PMD * sizeof(pmd_t));
|
|
pud_populate(&init_mm, pud, pmd);
|
|
pmd += pmd_index(addr);
|
|
} else
|
|
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
|
|
index b68c6b2..f15c22e 100644
|
|
--- a/arch/arm/mm/mmu.c
|
|
+++ b/arch/arm/mm/mmu.c
|
|
@@ -1436,8 +1436,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
|
|
return;
|
|
|
|
/* remap kernel code and data */
|
|
- map_start = init_mm.start_code;
|
|
- map_end = init_mm.brk;
|
|
+ map_start = init_mm.start_code & PMD_MASK;
|
|
+ map_end = ALIGN(init_mm.brk, PMD_SIZE);
|
|
|
|
/* get a handle on things... */
|
|
pgd0 = pgd_offset_k(0);
|
|
@@ -1472,7 +1472,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
|
|
}
|
|
|
|
/* remap pmds for kernel mapping */
|
|
- phys = __pa(map_start) & PMD_MASK;
|
|
+ phys = __pa(map_start);
|
|
do {
|
|
*pmdk++ = __pmd(phys | pmdprot);
|
|
phys += PMD_SIZE;
|
|
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
|
|
index 22e3ad6..eb81123 100644
|
|
--- a/arch/arm/mm/proc-v7-3level.S
|
|
+++ b/arch/arm/mm/proc-v7-3level.S
|
|
@@ -86,8 +86,13 @@ ENTRY(cpu_v7_set_pte_ext)
|
|
tst rh, #1 << (57 - 32) @ L_PTE_NONE
|
|
bicne rl, #L_PTE_VALID
|
|
bne 1f
|
|
- tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
|
|
- orreq rl, #L_PTE_RDONLY
|
|
+
|
|
+ eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
|
|
+ @ test for !L_PTE_DIRTY || L_PTE_RDONLY
|
|
+ tst ip, #1 << (55 - 32) | 1 << (58 - 32)
|
|
+ orrne rl, #PTE_AP2
|
|
+ biceq rl, #PTE_AP2
|
|
+
|
|
1: strd r2, r3, [r0]
|
|
ALT_SMP(W(nop))
|
|
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
|
|
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
|
|
index 158b875..bf7487d 100644
|
|
--- a/arch/arm/mm/proc-v7.S
|
|
+++ b/arch/arm/mm/proc-v7.S
|
|
@@ -211,7 +211,6 @@ __v7_pj4b_setup:
|
|
/* Auxiliary Debug Modes Control 1 Register */
|
|
#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
|
|
#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
|
|
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
|
|
#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
|
|
|
|
/* Auxiliary Debug Modes Control 2 Register */
|
|
@@ -234,7 +233,6 @@ __v7_pj4b_setup:
|
|
/* Auxiliary Debug Modes Control 1 Register */
|
|
mrc p15, 1, r0, c15, c1, 1
|
|
orr r0, r0, #PJ4B_CLEAN_LINE
|
|
- orr r0, r0, #PJ4B_BCK_OFF_STREX
|
|
orr r0, r0, #PJ4B_INTER_PARITY
|
|
bic r0, r0, #PJ4B_STATIC_BP
|
|
mcr p15, 1, r0, c15, c1, 1
|
|
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
|
|
index d19b1cf..b34b95f 100644
|
|
--- a/arch/arm/mm/proc-xscale.S
|
|
+++ b/arch/arm/mm/proc-xscale.S
|
|
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
|
|
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
|
|
mrc p15, 0, r6, c13, c0, 0 @ PID
|
|
mrc p15, 0, r7, c3, c0, 0 @ domain ID
|
|
- mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg
|
|
+ mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
|
|
mrc p15, 0, r9, c1, c0, 0 @ control reg
|
|
bic r4, r4, #2 @ clear frequency change bit
|
|
stmia r0, {r4 - r9} @ store cp regs
|
|
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
|
|
mcr p15, 0, r6, c13, c0, 0 @ PID
|
|
mcr p15, 0, r7, c3, c0, 0 @ domain ID
|
|
mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
|
|
- mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg
|
|
+ mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
|
|
mov r0, r9 @ control register
|
|
b cpu_resume_mmu
|
|
ENDPROC(cpu_xscale_do_resume)
|
|
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
|
|
index 271b5e9..6adf591 100644
|
|
--- a/arch/arm/net/bpf_jit_32.c
|
|
+++ b/arch/arm/net/bpf_jit_32.c
|
|
@@ -449,10 +449,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
|
|
return;
|
|
}
|
|
#endif
|
|
- if (rm != ARM_R0)
|
|
- emit(ARM_MOV_R(ARM_R0, rm), ctx);
|
|
+
|
|
+ /*
|
|
+ * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
|
|
+ * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
|
|
+ * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
|
|
+ * before using it as a source for ARM_R1.
|
|
+ *
|
|
+ * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
|
|
+ * ARM_R5 (r_X) so there is no particular register overlap
|
|
+ * issues.
|
|
+ */
|
|
if (rn != ARM_R1)
|
|
emit(ARM_MOV_R(ARM_R1, rn), ctx);
|
|
+ if (rm != ARM_R0)
|
|
+ emit(ARM_MOV_R(ARM_R0, rm), ctx);
|
|
|
|
ctx->seen |= SEEN_CALL;
|
|
emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
|
|
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
|
|
index 253e33b..56de5aa 100644
|
|
--- a/arch/arm64/include/asm/compat.h
|
|
+++ b/arch/arm64/include/asm/compat.h
|
|
@@ -37,8 +37,8 @@ typedef s32 compat_ssize_t;
|
|
typedef s32 compat_time_t;
|
|
typedef s32 compat_clock_t;
|
|
typedef s32 compat_pid_t;
|
|
-typedef u32 __compat_uid_t;
|
|
-typedef u32 __compat_gid_t;
|
|
+typedef u16 __compat_uid_t;
|
|
+typedef u16 __compat_gid_t;
|
|
typedef u16 __compat_uid16_t;
|
|
typedef u16 __compat_gid16_t;
|
|
typedef u32 __compat_uid32_t;
|
|
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
|
|
index c404fb0..64bc6c6 100644
|
|
--- a/arch/arm64/include/asm/cputype.h
|
|
+++ b/arch/arm64/include/asm/cputype.h
|
|
@@ -77,6 +77,8 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
|
|
return read_cpuid(CTR_EL0);
|
|
}
|
|
|
|
+void cpuinfo_store_cpu(void);
|
|
+
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif
|
|
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
|
|
index d064047..52b484b 100644
|
|
--- a/arch/arm64/include/asm/hw_breakpoint.h
|
|
+++ b/arch/arm64/include/asm/hw_breakpoint.h
|
|
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
|
|
*/
|
|
#define ARM_MAX_BRP 16
|
|
#define ARM_MAX_WRP 16
|
|
-#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
|
|
|
|
/* Virtual debug register bases. */
|
|
#define AARCH64_DBG_REG_BVR 0
|
|
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
|
|
index 024c461..0ad7351 100644
|
|
--- a/arch/arm64/include/asm/hwcap.h
|
|
+++ b/arch/arm64/include/asm/hwcap.h
|
|
@@ -30,6 +30,7 @@
|
|
#define COMPAT_HWCAP_IDIVA (1 << 17)
|
|
#define COMPAT_HWCAP_IDIVT (1 << 18)
|
|
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
|
|
+#define COMPAT_HWCAP_LPAE (1 << 20)
|
|
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
|
|
|
|
#define COMPAT_HWCAP2_AES (1 << 0)
|
|
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
|
|
index 0eb3986..ea68925 100644
|
|
--- a/arch/arm64/include/asm/kvm_arm.h
|
|
+++ b/arch/arm64/include/asm/kvm_arm.h
|
|
@@ -18,6 +18,7 @@
|
|
#ifndef __ARM64_KVM_ARM_H__
|
|
#define __ARM64_KVM_ARM_H__
|
|
|
|
+#include <asm/memory.h>
|
|
#include <asm/types.h>
|
|
|
|
/* Hyp Configuration Register (HCR) bits */
|
|
@@ -62,6 +63,7 @@
|
|
* RW: 64bit by default, can be overriden for 32bit VMs
|
|
* TAC: Trap ACTLR
|
|
* TSC: Trap SMC
|
|
+ * TVM: Trap VM ops (until M+C set in SCTLR_EL1)
|
|
* TSW: Trap cache operations by set/way
|
|
* TWE: Trap WFE
|
|
* TWI: Trap WFI
|
|
@@ -74,7 +76,7 @@
|
|
* SWIO: Turn set/way invalidates into set/way clean+invalidate
|
|
*/
|
|
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
|
|
- HCR_BSU_IS | HCR_FB | HCR_TAC | \
|
|
+ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
|
|
HCR_AMO | HCR_IMO | HCR_FMO | \
|
|
HCR_SWIO | HCR_TIDCP | HCR_RW)
|
|
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
|
|
@@ -121,6 +123,17 @@
|
|
#define VTCR_EL2_T0SZ_MASK 0x3f
|
|
#define VTCR_EL2_T0SZ_40B 24
|
|
|
|
+/*
|
|
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
|
|
+ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
|
|
+ * not known to exist and will break with this configuration.
|
|
+ *
|
|
+ * Note that when using 4K pages, we concatenate two first level page tables
|
|
+ * together.
|
|
+ *
|
|
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
|
|
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
|
|
+ */
|
|
#ifdef CONFIG_ARM64_64K_PAGES
|
|
/*
|
|
* Stage2 translation configuration:
|
|
@@ -150,9 +163,9 @@
|
|
#endif
|
|
|
|
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
|
|
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
|
-#define VTTBR_VMID_SHIFT (48LLU)
|
|
-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
|
|
+#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
|
+#define VTTBR_VMID_SHIFT (UL(48))
|
|
+#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
|
|
|
|
/* Hyp System Trap Register */
|
|
#define HSTR_EL2_TTEE (1 << 16)
|
|
@@ -175,13 +188,13 @@
|
|
|
|
/* Exception Syndrome Register (ESR) bits */
|
|
#define ESR_EL2_EC_SHIFT (26)
|
|
-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
|
|
-#define ESR_EL2_IL (1U << 25)
|
|
+#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
|
|
+#define ESR_EL2_IL (UL(1) << 25)
|
|
#define ESR_EL2_ISS (ESR_EL2_IL - 1)
|
|
#define ESR_EL2_ISV_SHIFT (24)
|
|
-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
|
|
+#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
|
|
#define ESR_EL2_SAS_SHIFT (22)
|
|
-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
|
|
+#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
|
|
#define ESR_EL2_SSE (1 << 21)
|
|
#define ESR_EL2_SRT_SHIFT (16)
|
|
#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
|
|
@@ -195,16 +208,16 @@
|
|
#define ESR_EL2_FSC_TYPE (0x3c)
|
|
|
|
#define ESR_EL2_CV_SHIFT (24)
|
|
-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
|
|
+#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
|
|
#define ESR_EL2_COND_SHIFT (20)
|
|
-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
|
|
+#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
|
|
|
|
|
|
#define FSC_FAULT (0x04)
|
|
#define FSC_PERM (0x0c)
|
|
|
|
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
|
|
-#define HPFAR_MASK (~0xFUL)
|
|
+#define HPFAR_MASK (~UL(0xf))
|
|
|
|
#define ESR_EL2_EC_UNKNOWN (0x00)
|
|
#define ESR_EL2_EC_WFI (0x01)
|
|
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
|
|
index b25763b..9fcd54b 100644
|
|
--- a/arch/arm64/include/asm/kvm_asm.h
|
|
+++ b/arch/arm64/include/asm/kvm_asm.h
|
|
@@ -79,7 +79,8 @@
|
|
#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
|
|
#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
|
|
#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
|
|
-#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
|
|
+#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
|
|
+#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
|
|
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
|
|
#define NR_CP15_REGS (NR_SYS_REGS * 2)
|
|
|
|
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
|
|
index dd8ecfc..91f33c2 100644
|
|
--- a/arch/arm64/include/asm/kvm_emulate.h
|
|
+++ b/arch/arm64/include/asm/kvm_emulate.h
|
|
@@ -38,6 +38,13 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
|
|
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
|
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
|
|
|
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
|
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
|
|
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
|
|
+}
|
|
+
|
|
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
|
|
{
|
|
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
|
|
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
|
|
index 0a1d697..3fb0946 100644
|
|
--- a/arch/arm64/include/asm/kvm_host.h
|
|
+++ b/arch/arm64/include/asm/kvm_host.h
|
|
@@ -42,7 +42,7 @@
|
|
#define KVM_VCPU_MAX_FEATURES 2
|
|
|
|
struct kvm_vcpu;
|
|
-int kvm_target_cpu(void);
|
|
+int __attribute_const__ kvm_target_cpu(void);
|
|
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
|
|
int kvm_arch_dev_ioctl_check_extension(long ext);
|
|
|
|
@@ -177,7 +177,7 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
|
|
}
|
|
|
|
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
|
|
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
|
|
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
|
|
|
|
u64 kvm_call_hyp(void *hypfn, ...);
|
|
|
|
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
|
|
index 7f1f940..15a8a86 100644
|
|
--- a/arch/arm64/include/asm/kvm_mmu.h
|
|
+++ b/arch/arm64/include/asm/kvm_mmu.h
|
|
@@ -59,10 +59,9 @@
|
|
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
|
|
|
|
/*
|
|
- * Align KVM with the kernel's view of physical memory. Should be
|
|
- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
|
|
+ * We currently only support a 40bit IPA.
|
|
*/
|
|
-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
|
|
+#define KVM_PHYS_SHIFT (40)
|
|
#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
|
|
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
|
|
|
|
@@ -70,11 +69,14 @@
|
|
#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
|
|
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
|
|
|
|
+#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
|
|
+
|
|
int create_hyp_mappings(void *from, void *to);
|
|
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
|
|
void free_boot_hyp_pgd(void);
|
|
void free_hyp_pgds(void);
|
|
|
|
+void stage2_unmap_vm(struct kvm *kvm);
|
|
int kvm_alloc_stage2_pgd(struct kvm *kvm);
|
|
void kvm_free_stage2_pgd(struct kvm *kvm);
|
|
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
|
|
@@ -93,20 +95,6 @@ void kvm_clear_hyp_idmap(void);
|
|
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
|
|
#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
|
|
|
|
-static inline bool kvm_is_write_fault(unsigned long esr)
|
|
-{
|
|
- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
|
|
-
|
|
- if (esr_ec == ESR_EL2_EC_IABT)
|
|
- return false;
|
|
-
|
|
- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
|
|
- return false;
|
|
-
|
|
- return true;
|
|
-}
|
|
-
|
|
-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
|
|
static inline void kvm_clean_pgd(pgd_t *pgd) {}
|
|
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
|
|
static inline void kvm_clean_pte(pte_t *pte) {}
|
|
@@ -122,11 +110,40 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
|
|
pmd_val(*pmd) |= PMD_S2_RDWR;
|
|
}
|
|
|
|
+#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
|
|
+#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
|
|
+#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
|
|
+
|
|
+static inline bool kvm_page_empty(void *ptr)
|
|
+{
|
|
+ struct page *ptr_page = virt_to_page(ptr);
|
|
+ return page_count(ptr_page) == 1;
|
|
+}
|
|
+
|
|
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
|
|
+#ifndef CONFIG_ARM64_64K_PAGES
|
|
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
|
|
+#else
|
|
+#define kvm_pmd_table_empty(pmdp) (0)
|
|
+#endif
|
|
+#define kvm_pud_table_empty(pudp) (0)
|
|
+
|
|
+
|
|
struct kvm;
|
|
|
|
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
|
|
- unsigned long size)
|
|
+#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
|
|
+
|
|
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
|
|
{
|
|
+ return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
|
|
+}
|
|
+
|
|
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
|
|
+ unsigned long size)
|
|
+{
|
|
+ if (!vcpu_has_cache_enabled(vcpu))
|
|
+ kvm_flush_dcache_to_poc((void *)hva, size);
|
|
+
|
|
if (!icache_is_aliasing()) { /* PIPT */
|
|
flush_icache_range(hva, hva + size);
|
|
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
|
|
@@ -135,8 +152,9 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
|
|
}
|
|
}
|
|
|
|
-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
|
|
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
|
|
|
|
+void stage2_flush_vm(struct kvm *kvm);
|
|
+
|
|
#endif /* __ASSEMBLY__ */
|
|
#endif /* __ARM64_KVM_MMU_H__ */
|
|
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
|
|
index a9eee33..101a42b 100644
|
|
--- a/arch/arm64/include/asm/mmu_context.h
|
|
+++ b/arch/arm64/include/asm/mmu_context.h
|
|
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
{
|
|
unsigned int cpu = smp_processor_id();
|
|
|
|
+ /*
|
|
+ * init_mm.pgd does not contain any user mappings and it is always
|
|
+ * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
|
|
+ */
|
|
+ if (next == &init_mm) {
|
|
+ cpu_set_reserved_ttbr0();
|
|
+ return;
|
|
+ }
|
|
+
|
|
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
|
|
check_and_switch_context(next, tsk);
|
|
}
|
|
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
|
|
index e9c149c..456d67c 100644
|
|
--- a/arch/arm64/include/asm/suspend.h
|
|
+++ b/arch/arm64/include/asm/suspend.h
|
|
@@ -21,6 +21,7 @@ struct sleep_save_sp {
|
|
phys_addr_t save_ptr_stash_phys;
|
|
};
|
|
|
|
+extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
|
|
extern void cpu_resume(void);
|
|
extern int cpu_suspend(unsigned long);
|
|
|
|
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
|
|
index 92f3683..565e26f 100644
|
|
--- a/arch/arm64/kernel/insn.c
|
|
+++ b/arch/arm64/kernel/insn.c
|
|
@@ -156,9 +156,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
|
|
* which ends with "dsb; isb" pair guaranteeing global
|
|
* visibility.
|
|
*/
|
|
- atomic_set(&pp->cpu_count, -1);
|
|
+ /* Notify other processors with an additional increment. */
|
|
+ atomic_inc(&pp->cpu_count);
|
|
} else {
|
|
- while (atomic_read(&pp->cpu_count) != -1)
|
|
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
|
|
cpu_relax();
|
|
isb();
|
|
}
|
|
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
|
|
index 0f08dfd..dfa6e3e 100644
|
|
--- a/arch/arm64/kernel/irq.c
|
|
+++ b/arch/arm64/kernel/irq.c
|
|
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
|
|
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
|
|
return false;
|
|
|
|
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
|
|
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
|
|
+ affinity = cpu_online_mask;
|
|
ret = true;
|
|
+ }
|
|
|
|
- /*
|
|
- * when using forced irq_set_affinity we must ensure that the cpu
|
|
- * being offlined is not present in the affinity mask, it may be
|
|
- * selected as the target CPU otherwise
|
|
- */
|
|
- affinity = cpu_online_mask;
|
|
c = irq_data_get_irq_chip(d);
|
|
if (!c->irq_set_affinity)
|
|
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
|
|
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
|
|
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
|
|
cpumask_copy(d->affinity, affinity);
|
|
|
|
return ret;
|
|
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
|
|
index fc8a387..98861d3 100644
|
|
--- a/arch/arm64/kernel/process.c
|
|
+++ b/arch/arm64/kernel/process.c
|
|
@@ -188,9 +188,27 @@ void exit_thread(void)
|
|
{
|
|
}
|
|
|
|
+static void tls_thread_flush(void)
|
|
+{
|
|
+ asm ("msr tpidr_el0, xzr");
|
|
+
|
|
+ if (is_compat_task()) {
|
|
+ current->thread.tp_value = 0;
|
|
+
|
|
+ /*
|
|
+ * We need to ensure ordering between the shadow state and the
|
|
+ * hardware state, so that we don't corrupt the hardware state
|
|
+ * with a stale shadow state during context switch.
|
|
+ */
|
|
+ barrier();
|
|
+ asm ("msr tpidrro_el0, xzr");
|
|
+ }
|
|
+}
|
|
+
|
|
void flush_thread(void)
|
|
{
|
|
fpsimd_flush_thread();
|
|
+ tls_thread_flush();
|
|
flush_ptrace_hw_breakpoint(current);
|
|
}
|
|
|
|
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
|
|
index 7c8e809..8ba6b0f 100644
|
|
--- a/arch/arm64/kernel/ptrace.c
|
|
+++ b/arch/arm64/kernel/ptrace.c
|
|
@@ -85,7 +85,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
|
|
break;
|
|
}
|
|
}
|
|
- for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
|
|
+
|
|
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
|
|
if (current->thread.debug.hbp_watch[i] == bp) {
|
|
info.si_errno = -((i << 1) + 1);
|
|
break;
|
|
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
|
|
index e633c5e..4bc8d27 100644
|
|
--- a/arch/arm64/kernel/setup.c
|
|
+++ b/arch/arm64/kernel/setup.c
|
|
@@ -41,6 +41,7 @@
|
|
#include <linux/memblock.h>
|
|
#include <linux/of_fdt.h>
|
|
#include <linux/of_platform.h>
|
|
+#include <linux/personality.h>
|
|
|
|
#include <asm/cputype.h>
|
|
#include <asm/elf.h>
|
|
@@ -67,13 +68,13 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
|
|
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
|
|
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
|
|
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
|
|
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
|
|
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
|
|
+ COMPAT_HWCAP_LPAE)
|
|
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
|
|
unsigned int compat_elf_hwcap2 __read_mostly;
|
|
#endif
|
|
|
|
static const char *cpu_name;
|
|
-static const char *machine_name;
|
|
phys_addr_t __fdt_pointer __initdata;
|
|
|
|
/*
|
|
@@ -193,6 +194,19 @@ static void __init smp_build_mpidr_hash(void)
|
|
}
|
|
#endif
|
|
|
|
+struct cpuinfo_arm64 {
|
|
+ struct cpu cpu;
|
|
+ u32 reg_midr;
|
|
+};
|
|
+
|
|
+static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
|
|
+
|
|
+void cpuinfo_store_cpu(void)
|
|
+{
|
|
+ struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
|
|
+ info->reg_midr = read_cpuid_id();
|
|
+}
|
|
+
|
|
static void __init setup_processor(void)
|
|
{
|
|
struct cpu_info *cpu_info;
|
|
@@ -213,6 +227,8 @@ static void __init setup_processor(void)
|
|
sprintf(init_utsname()->machine, ELF_PLATFORM);
|
|
elf_hwcap = 0;
|
|
|
|
+ cpuinfo_store_cpu();
|
|
+
|
|
/*
|
|
* ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
|
|
* The blocks we test below represent incremental functionality
|
|
@@ -289,8 +305,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
|
|
while (true)
|
|
cpu_relax();
|
|
}
|
|
-
|
|
- machine_name = of_flat_dt_get_machine_name();
|
|
}
|
|
|
|
/*
|
|
@@ -395,14 +409,12 @@ static int __init arm64_device_init(void)
|
|
}
|
|
arch_initcall_sync(arm64_device_init);
|
|
|
|
-static DEFINE_PER_CPU(struct cpu, cpu_data);
|
|
-
|
|
static int __init topology_init(void)
|
|
{
|
|
int i;
|
|
|
|
for_each_possible_cpu(i) {
|
|
- struct cpu *cpu = &per_cpu(cpu_data, i);
|
|
+ struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
|
|
cpu->hotpluggable = 1;
|
|
register_cpu(cpu, i);
|
|
}
|
|
@@ -423,14 +435,41 @@ static const char *hwcap_str[] = {
|
|
NULL
|
|
};
|
|
|
|
+#ifdef CONFIG_COMPAT
|
|
+static const char *compat_hwcap_str[] = {
|
|
+ "swp",
|
|
+ "half",
|
|
+ "thumb",
|
|
+ "26bit",
|
|
+ "fastmult",
|
|
+ "fpa",
|
|
+ "vfp",
|
|
+ "edsp",
|
|
+ "java",
|
|
+ "iwmmxt",
|
|
+ "crunch",
|
|
+ "thumbee",
|
|
+ "neon",
|
|
+ "vfpv3",
|
|
+ "vfpv3d16",
|
|
+ "tls",
|
|
+ "vfpv4",
|
|
+ "idiva",
|
|
+ "idivt",
|
|
+ "vfpd32",
|
|
+ "lpae",
|
|
+ "evtstrm"
|
|
+};
|
|
+#endif /* CONFIG_COMPAT */
|
|
+
|
|
static int c_show(struct seq_file *m, void *v)
|
|
{
|
|
- int i;
|
|
-
|
|
- seq_printf(m, "Processor\t: %s rev %d (%s)\n",
|
|
- cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
|
|
+ int i, j;
|
|
|
|
for_each_online_cpu(i) {
|
|
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
|
|
+ u32 midr = cpuinfo->reg_midr;
|
|
+
|
|
/*
|
|
* glibc reads /proc/cpuinfo to determine the number of
|
|
* online processors, looking for lines beginning with
|
|
@@ -439,24 +478,33 @@ static int c_show(struct seq_file *m, void *v)
|
|
#ifdef CONFIG_SMP
|
|
seq_printf(m, "processor\t: %d\n", i);
|
|
#endif
|
|
- }
|
|
-
|
|
- /* dump out the processor features */
|
|
- seq_puts(m, "Features\t: ");
|
|
-
|
|
- for (i = 0; hwcap_str[i]; i++)
|
|
- if (elf_hwcap & (1 << i))
|
|
- seq_printf(m, "%s ", hwcap_str[i]);
|
|
|
|
- seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
|
|
- seq_printf(m, "CPU architecture: AArch64\n");
|
|
- seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
|
|
- seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
|
|
- seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
|
|
-
|
|
- seq_puts(m, "\n");
|
|
+ /*
|
|
+ * Dump out the common processor features in a single line.
|
|
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
|
|
+ * rather than attempting to parse this, but there's a body of
|
|
+ * software which does already (at least for 32-bit).
|
|
+ */
|
|
+ seq_puts(m, "Features\t:");
|
|
+ if (personality(current->personality) == PER_LINUX32) {
|
|
+#ifdef CONFIG_COMPAT
|
|
+ for (j = 0; compat_hwcap_str[j]; j++)
|
|
+ if (compat_elf_hwcap & (1 << j))
|
|
+ seq_printf(m, " %s", compat_hwcap_str[j]);
|
|
+#endif /* CONFIG_COMPAT */
|
|
+ } else {
|
|
+ for (j = 0; hwcap_str[j]; j++)
|
|
+ if (elf_hwcap & (1 << j))
|
|
+ seq_printf(m, " %s", hwcap_str[j]);
|
|
+ }
|
|
+ seq_puts(m, "\n");
|
|
|
|
- seq_printf(m, "Hardware\t: %s\n", machine_name);
|
|
+ seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
|
|
+ seq_printf(m, "CPU architecture: 8\n");
|
|
+ seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
|
|
+ seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
|
|
+ seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
|
|
index b3fc9f5..a966bac 100644
|
|
--- a/arch/arm64/kernel/signal32.c
|
|
+++ b/arch/arm64/kernel/signal32.c
|
|
@@ -151,8 +151,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|
case __SI_TIMER:
|
|
err |= __put_user(from->si_tid, &to->si_tid);
|
|
err |= __put_user(from->si_overrun, &to->si_overrun);
|
|
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
|
|
- &to->si_ptr);
|
|
+ err |= __put_user(from->si_int, &to->si_int);
|
|
break;
|
|
case __SI_POLL:
|
|
err |= __put_user(from->si_band, &to->si_band);
|
|
@@ -166,7 +165,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|
* Other callers might not initialize the si_lsb field,
|
|
* so check explicitely for the right codes here.
|
|
*/
|
|
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
|
|
+ if (from->si_signo == SIGBUS &&
|
|
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
|
|
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
|
|
#endif
|
|
break;
|
|
@@ -181,7 +181,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|
case __SI_MESGQ: /* But this is */
|
|
err |= __put_user(from->si_pid, &to->si_pid);
|
|
err |= __put_user(from->si_uid, &to->si_uid);
|
|
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
|
|
+ err |= __put_user(from->si_int, &to->si_int);
|
|
break;
|
|
default: /* this is just in case for now ... */
|
|
err |= __put_user(from->si_pid, &to->si_pid);
|
|
@@ -193,8 +193,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|
|
|
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
|
{
|
|
- memset(to, 0, sizeof *to);
|
|
-
|
|
if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
|
|
copy_from_user(to->_sifields._pad,
|
|
from->_sifields._pad, SI_PAD_SIZE))
|
|
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
|
|
index b192572..ede186c 100644
|
|
--- a/arch/arm64/kernel/sleep.S
|
|
+++ b/arch/arm64/kernel/sleep.S
|
|
@@ -49,28 +49,39 @@
|
|
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
|
|
.endm
|
|
/*
|
|
- * Save CPU state for a suspend. This saves callee registers, and allocates
|
|
- * space on the kernel stack to save the CPU specific registers + some
|
|
- * other data for resume.
|
|
+ * Save CPU state for a suspend and execute the suspend finisher.
|
|
+ * On success it will return 0 through cpu_resume - ie through a CPU
|
|
+ * soft/hard reboot from the reset vector.
|
|
+ * On failure it returns the suspend finisher return value or force
|
|
+ * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
|
|
+ * is not allowed to return, if it does this must be considered failure).
|
|
+ * It saves callee registers, and allocates space on the kernel stack
|
|
+ * to save the CPU specific registers + some other data for resume.
|
|
*
|
|
* x0 = suspend finisher argument
|
|
+ * x1 = suspend finisher function pointer
|
|
*/
|
|
-ENTRY(__cpu_suspend)
|
|
+ENTRY(__cpu_suspend_enter)
|
|
stp x29, lr, [sp, #-96]!
|
|
stp x19, x20, [sp,#16]
|
|
stp x21, x22, [sp,#32]
|
|
stp x23, x24, [sp,#48]
|
|
stp x25, x26, [sp,#64]
|
|
stp x27, x28, [sp,#80]
|
|
+ /*
|
|
+ * Stash suspend finisher and its argument in x20 and x19
|
|
+ */
|
|
+ mov x19, x0
|
|
+ mov x20, x1
|
|
mov x2, sp
|
|
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
|
|
- mov x1, sp
|
|
+ mov x0, sp
|
|
/*
|
|
- * x1 now points to struct cpu_suspend_ctx allocated on the stack
|
|
+ * x0 now points to struct cpu_suspend_ctx allocated on the stack
|
|
*/
|
|
- str x2, [x1, #CPU_CTX_SP]
|
|
- ldr x2, =sleep_save_sp
|
|
- ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
|
|
+ str x2, [x0, #CPU_CTX_SP]
|
|
+ ldr x1, =sleep_save_sp
|
|
+ ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
|
|
#ifdef CONFIG_SMP
|
|
mrs x7, mpidr_el1
|
|
ldr x9, =mpidr_hash
|
|
@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
|
|
ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
|
|
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
|
|
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
|
|
- add x2, x2, x8, lsl #3
|
|
+ add x1, x1, x8, lsl #3
|
|
#endif
|
|
- bl __cpu_suspend_finisher
|
|
+ bl __cpu_suspend_save
|
|
+ /*
|
|
+ * Grab suspend finisher in x20 and its argument in x19
|
|
+ */
|
|
+ mov x0, x19
|
|
+ mov x1, x20
|
|
+ /*
|
|
+ * We are ready for power down, fire off the suspend finisher
|
|
+ * in x1, with argument in x0
|
|
+ */
|
|
+ blr x1
|
|
/*
|
|
- * Never gets here, unless suspend fails.
|
|
+ * Never gets here, unless suspend finisher fails.
|
|
* Successful cpu_suspend should return from cpu_resume, returning
|
|
* through this code path is considered an error
|
|
* If the return value is set to 0 force x0 = -EOPNOTSUPP
|
|
@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
|
|
ldp x27, x28, [sp, #80]
|
|
ldp x29, lr, [sp], #96
|
|
ret
|
|
-ENDPROC(__cpu_suspend)
|
|
+ENDPROC(__cpu_suspend_enter)
|
|
.ltorg
|
|
|
|
/*
|
|
@@ -126,14 +147,12 @@ cpu_resume_after_mmu:
|
|
ret
|
|
ENDPROC(cpu_resume_after_mmu)
|
|
|
|
- .data
|
|
ENTRY(cpu_resume)
|
|
bl el2_setup // if in EL2 drop to EL1 cleanly
|
|
#ifdef CONFIG_SMP
|
|
mrs x1, mpidr_el1
|
|
- adr x4, mpidr_hash_ptr
|
|
- ldr x5, [x4]
|
|
- add x8, x4, x5 // x8 = struct mpidr_hash phys address
|
|
+ adrp x8, mpidr_hash
|
|
+ add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
|
|
/* retrieve mpidr_hash members to compute the hash */
|
|
ldr x2, [x8, #MPIDR_HASH_MASK]
|
|
ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
|
|
@@ -143,14 +162,15 @@ ENTRY(cpu_resume)
|
|
#else
|
|
mov x7, xzr
|
|
#endif
|
|
- adr x0, sleep_save_sp
|
|
+ adrp x0, sleep_save_sp
|
|
+ add x0, x0, #:lo12:sleep_save_sp
|
|
ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
|
|
ldr x0, [x0, x7, lsl #3]
|
|
/* load sp from context */
|
|
ldr x2, [x0, #CPU_CTX_SP]
|
|
- adr x1, sleep_idmap_phys
|
|
+ adrp x1, sleep_idmap_phys
|
|
/* load physical address of identity map page table in x1 */
|
|
- ldr x1, [x1]
|
|
+ ldr x1, [x1, #:lo12:sleep_idmap_phys]
|
|
mov sp, x2
|
|
/*
|
|
* cpu_do_resume expects x0 to contain context physical address
|
|
@@ -159,26 +179,3 @@ ENTRY(cpu_resume)
|
|
bl cpu_do_resume // PC relative jump, MMU off
|
|
b cpu_resume_mmu // Resume MMU, never returns
|
|
ENDPROC(cpu_resume)
|
|
-
|
|
- .align 3
|
|
-mpidr_hash_ptr:
|
|
- /*
|
|
- * offset of mpidr_hash symbol from current location
|
|
- * used to obtain run-time mpidr_hash address with MMU off
|
|
- */
|
|
- .quad mpidr_hash - .
|
|
-/*
|
|
- * physical address of identity mapped page tables
|
|
- */
|
|
- .type sleep_idmap_phys, #object
|
|
-ENTRY(sleep_idmap_phys)
|
|
- .quad 0
|
|
-/*
|
|
- * struct sleep_save_sp {
|
|
- * phys_addr_t *save_ptr_stash;
|
|
- * phys_addr_t save_ptr_stash_phys;
|
|
- * };
|
|
- */
|
|
- .type sleep_save_sp, #object
|
|
-ENTRY(sleep_save_sp)
|
|
- .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
|
|
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
|
|
index 9660750..665049d 100644
|
|
--- a/arch/arm64/kernel/smp.c
|
|
+++ b/arch/arm64/kernel/smp.c
|
|
@@ -153,6 +153,11 @@ asmlinkage void secondary_start_kernel(void)
|
|
cpu_ops[cpu]->cpu_postboot();
|
|
|
|
/*
|
|
+ * Log the CPU info before it is marked online and might get read.
|
|
+ */
|
|
+ cpuinfo_store_cpu();
|
|
+
|
|
+ /*
|
|
* Enable GIC and timers.
|
|
*/
|
|
notify_cpu_starting(cpu);
|
|
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
|
|
index 1fa9ce4..2d6b606 100644
|
|
--- a/arch/arm64/kernel/suspend.c
|
|
+++ b/arch/arm64/kernel/suspend.c
|
|
@@ -5,26 +5,24 @@
|
|
#include <asm/debug-monitors.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/memory.h>
|
|
+#include <asm/mmu_context.h>
|
|
#include <asm/smp_plat.h>
|
|
#include <asm/suspend.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
-extern int __cpu_suspend(unsigned long);
|
|
+extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
|
|
/*
|
|
- * This is called by __cpu_suspend() to save the state, and do whatever
|
|
+ * This is called by __cpu_suspend_enter() to save the state, and do whatever
|
|
* flushing is required to ensure that when the CPU goes to sleep we have
|
|
* the necessary data available when the caches are not searched.
|
|
*
|
|
- * @arg: Argument to pass to suspend operations
|
|
- * @ptr: CPU context virtual address
|
|
- * @save_ptr: address of the location where the context physical address
|
|
- * must be saved
|
|
+ * ptr: CPU context virtual address
|
|
+ * save_ptr: address of the location where the context physical address
|
|
+ * must be saved
|
|
*/
|
|
-int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
|
|
- phys_addr_t *save_ptr)
|
|
+void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
|
|
+ phys_addr_t *save_ptr)
|
|
{
|
|
- int cpu = smp_processor_id();
|
|
-
|
|
*save_ptr = virt_to_phys(ptr);
|
|
|
|
cpu_do_suspend(ptr);
|
|
@@ -35,8 +33,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
|
|
*/
|
|
__flush_dcache_area(ptr, sizeof(*ptr));
|
|
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
|
|
-
|
|
- return cpu_ops[cpu]->cpu_suspend(arg);
|
|
}
|
|
|
|
/*
|
|
@@ -56,15 +52,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
|
|
}
|
|
|
|
/**
|
|
- * cpu_suspend
|
|
+ * cpu_suspend() - function to enter a low-power state
|
|
+ * @arg: argument to pass to CPU suspend operations
|
|
*
|
|
- * @arg: argument to pass to the finisher function
|
|
+ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
|
|
+ * operations back-end error code otherwise.
|
|
*/
|
|
int cpu_suspend(unsigned long arg)
|
|
{
|
|
- struct mm_struct *mm = current->active_mm;
|
|
- int ret, cpu = smp_processor_id();
|
|
- unsigned long flags;
|
|
+ int cpu = smp_processor_id();
|
|
|
|
/*
|
|
* If cpu_ops have not been registered or suspend
|
|
@@ -72,6 +68,21 @@ int cpu_suspend(unsigned long arg)
|
|
*/
|
|
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
|
|
return -EOPNOTSUPP;
|
|
+ return cpu_ops[cpu]->cpu_suspend(arg);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * __cpu_suspend
|
|
+ *
|
|
+ * arg: argument to pass to the finisher function
|
|
+ * fn: finisher function pointer
|
|
+ *
|
|
+ */
|
|
+int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|
+{
|
|
+ struct mm_struct *mm = current->active_mm;
|
|
+ int ret;
|
|
+ unsigned long flags;
|
|
|
|
/*
|
|
* From this point debug exceptions are disabled to prevent
|
|
@@ -86,16 +97,27 @@ int cpu_suspend(unsigned long arg)
|
|
* page tables, so that the thread address space is properly
|
|
* set-up on function return.
|
|
*/
|
|
- ret = __cpu_suspend(arg);
|
|
+ ret = __cpu_suspend_enter(arg, fn);
|
|
if (ret == 0) {
|
|
- cpu_switch_mm(mm->pgd, mm);
|
|
+ /*
|
|
+ * We are resuming from reset with TTBR0_EL1 set to the
|
|
+ * idmap to enable the MMU; restore the active_mm mappings in
|
|
+ * TTBR0_EL1 unless the active_mm == &init_mm, in which case
|
|
+ * the thread entered __cpu_suspend with TTBR0_EL1 set to
|
|
+ * reserved TTBR0 page tables and should be restored as such.
|
|
+ */
|
|
+ if (mm == &init_mm)
|
|
+ cpu_set_reserved_ttbr0();
|
|
+ else
|
|
+ cpu_switch_mm(mm->pgd, mm);
|
|
+
|
|
flush_tlb_all();
|
|
|
|
/*
|
|
* Restore per-cpu offset before any kernel
|
|
* subsystem relying on it has a chance to run.
|
|
*/
|
|
- set_my_cpu_offset(per_cpu_offset(cpu));
|
|
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
|
|
|
|
/*
|
|
* Restore HW breakpoint registers to sane values
|
|
@@ -116,10 +138,10 @@ int cpu_suspend(unsigned long arg)
|
|
return ret;
|
|
}
|
|
|
|
-extern struct sleep_save_sp sleep_save_sp;
|
|
-extern phys_addr_t sleep_idmap_phys;
|
|
+struct sleep_save_sp sleep_save_sp;
|
|
+phys_addr_t sleep_idmap_phys;
|
|
|
|
-static int cpu_suspend_init(void)
|
|
+static int __init cpu_suspend_init(void)
|
|
{
|
|
void *ctx_ptr;
|
|
|
|
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
|
|
index 26e9c4e..7803992 100644
|
|
--- a/arch/arm64/kernel/sys_compat.c
|
|
+++ b/arch/arm64/kernel/sys_compat.c
|
|
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
|
|
|
|
case __ARM_NR_compat_set_tls:
|
|
current->thread.tp_value = regs->regs[0];
|
|
+
|
|
+ /*
|
|
+ * Protect against register corruption from context switch.
|
|
+ * See comment in tls_thread_flush.
|
|
+ */
|
|
+ barrier();
|
|
asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
|
|
return 0;
|
|
|
|
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
|
|
index 84b9426..f6fe17d 100644
|
|
--- a/arch/arm64/kernel/vdso/Makefile
|
|
+++ b/arch/arm64/kernel/vdso/Makefile
|
|
@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
|
|
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
|
|
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
|
|
|
|
+# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
|
|
+# down to collect2, resulting in silent corruption of the vDSO image.
|
|
+ccflags-y += -Wl,-shared
|
|
+
|
|
obj-y += vdso.o
|
|
extra-y += vdso.lds vdso-offsets.h
|
|
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
|
|
@@ -43,7 +47,7 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
|
|
$(call if_changed,vdsosym)
|
|
|
|
# Assembly rules for the .S files
|
|
-$(obj-vdso): %.o: %.S
|
|
+$(obj-vdso): %.o: %.S FORCE
|
|
$(call if_changed_dep,vdsoas)
|
|
|
|
# Actual build commands
|
|
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
|
|
index 0874557..a8d81fa 100644
|
|
--- a/arch/arm64/kvm/guest.c
|
|
+++ b/arch/arm64/kvm/guest.c
|
|
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
|
|
|
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
|
|
{
|
|
- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
|
|
index 7bc41ea..fd9aeba 100644
|
|
--- a/arch/arm64/kvm/handle_exit.c
|
|
+++ b/arch/arm64/kvm/handle_exit.c
|
|
@@ -62,6 +62,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
else
|
|
kvm_vcpu_block(vcpu);
|
|
|
|
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
|
+
|
|
return 1;
|
|
}
|
|
|
|
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
|
|
index 2b0244d..12e26f3 100644
|
|
--- a/arch/arm64/kvm/hyp-init.S
|
|
+++ b/arch/arm64/kvm/hyp-init.S
|
|
@@ -74,6 +74,10 @@ __do_hyp_init:
|
|
msr mair_el2, x4
|
|
isb
|
|
|
|
+ /* Invalidate the stale TLBs from Bootloader */
|
|
+ tlbi alle2
|
|
+ dsb sy
|
|
+
|
|
mrs x4, sctlr_el2
|
|
and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
|
|
ldr x5, =SCTLR_EL2_FLAGS
|
|
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
|
|
index 2c56012..3aaf3bc 100644
|
|
--- a/arch/arm64/kvm/hyp.S
|
|
+++ b/arch/arm64/kvm/hyp.S
|
|
@@ -629,10 +629,17 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
|
|
* Instead, we invalidate Stage-2 for this IPA, and the
|
|
* whole of Stage-1. Weep...
|
|
*/
|
|
+ lsr x1, x1, #12
|
|
tlbi ipas2e1is, x1
|
|
- dsb sy
|
|
+ /*
|
|
+ * We have to ensure completion of the invalidation at Stage-2,
|
|
+ * since a table walk on another CPU could refill a TLB with a
|
|
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
|
|
+ * the Stage-1 invalidation happened first.
|
|
+ */
|
|
+ dsb ish
|
|
tlbi vmalle1is
|
|
- dsb sy
|
|
+ dsb ish
|
|
isb
|
|
|
|
msr vttbr_el2, xzr
|
|
@@ -643,7 +650,7 @@ ENTRY(__kvm_flush_vm_context)
|
|
dsb ishst
|
|
tlbi alle1is
|
|
ic ialluis
|
|
- dsb sy
|
|
+ dsb ish
|
|
ret
|
|
ENDPROC(__kvm_flush_vm_context)
|
|
|
|
@@ -824,7 +831,7 @@ el1_trap:
|
|
mrs x2, far_el2
|
|
|
|
2: mrs x0, tpidr_el2
|
|
- str x1, [x0, #VCPU_ESR_EL2]
|
|
+ str w1, [x0, #VCPU_ESR_EL2]
|
|
str x2, [x0, #VCPU_FAR_EL2]
|
|
str x3, [x0, #VCPU_HPFAR_EL2]
|
|
|
|
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
|
|
index 81a02a8..86825f8 100644
|
|
--- a/arch/arm64/kvm/inject_fault.c
|
|
+++ b/arch/arm64/kvm/inject_fault.c
|
|
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
|
|
{
|
|
if (!(vcpu->arch.hcr_el2 & HCR_RW))
|
|
inject_abt32(vcpu, false, addr);
|
|
-
|
|
- inject_abt64(vcpu, false, addr);
|
|
+ else
|
|
+ inject_abt64(vcpu, false, addr);
|
|
}
|
|
|
|
/**
|
|
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
|
|
{
|
|
if (!(vcpu->arch.hcr_el2 & HCR_RW))
|
|
inject_abt32(vcpu, true, addr);
|
|
-
|
|
- inject_abt64(vcpu, true, addr);
|
|
+ else
|
|
+ inject_abt64(vcpu, true, addr);
|
|
}
|
|
|
|
/**
|
|
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
|
|
{
|
|
if (!(vcpu->arch.hcr_el2 & HCR_RW))
|
|
inject_undef32(vcpu);
|
|
-
|
|
- inject_undef64(vcpu);
|
|
+ else
|
|
+ inject_undef64(vcpu);
|
|
}
|
|
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
|
|
index 70a7816..0b43265 100644
|
|
--- a/arch/arm64/kvm/reset.c
|
|
+++ b/arch/arm64/kvm/reset.c
|
|
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|
if (!cpu_has_32bit_el1())
|
|
return -EINVAL;
|
|
cpu_reset = &default_regs_reset32;
|
|
- vcpu->arch.hcr_el2 &= ~HCR_RW;
|
|
} else {
|
|
cpu_reset = &default_regs_reset;
|
|
}
|
|
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
|
|
index 02e9d09..7691b25 100644
|
|
--- a/arch/arm64/kvm/sys_regs.c
|
|
+++ b/arch/arm64/kvm/sys_regs.c
|
|
@@ -27,6 +27,7 @@
|
|
#include <asm/kvm_host.h>
|
|
#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_coproc.h>
|
|
+#include <asm/kvm_mmu.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/cputype.h>
|
|
#include <trace/events/kvm.h>
|
|
@@ -121,6 +122,48 @@ done:
|
|
}
|
|
|
|
/*
|
|
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
|
|
+ * is set.
|
|
+ */
|
|
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
|
|
+ const struct sys_reg_params *p,
|
|
+ const struct sys_reg_desc *r)
|
|
+{
|
|
+ unsigned long val;
|
|
+
|
|
+ BUG_ON(!p->is_write);
|
|
+
|
|
+ val = *vcpu_reg(vcpu, p->Rt);
|
|
+ if (!p->is_aarch32) {
|
|
+ vcpu_sys_reg(vcpu, r->reg) = val;
|
|
+ } else {
|
|
+ vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
|
|
+ if (!p->is_32bit)
|
|
+ vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
|
|
+ }
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
|
|
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
|
|
+ * it in complete control of the caches.
|
|
+ */
|
|
+static bool access_sctlr(struct kvm_vcpu *vcpu,
|
|
+ const struct sys_reg_params *p,
|
|
+ const struct sys_reg_desc *r)
|
|
+{
|
|
+ access_vm_reg(vcpu, p, r);
|
|
+
|
|
+ if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
|
|
+ vcpu->arch.hcr_el2 &= ~HCR_TVM;
|
|
+ stage2_flush_vm(vcpu->kvm);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
* We could trap ID_DFR0 and tell the guest we don't support performance
|
|
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
|
|
* NAKed, so it will read the PMCR anyway.
|
|
@@ -185,32 +228,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|
NULL, reset_mpidr, MPIDR_EL1 },
|
|
/* SCTLR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
|
|
- NULL, reset_val, SCTLR_EL1, 0x00C50078 },
|
|
+ access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
|
|
/* CPACR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
|
|
NULL, reset_val, CPACR_EL1, 0 },
|
|
/* TTBR0_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
|
|
- NULL, reset_unknown, TTBR0_EL1 },
|
|
+ access_vm_reg, reset_unknown, TTBR0_EL1 },
|
|
/* TTBR1_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
|
|
- NULL, reset_unknown, TTBR1_EL1 },
|
|
+ access_vm_reg, reset_unknown, TTBR1_EL1 },
|
|
/* TCR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
|
|
- NULL, reset_val, TCR_EL1, 0 },
|
|
+ access_vm_reg, reset_val, TCR_EL1, 0 },
|
|
|
|
/* AFSR0_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
|
|
- NULL, reset_unknown, AFSR0_EL1 },
|
|
+ access_vm_reg, reset_unknown, AFSR0_EL1 },
|
|
/* AFSR1_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
|
|
- NULL, reset_unknown, AFSR1_EL1 },
|
|
+ access_vm_reg, reset_unknown, AFSR1_EL1 },
|
|
/* ESR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
|
|
- NULL, reset_unknown, ESR_EL1 },
|
|
+ access_vm_reg, reset_unknown, ESR_EL1 },
|
|
/* FAR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
|
|
- NULL, reset_unknown, FAR_EL1 },
|
|
+ access_vm_reg, reset_unknown, FAR_EL1 },
|
|
/* PAR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
|
|
NULL, reset_unknown, PAR_EL1 },
|
|
@@ -224,17 +267,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|
|
|
/* MAIR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
|
|
- NULL, reset_unknown, MAIR_EL1 },
|
|
+ access_vm_reg, reset_unknown, MAIR_EL1 },
|
|
/* AMAIR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
|
|
- NULL, reset_amair_el1, AMAIR_EL1 },
|
|
+ access_vm_reg, reset_amair_el1, AMAIR_EL1 },
|
|
|
|
/* VBAR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
|
|
NULL, reset_val, VBAR_EL1, 0 },
|
|
/* CONTEXTIDR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
|
|
- NULL, reset_val, CONTEXTIDR_EL1, 0 },
|
|
+ access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
|
|
/* TPIDR_EL1 */
|
|
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
|
|
NULL, reset_unknown, TPIDR_EL1 },
|
|
@@ -305,14 +348,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|
NULL, reset_val, FPEXC32_EL2, 0x70 },
|
|
};
|
|
|
|
-/* Trapped cp15 registers */
|
|
+/*
|
|
+ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
|
|
+ * depending on the way they are accessed (as a 32bit or a 64bit
|
|
+ * register).
|
|
+ */
|
|
static const struct sys_reg_desc cp15_regs[] = {
|
|
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
|
|
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
|
|
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
|
|
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
|
|
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
|
|
+ { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
|
|
+ { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
|
|
+ { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
|
|
+ { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
|
|
+ { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
|
|
+ { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
|
|
+ { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
|
|
+
|
|
/*
|
|
* DC{C,I,CI}SW operations:
|
|
*/
|
|
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
|
|
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
|
|
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
|
|
+
|
|
{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
|
|
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
|
|
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
|
|
@@ -326,6 +387,14 @@ static const struct sys_reg_desc cp15_regs[] = {
|
|
{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
|
|
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
|
|
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
|
|
+
|
|
+ { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
|
|
+ { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
|
|
+ { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
|
|
+ { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
|
|
+ { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
|
|
+
|
|
+ { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
|
|
};
|
|
|
|
/* Target specific emulation tables */
|
|
@@ -437,6 +506,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
int Rt2 = (hsr >> 10) & 0xf;
|
|
|
|
+ params.is_aarch32 = true;
|
|
+ params.is_32bit = false;
|
|
params.CRm = (hsr >> 1) & 0xf;
|
|
params.Rt = (hsr >> 5) & 0xf;
|
|
params.is_write = ((hsr & 1) == 0);
|
|
@@ -480,6 +551,8 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
struct sys_reg_params params;
|
|
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
|
|
+ params.is_aarch32 = true;
|
|
+ params.is_32bit = true;
|
|
params.CRm = (hsr >> 1) & 0xf;
|
|
params.Rt = (hsr >> 5) & 0xf;
|
|
params.is_write = ((hsr & 1) == 0);
|
|
@@ -549,6 +622,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
struct sys_reg_params params;
|
|
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
|
|
|
|
+ params.is_aarch32 = false;
|
|
+ params.is_32bit = false;
|
|
params.Op0 = (esr >> 20) & 3;
|
|
params.Op1 = (esr >> 14) & 0x7;
|
|
params.CRn = (esr >> 10) & 0xf;
|
|
@@ -761,7 +836,7 @@ static bool is_valid_cache(u32 val)
|
|
u32 level, ctype;
|
|
|
|
if (val >= CSSELR_MAX)
|
|
- return -ENOENT;
|
|
+ return false;
|
|
|
|
/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
|
|
level = (val >> 1);
|
|
@@ -887,7 +962,7 @@ static unsigned int num_demux_regs(void)
|
|
|
|
static int write_demux_regids(u64 __user *uindices)
|
|
{
|
|
- u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
|
|
+ u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
|
|
unsigned int i;
|
|
|
|
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
|
|
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
|
|
index d50d372..d411e25 100644
|
|
--- a/arch/arm64/kvm/sys_regs.h
|
|
+++ b/arch/arm64/kvm/sys_regs.h
|
|
@@ -30,6 +30,8 @@ struct sys_reg_params {
|
|
u8 Op2;
|
|
u8 Rt;
|
|
bool is_write;
|
|
+ bool is_aarch32;
|
|
+ bool is_32bit; /* Only valid if is_aarch32 is true */
|
|
};
|
|
|
|
struct sys_reg_desc {
|
|
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
|
|
index 6e0ed93..c17967f 100644
|
|
--- a/arch/arm64/lib/clear_user.S
|
|
+++ b/arch/arm64/lib/clear_user.S
|
|
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
|
|
sub x1, x1, #2
|
|
4: adds x1, x1, #1
|
|
b.mi 5f
|
|
- strb wzr, [x0]
|
|
+USER(9f, strb wzr, [x0] )
|
|
5: mov x0, #0
|
|
ret
|
|
ENDPROC(__clear_user)
|
|
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
|
|
index baa758d..76c1e6c 100644
|
|
--- a/arch/arm64/mm/context.c
|
|
+++ b/arch/arm64/mm/context.c
|
|
@@ -92,6 +92,14 @@ static void reset_context(void *info)
|
|
unsigned int cpu = smp_processor_id();
|
|
struct mm_struct *mm = current->active_mm;
|
|
|
|
+ /*
|
|
+ * current->active_mm could be init_mm for the idle thread immediately
|
|
+ * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
|
|
+ * the reserved value, so no need to reset any context.
|
|
+ */
|
|
+ if (mm == &init_mm)
|
|
+ return;
|
|
+
|
|
smp_rmb();
|
|
asid = cpu_last_asid + cpu;
|
|
|
|
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
|
|
index 9337524..05d9815 100644
|
|
--- a/arch/arm64/mm/dma-mapping.c
|
|
+++ b/arch/arm64/mm/dma-mapping.c
|
|
@@ -57,6 +57,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
|
|
flags |= GFP_DMA;
|
|
if (IS_ENABLED(CONFIG_DMA_CMA)) {
|
|
struct page *page;
|
|
+ void *addr;
|
|
|
|
size = PAGE_ALIGN(size);
|
|
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
|
|
@@ -65,7 +66,9 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
|
|
return NULL;
|
|
|
|
*dma_handle = phys_to_dma(dev, page_to_phys(page));
|
|
- return page_address(page);
|
|
+ addr = page_address(page);
|
|
+ memset(addr, 0, size);
|
|
+ return addr;
|
|
} else {
|
|
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
|
|
}
|
|
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
|
|
index 023747b..e3a24b7 100644
|
|
--- a/arch/arm64/mm/hugetlbpage.c
|
|
+++ b/arch/arm64/mm/hugetlbpage.c
|
|
@@ -46,13 +46,13 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
|
|
|
|
int pmd_huge(pmd_t pmd)
|
|
{
|
|
- return !(pmd_val(pmd) & PMD_TABLE_BIT);
|
|
+ return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
|
|
}
|
|
|
|
int pud_huge(pud_t pud)
|
|
{
|
|
#ifndef __PAGETABLE_PMD_FOLDED
|
|
- return !(pud_val(pud) & PUD_TABLE_BIT);
|
|
+ return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
|
|
#else
|
|
return 0;
|
|
#endif
|
|
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
|
|
index c872988..6ca4285 100644
|
|
--- a/arch/arm64/mm/init.c
|
|
+++ b/arch/arm64/mm/init.c
|
|
@@ -240,7 +240,7 @@ static void __init free_unused_memmap(void)
|
|
* memmap entries are valid from the bank end aligned to
|
|
* MAX_ORDER_NR_PAGES.
|
|
*/
|
|
- prev_end = ALIGN(start + __phys_to_pfn(reg->size),
|
|
+ prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
|
|
MAX_ORDER_NR_PAGES);
|
|
}
|
|
|
|
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
|
|
index 8ed6cb1..8f7ffff 100644
|
|
--- a/arch/arm64/mm/mmap.c
|
|
+++ b/arch/arm64/mm/mmap.c
|
|
@@ -47,22 +47,14 @@ static int mmap_is_legacy(void)
|
|
return sysctl_legacy_va_layout;
|
|
}
|
|
|
|
-/*
|
|
- * Since get_random_int() returns the same value within a 1 jiffy window, we
|
|
- * will almost always get the same randomisation for the stack and mmap
|
|
- * region. This will mean the relative distance between stack and mmap will be
|
|
- * the same.
|
|
- *
|
|
- * To avoid this we can shift the randomness by 1 bit.
|
|
- */
|
|
static unsigned long mmap_rnd(void)
|
|
{
|
|
unsigned long rnd = 0;
|
|
|
|
if (current->flags & PF_RANDOMIZE)
|
|
- rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
|
|
+ rnd = (long)get_random_int() & STACK_RND_MASK;
|
|
|
|
- return rnd << (PAGE_SHIFT + 1);
|
|
+ return rnd << PAGE_SHIFT;
|
|
}
|
|
|
|
static unsigned long mmap_base(void)
|
|
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
|
|
index 23b1a97..52c179b 100644
|
|
--- a/arch/avr32/mach-at32ap/clock.c
|
|
+++ b/arch/avr32/mach-at32ap/clock.c
|
|
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
|
|
{
|
|
unsigned long flags;
|
|
|
|
+ if (!clk)
|
|
+ return 0;
|
|
+
|
|
spin_lock_irqsave(&clk_lock, flags);
|
|
__clk_enable(clk);
|
|
spin_unlock_irqrestore(&clk_lock, flags);
|
|
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
|
|
{
|
|
unsigned long flags;
|
|
|
|
+ if (IS_ERR_OR_NULL(clk))
|
|
+ return;
|
|
+
|
|
spin_lock_irqsave(&clk_lock, flags);
|
|
__clk_disable(clk);
|
|
spin_unlock_irqrestore(&clk_lock, flags);
|
|
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
|
|
unsigned long flags;
|
|
unsigned long rate;
|
|
|
|
+ if (!clk)
|
|
+ return 0;
|
|
+
|
|
spin_lock_irqsave(&clk_lock, flags);
|
|
rate = clk->get_rate(clk);
|
|
spin_unlock_irqrestore(&clk_lock, flags);
|
|
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
|
|
{
|
|
unsigned long flags, actual_rate;
|
|
|
|
+ if (!clk)
|
|
+ return 0;
|
|
+
|
|
if (!clk->set_rate)
|
|
return -ENOSYS;
|
|
|
|
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
|
|
unsigned long flags;
|
|
long ret;
|
|
|
|
+ if (!clk)
|
|
+ return 0;
|
|
+
|
|
if (!clk->set_rate)
|
|
return -ENOSYS;
|
|
|
|
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
|
|
unsigned long flags;
|
|
int ret;
|
|
|
|
+ if (!clk)
|
|
+ return 0;
|
|
+
|
|
if (!clk->set_parent)
|
|
return -ENOSYS;
|
|
|
|
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
|
|
|
|
struct clk *clk_get_parent(struct clk *clk)
|
|
{
|
|
- return clk->parent;
|
|
+ return !clk ? NULL : clk->parent;
|
|
}
|
|
EXPORT_SYMBOL(clk_get_parent);
|
|
|
|
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
|
|
index 0eca933..d223a8b 100644
|
|
--- a/arch/avr32/mm/fault.c
|
|
+++ b/arch/avr32/mm/fault.c
|
|
@@ -142,6 +142,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
|
|
index 356ee84..04845aa 100644
|
|
--- a/arch/c6x/kernel/time.c
|
|
+++ b/arch/c6x/kernel/time.c
|
|
@@ -49,7 +49,7 @@ u64 sched_clock(void)
|
|
return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
|
|
}
|
|
|
|
-void time_init(void)
|
|
+void __init time_init(void)
|
|
{
|
|
u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
|
|
|
|
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
|
|
index 1790f22..2686a7a 100644
|
|
--- a/arch/cris/mm/fault.c
|
|
+++ b/arch/cris/mm/fault.c
|
|
@@ -176,6 +176,8 @@ retry:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
|
|
index 9a66372..ec4917d 100644
|
|
--- a/arch/frv/mm/fault.c
|
|
+++ b/arch/frv/mm/fault.c
|
|
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
|
|
index 7225dad..ba5ba7a 100644
|
|
--- a/arch/ia64/mm/fault.c
|
|
+++ b/arch/ia64/mm/fault.c
|
|
@@ -172,6 +172,8 @@ retry:
|
|
*/
|
|
if (fault & VM_FAULT_OOM) {
|
|
goto out_of_memory;
|
|
+ } else if (fault & VM_FAULT_SIGSEGV) {
|
|
+ goto bad_area;
|
|
} else if (fault & VM_FAULT_SIGBUS) {
|
|
signal = SIGBUS;
|
|
goto bad_area;
|
|
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
|
|
index e9c6a80..e3d4d48901 100644
|
|
--- a/arch/m32r/mm/fault.c
|
|
+++ b/arch/m32r/mm/fault.c
|
|
@@ -200,6 +200,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
|
|
index 2bd7487..b2f04ae 100644
|
|
--- a/arch/m68k/mm/fault.c
|
|
+++ b/arch/m68k/mm/fault.c
|
|
@@ -145,6 +145,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto map_err;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto bus_err;
|
|
BUG();
|
|
diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
|
|
index 2c7dde3..2a5259f 100644
|
|
--- a/arch/m68k/mm/hwtest.c
|
|
+++ b/arch/m68k/mm/hwtest.c
|
|
@@ -28,9 +28,11 @@
|
|
int hwreg_present( volatile void *regp )
|
|
{
|
|
int ret = 0;
|
|
+ unsigned long flags;
|
|
long save_sp, save_vbr;
|
|
long tmp_vectors[3];
|
|
|
|
+ local_irq_save(flags);
|
|
__asm__ __volatile__
|
|
( "movec %/vbr,%2\n\t"
|
|
"movel #Lberr1,%4@(8)\n\t"
|
|
@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
|
|
: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
|
|
: "a" (regp), "a" (tmp_vectors)
|
|
);
|
|
+ local_irq_restore(flags);
|
|
|
|
return( ret );
|
|
}
|
|
@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
|
|
int hwreg_write( volatile void *regp, unsigned short val )
|
|
{
|
|
int ret;
|
|
+ unsigned long flags;
|
|
long save_sp, save_vbr;
|
|
long tmp_vectors[3];
|
|
|
|
+ local_irq_save(flags);
|
|
__asm__ __volatile__
|
|
( "movec %/vbr,%2\n\t"
|
|
"movel #Lberr2,%4@(8)\n\t"
|
|
@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
|
|
: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
|
|
: "a" (regp), "a" (tmp_vectors), "g" (val)
|
|
);
|
|
+ local_irq_restore(flags);
|
|
|
|
return( ret );
|
|
}
|
|
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
|
|
index a8a3747..eb2005b 100644
|
|
--- a/arch/metag/include/asm/processor.h
|
|
+++ b/arch/metag/include/asm/processor.h
|
|
@@ -149,8 +149,8 @@ extern void exit_thread(void);
|
|
|
|
unsigned long get_wchan(struct task_struct *p);
|
|
|
|
-#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC)
|
|
-#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0)
|
|
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC)
|
|
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0)
|
|
|
|
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
|
|
|
|
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
|
|
index 332680e..2de5dc6 100644
|
|
--- a/arch/metag/mm/fault.c
|
|
+++ b/arch/metag/mm/fault.c
|
|
@@ -141,6 +141,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
|
|
index fa4cf52..d46a5eb 100644
|
|
--- a/arch/microblaze/mm/fault.c
|
|
+++ b/arch/microblaze/mm/fault.c
|
|
@@ -224,6 +224,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
|
|
index c00c4dd..5244cec 100644
|
|
--- a/arch/mips/boot/compressed/decompress.c
|
|
+++ b/arch/mips/boot/compressed/decompress.c
|
|
@@ -13,6 +13,7 @@
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/kernel.h>
|
|
+#include <linux/string.h>
|
|
|
|
#include <asm/addrspace.h>
|
|
|
|
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
|
|
index 331b837..270cb3c 100644
|
|
--- a/arch/mips/cavium-octeon/setup.c
|
|
+++ b/arch/mips/cavium-octeon/setup.c
|
|
@@ -458,6 +458,18 @@ static void octeon_halt(void)
|
|
octeon_kill_core(NULL);
|
|
}
|
|
|
|
+static char __read_mostly octeon_system_type[80];
|
|
+
|
|
+static int __init init_octeon_system_type(void)
|
|
+{
|
|
+ snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
|
|
+ cvmx_board_type_to_string(octeon_bootinfo->board_type),
|
|
+ octeon_model_get_string(read_c0_prid()));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+early_initcall(init_octeon_system_type);
|
|
+
|
|
/**
|
|
* Return a string representing the system type
|
|
*
|
|
@@ -465,11 +477,7 @@ static void octeon_halt(void)
|
|
*/
|
|
const char *octeon_board_type_string(void)
|
|
{
|
|
- static char name[80];
|
|
- sprintf(name, "%s (%s)",
|
|
- cvmx_board_type_to_string(octeon_bootinfo->board_type),
|
|
- octeon_model_get_string(read_c0_prid()));
|
|
- return name;
|
|
+ return octeon_system_type;
|
|
}
|
|
|
|
const char *get_system_type(void)
|
|
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
|
|
index 67a078f..34467ac 100644
|
|
--- a/arch/mips/cavium-octeon/smp.c
|
|
+++ b/arch/mips/cavium-octeon/smp.c
|
|
@@ -263,9 +263,7 @@ static int octeon_cpu_disable(void)
|
|
|
|
set_cpu_online(cpu, false);
|
|
cpu_clear(cpu, cpu_callin_map);
|
|
- local_irq_disable();
|
|
octeon_fixup_irqs();
|
|
- local_irq_enable();
|
|
|
|
flush_cache_all();
|
|
local_flush_tlb_all();
|
|
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
|
|
index 992aaba..b463f2a 100644
|
|
--- a/arch/mips/include/asm/ftrace.h
|
|
+++ b/arch/mips/include/asm/ftrace.h
|
|
@@ -24,7 +24,7 @@ do { \
|
|
asm volatile ( \
|
|
"1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
|
|
" li %[tmp_err], 0\n" \
|
|
- "2:\n" \
|
|
+ "2: .insn\n" \
|
|
\
|
|
".section .fixup, \"ax\"\n" \
|
|
"3: li %[tmp_err], 1\n" \
|
|
@@ -46,7 +46,7 @@ do { \
|
|
asm volatile ( \
|
|
"1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
|
|
" li %[tmp_err], 0\n" \
|
|
- "2:\n" \
|
|
+ "2: .insn\n" \
|
|
\
|
|
".section .fixup, \"ax\"\n" \
|
|
"3: li %[tmp_err], 1\n" \
|
|
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
|
|
index 9488fa5..afc96ec 100644
|
|
--- a/arch/mips/include/asm/mach-generic/spaces.h
|
|
+++ b/arch/mips/include/asm/mach-generic/spaces.h
|
|
@@ -94,7 +94,11 @@
|
|
#endif
|
|
|
|
#ifndef FIXADDR_TOP
|
|
+#ifdef CONFIG_KVM_GUEST
|
|
+#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
|
|
+#else
|
|
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
|
|
#endif
|
|
+#endif
|
|
|
|
#endif /* __ASM_MACH_GENERIC_SPACES_H */
|
|
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
|
|
index 008324d..b154953 100644
|
|
--- a/arch/mips/include/asm/pgtable.h
|
|
+++ b/arch/mips/include/asm/pgtable.h
|
|
@@ -150,8 +150,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
|
|
* Make sure the buddy is global too (if it's !none,
|
|
* it better already be global)
|
|
*/
|
|
+#ifdef CONFIG_SMP
|
|
+ /*
|
|
+ * For SMP, multiple CPUs can race, so we need to do
|
|
+ * this atomically.
|
|
+ */
|
|
+#ifdef CONFIG_64BIT
|
|
+#define LL_INSN "lld"
|
|
+#define SC_INSN "scd"
|
|
+#else /* CONFIG_32BIT */
|
|
+#define LL_INSN "ll"
|
|
+#define SC_INSN "sc"
|
|
+#endif
|
|
+ unsigned long page_global = _PAGE_GLOBAL;
|
|
+ unsigned long tmp;
|
|
+
|
|
+ __asm__ __volatile__ (
|
|
+ " .set push\n"
|
|
+ " .set noreorder\n"
|
|
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
|
|
+ " bnez %[tmp], 2f\n"
|
|
+ " or %[tmp], %[tmp], %[global]\n"
|
|
+ " " SC_INSN " %[tmp], %[buddy]\n"
|
|
+ " beqz %[tmp], 1b\n"
|
|
+ " nop\n"
|
|
+ "2:\n"
|
|
+ " .set pop"
|
|
+ : [buddy] "+m" (buddy->pte),
|
|
+ [tmp] "=&r" (tmp)
|
|
+ : [global] "r" (page_global));
|
|
+#else /* !CONFIG_SMP */
|
|
if (pte_none(*buddy))
|
|
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
|
|
+#endif /* CONFIG_SMP */
|
|
}
|
|
#endif
|
|
}
|
|
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
|
|
index 7bba9da..6d019ca 100644
|
|
--- a/arch/mips/include/asm/ptrace.h
|
|
+++ b/arch/mips/include/asm/ptrace.h
|
|
@@ -23,7 +23,7 @@
|
|
struct pt_regs {
|
|
#ifdef CONFIG_32BIT
|
|
/* Pad bytes for argument save space on the stack. */
|
|
- unsigned long pad0[6];
|
|
+ unsigned long pad0[8];
|
|
#endif
|
|
|
|
/* Saved main processor registers. */
|
|
diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
|
|
index 910e71a..b8343cc 100644
|
|
--- a/arch/mips/include/asm/reg.h
|
|
+++ b/arch/mips/include/asm/reg.h
|
|
@@ -12,116 +12,194 @@
|
|
#ifndef __ASM_MIPS_REG_H
|
|
#define __ASM_MIPS_REG_H
|
|
|
|
-
|
|
-#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
|
|
-
|
|
-#define EF_R0 6
|
|
-#define EF_R1 7
|
|
-#define EF_R2 8
|
|
-#define EF_R3 9
|
|
-#define EF_R4 10
|
|
-#define EF_R5 11
|
|
-#define EF_R6 12
|
|
-#define EF_R7 13
|
|
-#define EF_R8 14
|
|
-#define EF_R9 15
|
|
-#define EF_R10 16
|
|
-#define EF_R11 17
|
|
-#define EF_R12 18
|
|
-#define EF_R13 19
|
|
-#define EF_R14 20
|
|
-#define EF_R15 21
|
|
-#define EF_R16 22
|
|
-#define EF_R17 23
|
|
-#define EF_R18 24
|
|
-#define EF_R19 25
|
|
-#define EF_R20 26
|
|
-#define EF_R21 27
|
|
-#define EF_R22 28
|
|
-#define EF_R23 29
|
|
-#define EF_R24 30
|
|
-#define EF_R25 31
|
|
+#define MIPS32_EF_R0 6
|
|
+#define MIPS32_EF_R1 7
|
|
+#define MIPS32_EF_R2 8
|
|
+#define MIPS32_EF_R3 9
|
|
+#define MIPS32_EF_R4 10
|
|
+#define MIPS32_EF_R5 11
|
|
+#define MIPS32_EF_R6 12
|
|
+#define MIPS32_EF_R7 13
|
|
+#define MIPS32_EF_R8 14
|
|
+#define MIPS32_EF_R9 15
|
|
+#define MIPS32_EF_R10 16
|
|
+#define MIPS32_EF_R11 17
|
|
+#define MIPS32_EF_R12 18
|
|
+#define MIPS32_EF_R13 19
|
|
+#define MIPS32_EF_R14 20
|
|
+#define MIPS32_EF_R15 21
|
|
+#define MIPS32_EF_R16 22
|
|
+#define MIPS32_EF_R17 23
|
|
+#define MIPS32_EF_R18 24
|
|
+#define MIPS32_EF_R19 25
|
|
+#define MIPS32_EF_R20 26
|
|
+#define MIPS32_EF_R21 27
|
|
+#define MIPS32_EF_R22 28
|
|
+#define MIPS32_EF_R23 29
|
|
+#define MIPS32_EF_R24 30
|
|
+#define MIPS32_EF_R25 31
|
|
|
|
/*
|
|
* k0/k1 unsaved
|
|
*/
|
|
-#define EF_R26 32
|
|
-#define EF_R27 33
|
|
+#define MIPS32_EF_R26 32
|
|
+#define MIPS32_EF_R27 33
|
|
|
|
-#define EF_R28 34
|
|
-#define EF_R29 35
|
|
-#define EF_R30 36
|
|
-#define EF_R31 37
|
|
+#define MIPS32_EF_R28 34
|
|
+#define MIPS32_EF_R29 35
|
|
+#define MIPS32_EF_R30 36
|
|
+#define MIPS32_EF_R31 37
|
|
|
|
/*
|
|
* Saved special registers
|
|
*/
|
|
-#define EF_LO 38
|
|
-#define EF_HI 39
|
|
-
|
|
-#define EF_CP0_EPC 40
|
|
-#define EF_CP0_BADVADDR 41
|
|
-#define EF_CP0_STATUS 42
|
|
-#define EF_CP0_CAUSE 43
|
|
-#define EF_UNUSED0 44
|
|
-
|
|
-#define EF_SIZE 180
|
|
-
|
|
-#endif
|
|
-
|
|
-#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
|
|
-
|
|
-#define EF_R0 0
|
|
-#define EF_R1 1
|
|
-#define EF_R2 2
|
|
-#define EF_R3 3
|
|
-#define EF_R4 4
|
|
-#define EF_R5 5
|
|
-#define EF_R6 6
|
|
-#define EF_R7 7
|
|
-#define EF_R8 8
|
|
-#define EF_R9 9
|
|
-#define EF_R10 10
|
|
-#define EF_R11 11
|
|
-#define EF_R12 12
|
|
-#define EF_R13 13
|
|
-#define EF_R14 14
|
|
-#define EF_R15 15
|
|
-#define EF_R16 16
|
|
-#define EF_R17 17
|
|
-#define EF_R18 18
|
|
-#define EF_R19 19
|
|
-#define EF_R20 20
|
|
-#define EF_R21 21
|
|
-#define EF_R22 22
|
|
-#define EF_R23 23
|
|
-#define EF_R24 24
|
|
-#define EF_R25 25
|
|
+#define MIPS32_EF_LO 38
|
|
+#define MIPS32_EF_HI 39
|
|
+
|
|
+#define MIPS32_EF_CP0_EPC 40
|
|
+#define MIPS32_EF_CP0_BADVADDR 41
|
|
+#define MIPS32_EF_CP0_STATUS 42
|
|
+#define MIPS32_EF_CP0_CAUSE 43
|
|
+#define MIPS32_EF_UNUSED0 44
|
|
+
|
|
+#define MIPS32_EF_SIZE 180
|
|
+
|
|
+#define MIPS64_EF_R0 0
|
|
+#define MIPS64_EF_R1 1
|
|
+#define MIPS64_EF_R2 2
|
|
+#define MIPS64_EF_R3 3
|
|
+#define MIPS64_EF_R4 4
|
|
+#define MIPS64_EF_R5 5
|
|
+#define MIPS64_EF_R6 6
|
|
+#define MIPS64_EF_R7 7
|
|
+#define MIPS64_EF_R8 8
|
|
+#define MIPS64_EF_R9 9
|
|
+#define MIPS64_EF_R10 10
|
|
+#define MIPS64_EF_R11 11
|
|
+#define MIPS64_EF_R12 12
|
|
+#define MIPS64_EF_R13 13
|
|
+#define MIPS64_EF_R14 14
|
|
+#define MIPS64_EF_R15 15
|
|
+#define MIPS64_EF_R16 16
|
|
+#define MIPS64_EF_R17 17
|
|
+#define MIPS64_EF_R18 18
|
|
+#define MIPS64_EF_R19 19
|
|
+#define MIPS64_EF_R20 20
|
|
+#define MIPS64_EF_R21 21
|
|
+#define MIPS64_EF_R22 22
|
|
+#define MIPS64_EF_R23 23
|
|
+#define MIPS64_EF_R24 24
|
|
+#define MIPS64_EF_R25 25
|
|
|
|
/*
|
|
* k0/k1 unsaved
|
|
*/
|
|
-#define EF_R26 26
|
|
-#define EF_R27 27
|
|
+#define MIPS64_EF_R26 26
|
|
+#define MIPS64_EF_R27 27
|
|
|
|
|
|
-#define EF_R28 28
|
|
-#define EF_R29 29
|
|
-#define EF_R30 30
|
|
-#define EF_R31 31
|
|
+#define MIPS64_EF_R28 28
|
|
+#define MIPS64_EF_R29 29
|
|
+#define MIPS64_EF_R30 30
|
|
+#define MIPS64_EF_R31 31
|
|
|
|
/*
|
|
* Saved special registers
|
|
*/
|
|
-#define EF_LO 32
|
|
-#define EF_HI 33
|
|
-
|
|
-#define EF_CP0_EPC 34
|
|
-#define EF_CP0_BADVADDR 35
|
|
-#define EF_CP0_STATUS 36
|
|
-#define EF_CP0_CAUSE 37
|
|
-
|
|
-#define EF_SIZE 304 /* size in bytes */
|
|
+#define MIPS64_EF_LO 32
|
|
+#define MIPS64_EF_HI 33
|
|
+
|
|
+#define MIPS64_EF_CP0_EPC 34
|
|
+#define MIPS64_EF_CP0_BADVADDR 35
|
|
+#define MIPS64_EF_CP0_STATUS 36
|
|
+#define MIPS64_EF_CP0_CAUSE 37
|
|
+
|
|
+#define MIPS64_EF_SIZE 304 /* size in bytes */
|
|
+
|
|
+#if defined(CONFIG_32BIT)
|
|
+
|
|
+#define EF_R0 MIPS32_EF_R0
|
|
+#define EF_R1 MIPS32_EF_R1
|
|
+#define EF_R2 MIPS32_EF_R2
|
|
+#define EF_R3 MIPS32_EF_R3
|
|
+#define EF_R4 MIPS32_EF_R4
|
|
+#define EF_R5 MIPS32_EF_R5
|
|
+#define EF_R6 MIPS32_EF_R6
|
|
+#define EF_R7 MIPS32_EF_R7
|
|
+#define EF_R8 MIPS32_EF_R8
|
|
+#define EF_R9 MIPS32_EF_R9
|
|
+#define EF_R10 MIPS32_EF_R10
|
|
+#define EF_R11 MIPS32_EF_R11
|
|
+#define EF_R12 MIPS32_EF_R12
|
|
+#define EF_R13 MIPS32_EF_R13
|
|
+#define EF_R14 MIPS32_EF_R14
|
|
+#define EF_R15 MIPS32_EF_R15
|
|
+#define EF_R16 MIPS32_EF_R16
|
|
+#define EF_R17 MIPS32_EF_R17
|
|
+#define EF_R18 MIPS32_EF_R18
|
|
+#define EF_R19 MIPS32_EF_R19
|
|
+#define EF_R20 MIPS32_EF_R20
|
|
+#define EF_R21 MIPS32_EF_R21
|
|
+#define EF_R22 MIPS32_EF_R22
|
|
+#define EF_R23 MIPS32_EF_R23
|
|
+#define EF_R24 MIPS32_EF_R24
|
|
+#define EF_R25 MIPS32_EF_R25
|
|
+#define EF_R26 MIPS32_EF_R26
|
|
+#define EF_R27 MIPS32_EF_R27
|
|
+#define EF_R28 MIPS32_EF_R28
|
|
+#define EF_R29 MIPS32_EF_R29
|
|
+#define EF_R30 MIPS32_EF_R30
|
|
+#define EF_R31 MIPS32_EF_R31
|
|
+#define EF_LO MIPS32_EF_LO
|
|
+#define EF_HI MIPS32_EF_HI
|
|
+#define EF_CP0_EPC MIPS32_EF_CP0_EPC
|
|
+#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR
|
|
+#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS
|
|
+#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE
|
|
+#define EF_UNUSED0 MIPS32_EF_UNUSED0
|
|
+#define EF_SIZE MIPS32_EF_SIZE
|
|
+
|
|
+#elif defined(CONFIG_64BIT)
|
|
+
|
|
+#define EF_R0 MIPS64_EF_R0
|
|
+#define EF_R1 MIPS64_EF_R1
|
|
+#define EF_R2 MIPS64_EF_R2
|
|
+#define EF_R3 MIPS64_EF_R3
|
|
+#define EF_R4 MIPS64_EF_R4
|
|
+#define EF_R5 MIPS64_EF_R5
|
|
+#define EF_R6 MIPS64_EF_R6
|
|
+#define EF_R7 MIPS64_EF_R7
|
|
+#define EF_R8 MIPS64_EF_R8
|
|
+#define EF_R9 MIPS64_EF_R9
|
|
+#define EF_R10 MIPS64_EF_R10
|
|
+#define EF_R11 MIPS64_EF_R11
|
|
+#define EF_R12 MIPS64_EF_R12
|
|
+#define EF_R13 MIPS64_EF_R13
|
|
+#define EF_R14 MIPS64_EF_R14
|
|
+#define EF_R15 MIPS64_EF_R15
|
|
+#define EF_R16 MIPS64_EF_R16
|
|
+#define EF_R17 MIPS64_EF_R17
|
|
+#define EF_R18 MIPS64_EF_R18
|
|
+#define EF_R19 MIPS64_EF_R19
|
|
+#define EF_R20 MIPS64_EF_R20
|
|
+#define EF_R21 MIPS64_EF_R21
|
|
+#define EF_R22 MIPS64_EF_R22
|
|
+#define EF_R23 MIPS64_EF_R23
|
|
+#define EF_R24 MIPS64_EF_R24
|
|
+#define EF_R25 MIPS64_EF_R25
|
|
+#define EF_R26 MIPS64_EF_R26
|
|
+#define EF_R27 MIPS64_EF_R27
|
|
+#define EF_R28 MIPS64_EF_R28
|
|
+#define EF_R29 MIPS64_EF_R29
|
|
+#define EF_R30 MIPS64_EF_R30
|
|
+#define EF_R31 MIPS64_EF_R31
|
|
+#define EF_LO MIPS64_EF_LO
|
|
+#define EF_HI MIPS64_EF_HI
|
|
+#define EF_CP0_EPC MIPS64_EF_CP0_EPC
|
|
+#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR
|
|
+#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS
|
|
+#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE
|
|
+#define EF_SIZE MIPS64_EF_SIZE
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
|
|
deleted file mode 100644
|
|
index 3adac3b..0000000
|
|
--- a/arch/mips/include/asm/suspend.h
|
|
+++ /dev/null
|
|
@@ -1,7 +0,0 @@
|
|
-#ifndef __ASM_SUSPEND_H
|
|
-#define __ASM_SUSPEND_H
|
|
-
|
|
-/* References to section boundaries */
|
|
-extern const void __nosave_begin, __nosave_end;
|
|
-
|
|
-#endif /* __ASM_SUSPEND_H */
|
|
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
|
|
index 7faf5f2..71df942 100644
|
|
--- a/arch/mips/kernel/binfmt_elfo32.c
|
|
+++ b/arch/mips/kernel/binfmt_elfo32.c
|
|
@@ -72,12 +72,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
|
|
|
#include <asm/processor.h>
|
|
|
|
-/*
|
|
- * When this file is selected, we are definitely running a 64bit kernel.
|
|
- * So using the right regs define in asm/reg.h
|
|
- */
|
|
-#define WANT_COMPAT_REG_H
|
|
-
|
|
/* These MUST be defined before elf.h gets included */
|
|
extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
|
|
#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
|
|
@@ -149,21 +143,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
|
|
{
|
|
int i;
|
|
|
|
- for (i = 0; i < EF_R0; i++)
|
|
+ for (i = 0; i < MIPS32_EF_R0; i++)
|
|
grp[i] = 0;
|
|
- grp[EF_R0] = 0;
|
|
+ grp[MIPS32_EF_R0] = 0;
|
|
for (i = 1; i <= 31; i++)
|
|
- grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
|
|
- grp[EF_R26] = 0;
|
|
- grp[EF_R27] = 0;
|
|
- grp[EF_LO] = (elf_greg_t) regs->lo;
|
|
- grp[EF_HI] = (elf_greg_t) regs->hi;
|
|
- grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
|
|
- grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
|
|
- grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
|
|
- grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
|
|
-#ifdef EF_UNUSED0
|
|
- grp[EF_UNUSED0] = 0;
|
|
+ grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
|
|
+ grp[MIPS32_EF_R26] = 0;
|
|
+ grp[MIPS32_EF_R27] = 0;
|
|
+ grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
|
|
+ grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
|
|
+ grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
|
|
+ grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
|
|
+ grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
|
|
+ grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
|
|
+#ifdef MIPS32_EF_UNUSED0
|
|
+ grp[MIPS32_EF_UNUSED0] = 0;
|
|
#endif
|
|
}
|
|
|
|
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
|
|
index 5b5ddb2..78f1843 100644
|
|
--- a/arch/mips/kernel/irq-gic.c
|
|
+++ b/arch/mips/kernel/irq-gic.c
|
|
@@ -255,11 +255,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
|
|
|
|
/* Setup Intr to Pin mapping */
|
|
if (pin & GIC_MAP_TO_NMI_MSK) {
|
|
+ int i;
|
|
+
|
|
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
|
|
/* FIXME: hack to route NMI to all cpu's */
|
|
- for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
|
|
+ for (i = 0; i < NR_CPUS; i += 32) {
|
|
GICWRITE(GIC_REG_ADDR(SHARED,
|
|
- GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
|
|
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
|
|
0xffffffff);
|
|
}
|
|
} else {
|
|
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
|
|
index d1fea7a..7479d8d 100644
|
|
--- a/arch/mips/kernel/irq.c
|
|
+++ b/arch/mips/kernel/irq.c
|
|
@@ -110,7 +110,7 @@ void __init init_IRQ(void)
|
|
#endif
|
|
}
|
|
|
|
-#ifdef DEBUG_STACKOVERFLOW
|
|
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
|
|
static inline void check_stack_overflow(void)
|
|
{
|
|
unsigned long sp;
|
|
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
|
|
index e498f2b..f5598e2 100644
|
|
--- a/arch/mips/kernel/irq_cpu.c
|
|
+++ b/arch/mips/kernel/irq_cpu.c
|
|
@@ -56,6 +56,8 @@ static struct irq_chip mips_cpu_irq_controller = {
|
|
.irq_mask_ack = mask_mips_irq,
|
|
.irq_unmask = unmask_mips_irq,
|
|
.irq_eoi = unmask_mips_irq,
|
|
+ .irq_disable = mask_mips_irq,
|
|
+ .irq_enable = unmask_mips_irq,
|
|
};
|
|
|
|
/*
|
|
@@ -92,6 +94,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
|
|
.irq_mask_ack = mips_mt_cpu_irq_ack,
|
|
.irq_unmask = unmask_mips_irq,
|
|
.irq_eoi = unmask_mips_irq,
|
|
+ .irq_disable = mask_mips_irq,
|
|
+ .irq_enable = unmask_mips_irq,
|
|
};
|
|
|
|
void __init mips_cpu_irq_init(void)
|
|
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
|
|
index 539b629..8f89ff4 100644
|
|
--- a/arch/mips/kernel/mcount.S
|
|
+++ b/arch/mips/kernel/mcount.S
|
|
@@ -123,7 +123,11 @@ NESTED(_mcount, PT_SIZE, ra)
|
|
nop
|
|
#endif
|
|
b ftrace_stub
|
|
+#ifdef CONFIG_32BIT
|
|
+ addiu sp, sp, 8
|
|
+#else
|
|
nop
|
|
+#endif
|
|
|
|
static_trace:
|
|
MCOUNT_SAVE_REGS
|
|
@@ -133,6 +137,9 @@ static_trace:
|
|
move a1, AT /* arg2: parent's return address */
|
|
|
|
MCOUNT_RESTORE_REGS
|
|
+#ifdef CONFIG_32BIT
|
|
+ addiu sp, sp, 8
|
|
+#endif
|
|
.globl ftrace_stub
|
|
ftrace_stub:
|
|
RETURN_BACK
|
|
@@ -177,6 +184,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
|
|
jal prepare_ftrace_return
|
|
nop
|
|
MCOUNT_RESTORE_REGS
|
|
+#ifndef CONFIG_DYNAMIC_FTRACE
|
|
+#ifdef CONFIG_32BIT
|
|
+ addiu sp, sp, 8
|
|
+#endif
|
|
+#endif
|
|
RETURN_BACK
|
|
END(ftrace_graph_caller)
|
|
|
|
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
|
|
index cb09862..ca16964 100644
|
|
--- a/arch/mips/kernel/mips-mt-fpaff.c
|
|
+++ b/arch/mips/kernel/mips-mt-fpaff.c
|
|
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
|
|
unsigned long __user *user_mask_ptr)
|
|
{
|
|
unsigned int real_len;
|
|
- cpumask_t mask;
|
|
+ cpumask_t allowed, mask;
|
|
int retval;
|
|
struct task_struct *p;
|
|
|
|
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
|
|
if (retval)
|
|
goto out_unlock;
|
|
|
|
- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
|
|
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
|
|
+ cpumask_and(&mask, &allowed, cpu_active_mask);
|
|
|
|
out_unlock:
|
|
read_unlock(&tasklist_lock);
|
|
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
|
|
index 6e58e97..cedeb56 100644
|
|
--- a/arch/mips/kernel/mips_ksyms.c
|
|
+++ b/arch/mips/kernel/mips_ksyms.c
|
|
@@ -14,6 +14,7 @@
|
|
#include <linux/mm.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/ftrace.h>
|
|
+#include <asm/fpu.h>
|
|
|
|
extern void *__bzero(void *__s, size_t __count);
|
|
extern long __strncpy_from_user_nocheck_asm(char *__to,
|
|
@@ -26,6 +27,13 @@ extern long __strnlen_user_nocheck_asm(const char *s);
|
|
extern long __strnlen_user_asm(const char *s);
|
|
|
|
/*
|
|
+ * Core architecture code
|
|
+ */
|
|
+#ifdef CONFIG_CPU_R4K_FPU
|
|
+EXPORT_SYMBOL_GPL(_save_fp);
|
|
+#endif
|
|
+
|
|
+/*
|
|
* String functions
|
|
*/
|
|
EXPORT_SYMBOL(memset);
|
|
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
|
|
index 7da9b76..60f48fe 100644
|
|
--- a/arch/mips/kernel/ptrace.c
|
|
+++ b/arch/mips/kernel/ptrace.c
|
|
@@ -170,6 +170,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
|
|
__get_user(fregs[i], i + (__u64 __user *) data);
|
|
|
|
__get_user(child->thread.fpu.fcr31, data + 64);
|
|
+ child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
|
|
|
|
/* FIR may not be written. */
|
|
|
|
@@ -265,36 +266,160 @@ int ptrace_set_watch_regs(struct task_struct *child,
|
|
|
|
/* regset get/set implementations */
|
|
|
|
-static int gpr_get(struct task_struct *target,
|
|
- const struct user_regset *regset,
|
|
- unsigned int pos, unsigned int count,
|
|
- void *kbuf, void __user *ubuf)
|
|
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
|
|
+
|
|
+static int gpr32_get(struct task_struct *target,
|
|
+ const struct user_regset *regset,
|
|
+ unsigned int pos, unsigned int count,
|
|
+ void *kbuf, void __user *ubuf)
|
|
{
|
|
struct pt_regs *regs = task_pt_regs(target);
|
|
+ u32 uregs[ELF_NGREG] = {};
|
|
+ unsigned i;
|
|
|
|
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
- regs, 0, sizeof(*regs));
|
|
+ for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
|
|
+ /* k0/k1 are copied as zero. */
|
|
+ if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
|
|
+ continue;
|
|
+
|
|
+ uregs[i] = regs->regs[i - MIPS32_EF_R0];
|
|
+ }
|
|
+
|
|
+ uregs[MIPS32_EF_LO] = regs->lo;
|
|
+ uregs[MIPS32_EF_HI] = regs->hi;
|
|
+ uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
|
|
+ uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
|
|
+ uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
|
|
+ uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
|
|
+
|
|
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
|
|
+ sizeof(uregs));
|
|
}
|
|
|
|
-static int gpr_set(struct task_struct *target,
|
|
- const struct user_regset *regset,
|
|
- unsigned int pos, unsigned int count,
|
|
- const void *kbuf, const void __user *ubuf)
|
|
+static int gpr32_set(struct task_struct *target,
|
|
+ const struct user_regset *regset,
|
|
+ unsigned int pos, unsigned int count,
|
|
+ const void *kbuf, const void __user *ubuf)
|
|
{
|
|
- struct pt_regs newregs;
|
|
- int ret;
|
|
+ struct pt_regs *regs = task_pt_regs(target);
|
|
+ u32 uregs[ELF_NGREG];
|
|
+ unsigned start, num_regs, i;
|
|
+ int err;
|
|
|
|
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
- &newregs,
|
|
- 0, sizeof(newregs));
|
|
- if (ret)
|
|
- return ret;
|
|
+ start = pos / sizeof(u32);
|
|
+ num_regs = count / sizeof(u32);
|
|
|
|
- *task_pt_regs(target) = newregs;
|
|
+ if (start + num_regs > ELF_NGREG)
|
|
+ return -EIO;
|
|
+
|
|
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
|
|
+ sizeof(uregs));
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ for (i = start; i < num_regs; i++) {
|
|
+ /*
|
|
+ * Cast all values to signed here so that if this is a 64-bit
|
|
+ * kernel, the supplied 32-bit values will be sign extended.
|
|
+ */
|
|
+ switch (i) {
|
|
+ case MIPS32_EF_R1 ... MIPS32_EF_R25:
|
|
+ /* k0/k1 are ignored. */
|
|
+ case MIPS32_EF_R28 ... MIPS32_EF_R31:
|
|
+ regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
|
|
+ break;
|
|
+ case MIPS32_EF_LO:
|
|
+ regs->lo = (s32)uregs[i];
|
|
+ break;
|
|
+ case MIPS32_EF_HI:
|
|
+ regs->hi = (s32)uregs[i];
|
|
+ break;
|
|
+ case MIPS32_EF_CP0_EPC:
|
|
+ regs->cp0_epc = (s32)uregs[i];
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
|
|
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
|
|
+
|
|
+#ifdef CONFIG_64BIT
|
|
+
|
|
+static int gpr64_get(struct task_struct *target,
|
|
+ const struct user_regset *regset,
|
|
+ unsigned int pos, unsigned int count,
|
|
+ void *kbuf, void __user *ubuf)
|
|
+{
|
|
+ struct pt_regs *regs = task_pt_regs(target);
|
|
+ u64 uregs[ELF_NGREG] = {};
|
|
+ unsigned i;
|
|
+
|
|
+ for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
|
|
+ /* k0/k1 are copied as zero. */
|
|
+ if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
|
|
+ continue;
|
|
+
|
|
+ uregs[i] = regs->regs[i - MIPS64_EF_R0];
|
|
+ }
|
|
+
|
|
+ uregs[MIPS64_EF_LO] = regs->lo;
|
|
+ uregs[MIPS64_EF_HI] = regs->hi;
|
|
+ uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
|
|
+ uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
|
|
+ uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
|
|
+ uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
|
|
+
|
|
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
|
|
+ sizeof(uregs));
|
|
+}
|
|
+
|
|
+static int gpr64_set(struct task_struct *target,
|
|
+ const struct user_regset *regset,
|
|
+ unsigned int pos, unsigned int count,
|
|
+ const void *kbuf, const void __user *ubuf)
|
|
+{
|
|
+ struct pt_regs *regs = task_pt_regs(target);
|
|
+ u64 uregs[ELF_NGREG];
|
|
+ unsigned start, num_regs, i;
|
|
+ int err;
|
|
+
|
|
+ start = pos / sizeof(u64);
|
|
+ num_regs = count / sizeof(u64);
|
|
+
|
|
+ if (start + num_regs > ELF_NGREG)
|
|
+ return -EIO;
|
|
+
|
|
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
|
|
+ sizeof(uregs));
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ for (i = start; i < num_regs; i++) {
|
|
+ switch (i) {
|
|
+ case MIPS64_EF_R1 ... MIPS64_EF_R25:
|
|
+ /* k0/k1 are ignored. */
|
|
+ case MIPS64_EF_R28 ... MIPS64_EF_R31:
|
|
+ regs->regs[i - MIPS64_EF_R0] = uregs[i];
|
|
+ break;
|
|
+ case MIPS64_EF_LO:
|
|
+ regs->lo = uregs[i];
|
|
+ break;
|
|
+ case MIPS64_EF_HI:
|
|
+ regs->hi = uregs[i];
|
|
+ break;
|
|
+ case MIPS64_EF_CP0_EPC:
|
|
+ regs->cp0_epc = uregs[i];
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_64BIT */
|
|
+
|
|
static int fpr_get(struct task_struct *target,
|
|
const struct user_regset *regset,
|
|
unsigned int pos, unsigned int count,
|
|
@@ -322,14 +447,16 @@ enum mips_regset {
|
|
REGSET_FPR,
|
|
};
|
|
|
|
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
|
|
+
|
|
static const struct user_regset mips_regsets[] = {
|
|
[REGSET_GPR] = {
|
|
.core_note_type = NT_PRSTATUS,
|
|
.n = ELF_NGREG,
|
|
.size = sizeof(unsigned int),
|
|
.align = sizeof(unsigned int),
|
|
- .get = gpr_get,
|
|
- .set = gpr_set,
|
|
+ .get = gpr32_get,
|
|
+ .set = gpr32_set,
|
|
},
|
|
[REGSET_FPR] = {
|
|
.core_note_type = NT_PRFPREG,
|
|
@@ -349,14 +476,18 @@ static const struct user_regset_view user_mips_view = {
|
|
.n = ARRAY_SIZE(mips_regsets),
|
|
};
|
|
|
|
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
|
|
+
|
|
+#ifdef CONFIG_64BIT
|
|
+
|
|
static const struct user_regset mips64_regsets[] = {
|
|
[REGSET_GPR] = {
|
|
.core_note_type = NT_PRSTATUS,
|
|
.n = ELF_NGREG,
|
|
.size = sizeof(unsigned long),
|
|
.align = sizeof(unsigned long),
|
|
- .get = gpr_get,
|
|
- .set = gpr_set,
|
|
+ .get = gpr64_get,
|
|
+ .set = gpr64_set,
|
|
},
|
|
[REGSET_FPR] = {
|
|
.core_note_type = NT_PRFPREG,
|
|
@@ -369,25 +500,26 @@ static const struct user_regset mips64_regsets[] = {
|
|
};
|
|
|
|
static const struct user_regset_view user_mips64_view = {
|
|
- .name = "mips",
|
|
+ .name = "mips64",
|
|
.e_machine = ELF_ARCH,
|
|
.ei_osabi = ELF_OSABI,
|
|
.regsets = mips64_regsets,
|
|
- .n = ARRAY_SIZE(mips_regsets),
|
|
+ .n = ARRAY_SIZE(mips64_regsets),
|
|
};
|
|
|
|
+#endif /* CONFIG_64BIT */
|
|
+
|
|
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
|
{
|
|
#ifdef CONFIG_32BIT
|
|
return &user_mips_view;
|
|
-#endif
|
|
-
|
|
+#else
|
|
#ifdef CONFIG_MIPS32_O32
|
|
- if (test_thread_flag(TIF_32BIT_REGS))
|
|
- return &user_mips_view;
|
|
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
|
|
+ return &user_mips_view;
|
|
#endif
|
|
-
|
|
return &user_mips64_view;
|
|
+#endif
|
|
}
|
|
|
|
long arch_ptrace(struct task_struct *child, long request,
|
|
@@ -593,7 +725,7 @@ long arch_ptrace(struct task_struct *child, long request,
|
|
break;
|
|
#endif
|
|
case FPC_CSR:
|
|
- child->thread.fpu.fcr31 = data;
|
|
+ child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
|
|
break;
|
|
case DSP_BASE ... DSP_BASE + 5: {
|
|
dspreg_t *dregs;
|
|
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
|
|
index 3d60f77..ea585cf 100644
|
|
--- a/arch/mips/kernel/signal32.c
|
|
+++ b/arch/mips/kernel/signal32.c
|
|
@@ -370,8 +370,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
|
|
|
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
|
{
|
|
- memset(to, 0, sizeof *to);
|
|
-
|
|
if (copy_from_user(to, from, 3*sizeof(int)) ||
|
|
copy_from_user(to->_sifields._pad,
|
|
from->_sifields._pad, SI_PAD_SIZE32))
|
|
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
|
|
index 0a022ee..18ed112 100644
|
|
--- a/arch/mips/kernel/smp.c
|
|
+++ b/arch/mips/kernel/smp.c
|
|
@@ -109,10 +109,10 @@ asmlinkage void start_secondary(void)
|
|
else
|
|
#endif /* CONFIG_MIPS_MT_SMTC */
|
|
cpu_probe();
|
|
- cpu_report();
|
|
per_cpu_trap_init(false);
|
|
mips_clockevent_init();
|
|
mp_ops->init_secondary();
|
|
+ cpu_report();
|
|
|
|
/*
|
|
* XXX parity protection should be folded in here when it's converted
|
|
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
|
|
index c369a5d..b897dde 100644
|
|
--- a/arch/mips/kernel/unaligned.c
|
|
+++ b/arch/mips/kernel/unaligned.c
|
|
@@ -605,7 +605,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
|
|
case sdc1_op:
|
|
die_if_kernel("Unaligned FP access in kernel code", regs);
|
|
BUG_ON(!used_math());
|
|
- BUG_ON(!is_fpu_owner());
|
|
|
|
lose_fpu(1); /* Save FPU state for the emulator. */
|
|
res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
|
|
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
|
|
index bbace09..03a2db5 100644
|
|
--- a/arch/mips/kvm/kvm_locore.S
|
|
+++ b/arch/mips/kvm/kvm_locore.S
|
|
@@ -428,7 +428,7 @@ __kvm_mips_return_to_guest:
|
|
/* Setup status register for running guest in UM */
|
|
.set at
|
|
or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
|
|
- and v1, v1, ~ST0_CU0
|
|
+ and v1, v1, ~(ST0_CU0 | ST0_MX)
|
|
.set noat
|
|
mtc0 v1, CP0_STATUS
|
|
ehb
|
|
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
|
|
index 3e0ff8d..897c605 100644
|
|
--- a/arch/mips/kvm/kvm_mips.c
|
|
+++ b/arch/mips/kvm/kvm_mips.c
|
|
@@ -15,6 +15,7 @@
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/bootmem.h>
|
|
+#include <asm/fpu.h>
|
|
#include <asm/page.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/mmu_context.h>
|
|
@@ -418,11 +419,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
vcpu->mmio_needed = 0;
|
|
}
|
|
|
|
+ lose_fpu(1);
|
|
+
|
|
+ local_irq_disable();
|
|
/* Check if we have any exceptions/interrupts pending */
|
|
kvm_mips_deliver_interrupts(vcpu,
|
|
kvm_read_c0_guest_cause(vcpu->arch.cop0));
|
|
|
|
- local_irq_disable();
|
|
kvm_guest_enter();
|
|
|
|
r = __kvm_mips_vcpu_run(run, vcpu);
|
|
@@ -1021,9 +1024,6 @@ void kvm_mips_set_c0_status(void)
|
|
{
|
|
uint32_t status = read_c0_status();
|
|
|
|
- if (cpu_has_fpu)
|
|
- status |= (ST0_CU1);
|
|
-
|
|
if (cpu_has_dsp)
|
|
status |= (ST0_MX);
|
|
|
|
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
|
|
index e75ef82..c76f297 100644
|
|
--- a/arch/mips/kvm/kvm_mips_emul.c
|
|
+++ b/arch/mips/kvm/kvm_mips_emul.c
|
|
@@ -1626,7 +1626,7 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
if (vcpu->mmio_needed == 2)
|
|
*gpr = *(int16_t *) run->mmio.data;
|
|
else
|
|
- *gpr = *(int16_t *) run->mmio.data;
|
|
+ *gpr = *(uint16_t *)run->mmio.data;
|
|
|
|
break;
|
|
case 1:
|
|
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
|
|
index bc9e0f4..e51621e 100644
|
|
--- a/arch/mips/kvm/trace.h
|
|
+++ b/arch/mips/kvm/trace.h
|
|
@@ -26,18 +26,18 @@ TRACE_EVENT(kvm_exit,
|
|
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
|
|
TP_ARGS(vcpu, reason),
|
|
TP_STRUCT__entry(
|
|
- __field(struct kvm_vcpu *, vcpu)
|
|
+ __field(unsigned long, pc)
|
|
__field(unsigned int, reason)
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- __entry->vcpu = vcpu;
|
|
+ __entry->pc = vcpu->arch.pc;
|
|
__entry->reason = reason;
|
|
),
|
|
|
|
TP_printk("[%s]PC: 0x%08lx",
|
|
kvm_mips_exit_types_str[__entry->reason],
|
|
- __entry->vcpu->arch.pc)
|
|
+ __entry->pc)
|
|
);
|
|
|
|
#endif /* _TRACE_KVM_H */
|
|
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
|
|
index 9e4484c..9005a8d6 100644
|
|
--- a/arch/mips/loongson/common/Makefile
|
|
+++ b/arch/mips/loongson/common/Makefile
|
|
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
|
|
# Serial port support
|
|
#
|
|
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
|
|
-obj-$(CONFIG_SERIAL_8250) += serial.o
|
|
+loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
|
|
+obj-y += $(loongson-serial-m) $(loongson-serial-y)
|
|
obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
|
|
obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
|
|
|
|
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
|
|
index becc42b..70ab5d6 100644
|
|
--- a/arch/mips/mm/fault.c
|
|
+++ b/arch/mips/mm/fault.c
|
|
@@ -158,6 +158,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
|
|
index b234b1b..dd012c5 100644
|
|
--- a/arch/mips/mm/tlbex.c
|
|
+++ b/arch/mips/mm/tlbex.c
|
|
@@ -1057,6 +1057,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
|
|
struct mips_huge_tlb_info {
|
|
int huge_pte;
|
|
int restore_scratch;
|
|
+ bool need_reload_pte;
|
|
};
|
|
|
|
static struct mips_huge_tlb_info
|
|
@@ -1071,6 +1072,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
|
|
|
|
rv.huge_pte = scratch;
|
|
rv.restore_scratch = 0;
|
|
+ rv.need_reload_pte = false;
|
|
|
|
if (check_for_high_segbits) {
|
|
UASM_i_MFC0(p, tmp, C0_BADVADDR);
|
|
@@ -1259,6 +1261,7 @@ static void build_r4000_tlb_refill_handler(void)
|
|
} else {
|
|
htlb_info.huge_pte = K0;
|
|
htlb_info.restore_scratch = 0;
|
|
+ htlb_info.need_reload_pte = true;
|
|
vmalloc_mode = refill_noscratch;
|
|
/*
|
|
* create the plain linear handler
|
|
@@ -1295,6 +1298,8 @@ static void build_r4000_tlb_refill_handler(void)
|
|
}
|
|
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
|
|
uasm_l_tlb_huge_update(&l, p);
|
|
+ if (htlb_info.need_reload_pte)
|
|
+ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
|
|
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
|
|
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
|
|
htlb_info.restore_scratch);
|
|
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
|
|
index 3190099..d4ab447 100644
|
|
--- a/arch/mips/mti-malta/malta-time.c
|
|
+++ b/arch/mips/mti-malta/malta-time.c
|
|
@@ -168,14 +168,17 @@ unsigned int get_c0_compare_int(void)
|
|
|
|
static void __init init_rtc(void)
|
|
{
|
|
- /* stop the clock whilst setting it up */
|
|
- CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
|
|
+ unsigned char freq, ctrl;
|
|
|
|
- /* 32KHz time base */
|
|
- CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
|
|
+ /* Set 32KHz time base if not already set */
|
|
+ freq = CMOS_READ(RTC_FREQ_SELECT);
|
|
+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
|
|
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
|
|
|
|
- /* start the clock */
|
|
- CMOS_WRITE(RTC_24H, RTC_CONTROL);
|
|
+ /* Ensure SET bit is clear so RTC can run */
|
|
+ ctrl = CMOS_READ(RTC_CONTROL);
|
|
+ if (ctrl & RTC_SET)
|
|
+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
|
|
}
|
|
|
|
void __init plat_time_init(void)
|
|
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
|
|
index 6854ed5..83a1dfd 100644
|
|
--- a/arch/mips/oprofile/backtrace.c
|
|
+++ b/arch/mips/oprofile/backtrace.c
|
|
@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
|
|
/* This marks the end of the previous function,
|
|
which means we overran. */
|
|
break;
|
|
- stack_size = (unsigned) stack_adjustment;
|
|
+ stack_size = (unsigned long) stack_adjustment;
|
|
} else if (is_ra_save_ins(&ip)) {
|
|
int ra_slot = ip.i_format.simmediate;
|
|
if (ra_slot < 0)
|
|
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
|
|
index 521e596..2129e67 100644
|
|
--- a/arch/mips/power/cpu.c
|
|
+++ b/arch/mips/power/cpu.c
|
|
@@ -7,7 +7,7 @@
|
|
* Author: Hu Hongbing <huhb@lemote.com>
|
|
* Wu Zhangjin <wuzhangjin@gmail.com>
|
|
*/
|
|
-#include <asm/suspend.h>
|
|
+#include <asm/sections.h>
|
|
#include <asm/fpu.h>
|
|
#include <asm/dsp.h>
|
|
|
|
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
|
|
index 32a7c82..e7567c8 100644
|
|
--- a/arch/mips/power/hibernate.S
|
|
+++ b/arch/mips/power/hibernate.S
|
|
@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
|
|
END(swsusp_arch_suspend)
|
|
|
|
LEAF(swsusp_arch_resume)
|
|
+ /* Avoid TLB mismatch during and after kernel resume */
|
|
+ jal local_flush_tlb_all
|
|
PTR_L t0, restore_pblist
|
|
0:
|
|
PTR_L t1, PBE_ADDRESS(t0) /* source */
|
|
@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
|
|
bne t1, t3, 1b
|
|
PTR_L t0, PBE_NEXT(t0)
|
|
bnez t0, 0b
|
|
- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
|
|
PTR_LA t0, saved_regs
|
|
PTR_L ra, PT_R31(t0)
|
|
PTR_L sp, PT_R29(t0)
|
|
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
|
|
index 3516cbd..0c2cc5d 100644
|
|
--- a/arch/mn10300/mm/fault.c
|
|
+++ b/arch/mn10300/mm/fault.c
|
|
@@ -262,6 +262,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
|
|
index 0703acf..230ac20 100644
|
|
--- a/arch/openrisc/mm/fault.c
|
|
+++ b/arch/openrisc/mm/fault.c
|
|
@@ -171,6 +171,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
|
|
index 7187664..5db8882 100644
|
|
--- a/arch/parisc/Makefile
|
|
+++ b/arch/parisc/Makefile
|
|
@@ -48,7 +48,12 @@ cflags-y := -pipe
|
|
|
|
# These flags should be implied by an hppa-linux configuration, but they
|
|
# are not in gcc 3.2.
|
|
-cflags-y += -mno-space-regs -mfast-indirect-calls
|
|
+cflags-y += -mno-space-regs
|
|
+
|
|
+# -mfast-indirect-calls is only relevant for 32-bit kernels.
|
|
+ifndef CONFIG_64BIT
|
|
+cflags-y += -mfast-indirect-calls
|
|
+endif
|
|
|
|
# Currently we save and restore fpregs on all kernel entry/interruption paths.
|
|
# If that gets optimized, we might need to disable the use of fpregs in the
|
|
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
|
|
index d2d11b7..8121aa6 100644
|
|
--- a/arch/parisc/include/asm/ldcw.h
|
|
+++ b/arch/parisc/include/asm/ldcw.h
|
|
@@ -33,11 +33,18 @@
|
|
|
|
#endif /*!CONFIG_PA20*/
|
|
|
|
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
|
|
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
|
|
+ We don't explicitly expose that "*a" may be written as reload
|
|
+ fails to find a register in class R1_REGS when "a" needs to be
|
|
+ reloaded when generating 64-bit PIC code. Instead, we clobber
|
|
+ memory to indicate to the compiler that the assembly code reads
|
|
+ or writes to items other than those listed in the input and output
|
|
+ operands. This may pessimize the code somewhat but __ldcw is
|
|
+   usually used within code blocks surrounded by memory barriers. */
|
|
#define __ldcw(a) ({ \
|
|
unsigned __ret; \
|
|
- __asm__ __volatile__(__LDCW " 0(%2),%0" \
|
|
- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
|
|
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
|
|
+ : "=r" (__ret) : "r" (a) : "memory"); \
|
|
__ret; \
|
|
})
|
|
|
|
diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h
|
|
index 0a3eada..f395cde 100644
|
|
--- a/arch/parisc/include/uapi/asm/shmbuf.h
|
|
+++ b/arch/parisc/include/uapi/asm/shmbuf.h
|
|
@@ -36,23 +36,16 @@ struct shmid64_ds {
|
|
unsigned int __unused2;
|
|
};
|
|
|
|
-#ifdef CONFIG_64BIT
|
|
-/* The 'unsigned int' (formerly 'unsigned long') data types below will
|
|
- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
|
|
- * a wide kernel, but if some of these values are meant to contain pointers
|
|
- * they may need to be 'long long' instead. -PB XXX FIXME
|
|
- */
|
|
-#endif
|
|
struct shminfo64 {
|
|
- unsigned int shmmax;
|
|
- unsigned int shmmin;
|
|
- unsigned int shmmni;
|
|
- unsigned int shmseg;
|
|
- unsigned int shmall;
|
|
- unsigned int __unused1;
|
|
- unsigned int __unused2;
|
|
- unsigned int __unused3;
|
|
- unsigned int __unused4;
|
|
+ unsigned long shmmax;
|
|
+ unsigned long shmmin;
|
|
+ unsigned long shmmni;
|
|
+ unsigned long shmseg;
|
|
+ unsigned long shmall;
|
|
+ unsigned long __unused1;
|
|
+ unsigned long __unused2;
|
|
+ unsigned long __unused3;
|
|
+ unsigned long __unused4;
|
|
};
|
|
|
|
#endif /* _PARISC_SHMBUF_H */
|
|
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
|
|
index a2fa2971..f5645d6 100644
|
|
--- a/arch/parisc/include/uapi/asm/signal.h
|
|
+++ b/arch/parisc/include/uapi/asm/signal.h
|
|
@@ -69,8 +69,6 @@
|
|
#define SA_NOMASK SA_NODEFER
|
|
#define SA_ONESHOT SA_RESETHAND
|
|
|
|
-#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
|
|
-
|
|
#define MINSIGSTKSZ 2048
|
|
#define SIGSTKSZ 8192
|
|
|
|
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
|
|
index 8387860..7ef22e3 100644
|
|
--- a/arch/parisc/kernel/syscall.S
|
|
+++ b/arch/parisc/kernel/syscall.S
|
|
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
|
|
/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
|
|
/* Light-weight-syscall entry must always be located at 0xb0 */
|
|
/* WARNING: Keep this number updated with table size changes */
|
|
-#define __NR_lws_entries (2)
|
|
+#define __NR_lws_entries (3)
|
|
|
|
lws_entry:
|
|
gate lws_start, %r0 /* increase privilege */
|
|
@@ -502,7 +502,7 @@ lws_exit:
|
|
|
|
|
|
/***************************************************
|
|
- Implementing CAS as an atomic operation:
|
|
+ Implementing 32bit CAS as an atomic operation:
|
|
|
|
%r26 - Address to examine
|
|
%r25 - Old value to check (old)
|
|
@@ -659,6 +659,230 @@ cas_action:
|
|
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
|
|
|
|
|
|
+ /***************************************************
|
|
+ New CAS implementation which uses pointers and variable size
|
|
+   information. The values pointed to by old and new MUST NOT change
|
|
+   while performing CAS. The lock only protects the value at %r26.
|
|
+
|
|
+ %r26 - Address to examine
|
|
+ %r25 - Pointer to the value to check (old)
|
|
+ %r24 - Pointer to the value to set (new)
|
|
+ %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
|
|
+ %r28 - Return non-zero on failure
|
|
+ %r21 - Kernel error code
|
|
+
|
|
+ %r21 has the following meanings:
|
|
+
|
|
+ EAGAIN - CAS is busy, ldcw failed, try again.
|
|
+ EFAULT - Read or write failed.
|
|
+
|
|
+ Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
|
|
+
|
|
+ ****************************************************/
|
|
+
|
|
+ /* ELF32 Process entry path */
|
|
+lws_compare_and_swap_2:
|
|
+#ifdef CONFIG_64BIT
|
|
+ /* Clip the input registers */
|
|
+ depdi 0, 31, 32, %r26
|
|
+ depdi 0, 31, 32, %r25
|
|
+ depdi 0, 31, 32, %r24
|
|
+ depdi 0, 31, 32, %r23
|
|
+#endif
|
|
+
|
|
+ /* Check the validity of the size pointer */
|
|
+ subi,>>= 4, %r23, %r0
|
|
+ b,n lws_exit_nosys
|
|
+
|
|
+ /* Jump to the functions which will load the old and new values into
|
|
+ registers depending on the their size */
|
|
+ shlw %r23, 2, %r29
|
|
+ blr %r29, %r0
|
|
+ nop
|
|
+
|
|
+ /* 8bit load */
|
|
+4: ldb 0(%sr3,%r25), %r25
|
|
+ b cas2_lock_start
|
|
+5: ldb 0(%sr3,%r24), %r24
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* 16bit load */
|
|
+6: ldh 0(%sr3,%r25), %r25
|
|
+ b cas2_lock_start
|
|
+7: ldh 0(%sr3,%r24), %r24
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* 32bit load */
|
|
+8: ldw 0(%sr3,%r25), %r25
|
|
+ b cas2_lock_start
|
|
+9: ldw 0(%sr3,%r24), %r24
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* 64bit load */
|
|
+#ifdef CONFIG_64BIT
|
|
+10: ldd 0(%sr3,%r25), %r25
|
|
+11: ldd 0(%sr3,%r24), %r24
|
|
+#else
|
|
+ /* Load new value into r22/r23 - high/low */
|
|
+10: ldw 0(%sr3,%r25), %r22
|
|
+11: ldw 4(%sr3,%r25), %r23
|
|
+ /* Load new value into fr4 for atomic store later */
|
|
+12: flddx 0(%sr3,%r24), %fr4
|
|
+#endif
|
|
+
|
|
+cas2_lock_start:
|
|
+ /* Load start of lock table */
|
|
+ ldil L%lws_lock_start, %r20
|
|
+ ldo R%lws_lock_start(%r20), %r28
|
|
+
|
|
+ /* Extract four bits from r26 and hash lock (Bits 4-7) */
|
|
+ extru %r26, 27, 4, %r20
|
|
+
|
|
+ /* Find lock to use, the hash is either one of 0 to
|
|
+ 15, multiplied by 16 (keep it 16-byte aligned)
|
|
+ and add to the lock table offset. */
|
|
+ shlw %r20, 4, %r20
|
|
+ add %r20, %r28, %r20
|
|
+
|
|
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
|
|
+ /* COW breaks can cause contention on UP systems */
|
|
+ LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
|
|
+ cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
|
|
+cas2_wouldblock:
|
|
+ ldo 2(%r0), %r28 /* 2nd case */
|
|
+ ssm PSW_SM_I, %r0
|
|
+ b lws_exit /* Contended... */
|
|
+ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
|
|
+
|
|
+ /*
|
|
+ prev = *addr;
|
|
+ if ( prev == old )
|
|
+ *addr = new;
|
|
+ return prev;
|
|
+ */
|
|
+
|
|
+ /* NOTES:
|
|
+	   This all works because intr_do_signal
|
|
+ and schedule both check the return iasq
|
|
+ and see that we are on the kernel page
|
|
+ so this process is never scheduled off
|
|
+ or is ever sent any signal of any sort,
|
|
+	   thus it is wholly atomic from userspace's
|
|
+ perspective
|
|
+ */
|
|
+cas2_action:
|
|
+ /* Jump to the correct function */
|
|
+ blr %r29, %r0
|
|
+ /* Set %r28 as non-zero for now */
|
|
+ ldo 1(%r0),%r28
|
|
+
|
|
+ /* 8bit CAS */
|
|
+13: ldb,ma 0(%sr3,%r26), %r29
|
|
+ sub,= %r29, %r25, %r0
|
|
+ b,n cas2_end
|
|
+14: stb,ma %r24, 0(%sr3,%r26)
|
|
+ b cas2_end
|
|
+ copy %r0, %r28
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* 16bit CAS */
|
|
+15: ldh,ma 0(%sr3,%r26), %r29
|
|
+ sub,= %r29, %r25, %r0
|
|
+ b,n cas2_end
|
|
+16: sth,ma %r24, 0(%sr3,%r26)
|
|
+ b cas2_end
|
|
+ copy %r0, %r28
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* 32bit CAS */
|
|
+17: ldw,ma 0(%sr3,%r26), %r29
|
|
+ sub,= %r29, %r25, %r0
|
|
+ b,n cas2_end
|
|
+18: stw,ma %r24, 0(%sr3,%r26)
|
|
+ b cas2_end
|
|
+ copy %r0, %r28
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* 64bit CAS */
|
|
+#ifdef CONFIG_64BIT
|
|
+19: ldd,ma 0(%sr3,%r26), %r29
|
|
+ sub,= %r29, %r25, %r0
|
|
+ b,n cas2_end
|
|
+20: std,ma %r24, 0(%sr3,%r26)
|
|
+ copy %r0, %r28
|
|
+#else
|
|
+ /* Compare first word */
|
|
+19: ldw,ma 0(%sr3,%r26), %r29
|
|
+ sub,= %r29, %r22, %r0
|
|
+ b,n cas2_end
|
|
+ /* Compare second word */
|
|
+20: ldw,ma 4(%sr3,%r26), %r29
|
|
+ sub,= %r29, %r23, %r0
|
|
+ b,n cas2_end
|
|
+ /* Perform the store */
|
|
+21: fstdx %fr4, 0(%sr3,%r26)
|
|
+ copy %r0, %r28
|
|
+#endif
|
|
+
|
|
+cas2_end:
|
|
+ /* Free lock */
|
|
+ stw,ma %r20, 0(%sr2,%r20)
|
|
+ /* Enable interrupts */
|
|
+ ssm PSW_SM_I, %r0
|
|
+ /* Return to userspace, set no error */
|
|
+ b lws_exit
|
|
+ copy %r0, %r21
|
|
+
|
|
+22:
|
|
+ /* Error occurred on load or store */
|
|
+ /* Free lock */
|
|
+ stw %r20, 0(%sr2,%r20)
|
|
+ ssm PSW_SM_I, %r0
|
|
+ ldo 1(%r0),%r28
|
|
+ b lws_exit
|
|
+ ldo -EFAULT(%r0),%r21 /* set errno */
|
|
+ nop
|
|
+ nop
|
|
+ nop
|
|
+
|
|
+ /* Exception table entries, for the load and store, return EFAULT.
|
|
+ Each of the entries must be relocated. */
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+#ifndef CONFIG_64BIT
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+ ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
|
|
+#endif
|
|
+
|
|
/* Make sure nothing else is placed on this page */
|
|
.align PAGE_SIZE
|
|
END(linux_gateway_page)
|
|
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
|
|
/* Light-weight-syscall table */
|
|
/* Start of lws table. */
|
|
ENTRY(lws_table)
|
|
- LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
|
|
- LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
|
|
+ LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */
|
|
+ LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */
|
|
+ LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */
|
|
END(lws_table)
|
|
/* End of lws table */
|
|
|
|
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
|
|
index 7dd8a3b..fc77d53 100644
|
|
--- a/arch/parisc/kernel/syscall_table.S
|
|
+++ b/arch/parisc/kernel/syscall_table.S
|
|
@@ -286,11 +286,11 @@
|
|
ENTRY_COMP(msgsnd)
|
|
ENTRY_COMP(msgrcv)
|
|
ENTRY_SAME(msgget) /* 190 */
|
|
- ENTRY_SAME(msgctl)
|
|
- ENTRY_SAME(shmat)
|
|
+ ENTRY_COMP(msgctl)
|
|
+ ENTRY_COMP(shmat)
|
|
ENTRY_SAME(shmdt)
|
|
ENTRY_SAME(shmget)
|
|
- ENTRY_SAME(shmctl) /* 195 */
|
|
+ ENTRY_COMP(shmctl) /* 195 */
|
|
ENTRY_SAME(ni_syscall) /* streams1 */
|
|
ENTRY_SAME(ni_syscall) /* streams2 */
|
|
ENTRY_SAME(lstat64)
|
|
@@ -323,7 +323,7 @@
|
|
ENTRY_SAME(epoll_ctl) /* 225 */
|
|
ENTRY_SAME(epoll_wait)
|
|
ENTRY_SAME(remap_file_pages)
|
|
- ENTRY_SAME(semtimedop)
|
|
+ ENTRY_COMP(semtimedop)
|
|
ENTRY_COMP(mq_open)
|
|
ENTRY_SAME(mq_unlink) /* 230 */
|
|
ENTRY_COMP(mq_timedsend)
|
|
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
|
|
index d72197f..d27e388 100644
|
|
--- a/arch/parisc/mm/fault.c
|
|
+++ b/arch/parisc/mm/fault.c
|
|
@@ -256,6 +256,8 @@ good_area:
|
|
*/
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto bad_area;
|
|
BUG();
|
|
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
|
|
index 1382fec..7fcb1ac 100644
|
|
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
|
|
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
|
|
@@ -50,6 +50,7 @@ ethernet@b0000 {
|
|
fsl,num_tx_queues = <0x8>;
|
|
fsl,magic-packet;
|
|
local-mac-address = [ 00 00 00 00 00 00 ];
|
|
+ ranges;
|
|
|
|
queue-group@b0000 {
|
|
#address-cells = <1>;
|
|
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
|
|
index 221cd2e..9f25427 100644
|
|
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
|
|
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
|
|
@@ -50,6 +50,7 @@ ethernet@b1000 {
|
|
fsl,num_tx_queues = <0x8>;
|
|
fsl,magic-packet;
|
|
local-mac-address = [ 00 00 00 00 00 00 ];
|
|
+ ranges;
|
|
|
|
queue-group@b1000 {
|
|
#address-cells = <1>;
|
|
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
|
|
index 61456c3..cd7c318 100644
|
|
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
|
|
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
|
|
@@ -49,6 +49,7 @@ ethernet@b2000 {
|
|
fsl,num_tx_queues = <0x8>;
|
|
fsl,magic-packet;
|
|
local-mac-address = [ 00 00 00 00 00 00 ];
|
|
+ ranges;
|
|
|
|
queue-group@b2000 {
|
|
#address-cells = <1>;
|
|
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
|
|
index f9e8b94..b51da91 100644
|
|
--- a/arch/powerpc/crypto/sha1.c
|
|
+++ b/arch/powerpc/crypto/sha1.c
|
|
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
|
|
|
|
-MODULE_ALIAS("sha1-powerpc");
|
|
+MODULE_ALIAS_CRYPTO("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1-powerpc");
|
|
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
|
|
index ad3025d..f207868 100644
|
|
--- a/arch/powerpc/include/asm/machdep.h
|
|
+++ b/arch/powerpc/include/asm/machdep.h
|
|
@@ -57,10 +57,10 @@ struct machdep_calls {
|
|
void (*hpte_removebolted)(unsigned long ea,
|
|
int psize, int ssize);
|
|
void (*flush_hash_range)(unsigned long number, int local);
|
|
- void (*hugepage_invalidate)(struct mm_struct *mm,
|
|
+ void (*hugepage_invalidate)(unsigned long vsid,
|
|
+ unsigned long addr,
|
|
unsigned char *hpte_slot_array,
|
|
- unsigned long addr, int psize);
|
|
-
|
|
+ int psize, int ssize);
|
|
/* special for kexec, to be called in real mode, linear mapping is
|
|
* destroyed as well */
|
|
void (*hpte_clear_all)(void);
|
|
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
|
|
index eb92610..7b3d54f 100644
|
|
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
|
|
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
|
|
@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
|
|
}
|
|
|
|
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
|
|
- pmd_t *pmdp);
|
|
+ pmd_t *pmdp, unsigned long old_pmd);
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
|
|
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
|
|
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
|
|
index d836d94..9ecede1 100644
|
|
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
|
|
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
|
|
@@ -46,11 +46,31 @@
|
|
* in order to deal with 64K made of 4K HW pages. Thus we override the
|
|
* generic accessors and iterators here
|
|
*/
|
|
-#define __real_pte(e,p) ((real_pte_t) { \
|
|
- (e), (pte_val(e) & _PAGE_COMBO) ? \
|
|
- (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
|
|
-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
|
|
- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
|
|
+#define __real_pte __real_pte
|
|
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
|
|
+{
|
|
+ real_pte_t rpte;
|
|
+
|
|
+ rpte.pte = pte;
|
|
+ rpte.hidx = 0;
|
|
+ if (pte_val(pte) & _PAGE_COMBO) {
|
|
+ /*
|
|
+ * Make sure we order the hidx load against the _PAGE_COMBO
|
|
+ * check. The store side ordering is done in __hash_page_4K
|
|
+ */
|
|
+ smp_rmb();
|
|
+ rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
|
|
+ }
|
|
+ return rpte;
|
|
+}
|
|
+
|
|
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
|
|
+{
|
|
+ if ((pte_val(rpte.pte) & _PAGE_COMBO))
|
|
+ return (rpte.hidx >> (index<<2)) & 0xf;
|
|
+ return (pte_val(rpte.pte) >> 12) & 0xf;
|
|
+}
|
|
+
|
|
#define __rpte_to_pte(r) ((r).pte)
|
|
#define __rpte_sub_valid(rpte, index) \
|
|
(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
|
|
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
|
|
index 279b80f..c0c61fa 100644
|
|
--- a/arch/powerpc/include/asm/ptrace.h
|
|
+++ b/arch/powerpc/include/asm/ptrace.h
|
|
@@ -47,6 +47,12 @@
|
|
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
|
|
#define STACK_FRAME_MARKER 12
|
|
|
|
+#if defined(_CALL_ELF) && _CALL_ELF == 2
|
|
+#define STACK_FRAME_MIN_SIZE 32
|
|
+#else
|
|
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
|
|
+#endif
|
|
+
|
|
/* Size of dummy stack frame allocated when calling signal handler. */
|
|
#define __SIGNAL_FRAMESIZE 128
|
|
#define __SIGNAL_FRAMESIZE32 64
|
|
@@ -60,6 +66,7 @@
|
|
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
|
|
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
|
|
#define STACK_FRAME_MARKER 2
|
|
+#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
|
|
|
|
/* Size of stack frame allocated when calling signal handler. */
|
|
#define __SIGNAL_FRAMESIZE 64
|
|
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
|
|
index 35aa339..4dbe072 100644
|
|
--- a/arch/powerpc/include/asm/spinlock.h
|
|
+++ b/arch/powerpc/include/asm/spinlock.h
|
|
@@ -61,6 +61,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
|
|
|
|
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
|
|
{
|
|
+ smp_mb();
|
|
return !arch_spin_value_unlocked(*lock);
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
|
|
index 2912b87..3eb36ce 100644
|
|
--- a/arch/powerpc/kernel/cacheinfo.c
|
|
+++ b/arch/powerpc/kernel/cacheinfo.c
|
|
@@ -61,12 +61,22 @@ struct cache_type_info {
|
|
};
|
|
|
|
/* These are used to index the cache_type_info array. */
|
|
-#define CACHE_TYPE_UNIFIED 0
|
|
-#define CACHE_TYPE_INSTRUCTION 1
|
|
-#define CACHE_TYPE_DATA 2
|
|
+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
|
|
+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
|
|
+#define CACHE_TYPE_INSTRUCTION 2
|
|
+#define CACHE_TYPE_DATA 3
|
|
|
|
static const struct cache_type_info cache_type_info[] = {
|
|
{
|
|
+ /* Embedded systems that use cache-size, cache-block-size,
|
|
+ * etc. for the Unified (typically L2) cache. */
|
|
+ .name = "Unified",
|
|
+ .size_prop = "cache-size",
|
|
+ .line_size_props = { "cache-line-size",
|
|
+ "cache-block-size", },
|
|
+ .nr_sets_prop = "cache-sets",
|
|
+ },
|
|
+ {
|
|
/* PowerPC Processor binding says the [di]-cache-*
|
|
* must be equal on unified caches, so just use
|
|
* d-cache properties. */
|
|
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
|
|
{
|
|
struct cache *iter;
|
|
|
|
- if (cache->type == CACHE_TYPE_UNIFIED)
|
|
+ if (cache->type == CACHE_TYPE_UNIFIED ||
|
|
+ cache->type == CACHE_TYPE_UNIFIED_D)
|
|
return cache;
|
|
|
|
list_for_each_entry(iter, &cache_list, list)
|
|
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
|
|
return of_get_property(np, "cache-unified", NULL);
|
|
}
|
|
|
|
-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
|
|
- int level)
|
|
+/*
|
|
+ * Unified caches can have two different sets of tags. Most embedded
|
|
+ * use cache-size, etc. for the unified cache size, but open firmware systems
|
|
+ * use d-cache-size, etc. Check on initialization for which type we have, and
|
|
+ * return the appropriate structure type. Assume it's embedded if it isn't
|
|
+ * open firmware. If it's yet a 3rd type, then there will be missing entries
|
|
+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
|
|
+ * to be extended further.
|
|
+ */
|
|
+static int cache_is_unified_d(const struct device_node *np)
|
|
{
|
|
- struct cache *cache;
|
|
+ return of_get_property(np,
|
|
+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
|
|
+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
|
|
+}
|
|
|
|
+/*
|
|
+ */
|
|
+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
|
|
+{
|
|
pr_debug("creating L%d ucache for %s\n", level, node->full_name);
|
|
|
|
- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
|
|
-
|
|
- return cache;
|
|
+ return new_cache(cache_is_unified_d(node), level, node);
|
|
}
|
|
|
|
static struct cache *cache_do_one_devnode_split(struct device_node *node,
|
|
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
|
|
index 38d5073..5193116 100644
|
|
--- a/arch/powerpc/kernel/exceptions-64s.S
|
|
+++ b/arch/powerpc/kernel/exceptions-64s.S
|
|
@@ -1422,7 +1422,7 @@ machine_check_handle_early:
|
|
bne 9f /* continue in V mode if we are. */
|
|
|
|
5:
|
|
-#ifdef CONFIG_KVM_BOOK3S_64_HV
|
|
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
|
|
/*
|
|
* We are coming from kernel context. Check if we are coming from
|
|
* guest. if yes, then we can continue. We will fall through
|
|
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
|
|
index 27c93f4..fc0927a 100644
|
|
--- a/arch/powerpc/kernel/mce_power.c
|
|
+++ b/arch/powerpc/kernel/mce_power.c
|
|
@@ -78,7 +78,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
|
|
}
|
|
if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
|
|
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
|
|
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
|
|
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
|
|
/* reset error bits */
|
|
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
|
|
}
|
|
@@ -109,7 +109,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
|
|
break;
|
|
case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
|
|
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
|
|
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
|
|
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
|
|
handled = 1;
|
|
}
|
|
break;
|
|
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
|
|
index 4e47db6..e881e3f 100644
|
|
--- a/arch/powerpc/kernel/signal_32.c
|
|
+++ b/arch/powerpc/kernel/signal_32.c
|
|
@@ -967,8 +967,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
|
|
|
|
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
|
|
{
|
|
- memset(to, 0, sizeof *to);
|
|
-
|
|
if (copy_from_user(to, from, 3*sizeof(int)) ||
|
|
copy_from_user(to->_sifields._pad,
|
|
from->_sifields._pad, SI_PAD_SIZE32))
|
|
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
|
|
index 0167d53..a531154 100644
|
|
--- a/arch/powerpc/kernel/suspend.c
|
|
+++ b/arch/powerpc/kernel/suspend.c
|
|
@@ -9,9 +9,7 @@
|
|
|
|
#include <linux/mm.h>
|
|
#include <asm/page.h>
|
|
-
|
|
-/* References to section boundaries */
|
|
-extern const void __nosave_begin, __nosave_end;
|
|
+#include <asm/sections.h>
|
|
|
|
/*
|
|
* pfn_is_nosave - check if given pfn is in the 'nosave' section
|
|
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
|
|
index 75702e2..f7089fc 100644
|
|
--- a/arch/powerpc/kernel/udbg_16550.c
|
|
+++ b/arch/powerpc/kernel/udbg_16550.c
|
|
@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c)
|
|
|
|
static int udbg_uart_getc_poll(void)
|
|
{
|
|
- if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR))
|
|
+ if (!udbg_uart_in)
|
|
+ return -1;
|
|
+
|
|
+ if (!(udbg_uart_in(UART_LSR) & LSR_DR))
|
|
return udbg_uart_in(UART_RBR);
|
|
+
|
|
return -1;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
|
|
index 47afd08..fe7e97a 100644
|
|
--- a/arch/powerpc/kernel/vdso32/getcpu.S
|
|
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
|
|
@@ -30,8 +30,8 @@
|
|
V_FUNCTION_BEGIN(__kernel_getcpu)
|
|
.cfi_startproc
|
|
mfspr r5,SPRN_USPRG3
|
|
- cmpdi cr0,r3,0
|
|
- cmpdi cr1,r4,0
|
|
+ cmpwi cr0,r3,0
|
|
+ cmpwi cr1,r4,0
|
|
clrlwi r6,r5,16
|
|
rlwinm r7,r5,16,31-15,31-0
|
|
beq cr0,1f
|
|
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
|
|
index f096e72..1db6851 100644
|
|
--- a/arch/powerpc/kernel/vmlinux.lds.S
|
|
+++ b/arch/powerpc/kernel/vmlinux.lds.S
|
|
@@ -213,6 +213,7 @@ SECTIONS
|
|
*(.opd)
|
|
}
|
|
|
|
+ . = ALIGN(256);
|
|
.got : AT(ADDR(.got) - LOAD_OFFSET) {
|
|
__toc_start = .;
|
|
#ifndef CONFIG_RELOCATABLE
|
|
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
|
|
index 0c9c8d7..170a034 100644
|
|
--- a/arch/powerpc/lib/locks.c
|
|
+++ b/arch/powerpc/lib/locks.c
|
|
@@ -70,12 +70,16 @@ void __rw_yield(arch_rwlock_t *rw)
|
|
|
|
void arch_spin_unlock_wait(arch_spinlock_t *lock)
|
|
{
|
|
+ smp_mb();
|
|
+
|
|
while (lock->slock) {
|
|
HMT_low();
|
|
if (SHARED_PROCESSOR)
|
|
__spin_yield(lock);
|
|
}
|
|
HMT_medium();
|
|
+
|
|
+ smp_mb();
|
|
}
|
|
|
|
EXPORT_SYMBOL(arch_spin_unlock_wait);
|
|
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
|
|
index 51ab9e7..010fabf 100644
|
|
--- a/arch/powerpc/mm/fault.c
|
|
+++ b/arch/powerpc/mm/fault.c
|
|
@@ -432,6 +432,8 @@ good_area:
|
|
*/
|
|
fault = handle_mm_fault(mm, vma, address, flags);
|
|
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
|
|
+ if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
rc = mm_fault_error(regs, address, fault);
|
|
if (rc >= MM_FAULT_RETURN)
|
|
goto bail;
|
|
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
|
|
index 3ea26c2..838de8e 100644
|
|
--- a/arch/powerpc/mm/hash_native_64.c
|
|
+++ b/arch/powerpc/mm/hash_native_64.c
|
|
@@ -418,18 +418,18 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
-static void native_hugepage_invalidate(struct mm_struct *mm,
|
|
+static void native_hugepage_invalidate(unsigned long vsid,
|
|
+ unsigned long addr,
|
|
unsigned char *hpte_slot_array,
|
|
- unsigned long addr, int psize)
|
|
+ int psize, int ssize)
|
|
{
|
|
- int ssize = 0, i;
|
|
- int lock_tlbie;
|
|
+ int i;
|
|
struct hash_pte *hptep;
|
|
int actual_psize = MMU_PAGE_16M;
|
|
unsigned int max_hpte_count, valid;
|
|
unsigned long flags, s_addr = addr;
|
|
unsigned long hpte_v, want_v, shift;
|
|
- unsigned long hidx, vpn = 0, vsid, hash, slot;
|
|
+ unsigned long hidx, vpn = 0, hash, slot;
|
|
|
|
shift = mmu_psize_defs[psize].shift;
|
|
max_hpte_count = 1U << (PMD_SHIFT - shift);
|
|
@@ -443,15 +443,6 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
|
|
|
|
/* get the vpn */
|
|
addr = s_addr + (i * (1ul << shift));
|
|
- if (!is_kernel_addr(addr)) {
|
|
- ssize = user_segment_size(addr);
|
|
- vsid = get_vsid(mm->context.id, addr, ssize);
|
|
- WARN_ON(vsid == 0);
|
|
- } else {
|
|
- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
|
|
- ssize = mmu_kernel_ssize;
|
|
- }
|
|
-
|
|
vpn = hpt_vpn(addr, vsid, ssize);
|
|
hash = hpt_hash(vpn, shift, ssize);
|
|
if (hidx & _PTEIDX_SECONDARY)
|
|
@@ -471,22 +462,13 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
|
|
else
|
|
/* Invalidate the hpte. NOTE: this also unlocks it */
|
|
hptep->v = 0;
|
|
+ /*
|
|
+ * We need to do tlb invalidate for all the address, tlbie
|
|
+ * instruction compares entry_VA in tlb with the VA specified
|
|
+ * here
|
|
+ */
|
|
+ tlbie(vpn, psize, actual_psize, ssize, 0);
|
|
}
|
|
- /*
|
|
- * Since this is a hugepage, we just need a single tlbie.
|
|
- * use the last vpn.
|
|
- */
|
|
- lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
|
|
- if (lock_tlbie)
|
|
- raw_spin_lock(&native_tlbie_lock);
|
|
-
|
|
- asm volatile("ptesync":::"memory");
|
|
- __tlbie(vpn, psize, actual_psize, ssize);
|
|
- asm volatile("eieio; tlbsync; ptesync":::"memory");
|
|
-
|
|
- if (lock_tlbie)
|
|
- raw_spin_unlock(&native_tlbie_lock);
|
|
-
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
|
|
index 826893f..5f5e632 100644
|
|
--- a/arch/powerpc/mm/hugepage-hash64.c
|
|
+++ b/arch/powerpc/mm/hugepage-hash64.c
|
|
@@ -18,6 +18,57 @@
|
|
#include <linux/mm.h>
|
|
#include <asm/machdep.h>
|
|
|
|
+static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
|
|
+ pmd_t *pmdp, unsigned int psize, int ssize)
|
|
+{
|
|
+ int i, max_hpte_count, valid;
|
|
+ unsigned long s_addr;
|
|
+ unsigned char *hpte_slot_array;
|
|
+ unsigned long hidx, shift, vpn, hash, slot;
|
|
+
|
|
+ s_addr = addr & HPAGE_PMD_MASK;
|
|
+ hpte_slot_array = get_hpte_slot_array(pmdp);
|
|
+ /*
|
|
+ * IF we try to do a HUGE PTE update after a withdraw is done.
|
|
+ * we will find the below NULL. This happens when we do
|
|
+ * split_huge_page_pmd
|
|
+ */
|
|
+ if (!hpte_slot_array)
|
|
+ return;
|
|
+
|
|
+ if (ppc_md.hugepage_invalidate)
|
|
+ return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
|
|
+ psize, ssize);
|
|
+ /*
|
|
+ * No bluk hpte removal support, invalidate each entry
|
|
+ */
|
|
+ shift = mmu_psize_defs[psize].shift;
|
|
+ max_hpte_count = HPAGE_PMD_SIZE >> shift;
|
|
+ for (i = 0; i < max_hpte_count; i++) {
|
|
+ /*
|
|
+ * 8 bits per each hpte entries
|
|
+ * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
|
|
+ */
|
|
+ valid = hpte_valid(hpte_slot_array, i);
|
|
+ if (!valid)
|
|
+ continue;
|
|
+ hidx = hpte_hash_index(hpte_slot_array, i);
|
|
+
|
|
+ /* get the vpn */
|
|
+ addr = s_addr + (i * (1ul << shift));
|
|
+ vpn = hpt_vpn(addr, vsid, ssize);
|
|
+ hash = hpt_hash(vpn, shift, ssize);
|
|
+ if (hidx & _PTEIDX_SECONDARY)
|
|
+ hash = ~hash;
|
|
+
|
|
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
|
|
+ slot += hidx & _PTEIDX_GROUP_IX;
|
|
+ ppc_md.hpte_invalidate(slot, vpn, psize,
|
|
+ MMU_PAGE_16M, ssize, 0);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
pmd_t *pmdp, unsigned long trap, int local, int ssize,
|
|
unsigned int psize)
|
|
@@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
* atomically mark the linux large page PMD busy and dirty
|
|
*/
|
|
do {
|
|
- old_pmd = pmd_val(*pmdp);
|
|
+ pmd_t pmd = ACCESS_ONCE(*pmdp);
|
|
+
|
|
+ old_pmd = pmd_val(pmd);
|
|
/* If PMD busy, retry the access */
|
|
if (unlikely(old_pmd & _PAGE_BUSY))
|
|
return 0;
|
|
@@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
vpn = hpt_vpn(ea, vsid, ssize);
|
|
hash = hpt_hash(vpn, shift, ssize);
|
|
hpte_slot_array = get_hpte_slot_array(pmdp);
|
|
+ if (psize == MMU_PAGE_4K) {
|
|
+ /*
|
|
+ * invalidate the old hpte entry if we have that mapped via 64K
|
|
+ * base page size. This is because demote_segment won't flush
|
|
+ * hash page table entries.
|
|
+ */
|
|
+ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
|
|
+ invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
|
|
+ }
|
|
|
|
valid = hpte_valid(hpte_slot_array, index);
|
|
if (valid) {
|
|
@@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
* safely update this here.
|
|
*/
|
|
valid = 0;
|
|
- new_pmd &= ~_PAGE_HPTEFLAGS;
|
|
hpte_slot_array[index] = 0;
|
|
- } else
|
|
- /* clear the busy bits and set the hash pte bits */
|
|
- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
|
|
+ }
|
|
}
|
|
|
|
if (!valid) {
|
|
@@ -119,11 +178,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
|
|
|
|
/* insert new entry */
|
|
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
|
|
-repeat:
|
|
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
|
|
-
|
|
- /* clear the busy bits and set the hash pte bits */
|
|
- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
|
|
+ new_pmd |= _PAGE_HASHPTE;
|
|
|
|
/* Add in WIMG bits */
|
|
rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
|
|
@@ -132,6 +187,8 @@ repeat:
|
|
* enable the memory coherence always
|
|
*/
|
|
rflags |= HPTE_R_M;
|
|
+repeat:
|
|
+ hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
|
|
|
|
/* Insert into the hash table, primary slot */
|
|
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
|
|
@@ -172,8 +229,17 @@ repeat:
|
|
mark_hpte_slot_valid(hpte_slot_array, index, slot);
|
|
}
|
|
/*
|
|
- * No need to use ldarx/stdcx here
|
|
+ * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
|
|
+ * base page size 4k.
|
|
+ */
|
|
+ if (psize == MMU_PAGE_4K)
|
|
+ new_pmd |= _PAGE_COMBO;
|
|
+ /*
|
|
+ * The hpte valid is stored in the pgtable whose address is in the
|
|
+ * second half of the PMD. Order this against clearing of the busy bit in
|
|
+ * huge pmd.
|
|
*/
|
|
+ smp_wmb();
|
|
*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
|
|
return 0;
|
|
}
|
|
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
|
|
index 30a42e2..a5fff17 100644
|
|
--- a/arch/powerpc/mm/numa.c
|
|
+++ b/arch/powerpc/mm/numa.c
|
|
@@ -610,8 +610,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
|
|
case CPU_UP_CANCELED:
|
|
case CPU_UP_CANCELED_FROZEN:
|
|
unmap_cpu_from_node(lcpu);
|
|
- break;
|
|
ret = NOTIFY_OK;
|
|
+ break;
|
|
#endif
|
|
}
|
|
return ret;
|
|
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
|
|
index 62bf5e8..c64da56 100644
|
|
--- a/arch/powerpc/mm/pgtable_64.c
|
|
+++ b/arch/powerpc/mm/pgtable_64.c
|
|
@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
|
|
*pmdp = __pmd((old & ~clr) | set);
|
|
#endif
|
|
if (old & _PAGE_HASHPTE)
|
|
- hpte_do_hugepage_flush(mm, addr, pmdp);
|
|
+ hpte_do_hugepage_flush(mm, addr, pmdp, old);
|
|
return old;
|
|
}
|
|
|
|
@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
|
|
if (!(old & _PAGE_SPLITTING)) {
|
|
/* We need to flush the hpte */
|
|
if (old & _PAGE_HASHPTE)
|
|
- hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
|
|
+ hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
|
|
}
|
|
}
|
|
|
|
@@ -718,7 +718,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
|
* neesd to be flushed.
|
|
*/
|
|
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
|
|
- pmd_t *pmdp)
|
|
+ pmd_t *pmdp, unsigned long old_pmd)
|
|
{
|
|
int ssize, i;
|
|
unsigned long s_addr;
|
|
@@ -740,12 +740,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
|
|
if (!hpte_slot_array)
|
|
return;
|
|
|
|
- /* get the base page size */
|
|
+ /* get the base page size,vsid and segment size */
|
|
+#ifdef CONFIG_DEBUG_VM
|
|
psize = get_slice_psize(mm, s_addr);
|
|
+ BUG_ON(psize == MMU_PAGE_16M);
|
|
+#endif
|
|
+ if (old_pmd & _PAGE_COMBO)
|
|
+ psize = MMU_PAGE_4K;
|
|
+ else
|
|
+ psize = MMU_PAGE_64K;
|
|
+
|
|
+ if (!is_kernel_addr(s_addr)) {
|
|
+ ssize = user_segment_size(s_addr);
|
|
+ vsid = get_vsid(mm->context.id, s_addr, ssize);
|
|
+ WARN_ON(vsid == 0);
|
|
+ } else {
|
|
+ vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
|
|
+ ssize = mmu_kernel_ssize;
|
|
+ }
|
|
|
|
if (ppc_md.hugepage_invalidate)
|
|
- return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
|
|
- s_addr, psize);
|
|
+ return ppc_md.hugepage_invalidate(vsid, s_addr,
|
|
+ hpte_slot_array,
|
|
+ psize, ssize);
|
|
/*
|
|
* No bluk hpte removal support, invalidate each entry
|
|
*/
|
|
@@ -763,15 +780,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
|
|
|
|
/* get the vpn */
|
|
addr = s_addr + (i * (1ul << shift));
|
|
- if (!is_kernel_addr(addr)) {
|
|
- ssize = user_segment_size(addr);
|
|
- vsid = get_vsid(mm->context.id, addr, ssize);
|
|
- WARN_ON(vsid == 0);
|
|
- } else {
|
|
- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
|
|
- ssize = mmu_kernel_ssize;
|
|
- }
|
|
-
|
|
vpn = hpt_vpn(addr, vsid, ssize);
|
|
hash = hpt_hash(vpn, shift, ssize);
|
|
if (hidx & _PTEIDX_SECONDARY)
|
|
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
|
|
index c99f651..9adda57 100644
|
|
--- a/arch/powerpc/mm/tlb_hash64.c
|
|
+++ b/arch/powerpc/mm/tlb_hash64.c
|
|
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
|
|
if (!(pte & _PAGE_HASHPTE))
|
|
continue;
|
|
if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
|
|
- hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
|
|
+ hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
|
|
else
|
|
hpte_need_flush(mm, start, ptep, pte, 0);
|
|
}
|
|
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
|
|
index 74d1e78..ead5535 100644
|
|
--- a/arch/powerpc/perf/callchain.c
|
|
+++ b/arch/powerpc/perf/callchain.c
|
|
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
|
|
return 0; /* must be 16-byte aligned */
|
|
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
|
|
return 0;
|
|
- if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
|
|
+ if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
|
|
return 1;
|
|
/*
|
|
* sp could decrease when we jump off an interrupt stack
|
|
@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
|
|
sp = regs->gpr[1];
|
|
perf_callchain_store(entry, next_ip);
|
|
|
|
- for (;;) {
|
|
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
|
|
fp = (unsigned long __user *) sp;
|
|
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
|
|
return;
|
|
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
|
|
index 38265dc..65dfbd0 100644
|
|
--- a/arch/powerpc/perf/core-book3s.c
|
|
+++ b/arch/powerpc/perf/core-book3s.c
|
|
@@ -124,7 +124,16 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
|
|
|
|
static bool regs_use_siar(struct pt_regs *regs)
|
|
{
|
|
- return !!regs->result;
|
|
+ /*
|
|
+ * When we take a performance monitor exception the regs are setup
|
|
+ * using perf_read_regs() which overloads some fields, in particular
|
|
+ * regs->result to tell us whether to use SIAR.
|
|
+ *
|
|
+ * However if the regs are from another exception, eg. a syscall, then
|
|
+ * they have not been setup using perf_read_regs() and so regs->result
|
|
+ * is something random.
|
|
+ */
|
|
+ return ((TRAP(regs) == 0xf00) && regs->result);
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
|
|
index 2b90ff8..59ef76c 100644
|
|
--- a/arch/powerpc/platforms/cell/iommu.c
|
|
+++ b/arch/powerpc/platforms/cell/iommu.c
|
|
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
|
|
|
|
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
|
|
|
|
- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
|
|
+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
|
|
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
|
|
|
|
mb();
|
|
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
|
|
index 641e727..62f3e4e 100644
|
|
--- a/arch/powerpc/platforms/cell/spu_fault.c
|
|
+++ b/arch/powerpc/platforms/cell/spu_fault.c
|
|
@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
|
|
if (*flt & VM_FAULT_OOM) {
|
|
ret = -ENOMEM;
|
|
goto out_unlock;
|
|
- } else if (*flt & VM_FAULT_SIGBUS) {
|
|
+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
|
|
ret = -EFAULT;
|
|
goto out_unlock;
|
|
}
|
|
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
|
|
index 87ba7cf..65d633f 100644
|
|
--- a/arch/powerpc/platforms/cell/spufs/inode.c
|
|
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
|
|
@@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir)
|
|
struct dentry *dentry, *tmp;
|
|
|
|
mutex_lock(&dir->d_inode->i_mutex);
|
|
- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
|
|
+ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
|
|
spin_lock(&dentry->d_lock);
|
|
if (!(d_unhashed(dentry)) && dentry->d_inode) {
|
|
dget_dlock(dentry);
|
|
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
index 3b2b4fb..d558b85 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
@@ -491,6 +491,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
|
|
set_dma_ops(&pdev->dev, &dma_iommu_ops);
|
|
set_iommu_table_base(&pdev->dev, &pe->tce32_table);
|
|
}
|
|
+ *pdev->dev.dma_mask = dma_mask;
|
|
return 0;
|
|
}
|
|
|
|
@@ -901,7 +902,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
|
|
unsigned int is_64, struct msi_msg *msg)
|
|
{
|
|
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
|
|
- struct pci_dn *pdn = pci_get_pdn(dev);
|
|
struct irq_data *idata;
|
|
struct irq_chip *ichip;
|
|
unsigned int xive_num = hwirq - phb->msi_base;
|
|
@@ -917,7 +917,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
|
|
return -ENXIO;
|
|
|
|
/* Force 32-bit MSI on some broken devices */
|
|
- if (pdn && pdn->force_32bit_msi)
|
|
+ if (dev->no_64bit_msi)
|
|
is_64 = 0;
|
|
|
|
/* Assign XIVE to PE */
|
|
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
|
|
index 8518817..52c1162 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci.c
|
|
@@ -1,3 +1,4 @@
|
|
+
|
|
/*
|
|
* Support PCI/PCIe on PowerNV platforms
|
|
*
|
|
@@ -50,9 +51,8 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
|
|
{
|
|
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
|
|
struct pnv_phb *phb = hose->private_data;
|
|
- struct pci_dn *pdn = pci_get_pdn(pdev);
|
|
|
|
- if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
|
|
+ if (pdev->no_64bit_msi && !phb->msi32_support)
|
|
return -ENODEV;
|
|
|
|
return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
|
|
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
|
|
index a8fe5aa..3b46eed 100644
|
|
--- a/arch/powerpc/platforms/pseries/dlpar.c
|
|
+++ b/arch/powerpc/platforms/pseries/dlpar.c
|
|
@@ -380,7 +380,7 @@ static int dlpar_online_cpu(struct device_node *dn)
|
|
BUG_ON(get_cpu_current_state(cpu)
|
|
!= CPU_STATE_OFFLINE);
|
|
cpu_maps_update_done();
|
|
- rc = cpu_up(cpu);
|
|
+ rc = device_online(get_cpu_device(cpu));
|
|
if (rc)
|
|
goto out;
|
|
cpu_maps_update_begin();
|
|
@@ -463,7 +463,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
|
|
if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
|
|
set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
|
|
cpu_maps_update_done();
|
|
- rc = cpu_down(cpu);
|
|
+ rc = device_offline(get_cpu_device(cpu));
|
|
if (rc)
|
|
goto out;
|
|
cpu_maps_update_begin();
|
|
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
index 9590dbb..b9a8204 100644
|
|
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
@@ -160,7 +160,7 @@ static int pseries_remove_memory(struct device_node *np)
|
|
static inline int pseries_remove_memblock(unsigned long base,
|
|
unsigned int memblock_size)
|
|
{
|
|
- return -EOPNOTSUPP;
|
|
+ return 0;
|
|
}
|
|
static inline int pseries_remove_memory(struct device_node *np)
|
|
{
|
|
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
|
|
index 33b552f..de1ec54 100644
|
|
--- a/arch/powerpc/platforms/pseries/iommu.c
|
|
+++ b/arch/powerpc/platforms/pseries/iommu.c
|
|
@@ -329,16 +329,16 @@ struct direct_window {
|
|
|
|
/* Dynamic DMA Window support */
|
|
struct ddw_query_response {
|
|
- __be32 windows_available;
|
|
- __be32 largest_available_block;
|
|
- __be32 page_size;
|
|
- __be32 migration_capable;
|
|
+ u32 windows_available;
|
|
+ u32 largest_available_block;
|
|
+ u32 page_size;
|
|
+ u32 migration_capable;
|
|
};
|
|
|
|
struct ddw_create_response {
|
|
- __be32 liobn;
|
|
- __be32 addr_hi;
|
|
- __be32 addr_lo;
|
|
+ u32 liobn;
|
|
+ u32 addr_hi;
|
|
+ u32 addr_lo;
|
|
};
|
|
|
|
static LIST_HEAD(direct_window_list);
|
|
@@ -721,20 +721,22 @@ static int __init disable_ddw_setup(char *str)
|
|
|
|
early_param("disable_ddw", disable_ddw_setup);
|
|
|
|
-static void remove_ddw(struct device_node *np)
|
|
+static void remove_ddw(struct device_node *np, bool remove_prop)
|
|
{
|
|
struct dynamic_dma_window_prop *dwp;
|
|
struct property *win64;
|
|
- const u32 *ddw_avail;
|
|
+ u32 ddw_avail[3];
|
|
u64 liobn;
|
|
- int len, ret;
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
|
|
+ &ddw_avail[0], 3);
|
|
|
|
- ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
|
|
win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
|
|
if (!win64)
|
|
return;
|
|
|
|
- if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
|
|
+ if (ret || win64->length < sizeof(*dwp))
|
|
goto delprop;
|
|
|
|
dwp = win64->value;
|
|
@@ -761,7 +763,8 @@ static void remove_ddw(struct device_node *np)
|
|
np->full_name, ret, ddw_avail[2], liobn);
|
|
|
|
delprop:
|
|
- ret = of_remove_property(np, win64);
|
|
+ if (remove_prop)
|
|
+ ret = of_remove_property(np, win64);
|
|
if (ret)
|
|
pr_warning("%s: failed to remove direct window property: %d\n",
|
|
np->full_name, ret);
|
|
@@ -805,7 +808,7 @@ static int find_existing_ddw_windows(void)
|
|
window = kzalloc(sizeof(*window), GFP_KERNEL);
|
|
if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
|
|
kfree(window);
|
|
- remove_ddw(pdn);
|
|
+ remove_ddw(pdn, true);
|
|
continue;
|
|
}
|
|
|
|
@@ -871,8 +874,9 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
|
|
|
|
do {
|
|
/* extra outputs are LIOBN and dma-addr (hi, lo) */
|
|
- ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
|
|
- BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
|
|
+ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
|
|
+ cfg_addr, BUID_HI(buid), BUID_LO(buid),
|
|
+ page_shift, window_shift);
|
|
} while (rtas_busy_delay(ret));
|
|
dev_info(&dev->dev,
|
|
"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
|
|
@@ -909,7 +913,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
int page_shift;
|
|
u64 dma_addr, max_addr;
|
|
struct device_node *dn;
|
|
- const u32 *uninitialized_var(ddw_avail);
|
|
+ u32 ddw_avail[3];
|
|
struct direct_window *window;
|
|
struct property *win64;
|
|
struct dynamic_dma_window_prop *ddwprop;
|
|
@@ -941,8 +945,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
* for the given node in that order.
|
|
* the property is actually in the parent, not the PE
|
|
*/
|
|
- ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
|
|
- if (!ddw_avail || len < 3 * sizeof(u32))
|
|
+ ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
|
|
+ &ddw_avail[0], 3);
|
|
+ if (ret)
|
|
goto out_failed;
|
|
|
|
/*
|
|
@@ -965,11 +970,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
dev_dbg(&dev->dev, "no free dynamic windows");
|
|
goto out_failed;
|
|
}
|
|
- if (be32_to_cpu(query.page_size) & 4) {
|
|
+ if (query.page_size & 4) {
|
|
page_shift = 24; /* 16MB */
|
|
- } else if (be32_to_cpu(query.page_size) & 2) {
|
|
+ } else if (query.page_size & 2) {
|
|
page_shift = 16; /* 64kB */
|
|
- } else if (be32_to_cpu(query.page_size) & 1) {
|
|
+ } else if (query.page_size & 1) {
|
|
page_shift = 12; /* 4kB */
|
|
} else {
|
|
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
|
|
@@ -979,7 +984,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
/* verify the window * number of ptes will map the partition */
|
|
/* check largest block * page size > max memory hotplug addr */
|
|
max_addr = memory_hotplug_max();
|
|
- if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
|
|
+ if (query.largest_available_block < (max_addr >> page_shift)) {
|
|
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
|
|
"%llu-sized pages\n", max_addr, query.largest_available_block,
|
|
1ULL << page_shift);
|
|
@@ -1005,8 +1010,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
if (ret != 0)
|
|
goto out_free_prop;
|
|
|
|
- ddwprop->liobn = create.liobn;
|
|
- ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
|
|
+ ddwprop->liobn = cpu_to_be32(create.liobn);
|
|
+ ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
|
|
+ create.addr_lo);
|
|
ddwprop->tce_shift = cpu_to_be32(page_shift);
|
|
ddwprop->window_shift = cpu_to_be32(len);
|
|
|
|
@@ -1038,14 +1044,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
list_add(&window->list, &direct_window_list);
|
|
spin_unlock(&direct_window_list_lock);
|
|
|
|
- dma_addr = of_read_number(&create.addr_hi, 2);
|
|
+ dma_addr = be64_to_cpu(ddwprop->dma_base);
|
|
goto out_unlock;
|
|
|
|
out_free_window:
|
|
kfree(window);
|
|
|
|
out_clear_window:
|
|
- remove_ddw(pdn);
|
|
+ remove_ddw(pdn, true);
|
|
|
|
out_free_prop:
|
|
kfree(win64->name);
|
|
@@ -1255,7 +1261,14 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
|
|
|
|
switch (action) {
|
|
case OF_RECONFIG_DETACH_NODE:
|
|
- remove_ddw(np);
|
|
+ /*
|
|
+ * Removing the property will invoke the reconfig
|
|
+ * notifier again, which causes dead-lock on the
|
|
+ * read-write semaphore of the notifier chain. So
|
|
+ * we have to remove the property when releasing
|
|
+ * the device node.
|
|
+ */
|
|
+ remove_ddw(np, false);
|
|
if (pci && pci->iommu_table)
|
|
iommu_free_table(pci->iommu_table, np->full_name);
|
|
|
|
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
|
|
index b02af9e..ccf6f16 100644
|
|
--- a/arch/powerpc/platforms/pseries/lpar.c
|
|
+++ b/arch/powerpc/platforms/pseries/lpar.c
|
|
@@ -430,16 +430,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
|
|
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
|
|
}
|
|
|
|
-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
|
|
- unsigned char *hpte_slot_array,
|
|
- unsigned long addr, int psize)
|
|
+static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
|
|
+ unsigned long addr,
|
|
+ unsigned char *hpte_slot_array,
|
|
+ int psize, int ssize)
|
|
{
|
|
- int ssize = 0, i, index = 0;
|
|
+ int i, index = 0;
|
|
unsigned long s_addr = addr;
|
|
unsigned int max_hpte_count, valid;
|
|
unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
|
|
unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
|
|
- unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
|
|
+ unsigned long shift, hidx, vpn = 0, hash, slot;
|
|
|
|
shift = mmu_psize_defs[psize].shift;
|
|
max_hpte_count = 1U << (PMD_SHIFT - shift);
|
|
@@ -452,15 +453,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
|
|
|
|
/* get the vpn */
|
|
addr = s_addr + (i * (1ul << shift));
|
|
- if (!is_kernel_addr(addr)) {
|
|
- ssize = user_segment_size(addr);
|
|
- vsid = get_vsid(mm->context.id, addr, ssize);
|
|
- WARN_ON(vsid == 0);
|
|
- } else {
|
|
- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
|
|
- ssize = mmu_kernel_ssize;
|
|
- }
|
|
-
|
|
vpn = hpt_vpn(addr, vsid, ssize);
|
|
hash = hpt_hash(vpn, shift, ssize);
|
|
if (hidx & _PTEIDX_SECONDARY)
|
|
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
|
|
index cde4e0a..bf38292 100644
|
|
--- a/arch/powerpc/platforms/pseries/mobility.c
|
|
+++ b/arch/powerpc/platforms/pseries/mobility.c
|
|
@@ -24,10 +24,10 @@
|
|
static struct kobject *mobility_kobj;
|
|
|
|
struct update_props_workarea {
|
|
- u32 phandle;
|
|
- u32 state;
|
|
- u64 reserved;
|
|
- u32 nprops;
|
|
+ __be32 phandle;
|
|
+ __be32 state;
|
|
+ __be64 reserved;
|
|
+ __be32 nprops;
|
|
} __packed;
|
|
|
|
#define NODE_ACTION_MASK 0xff000000
|
|
@@ -53,11 +53,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
|
|
return rc;
|
|
}
|
|
|
|
-static int delete_dt_node(u32 phandle)
|
|
+static int delete_dt_node(__be32 phandle)
|
|
{
|
|
struct device_node *dn;
|
|
|
|
- dn = of_find_node_by_phandle(phandle);
|
|
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
|
|
if (!dn)
|
|
return -ENOENT;
|
|
|
|
@@ -126,7 +126,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
|
|
return 0;
|
|
}
|
|
|
|
-static int update_dt_node(u32 phandle, s32 scope)
|
|
+static int update_dt_node(__be32 phandle, s32 scope)
|
|
{
|
|
struct update_props_workarea *upwa;
|
|
struct device_node *dn;
|
|
@@ -135,6 +135,7 @@ static int update_dt_node(u32 phandle, s32 scope)
|
|
char *prop_data;
|
|
char *rtas_buf;
|
|
int update_properties_token;
|
|
+ u32 nprops;
|
|
u32 vd;
|
|
|
|
update_properties_token = rtas_token("ibm,update-properties");
|
|
@@ -145,7 +146,7 @@ static int update_dt_node(u32 phandle, s32 scope)
|
|
if (!rtas_buf)
|
|
return -ENOMEM;
|
|
|
|
- dn = of_find_node_by_phandle(phandle);
|
|
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
|
|
if (!dn) {
|
|
kfree(rtas_buf);
|
|
return -ENOENT;
|
|
@@ -161,6 +162,7 @@ static int update_dt_node(u32 phandle, s32 scope)
|
|
break;
|
|
|
|
prop_data = rtas_buf + sizeof(*upwa);
|
|
+ nprops = be32_to_cpu(upwa->nprops);
|
|
|
|
/* On the first call to ibm,update-properties for a node the
|
|
* the first property value descriptor contains an empty
|
|
@@ -169,17 +171,17 @@ static int update_dt_node(u32 phandle, s32 scope)
|
|
*/
|
|
if (*prop_data == 0) {
|
|
prop_data++;
|
|
- vd = *(u32 *)prop_data;
|
|
+ vd = be32_to_cpu(*(__be32 *)prop_data);
|
|
prop_data += vd + sizeof(vd);
|
|
- upwa->nprops--;
|
|
+ nprops--;
|
|
}
|
|
|
|
- for (i = 0; i < upwa->nprops; i++) {
|
|
+ for (i = 0; i < nprops; i++) {
|
|
char *prop_name;
|
|
|
|
prop_name = prop_data;
|
|
prop_data += strlen(prop_name) + 1;
|
|
- vd = *(u32 *)prop_data;
|
|
+ vd = be32_to_cpu(*(__be32 *)prop_data);
|
|
prop_data += sizeof(vd);
|
|
|
|
switch (vd) {
|
|
@@ -211,13 +213,13 @@ static int update_dt_node(u32 phandle, s32 scope)
|
|
return 0;
|
|
}
|
|
|
|
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
|
|
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
|
|
{
|
|
struct device_node *dn;
|
|
struct device_node *parent_dn;
|
|
int rc;
|
|
|
|
- parent_dn = of_find_node_by_phandle(parent_phandle);
|
|
+ parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
|
|
if (!parent_dn)
|
|
return -ENOENT;
|
|
|
|
@@ -236,7 +238,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
|
|
int pseries_devicetree_update(s32 scope)
|
|
{
|
|
char *rtas_buf;
|
|
- u32 *data;
|
|
+ __be32 *data;
|
|
int update_nodes_token;
|
|
int rc;
|
|
|
|
@@ -253,17 +255,17 @@ int pseries_devicetree_update(s32 scope)
|
|
if (rc && rc != 1)
|
|
break;
|
|
|
|
- data = (u32 *)rtas_buf + 4;
|
|
- while (*data & NODE_ACTION_MASK) {
|
|
+ data = (__be32 *)rtas_buf + 4;
|
|
+ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
|
|
int i;
|
|
- u32 action = *data & NODE_ACTION_MASK;
|
|
- int node_count = *data & NODE_COUNT_MASK;
|
|
+ u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
|
|
+ u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
|
|
|
|
data++;
|
|
|
|
for (i = 0; i < node_count; i++) {
|
|
- u32 phandle = *data++;
|
|
- u32 drc_index;
|
|
+ __be32 phandle = *data++;
|
|
+ __be32 drc_index;
|
|
|
|
switch (action) {
|
|
case DELETE_DT_NODE:
|
|
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
|
|
index 0c882e8..6849d85 100644
|
|
--- a/arch/powerpc/platforms/pseries/msi.c
|
|
+++ b/arch/powerpc/platforms/pseries/msi.c
|
|
@@ -428,7 +428,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
|
|
*/
|
|
again:
|
|
if (type == PCI_CAP_ID_MSI) {
|
|
- if (pdn->force_32bit_msi) {
|
|
+ if (pdev->no_64bit_msi) {
|
|
rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
|
|
if (rc < 0) {
|
|
/*
|
|
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
|
|
index efe6137..e68922b 100644
|
|
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
|
|
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
|
|
@@ -118,10 +118,10 @@ int remove_phb_dynamic(struct pci_controller *phb)
|
|
}
|
|
}
|
|
|
|
- /* Unregister the bridge device from sysfs and remove the PCI bus */
|
|
- device_unregister(b->bridge);
|
|
+ /* Remove the PCI bus and unregister the bridge device from sysfs */
|
|
phb->bus = NULL;
|
|
pci_remove_bus(b);
|
|
+ device_unregister(b->bridge);
|
|
|
|
/* Now release the IO resource */
|
|
if (res->flags & IORESOURCE_IO)
|
|
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
|
|
index 47b6b9f..830edc8 100644
|
|
--- a/arch/powerpc/sysdev/axonram.c
|
|
+++ b/arch/powerpc/sysdev/axonram.c
|
|
@@ -156,7 +156,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
|
|
}
|
|
|
|
*kaddr = (void *)(bank->ph_addr + offset);
|
|
- *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
|
|
+ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
|
|
index b079098..f89389f 100644
|
|
--- a/arch/powerpc/xmon/xmon.c
|
|
+++ b/arch/powerpc/xmon/xmon.c
|
|
@@ -288,10 +288,11 @@ static inline void disable_surveillance(void)
|
|
args.token = rtas_token("set-indicator");
|
|
if (args.token == RTAS_UNKNOWN_SERVICE)
|
|
return;
|
|
- args.nargs = 3;
|
|
- args.nret = 1;
|
|
+ args.token = cpu_to_be32(args.token);
|
|
+ args.nargs = cpu_to_be32(3);
|
|
+ args.nret = cpu_to_be32(1);
|
|
args.rets = &args.args[3];
|
|
- args.args[0] = SURVEILLANCE_TOKEN;
|
|
+ args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
|
|
args.args[1] = 0;
|
|
args.args[2] = 0;
|
|
enter_rtas(__pa(&args));
|
|
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
|
|
index bb74b21..a0a3bed 100644
|
|
--- a/arch/s390/Kconfig
|
|
+++ b/arch/s390/Kconfig
|
|
@@ -93,6 +93,7 @@ config S390
|
|
select ARCH_INLINE_WRITE_UNLOCK_IRQ
|
|
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
|
|
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
|
|
+ select ARCH_SUPPORTS_ATOMIC_RMW
|
|
select ARCH_USE_CMPXCHG_LOCKREF
|
|
select ARCH_WANT_IPC_PARSE_VERSION
|
|
select BUILDTIME_EXTABLE_SORT
|
|
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
|
|
index 23223cd..1f272b2 100644
|
|
--- a/arch/s390/crypto/aes_s390.c
|
|
+++ b/arch/s390/crypto/aes_s390.c
|
|
@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void)
|
|
module_init(aes_s390_init);
|
|
module_exit(aes_s390_fini);
|
|
|
|
-MODULE_ALIAS("aes-all");
|
|
+MODULE_ALIAS_CRYPTO("aes-all");
|
|
|
|
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
|
|
MODULE_LICENSE("GPL");
|
|
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
|
|
index 7acb77f..9e05cc4 100644
|
|
--- a/arch/s390/crypto/des_s390.c
|
|
+++ b/arch/s390/crypto/des_s390.c
|
|
@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
|
|
module_init(des_s390_init);
|
|
module_exit(des_s390_exit);
|
|
|
|
-MODULE_ALIAS("des");
|
|
-MODULE_ALIAS("des3_ede");
|
|
+MODULE_ALIAS_CRYPTO("des");
|
|
+MODULE_ALIAS_CRYPTO("des3_ede");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
|
|
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
|
|
index d43485d..b258110 100644
|
|
--- a/arch/s390/crypto/ghash_s390.c
|
|
+++ b/arch/s390/crypto/ghash_s390.c
|
|
@@ -16,11 +16,12 @@
|
|
#define GHASH_DIGEST_SIZE 16
|
|
|
|
struct ghash_ctx {
|
|
- u8 icv[16];
|
|
- u8 key[16];
|
|
+ u8 key[GHASH_BLOCK_SIZE];
|
|
};
|
|
|
|
struct ghash_desc_ctx {
|
|
+ u8 icv[GHASH_BLOCK_SIZE];
|
|
+ u8 key[GHASH_BLOCK_SIZE];
|
|
u8 buffer[GHASH_BLOCK_SIZE];
|
|
u32 bytes;
|
|
};
|
|
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
|
|
static int ghash_init(struct shash_desc *desc)
|
|
{
|
|
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
|
|
|
memset(dctx, 0, sizeof(*dctx));
|
|
+ memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
|
|
|
|
return 0;
|
|
}
|
|
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
|
|
}
|
|
|
|
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
|
|
- memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
|
|
|
|
return 0;
|
|
}
|
|
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
|
|
const u8 *src, unsigned int srclen)
|
|
{
|
|
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
|
unsigned int n;
|
|
u8 *buf = dctx->buffer;
|
|
int ret;
|
|
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
|
|
src += n;
|
|
|
|
if (!dctx->bytes) {
|
|
- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
|
|
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
|
|
GHASH_BLOCK_SIZE);
|
|
if (ret != GHASH_BLOCK_SIZE)
|
|
return -EIO;
|
|
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
|
|
|
|
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
|
|
if (n) {
|
|
- ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
|
|
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
|
|
if (ret != n)
|
|
return -EIO;
|
|
src += n;
|
|
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
|
|
return 0;
|
|
}
|
|
|
|
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
|
|
+static int ghash_flush(struct ghash_desc_ctx *dctx)
|
|
{
|
|
u8 *buf = dctx->buffer;
|
|
int ret;
|
|
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
|
|
|
|
memset(pos, 0, dctx->bytes);
|
|
|
|
- ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
|
|
+ ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
|
|
if (ret != GHASH_BLOCK_SIZE)
|
|
return -EIO;
|
|
+
|
|
+ dctx->bytes = 0;
|
|
}
|
|
|
|
- dctx->bytes = 0;
|
|
return 0;
|
|
}
|
|
|
|
static int ghash_final(struct shash_desc *desc, u8 *dst)
|
|
{
|
|
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
|
int ret;
|
|
|
|
- ret = ghash_flush(ctx, dctx);
|
|
+ ret = ghash_flush(dctx);
|
|
if (!ret)
|
|
- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
|
|
+ memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
|
|
return ret;
|
|
}
|
|
|
|
@@ -160,7 +161,7 @@ static void __exit ghash_mod_exit(void)
|
|
module_init(ghash_mod_init);
|
|
module_exit(ghash_mod_exit);
|
|
|
|
-MODULE_ALIAS("ghash");
|
|
+MODULE_ALIAS_CRYPTO("ghash");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
|
|
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
|
|
index a1b3a9d..5b2bee3 100644
|
|
--- a/arch/s390/crypto/sha1_s390.c
|
|
+++ b/arch/s390/crypto/sha1_s390.c
|
|
@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
|
|
module_init(sha1_s390_init);
|
|
module_exit(sha1_s390_fini);
|
|
|
|
-MODULE_ALIAS("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
|
|
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
|
|
index 9b85380..b74ff15 100644
|
|
--- a/arch/s390/crypto/sha256_s390.c
|
|
+++ b/arch/s390/crypto/sha256_s390.c
|
|
@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
|
|
module_init(sha256_s390_init);
|
|
module_exit(sha256_s390_fini);
|
|
|
|
-MODULE_ALIAS("sha256");
|
|
-MODULE_ALIAS("sha224");
|
|
+MODULE_ALIAS_CRYPTO("sha256");
|
|
+MODULE_ALIAS_CRYPTO("sha224");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
|
|
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
|
|
index 32a8138..0c36989 100644
|
|
--- a/arch/s390/crypto/sha512_s390.c
|
|
+++ b/arch/s390/crypto/sha512_s390.c
|
|
@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
|
|
}
|
|
};
|
|
|
|
-MODULE_ALIAS("sha512");
|
|
+MODULE_ALIAS_CRYPTO("sha512");
|
|
|
|
static int sha384_init(struct shash_desc *desc)
|
|
{
|
|
@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
|
|
}
|
|
};
|
|
|
|
-MODULE_ALIAS("sha384");
|
|
+MODULE_ALIAS_CRYPTO("sha384");
|
|
|
|
static int __init init(void)
|
|
{
|
|
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
|
|
index db02052..5426c9e 100644
|
|
--- a/arch/s390/kernel/compat_linux.c
|
|
+++ b/arch/s390/kernel/compat_linux.c
|
|
@@ -245,7 +245,7 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
|
|
struct group_info *group_info;
|
|
int retval;
|
|
|
|
- if (!capable(CAP_SETGID))
|
|
+ if (!may_setgroups())
|
|
return -EPERM;
|
|
if ((unsigned)gidsetsize > NGROUPS_MAX)
|
|
return -EINVAL;
|
|
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
|
|
index a48bc79..184d305 100644
|
|
--- a/arch/s390/kernel/ptrace.c
|
|
+++ b/arch/s390/kernel/ptrace.c
|
|
@@ -323,9 +323,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
|
|
unsigned long mask = PSW_MASK_USER;
|
|
|
|
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
|
|
- if ((data & ~mask) != PSW_USER_BITS)
|
|
+ if ((data ^ PSW_USER_BITS) & ~mask)
|
|
+ /* Invalid psw mask. */
|
|
+ return -EINVAL;
|
|
+ if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
|
|
+ /* Invalid address-space-control bits */
|
|
return -EINVAL;
|
|
if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
|
|
+ /* Invalid addressing mode bits */
|
|
return -EINVAL;
|
|
}
|
|
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
|
|
@@ -661,9 +666,12 @@ static int __poke_user_compat(struct task_struct *child,
|
|
|
|
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
|
|
/* Build a 64 bit psw mask from 31 bit mask. */
|
|
- if ((tmp & ~mask) != PSW32_USER_BITS)
|
|
+ if ((tmp ^ PSW32_USER_BITS) & ~mask)
|
|
/* Invalid psw mask. */
|
|
return -EINVAL;
|
|
+ if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
|
|
+ /* Invalid address-space-control bits */
|
|
+ return -EINVAL;
|
|
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
|
|
(regs->psw.mask & PSW_MASK_BA) |
|
|
(__u64)(tmp & mask) << 32;
|
|
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
|
|
index 29bd7be..1ecd47b 100644
|
|
--- a/arch/s390/kernel/sclp.S
|
|
+++ b/arch/s390/kernel/sclp.S
|
|
@@ -276,6 +276,8 @@ ENTRY(_sclp_print_early)
|
|
jno .Lesa2
|
|
ahi %r15,-80
|
|
stmh %r6,%r15,96(%r15) # store upper register halves
|
|
+ basr %r13,0
|
|
+ lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
|
|
.Lesa2:
|
|
#endif
|
|
lr %r10,%r2 # save string pointer
|
|
@@ -299,6 +301,8 @@ ENTRY(_sclp_print_early)
|
|
#endif
|
|
lm %r6,%r15,120(%r15) # restore registers
|
|
br %r14
|
|
+.Lzeroes:
|
|
+ .fill 64,4,0
|
|
|
|
.LwritedataS4:
|
|
.long 0x00760005 # SCLP command for write data
|
|
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
|
|
index a7a7537..d3236c9 100644
|
|
--- a/arch/s390/kernel/suspend.c
|
|
+++ b/arch/s390/kernel/suspend.c
|
|
@@ -13,14 +13,10 @@
|
|
#include <asm/ipl.h>
|
|
#include <asm/cio.h>
|
|
#include <asm/pci.h>
|
|
+#include <asm/sections.h>
|
|
#include "entry.h"
|
|
|
|
/*
|
|
- * References to section boundaries
|
|
- */
|
|
-extern const void __nosave_begin, __nosave_end;
|
|
-
|
|
-/*
|
|
* The restore of the saved pages in an hibernation image will set
|
|
* the change and referenced bits in the storage key for each page.
|
|
* Overindication of the referenced bits after an hibernation cycle
|
|
@@ -142,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
|
|
{
|
|
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
|
|
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
|
|
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
|
|
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
|
|
|
|
/* Always save lowcore pages (LC protection might be enabled). */
|
|
if (pfn <= LC_PAGES)
|
|
@@ -149,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
|
|
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
|
|
return 1;
|
|
/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
|
|
+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
|
|
+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
|
|
if (tprot(PFN_PHYS(pfn)))
|
|
return 1;
|
|
return 0;
|
|
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
|
|
index 5f79d2d..f1ba119 100644
|
|
--- a/arch/s390/kvm/interrupt.c
|
|
+++ b/arch/s390/kvm/interrupt.c
|
|
@@ -71,6 +71,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
|
|
return 0;
|
|
if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
|
|
return 1;
|
|
+ return 0;
|
|
case KVM_S390_INT_EMERGENCY:
|
|
if (psw_extint_disabled(vcpu))
|
|
return 0;
|
|
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
|
|
index 75beea6..3588f2f 100644
|
|
--- a/arch/s390/kvm/priv.c
|
|
+++ b/arch/s390/kvm/priv.c
|
|
@@ -414,6 +414,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
|
|
for (n = mem->count - 1; n > 0 ; n--)
|
|
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
|
|
|
|
+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
|
|
mem->vm[0].cpus_total = cpus;
|
|
mem->vm[0].cpus_configured = cpus;
|
|
mem->vm[0].cpus_standby = 0;
|
|
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
|
|
index d95265b2..8e95432 100644
|
|
--- a/arch/s390/mm/fault.c
|
|
+++ b/arch/s390/mm/fault.c
|
|
@@ -239,6 +239,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
|
|
do_no_context(regs);
|
|
else
|
|
pagefault_out_of_memory();
|
|
+ } else if (fault & VM_FAULT_SIGSEGV) {
|
|
+ /* Kernel mode? Handle exceptions or die */
|
|
+ if (!user_mode(regs))
|
|
+ do_no_context(regs);
|
|
+ else
|
|
+ do_sigsegv(regs, SEGV_MAPERR);
|
|
} else if (fault & VM_FAULT_SIGBUS) {
|
|
/* Kernel mode? Handle exceptions or die */
|
|
if (!user_mode(regs))
|
|
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
|
|
index 3584ed9..e309c5c 100644
|
|
--- a/arch/s390/mm/pgtable.c
|
|
+++ b/arch/s390/mm/pgtable.c
|
|
@@ -810,11 +810,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
|
|
pte_t *ptep;
|
|
|
|
down_read(&mm->mmap_sem);
|
|
+retry:
|
|
ptep = get_locked_pte(current->mm, addr, &ptl);
|
|
if (unlikely(!ptep)) {
|
|
up_read(&mm->mmap_sem);
|
|
return -EFAULT;
|
|
}
|
|
+ if (!(pte_val(*ptep) & _PAGE_INVALID) &&
|
|
+ (pte_val(*ptep) & _PAGE_PROTECT)) {
|
|
+ pte_unmap_unlock(*ptep, ptl);
|
|
+ if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
|
|
+ up_read(&mm->mmap_sem);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ goto retry;
|
|
+ }
|
|
|
|
new = old = pgste_get_lock(ptep);
|
|
pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
|
|
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
|
|
index 52238983..6860beb 100644
|
|
--- a/arch/score/mm/fault.c
|
|
+++ b/arch/score/mm/fault.c
|
|
@@ -114,6 +114,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
|
|
index 1b61997..7a99e6a 100644
|
|
--- a/arch/sh/include/asm/sections.h
|
|
+++ b/arch/sh/include/asm/sections.h
|
|
@@ -3,7 +3,6 @@
|
|
|
|
#include <asm-generic/sections.h>
|
|
|
|
-extern long __nosave_begin, __nosave_end;
|
|
extern long __machvec_start, __machvec_end;
|
|
extern char __uncached_start, __uncached_end;
|
|
extern char __start_eh_frame[], __stop_eh_frame[];
|
|
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
|
|
index ff1465c..5acf89c 100644
|
|
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
|
|
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
|
|
@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
|
|
};
|
|
|
|
static struct resource scif0_resources[] = {
|
|
- DEFINE_RES_MEM(0xfffffe80, 0x100),
|
|
+ DEFINE_RES_MEM(0xfffffe80, 0x10),
|
|
DEFINE_RES_IRQ(evt2irq(0x4e0)),
|
|
};
|
|
|
|
@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
|
|
};
|
|
|
|
static struct resource scif1_resources[] = {
|
|
- DEFINE_RES_MEM(0xa4000150, 0x100),
|
|
+ DEFINE_RES_MEM(0xa4000150, 0x10),
|
|
DEFINE_RES_IRQ(evt2irq(0x900)),
|
|
};
|
|
|
|
@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
|
|
};
|
|
|
|
static struct resource scif2_resources[] = {
|
|
- DEFINE_RES_MEM(0xa4000140, 0x100),
|
|
+ DEFINE_RES_MEM(0xa4000140, 0x10),
|
|
DEFINE_RES_IRQ(evt2irq(0x880)),
|
|
};
|
|
|
|
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
|
|
index 541dc61..a58fec9 100644
|
|
--- a/arch/sh/mm/fault.c
|
|
+++ b/arch/sh/mm/fault.c
|
|
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
|
|
} else {
|
|
if (fault & VM_FAULT_SIGBUS)
|
|
do_sigbus(regs, error_code, address);
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ bad_area(regs, error_code, address);
|
|
else
|
|
BUG();
|
|
}
|
|
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
|
|
index b398c68..a38513c 100644
|
|
--- a/arch/sparc/Kconfig
|
|
+++ b/arch/sparc/Kconfig
|
|
@@ -67,6 +67,7 @@ config SPARC64
|
|
select HAVE_SYSCALL_TRACEPOINTS
|
|
select HAVE_CONTEXT_TRACKING
|
|
select HAVE_DEBUG_KMEMLEAK
|
|
+ select SPARSE_IRQ
|
|
select RTC_DRV_CMOS
|
|
select RTC_DRV_BQ4802
|
|
select RTC_DRV_SUN4V
|
|
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
|
|
index 503e6d9..ded4cee3 100644
|
|
--- a/arch/sparc/crypto/aes_glue.c
|
|
+++ b/arch/sparc/crypto/aes_glue.c
|
|
@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
|
|
|
|
-MODULE_ALIAS("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
|
|
index 888f6260..641f55c 100644
|
|
--- a/arch/sparc/crypto/camellia_glue.c
|
|
+++ b/arch/sparc/crypto/camellia_glue.c
|
|
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
|
|
|
|
-MODULE_ALIAS("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
|
|
index 5162fad..d1064e4 100644
|
|
--- a/arch/sparc/crypto/crc32c_glue.c
|
|
+++ b/arch/sparc/crypto/crc32c_glue.c
|
|
@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
|
|
|
|
-MODULE_ALIAS("crc32c");
|
|
+MODULE_ALIAS_CRYPTO("crc32c");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
|
|
index 3065bc6..d115009 100644
|
|
--- a/arch/sparc/crypto/des_glue.c
|
|
+++ b/arch/sparc/crypto/des_glue.c
|
|
@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
|
|
|
|
-MODULE_ALIAS("des");
|
|
+MODULE_ALIAS_CRYPTO("des");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
|
|
index 09a9ea1..64c7ff5 100644
|
|
--- a/arch/sparc/crypto/md5_glue.c
|
|
+++ b/arch/sparc/crypto/md5_glue.c
|
|
@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
|
|
|
|
-MODULE_ALIAS("md5");
|
|
+MODULE_ALIAS_CRYPTO("md5");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
|
|
index 6cd5f29..1b3e47a 100644
|
|
--- a/arch/sparc/crypto/sha1_glue.c
|
|
+++ b/arch/sparc/crypto/sha1_glue.c
|
|
@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
|
|
|
|
-MODULE_ALIAS("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
|
|
index 04f555a..41f27cc 100644
|
|
--- a/arch/sparc/crypto/sha256_glue.c
|
|
+++ b/arch/sparc/crypto/sha256_glue.c
|
|
@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
|
|
|
|
-MODULE_ALIAS("sha224");
|
|
-MODULE_ALIAS("sha256");
|
|
+MODULE_ALIAS_CRYPTO("sha224");
|
|
+MODULE_ALIAS_CRYPTO("sha256");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
|
|
index f04d199..9fff885 100644
|
|
--- a/arch/sparc/crypto/sha512_glue.c
|
|
+++ b/arch/sparc/crypto/sha512_glue.c
|
|
@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
|
|
|
|
-MODULE_ALIAS("sha384");
|
|
-MODULE_ALIAS("sha512");
|
|
+MODULE_ALIAS_CRYPTO("sha384");
|
|
+MODULE_ALIAS_CRYPTO("sha512");
|
|
|
|
#include "crop_devid.c"
|
|
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
|
|
index 905832a..a0ed182 100644
|
|
--- a/arch/sparc/include/asm/atomic_32.h
|
|
+++ b/arch/sparc/include/asm/atomic_32.h
|
|
@@ -21,7 +21,7 @@
|
|
|
|
extern int __atomic_add_return(int, atomic_t *);
|
|
extern int atomic_cmpxchg(atomic_t *, int, int);
|
|
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
|
+extern int atomic_xchg(atomic_t *, int);
|
|
extern int __atomic_add_unless(atomic_t *, int, int);
|
|
extern void atomic_set(atomic_t *, int);
|
|
|
|
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
|
|
index 1fae1a0..ae0f9a7 100644
|
|
--- a/arch/sparc/include/asm/cmpxchg_32.h
|
|
+++ b/arch/sparc/include/asm/cmpxchg_32.h
|
|
@@ -11,22 +11,14 @@
|
|
#ifndef __ARCH_SPARC_CMPXCHG__
|
|
#define __ARCH_SPARC_CMPXCHG__
|
|
|
|
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
|
|
-{
|
|
- __asm__ __volatile__("swap [%2], %0"
|
|
- : "=&r" (val)
|
|
- : "0" (val), "r" (m)
|
|
- : "memory");
|
|
- return val;
|
|
-}
|
|
-
|
|
+extern unsigned long __xchg_u32(volatile u32 *m, u32 new);
|
|
extern void __xchg_called_with_bad_pointer(void);
|
|
|
|
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
|
|
{
|
|
switch (size) {
|
|
case 4:
|
|
- return xchg_u32(ptr, x);
|
|
+ return __xchg_u32(ptr, x);
|
|
}
|
|
__xchg_called_with_bad_pointer();
|
|
return x;
|
|
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
|
|
index ca121f0..17be9d6 100644
|
|
--- a/arch/sparc/include/asm/hypervisor.h
|
|
+++ b/arch/sparc/include/asm/hypervisor.h
|
|
@@ -2944,6 +2944,16 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
|
|
unsigned long reg_val);
|
|
#endif
|
|
|
|
+#define HV_FAST_T5_GET_PERFREG 0x1a8
|
|
+#define HV_FAST_T5_SET_PERFREG 0x1a9
|
|
+
|
|
+#ifndef __ASSEMBLY__
|
|
+unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
|
|
+ unsigned long *reg_val);
|
|
+unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
|
|
+ unsigned long reg_val);
|
|
+#endif
|
|
+
|
|
/* Function numbers for HV_CORE_TRAP. */
|
|
#define HV_CORE_SET_VER 0x00
|
|
#define HV_CORE_PUTCHAR 0x01
|
|
@@ -2975,6 +2985,7 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
|
|
#define HV_GRP_VF_CPU 0x0205
|
|
#define HV_GRP_KT_CPU 0x0209
|
|
#define HV_GRP_VT_CPU 0x020c
|
|
+#define HV_GRP_T5_CPU 0x0211
|
|
#define HV_GRP_DIAG 0x0300
|
|
|
|
#ifndef __ASSEMBLY__
|
|
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
|
|
index abf6afe..3deb07f 100644
|
|
--- a/arch/sparc/include/asm/irq_64.h
|
|
+++ b/arch/sparc/include/asm/irq_64.h
|
|
@@ -37,7 +37,7 @@
|
|
*
|
|
* ino_bucket->irq allocation is made during {sun4v_,}build_irq().
|
|
*/
|
|
-#define NR_IRQS 255
|
|
+#define NR_IRQS (2048)
|
|
|
|
extern void irq_install_pre_handler(int irq,
|
|
void (*func)(unsigned int, void *, void *),
|
|
@@ -57,11 +57,8 @@ extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
|
|
unsigned long iclr_base);
|
|
extern void sun4u_destroy_msi(unsigned int irq);
|
|
|
|
-extern unsigned char irq_alloc(unsigned int dev_handle,
|
|
- unsigned int dev_ino);
|
|
-#ifdef CONFIG_PCI_MSI
|
|
-extern void irq_free(unsigned int irq);
|
|
-#endif
|
|
+unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
|
|
+void irq_free(unsigned int irq);
|
|
|
|
extern void __init init_IRQ(void);
|
|
extern void fixup_irqs(void);
|
|
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
|
|
index bdb524a..8732ed3 100644
|
|
--- a/arch/sparc/include/asm/ldc.h
|
|
+++ b/arch/sparc/include/asm/ldc.h
|
|
@@ -53,13 +53,14 @@ struct ldc_channel;
|
|
/* Allocate state for a channel. */
|
|
extern struct ldc_channel *ldc_alloc(unsigned long id,
|
|
const struct ldc_channel_config *cfgp,
|
|
- void *event_arg);
|
|
+ void *event_arg,
|
|
+ const char *name);
|
|
|
|
/* Shut down and free state for a channel. */
|
|
extern void ldc_free(struct ldc_channel *lp);
|
|
|
|
/* Register TX and RX queues of the link with the hypervisor. */
|
|
-extern int ldc_bind(struct ldc_channel *lp, const char *name);
|
|
+extern int ldc_bind(struct ldc_channel *lp);
|
|
|
|
/* For non-RAW protocols we need to complete a handshake before
|
|
* communication can proceed. ldc_connect() does that, if the
|
|
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
|
|
index a12dbe3..e48fdf4 100644
|
|
--- a/arch/sparc/include/asm/oplib_64.h
|
|
+++ b/arch/sparc/include/asm/oplib_64.h
|
|
@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
|
|
/* You must call prom_init() before using any of the library services,
|
|
* preferably as early as possible. Pass it the romvec pointer.
|
|
*/
|
|
-extern void prom_init(void *cif_handler, void *cif_stack);
|
|
+extern void prom_init(void *cif_handler);
|
|
+extern void prom_init_report(void);
|
|
|
|
/* Boot argument acquisition, returns the boot command line string. */
|
|
extern char *prom_getbootargs(void);
|
|
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
|
|
index aac53fc..b18e602 100644
|
|
--- a/arch/sparc/include/asm/page_64.h
|
|
+++ b/arch/sparc/include/asm/page_64.h
|
|
@@ -57,18 +57,21 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
|
|
typedef struct { unsigned long pte; } pte_t;
|
|
typedef struct { unsigned long iopte; } iopte_t;
|
|
typedef struct { unsigned long pmd; } pmd_t;
|
|
+typedef struct { unsigned long pud; } pud_t;
|
|
typedef struct { unsigned long pgd; } pgd_t;
|
|
typedef struct { unsigned long pgprot; } pgprot_t;
|
|
|
|
#define pte_val(x) ((x).pte)
|
|
#define iopte_val(x) ((x).iopte)
|
|
#define pmd_val(x) ((x).pmd)
|
|
+#define pud_val(x) ((x).pud)
|
|
#define pgd_val(x) ((x).pgd)
|
|
#define pgprot_val(x) ((x).pgprot)
|
|
|
|
#define __pte(x) ((pte_t) { (x) } )
|
|
#define __iopte(x) ((iopte_t) { (x) } )
|
|
#define __pmd(x) ((pmd_t) { (x) } )
|
|
+#define __pud(x) ((pud_t) { (x) } )
|
|
#define __pgd(x) ((pgd_t) { (x) } )
|
|
#define __pgprot(x) ((pgprot_t) { (x) } )
|
|
|
|
@@ -77,18 +80,21 @@ typedef struct { unsigned long pgprot; } pgprot_t;
|
|
typedef unsigned long pte_t;
|
|
typedef unsigned long iopte_t;
|
|
typedef unsigned long pmd_t;
|
|
+typedef unsigned long pud_t;
|
|
typedef unsigned long pgd_t;
|
|
typedef unsigned long pgprot_t;
|
|
|
|
#define pte_val(x) (x)
|
|
#define iopte_val(x) (x)
|
|
#define pmd_val(x) (x)
|
|
+#define pud_val(x) (x)
|
|
#define pgd_val(x) (x)
|
|
#define pgprot_val(x) (x)
|
|
|
|
#define __pte(x) (x)
|
|
#define __iopte(x) (x)
|
|
#define __pmd(x) (x)
|
|
+#define __pud(x) (x)
|
|
#define __pgd(x) (x)
|
|
#define __pgprot(x) (x)
|
|
|
|
@@ -96,21 +102,14 @@ typedef unsigned long pgprot_t;
|
|
|
|
typedef pte_t *pgtable_t;
|
|
|
|
-/* These two values define the virtual address space range in which we
|
|
- * must forbid 64-bit user processes from making mappings. It used to
|
|
- * represent precisely the virtual address space hole present in most
|
|
- * early sparc64 chips including UltraSPARC-I. But now it also is
|
|
- * further constrained by the limits of our page tables, which is
|
|
- * 43-bits of virtual address.
|
|
- */
|
|
-#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL)
|
|
-#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL)
|
|
+extern unsigned long sparc64_va_hole_top;
|
|
+extern unsigned long sparc64_va_hole_bottom;
|
|
|
|
/* The next two defines specify the actual exclusion region we
|
|
* enforce, wherein we use a 4GB red zone on each side of the VA hole.
|
|
*/
|
|
-#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
|
|
-#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL))
|
|
+#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
|
|
+#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL))
|
|
|
|
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
|
|
_AC(0x0000000070000000,UL) : \
|
|
@@ -118,20 +117,16 @@ typedef pte_t *pgtable_t;
|
|
|
|
#include <asm-generic/memory_model.h>
|
|
|
|
-#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X)))
|
|
extern unsigned long PAGE_OFFSET;
|
|
|
|
#endif /* !(__ASSEMBLY__) */
|
|
|
|
-/* The maximum number of physical memory address bits we support, this
|
|
- * is used to size various tables used to manage kernel TLB misses and
|
|
- * also the sparsemem code.
|
|
+/* The maximum number of physical memory address bits we support. The
|
|
+ * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
|
|
+ * evaluates to.
|
|
*/
|
|
-#define MAX_PHYS_ADDRESS_BITS 47
|
|
+#define MAX_PHYS_ADDRESS_BITS 53
|
|
|
|
-/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
|
|
- * and kpte_linear_bitmap.
|
|
- */
|
|
#define ILOG2_4MB 22
|
|
#define ILOG2_256MB 28
|
|
|
|
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
|
|
index bcfe063..2c8d41f 100644
|
|
--- a/arch/sparc/include/asm/pgalloc_64.h
|
|
+++ b/arch/sparc/include/asm/pgalloc_64.h
|
|
@@ -15,6 +15,13 @@
|
|
|
|
extern struct kmem_cache *pgtable_cache;
|
|
|
|
+static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
|
|
+{
|
|
+ pgd_set(pgd, pud);
|
|
+}
|
|
+
|
|
+#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
|
|
+
|
|
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
{
|
|
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
|
|
@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
|
kmem_cache_free(pgtable_cache, pgd);
|
|
}
|
|
|
|
-#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
|
|
+static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
|
|
+{
|
|
+ pud_set(pud, pmd);
|
|
+}
|
|
+
|
|
+#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
|
|
+
|
|
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
+{
|
|
+ return kmem_cache_alloc(pgtable_cache,
|
|
+ GFP_KERNEL|__GFP_REPEAT);
|
|
+}
|
|
+
|
|
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
|
+{
|
|
+ kmem_cache_free(pgtable_cache, pud);
|
|
+}
|
|
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
|
|
#define __pmd_free_tlb(tlb, pmd, addr) \
|
|
pgtable_free_tlb(tlb, pmd, false)
|
|
|
|
+#define __pud_free_tlb(tlb, pud, addr) \
|
|
+ pgtable_free_tlb(tlb, pud, false)
|
|
+
|
|
#endif /* _SPARC64_PGALLOC_H */
|
|
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
|
|
index 0f9e945..e8dfabf 100644
|
|
--- a/arch/sparc/include/asm/pgtable_64.h
|
|
+++ b/arch/sparc/include/asm/pgtable_64.h
|
|
@@ -20,11 +20,10 @@
|
|
#include <asm/page.h>
|
|
#include <asm/processor.h>
|
|
|
|
-#include <asm-generic/pgtable-nopud.h>
|
|
-
|
|
/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
|
|
* The page copy blockops can use 0x6000000 to 0x8000000.
|
|
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
|
|
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
|
|
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
|
|
* The PROM resides in an area spanning 0xf0000000 to 0x100000000.
|
|
* The vmalloc area spans 0x100000000 to 0x200000000.
|
|
* Since modules need to be in the lowest 32-bits of the address space,
|
|
@@ -33,17 +32,15 @@
|
|
* 0x400000000.
|
|
*/
|
|
#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
|
|
-#define TSBMAP_BASE _AC(0x0000000008000000,UL)
|
|
+#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL)
|
|
+#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL)
|
|
#define MODULES_VADDR _AC(0x0000000010000000,UL)
|
|
#define MODULES_LEN _AC(0x00000000e0000000,UL)
|
|
#define MODULES_END _AC(0x00000000f0000000,UL)
|
|
#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
|
|
#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
|
|
#define VMALLOC_START _AC(0x0000000100000000,UL)
|
|
-#define VMALLOC_END _AC(0x0000010000000000,UL)
|
|
-#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
|
|
-
|
|
-#define vmemmap ((struct page *)VMEMMAP_BASE)
|
|
+#define VMEMMAP_BASE VMALLOC_END
|
|
|
|
/* PMD_SHIFT determines the size of the area a second-level page
|
|
* table can map
|
|
@@ -53,13 +50,25 @@
|
|
#define PMD_MASK (~(PMD_SIZE-1))
|
|
#define PMD_BITS (PAGE_SHIFT - 3)
|
|
|
|
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
|
|
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
|
|
+/* PUD_SHIFT determines the size of the area a third-level page
|
|
+ * table can map
|
|
+ */
|
|
+#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
|
|
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
|
|
+#define PUD_MASK (~(PUD_SIZE-1))
|
|
+#define PUD_BITS (PAGE_SHIFT - 3)
|
|
+
|
|
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
|
|
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
|
|
#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
|
|
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
|
#define PGDIR_BITS (PAGE_SHIFT - 3)
|
|
|
|
-#if (PGDIR_SHIFT + PGDIR_BITS) != 43
|
|
+#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
|
|
+#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
|
|
+#endif
|
|
+
|
|
+#if (PGDIR_SHIFT + PGDIR_BITS) != 53
|
|
#error Page table parameters do not cover virtual address space properly.
|
|
#endif
|
|
|
|
@@ -69,19 +78,32 @@
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
+extern unsigned long VMALLOC_END;
|
|
+
|
|
+#define vmemmap ((struct page *)VMEMMAP_BASE)
|
|
+
|
|
#include <linux/sched.h>
|
|
|
|
+bool kern_addr_valid(unsigned long addr);
|
|
+
|
|
/* Entries per page directory level. */
|
|
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
|
|
#define PTRS_PER_PMD (1UL << PMD_BITS)
|
|
+#define PTRS_PER_PUD (1UL << PUD_BITS)
|
|
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
|
|
|
|
/* Kernel has a separate 44bit address space. */
|
|
#define FIRST_USER_ADDRESS 0
|
|
|
|
-#define pte_ERROR(e) __builtin_trap()
|
|
-#define pmd_ERROR(e) __builtin_trap()
|
|
-#define pgd_ERROR(e) __builtin_trap()
|
|
+#define pmd_ERROR(e) \
|
|
+ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
|
|
+ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
|
|
+#define pud_ERROR(e) \
|
|
+ pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
|
|
+ __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
|
|
+#define pgd_ERROR(e) \
|
|
+ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
|
|
+ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
|
|
|
|
#endif /* !(__ASSEMBLY__) */
|
|
|
|
@@ -90,6 +112,7 @@
|
|
#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
|
|
#define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */
|
|
#define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */
|
|
+#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE
|
|
|
|
/* Advertise support for _PAGE_SPECIAL */
|
|
#define __HAVE_ARCH_PTE_SPECIAL
|
|
@@ -258,8 +281,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
|
|
{
|
|
unsigned long mask, tmp;
|
|
|
|
- /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
|
|
- * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
|
|
+ /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
|
|
+ * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
|
|
*
|
|
* Even if we use negation tricks the result is still a 6
|
|
* instruction sequence, so don't try to play fancy and just
|
|
@@ -289,10 +312,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
|
|
" .previous\n"
|
|
: "=r" (mask), "=r" (tmp)
|
|
: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
|
|
- _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
|
|
+ _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
|
|
_PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
|
|
"i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
|
|
- _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
|
|
+ _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
|
|
_PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
|
|
|
|
return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
|
|
@@ -633,29 +656,29 @@ static inline unsigned long pmd_large(pmd_t pmd)
|
|
{
|
|
pte_t pte = __pte(pmd_val(pmd));
|
|
|
|
- return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
|
|
+ return pte_val(pte) & _PAGE_PMD_HUGE;
|
|
}
|
|
|
|
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
-static inline unsigned long pmd_young(pmd_t pmd)
|
|
+static inline unsigned long pmd_pfn(pmd_t pmd)
|
|
{
|
|
pte_t pte = __pte(pmd_val(pmd));
|
|
|
|
- return pte_young(pte);
|
|
+ return pte_pfn(pte);
|
|
}
|
|
|
|
-static inline unsigned long pmd_write(pmd_t pmd)
|
|
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
+static inline unsigned long pmd_young(pmd_t pmd)
|
|
{
|
|
pte_t pte = __pte(pmd_val(pmd));
|
|
|
|
- return pte_write(pte);
|
|
+ return pte_young(pte);
|
|
}
|
|
|
|
-static inline unsigned long pmd_pfn(pmd_t pmd)
|
|
+static inline unsigned long pmd_write(pmd_t pmd)
|
|
{
|
|
pte_t pte = __pte(pmd_val(pmd));
|
|
|
|
- return pte_pfn(pte);
|
|
+ return pte_write(pte);
|
|
}
|
|
|
|
static inline unsigned long pmd_trans_huge(pmd_t pmd)
|
|
@@ -719,20 +742,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
|
|
return __pmd(pte_val(pte));
|
|
}
|
|
|
|
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
|
|
-{
|
|
- unsigned long mask;
|
|
-
|
|
- if (tlb_type == hypervisor)
|
|
- mask = _PAGE_PRESENT_4V;
|
|
- else
|
|
- mask = _PAGE_PRESENT_4U;
|
|
-
|
|
- pmd_val(pmd) &= ~mask;
|
|
-
|
|
- return pmd;
|
|
-}
|
|
-
|
|
static inline pmd_t pmd_mksplitting(pmd_t pmd)
|
|
{
|
|
pte_t pte = __pte(pmd_val(pmd));
|
|
@@ -757,6 +766,22 @@ static inline int pmd_present(pmd_t pmd)
|
|
|
|
#define pmd_none(pmd) (!pmd_val(pmd))
|
|
|
|
+/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
|
|
+ * very simple, it's just the physical address. PTE tables are of
|
|
+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
|
|
+ * the top bits outside of the range of any physical address size we
|
|
+ * support are clear as well. We also validate the physical itself.
|
|
+ */
|
|
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
|
|
+
|
|
+#define pud_none(pud) (!pud_val(pud))
|
|
+
|
|
+#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)
|
|
+
|
|
+#define pgd_none(pgd) (!pgd_val(pgd))
|
|
+
|
|
+#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)
|
|
+
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
|
|
pmd_t *pmdp, pmd_t pmd);
|
|
@@ -790,16 +815,34 @@ static inline unsigned long __pmd_page(pmd_t pmd)
|
|
#define pud_page_vaddr(pud) \
|
|
((unsigned long) __va(pud_val(pud)))
|
|
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
|
|
-#define pmd_bad(pmd) (0)
|
|
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
|
|
-#define pud_none(pud) (!pud_val(pud))
|
|
-#define pud_bad(pud) (0)
|
|
#define pud_present(pud) (pud_val(pud) != 0U)
|
|
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
|
|
+#define pgd_page_vaddr(pgd) \
|
|
+ ((unsigned long) __va(pgd_val(pgd)))
|
|
+#define pgd_present(pgd) (pgd_val(pgd) != 0U)
|
|
+#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL)
|
|
+
|
|
+static inline unsigned long pud_large(pud_t pud)
|
|
+{
|
|
+ pte_t pte = __pte(pud_val(pud));
|
|
+
|
|
+ return pte_val(pte) & _PAGE_PMD_HUGE;
|
|
+}
|
|
+
|
|
+static inline unsigned long pud_pfn(pud_t pud)
|
|
+{
|
|
+ pte_t pte = __pte(pud_val(pud));
|
|
+
|
|
+ return pte_pfn(pte);
|
|
+}
|
|
|
|
/* Same in both SUN4V and SUN4U. */
|
|
#define pte_none(pte) (!pte_val(pte))
|
|
|
|
+#define pgd_set(pgdp, pudp) \
|
|
+ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
|
|
+
|
|
/* to find an entry in a page-table-directory. */
|
|
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
|
|
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
|
|
@@ -807,6 +850,11 @@ static inline unsigned long __pmd_page(pmd_t pmd)
|
|
/* to find an entry in a kernel page-table-directory */
|
|
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
|
|
|
+/* Find an entry in the third-level page table.. */
|
|
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
|
|
+#define pud_offset(pgdp, address) \
|
|
+ ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
|
|
+
|
|
/* Find an entry in the second-level page table.. */
|
|
#define pmd_offset(pudp, address) \
|
|
((pmd_t *) pud_page_vaddr(*(pudp)) + \
|
|
@@ -879,7 +927,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
|
|
#endif
|
|
|
|
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
|
-extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
|
|
|
|
extern void paging_init(void);
|
|
extern unsigned long find_ecache_flush_span(unsigned long size);
|
|
@@ -893,6 +940,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
|
|
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
|
|
pmd_t *pmd);
|
|
|
|
+#define __HAVE_ARCH_PMDP_INVALIDATE
|
|
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
|
+ pmd_t *pmdp);
|
|
+
|
|
#define __HAVE_ARCH_PGTABLE_DEPOSIT
|
|
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
|
|
pgtable_t pgtable);
|
|
@@ -919,18 +970,6 @@ extern unsigned long pte_file(pte_t);
|
|
extern pte_t pgoff_to_pte(unsigned long);
|
|
#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
|
|
|
|
-extern unsigned long sparc64_valid_addr_bitmap[];
|
|
-
|
|
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
|
|
-static inline bool kern_addr_valid(unsigned long addr)
|
|
-{
|
|
- unsigned long paddr = __pa(addr);
|
|
-
|
|
- if ((paddr >> 41UL) != 0UL)
|
|
- return false;
|
|
- return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
|
|
-}
|
|
-
|
|
extern int page_in_phys_avail(unsigned long paddr);
|
|
|
|
/*
|
|
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
|
|
index 5e35e05..acd6146 100644
|
|
--- a/arch/sparc/include/asm/setup.h
|
|
+++ b/arch/sparc/include/asm/setup.h
|
|
@@ -24,6 +24,10 @@ static inline int con_is_present(void)
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_SPARC64
|
|
+extern void __init start_early_boot(void);
|
|
+#endif
|
|
+
|
|
extern void sun_do_break(void);
|
|
extern int stop_a_enabled;
|
|
extern int scons_pwroff;
|
|
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
|
|
index 6b67e50..69424d4 100644
|
|
--- a/arch/sparc/include/asm/spitfire.h
|
|
+++ b/arch/sparc/include/asm/spitfire.h
|
|
@@ -45,6 +45,8 @@
|
|
#define SUN4V_CHIP_NIAGARA3 0x03
|
|
#define SUN4V_CHIP_NIAGARA4 0x04
|
|
#define SUN4V_CHIP_NIAGARA5 0x05
|
|
+#define SUN4V_CHIP_SPARC_M6 0x06
|
|
+#define SUN4V_CHIP_SPARC_M7 0x07
|
|
#define SUN4V_CHIP_SPARC64X 0x8a
|
|
#define SUN4V_CHIP_UNKNOWN 0xff
|
|
|
|
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
|
|
index a5f01ac..cc6275c 100644
|
|
--- a/arch/sparc/include/asm/thread_info_64.h
|
|
+++ b/arch/sparc/include/asm/thread_info_64.h
|
|
@@ -63,7 +63,8 @@ struct thread_info {
|
|
struct pt_regs *kern_una_regs;
|
|
unsigned int kern_una_insn;
|
|
|
|
- unsigned long fpregs[0] __attribute__ ((aligned(64)));
|
|
+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
|
|
+ __attribute__ ((aligned(64)));
|
|
};
|
|
|
|
#endif /* !(__ASSEMBLY__) */
|
|
@@ -102,6 +103,7 @@ struct thread_info {
|
|
#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
|
|
#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
|
|
#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
|
|
+#define FAULT_CODE_BAD_RA 0x20 /* Bad RA for sun4v */
|
|
|
|
#if PAGE_SHIFT == 13
|
|
#define THREAD_SIZE (2*PAGE_SIZE)
|
|
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
|
|
index 3c3c89f..7f9bab2 100644
|
|
--- a/arch/sparc/include/asm/tlbflush_64.h
|
|
+++ b/arch/sparc/include/asm/tlbflush_64.h
|
|
@@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
|
|
{
|
|
}
|
|
|
|
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
|
|
+
|
|
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
|
|
|
|
extern void flush_tlb_pending(void);
|
|
@@ -48,11 +50,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
|
|
|
|
#ifndef CONFIG_SMP
|
|
|
|
-#define flush_tlb_kernel_range(start,end) \
|
|
-do { flush_tsb_kernel_range(start,end); \
|
|
- __flush_tlb_kernel_range(start,end); \
|
|
-} while (0)
|
|
-
|
|
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
|
|
{
|
|
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
|
|
@@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
|
|
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
|
|
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
|
|
|
|
-#define flush_tlb_kernel_range(start, end) \
|
|
-do { flush_tsb_kernel_range(start,end); \
|
|
- smp_flush_tlb_kernel_range(start, end); \
|
|
-} while (0)
|
|
-
|
|
#define global_flush_tlb_page(mm, vaddr) \
|
|
smp_flush_tlb_page(mm, vaddr)
|
|
|
|
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
|
|
index 2230f80..ecb49cf 100644
|
|
--- a/arch/sparc/include/asm/tsb.h
|
|
+++ b/arch/sparc/include/asm/tsb.h
|
|
@@ -133,9 +133,24 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
sub TSB, 0x8, TSB; \
|
|
TSB_STORE(TSB, TAG);
|
|
|
|
- /* Do a kernel page table walk. Leaves physical PTE pointer in
|
|
- * REG1. Jumps to FAIL_LABEL on early page table walk termination.
|
|
- * VADDR will not be clobbered, but REG2 will.
|
|
+ /* Do a kernel page table walk. Leaves valid PTE value in
|
|
+ * REG1. Jumps to FAIL_LABEL on early page table walk
|
|
+ * termination. VADDR will not be clobbered, but REG2 will.
|
|
+ *
|
|
+ * There are two masks we must apply to propagate bits from
|
|
+ * the virtual address into the PTE physical address field
|
|
+ * when dealing with huge pages. This is because the page
|
|
+ * table boundaries do not match the huge page size(s) the
|
|
+ * hardware supports.
|
|
+ *
|
|
+ * In these cases we propagate the bits that are below the
|
|
+ * page table level where we saw the huge page mapping, but
|
|
+ * are still within the relevant physical bits for the huge
|
|
+ * page size in question. So for PMD mappings (which fall on
|
|
+ * bit 23, for 8MB per PMD) we must propagate bit 22 for a
|
|
+ * 4MB huge page. For huge PUDs (which fall on bit 33, for
|
|
+ * 8GB per PUD), we have to accomodate 256MB and 2GB huge
|
|
+ * pages. So for those we propagate bits 32 to 28.
|
|
*/
|
|
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
|
|
sethi %hi(swapper_pg_dir), REG1; \
|
|
@@ -145,15 +160,40 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
andn REG2, 0x7, REG2; \
|
|
ldx [REG1 + REG2], REG1; \
|
|
brz,pn REG1, FAIL_LABEL; \
|
|
- sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
|
|
+ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
|
|
srlx REG2, 64 - PAGE_SHIFT, REG2; \
|
|
andn REG2, 0x7, REG2; \
|
|
ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
|
|
brz,pn REG1, FAIL_LABEL; \
|
|
- sllx VADDR, 64 - PMD_SHIFT, REG2; \
|
|
+ sethi %uhi(_PAGE_PUD_HUGE), REG2; \
|
|
+ brz,pn REG1, FAIL_LABEL; \
|
|
+ sllx REG2, 32, REG2; \
|
|
+ andcc REG1, REG2, %g0; \
|
|
+ sethi %hi(0xf8000000), REG2; \
|
|
+ bne,pt %xcc, 697f; \
|
|
+ sllx REG2, 1, REG2; \
|
|
+ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
|
|
srlx REG2, 64 - PAGE_SHIFT, REG2; \
|
|
andn REG2, 0x7, REG2; \
|
|
- add REG1, REG2, REG1;
|
|
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
|
|
+ sethi %uhi(_PAGE_PMD_HUGE), REG2; \
|
|
+ brz,pn REG1, FAIL_LABEL; \
|
|
+ sllx REG2, 32, REG2; \
|
|
+ andcc REG1, REG2, %g0; \
|
|
+ be,pn %xcc, 698f; \
|
|
+ sethi %hi(0x400000), REG2; \
|
|
+697: brgez,pn REG1, FAIL_LABEL; \
|
|
+ andn REG1, REG2, REG1; \
|
|
+ and VADDR, REG2, REG2; \
|
|
+ ba,pt %xcc, 699f; \
|
|
+ or REG1, REG2, REG1; \
|
|
+698: sllx VADDR, 64 - PMD_SHIFT, REG2; \
|
|
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
|
|
+ andn REG2, 0x7, REG2; \
|
|
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
|
|
+ brgez,pn REG1, FAIL_LABEL; \
|
|
+ nop; \
|
|
+699:
|
|
|
|
/* PMD has been loaded into REG1, interpret the value, seeing
|
|
* if it is a HUGE PMD or a normal one. If it is not valid
|
|
@@ -171,7 +211,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
andcc REG1, REG2, %g0; \
|
|
be,pt %xcc, 700f; \
|
|
sethi %hi(4 * 1024 * 1024), REG2; \
|
|
- andn REG1, REG2, REG1; \
|
|
+ brgez,pn REG1, FAIL_LABEL; \
|
|
+ andn REG1, REG2, REG1; \
|
|
and VADDR, REG2, REG2; \
|
|
brlz,pt REG1, PTE_LABEL; \
|
|
or REG1, REG2, REG1; \
|
|
@@ -197,6 +238,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
andn REG2, 0x7, REG2; \
|
|
ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
|
|
brz,pn REG1, FAIL_LABEL; \
|
|
+ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
|
|
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
|
|
+ andn REG2, 0x7, REG2; \
|
|
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
|
|
+ brz,pn REG1, FAIL_LABEL; \
|
|
sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
|
|
srlx REG2, 64 - PAGE_SHIFT, REG2; \
|
|
andn REG2, 0x7, REG2; \
|
|
@@ -245,8 +291,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
(KERNEL_TSB_SIZE_BYTES / 16)
|
|
#define KERNEL_TSB4M_NENTRIES 4096
|
|
|
|
-#define KTSB_PHYS_SHIFT 15
|
|
-
|
|
/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
|
|
* on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
|
|
* and the found TTE will be left in REG1. REG3 and REG4 must
|
|
@@ -255,17 +299,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
* VADDR and TAG will be preserved and not clobbered by this macro.
|
|
*/
|
|
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
|
|
-661: sethi %hi(swapper_tsb), REG1; \
|
|
- or REG1, %lo(swapper_tsb), REG1; \
|
|
+661: sethi %uhi(swapper_tsb), REG1; \
|
|
+ sethi %hi(swapper_tsb), REG2; \
|
|
+ or REG1, %ulo(swapper_tsb), REG1; \
|
|
+ or REG2, %lo(swapper_tsb), REG2; \
|
|
.section .swapper_tsb_phys_patch, "ax"; \
|
|
.word 661b; \
|
|
.previous; \
|
|
-661: nop; \
|
|
- .section .tsb_ldquad_phys_patch, "ax"; \
|
|
- .word 661b; \
|
|
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
|
|
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
|
|
- .previous; \
|
|
+ sllx REG1, 32, REG1; \
|
|
+ or REG1, REG2, REG1; \
|
|
srlx VADDR, PAGE_SHIFT, REG2; \
|
|
and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
|
|
sllx REG2, 4, REG2; \
|
|
@@ -280,17 +322,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
|
|
* we can make use of that for the index computation.
|
|
*/
|
|
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
|
|
-661: sethi %hi(swapper_4m_tsb), REG1; \
|
|
- or REG1, %lo(swapper_4m_tsb), REG1; \
|
|
+661: sethi %uhi(swapper_4m_tsb), REG1; \
|
|
+ sethi %hi(swapper_4m_tsb), REG2; \
|
|
+ or REG1, %ulo(swapper_4m_tsb), REG1; \
|
|
+ or REG2, %lo(swapper_4m_tsb), REG2; \
|
|
.section .swapper_4m_tsb_phys_patch, "ax"; \
|
|
.word 661b; \
|
|
.previous; \
|
|
-661: nop; \
|
|
- .section .tsb_ldquad_phys_patch, "ax"; \
|
|
- .word 661b; \
|
|
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
|
|
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
|
|
- .previous; \
|
|
+ sllx REG1, 32, REG1; \
|
|
+ or REG1, REG2, REG1; \
|
|
and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
|
|
sllx REG2, 4, REG2; \
|
|
add REG1, REG2, REG2; \
|
|
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
|
|
index 432afa8..55841c1 100644
|
|
--- a/arch/sparc/include/asm/vio.h
|
|
+++ b/arch/sparc/include/asm/vio.h
|
|
@@ -118,12 +118,18 @@ struct vio_disk_attr_info {
|
|
u8 vdisk_type;
|
|
#define VD_DISK_TYPE_SLICE 0x01 /* Slice in block device */
|
|
#define VD_DISK_TYPE_DISK 0x02 /* Entire block device */
|
|
- u16 resv1;
|
|
+ u8 vdisk_mtype; /* v1.1 */
|
|
+#define VD_MEDIA_TYPE_FIXED 0x01 /* Fixed device */
|
|
+#define VD_MEDIA_TYPE_CD 0x02 /* CD Device */
|
|
+#define VD_MEDIA_TYPE_DVD 0x03 /* DVD Device */
|
|
+ u8 resv1;
|
|
u32 vdisk_block_size;
|
|
u64 operations;
|
|
- u64 vdisk_size;
|
|
+ u64 vdisk_size; /* v1.1 */
|
|
u64 max_xfer_size;
|
|
- u64 resv2[2];
|
|
+ u32 phys_block_size; /* v1.2 */
|
|
+ u32 resv2;
|
|
+ u64 resv3[1];
|
|
};
|
|
|
|
struct vio_disk_desc {
|
|
@@ -259,7 +265,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
|
|
unsigned int ring_size)
|
|
{
|
|
return (dr->pending -
|
|
- ((dr->prod - dr->cons) & (ring_size - 1)));
|
|
+ ((dr->prod - dr->cons) & (ring_size - 1)) - 1);
|
|
}
|
|
|
|
#define VIO_MAX_TYPE_LEN 32
|
|
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
|
|
index 39ca301..50d6f16 100644
|
|
--- a/arch/sparc/include/asm/visasm.h
|
|
+++ b/arch/sparc/include/asm/visasm.h
|
|
@@ -28,18 +28,20 @@
|
|
* Must preserve %o5 between VISEntryHalf and VISExitHalf */
|
|
|
|
#define VISEntryHalf \
|
|
+ VISEntry
|
|
+
|
|
+#define VISExitHalf \
|
|
+ VISExit
|
|
+
|
|
+#define VISEntryHalfFast(fail_label) \
|
|
rd %fprs, %o5; \
|
|
andcc %o5, FPRS_FEF, %g0; \
|
|
be,pt %icc, 297f; \
|
|
- sethi %hi(298f), %g7; \
|
|
- sethi %hi(VISenterhalf), %g1; \
|
|
- jmpl %g1 + %lo(VISenterhalf), %g0; \
|
|
- or %g7, %lo(298f), %g7; \
|
|
- clr %o5; \
|
|
-297: wr %o5, FPRS_FEF, %fprs; \
|
|
-298:
|
|
+ nop; \
|
|
+ ba,a,pt %xcc, fail_label; \
|
|
+297: wr %o5, FPRS_FEF, %fprs;
|
|
|
|
-#define VISExitHalf \
|
|
+#define VISExitHalfFast \
|
|
wr %o5, 0, %fprs;
|
|
|
|
#ifndef __ASSEMBLY__
|
|
diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h
|
|
index a34ad07..4c7c12d 100644
|
|
--- a/arch/sparc/include/uapi/asm/swab.h
|
|
+++ b/arch/sparc/include/uapi/asm/swab.h
|
|
@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
|
|
{
|
|
__u16 ret;
|
|
|
|
- __asm__ __volatile__ ("lduha [%1] %2, %0"
|
|
+ __asm__ __volatile__ ("lduha [%2] %3, %0"
|
|
: "=r" (ret)
|
|
- : "r" (addr), "i" (ASI_PL));
|
|
+ : "m" (*addr), "r" (addr), "i" (ASI_PL));
|
|
return ret;
|
|
}
|
|
#define __arch_swab16p __arch_swab16p
|
|
@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
|
|
{
|
|
__u32 ret;
|
|
|
|
- __asm__ __volatile__ ("lduwa [%1] %2, %0"
|
|
+ __asm__ __volatile__ ("lduwa [%2] %3, %0"
|
|
: "=r" (ret)
|
|
- : "r" (addr), "i" (ASI_PL));
|
|
+ : "m" (*addr), "r" (addr), "i" (ASI_PL));
|
|
return ret;
|
|
}
|
|
#define __arch_swab32p __arch_swab32p
|
|
@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
|
|
{
|
|
__u64 ret;
|
|
|
|
- __asm__ __volatile__ ("ldxa [%1] %2, %0"
|
|
+ __asm__ __volatile__ ("ldxa [%2] %3, %0"
|
|
: "=r" (ret)
|
|
- : "r" (addr), "i" (ASI_PL));
|
|
+ : "m" (*addr), "r" (addr), "i" (ASI_PL));
|
|
return ret;
|
|
}
|
|
#define __arch_swab64p __arch_swab64p
|
|
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
|
|
index 5c51258..52e10de 100644
|
|
--- a/arch/sparc/kernel/cpu.c
|
|
+++ b/arch/sparc/kernel/cpu.c
|
|
@@ -493,6 +493,18 @@ static void __init sun4v_cpu_probe(void)
|
|
sparc_pmu_type = "niagara5";
|
|
break;
|
|
|
|
+ case SUN4V_CHIP_SPARC_M6:
|
|
+ sparc_cpu_type = "SPARC-M6";
|
|
+ sparc_fpu_type = "SPARC-M6 integrated FPU";
|
|
+ sparc_pmu_type = "sparc-m6";
|
|
+ break;
|
|
+
|
|
+ case SUN4V_CHIP_SPARC_M7:
|
|
+ sparc_cpu_type = "SPARC-M7";
|
|
+ sparc_fpu_type = "SPARC-M7 integrated FPU";
|
|
+ sparc_pmu_type = "sparc-m7";
|
|
+ break;
|
|
+
|
|
case SUN4V_CHIP_SPARC64X:
|
|
sparc_cpu_type = "SPARC64-X";
|
|
sparc_fpu_type = "SPARC64-X integrated FPU";
|
|
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
|
|
index de1c844..e69ec0e 100644
|
|
--- a/arch/sparc/kernel/cpumap.c
|
|
+++ b/arch/sparc/kernel/cpumap.c
|
|
@@ -326,6 +326,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
|
|
case SUN4V_CHIP_NIAGARA3:
|
|
case SUN4V_CHIP_NIAGARA4:
|
|
case SUN4V_CHIP_NIAGARA5:
|
|
+ case SUN4V_CHIP_SPARC_M6:
|
|
+ case SUN4V_CHIP_SPARC_M7:
|
|
case SUN4V_CHIP_SPARC64X:
|
|
rover_inc_table = niagara_iterate_method;
|
|
break;
|
|
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
|
|
index dff60ab..f87a55d 100644
|
|
--- a/arch/sparc/kernel/ds.c
|
|
+++ b/arch/sparc/kernel/ds.c
|
|
@@ -1200,14 +1200,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
|
|
ds_cfg.tx_irq = vdev->tx_irq;
|
|
ds_cfg.rx_irq = vdev->rx_irq;
|
|
|
|
- lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
|
|
+ lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
|
|
if (IS_ERR(lp)) {
|
|
err = PTR_ERR(lp);
|
|
goto out_free_ds_states;
|
|
}
|
|
dp->lp = lp;
|
|
|
|
- err = ldc_bind(lp, "DS");
|
|
+ err = ldc_bind(lp);
|
|
if (err)
|
|
goto out_free_ldc;
|
|
|
|
diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
|
|
index b2c2c5b..d668ca14 100644
|
|
--- a/arch/sparc/kernel/dtlb_prot.S
|
|
+++ b/arch/sparc/kernel/dtlb_prot.S
|
|
@@ -24,11 +24,11 @@
|
|
mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
|
|
|
|
/* PROT ** ICACHE line 2: More real fault processing */
|
|
+ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
|
|
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
|
|
- ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
|
|
- ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
|
|
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
|
|
- nop
|
|
+ ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
|
|
+ nop
|
|
nop
|
|
nop
|
|
nop
|
|
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
|
|
index 140966f..c88ffb9 100644
|
|
--- a/arch/sparc/kernel/entry.h
|
|
+++ b/arch/sparc/kernel/entry.h
|
|
@@ -66,13 +66,10 @@ struct pause_patch_entry {
|
|
extern struct pause_patch_entry __pause_3insn_patch,
|
|
__pause_3insn_patch_end;
|
|
|
|
-extern void __init per_cpu_patch(void);
|
|
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
|
|
struct sun4v_1insn_patch_entry *);
|
|
extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
|
|
struct sun4v_2insn_patch_entry *);
|
|
-extern void __init sun4v_patch(void);
|
|
-extern void __init boot_cpu_id_too_large(int cpu);
|
|
extern unsigned int dcache_parity_tl1_occurred;
|
|
extern unsigned int icache_parity_tl1_occurred;
|
|
|
|
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
|
|
index 26b706a..3d61fca 100644
|
|
--- a/arch/sparc/kernel/head_64.S
|
|
+++ b/arch/sparc/kernel/head_64.S
|
|
@@ -282,8 +282,8 @@ sun4v_chip_type:
|
|
stx %l2, [%l4 + 0x0]
|
|
ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
|
|
/* 4MB align */
|
|
- srlx %l3, 22, %l3
|
|
- sllx %l3, 22, %l3
|
|
+ srlx %l3, ILOG2_4MB, %l3
|
|
+ sllx %l3, ILOG2_4MB, %l3
|
|
stx %l3, [%l4 + 0x8]
|
|
|
|
/* Leave service as-is, "call-method" */
|
|
@@ -427,6 +427,12 @@ sun4v_chip_type:
|
|
cmp %g2, '5'
|
|
be,pt %xcc, 5f
|
|
mov SUN4V_CHIP_NIAGARA5, %g4
|
|
+ cmp %g2, '6'
|
|
+ be,pt %xcc, 5f
|
|
+ mov SUN4V_CHIP_SPARC_M6, %g4
|
|
+ cmp %g2, '7'
|
|
+ be,pt %xcc, 5f
|
|
+ mov SUN4V_CHIP_SPARC_M7, %g4
|
|
ba,pt %xcc, 49f
|
|
nop
|
|
|
|
@@ -585,6 +591,12 @@ niagara_tlb_fixup:
|
|
cmp %g1, SUN4V_CHIP_NIAGARA5
|
|
be,pt %xcc, niagara4_patch
|
|
nop
|
|
+ cmp %g1, SUN4V_CHIP_SPARC_M6
|
|
+ be,pt %xcc, niagara4_patch
|
|
+ nop
|
|
+ cmp %g1, SUN4V_CHIP_SPARC_M7
|
|
+ be,pt %xcc, niagara4_patch
|
|
+ nop
|
|
|
|
call generic_patch_copyops
|
|
nop
|
|
@@ -660,14 +672,12 @@ tlb_fixup_done:
|
|
sethi %hi(init_thread_union), %g6
|
|
or %g6, %lo(init_thread_union), %g6
|
|
ldx [%g6 + TI_TASK], %g4
|
|
- mov %sp, %l6
|
|
|
|
wr %g0, ASI_P, %asi
|
|
mov 1, %g1
|
|
sllx %g1, THREAD_SHIFT, %g1
|
|
sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
|
|
add %g6, %g1, %sp
|
|
- mov 0, %fp
|
|
|
|
/* Set per-cpu pointer initially to zero, this makes
|
|
* the boot-cpu use the in-kernel-image per-cpu areas
|
|
@@ -694,44 +704,14 @@ tlb_fixup_done:
|
|
nop
|
|
#endif
|
|
|
|
- mov %l6, %o1 ! OpenPROM stack
|
|
call prom_init
|
|
mov %l7, %o0 ! OpenPROM cif handler
|
|
|
|
- /* Initialize current_thread_info()->cpu as early as possible.
|
|
- * In order to do that accurately we have to patch up the get_cpuid()
|
|
- * assembler sequences. And that, in turn, requires that we know
|
|
- * if we are on a Starfire box or not. While we're here, patch up
|
|
- * the sun4v sequences as well.
|
|
+ /* To create a one-register-window buffer between the kernel's
|
|
+ * initial stack and the last stack frame we use from the firmware,
|
|
+ * do the rest of the boot from a C helper function.
|
|
*/
|
|
- call check_if_starfire
|
|
- nop
|
|
- call per_cpu_patch
|
|
- nop
|
|
- call sun4v_patch
|
|
- nop
|
|
-
|
|
-#ifdef CONFIG_SMP
|
|
- call hard_smp_processor_id
|
|
- nop
|
|
- cmp %o0, NR_CPUS
|
|
- blu,pt %xcc, 1f
|
|
- nop
|
|
- call boot_cpu_id_too_large
|
|
- nop
|
|
- /* Not reached... */
|
|
-
|
|
-1:
|
|
-#else
|
|
- mov 0, %o0
|
|
-#endif
|
|
- sth %o0, [%g6 + TI_CPU]
|
|
-
|
|
- call prom_init_report
|
|
- nop
|
|
-
|
|
- /* Off we go.... */
|
|
- call start_kernel
|
|
+ call start_early_boot
|
|
nop
|
|
/* Not reached... */
|
|
|
|
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
|
|
index c0a2de0..5c55145 100644
|
|
--- a/arch/sparc/kernel/hvapi.c
|
|
+++ b/arch/sparc/kernel/hvapi.c
|
|
@@ -46,6 +46,7 @@ static struct api_info api_table[] = {
|
|
{ .group = HV_GRP_VF_CPU, },
|
|
{ .group = HV_GRP_KT_CPU, },
|
|
{ .group = HV_GRP_VT_CPU, },
|
|
+ { .group = HV_GRP_T5_CPU, },
|
|
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
|
|
};
|
|
|
|
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
|
|
index f3ab509..caedf83 100644
|
|
--- a/arch/sparc/kernel/hvcalls.S
|
|
+++ b/arch/sparc/kernel/hvcalls.S
|
|
@@ -821,3 +821,19 @@ ENTRY(sun4v_vt_set_perfreg)
|
|
retl
|
|
nop
|
|
ENDPROC(sun4v_vt_set_perfreg)
|
|
+
|
|
+ENTRY(sun4v_t5_get_perfreg)
|
|
+ mov %o1, %o4
|
|
+ mov HV_FAST_T5_GET_PERFREG, %o5
|
|
+ ta HV_FAST_TRAP
|
|
+ stx %o1, [%o4]
|
|
+ retl
|
|
+ nop
|
|
+ENDPROC(sun4v_t5_get_perfreg)
|
|
+
|
|
+ENTRY(sun4v_t5_set_perfreg)
|
|
+ mov HV_FAST_T5_SET_PERFREG, %o5
|
|
+ ta HV_FAST_TRAP
|
|
+ retl
|
|
+ nop
|
|
+ENDPROC(sun4v_t5_set_perfreg)
|
|
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
|
|
index b7ddcdd..cdbfec2 100644
|
|
--- a/arch/sparc/kernel/hvtramp.S
|
|
+++ b/arch/sparc/kernel/hvtramp.S
|
|
@@ -109,7 +109,6 @@ hv_cpu_startup:
|
|
sllx %g5, THREAD_SHIFT, %g5
|
|
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
|
|
add %g6, %g5, %sp
|
|
- mov 0, %fp
|
|
|
|
call init_irqwork_curcpu
|
|
nop
|
|
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
|
|
index e7e215d..c2d81ad 100644
|
|
--- a/arch/sparc/kernel/ioport.c
|
|
+++ b/arch/sparc/kernel/ioport.c
|
|
@@ -278,7 +278,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
|
|
}
|
|
|
|
order = get_order(len_total);
|
|
- if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
|
|
+ va = __get_free_pages(gfp, order);
|
|
+ if (va == 0)
|
|
goto err_nopages;
|
|
|
|
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
|
|
@@ -443,7 +444,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
|
|
}
|
|
|
|
order = get_order(len_total);
|
|
- va = (void *) __get_free_pages(GFP_KERNEL, order);
|
|
+ va = (void *) __get_free_pages(gfp, order);
|
|
if (va == NULL) {
|
|
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
|
|
goto err_nopages;
|
|
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
|
|
index 666193f..4033c23 100644
|
|
--- a/arch/sparc/kernel/irq_64.c
|
|
+++ b/arch/sparc/kernel/irq_64.c
|
|
@@ -47,8 +47,6 @@
|
|
#include "cpumap.h"
|
|
#include "kstack.h"
|
|
|
|
-#define NUM_IVECS (IMAP_INR + 1)
|
|
-
|
|
struct ino_bucket *ivector_table;
|
|
unsigned long ivector_table_pa;
|
|
|
|
@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
|
|
|
|
#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
|
|
|
|
-static struct {
|
|
- unsigned int dev_handle;
|
|
- unsigned int dev_ino;
|
|
- unsigned int in_use;
|
|
-} irq_table[NR_IRQS];
|
|
-static DEFINE_SPINLOCK(irq_alloc_lock);
|
|
+static unsigned long hvirq_major __initdata;
|
|
+static int __init early_hvirq_major(char *p)
|
|
+{
|
|
+ int rc = kstrtoul(p, 10, &hvirq_major);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+early_param("hvirq", early_hvirq_major);
|
|
+
|
|
+static int hv_irq_version;
|
|
+
|
|
+/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
|
|
+ * based interfaces, but:
|
|
+ *
|
|
+ * 1) Several OSs, Solaris and Linux included, use them even when only
|
|
+ * negotiating version 1.0 (or failing to negotiate at all). So the
|
|
+ * hypervisor has a workaround that provides the VIRQ interfaces even
|
|
+ * when only verion 1.0 of the API is in use.
|
|
+ *
|
|
+ * 2) Second, and more importantly, with major version 2.0 these VIRQ
|
|
+ * interfaces only were actually hooked up for LDC interrupts, even
|
|
+ * though the Hypervisor specification clearly stated:
|
|
+ *
|
|
+ * The new interrupt API functions will be available to a guest
|
|
+ * when it negotiates version 2.0 in the interrupt API group 0x2. When
|
|
+ * a guest negotiates version 2.0, all interrupt sources will only
|
|
+ * support using the cookie interface, and any attempt to use the
|
|
+ * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
|
|
+ * ENOTSUPPORTED error being returned.
|
|
+ *
|
|
+ * with an emphasis on "all interrupt sources".
|
|
+ *
|
|
+ * To correct this, major version 3.0 was created which does actually
|
|
+ * support VIRQs for all interrupt sources (not just LDC devices). So
|
|
+ * if we want to move completely over the cookie based VIRQs we must
|
|
+ * negotiate major version 3.0 or later of HV_GRP_INTR.
|
|
+ */
|
|
+static bool sun4v_cookie_only_virqs(void)
|
|
+{
|
|
+ if (hv_irq_version >= 3)
|
|
+ return true;
|
|
+ return false;
|
|
+}
|
|
|
|
-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
|
|
+static void __init irq_init_hv(void)
|
|
{
|
|
- unsigned long flags;
|
|
- unsigned char ent;
|
|
+ unsigned long hv_error, major, minor = 0;
|
|
+
|
|
+ if (tlb_type != hypervisor)
|
|
+ return;
|
|
|
|
- BUILD_BUG_ON(NR_IRQS >= 256);
|
|
+ if (hvirq_major)
|
|
+ major = hvirq_major;
|
|
+ else
|
|
+ major = 3;
|
|
|
|
- spin_lock_irqsave(&irq_alloc_lock, flags);
|
|
+ hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
|
|
+ if (!hv_error)
|
|
+ hv_irq_version = major;
|
|
+ else
|
|
+ hv_irq_version = 1;
|
|
|
|
- for (ent = 1; ent < NR_IRQS; ent++) {
|
|
- if (!irq_table[ent].in_use)
|
|
+ pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
|
|
+ hv_irq_version,
|
|
+ sun4v_cookie_only_virqs() ? "enabled" : "disabled");
|
|
+}
|
|
+
|
|
+/* This function is for the timer interrupt.*/
|
|
+int __init arch_probe_nr_irqs(void)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+#define DEFAULT_NUM_IVECS (0xfffU)
|
|
+static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
|
|
+#define NUM_IVECS (nr_ivec)
|
|
+
|
|
+static unsigned int __init size_nr_ivec(void)
|
|
+{
|
|
+ if (tlb_type == hypervisor) {
|
|
+ switch (sun4v_chip_type) {
|
|
+ /* Athena's devhandle|devino is large.*/
|
|
+ case SUN4V_CHIP_SPARC64X:
|
|
+ nr_ivec = 0xffff;
|
|
break;
|
|
+ }
|
|
}
|
|
- if (ent >= NR_IRQS) {
|
|
- printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
|
|
- ent = 0;
|
|
- } else {
|
|
- irq_table[ent].dev_handle = dev_handle;
|
|
- irq_table[ent].dev_ino = dev_ino;
|
|
- irq_table[ent].in_use = 1;
|
|
- }
|
|
+ return nr_ivec;
|
|
+}
|
|
+
|
|
+struct irq_handler_data {
|
|
+ union {
|
|
+ struct {
|
|
+ unsigned int dev_handle;
|
|
+ unsigned int dev_ino;
|
|
+ };
|
|
+ unsigned long sysino;
|
|
+ };
|
|
+ struct ino_bucket bucket;
|
|
+ unsigned long iclr;
|
|
+ unsigned long imap;
|
|
+};
|
|
+
|
|
+static inline unsigned int irq_data_to_handle(struct irq_data *data)
|
|
+{
|
|
+ struct irq_handler_data *ihd = data->handler_data;
|
|
+
|
|
+ return ihd->dev_handle;
|
|
+}
|
|
+
|
|
+static inline unsigned int irq_data_to_ino(struct irq_data *data)
|
|
+{
|
|
+ struct irq_handler_data *ihd = data->handler_data;
|
|
|
|
- spin_unlock_irqrestore(&irq_alloc_lock, flags);
|
|
+ return ihd->dev_ino;
|
|
+}
|
|
+
|
|
+static inline unsigned long irq_data_to_sysino(struct irq_data *data)
|
|
+{
|
|
+ struct irq_handler_data *ihd = data->handler_data;
|
|
|
|
- return ent;
|
|
+ return ihd->sysino;
|
|
}
|
|
|
|
-#ifdef CONFIG_PCI_MSI
|
|
void irq_free(unsigned int irq)
|
|
{
|
|
- unsigned long flags;
|
|
+ void *data = irq_get_handler_data(irq);
|
|
|
|
- if (irq >= NR_IRQS)
|
|
- return;
|
|
+ kfree(data);
|
|
+ irq_set_handler_data(irq, NULL);
|
|
+ irq_free_descs(irq, 1);
|
|
+}
|
|
|
|
- spin_lock_irqsave(&irq_alloc_lock, flags);
|
|
+unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
|
|
+{
|
|
+ int irq;
|
|
|
|
- irq_table[irq].in_use = 0;
|
|
+ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
|
|
+ if (irq <= 0)
|
|
+ goto out;
|
|
|
|
- spin_unlock_irqrestore(&irq_alloc_lock, flags);
|
|
+ return irq;
|
|
+out:
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
|
|
+{
|
|
+ unsigned long hv_err, cookie;
|
|
+ struct ino_bucket *bucket;
|
|
+ unsigned int irq = 0U;
|
|
+
|
|
+ hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
|
|
+ if (hv_err) {
|
|
+ pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (cookie & ((1UL << 63UL))) {
|
|
+ cookie = ~cookie;
|
|
+ bucket = (struct ino_bucket *) __va(cookie);
|
|
+ irq = bucket->__irq;
|
|
+ }
|
|
+out:
|
|
+ return irq;
|
|
+}
|
|
+
|
|
+static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
|
|
+{
|
|
+ unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
|
|
+ struct ino_bucket *bucket;
|
|
+ unsigned int irq;
|
|
+
|
|
+ bucket = &ivector_table[sysino];
|
|
+ irq = bucket_get_irq(__pa(bucket));
|
|
+
|
|
+ return irq;
|
|
+}
|
|
+
|
|
+void ack_bad_irq(unsigned int irq)
|
|
+{
|
|
+ pr_crit("BAD IRQ ack %d\n", irq);
|
|
+}
|
|
+
|
|
+void irq_install_pre_handler(int irq,
|
|
+ void (*func)(unsigned int, void *, void *),
|
|
+ void *arg1, void *arg2)
|
|
+{
|
|
+ pr_warn("IRQ pre handler NOT supported.\n");
|
|
}
|
|
-#endif
|
|
|
|
/*
|
|
* /proc/interrupts printing:
|
|
@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
|
|
return tid;
|
|
}
|
|
|
|
-struct irq_handler_data {
|
|
- unsigned long iclr;
|
|
- unsigned long imap;
|
|
-
|
|
- void (*pre_handler)(unsigned int, void *, void *);
|
|
- void *arg1;
|
|
- void *arg2;
|
|
-};
|
|
-
|
|
#ifdef CONFIG_SMP
|
|
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
|
|
{
|
|
@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data)
|
|
|
|
static void sun4v_irq_enable(struct irq_data *data)
|
|
{
|
|
- unsigned int ino = irq_table[data->irq].dev_ino;
|
|
unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
|
|
+ unsigned int ino = irq_data_to_sysino(data);
|
|
int err;
|
|
|
|
err = sun4v_intr_settarget(ino, cpuid);
|
|
@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data)
|
|
static int sun4v_set_affinity(struct irq_data *data,
|
|
const struct cpumask *mask, bool force)
|
|
{
|
|
- unsigned int ino = irq_table[data->irq].dev_ino;
|
|
unsigned long cpuid = irq_choose_cpu(data->irq, mask);
|
|
+ unsigned int ino = irq_data_to_sysino(data);
|
|
int err;
|
|
|
|
err = sun4v_intr_settarget(ino, cpuid);
|
|
@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data,
|
|
|
|
static void sun4v_irq_disable(struct irq_data *data)
|
|
{
|
|
- unsigned int ino = irq_table[data->irq].dev_ino;
|
|
+ unsigned int ino = irq_data_to_sysino(data);
|
|
int err;
|
|
|
|
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
|
|
@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data)
|
|
|
|
static void sun4v_irq_eoi(struct irq_data *data)
|
|
{
|
|
- unsigned int ino = irq_table[data->irq].dev_ino;
|
|
+ unsigned int ino = irq_data_to_sysino(data);
|
|
int err;
|
|
|
|
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
|
|
@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data)
|
|
|
|
static void sun4v_virq_enable(struct irq_data *data)
|
|
{
|
|
- unsigned long cpuid, dev_handle, dev_ino;
|
|
+ unsigned long dev_handle = irq_data_to_handle(data);
|
|
+ unsigned long dev_ino = irq_data_to_ino(data);
|
|
+ unsigned long cpuid;
|
|
int err;
|
|
|
|
cpuid = irq_choose_cpu(data->irq, data->affinity);
|
|
|
|
- dev_handle = irq_table[data->irq].dev_handle;
|
|
- dev_ino = irq_table[data->irq].dev_ino;
|
|
-
|
|
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
|
|
if (err != HV_EOK)
|
|
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
|
|
@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data)
|
|
static int sun4v_virt_set_affinity(struct irq_data *data,
|
|
const struct cpumask *mask, bool force)
|
|
{
|
|
- unsigned long cpuid, dev_handle, dev_ino;
|
|
+ unsigned long dev_handle = irq_data_to_handle(data);
|
|
+ unsigned long dev_ino = irq_data_to_ino(data);
|
|
+ unsigned long cpuid;
|
|
int err;
|
|
|
|
cpuid = irq_choose_cpu(data->irq, mask);
|
|
|
|
- dev_handle = irq_table[data->irq].dev_handle;
|
|
- dev_ino = irq_table[data->irq].dev_ino;
|
|
-
|
|
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
|
|
if (err != HV_EOK)
|
|
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
|
|
@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
|
|
|
|
static void sun4v_virq_disable(struct irq_data *data)
|
|
{
|
|
- unsigned long dev_handle, dev_ino;
|
|
+ unsigned long dev_handle = irq_data_to_handle(data);
|
|
+ unsigned long dev_ino = irq_data_to_ino(data);
|
|
int err;
|
|
|
|
- dev_handle = irq_table[data->irq].dev_handle;
|
|
- dev_ino = irq_table[data->irq].dev_ino;
|
|
|
|
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
|
|
HV_INTR_DISABLED);
|
|
@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data)
|
|
|
|
static void sun4v_virq_eoi(struct irq_data *data)
|
|
{
|
|
- unsigned long dev_handle, dev_ino;
|
|
+ unsigned long dev_handle = irq_data_to_handle(data);
|
|
+ unsigned long dev_ino = irq_data_to_ino(data);
|
|
int err;
|
|
|
|
- dev_handle = irq_table[data->irq].dev_handle;
|
|
- dev_ino = irq_table[data->irq].dev_ino;
|
|
-
|
|
err = sun4v_vintr_set_state(dev_handle, dev_ino,
|
|
HV_INTR_STATE_IDLE);
|
|
if (err != HV_EOK)
|
|
@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = {
|
|
.flags = IRQCHIP_EOI_IF_HANDLED,
|
|
};
|
|
|
|
-static void pre_flow_handler(struct irq_data *d)
|
|
-{
|
|
- struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
|
|
- unsigned int ino = irq_table[d->irq].dev_ino;
|
|
-
|
|
- handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
|
|
-}
|
|
-
|
|
-void irq_install_pre_handler(int irq,
|
|
- void (*func)(unsigned int, void *, void *),
|
|
- void *arg1, void *arg2)
|
|
-{
|
|
- struct irq_handler_data *handler_data = irq_get_handler_data(irq);
|
|
-
|
|
- handler_data->pre_handler = func;
|
|
- handler_data->arg1 = arg1;
|
|
- handler_data->arg2 = arg2;
|
|
-
|
|
- __irq_set_preflow_handler(irq, pre_flow_handler);
|
|
-}
|
|
-
|
|
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
|
|
{
|
|
- struct ino_bucket *bucket;
|
|
struct irq_handler_data *handler_data;
|
|
+ struct ino_bucket *bucket;
|
|
unsigned int irq;
|
|
int ino;
|
|
|
|
@@ -537,119 +641,166 @@ out:
|
|
return irq;
|
|
}
|
|
|
|
-static unsigned int sun4v_build_common(unsigned long sysino,
|
|
- struct irq_chip *chip)
|
|
+static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
|
|
+ void (*handler_data_init)(struct irq_handler_data *data,
|
|
+ u32 devhandle, unsigned int devino),
|
|
+ struct irq_chip *chip)
|
|
{
|
|
- struct ino_bucket *bucket;
|
|
- struct irq_handler_data *handler_data;
|
|
+ struct irq_handler_data *data;
|
|
unsigned int irq;
|
|
|
|
- BUG_ON(tlb_type != hypervisor);
|
|
+ irq = irq_alloc(devhandle, devino);
|
|
+ if (!irq)
|
|
+ goto out;
|
|
|
|
- bucket = &ivector_table[sysino];
|
|
- irq = bucket_get_irq(__pa(bucket));
|
|
- if (!irq) {
|
|
- irq = irq_alloc(0, sysino);
|
|
- bucket_set_irq(__pa(bucket), irq);
|
|
- irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
|
|
- "IVEC");
|
|
+ data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
|
|
+ if (unlikely(!data)) {
|
|
+ pr_err("IRQ handler data allocation failed.\n");
|
|
+ irq_free(irq);
|
|
+ irq = 0;
|
|
+ goto out;
|
|
}
|
|
|
|
- handler_data = irq_get_handler_data(irq);
|
|
- if (unlikely(handler_data))
|
|
- goto out;
|
|
+ irq_set_handler_data(irq, data);
|
|
+ handler_data_init(data, devhandle, devino);
|
|
+ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
|
|
+ data->imap = ~0UL;
|
|
+ data->iclr = ~0UL;
|
|
+out:
|
|
+ return irq;
|
|
+}
|
|
|
|
- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
|
|
- if (unlikely(!handler_data)) {
|
|
- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
|
|
- prom_halt();
|
|
- }
|
|
- irq_set_handler_data(irq, handler_data);
|
|
+static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
|
|
+ unsigned int devino)
|
|
+{
|
|
+ struct irq_handler_data *ihd = irq_get_handler_data(irq);
|
|
+ unsigned long hv_error, cookie;
|
|
|
|
- /* Catch accidental accesses to these things. IMAP/ICLR handling
|
|
- * is done by hypervisor calls on sun4v platforms, not by direct
|
|
- * register accesses.
|
|
+ /* handler_irq needs to find the irq. cookie is seen signed in
|
|
+ * sun4v_dev_mondo and treated as a non ivector_table delivery.
|
|
*/
|
|
- handler_data->imap = ~0UL;
|
|
- handler_data->iclr = ~0UL;
|
|
+ ihd->bucket.__irq = irq;
|
|
+ cookie = ~__pa(&ihd->bucket);
|
|
|
|
-out:
|
|
- return irq;
|
|
+ hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
|
|
+ if (hv_error)
|
|
+ pr_err("HV vintr set cookie failed = %ld\n", hv_error);
|
|
+
|
|
+ return hv_error;
|
|
}
|
|
|
|
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
|
|
+static void cookie_handler_data(struct irq_handler_data *data,
|
|
+ u32 devhandle, unsigned int devino)
|
|
{
|
|
- unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
|
|
+ data->dev_handle = devhandle;
|
|
+ data->dev_ino = devino;
|
|
+}
|
|
|
|
- return sun4v_build_common(sysino, &sun4v_irq);
|
|
+static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
|
|
+ struct irq_chip *chip)
|
|
+{
|
|
+ unsigned long hv_error;
|
|
+ unsigned int irq;
|
|
+
|
|
+ irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
|
|
+
|
|
+ hv_error = cookie_assign(irq, devhandle, devino);
|
|
+ if (hv_error) {
|
|
+ irq_free(irq);
|
|
+ irq = 0;
|
|
+ }
|
|
+
|
|
+ return irq;
|
|
}
|
|
|
|
-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
|
|
+static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
|
|
{
|
|
- struct irq_handler_data *handler_data;
|
|
- unsigned long hv_err, cookie;
|
|
- struct ino_bucket *bucket;
|
|
unsigned int irq;
|
|
|
|
- bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
|
|
- if (unlikely(!bucket))
|
|
- return 0;
|
|
+ irq = cookie_exists(devhandle, devino);
|
|
+ if (irq)
|
|
+ goto out;
|
|
|
|
- /* The only reference we store to the IRQ bucket is
|
|
- * by physical address which kmemleak can't see, tell
|
|
- * it that this object explicitly is not a leak and
|
|
- * should be scanned.
|
|
- */
|
|
- kmemleak_not_leak(bucket);
|
|
+ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
|
|
|
|
- __flush_dcache_range((unsigned long) bucket,
|
|
- ((unsigned long) bucket +
|
|
- sizeof(struct ino_bucket)));
|
|
+out:
|
|
+ return irq;
|
|
+}
|
|
|
|
- irq = irq_alloc(devhandle, devino);
|
|
+static void sysino_set_bucket(unsigned int irq)
|
|
+{
|
|
+ struct irq_handler_data *ihd = irq_get_handler_data(irq);
|
|
+ struct ino_bucket *bucket;
|
|
+ unsigned long sysino;
|
|
+
|
|
+ sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
|
|
+ BUG_ON(sysino >= nr_ivec);
|
|
+ bucket = &ivector_table[sysino];
|
|
bucket_set_irq(__pa(bucket), irq);
|
|
+}
|
|
|
|
- irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
|
|
- "IVEC");
|
|
+static void sysino_handler_data(struct irq_handler_data *data,
|
|
+ u32 devhandle, unsigned int devino)
|
|
+{
|
|
+ unsigned long sysino;
|
|
|
|
- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
|
|
- if (unlikely(!handler_data))
|
|
- return 0;
|
|
+ sysino = sun4v_devino_to_sysino(devhandle, devino);
|
|
+ data->sysino = sysino;
|
|
+}
|
|
|
|
- /* In order to make the LDC channel startup sequence easier,
|
|
- * especially wrt. locking, we do not let request_irq() enable
|
|
- * the interrupt.
|
|
- */
|
|
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
|
|
- irq_set_handler_data(irq, handler_data);
|
|
+static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
|
|
+ struct irq_chip *chip)
|
|
+{
|
|
+ unsigned int irq;
|
|
|
|
- /* Catch accidental accesses to these things. IMAP/ICLR handling
|
|
- * is done by hypervisor calls on sun4v platforms, not by direct
|
|
- * register accesses.
|
|
- */
|
|
- handler_data->imap = ~0UL;
|
|
- handler_data->iclr = ~0UL;
|
|
+ irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
|
|
+ if (!irq)
|
|
+ goto out;
|
|
|
|
- cookie = ~__pa(bucket);
|
|
- hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
|
|
- if (hv_err) {
|
|
- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
|
|
- "err=%lu\n", devhandle, devino, hv_err);
|
|
- prom_halt();
|
|
- }
|
|
+ sysino_set_bucket(irq);
|
|
+out:
|
|
+ return irq;
|
|
+}
|
|
|
|
+static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
|
|
+{
|
|
+ int irq;
|
|
+
|
|
+ irq = sysino_exists(devhandle, devino);
|
|
+ if (irq)
|
|
+ goto out;
|
|
+
|
|
+ irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
|
|
+out:
|
|
return irq;
|
|
}
|
|
|
|
-void ack_bad_irq(unsigned int irq)
|
|
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
|
|
{
|
|
- unsigned int ino = irq_table[irq].dev_ino;
|
|
+ unsigned int irq;
|
|
|
|
- if (!ino)
|
|
- ino = 0xdeadbeef;
|
|
+ if (sun4v_cookie_only_virqs())
|
|
+ irq = sun4v_build_cookie(devhandle, devino);
|
|
+ else
|
|
+ irq = sun4v_build_sysino(devhandle, devino);
|
|
|
|
- printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
|
|
- ino, irq);
|
|
+ return irq;
|
|
+}
|
|
+
|
|
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
|
|
+{
|
|
+ int irq;
|
|
+
|
|
+ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
|
|
+ if (!irq)
|
|
+ goto out;
|
|
+
|
|
+ /* This is borrowed from the original function.
|
|
+ */
|
|
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
|
|
+
|
|
+out:
|
|
+ return irq;
|
|
}
|
|
|
|
void *hardirq_stack[NR_CPUS];
|
|
@@ -720,9 +871,12 @@ void fixup_irqs(void)
|
|
|
|
for (irq = 0; irq < NR_IRQS; irq++) {
|
|
struct irq_desc *desc = irq_to_desc(irq);
|
|
- struct irq_data *data = irq_desc_get_irq_data(desc);
|
|
+ struct irq_data *data;
|
|
unsigned long flags;
|
|
|
|
+ if (!desc)
|
|
+ continue;
|
|
+ data = irq_desc_get_irq_data(desc);
|
|
raw_spin_lock_irqsave(&desc->lock, flags);
|
|
if (desc->action && !irqd_is_per_cpu(data)) {
|
|
if (data->chip->irq_set_affinity)
|
|
@@ -922,16 +1076,22 @@ static struct irqaction timer_irq_action = {
|
|
.name = "timer",
|
|
};
|
|
|
|
-/* Only invoked on boot processor. */
|
|
-void __init init_IRQ(void)
|
|
+static void __init irq_ivector_init(void)
|
|
{
|
|
- unsigned long size;
|
|
+ unsigned long size, order;
|
|
+ unsigned int ivecs;
|
|
|
|
- map_prom_timers();
|
|
- kill_prom_timer();
|
|
+ /* If we are doing cookie only VIRQs then we do not need the ivector
|
|
+ * table to process interrupts.
|
|
+ */
|
|
+ if (sun4v_cookie_only_virqs())
|
|
+ return;
|
|
|
|
- size = sizeof(struct ino_bucket) * NUM_IVECS;
|
|
- ivector_table = kzalloc(size, GFP_KERNEL);
|
|
+ ivecs = size_nr_ivec();
|
|
+ size = sizeof(struct ino_bucket) * ivecs;
|
|
+ order = get_order(size);
|
|
+ ivector_table = (struct ino_bucket *)
|
|
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
|
|
if (!ivector_table) {
|
|
prom_printf("Fatal error, cannot allocate ivector_table\n");
|
|
prom_halt();
|
|
@@ -940,6 +1100,15 @@ void __init init_IRQ(void)
|
|
((unsigned long) ivector_table) + size);
|
|
|
|
ivector_table_pa = __pa(ivector_table);
|
|
+}
|
|
+
|
|
+/* Only invoked on boot processor.*/
|
|
+void __init init_IRQ(void)
|
|
+{
|
|
+ irq_init_hv();
|
|
+ irq_ivector_init();
|
|
+ map_prom_timers();
|
|
+ kill_prom_timer();
|
|
|
|
if (tlb_type == hypervisor)
|
|
sun4v_init_mondo_queues();
|
|
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
|
|
index 542e96a..ef0d8e9 100644
|
|
--- a/arch/sparc/kernel/ktlb.S
|
|
+++ b/arch/sparc/kernel/ktlb.S
|
|
@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr:
|
|
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
|
|
|
|
TSB_LOCK_TAG(%g1, %g2, %g7)
|
|
-
|
|
- /* Load and check PTE. */
|
|
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
|
|
- mov 1, %g7
|
|
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
|
|
- brgez,a,pn %g5, kvmap_itlb_longpath
|
|
- TSB_STORE(%g1, %g7)
|
|
-
|
|
TSB_WRITE(%g1, %g5, %g6)
|
|
|
|
/* fallthrough to TLB load */
|
|
@@ -118,6 +110,12 @@ kvmap_dtlb_obp:
|
|
ba,pt %xcc, kvmap_dtlb_load
|
|
nop
|
|
|
|
+kvmap_linear_early:
|
|
+ sethi %hi(kern_linear_pte_xor), %g7
|
|
+ ldx [%g7 + %lo(kern_linear_pte_xor)], %g2
|
|
+ ba,pt %xcc, kvmap_dtlb_tsb4m_load
|
|
+ xor %g2, %g4, %g5
|
|
+
|
|
.align 32
|
|
kvmap_dtlb_tsb4m_load:
|
|
TSB_LOCK_TAG(%g1, %g2, %g7)
|
|
@@ -146,105 +144,17 @@ kvmap_dtlb_4v:
|
|
/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
|
|
KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
|
|
#endif
|
|
- /* TSB entry address left in %g1, lookup linear PTE.
|
|
- * Must preserve %g1 and %g6 (TAG).
|
|
- */
|
|
-kvmap_dtlb_tsb4m_miss:
|
|
- /* Clear the PAGE_OFFSET top virtual bits, shift
|
|
- * down to get PFN, and make sure PFN is in range.
|
|
- */
|
|
-661: sllx %g4, 0, %g5
|
|
- .section .page_offset_shift_patch, "ax"
|
|
- .word 661b
|
|
- .previous
|
|
-
|
|
- /* Check to see if we know about valid memory at the 4MB
|
|
- * chunk this physical address will reside within.
|
|
+ /* Linear mapping TSB lookup failed. Fallthrough to kernel
|
|
+ * page table based lookup.
|
|
*/
|
|
-661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2
|
|
- .section .page_offset_shift_patch, "ax"
|
|
- .word 661b
|
|
- .previous
|
|
-
|
|
- brnz,pn %g2, kvmap_dtlb_longpath
|
|
- nop
|
|
-
|
|
- /* This unconditional branch and delay-slot nop gets patched
|
|
- * by the sethi sequence once the bitmap is properly setup.
|
|
- */
|
|
- .globl valid_addr_bitmap_insn
|
|
-valid_addr_bitmap_insn:
|
|
- ba,pt %xcc, 2f
|
|
- nop
|
|
- .subsection 2
|
|
- .globl valid_addr_bitmap_patch
|
|
-valid_addr_bitmap_patch:
|
|
- sethi %hi(sparc64_valid_addr_bitmap), %g7
|
|
- or %g7, %lo(sparc64_valid_addr_bitmap), %g7
|
|
- .previous
|
|
-
|
|
-661: srlx %g5, ILOG2_4MB, %g2
|
|
- .section .page_offset_shift_patch, "ax"
|
|
- .word 661b
|
|
- .previous
|
|
-
|
|
- srlx %g2, 6, %g5
|
|
- and %g2, 63, %g2
|
|
- sllx %g5, 3, %g5
|
|
- ldx [%g7 + %g5], %g5
|
|
- mov 1, %g7
|
|
- sllx %g7, %g2, %g7
|
|
- andcc %g5, %g7, %g0
|
|
- be,pn %xcc, kvmap_dtlb_longpath
|
|
-
|
|
-2: sethi %hi(kpte_linear_bitmap), %g2
|
|
-
|
|
- /* Get the 256MB physical address index. */
|
|
-661: sllx %g4, 0, %g5
|
|
- .section .page_offset_shift_patch, "ax"
|
|
- .word 661b
|
|
- .previous
|
|
-
|
|
- or %g2, %lo(kpte_linear_bitmap), %g2
|
|
-
|
|
-661: srlx %g5, ILOG2_256MB, %g5
|
|
- .section .page_offset_shift_patch, "ax"
|
|
- .word 661b
|
|
- .previous
|
|
-
|
|
- and %g5, (32 - 1), %g7
|
|
-
|
|
- /* Divide by 32 to get the offset into the bitmask. */
|
|
- srlx %g5, 5, %g5
|
|
- add %g7, %g7, %g7
|
|
- sllx %g5, 3, %g5
|
|
-
|
|
- /* kern_linear_pte_xor[(mask >> shift) & 3)] */
|
|
- ldx [%g2 + %g5], %g2
|
|
- srlx %g2, %g7, %g7
|
|
- sethi %hi(kern_linear_pte_xor), %g5
|
|
- and %g7, 3, %g7
|
|
- or %g5, %lo(kern_linear_pte_xor), %g5
|
|
- sllx %g7, 3, %g7
|
|
- ldx [%g5 + %g7], %g2
|
|
-
|
|
.globl kvmap_linear_patch
|
|
kvmap_linear_patch:
|
|
- ba,pt %xcc, kvmap_dtlb_tsb4m_load
|
|
- xor %g2, %g4, %g5
|
|
+ ba,a,pt %xcc, kvmap_linear_early
|
|
|
|
kvmap_dtlb_vmalloc_addr:
|
|
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
|
|
|
|
TSB_LOCK_TAG(%g1, %g2, %g7)
|
|
-
|
|
- /* Load and check PTE. */
|
|
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
|
|
- mov 1, %g7
|
|
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
|
|
- brgez,a,pn %g5, kvmap_dtlb_longpath
|
|
- TSB_STORE(%g1, %g7)
|
|
-
|
|
TSB_WRITE(%g1, %g5, %g6)
|
|
|
|
/* fallthrough to TLB load */
|
|
@@ -276,13 +186,8 @@ kvmap_dtlb_load:
|
|
|
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
|
kvmap_vmemmap:
|
|
- sub %g4, %g5, %g5
|
|
- srlx %g5, 22, %g5
|
|
- sethi %hi(vmemmap_table), %g1
|
|
- sllx %g5, 3, %g5
|
|
- or %g1, %lo(vmemmap_table), %g1
|
|
- ba,pt %xcc, kvmap_dtlb_load
|
|
- ldx [%g1 + %g5], %g5
|
|
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
|
|
+ ba,a,pt %xcc, kvmap_dtlb_load
|
|
#endif
|
|
|
|
kvmap_dtlb_nonlinear:
|
|
@@ -294,8 +199,8 @@ kvmap_dtlb_nonlinear:
|
|
|
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
|
/* Do not use the TSB for vmemmap. */
|
|
- mov (VMEMMAP_BASE >> 40), %g5
|
|
- sllx %g5, 40, %g5
|
|
+ sethi %hi(VMEMMAP_BASE), %g5
|
|
+ ldx [%g5 + %lo(VMEMMAP_BASE)], %g5
|
|
cmp %g4,%g5
|
|
bgeu,pn %xcc, kvmap_vmemmap
|
|
nop
|
|
@@ -307,8 +212,8 @@ kvmap_dtlb_tsbmiss:
|
|
sethi %hi(MODULES_VADDR), %g5
|
|
cmp %g4, %g5
|
|
blu,pn %xcc, kvmap_dtlb_longpath
|
|
- mov (VMALLOC_END >> 40), %g5
|
|
- sllx %g5, 40, %g5
|
|
+ sethi %hi(VMALLOC_END), %g5
|
|
+ ldx [%g5 + %lo(VMALLOC_END)], %g5
|
|
cmp %g4, %g5
|
|
bgeu,pn %xcc, kvmap_dtlb_longpath
|
|
nop
|
|
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
|
|
index e01d75d..7ef2862 100644
|
|
--- a/arch/sparc/kernel/ldc.c
|
|
+++ b/arch/sparc/kernel/ldc.c
|
|
@@ -1078,7 +1078,8 @@ static void ldc_iommu_release(struct ldc_channel *lp)
|
|
|
|
struct ldc_channel *ldc_alloc(unsigned long id,
|
|
const struct ldc_channel_config *cfgp,
|
|
- void *event_arg)
|
|
+ void *event_arg,
|
|
+ const char *name)
|
|
{
|
|
struct ldc_channel *lp;
|
|
const struct ldc_mode_ops *mops;
|
|
@@ -1093,6 +1094,8 @@ struct ldc_channel *ldc_alloc(unsigned long id,
|
|
err = -EINVAL;
|
|
if (!cfgp)
|
|
goto out_err;
|
|
+ if (!name)
|
|
+ goto out_err;
|
|
|
|
switch (cfgp->mode) {
|
|
case LDC_MODE_RAW:
|
|
@@ -1185,6 +1188,21 @@ struct ldc_channel *ldc_alloc(unsigned long id,
|
|
|
|
INIT_HLIST_HEAD(&lp->mh_list);
|
|
|
|
+ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
|
|
+ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
|
|
+
|
|
+ err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
|
|
+ lp->rx_irq_name, lp);
|
|
+ if (err)
|
|
+ goto out_free_txq;
|
|
+
|
|
+ err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
|
|
+ lp->tx_irq_name, lp);
|
|
+ if (err) {
|
|
+ free_irq(lp->cfg.rx_irq, lp);
|
|
+ goto out_free_txq;
|
|
+ }
|
|
+
|
|
return lp;
|
|
|
|
out_free_txq:
|
|
@@ -1237,31 +1255,14 @@ EXPORT_SYMBOL(ldc_free);
|
|
* state. This does not initiate a handshake, ldc_connect() does
|
|
* that.
|
|
*/
|
|
-int ldc_bind(struct ldc_channel *lp, const char *name)
|
|
+int ldc_bind(struct ldc_channel *lp)
|
|
{
|
|
unsigned long hv_err, flags;
|
|
int err = -EINVAL;
|
|
|
|
- if (!name ||
|
|
- (lp->state != LDC_STATE_INIT))
|
|
+ if (lp->state != LDC_STATE_INIT)
|
|
return -EINVAL;
|
|
|
|
- snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
|
|
- snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
|
|
-
|
|
- err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
|
|
- lp->rx_irq_name, lp);
|
|
- if (err)
|
|
- return err;
|
|
-
|
|
- err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
|
|
- lp->tx_irq_name, lp);
|
|
- if (err) {
|
|
- free_irq(lp->cfg.rx_irq, lp);
|
|
- return err;
|
|
- }
|
|
-
|
|
-
|
|
spin_lock_irqsave(&lp->lock, flags);
|
|
|
|
enable_irq(lp->cfg.rx_irq);
|
|
@@ -1336,7 +1337,7 @@ int ldc_connect(struct ldc_channel *lp)
|
|
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
|
|
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
|
|
lp->hs_state != LDC_HS_OPEN)
|
|
- err = -EINVAL;
|
|
+ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
|
|
else
|
|
err = start_handshake(lp);
|
|
|
|
@@ -2306,7 +2307,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
|
|
if (len & (8UL - 1))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- buf = kzalloc(len, GFP_KERNEL);
|
|
+ buf = kzalloc(len, GFP_ATOMIC);
|
|
if (!buf)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
|
|
index 6479256..fce8ab1 100644
|
|
--- a/arch/sparc/kernel/nmi.c
|
|
+++ b/arch/sparc/kernel/nmi.c
|
|
@@ -141,7 +141,6 @@ static inline unsigned int get_nmi_count(int cpu)
|
|
|
|
static __init void nmi_cpu_busy(void *data)
|
|
{
|
|
- local_irq_enable_in_hardirq();
|
|
while (endflag == 0)
|
|
mb();
|
|
}
|
|
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
|
|
index 8f76f23..f9c6813 100644
|
|
--- a/arch/sparc/kernel/pci_schizo.c
|
|
+++ b/arch/sparc/kernel/pci_schizo.c
|
|
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
|
|
{
|
|
unsigned long csr_reg, csr, csr_error_bits;
|
|
irqreturn_t ret = IRQ_NONE;
|
|
- u16 stat;
|
|
+ u32 stat;
|
|
|
|
csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
|
|
csr = upa_readq(csr_reg);
|
|
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
|
|
pbm->name);
|
|
ret = IRQ_HANDLED;
|
|
}
|
|
- pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
|
|
+ pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
|
|
if (stat & (PCI_STATUS_PARITY |
|
|
PCI_STATUS_SIG_TARGET_ABORT |
|
|
PCI_STATUS_REC_TARGET_ABORT |
|
|
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
|
|
PCI_STATUS_SIG_SYSTEM_ERROR)) {
|
|
printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
|
|
pbm->name, stat);
|
|
- pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
|
|
+ pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
|
|
ret = IRQ_HANDLED;
|
|
}
|
|
return ret;
|
|
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
|
|
index 269af58..7e967c8 100644
|
|
--- a/arch/sparc/kernel/pcr.c
|
|
+++ b/arch/sparc/kernel/pcr.c
|
|
@@ -191,12 +191,41 @@ static const struct pcr_ops n4_pcr_ops = {
|
|
.pcr_nmi_disable = PCR_N4_PICNPT,
|
|
};
|
|
|
|
+static u64 n5_pcr_read(unsigned long reg_num)
|
|
+{
|
|
+ unsigned long val;
|
|
+
|
|
+ (void) sun4v_t5_get_perfreg(reg_num, &val);
|
|
+
|
|
+ return val;
|
|
+}
|
|
+
|
|
+static void n5_pcr_write(unsigned long reg_num, u64 val)
|
|
+{
|
|
+ (void) sun4v_t5_set_perfreg(reg_num, val);
|
|
+}
|
|
+
|
|
+static const struct pcr_ops n5_pcr_ops = {
|
|
+ .read_pcr = n5_pcr_read,
|
|
+ .write_pcr = n5_pcr_write,
|
|
+ .read_pic = n4_pic_read,
|
|
+ .write_pic = n4_pic_write,
|
|
+ .nmi_picl_value = n4_picl_value,
|
|
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
|
|
+ PCR_N4_UTRACE | PCR_N4_TOE |
|
|
+ (26 << PCR_N4_SL_SHIFT)),
|
|
+ .pcr_nmi_disable = PCR_N4_PICNPT,
|
|
+};
|
|
+
|
|
+
|
|
static unsigned long perf_hsvc_group;
|
|
static unsigned long perf_hsvc_major;
|
|
static unsigned long perf_hsvc_minor;
|
|
|
|
static int __init register_perf_hsvc(void)
|
|
{
|
|
+ unsigned long hverror;
|
|
+
|
|
if (tlb_type == hypervisor) {
|
|
switch (sun4v_chip_type) {
|
|
case SUN4V_CHIP_NIAGARA1:
|
|
@@ -215,6 +244,10 @@ static int __init register_perf_hsvc(void)
|
|
perf_hsvc_group = HV_GRP_VT_CPU;
|
|
break;
|
|
|
|
+ case SUN4V_CHIP_NIAGARA5:
|
|
+ perf_hsvc_group = HV_GRP_T5_CPU;
|
|
+ break;
|
|
+
|
|
default:
|
|
return -ENODEV;
|
|
}
|
|
@@ -222,10 +255,12 @@ static int __init register_perf_hsvc(void)
|
|
|
|
perf_hsvc_major = 1;
|
|
perf_hsvc_minor = 0;
|
|
- if (sun4v_hvapi_register(perf_hsvc_group,
|
|
- perf_hsvc_major,
|
|
- &perf_hsvc_minor)) {
|
|
- printk("perfmon: Could not register hvapi.\n");
|
|
+ hverror = sun4v_hvapi_register(perf_hsvc_group,
|
|
+ perf_hsvc_major,
|
|
+ &perf_hsvc_minor);
|
|
+ if (hverror) {
|
|
+ pr_err("perfmon: Could not register hvapi(0x%lx).\n",
|
|
+ hverror);
|
|
return -ENODEV;
|
|
}
|
|
}
|
|
@@ -254,6 +289,10 @@ static int __init setup_sun4v_pcr_ops(void)
|
|
pcr_ops = &n4_pcr_ops;
|
|
break;
|
|
|
|
+ case SUN4V_CHIP_NIAGARA5:
|
|
+ pcr_ops = &n5_pcr_ops;
|
|
+ break;
|
|
+
|
|
default:
|
|
ret = -ENODEV;
|
|
break;
|
|
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
|
|
index b5c38fa..3ccb677 100644
|
|
--- a/arch/sparc/kernel/perf_event.c
|
|
+++ b/arch/sparc/kernel/perf_event.c
|
|
@@ -960,6 +960,8 @@ out:
|
|
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
|
|
}
|
|
|
|
+static void sparc_pmu_start(struct perf_event *event, int flags);
|
|
+
|
|
/* On this PMU each PIC has it's own PCR control register. */
|
|
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
|
|
{
|
|
@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
|
|
struct perf_event *cp = cpuc->event[i];
|
|
struct hw_perf_event *hwc = &cp->hw;
|
|
int idx = hwc->idx;
|
|
- u64 enc;
|
|
|
|
if (cpuc->current_idx[i] != PIC_NO_INDEX)
|
|
continue;
|
|
|
|
- sparc_perf_event_set_period(cp, hwc, idx);
|
|
cpuc->current_idx[i] = idx;
|
|
|
|
- enc = perf_event_get_enc(cpuc->events[i]);
|
|
- cpuc->pcr[idx] &= ~mask_for_index(idx);
|
|
- if (hwc->state & PERF_HES_STOPPED)
|
|
- cpuc->pcr[idx] |= nop_for_index(idx);
|
|
- else
|
|
- cpuc->pcr[idx] |= event_encoding(enc, idx);
|
|
+ sparc_pmu_start(cp, PERF_EF_RELOAD);
|
|
}
|
|
out:
|
|
for (i = 0; i < cpuc->n_events; i++) {
|
|
@@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
|
|
int i;
|
|
|
|
local_irq_save(flags);
|
|
- perf_pmu_disable(event->pmu);
|
|
|
|
for (i = 0; i < cpuc->n_events; i++) {
|
|
if (event == cpuc->event[i]) {
|
|
@@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
|
|
}
|
|
}
|
|
|
|
- perf_pmu_enable(event->pmu);
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
@@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
|
|
unsigned long flags;
|
|
|
|
local_irq_save(flags);
|
|
- perf_pmu_disable(event->pmu);
|
|
|
|
n0 = cpuc->n_events;
|
|
if (n0 >= sparc_pmu->max_hw_events)
|
|
@@ -1394,7 +1386,6 @@ nocheck:
|
|
|
|
ret = 0;
|
|
out:
|
|
- perf_pmu_enable(event->pmu);
|
|
local_irq_restore(flags);
|
|
return ret;
|
|
}
|
|
@@ -1662,7 +1653,8 @@ static bool __init supported_pmu(void)
|
|
sparc_pmu = &niagara2_pmu;
|
|
return true;
|
|
}
|
|
- if (!strcmp(sparc_pmu_type, "niagara4")) {
|
|
+ if (!strcmp(sparc_pmu_type, "niagara4") ||
|
|
+ !strcmp(sparc_pmu_type, "niagara5")) {
|
|
sparc_pmu = &niagara4_pmu;
|
|
return true;
|
|
}
|
|
@@ -1671,9 +1663,12 @@ static bool __init supported_pmu(void)
|
|
|
|
int __init init_hw_perf_events(void)
|
|
{
|
|
+ int err;
|
|
+
|
|
pr_info("Performance events: ");
|
|
|
|
- if (!supported_pmu()) {
|
|
+ err = pcr_arch_init();
|
|
+ if (err || !supported_pmu()) {
|
|
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
|
|
return 0;
|
|
}
|
|
@@ -1685,7 +1680,7 @@ int __init init_hw_perf_events(void)
|
|
|
|
return 0;
|
|
}
|
|
-early_initcall(init_hw_perf_events);
|
|
+pure_initcall(init_hw_perf_events);
|
|
|
|
void perf_callchain_kernel(struct perf_callchain_entry *entry,
|
|
struct pt_regs *regs)
|
|
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
|
|
index d7b4967..1a79d68 100644
|
|
--- a/arch/sparc/kernel/process_64.c
|
|
+++ b/arch/sparc/kernel/process_64.c
|
|
@@ -281,6 +281,8 @@ void arch_trigger_all_cpu_backtrace(void)
|
|
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
|
|
gp->tpc, gp->o7, gp->i7, gp->rpc);
|
|
}
|
|
+
|
|
+ touch_nmi_watchdog();
|
|
}
|
|
|
|
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
|
|
@@ -306,6 +308,9 @@ static void __global_pmu_self(int this_cpu)
|
|
struct global_pmu_snapshot *pp;
|
|
int i, num;
|
|
|
|
+ if (!pcr_ops)
|
|
+ return;
|
|
+
|
|
pp = &global_cpu_snapshot[this_cpu].pmu;
|
|
|
|
num = 1;
|
|
@@ -353,6 +358,8 @@ static void pmu_snapshot_all_cpus(void)
|
|
(cpu == this_cpu ? '*' : ' '), cpu,
|
|
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
|
|
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
|
|
+
|
|
+ touch_nmi_watchdog();
|
|
}
|
|
|
|
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
|
|
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
|
|
index 3fdb455..61a5198 100644
|
|
--- a/arch/sparc/kernel/setup_64.c
|
|
+++ b/arch/sparc/kernel/setup_64.c
|
|
@@ -30,6 +30,7 @@
|
|
#include <linux/cpu.h>
|
|
#include <linux/initrd.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/start_kernel.h>
|
|
|
|
#include <asm/io.h>
|
|
#include <asm/processor.h>
|
|
@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
|
|
|
|
static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
|
|
|
|
-void __init per_cpu_patch(void)
|
|
+static void __init per_cpu_patch(void)
|
|
{
|
|
struct cpuid_patch_entry *p;
|
|
unsigned long ver;
|
|
@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
|
|
}
|
|
}
|
|
|
|
-void __init sun4v_patch(void)
|
|
+static void __init sun4v_patch(void)
|
|
{
|
|
extern void sun4v_hvapi_init(void);
|
|
|
|
@@ -335,14 +336,25 @@ static void __init pause_patch(void)
|
|
}
|
|
}
|
|
|
|
-#ifdef CONFIG_SMP
|
|
-void __init boot_cpu_id_too_large(int cpu)
|
|
+void __init start_early_boot(void)
|
|
{
|
|
- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
|
|
- cpu, NR_CPUS);
|
|
- prom_halt();
|
|
+ int cpu;
|
|
+
|
|
+ check_if_starfire();
|
|
+ per_cpu_patch();
|
|
+ sun4v_patch();
|
|
+
|
|
+ cpu = hard_smp_processor_id();
|
|
+ if (cpu >= NR_CPUS) {
|
|
+ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
|
|
+ cpu, NR_CPUS);
|
|
+ prom_halt();
|
|
+ }
|
|
+ current_thread_info()->cpu = cpu;
|
|
+
|
|
+ prom_init_report();
|
|
+ start_kernel();
|
|
}
|
|
-#endif
|
|
|
|
/* On Ultra, we support all of the v8 capabilities. */
|
|
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
|
|
@@ -500,12 +512,16 @@ static void __init init_sparc64_elf_hwcap(void)
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
|
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
|
cap |= HWCAP_SPARC_BLKINIT;
|
|
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
|
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
|
cap |= HWCAP_SPARC_N2;
|
|
}
|
|
@@ -533,6 +549,8 @@ static void __init init_sparc64_elf_hwcap(void)
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
|
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
|
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
|
|
AV_SPARC_ASI_BLK_INIT |
|
|
@@ -540,6 +558,8 @@ static void __init init_sparc64_elf_hwcap(void)
|
|
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
|
|
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
|
|
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
|
|
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
|
|
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
|
|
AV_SPARC_FMAF);
|
|
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
|
|
index b085311..9af0a5d 100644
|
|
--- a/arch/sparc/kernel/smp_64.c
|
|
+++ b/arch/sparc/kernel/smp_64.c
|
|
@@ -151,7 +151,7 @@ void cpu_panic(void)
|
|
#define NUM_ROUNDS 64 /* magic value */
|
|
#define NUM_ITERS 5 /* likewise */
|
|
|
|
-static DEFINE_SPINLOCK(itc_sync_lock);
|
|
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
|
|
static unsigned long go[SLAVE + 1];
|
|
|
|
#define DEBUG_TICK_SYNC 0
|
|
@@ -259,7 +259,7 @@ static void smp_synchronize_one_tick(int cpu)
|
|
go[MASTER] = 0;
|
|
membar_safe("#StoreLoad");
|
|
|
|
- spin_lock_irqsave(&itc_sync_lock, flags);
|
|
+ raw_spin_lock_irqsave(&itc_sync_lock, flags);
|
|
{
|
|
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
|
|
while (!go[MASTER])
|
|
@@ -270,7 +270,7 @@ static void smp_synchronize_one_tick(int cpu)
|
|
membar_safe("#StoreLoad");
|
|
}
|
|
}
|
|
- spin_unlock_irqrestore(&itc_sync_lock, flags);
|
|
+ raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
|
|
}
|
|
|
|
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
|
|
@@ -823,13 +823,17 @@ void arch_send_call_function_single_ipi(int cpu)
|
|
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
|
|
{
|
|
clear_softint(1 << irq);
|
|
+ irq_enter();
|
|
generic_smp_call_function_interrupt();
|
|
+ irq_exit();
|
|
}
|
|
|
|
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
|
|
{
|
|
clear_softint(1 << irq);
|
|
+ irq_enter();
|
|
generic_smp_call_function_single_interrupt();
|
|
+ irq_exit();
|
|
}
|
|
|
|
static void tsb_sync(void *info)
|
|
@@ -1395,7 +1399,6 @@ void __cpu_die(unsigned int cpu)
|
|
|
|
void __init smp_cpus_done(unsigned int max_cpus)
|
|
{
|
|
- pcr_arch_init();
|
|
}
|
|
|
|
void smp_send_reschedule(int cpu)
|
|
@@ -1480,6 +1483,13 @@ static void __init pcpu_populate_pte(unsigned long addr)
|
|
pud_t *pud;
|
|
pmd_t *pmd;
|
|
|
|
+ if (pgd_none(*pgd)) {
|
|
+ pud_t *new;
|
|
+
|
|
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
|
|
+ pgd_populate(&init_mm, pgd, new);
|
|
+ }
|
|
+
|
|
pud = pud_offset(pgd, addr);
|
|
if (pud_none(*pud)) {
|
|
pmd_t *new;
|
|
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
|
|
index e0c09bf8..6179e19 100644
|
|
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
|
|
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
|
|
@@ -195,6 +195,11 @@ sun4v_tsb_miss_common:
|
|
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
|
|
|
|
sun4v_itlb_error:
|
|
+ rdpr %tl, %g1
|
|
+ cmp %g1, 1
|
|
+ ble,pt %icc, sun4v_bad_ra
|
|
+ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
|
|
+
|
|
sethi %hi(sun4v_err_itlb_vaddr), %g1
|
|
stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
|
|
sethi %hi(sun4v_err_itlb_ctx), %g1
|
|
@@ -206,15 +211,10 @@ sun4v_itlb_error:
|
|
sethi %hi(sun4v_err_itlb_error), %g1
|
|
stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
|
|
|
|
+ sethi %hi(1f), %g7
|
|
rdpr %tl, %g4
|
|
- cmp %g4, 1
|
|
- ble,pt %icc, 1f
|
|
- sethi %hi(2f), %g7
|
|
ba,pt %xcc, etraptl1
|
|
- or %g7, %lo(2f), %g7
|
|
-
|
|
-1: ba,pt %xcc, etrap
|
|
-2: or %g7, %lo(2b), %g7
|
|
+1: or %g7, %lo(1f), %g7
|
|
mov %l4, %o1
|
|
call sun4v_itlb_error_report
|
|
add %sp, PTREGS_OFF, %o0
|
|
@@ -222,6 +222,11 @@ sun4v_itlb_error:
|
|
/* NOTREACHED */
|
|
|
|
sun4v_dtlb_error:
|
|
+ rdpr %tl, %g1
|
|
+ cmp %g1, 1
|
|
+ ble,pt %icc, sun4v_bad_ra
|
|
+ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
|
|
+
|
|
sethi %hi(sun4v_err_dtlb_vaddr), %g1
|
|
stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
|
|
sethi %hi(sun4v_err_dtlb_ctx), %g1
|
|
@@ -233,21 +238,23 @@ sun4v_dtlb_error:
|
|
sethi %hi(sun4v_err_dtlb_error), %g1
|
|
stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
|
|
|
|
+ sethi %hi(1f), %g7
|
|
rdpr %tl, %g4
|
|
- cmp %g4, 1
|
|
- ble,pt %icc, 1f
|
|
- sethi %hi(2f), %g7
|
|
ba,pt %xcc, etraptl1
|
|
- or %g7, %lo(2f), %g7
|
|
-
|
|
-1: ba,pt %xcc, etrap
|
|
-2: or %g7, %lo(2b), %g7
|
|
+1: or %g7, %lo(1f), %g7
|
|
mov %l4, %o1
|
|
call sun4v_dtlb_error_report
|
|
add %sp, PTREGS_OFF, %o0
|
|
|
|
/* NOTREACHED */
|
|
|
|
+sun4v_bad_ra:
|
|
+ or %g0, %g4, %g5
|
|
+ ba,pt %xcc, sparc64_realfault_common
|
|
+ or %g1, %g0, %g4
|
|
+
|
|
+ /* NOTREACHED */
|
|
+
|
|
/* Instruction Access Exception, tl0. */
|
|
sun4v_iacc:
|
|
ldxa [%g0] ASI_SCRATCHPAD, %g2
|
|
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
|
|
index f7c72b6..d066eb1 100644
|
|
--- a/arch/sparc/kernel/sys32.S
|
|
+++ b/arch/sparc/kernel/sys32.S
|
|
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
|
|
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
|
|
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
|
|
SIGN1(sys32_select, compat_sys_select, %o0)
|
|
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
|
|
+SIGN1(sys32_futex, compat_sys_futex, %o1)
|
|
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
|
|
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
|
|
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
|
|
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
|
|
index beb0b5a..25db14a 100644
|
|
--- a/arch/sparc/kernel/sys_sparc_64.c
|
|
+++ b/arch/sparc/kernel/sys_sparc_64.c
|
|
@@ -332,7 +332,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
|
|
long err;
|
|
|
|
/* No need for backward compatibility. We can start fresh... */
|
|
- if (call <= SEMCTL) {
|
|
+ if (call <= SEMTIMEDOP) {
|
|
switch (call) {
|
|
case SEMOP:
|
|
err = sys_semtimedop(first, ptr,
|
|
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
|
|
index 737f8cb..88ede1d 100644
|
|
--- a/arch/sparc/kernel/trampoline_64.S
|
|
+++ b/arch/sparc/kernel/trampoline_64.S
|
|
@@ -109,10 +109,13 @@ startup_continue:
|
|
brnz,pn %g1, 1b
|
|
nop
|
|
|
|
- sethi %hi(p1275buf), %g2
|
|
- or %g2, %lo(p1275buf), %g2
|
|
- ldx [%g2 + 0x10], %l2
|
|
- add %l2, -(192 + 128), %sp
|
|
+ /* Get onto temporary stack which will be in the locked
|
|
+ * kernel image.
|
|
+ */
|
|
+ sethi %hi(tramp_stack), %g1
|
|
+ or %g1, %lo(tramp_stack), %g1
|
|
+ add %g1, TRAMP_STACK_SIZE, %g1
|
|
+ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
|
|
flushw
|
|
|
|
/* Setup the loop variables:
|
|
@@ -394,7 +397,6 @@ after_lock_tlb:
|
|
sllx %g5, THREAD_SHIFT, %g5
|
|
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
|
|
add %g6, %g5, %sp
|
|
- mov 0, %fp
|
|
|
|
rdpr %pstate, %o1
|
|
or %o1, PSTATE_IE, %o1
|
|
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
|
|
index 4ced92f..25d0c7e 100644
|
|
--- a/arch/sparc/kernel/traps_64.c
|
|
+++ b/arch/sparc/kernel/traps_64.c
|
|
@@ -2102,6 +2102,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
|
|
atomic_inc(&sun4v_nonresum_oflow_cnt);
|
|
}
|
|
|
|
+static void sun4v_tlb_error(struct pt_regs *regs)
|
|
+{
|
|
+ die_if_kernel("TLB/TSB error", regs);
|
|
+}
|
|
+
|
|
unsigned long sun4v_err_itlb_vaddr;
|
|
unsigned long sun4v_err_itlb_ctx;
|
|
unsigned long sun4v_err_itlb_pte;
|
|
@@ -2109,8 +2114,7 @@ unsigned long sun4v_err_itlb_error;
|
|
|
|
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
|
|
{
|
|
- if (tl > 1)
|
|
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
|
|
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
|
|
|
|
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
|
|
regs->tpc, tl);
|
|
@@ -2123,7 +2127,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
|
|
sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
|
|
sun4v_err_itlb_pte, sun4v_err_itlb_error);
|
|
|
|
- prom_halt();
|
|
+ sun4v_tlb_error(regs);
|
|
}
|
|
|
|
unsigned long sun4v_err_dtlb_vaddr;
|
|
@@ -2133,8 +2137,7 @@ unsigned long sun4v_err_dtlb_error;
|
|
|
|
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
|
|
{
|
|
- if (tl > 1)
|
|
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
|
|
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
|
|
|
|
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
|
|
regs->tpc, tl);
|
|
@@ -2147,7 +2150,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
|
|
sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
|
|
sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
|
|
|
|
- prom_halt();
|
|
+ sun4v_tlb_error(regs);
|
|
}
|
|
|
|
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
|
|
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
|
|
index 14158d4..be98685 100644
|
|
--- a/arch/sparc/kernel/tsb.S
|
|
+++ b/arch/sparc/kernel/tsb.S
|
|
@@ -162,10 +162,10 @@ tsb_miss_page_table_walk_sun4v_fastpath:
|
|
nop
|
|
.previous
|
|
|
|
- rdpr %tl, %g3
|
|
- cmp %g3, 1
|
|
+ rdpr %tl, %g7
|
|
+ cmp %g7, 1
|
|
bne,pn %xcc, winfix_trampoline
|
|
- nop
|
|
+ mov %g3, %g4
|
|
ba,pt %xcc, etrap
|
|
rd %pc, %g7
|
|
call hugetlb_setup
|
|
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
|
|
index 3c1a7cb..35ab8b6 100644
|
|
--- a/arch/sparc/kernel/unaligned_64.c
|
|
+++ b/arch/sparc/kernel/unaligned_64.c
|
|
@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
|
|
unsigned long compute_effective_address(struct pt_regs *regs,
|
|
unsigned int insn, unsigned int rd)
|
|
{
|
|
+ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
|
|
unsigned int rs1 = (insn >> 14) & 0x1f;
|
|
unsigned int rs2 = insn & 0x1f;
|
|
- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
|
|
+ unsigned long addr;
|
|
|
|
if (insn & 0x2000) {
|
|
maybe_flush_windows(rs1, 0, rd, from_kernel);
|
|
- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
|
|
+ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
|
|
} else {
|
|
maybe_flush_windows(rs1, rs2, rd, from_kernel);
|
|
- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
|
|
+ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
|
|
}
|
|
+
|
|
+ if (!from_kernel && test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xffffffff;
|
|
+
|
|
+ return addr;
|
|
}
|
|
|
|
/* This is just to make gcc think die_if_kernel does return... */
|
|
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
|
|
index f8e7dd5..9c5fbd0 100644
|
|
--- a/arch/sparc/kernel/viohs.c
|
|
+++ b/arch/sparc/kernel/viohs.c
|
|
@@ -714,7 +714,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio,
|
|
cfg.tx_irq = vio->vdev->tx_irq;
|
|
cfg.rx_irq = vio->vdev->rx_irq;
|
|
|
|
- lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
|
|
+ lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
|
|
if (IS_ERR(lp))
|
|
return PTR_ERR(lp);
|
|
|
|
@@ -746,7 +746,7 @@ void vio_port_up(struct vio_driver_state *vio)
|
|
|
|
err = 0;
|
|
if (state == LDC_STATE_INIT) {
|
|
- err = ldc_bind(vio->lp, vio->name);
|
|
+ err = ldc_bind(vio->lp);
|
|
if (err)
|
|
printk(KERN_WARNING "%s: Port %lu bind failed, "
|
|
"err=%d\n",
|
|
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
|
|
index 932ff90..0924305 100644
|
|
--- a/arch/sparc/kernel/vmlinux.lds.S
|
|
+++ b/arch/sparc/kernel/vmlinux.lds.S
|
|
@@ -35,8 +35,9 @@ jiffies = jiffies_64;
|
|
|
|
SECTIONS
|
|
{
|
|
- /* swapper_low_pmd_dir is sparc64 only */
|
|
- swapper_low_pmd_dir = 0x0000000000402000;
|
|
+#ifdef CONFIG_SPARC64
|
|
+ swapper_pg_dir = 0x0000000000402000;
|
|
+#endif
|
|
. = INITIAL_ADDRESS;
|
|
.text TEXTSTART :
|
|
{
|
|
@@ -122,11 +123,6 @@ SECTIONS
|
|
*(.swapper_4m_tsb_phys_patch)
|
|
__swapper_4m_tsb_phys_patch_end = .;
|
|
}
|
|
- .page_offset_shift_patch : {
|
|
- __page_offset_shift_patch = .;
|
|
- *(.page_offset_shift_patch)
|
|
- __page_offset_shift_patch_end = .;
|
|
- }
|
|
.popc_3insn_patch : {
|
|
__popc_3insn_patch = .;
|
|
*(.popc_3insn_patch)
|
|
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
|
|
index 2c20ad6..30eee6e 100644
|
|
--- a/arch/sparc/lib/NG2memcpy.S
|
|
+++ b/arch/sparc/lib/NG2memcpy.S
|
|
@@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
|
*/
|
|
VISEntryHalf
|
|
|
|
+ membar #Sync
|
|
alignaddr %o1, %g0, %g0
|
|
|
|
add %o1, (64 - 1), %o4
|
|
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
|
|
index 9cf2ee0..83aeeb1 100644
|
|
--- a/arch/sparc/lib/NG4memcpy.S
|
|
+++ b/arch/sparc/lib/NG4memcpy.S
|
|
@@ -41,6 +41,10 @@
|
|
#endif
|
|
#endif
|
|
|
|
+#if !defined(EX_LD) && !defined(EX_ST)
|
|
+#define NON_USER_COPY
|
|
+#endif
|
|
+
|
|
#ifndef EX_LD
|
|
#define EX_LD(x) x
|
|
#endif
|
|
@@ -197,9 +201,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
|
mov EX_RETVAL(%o3), %o0
|
|
|
|
.Llarge_src_unaligned:
|
|
+#ifdef NON_USER_COPY
|
|
+ VISEntryHalfFast(.Lmedium_vis_entry_fail)
|
|
+#else
|
|
+ VISEntryHalf
|
|
+#endif
|
|
andn %o2, 0x3f, %o4
|
|
sub %o2, %o4, %o2
|
|
- VISEntryHalf
|
|
alignaddr %o1, %g0, %g1
|
|
add %o1, %o4, %o1
|
|
EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
|
|
@@ -232,14 +240,21 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
|
add %o0, 0x40, %o0
|
|
bne,pt %icc, 1b
|
|
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
|
|
+#ifdef NON_USER_COPY
|
|
+ VISExitHalfFast
|
|
+#else
|
|
VISExitHalf
|
|
-
|
|
+#endif
|
|
brz,pn %o2, .Lexit
|
|
cmp %o2, 19
|
|
ble,pn %icc, .Lsmall_unaligned
|
|
nop
|
|
ba,a,pt %icc, .Lmedium_unaligned
|
|
|
|
+#ifdef NON_USER_COPY
|
|
+.Lmedium_vis_entry_fail:
|
|
+ or %o0, %o1, %g2
|
|
+#endif
|
|
.Lmedium:
|
|
LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
|
|
andcc %g2, 0x7, %g0
|
|
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
|
|
index b320ae9..a063d84 100644
|
|
--- a/arch/sparc/lib/VISsave.S
|
|
+++ b/arch/sparc/lib/VISsave.S
|
|
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
|
|
|
|
stx %g3, [%g6 + TI_GSR]
|
|
2: add %g6, %g1, %g3
|
|
- cmp %o5, FPRS_DU
|
|
- be,pn %icc, 6f
|
|
- sll %g1, 3, %g1
|
|
+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
|
|
+ sll %g1, 3, %g1
|
|
stb %o5, [%g3 + TI_FPSAVED]
|
|
rd %gsr, %g2
|
|
add %g6, %g1, %g3
|
|
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
|
|
.align 32
|
|
80: jmpl %g7 + %g0, %g0
|
|
nop
|
|
-
|
|
-6: ldub [%g3 + TI_FPSAVED], %o5
|
|
- or %o5, FPRS_DU, %o5
|
|
- add %g6, TI_FPREGS+0x80, %g2
|
|
- stb %o5, [%g3 + TI_FPSAVED]
|
|
-
|
|
- sll %g1, 5, %g1
|
|
- add %g6, TI_FPREGS+0xc0, %g3
|
|
- wr %g0, FPRS_FEF, %fprs
|
|
- membar #Sync
|
|
- stda %f32, [%g2 + %g1] ASI_BLK_P
|
|
- stda %f48, [%g3 + %g1] ASI_BLK_P
|
|
- membar #Sync
|
|
- ba,pt %xcc, 80f
|
|
- nop
|
|
-
|
|
- .align 32
|
|
-80: jmpl %g7 + %g0, %g0
|
|
- nop
|
|
-
|
|
- .align 32
|
|
-VISenterhalf:
|
|
- ldub [%g6 + TI_FPDEPTH], %g1
|
|
- brnz,a,pn %g1, 1f
|
|
- cmp %g1, 1
|
|
- stb %g0, [%g6 + TI_FPSAVED]
|
|
- stx %fsr, [%g6 + TI_XFSR]
|
|
- clr %o5
|
|
- jmpl %g7 + %g0, %g0
|
|
- wr %g0, FPRS_FEF, %fprs
|
|
-
|
|
-1: bne,pn %icc, 2f
|
|
- srl %g1, 1, %g1
|
|
- ba,pt %xcc, vis1
|
|
- sub %g7, 8, %g7
|
|
-2: addcc %g6, %g1, %g3
|
|
- sll %g1, 3, %g1
|
|
- andn %o5, FPRS_DU, %g2
|
|
- stb %g2, [%g3 + TI_FPSAVED]
|
|
-
|
|
- rd %gsr, %g2
|
|
- add %g6, %g1, %g3
|
|
- stx %g2, [%g3 + TI_GSR]
|
|
- add %g6, %g1, %g2
|
|
- stx %fsr, [%g2 + TI_XFSR]
|
|
- sll %g1, 5, %g1
|
|
-3: andcc %o5, FPRS_DL, %g0
|
|
- be,pn %icc, 4f
|
|
- add %g6, TI_FPREGS, %g2
|
|
-
|
|
- add %g6, TI_FPREGS+0x40, %g3
|
|
- membar #Sync
|
|
- stda %f0, [%g2 + %g1] ASI_BLK_P
|
|
- stda %f16, [%g3 + %g1] ASI_BLK_P
|
|
- membar #Sync
|
|
- ba,pt %xcc, 4f
|
|
- nop
|
|
-
|
|
- .align 32
|
|
-4: and %o5, FPRS_DU, %o5
|
|
- jmpl %g7 + %g0, %g0
|
|
- wr %o5, FPRS_FEF, %fprs
|
|
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
|
|
index 1d32b54..8f2f94d 100644
|
|
--- a/arch/sparc/lib/atomic32.c
|
|
+++ b/arch/sparc/lib/atomic32.c
|
|
@@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v)
|
|
}
|
|
EXPORT_SYMBOL(__atomic_add_return);
|
|
|
|
+int atomic_xchg(atomic_t *v, int new)
|
|
+{
|
|
+ int ret;
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
|
|
+ ret = v->counter;
|
|
+ v->counter = new;
|
|
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(atomic_xchg);
|
|
+
|
|
int atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
{
|
|
int ret;
|
|
@@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
|
|
return (unsigned long)prev;
|
|
}
|
|
EXPORT_SYMBOL(__cmpxchg_u32);
|
|
+
|
|
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ u32 prev;
|
|
+
|
|
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
|
|
+ prev = *ptr;
|
|
+ *ptr = new;
|
|
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
|
|
+
|
|
+ return (unsigned long)prev;
|
|
+}
|
|
+EXPORT_SYMBOL(__xchg_u32);
|
|
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
|
|
index 323335b..ac094de 100644
|
|
--- a/arch/sparc/lib/ksyms.c
|
|
+++ b/arch/sparc/lib/ksyms.c
|
|
@@ -126,10 +126,6 @@ EXPORT_SYMBOL(copy_user_page);
|
|
void VISenter(void);
|
|
EXPORT_SYMBOL(VISenter);
|
|
|
|
-/* CRYPTO code needs this */
|
|
-void VISenterhalf(void);
|
|
-EXPORT_SYMBOL(VISenterhalf);
|
|
-
|
|
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
|
|
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
|
|
unsigned long *);
|
|
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
|
|
index b7f6334..857ad4f 100644
|
|
--- a/arch/sparc/lib/memmove.S
|
|
+++ b/arch/sparc/lib/memmove.S
|
|
@@ -8,9 +8,11 @@
|
|
|
|
.text
|
|
ENTRY(memmove) /* o0=dst o1=src o2=len */
|
|
- mov %o0, %g1
|
|
+ brz,pn %o2, 99f
|
|
+ mov %o0, %g1
|
|
+
|
|
cmp %o0, %o1
|
|
- bleu,pt %xcc, memcpy
|
|
+ bleu,pt %xcc, 2f
|
|
add %o1, %o2, %g7
|
|
cmp %g7, %o0
|
|
bleu,pt %xcc, memcpy
|
|
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
|
|
stb %g7, [%o0]
|
|
bne,pt %icc, 1b
|
|
sub %o0, 1, %o0
|
|
-
|
|
+99:
|
|
retl
|
|
mov %g1, %o0
|
|
+
|
|
+ /* We can't just call memcpy for these memmove cases. On some
|
|
+ * chips the memcpy uses cache initializing stores and when dst
|
|
+ * and src are close enough, those can clobber the source data
|
|
+ * before we've loaded it in.
|
|
+ */
|
|
+2: or %o0, %o1, %g7
|
|
+ or %o2, %g7, %g7
|
|
+ andcc %g7, 0x7, %g0
|
|
+ bne,pn %xcc, 4f
|
|
+ nop
|
|
+
|
|
+3: ldx [%o1], %g7
|
|
+ add %o1, 8, %o1
|
|
+ subcc %o2, 8, %o2
|
|
+ add %o0, 8, %o0
|
|
+ bne,pt %icc, 3b
|
|
+ stx %g7, [%o0 - 0x8]
|
|
+ ba,a,pt %xcc, 99b
|
|
+
|
|
+4: ldub [%o1], %g7
|
|
+ add %o1, 1, %o1
|
|
+ subcc %o2, 1, %o2
|
|
+ add %o0, 1, %o0
|
|
+ bne,pt %icc, 4b
|
|
+ stb %g7, [%o0 - 0x1]
|
|
+ ba,a,pt %xcc, 99b
|
|
ENDPROC(memmove)
|
|
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
|
|
index 99c017b..f75e690 100644
|
|
--- a/arch/sparc/lib/memset.S
|
|
+++ b/arch/sparc/lib/memset.S
|
|
@@ -3,8 +3,9 @@
|
|
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
|
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
|
|
*
|
|
- * Returns 0, if ok, and number of bytes not yet set if exception
|
|
- * occurs and we were called as clear_user.
|
|
+ * Calls to memset returns initial %o0. Calls to bzero returns 0, if ok, and
|
|
+ * number of bytes not yet set if exception occurs and we were called as
|
|
+ * clear_user.
|
|
*/
|
|
|
|
#include <asm/ptrace.h>
|
|
@@ -65,6 +66,8 @@ __bzero_begin:
|
|
.globl __memset_start, __memset_end
|
|
__memset_start:
|
|
memset:
|
|
+ mov %o0, %g1
|
|
+ mov 1, %g4
|
|
and %o1, 0xff, %g3
|
|
sll %g3, 8, %g2
|
|
or %g3, %g2, %g3
|
|
@@ -89,6 +92,7 @@ memset:
|
|
sub %o0, %o2, %o0
|
|
|
|
__bzero:
|
|
+ clr %g4
|
|
mov %g0, %g3
|
|
1:
|
|
cmp %o1, 7
|
|
@@ -151,8 +155,8 @@ __bzero:
|
|
bne,a 8f
|
|
EX(stb %g3, [%o0], and %o1, 1)
|
|
8:
|
|
- retl
|
|
- clr %o0
|
|
+ b 0f
|
|
+ nop
|
|
7:
|
|
be 13b
|
|
orcc %o1, 0, %g0
|
|
@@ -164,6 +168,12 @@ __bzero:
|
|
bne 8b
|
|
EX(stb %g3, [%o0 - 1], add %o1, 1)
|
|
0:
|
|
+ andcc %g4, 1, %g0
|
|
+ be 5f
|
|
+ nop
|
|
+ retl
|
|
+ mov %g1, %o0
|
|
+5:
|
|
retl
|
|
clr %o0
|
|
__memset_end:
|
|
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
|
|
index aa4d55b..5ce8f2f 100644
|
|
--- a/arch/sparc/math-emu/math_32.c
|
|
+++ b/arch/sparc/math-emu/math_32.c
|
|
@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
|
|
case 0: fsr = *pfsr;
|
|
if (IR == -1) IR = 2;
|
|
/* fcc is always fcc0 */
|
|
- fsr &= ~0xc00; fsr |= (IR << 10); break;
|
|
+ fsr &= ~0xc00; fsr |= (IR << 10);
|
|
*pfsr = fsr;
|
|
break;
|
|
case 1: rd->s = IR; break;
|
|
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
|
|
index 59dbd46..163c787 100644
|
|
--- a/arch/sparc/mm/fault_32.c
|
|
+++ b/arch/sparc/mm/fault_32.c
|
|
@@ -252,6 +252,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
|
|
index 69bb818..0d6de79 100644
|
|
--- a/arch/sparc/mm/fault_64.c
|
|
+++ b/arch/sparc/mm/fault_64.c
|
|
@@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc)
|
|
pte_t *ptep, pte;
|
|
unsigned long pa;
|
|
u32 insn = 0;
|
|
- unsigned long pstate;
|
|
|
|
- if (pgd_none(*pgdp))
|
|
- goto outret;
|
|
+ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
|
|
+ goto out;
|
|
pudp = pud_offset(pgdp, tpc);
|
|
- if (pud_none(*pudp))
|
|
- goto outret;
|
|
- pmdp = pmd_offset(pudp, tpc);
|
|
- if (pmd_none(*pmdp))
|
|
- goto outret;
|
|
+ if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
|
|
+ goto out;
|
|
|
|
/* This disables preemption for us as well. */
|
|
- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
|
|
- __asm__ __volatile__("wrpr %0, %1, %%pstate"
|
|
- : : "r" (pstate), "i" (PSTATE_IE));
|
|
- ptep = pte_offset_map(pmdp, tpc);
|
|
- pte = *ptep;
|
|
- if (!pte_present(pte))
|
|
- goto out;
|
|
+ local_irq_disable();
|
|
+
|
|
+ pmdp = pmd_offset(pudp, tpc);
|
|
+ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
|
|
+ goto out_irq_enable;
|
|
|
|
- pa = (pte_pfn(pte) << PAGE_SHIFT);
|
|
- pa += (tpc & ~PAGE_MASK);
|
|
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
+ if (pmd_trans_huge(*pmdp)) {
|
|
+ if (pmd_trans_splitting(*pmdp))
|
|
+ goto out_irq_enable;
|
|
|
|
- /* Use phys bypass so we don't pollute dtlb/dcache. */
|
|
- __asm__ __volatile__("lduwa [%1] %2, %0"
|
|
- : "=r" (insn)
|
|
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
|
|
+ pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
|
|
+ pa += tpc & ~HPAGE_MASK;
|
|
|
|
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
|
|
+ __asm__ __volatile__("lduwa [%1] %2, %0"
|
|
+ : "=r" (insn)
|
|
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
|
|
+ } else
|
|
+#endif
|
|
+ {
|
|
+ ptep = pte_offset_map(pmdp, tpc);
|
|
+ pte = *ptep;
|
|
+ if (pte_present(pte)) {
|
|
+ pa = (pte_pfn(pte) << PAGE_SHIFT);
|
|
+ pa += (tpc & ~PAGE_MASK);
|
|
+
|
|
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
|
|
+ __asm__ __volatile__("lduwa [%1] %2, %0"
|
|
+ : "=r" (insn)
|
|
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
|
|
+ }
|
|
+ pte_unmap(ptep);
|
|
+ }
|
|
+out_irq_enable:
|
|
+ local_irq_enable();
|
|
out:
|
|
- pte_unmap(ptep);
|
|
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
|
|
-outret:
|
|
return insn;
|
|
}
|
|
|
|
@@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
|
|
}
|
|
|
|
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
|
|
- unsigned int insn, int fault_code)
|
|
+ unsigned long fault_addr, unsigned int insn,
|
|
+ int fault_code)
|
|
{
|
|
unsigned long addr;
|
|
siginfo_t info;
|
|
@@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
|
|
info.si_code = code;
|
|
info.si_signo = sig;
|
|
info.si_errno = 0;
|
|
- if (fault_code & FAULT_CODE_ITLB)
|
|
+ if (fault_code & FAULT_CODE_ITLB) {
|
|
addr = regs->tpc;
|
|
- else
|
|
- addr = compute_effective_address(regs, insn, 0);
|
|
+ } else {
|
|
+ /* If we were able to probe the faulting instruction, use it
|
|
+ * to compute a precise fault address. Otherwise use the fault
|
|
+ * time provided address which may only have page granularity.
|
|
+ */
|
|
+ if (insn)
|
|
+ addr = compute_effective_address(regs, insn, 0);
|
|
+ else
|
|
+ addr = fault_addr;
|
|
+ }
|
|
info.si_addr = (void __user *) addr;
|
|
info.si_trapno = 0;
|
|
|
|
@@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
|
|
/* The si_code was set to make clear whether
|
|
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
|
|
*/
|
|
- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
|
|
+ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
|
|
return;
|
|
}
|
|
|
|
@@ -259,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
|
|
show_regs(regs);
|
|
}
|
|
|
|
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
|
|
- unsigned long addr)
|
|
-{
|
|
- static int times;
|
|
-
|
|
- if (times++ < 10)
|
|
- printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
|
|
- "reports 64-bit fault address [%lx]\n",
|
|
- current->comm, current->pid, addr);
|
|
- show_regs(regs);
|
|
-}
|
|
-
|
|
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
|
|
{
|
|
enum ctx_state prev_state = exception_enter();
|
|
@@ -300,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
|
|
goto intr_or_no_mm;
|
|
}
|
|
}
|
|
- if (unlikely((address >> 32) != 0)) {
|
|
- bogus_32bit_fault_address(regs, address);
|
|
+ if (unlikely((address >> 32) != 0))
|
|
goto intr_or_no_mm;
|
|
- }
|
|
}
|
|
|
|
if (regs->tstate & TSTATE_PRIV) {
|
|
@@ -340,6 +348,9 @@ retry:
|
|
down_read(&mm->mmap_sem);
|
|
}
|
|
|
|
+ if (fault_code & FAULT_CODE_BAD_RA)
|
|
+ goto do_sigbus;
|
|
+
|
|
vma = find_vma(mm, address);
|
|
if (!vma)
|
|
goto bad_area;
|
|
@@ -437,6 +448,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
@@ -525,7 +538,7 @@ do_sigbus:
|
|
* Send a sigbus, regardless of whether we were in kernel
|
|
* or user mode.
|
|
*/
|
|
- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
|
|
+ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
|
|
|
|
/* Kernel mode? Handle exceptions or die */
|
|
if (regs->tstate & TSTATE_PRIV)
|
|
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
|
|
index c4d3da6..ae6ce38 100644
|
|
--- a/arch/sparc/mm/gup.c
|
|
+++ b/arch/sparc/mm/gup.c
|
|
@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
|
|
struct page *head, *page, *tail;
|
|
int refs;
|
|
|
|
- if (!pmd_large(pmd))
|
|
+ if (!(pmd_val(pmd) & _PAGE_VALID))
|
|
return 0;
|
|
|
|
if (write && !pmd_write(pmd))
|
|
@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
|
|
return 1;
|
|
}
|
|
|
|
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
|
+ struct page **pages)
|
|
+{
|
|
+ struct mm_struct *mm = current->mm;
|
|
+ unsigned long addr, len, end;
|
|
+ unsigned long next, flags;
|
|
+ pgd_t *pgdp;
|
|
+ int nr = 0;
|
|
+
|
|
+ start &= PAGE_MASK;
|
|
+ addr = start;
|
|
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
|
|
+ end = start + len;
|
|
+
|
|
+ local_irq_save(flags);
|
|
+ pgdp = pgd_offset(mm, addr);
|
|
+ do {
|
|
+ pgd_t pgd = *pgdp;
|
|
+
|
|
+ next = pgd_addr_end(addr, end);
|
|
+ if (pgd_none(pgd))
|
|
+ break;
|
|
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
|
|
+ break;
|
|
+ } while (pgdp++, addr = next, addr != end);
|
|
+ local_irq_restore(flags);
|
|
+
|
|
+ return nr;
|
|
+}
|
|
+
|
|
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
|
struct page **pages)
|
|
{
|
|
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
|
|
index eafbc65..34506f2 100644
|
|
--- a/arch/sparc/mm/init_64.c
|
|
+++ b/arch/sparc/mm/init_64.c
|
|
@@ -73,7 +73,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
|
|
* 'cpu' properties, but we need to have this table setup before the
|
|
* MDESC is initialized.
|
|
*/
|
|
-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
|
|
|
|
#ifndef CONFIG_DEBUG_PAGEALLOC
|
|
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
|
|
@@ -82,10 +81,11 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
|
|
*/
|
|
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
|
|
#endif
|
|
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
|
|
|
|
static unsigned long cpu_pgsz_mask;
|
|
|
|
-#define MAX_BANKS 32
|
|
+#define MAX_BANKS 1024
|
|
|
|
static struct linux_prom64_registers pavail[MAX_BANKS];
|
|
static int pavail_ents;
|
|
@@ -163,10 +163,6 @@ static void __init read_obp_memory(const char *property,
|
|
cmp_p64, NULL);
|
|
}
|
|
|
|
-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
|
|
- sizeof(unsigned long)];
|
|
-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
|
|
-
|
|
/* Kernel physical address base and size in bytes. */
|
|
unsigned long kern_base __read_mostly;
|
|
unsigned long kern_size __read_mostly;
|
|
@@ -350,6 +346,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
|
|
|
|
mm = vma->vm_mm;
|
|
|
|
+ /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
|
|
+ if (!pte_accessible(mm, pte))
|
|
+ return;
|
|
+
|
|
spin_lock_irqsave(&mm->context.lock, flags);
|
|
|
|
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
|
@@ -588,7 +588,7 @@ static void __init remap_kernel(void)
|
|
int i, tlb_ent = sparc64_highest_locked_tlbent();
|
|
|
|
tte_vaddr = (unsigned long) KERNBASE;
|
|
- phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
|
|
+ phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
|
|
tte_data = kern_large_tte(phys_page);
|
|
|
|
kern_locked_tte_data = tte_data;
|
|
@@ -834,7 +834,10 @@ static int find_node(unsigned long addr)
|
|
if ((addr & p->mask) == p->val)
|
|
return i;
|
|
}
|
|
- return -1;
|
|
+ /* The following condition has been observed on LDOM guests.*/
|
|
+ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
|
|
+ " rule. Some physical memory will be owned by node 0.");
|
|
+ return 0;
|
|
}
|
|
|
|
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
|
|
@@ -1356,9 +1359,144 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
|
|
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
|
|
static int pall_ents __initdata;
|
|
|
|
-#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
+static unsigned long max_phys_bits = 40;
|
|
+
|
|
+bool kern_addr_valid(unsigned long addr)
|
|
+{
|
|
+ pgd_t *pgd;
|
|
+ pud_t *pud;
|
|
+ pmd_t *pmd;
|
|
+ pte_t *pte;
|
|
+
|
|
+ if ((long)addr < 0L) {
|
|
+ unsigned long pa = __pa(addr);
|
|
+
|
|
+ if ((addr >> max_phys_bits) != 0UL)
|
|
+ return false;
|
|
+
|
|
+ return pfn_valid(pa >> PAGE_SHIFT);
|
|
+ }
|
|
+
|
|
+ if (addr >= (unsigned long) KERNBASE &&
|
|
+ addr < (unsigned long)&_end)
|
|
+ return true;
|
|
+
|
|
+ pgd = pgd_offset_k(addr);
|
|
+ if (pgd_none(*pgd))
|
|
+ return 0;
|
|
+
|
|
+ pud = pud_offset(pgd, addr);
|
|
+ if (pud_none(*pud))
|
|
+ return 0;
|
|
+
|
|
+ if (pud_large(*pud))
|
|
+ return pfn_valid(pud_pfn(*pud));
|
|
+
|
|
+ pmd = pmd_offset(pud, addr);
|
|
+ if (pmd_none(*pmd))
|
|
+ return 0;
|
|
+
|
|
+ if (pmd_large(*pmd))
|
|
+ return pfn_valid(pmd_pfn(*pmd));
|
|
+
|
|
+ pte = pte_offset_kernel(pmd, addr);
|
|
+ if (pte_none(*pte))
|
|
+ return 0;
|
|
+
|
|
+ return pfn_valid(pte_pfn(*pte));
|
|
+}
|
|
+EXPORT_SYMBOL(kern_addr_valid);
|
|
+
|
|
+static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
|
|
+ unsigned long vend,
|
|
+ pud_t *pud)
|
|
+{
|
|
+ const unsigned long mask16gb = (1UL << 34) - 1UL;
|
|
+ u64 pte_val = vstart;
|
|
+
|
|
+ /* Each PUD is 8GB */
|
|
+ if ((vstart & mask16gb) ||
|
|
+ (vend - vstart <= mask16gb)) {
|
|
+ pte_val ^= kern_linear_pte_xor[2];
|
|
+ pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
|
|
+
|
|
+ return vstart + PUD_SIZE;
|
|
+ }
|
|
+
|
|
+ pte_val ^= kern_linear_pte_xor[3];
|
|
+ pte_val |= _PAGE_PUD_HUGE;
|
|
+
|
|
+ vend = vstart + mask16gb + 1UL;
|
|
+ while (vstart < vend) {
|
|
+ pud_val(*pud) = pte_val;
|
|
+
|
|
+ pte_val += PUD_SIZE;
|
|
+ vstart += PUD_SIZE;
|
|
+ pud++;
|
|
+ }
|
|
+ return vstart;
|
|
+}
|
|
+
|
|
+static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
|
|
+ bool guard)
|
|
+{
|
|
+ if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
|
|
+ unsigned long vend,
|
|
+ pmd_t *pmd)
|
|
+{
|
|
+ const unsigned long mask256mb = (1UL << 28) - 1UL;
|
|
+ const unsigned long mask2gb = (1UL << 31) - 1UL;
|
|
+ u64 pte_val = vstart;
|
|
+
|
|
+ /* Each PMD is 8MB */
|
|
+ if ((vstart & mask256mb) ||
|
|
+ (vend - vstart <= mask256mb)) {
|
|
+ pte_val ^= kern_linear_pte_xor[0];
|
|
+ pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
|
|
+
|
|
+ return vstart + PMD_SIZE;
|
|
+ }
|
|
+
|
|
+ if ((vstart & mask2gb) ||
|
|
+ (vend - vstart <= mask2gb)) {
|
|
+ pte_val ^= kern_linear_pte_xor[1];
|
|
+ pte_val |= _PAGE_PMD_HUGE;
|
|
+ vend = vstart + mask256mb + 1UL;
|
|
+ } else {
|
|
+ pte_val ^= kern_linear_pte_xor[2];
|
|
+ pte_val |= _PAGE_PMD_HUGE;
|
|
+ vend = vstart + mask2gb + 1UL;
|
|
+ }
|
|
+
|
|
+ while (vstart < vend) {
|
|
+ pmd_val(*pmd) = pte_val;
|
|
+
|
|
+ pte_val += PMD_SIZE;
|
|
+ vstart += PMD_SIZE;
|
|
+ pmd++;
|
|
+ }
|
|
+
|
|
+ return vstart;
|
|
+}
|
|
+
|
|
+static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
|
|
+ bool guard)
|
|
+{
|
|
+ if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
static unsigned long __ref kernel_map_range(unsigned long pstart,
|
|
- unsigned long pend, pgprot_t prot)
|
|
+ unsigned long pend, pgprot_t prot,
|
|
+ bool use_huge)
|
|
{
|
|
unsigned long vstart = PAGE_OFFSET + pstart;
|
|
unsigned long vend = PAGE_OFFSET + pend;
|
|
@@ -1377,19 +1515,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
|
|
pmd_t *pmd;
|
|
pte_t *pte;
|
|
|
|
+ if (pgd_none(*pgd)) {
|
|
+ pud_t *new;
|
|
+
|
|
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
|
|
+ alloc_bytes += PAGE_SIZE;
|
|
+ pgd_populate(&init_mm, pgd, new);
|
|
+ }
|
|
pud = pud_offset(pgd, vstart);
|
|
if (pud_none(*pud)) {
|
|
pmd_t *new;
|
|
|
|
+ if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
|
|
+ vstart = kernel_map_hugepud(vstart, vend, pud);
|
|
+ continue;
|
|
+ }
|
|
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
|
|
alloc_bytes += PAGE_SIZE;
|
|
pud_populate(&init_mm, pud, new);
|
|
}
|
|
|
|
pmd = pmd_offset(pud, vstart);
|
|
- if (!pmd_present(*pmd)) {
|
|
+ if (pmd_none(*pmd)) {
|
|
pte_t *new;
|
|
|
|
+ if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
|
|
+ vstart = kernel_map_hugepmd(vstart, vend, pmd);
|
|
+ continue;
|
|
+ }
|
|
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
|
|
alloc_bytes += PAGE_SIZE;
|
|
pmd_populate_kernel(&init_mm, pmd, new);
|
|
@@ -1412,100 +1565,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
|
|
return alloc_bytes;
|
|
}
|
|
|
|
-extern unsigned int kvmap_linear_patch[1];
|
|
-#endif /* CONFIG_DEBUG_PAGEALLOC */
|
|
-
|
|
-static void __init kpte_set_val(unsigned long index, unsigned long val)
|
|
-{
|
|
- unsigned long *ptr = kpte_linear_bitmap;
|
|
-
|
|
- val <<= ((index % (BITS_PER_LONG / 2)) * 2);
|
|
- ptr += (index / (BITS_PER_LONG / 2));
|
|
-
|
|
- *ptr |= val;
|
|
-}
|
|
-
|
|
-static const unsigned long kpte_shift_min = 28; /* 256MB */
|
|
-static const unsigned long kpte_shift_max = 34; /* 16GB */
|
|
-static const unsigned long kpte_shift_incr = 3;
|
|
-
|
|
-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
|
|
- unsigned long shift)
|
|
+static void __init flush_all_kernel_tsbs(void)
|
|
{
|
|
- unsigned long size = (1UL << shift);
|
|
- unsigned long mask = (size - 1UL);
|
|
- unsigned long remains = end - start;
|
|
- unsigned long val;
|
|
-
|
|
- if (remains < size || (start & mask))
|
|
- return start;
|
|
-
|
|
- /* VAL maps:
|
|
- *
|
|
- * shift 28 --> kern_linear_pte_xor index 1
|
|
- * shift 31 --> kern_linear_pte_xor index 2
|
|
- * shift 34 --> kern_linear_pte_xor index 3
|
|
- */
|
|
- val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
|
|
-
|
|
- remains &= ~mask;
|
|
- if (shift != kpte_shift_max)
|
|
- remains = size;
|
|
-
|
|
- while (remains) {
|
|
- unsigned long index = start >> kpte_shift_min;
|
|
+ int i;
|
|
|
|
- kpte_set_val(index, val);
|
|
+ for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
|
|
+ struct tsb *ent = &swapper_tsb[i];
|
|
|
|
- start += 1UL << kpte_shift_min;
|
|
- remains -= 1UL << kpte_shift_min;
|
|
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
|
|
}
|
|
+#ifndef CONFIG_DEBUG_PAGEALLOC
|
|
+ for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
|
|
+ struct tsb *ent = &swapper_4m_tsb[i];
|
|
|
|
- return start;
|
|
-}
|
|
-
|
|
-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
|
|
-{
|
|
- unsigned long smallest_size, smallest_mask;
|
|
- unsigned long s;
|
|
-
|
|
- smallest_size = (1UL << kpte_shift_min);
|
|
- smallest_mask = (smallest_size - 1UL);
|
|
-
|
|
- while (start < end) {
|
|
- unsigned long orig_start = start;
|
|
-
|
|
- for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
|
|
- start = kpte_mark_using_shift(start, end, s);
|
|
-
|
|
- if (start != orig_start)
|
|
- break;
|
|
- }
|
|
-
|
|
- if (start == orig_start)
|
|
- start = (start + smallest_size) & ~smallest_mask;
|
|
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
|
|
}
|
|
+#endif
|
|
}
|
|
|
|
-static void __init init_kpte_bitmap(void)
|
|
-{
|
|
- unsigned long i;
|
|
-
|
|
- for (i = 0; i < pall_ents; i++) {
|
|
- unsigned long phys_start, phys_end;
|
|
-
|
|
- phys_start = pall[i].phys_addr;
|
|
- phys_end = phys_start + pall[i].reg_size;
|
|
-
|
|
- mark_kpte_bitmap(phys_start, phys_end);
|
|
- }
|
|
-}
|
|
+extern unsigned int kvmap_linear_patch[1];
|
|
|
|
static void __init kernel_physical_mapping_init(void)
|
|
{
|
|
-#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
unsigned long i, mem_alloced = 0UL;
|
|
+ bool use_huge = true;
|
|
|
|
+#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
+ use_huge = false;
|
|
+#endif
|
|
for (i = 0; i < pall_ents; i++) {
|
|
unsigned long phys_start, phys_end;
|
|
|
|
@@ -1513,7 +1600,7 @@ static void __init kernel_physical_mapping_init(void)
|
|
phys_end = phys_start + pall[i].reg_size;
|
|
|
|
mem_alloced += kernel_map_range(phys_start, phys_end,
|
|
- PAGE_KERNEL);
|
|
+ PAGE_KERNEL, use_huge);
|
|
}
|
|
|
|
printk("Allocated %ld bytes for kernel page tables.\n",
|
|
@@ -1522,8 +1609,9 @@ static void __init kernel_physical_mapping_init(void)
|
|
kvmap_linear_patch[0] = 0x01000000; /* nop */
|
|
flushi(&kvmap_linear_patch[0]);
|
|
|
|
+ flush_all_kernel_tsbs();
|
|
+
|
|
__flush_tlb_all();
|
|
-#endif
|
|
}
|
|
|
|
#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
@@ -1533,7 +1621,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
|
|
unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
|
|
|
|
kernel_map_range(phys_start, phys_end,
|
|
- (enable ? PAGE_KERNEL : __pgprot(0)));
|
|
+ (enable ? PAGE_KERNEL : __pgprot(0)), false);
|
|
|
|
flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
|
|
PAGE_OFFSET + phys_end);
|
|
@@ -1561,76 +1649,56 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
|
|
unsigned long PAGE_OFFSET;
|
|
EXPORT_SYMBOL(PAGE_OFFSET);
|
|
|
|
-static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
|
|
-{
|
|
- unsigned long final_shift;
|
|
- unsigned int val = *insn;
|
|
- unsigned int cnt;
|
|
-
|
|
- /* We are patching in ilog2(max_supported_phys_address), and
|
|
- * we are doing so in a manner similar to a relocation addend.
|
|
- * That is, we are adding the shift value to whatever value
|
|
- * is in the shift instruction count field already.
|
|
- */
|
|
- cnt = (val & 0x3f);
|
|
- val &= ~0x3f;
|
|
-
|
|
- /* If we are trying to shift >= 64 bits, clear the destination
|
|
- * register. This can happen when phys_bits ends up being equal
|
|
- * to MAX_PHYS_ADDRESS_BITS.
|
|
- */
|
|
- final_shift = (cnt + (64 - phys_bits));
|
|
- if (final_shift >= 64) {
|
|
- unsigned int rd = (val >> 25) & 0x1f;
|
|
-
|
|
- val = 0x80100000 | (rd << 25);
|
|
- } else {
|
|
- val |= final_shift;
|
|
- }
|
|
- *insn = val;
|
|
-
|
|
- __asm__ __volatile__("flush %0"
|
|
- : /* no outputs */
|
|
- : "r" (insn));
|
|
-}
|
|
-
|
|
-static void __init page_offset_shift_patch(unsigned long phys_bits)
|
|
-{
|
|
- extern unsigned int __page_offset_shift_patch;
|
|
- extern unsigned int __page_offset_shift_patch_end;
|
|
- unsigned int *p;
|
|
-
|
|
- p = &__page_offset_shift_patch;
|
|
- while (p < &__page_offset_shift_patch_end) {
|
|
- unsigned int *insn = (unsigned int *)(unsigned long)*p;
|
|
+unsigned long VMALLOC_END = 0x0000010000000000UL;
|
|
+EXPORT_SYMBOL(VMALLOC_END);
|
|
|
|
- page_offset_shift_patch_one(insn, phys_bits);
|
|
-
|
|
- p++;
|
|
- }
|
|
-}
|
|
+unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
|
|
+unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
|
|
|
|
static void __init setup_page_offset(void)
|
|
{
|
|
- unsigned long max_phys_bits = 40;
|
|
-
|
|
if (tlb_type == cheetah || tlb_type == cheetah_plus) {
|
|
+ /* Cheetah/Panther support a full 64-bit virtual
|
|
+ * address, so we can use all that our page tables
|
|
+ * support.
|
|
+ */
|
|
+ sparc64_va_hole_top = 0xfff0000000000000UL;
|
|
+ sparc64_va_hole_bottom = 0x0010000000000000UL;
|
|
+
|
|
max_phys_bits = 42;
|
|
} else if (tlb_type == hypervisor) {
|
|
switch (sun4v_chip_type) {
|
|
case SUN4V_CHIP_NIAGARA1:
|
|
case SUN4V_CHIP_NIAGARA2:
|
|
+ /* T1 and T2 support 48-bit virtual addresses. */
|
|
+ sparc64_va_hole_top = 0xffff800000000000UL;
|
|
+ sparc64_va_hole_bottom = 0x0000800000000000UL;
|
|
+
|
|
max_phys_bits = 39;
|
|
break;
|
|
case SUN4V_CHIP_NIAGARA3:
|
|
+ /* T3 supports 48-bit virtual addresses. */
|
|
+ sparc64_va_hole_top = 0xffff800000000000UL;
|
|
+ sparc64_va_hole_bottom = 0x0000800000000000UL;
|
|
+
|
|
max_phys_bits = 43;
|
|
break;
|
|
case SUN4V_CHIP_NIAGARA4:
|
|
case SUN4V_CHIP_NIAGARA5:
|
|
case SUN4V_CHIP_SPARC64X:
|
|
- default:
|
|
+ case SUN4V_CHIP_SPARC_M6:
|
|
+ /* T4 and later support 52-bit virtual addresses. */
|
|
+ sparc64_va_hole_top = 0xfff8000000000000UL;
|
|
+ sparc64_va_hole_bottom = 0x0008000000000000UL;
|
|
max_phys_bits = 47;
|
|
break;
|
|
+ case SUN4V_CHIP_SPARC_M7:
|
|
+ default:
|
|
+ /* M7 and later support 52-bit virtual addresses. */
|
|
+ sparc64_va_hole_top = 0xfff8000000000000UL;
|
|
+ sparc64_va_hole_bottom = 0x0008000000000000UL;
|
|
+ max_phys_bits = 49;
|
|
+ break;
|
|
}
|
|
}
|
|
|
|
@@ -1640,12 +1708,16 @@ static void __init setup_page_offset(void)
|
|
prom_halt();
|
|
}
|
|
|
|
- PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
|
|
+ PAGE_OFFSET = sparc64_va_hole_top;
|
|
+ VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
|
|
+ (sparc64_va_hole_bottom >> 2));
|
|
|
|
- pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
|
|
+ pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
|
|
PAGE_OFFSET, max_phys_bits);
|
|
-
|
|
- page_offset_shift_patch(max_phys_bits);
|
|
+ pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
|
|
+ VMALLOC_START, VMALLOC_END);
|
|
+ pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
|
|
+ VMEMMAP_BASE, VMEMMAP_BASE << 1);
|
|
}
|
|
|
|
static void __init tsb_phys_patch(void)
|
|
@@ -1690,21 +1762,42 @@ static void __init tsb_phys_patch(void)
|
|
#define NUM_KTSB_DESCR 1
|
|
#endif
|
|
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
|
|
-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
|
|
+
|
|
+/* The swapper TSBs are loaded with a base sequence of:
|
|
+ *
|
|
+ * sethi %uhi(SYMBOL), REG1
|
|
+ * sethi %hi(SYMBOL), REG2
|
|
+ * or REG1, %ulo(SYMBOL), REG1
|
|
+ * or REG2, %lo(SYMBOL), REG2
|
|
+ * sllx REG1, 32, REG1
|
|
+ * or REG1, REG2, REG1
|
|
+ *
|
|
+ * When we use physical addressing for the TSB accesses, we patch the
|
|
+ * first four instructions in the above sequence.
|
|
+ */
|
|
|
|
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
|
|
{
|
|
- pa >>= KTSB_PHYS_SHIFT;
|
|
+ unsigned long high_bits, low_bits;
|
|
+
|
|
+ high_bits = (pa >> 32) & 0xffffffff;
|
|
+ low_bits = (pa >> 0) & 0xffffffff;
|
|
|
|
while (start < end) {
|
|
unsigned int *ia = (unsigned int *)(unsigned long)*start;
|
|
|
|
- ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
|
|
+ ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
|
|
__asm__ __volatile__("flush %0" : : "r" (ia));
|
|
|
|
- ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
|
|
+ ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
|
|
__asm__ __volatile__("flush %0" : : "r" (ia + 1));
|
|
|
|
+ ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
|
|
+ __asm__ __volatile__("flush %0" : : "r" (ia + 2));
|
|
+
|
|
+ ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
|
|
+ __asm__ __volatile__("flush %0" : : "r" (ia + 3));
|
|
+
|
|
start++;
|
|
}
|
|
}
|
|
@@ -1843,7 +1936,6 @@ static void __init sun4v_linear_pte_xor_finalize(void)
|
|
/* paging_init() sets up the page tables */
|
|
|
|
static unsigned long last_valid_pfn;
|
|
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
|
|
|
static void sun4u_pgprot_init(void);
|
|
static void sun4v_pgprot_init(void);
|
|
@@ -1881,7 +1973,7 @@ void __init paging_init(void)
|
|
|
|
BUILD_BUG_ON(NR_CPUS > 4096);
|
|
|
|
- kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
|
|
+ kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
|
|
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
|
|
|
|
/* Invalidate both kernel TSBs. */
|
|
@@ -1937,7 +2029,7 @@ void __init paging_init(void)
|
|
shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
|
|
|
|
real_end = (unsigned long)_end;
|
|
- num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
|
|
+ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
|
|
printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
|
|
num_kernel_image_mappings);
|
|
|
|
@@ -1946,16 +2038,10 @@ void __init paging_init(void)
|
|
*/
|
|
init_mm.pgd += ((shift) / (sizeof(pgd_t)));
|
|
|
|
- memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
|
|
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
|
|
|
|
- /* Now can init the kernel/bad page tables. */
|
|
- pud_set(pud_offset(&swapper_pg_dir[0], 0),
|
|
- swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
|
|
-
|
|
inherit_prom_mappings();
|
|
|
|
- init_kpte_bitmap();
|
|
-
|
|
/* Ok, we can use our TLB miss and window trap handlers safely. */
|
|
setup_tba();
|
|
|
|
@@ -2062,70 +2148,6 @@ int page_in_phys_avail(unsigned long paddr)
|
|
return 0;
|
|
}
|
|
|
|
-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
|
|
-static int pavail_rescan_ents __initdata;
|
|
-
|
|
-/* Certain OBP calls, such as fetching "available" properties, can
|
|
- * claim physical memory. So, along with initializing the valid
|
|
- * address bitmap, what we do here is refetch the physical available
|
|
- * memory list again, and make sure it provides at least as much
|
|
- * memory as 'pavail' does.
|
|
- */
|
|
-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
|
|
-{
|
|
- int i;
|
|
-
|
|
- read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
|
|
-
|
|
- for (i = 0; i < pavail_ents; i++) {
|
|
- unsigned long old_start, old_end;
|
|
-
|
|
- old_start = pavail[i].phys_addr;
|
|
- old_end = old_start + pavail[i].reg_size;
|
|
- while (old_start < old_end) {
|
|
- int n;
|
|
-
|
|
- for (n = 0; n < pavail_rescan_ents; n++) {
|
|
- unsigned long new_start, new_end;
|
|
-
|
|
- new_start = pavail_rescan[n].phys_addr;
|
|
- new_end = new_start +
|
|
- pavail_rescan[n].reg_size;
|
|
-
|
|
- if (new_start <= old_start &&
|
|
- new_end >= (old_start + PAGE_SIZE)) {
|
|
- set_bit(old_start >> 22, bitmap);
|
|
- goto do_next_page;
|
|
- }
|
|
- }
|
|
-
|
|
- prom_printf("mem_init: Lost memory in pavail\n");
|
|
- prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
|
|
- pavail[i].phys_addr,
|
|
- pavail[i].reg_size);
|
|
- prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
|
|
- pavail_rescan[i].phys_addr,
|
|
- pavail_rescan[i].reg_size);
|
|
- prom_printf("mem_init: Cannot continue, aborting.\n");
|
|
- prom_halt();
|
|
-
|
|
- do_next_page:
|
|
- old_start += PAGE_SIZE;
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-static void __init patch_tlb_miss_handler_bitmap(void)
|
|
-{
|
|
- extern unsigned int valid_addr_bitmap_insn[];
|
|
- extern unsigned int valid_addr_bitmap_patch[];
|
|
-
|
|
- valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
|
|
- mb();
|
|
- valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
|
|
- flushi(&valid_addr_bitmap_insn[0]);
|
|
-}
|
|
-
|
|
static void __init register_page_bootmem_info(void)
|
|
{
|
|
#ifdef CONFIG_NEED_MULTIPLE_NODES
|
|
@@ -2138,18 +2160,6 @@ static void __init register_page_bootmem_info(void)
|
|
}
|
|
void __init mem_init(void)
|
|
{
|
|
- unsigned long addr, last;
|
|
-
|
|
- addr = PAGE_OFFSET + kern_base;
|
|
- last = PAGE_ALIGN(kern_size) + addr;
|
|
- while (addr < last) {
|
|
- set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
|
|
- addr += PAGE_SIZE;
|
|
- }
|
|
-
|
|
- setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
|
|
- patch_tlb_miss_handler_bitmap();
|
|
-
|
|
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
|
|
|
|
register_page_bootmem_info();
|
|
@@ -2239,18 +2249,9 @@ unsigned long _PAGE_CACHE __read_mostly;
|
|
EXPORT_SYMBOL(_PAGE_CACHE);
|
|
|
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
|
-unsigned long vmemmap_table[VMEMMAP_SIZE];
|
|
-
|
|
-static long __meminitdata addr_start, addr_end;
|
|
-static int __meminitdata node_start;
|
|
-
|
|
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
|
|
int node)
|
|
{
|
|
- unsigned long phys_start = (vstart - VMEMMAP_BASE);
|
|
- unsigned long phys_end = (vend - VMEMMAP_BASE);
|
|
- unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
|
|
- unsigned long end = VMEMMAP_ALIGN(phys_end);
|
|
unsigned long pte_base;
|
|
|
|
pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
|
|
@@ -2261,47 +2262,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
|
|
_PAGE_CP_4V | _PAGE_CV_4V |
|
|
_PAGE_P_4V | _PAGE_W_4V);
|
|
|
|
- for (; addr < end; addr += VMEMMAP_CHUNK) {
|
|
- unsigned long *vmem_pp =
|
|
- vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
|
|
- void *block;
|
|
+ pte_base |= _PAGE_PMD_HUGE;
|
|
|
|
- if (!(*vmem_pp & _PAGE_VALID)) {
|
|
- block = vmemmap_alloc_block(1UL << 22, node);
|
|
- if (!block)
|
|
+ vstart = vstart & PMD_MASK;
|
|
+ vend = ALIGN(vend, PMD_SIZE);
|
|
+ for (; vstart < vend; vstart += PMD_SIZE) {
|
|
+ pgd_t *pgd = pgd_offset_k(vstart);
|
|
+ unsigned long pte;
|
|
+ pud_t *pud;
|
|
+ pmd_t *pmd;
|
|
+
|
|
+ if (pgd_none(*pgd)) {
|
|
+ pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
|
|
+
|
|
+ if (!new)
|
|
return -ENOMEM;
|
|
+ pgd_populate(&init_mm, pgd, new);
|
|
+ }
|
|
|
|
- *vmem_pp = pte_base | __pa(block);
|
|
+ pud = pud_offset(pgd, vstart);
|
|
+ if (pud_none(*pud)) {
|
|
+ pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
|
|
|
|
- /* check to see if we have contiguous blocks */
|
|
- if (addr_end != addr || node_start != node) {
|
|
- if (addr_start)
|
|
- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
|
|
- addr_start, addr_end-1, node_start);
|
|
- addr_start = addr;
|
|
- node_start = node;
|
|
- }
|
|
- addr_end = addr + VMEMMAP_CHUNK;
|
|
+ if (!new)
|
|
+ return -ENOMEM;
|
|
+ pud_populate(&init_mm, pud, new);
|
|
}
|
|
- }
|
|
- return 0;
|
|
-}
|
|
|
|
-void __meminit vmemmap_populate_print_last(void)
|
|
-{
|
|
- if (addr_start) {
|
|
- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
|
|
- addr_start, addr_end-1, node_start);
|
|
- addr_start = 0;
|
|
- addr_end = 0;
|
|
- node_start = 0;
|
|
+ pmd = pmd_offset(pud, vstart);
|
|
+
|
|
+ pte = pmd_val(*pmd);
|
|
+ if (!(pte & _PAGE_VALID)) {
|
|
+ void *block = vmemmap_alloc_block(PMD_SIZE, node);
|
|
+
|
|
+ if (!block)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ pmd_val(*pmd) = pte_base | __pa(block);
|
|
+ }
|
|
}
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
void vmemmap_free(unsigned long start, unsigned long end)
|
|
{
|
|
}
|
|
-
|
|
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
|
|
|
|
static void prot_init_common(unsigned long page_none,
|
|
@@ -2614,6 +2620,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
|
|
|
|
pte = pmd_val(entry);
|
|
|
|
+ /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
|
|
+ if (!(pte & _PAGE_VALID))
|
|
+ return;
|
|
+
|
|
/* We are fabricating 8MB pages using 4MB real hw pages. */
|
|
pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
|
|
|
|
@@ -2694,3 +2704,26 @@ void hugetlb_setup(struct pt_regs *regs)
|
|
}
|
|
}
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_SMP
|
|
+#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
|
|
+#else
|
|
+#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
|
|
+#endif
|
|
+
|
|
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
|
+{
|
|
+ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
|
|
+ if (start < LOW_OBP_ADDRESS) {
|
|
+ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
|
|
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
|
|
+ }
|
|
+ if (end > HI_OBP_ADDRESS) {
|
|
+ flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
|
|
+ do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
|
|
+ }
|
|
+ } else {
|
|
+ flush_tsb_kernel_range(start, end);
|
|
+ do_flush_tlb_kernel_range(start, end);
|
|
+ }
|
|
+}
|
|
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
|
|
index 5d3782de..ac49119 100644
|
|
--- a/arch/sparc/mm/init_64.h
|
|
+++ b/arch/sparc/mm/init_64.h
|
|
@@ -8,15 +8,8 @@
|
|
*/
|
|
|
|
#define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS)
|
|
-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
|
|
-#define KPTE_BITMAP_BYTES \
|
|
- ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
|
|
-#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL)
|
|
-#define VALID_ADDR_BITMAP_BYTES \
|
|
- ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
|
|
|
|
extern unsigned long kern_linear_pte_xor[4];
|
|
-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
|
|
extern unsigned int sparc64_highest_unlocked_tlb_ent;
|
|
extern unsigned long sparc64_kern_pri_context;
|
|
extern unsigned long sparc64_kern_pri_nuc_bits;
|
|
@@ -38,15 +31,4 @@ extern unsigned long kern_locked_tte_data;
|
|
|
|
extern void prom_world(int enter);
|
|
|
|
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
|
-#define VMEMMAP_CHUNK_SHIFT 22
|
|
-#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
|
|
-#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
|
|
-#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
|
|
-
|
|
-#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
|
|
- sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
|
|
-extern unsigned long vmemmap_table[VMEMMAP_SIZE];
|
|
-#endif
|
|
-
|
|
#endif /* _SPARC64_MM_INIT_H */
|
|
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
|
|
index cfbe53c..09daebd 100644
|
|
--- a/arch/sparc/mm/srmmu.c
|
|
+++ b/arch/sparc/mm/srmmu.c
|
|
@@ -460,10 +460,12 @@ static void __init sparc_context_init(int numctx)
|
|
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
|
|
struct task_struct *tsk)
|
|
{
|
|
+ unsigned long flags;
|
|
+
|
|
if (mm->context == NO_CONTEXT) {
|
|
- spin_lock(&srmmu_context_spinlock);
|
|
+ spin_lock_irqsave(&srmmu_context_spinlock, flags);
|
|
alloc_context(old_mm, mm);
|
|
- spin_unlock(&srmmu_context_spinlock);
|
|
+ spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
|
|
srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
|
|
}
|
|
|
|
@@ -988,14 +990,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
|
|
|
void destroy_context(struct mm_struct *mm)
|
|
{
|
|
+ unsigned long flags;
|
|
|
|
if (mm->context != NO_CONTEXT) {
|
|
flush_cache_mm(mm);
|
|
srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
|
|
flush_tlb_mm(mm);
|
|
- spin_lock(&srmmu_context_spinlock);
|
|
+ spin_lock_irqsave(&srmmu_context_spinlock, flags);
|
|
free_context(mm->context);
|
|
- spin_unlock(&srmmu_context_spinlock);
|
|
+ spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
|
|
mm->context = NO_CONTEXT;
|
|
}
|
|
}
|
|
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
|
|
index b12cb5e..b89aba2 100644
|
|
--- a/arch/sparc/mm/tlb.c
|
|
+++ b/arch/sparc/mm/tlb.c
|
|
@@ -134,7 +134,7 @@ no_cache_flush:
|
|
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
|
|
- pmd_t pmd, bool exec)
|
|
+ pmd_t pmd)
|
|
{
|
|
unsigned long end;
|
|
pte_t *pte;
|
|
@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
|
|
pte = pte_offset_map(&pmd, vaddr);
|
|
end = vaddr + HPAGE_SIZE;
|
|
while (vaddr < end) {
|
|
- if (pte_val(*pte) & _PAGE_VALID)
|
|
+ if (pte_val(*pte) & _PAGE_VALID) {
|
|
+ bool exec = pte_exec(*pte);
|
|
+
|
|
tlb_batch_add_one(mm, vaddr, exec);
|
|
+ }
|
|
pte++;
|
|
vaddr += PAGE_SIZE;
|
|
}
|
|
@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
|
|
}
|
|
|
|
if (!pmd_none(orig)) {
|
|
- pte_t orig_pte = __pte(pmd_val(orig));
|
|
- bool exec = pte_exec(orig_pte);
|
|
-
|
|
addr &= HPAGE_MASK;
|
|
if (pmd_trans_huge(orig)) {
|
|
+ pte_t orig_pte = __pte(pmd_val(orig));
|
|
+ bool exec = pte_exec(orig_pte);
|
|
+
|
|
tlb_batch_add_one(mm, addr, exec);
|
|
tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
|
|
} else {
|
|
- tlb_batch_pmd_scan(mm, addr, orig, exec);
|
|
+ tlb_batch_pmd_scan(mm, addr, orig);
|
|
}
|
|
}
|
|
}
|
|
|
|
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
|
+ pmd_t *pmdp)
|
|
+{
|
|
+ pmd_t entry = *pmdp;
|
|
+
|
|
+ pmd_val(entry) &= ~_PAGE_VALID;
|
|
+
|
|
+ set_pmd_at(vma->vm_mm, address, pmdp, entry);
|
|
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
|
|
+}
|
|
+
|
|
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
|
|
pgtable_t pgtable)
|
|
{
|
|
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
|
|
index f5d506f..fe19b81 100644
|
|
--- a/arch/sparc/mm/tsb.c
|
|
+++ b/arch/sparc/mm/tsb.c
|
|
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
|
|
mm->context.tsb_block[tsb_idx].tsb_nentries =
|
|
tsb_bytes / sizeof(struct tsb);
|
|
|
|
- base = TSBMAP_BASE;
|
|
+ switch (tsb_idx) {
|
|
+ case MM_TSB_BASE:
|
|
+ base = TSBMAP_8K_BASE;
|
|
+ break;
|
|
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
|
+ case MM_TSB_HUGE:
|
|
+ base = TSBMAP_4M_BASE;
|
|
+ break;
|
|
+#endif
|
|
+ default:
|
|
+ BUG();
|
|
+ }
|
|
+
|
|
tte = pgprot_val(PAGE_KERNEL_LOCKED);
|
|
tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
|
|
BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
|
|
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
|
|
index 42b0b8c..17bd2e1 100644
|
|
--- a/arch/sparc/power/hibernate.c
|
|
+++ b/arch/sparc/power/hibernate.c
|
|
@@ -9,11 +9,9 @@
|
|
#include <asm/hibernate.h>
|
|
#include <asm/visasm.h>
|
|
#include <asm/page.h>
|
|
+#include <asm/sections.h>
|
|
#include <asm/tlb.h>
|
|
|
|
-/* References to section boundaries */
|
|
-extern const void __nosave_begin, __nosave_end;
|
|
-
|
|
struct saved_context saved_context;
|
|
|
|
/*
|
|
diff --git a/arch/sparc/power/hibernate_asm.S b/arch/sparc/power/hibernate_asm.S
|
|
index 7994216..d7d9017 100644
|
|
--- a/arch/sparc/power/hibernate_asm.S
|
|
+++ b/arch/sparc/power/hibernate_asm.S
|
|
@@ -54,8 +54,8 @@ ENTRY(swsusp_arch_resume)
|
|
nop
|
|
|
|
/* Write PAGE_OFFSET to %g7 */
|
|
- sethi %uhi(PAGE_OFFSET), %g7
|
|
- sllx %g7, 32, %g7
|
|
+ sethi %hi(PAGE_OFFSET), %g7
|
|
+ ldx [%g7 + %lo(PAGE_OFFSET)], %g7
|
|
|
|
setuw (PAGE_SIZE-8), %g3
|
|
|
|
diff --git a/arch/sparc/prom/bootstr_64.c b/arch/sparc/prom/bootstr_64.c
|
|
index ab9ccc6..7149e77 100644
|
|
--- a/arch/sparc/prom/bootstr_64.c
|
|
+++ b/arch/sparc/prom/bootstr_64.c
|
|
@@ -14,7 +14,10 @@
|
|
* the .bss section or it will break things.
|
|
*/
|
|
|
|
-#define BARG_LEN 256
|
|
+/* We limit BARG_LEN to 1024 because this is the size of the
|
|
+ * 'barg_out' command line buffer in the SILO bootloader.
|
|
+ */
|
|
+#define BARG_LEN 1024
|
|
struct {
|
|
int bootstr_len;
|
|
int bootstr_valid;
|
|
diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
|
|
index 9c86b4b..8050f38 100644
|
|
--- a/arch/sparc/prom/cif.S
|
|
+++ b/arch/sparc/prom/cif.S
|
|
@@ -11,11 +11,10 @@
|
|
.text
|
|
.globl prom_cif_direct
|
|
prom_cif_direct:
|
|
+ save %sp, -192, %sp
|
|
sethi %hi(p1275buf), %o1
|
|
or %o1, %lo(p1275buf), %o1
|
|
- ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
|
|
- save %o2, -192, %sp
|
|
- ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
|
|
+ ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
|
|
mov %g4, %l0
|
|
mov %g5, %l1
|
|
mov %g6, %l3
|
|
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
|
|
index d95db75..110b0d7 100644
|
|
--- a/arch/sparc/prom/init_64.c
|
|
+++ b/arch/sparc/prom/init_64.c
|
|
@@ -26,13 +26,13 @@ phandle prom_chosen_node;
|
|
* It gets passed the pointer to the PROM vector.
|
|
*/
|
|
|
|
-extern void prom_cif_init(void *, void *);
|
|
+extern void prom_cif_init(void *);
|
|
|
|
-void __init prom_init(void *cif_handler, void *cif_stack)
|
|
+void __init prom_init(void *cif_handler)
|
|
{
|
|
phandle node;
|
|
|
|
- prom_cif_init(cif_handler, cif_stack);
|
|
+ prom_cif_init(cif_handler);
|
|
|
|
prom_chosen_node = prom_finddevice(prom_chosen_path);
|
|
if (!prom_chosen_node || (s32)prom_chosen_node == -1)
|
|
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
|
|
index e58b817..545d8bb 100644
|
|
--- a/arch/sparc/prom/p1275.c
|
|
+++ b/arch/sparc/prom/p1275.c
|
|
@@ -9,6 +9,7 @@
|
|
#include <linux/smp.h>
|
|
#include <linux/string.h>
|
|
#include <linux/spinlock.h>
|
|
+#include <linux/irqflags.h>
|
|
|
|
#include <asm/openprom.h>
|
|
#include <asm/oplib.h>
|
|
@@ -19,7 +20,6 @@
|
|
struct {
|
|
long prom_callback; /* 0x00 */
|
|
void (*prom_cif_handler)(long *); /* 0x08 */
|
|
- unsigned long prom_cif_stack; /* 0x10 */
|
|
} p1275buf;
|
|
|
|
extern void prom_world(int);
|
|
@@ -36,8 +36,8 @@ void p1275_cmd_direct(unsigned long *args)
|
|
{
|
|
unsigned long flags;
|
|
|
|
- raw_local_save_flags(flags);
|
|
- raw_local_irq_restore((unsigned long)PIL_NMI);
|
|
+ local_save_flags(flags);
|
|
+ local_irq_restore((unsigned long)PIL_NMI);
|
|
raw_spin_lock(&prom_entry_lock);
|
|
|
|
prom_world(1);
|
|
@@ -45,11 +45,10 @@ void p1275_cmd_direct(unsigned long *args)
|
|
prom_world(0);
|
|
|
|
raw_spin_unlock(&prom_entry_lock);
|
|
- raw_local_irq_restore(flags);
|
|
+ local_irq_restore(flags);
|
|
}
|
|
|
|
void prom_cif_init(void *cif_handler, void *cif_stack)
|
|
{
|
|
p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
|
|
- p1275buf.prom_cif_stack = (unsigned long)cif_stack;
|
|
}
|
|
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
|
|
index 74c9172..bdb3ecf 100644
|
|
--- a/arch/tile/kernel/setup.c
|
|
+++ b/arch/tile/kernel/setup.c
|
|
@@ -1146,7 +1146,7 @@ static void __init load_hv_initrd(void)
|
|
|
|
void __init free_initrd_mem(unsigned long begin, unsigned long end)
|
|
{
|
|
- free_bootmem(__pa(begin), end - begin);
|
|
+ free_bootmem_late(__pa(begin), end - begin);
|
|
}
|
|
|
|
static int __init setup_initrd(char *str)
|
|
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
|
|
index 6c05712..c6d2a76 100644
|
|
--- a/arch/tile/mm/fault.c
|
|
+++ b/arch/tile/mm/fault.c
|
|
@@ -444,6 +444,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
|
|
index 004ba56..33294fd 100644
|
|
--- a/arch/tile/mm/homecache.c
|
|
+++ b/arch/tile/mm/homecache.c
|
|
@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
|
|
if (put_page_testzero(page)) {
|
|
homecache_change_page_home(page, order, PAGE_HOME_HASH);
|
|
if (order == 0) {
|
|
- free_hot_cold_page(page, 0);
|
|
+ free_hot_cold_page(page, false);
|
|
} else {
|
|
init_page_count(page);
|
|
__free_pages(page, order);
|
|
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
|
|
index 21ca44c..1f0ea55 100644
|
|
--- a/arch/um/Kconfig.common
|
|
+++ b/arch/um/Kconfig.common
|
|
@@ -2,6 +2,7 @@ config UML
|
|
bool
|
|
default y
|
|
select HAVE_UID16
|
|
+ select HAVE_FUTEX_CMPXCHG if FUTEX
|
|
select GENERIC_IRQ_SHOW
|
|
select GENERIC_CPU_DEVICES
|
|
select GENERIC_IO
|
|
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
|
|
index 3716e69..e8ab93c 100644
|
|
--- a/arch/um/drivers/ubd_kern.c
|
|
+++ b/arch/um/drivers/ubd_kern.c
|
|
@@ -1277,7 +1277,7 @@ static void do_ubd_request(struct request_queue *q)
|
|
|
|
while(1){
|
|
struct ubd *dev = q->queuedata;
|
|
- if(dev->end_sg == 0){
|
|
+ if(dev->request == NULL){
|
|
struct request *req = blk_fetch_request(q);
|
|
if(req == NULL)
|
|
return;
|
|
@@ -1299,7 +1299,8 @@ static void do_ubd_request(struct request_queue *q)
|
|
return;
|
|
}
|
|
prepare_flush_request(req, io_req);
|
|
- submit_request(io_req, dev);
|
|
+ if (submit_request(io_req, dev) == false)
|
|
+ return;
|
|
}
|
|
|
|
while(dev->start_sg < dev->end_sg){
|
|
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
|
|
index 974b874..53b8320 100644
|
|
--- a/arch/um/kernel/trap.c
|
|
+++ b/arch/um/kernel/trap.c
|
|
@@ -80,6 +80,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM) {
|
|
goto out_of_memory;
|
|
+ } else if (fault & VM_FAULT_SIGSEGV) {
|
|
+ goto out;
|
|
} else if (fault & VM_FAULT_SIGBUS) {
|
|
err = -EACCES;
|
|
goto out;
|
|
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
|
|
index fb5e4c6..ef470a7 100644
|
|
--- a/arch/unicore32/include/asm/mmu_context.h
|
|
+++ b/arch/unicore32/include/asm/mmu_context.h
|
|
@@ -14,6 +14,8 @@
|
|
|
|
#include <linux/compiler.h>
|
|
#include <linux/sched.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/io.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
@@ -73,7 +75,7 @@ do { \
|
|
else \
|
|
mm->mmap = NULL; \
|
|
rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
|
|
- mm->mmap_cache = NULL; \
|
|
+ vmacache_invalidate(mm); \
|
|
mm->map_count--; \
|
|
remove_vma(high_vma); \
|
|
} \
|
|
diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
|
|
index 4dcd34a..77b5226 100644
|
|
--- a/arch/unicore32/include/mach/pm.h
|
|
+++ b/arch/unicore32/include/mach/pm.h
|
|
@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state);
|
|
/* Defined in hibernate_asm.S */
|
|
extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
|
|
|
|
-/* References to section boundaries */
|
|
-extern const void __nosave_begin, __nosave_end;
|
|
-
|
|
extern struct pbe *restore_pblist;
|
|
#endif
|
|
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
|
|
index d75ef8b..9969ec3 100644
|
|
--- a/arch/unicore32/kernel/hibernate.c
|
|
+++ b/arch/unicore32/kernel/hibernate.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/pgalloc.h>
|
|
+#include <asm/sections.h>
|
|
#include <asm/suspend.h>
|
|
|
|
#include "mach/pm.h"
|
|
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
|
index 7324107..96e743a 100644
|
|
--- a/arch/x86/Kconfig
|
|
+++ b/arch/x86/Kconfig
|
|
@@ -160,7 +160,7 @@ config SBUS
|
|
|
|
config NEED_DMA_MAP_STATE
|
|
def_bool y
|
|
- depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
|
|
+ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
|
|
|
|
config NEED_SG_DMA_LENGTH
|
|
def_bool y
|
|
@@ -854,7 +854,7 @@ source "kernel/Kconfig.preempt"
|
|
|
|
config X86_UP_APIC
|
|
bool "Local APIC support on uniprocessors"
|
|
- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
|
|
+ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
|
|
---help---
|
|
A local APIC (Advanced Programmable Interrupt Controller) is an
|
|
integrated interrupt controller in the CPU. If you have a single-CPU
|
|
@@ -865,6 +865,10 @@ config X86_UP_APIC
|
|
performance counters), and the NMI watchdog which detects hard
|
|
lockups.
|
|
|
|
+config X86_UP_APIC_MSI
|
|
+ def_bool y
|
|
+ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
|
|
+
|
|
config X86_UP_IOAPIC
|
|
bool "IO-APIC support on uniprocessors"
|
|
depends on X86_UP_APIC
|
|
@@ -966,10 +970,27 @@ config VM86
|
|
default y
|
|
depends on X86_32
|
|
---help---
|
|
- This option is required by programs like DOSEMU to run 16-bit legacy
|
|
- code on X86 processors. It also may be needed by software like
|
|
- XFree86 to initialize some video cards via BIOS. Disabling this
|
|
- option saves about 6k.
|
|
+ This option is required by programs like DOSEMU to run
|
|
+ 16-bit real mode legacy code on x86 processors. It also may
|
|
+ be needed by software like XFree86 to initialize some video
|
|
+ cards via BIOS. Disabling this option saves about 6K.
|
|
+
|
|
+config X86_16BIT
|
|
+ bool "Enable support for 16-bit segments" if EXPERT
|
|
+ default y
|
|
+ ---help---
|
|
+ This option is required by programs like Wine to run 16-bit
|
|
+ protected mode legacy code on x86 processors. Disabling
|
|
+ this option saves about 300 bytes on i386, or around 6K text
|
|
+	  plus 16K runtime memory on x86-64.
|
|
+
|
|
+config X86_ESPFIX32
|
|
+ def_bool y
|
|
+ depends on X86_16BIT && X86_32
|
|
+
|
|
+config X86_ESPFIX64
|
|
+ def_bool y
|
|
+ depends on X86_16BIT && X86_64
|
|
|
|
config TOSHIBA
|
|
tristate "Toshiba Laptop support"
|
|
@@ -1580,6 +1601,7 @@ config EFI
|
|
config EFI_STUB
|
|
bool "EFI stub support"
|
|
depends on EFI
|
|
+ select RELOCATABLE
|
|
---help---
|
|
This kernel feature allows a bzImage to be loaded directly
|
|
by EFI firmware without the use of a bootloader.
|
|
@@ -2418,12 +2440,19 @@ config X86_DMA_REMAP
|
|
depends on STA2X11
|
|
|
|
config IOSF_MBI
|
|
- bool
|
|
+ tristate "Intel System On Chip IOSF Sideband support"
|
|
depends on PCI
|
|
---help---
|
|
- To be selected by modules requiring access to the Intel OnChip System
|
|
- Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms
|
|
- enumerable by PCI.
|
|
+ Enables sideband access to mailbox registers on SoC's. The sideband is
|
|
+ available on the following platforms. This list is not meant to be
|
|
+ exclusive.
|
|
+ - BayTrail
|
|
+ - Cherryview
|
|
+ - Braswell
|
|
+ - Quark
|
|
+
|
|
+ You should say Y if you are running a kernel on one of these
|
|
+ platforms.
|
|
|
|
source "net/Kconfig"
|
|
|
|
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
|
|
index 0fcd913..67e9f5c 100644
|
|
--- a/arch/x86/boot/compressed/Makefile
|
|
+++ b/arch/x86/boot/compressed/Makefile
|
|
@@ -75,8 +75,10 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
|
|
suffix-$(CONFIG_KERNEL_LZO) := lzo
|
|
suffix-$(CONFIG_KERNEL_LZ4) := lz4
|
|
|
|
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
|
|
+ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
|
|
quiet_cmd_mkpiggy = MKPIGGY $@
|
|
- cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
|
|
+ cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
|
|
|
|
targets += piggy.S
|
|
$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
|
|
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
|
|
index 4dbf967..6cfcf2a 100644
|
|
--- a/arch/x86/boot/compressed/aslr.c
|
|
+++ b/arch/x86/boot/compressed/aslr.c
|
|
@@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
|
|
static bool mem_avoid_overlap(struct mem_vector *img)
|
|
{
|
|
int i;
|
|
+ struct setup_data *ptr;
|
|
|
|
for (i = 0; i < MEM_AVOID_MAX; i++) {
|
|
if (mem_overlaps(img, &mem_avoid[i]))
|
|
return true;
|
|
}
|
|
|
|
+ /* Avoid all entries in the setup_data linked list. */
|
|
+ ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
|
|
+ while (ptr) {
|
|
+ struct mem_vector avoid;
|
|
+
|
|
+ avoid.start = (u64)ptr;
|
|
+ avoid.size = sizeof(*ptr) + ptr->len;
|
|
+
|
|
+ if (mem_overlaps(img, &avoid))
|
|
+ return true;
|
|
+
|
|
+ ptr = (struct setup_data *)(unsigned long)ptr->next;
|
|
+ }
|
|
+
|
|
return false;
|
|
}
|
|
|
|
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
|
|
index 78cbb2d..ec5a3c7 100644
|
|
--- a/arch/x86/boot/compressed/eboot.c
|
|
+++ b/arch/x86/boot/compressed/eboot.c
|
|
@@ -560,6 +560,10 @@ static efi_status_t setup_e820(struct boot_params *params,
|
|
unsigned int e820_type = 0;
|
|
unsigned long m = efi->efi_memmap;
|
|
|
|
+#ifdef CONFIG_X86_64
|
|
+ m |= (u64)efi->efi_memmap_hi << 32;
|
|
+#endif
|
|
+
|
|
d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
|
|
switch (d->type) {
|
|
case EFI_RESERVED_TYPE:
|
|
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
|
|
index f45ab7a..a814c80 100644
|
|
--- a/arch/x86/boot/compressed/head_32.S
|
|
+++ b/arch/x86/boot/compressed/head_32.S
|
|
@@ -54,7 +54,7 @@ ENTRY(efi_pe_entry)
|
|
call reloc
|
|
reloc:
|
|
popl %ecx
|
|
- subl reloc, %ecx
|
|
+ subl $reloc, %ecx
|
|
movl %ecx, BP_code32_start(%eax)
|
|
|
|
sub $0x4, %esp
|
|
@@ -186,7 +186,8 @@ relocated:
|
|
* Do the decompression, and jump to the new kernel..
|
|
*/
|
|
/* push arguments for decompress_kernel: */
|
|
- pushl $z_output_len /* decompressed length */
|
|
+ pushl $z_run_size /* size of kernel with .bss and .brk */
|
|
+ pushl $z_output_len /* decompressed length, end of relocs */
|
|
leal z_extract_offset_negative(%ebx), %ebp
|
|
pushl %ebp /* output address */
|
|
pushl $z_input_len /* input_len */
|
|
@@ -196,7 +197,7 @@ relocated:
|
|
pushl %eax /* heap area */
|
|
pushl %esi /* real mode pointer */
|
|
call decompress_kernel /* returns kernel location in %eax */
|
|
- addl $24, %esp
|
|
+ addl $28, %esp
|
|
|
|
/*
|
|
* Jump to the decompressed kernel.
|
|
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
|
|
index b10fa66..34bbc09 100644
|
|
--- a/arch/x86/boot/compressed/head_64.S
|
|
+++ b/arch/x86/boot/compressed/head_64.S
|
|
@@ -334,13 +334,16 @@ relocated:
|
|
* Do the decompression, and jump to the new kernel..
|
|
*/
|
|
pushq %rsi /* Save the real mode argument */
|
|
+ movq $z_run_size, %r9 /* size of kernel with .bss and .brk */
|
|
+ pushq %r9
|
|
movq %rsi, %rdi /* real mode address */
|
|
leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
|
|
leaq input_data(%rip), %rdx /* input_data */
|
|
movl $z_input_len, %ecx /* input_len */
|
|
movq %rbp, %r8 /* output target address */
|
|
- movq $z_output_len, %r9 /* decompressed length */
|
|
+ movq $z_output_len, %r9 /* decompressed length, end of relocs */
|
|
call decompress_kernel /* returns kernel location in %rax */
|
|
+ popq %r9
|
|
popq %rsi
|
|
|
|
/*
|
|
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
|
|
index 196eaf3..8f45c85 100644
|
|
--- a/arch/x86/boot/compressed/misc.c
|
|
+++ b/arch/x86/boot/compressed/misc.c
|
|
@@ -393,8 +393,11 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap,
|
|
unsigned char *input_data,
|
|
unsigned long input_len,
|
|
unsigned char *output,
|
|
- unsigned long output_len)
|
|
+ unsigned long output_len,
|
|
+ unsigned long run_size)
|
|
{
|
|
+ unsigned char *output_orig = output;
|
|
+
|
|
real_mode = rmode;
|
|
|
|
sanitize_boot_params(real_mode);
|
|
@@ -416,8 +419,14 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap,
|
|
free_mem_ptr = heap; /* Heap */
|
|
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
|
|
|
|
- output = choose_kernel_location(input_data, input_len,
|
|
- output, output_len);
|
|
+ /*
|
|
+ * The memory hole needed for the kernel is the larger of either
|
|
+ * the entire decompressed kernel plus relocation table, or the
|
|
+ * entire decompressed kernel plus .bss and .brk sections.
|
|
+ */
|
|
+ output = choose_kernel_location(input_data, input_len, output,
|
|
+ output_len > run_size ? output_len
|
|
+ : run_size);
|
|
|
|
/* Validate memory location choices. */
|
|
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
|
|
@@ -437,7 +446,12 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap,
|
|
debug_putstr("\nDecompressing Linux... ");
|
|
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
|
|
parse_elf(output);
|
|
- handle_relocations(output, output_len);
|
|
+ /*
|
|
+ * 32-bit always performs relocations. 64-bit relocations are only
|
|
+ * needed if kASLR has chosen a different load address.
|
|
+ */
|
|
+ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
|
|
+ handle_relocations(output, output_len);
|
|
debug_putstr("done.\nBooting the kernel.\n");
|
|
return output;
|
|
}
|
|
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
|
|
index b669ab6..d8222f2 100644
|
|
--- a/arch/x86/boot/compressed/mkpiggy.c
|
|
+++ b/arch/x86/boot/compressed/mkpiggy.c
|
|
@@ -36,11 +36,13 @@ int main(int argc, char *argv[])
|
|
uint32_t olen;
|
|
long ilen;
|
|
unsigned long offs;
|
|
+ unsigned long run_size;
|
|
FILE *f = NULL;
|
|
int retval = 1;
|
|
|
|
- if (argc < 2) {
|
|
- fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
|
|
+ if (argc < 3) {
|
|
+ fprintf(stderr, "Usage: %s compressed_file run_size\n",
|
|
+ argv[0]);
|
|
goto bail;
|
|
}
|
|
|
|
@@ -74,6 +76,7 @@ int main(int argc, char *argv[])
|
|
offs += olen >> 12; /* Add 8 bytes for each 32K block */
|
|
offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */
|
|
offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
|
|
+ run_size = atoi(argv[2]);
|
|
|
|
printf(".section \".rodata..compressed\",\"a\",@progbits\n");
|
|
printf(".globl z_input_len\n");
|
|
@@ -85,6 +88,8 @@ int main(int argc, char *argv[])
|
|
/* z_extract_offset_negative allows simplification of head_32.S */
|
|
printf(".globl z_extract_offset_negative\n");
|
|
printf("z_extract_offset_negative = -0x%lx\n", offs);
|
|
+ printf(".globl z_run_size\n");
|
|
+ printf("z_run_size = %lu\n", run_size);
|
|
|
|
printf(".globl input_data, input_data_end\n");
|
|
printf("input_data:\n");
|
|
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
|
|
index ec3b8ba..04da6c2 100644
|
|
--- a/arch/x86/boot/header.S
|
|
+++ b/arch/x86/boot/header.S
|
|
@@ -91,10 +91,9 @@ bs_die:
|
|
|
|
.section ".bsdata", "a"
|
|
bugger_off_msg:
|
|
- .ascii "Direct floppy boot is not supported. "
|
|
- .ascii "Use a boot loader program instead.\r\n"
|
|
+ .ascii "Use a boot loader.\r\n"
|
|
.ascii "\n"
|
|
- .ascii "Remove disk and press any key to reboot ...\r\n"
|
|
+ .ascii "Remove disk and press any key to reboot...\r\n"
|
|
.byte 0
|
|
|
|
#ifdef CONFIG_EFI_STUB
|
|
@@ -108,7 +107,7 @@ coff_header:
|
|
#else
|
|
.word 0x8664 # x86-64
|
|
#endif
|
|
- .word 3 # nr_sections
|
|
+ .word 4 # nr_sections
|
|
.long 0 # TimeDateStamp
|
|
.long 0 # PointerToSymbolTable
|
|
.long 1 # NumberOfSymbols
|
|
@@ -250,6 +249,25 @@ section_table:
|
|
.word 0 # NumberOfLineNumbers
|
|
.long 0x60500020 # Characteristics (section flags)
|
|
|
|
+ #
|
|
+ # The offset & size fields are filled in by build.c.
|
|
+ #
|
|
+ .ascii ".bss"
|
|
+ .byte 0
|
|
+ .byte 0
|
|
+ .byte 0
|
|
+ .byte 0
|
|
+ .long 0
|
|
+ .long 0x0
|
|
+ .long 0 # Size of initialized data
|
|
+ # on disk
|
|
+ .long 0x0
|
|
+ .long 0 # PointerToRelocations
|
|
+ .long 0 # PointerToLineNumbers
|
|
+ .word 0 # NumberOfRelocations
|
|
+ .word 0 # NumberOfLineNumbers
|
|
+ .long 0xc8000080 # Characteristics (section flags)
|
|
+
|
|
#endif /* CONFIG_EFI_STUB */
|
|
|
|
# Kernel attributes; used by setup. This is part 1 of the
|
|
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
|
|
index 8e15b22..3dafaeb 100644
|
|
--- a/arch/x86/boot/tools/build.c
|
|
+++ b/arch/x86/boot/tools/build.c
|
|
@@ -142,7 +142,7 @@ static void usage(void)
|
|
|
|
#ifdef CONFIG_EFI_STUB
|
|
|
|
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
|
|
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
|
|
{
|
|
unsigned int pe_header;
|
|
unsigned short num_sections;
|
|
@@ -163,10 +163,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
|
|
put_unaligned_le32(size, section + 0x8);
|
|
|
|
/* section header vma field */
|
|
- put_unaligned_le32(offset, section + 0xc);
|
|
+ put_unaligned_le32(vma, section + 0xc);
|
|
|
|
/* section header 'size of initialised data' field */
|
|
- put_unaligned_le32(size, section + 0x10);
|
|
+ put_unaligned_le32(datasz, section + 0x10);
|
|
|
|
/* section header 'file offset' field */
|
|
put_unaligned_le32(offset, section + 0x14);
|
|
@@ -178,6 +178,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
|
|
}
|
|
}
|
|
|
|
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
|
|
+{
|
|
+ update_pecoff_section_header_fields(section_name, offset, size, size, offset);
|
|
+}
|
|
+
|
|
static void update_pecoff_setup_and_reloc(unsigned int size)
|
|
{
|
|
u32 setup_offset = 0x200;
|
|
@@ -202,9 +207,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
|
|
|
|
pe_header = get_unaligned_le32(&buf[0x3c]);
|
|
|
|
- /* Size of image */
|
|
- put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
|
|
-
|
|
/*
|
|
* Size of code: Subtract the size of the first sector (512 bytes)
|
|
* which includes the header.
|
|
@@ -219,6 +221,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
|
|
update_pecoff_section_header(".text", text_start, text_sz);
|
|
}
|
|
|
|
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
|
|
+{
|
|
+ unsigned int pe_header;
|
|
+ unsigned int bss_sz = init_sz - file_sz;
|
|
+
|
|
+ pe_header = get_unaligned_le32(&buf[0x3c]);
|
|
+
|
|
+ /* Size of uninitialized data */
|
|
+ put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
|
|
+
|
|
+ /* Size of image */
|
|
+ put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
|
|
+
|
|
+ update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
|
|
+}
|
|
+
|
|
#endif /* CONFIG_EFI_STUB */
|
|
|
|
|
|
@@ -270,6 +288,9 @@ int main(int argc, char ** argv)
|
|
int fd;
|
|
void *kernel;
|
|
u32 crc = 0xffffffffUL;
|
|
+#ifdef CONFIG_EFI_STUB
|
|
+ unsigned int init_sz;
|
|
+#endif
|
|
|
|
/* Defaults for old kernel */
|
|
#ifdef CONFIG_X86_32
|
|
@@ -343,7 +364,9 @@ int main(int argc, char ** argv)
|
|
put_unaligned_le32(sys_size, &buf[0x1f4]);
|
|
|
|
#ifdef CONFIG_EFI_STUB
|
|
- update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
|
|
+ update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
|
|
+ init_sz = get_unaligned_le32(&buf[0x260]);
|
|
+ update_pecoff_bss(i + (sys_size * 16), init_sz);
|
|
|
|
#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
|
|
efi_stub_entry -= 0x200;
|
|
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
|
|
index aafe8ce..e26984f 100644
|
|
--- a/arch/x86/crypto/aes_glue.c
|
|
+++ b/arch/x86/crypto/aes_glue.c
|
|
@@ -66,5 +66,5 @@ module_exit(aes_fini);
|
|
|
|
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("aes");
|
|
-MODULE_ALIAS("aes-asm");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes-asm");
|
|
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
|
|
index 948ad0e..6d4faba 100644
|
|
--- a/arch/x86/crypto/aesni-intel_glue.c
|
|
+++ b/arch/x86/crypto/aesni-intel_glue.c
|
|
@@ -1109,7 +1109,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
|
|
src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
|
|
if (!src)
|
|
return -ENOMEM;
|
|
- assoc = (src + req->cryptlen + auth_tag_len);
|
|
+ assoc = (src + req->cryptlen);
|
|
scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
|
|
scatterwalk_map_and_copy(assoc, req->assoc, 0,
|
|
req->assoclen, 0);
|
|
@@ -1134,7 +1134,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
|
|
scatterwalk_done(&src_sg_walk, 0, 0);
|
|
scatterwalk_done(&assoc_sg_walk, 0, 0);
|
|
} else {
|
|
- scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
|
|
+ scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
|
|
kfree(src);
|
|
}
|
|
return retval;
|
|
@@ -1514,4 +1514,4 @@ module_exit(aesni_exit);
|
|
|
|
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
|
|
index 50ec333..1477cfc 100644
|
|
--- a/arch/x86/crypto/blowfish_glue.c
|
|
+++ b/arch/x86/crypto/blowfish_glue.c
|
|
@@ -481,5 +481,5 @@ module_exit(fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
|
|
-MODULE_ALIAS("blowfish");
|
|
-MODULE_ALIAS("blowfish-asm");
|
|
+MODULE_ALIAS_CRYPTO("blowfish");
|
|
+MODULE_ALIAS_CRYPTO("blowfish-asm");
|
|
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
|
|
index 4209a76..9a07faf 100644
|
|
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
|
|
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
|
|
@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
|
|
-MODULE_ALIAS("camellia");
|
|
-MODULE_ALIAS("camellia-asm");
|
|
+MODULE_ALIAS_CRYPTO("camellia");
|
|
+MODULE_ALIAS_CRYPTO("camellia-asm");
|
|
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
|
|
index 87a041a..ed38d95 100644
|
|
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
|
|
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
|
|
@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
|
|
-MODULE_ALIAS("camellia");
|
|
-MODULE_ALIAS("camellia-asm");
|
|
+MODULE_ALIAS_CRYPTO("camellia");
|
|
+MODULE_ALIAS_CRYPTO("camellia-asm");
|
|
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
|
|
index c171dcb..5c8b626 100644
|
|
--- a/arch/x86/crypto/camellia_glue.c
|
|
+++ b/arch/x86/crypto/camellia_glue.c
|
|
@@ -1725,5 +1725,5 @@ module_exit(fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
|
|
-MODULE_ALIAS("camellia");
|
|
-MODULE_ALIAS("camellia-asm");
|
|
+MODULE_ALIAS_CRYPTO("camellia");
|
|
+MODULE_ALIAS_CRYPTO("camellia-asm");
|
|
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
|
|
index e6a3700..f62e9db 100644
|
|
--- a/arch/x86/crypto/cast5_avx_glue.c
|
|
+++ b/arch/x86/crypto/cast5_avx_glue.c
|
|
@@ -494,4 +494,4 @@ module_exit(cast5_exit);
|
|
|
|
MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("cast5");
|
|
+MODULE_ALIAS_CRYPTO("cast5");
|
|
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
|
|
index 09f3677..0160f68 100644
|
|
--- a/arch/x86/crypto/cast6_avx_glue.c
|
|
+++ b/arch/x86/crypto/cast6_avx_glue.c
|
|
@@ -611,4 +611,4 @@ module_exit(cast6_exit);
|
|
|
|
MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("cast6");
|
|
+MODULE_ALIAS_CRYPTO("cast6");
|
|
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
|
|
index 9d014a7..1937fc1 100644
|
|
--- a/arch/x86/crypto/crc32-pclmul_glue.c
|
|
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
|
|
@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
|
|
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
|
|
MODULE_LICENSE("GPL");
|
|
|
|
-MODULE_ALIAS("crc32");
|
|
-MODULE_ALIAS("crc32-pclmul");
|
|
+MODULE_ALIAS_CRYPTO("crc32");
|
|
+MODULE_ALIAS_CRYPTO("crc32-pclmul");
|
|
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
|
|
index 6812ad9..28640c3 100644
|
|
--- a/arch/x86/crypto/crc32c-intel_glue.c
|
|
+++ b/arch/x86/crypto/crc32c-intel_glue.c
|
|
@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c
|
|
MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
|
|
MODULE_LICENSE("GPL");
|
|
|
|
-MODULE_ALIAS("crc32c");
|
|
-MODULE_ALIAS("crc32c-intel");
|
|
+MODULE_ALIAS_CRYPTO("crc32c");
|
|
+MODULE_ALIAS_CRYPTO("crc32c-intel");
|
|
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
|
|
index 7845d7f..b6c67bf 100644
|
|
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
|
|
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
|
|
@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
|
|
MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
|
|
MODULE_LICENSE("GPL");
|
|
|
|
-MODULE_ALIAS("crct10dif");
|
|
-MODULE_ALIAS("crct10dif-pclmul");
|
|
+MODULE_ALIAS_CRYPTO("crct10dif");
|
|
+MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
|
|
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
|
|
index 98d7a18..f368ba2 100644
|
|
--- a/arch/x86/crypto/fpu.c
|
|
+++ b/arch/x86/crypto/fpu.c
|
|
@@ -17,6 +17,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/crypto.h>
|
|
#include <asm/i387.h>
|
|
|
|
struct crypto_fpu_ctx {
|
|
@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
|
|
{
|
|
crypto_unregister_template(&crypto_fpu_tmpl);
|
|
}
|
|
+
|
|
+MODULE_ALIAS_CRYPTO("fpu");
|
|
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
|
|
index d785cf2..4bcf841 100644
|
|
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
|
|
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
|
|
@@ -291,6 +291,7 @@ static struct ahash_alg ghash_async_alg = {
|
|
.cra_name = "ghash",
|
|
.cra_driver_name = "ghash-clmulni",
|
|
.cra_priority = 400,
|
|
+ .cra_ctxsize = sizeof(struct ghash_async_ctx),
|
|
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
|
|
.cra_blocksize = GHASH_BLOCK_SIZE,
|
|
.cra_type = &crypto_ahash_type,
|
|
@@ -341,4 +342,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
|
|
"acclerated by PCLMULQDQ-NI");
|
|
-MODULE_ALIAS("ghash");
|
|
+MODULE_ALIAS_CRYPTO("ghash");
|
|
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
|
|
index 5e8e677..399a29d 100644
|
|
--- a/arch/x86/crypto/salsa20_glue.c
|
|
+++ b/arch/x86/crypto/salsa20_glue.c
|
|
@@ -119,5 +119,5 @@ module_exit(fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
|
|
-MODULE_ALIAS("salsa20");
|
|
-MODULE_ALIAS("salsa20-asm");
|
|
+MODULE_ALIAS_CRYPTO("salsa20");
|
|
+MODULE_ALIAS_CRYPTO("salsa20-asm");
|
|
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
|
|
index 2fae489..437e47a 100644
|
|
--- a/arch/x86/crypto/serpent_avx2_glue.c
|
|
+++ b/arch/x86/crypto/serpent_avx2_glue.c
|
|
@@ -558,5 +558,5 @@ module_exit(fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
|
|
-MODULE_ALIAS("serpent");
|
|
-MODULE_ALIAS("serpent-asm");
|
|
+MODULE_ALIAS_CRYPTO("serpent");
|
|
+MODULE_ALIAS_CRYPTO("serpent-asm");
|
|
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
|
|
index ff48708..7e21739 100644
|
|
--- a/arch/x86/crypto/serpent_avx_glue.c
|
|
+++ b/arch/x86/crypto/serpent_avx_glue.c
|
|
@@ -617,4 +617,4 @@ module_exit(serpent_exit);
|
|
|
|
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("serpent");
|
|
+MODULE_ALIAS_CRYPTO("serpent");
|
|
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
|
|
index 8c95f86..bf025ad 100644
|
|
--- a/arch/x86/crypto/serpent_sse2_glue.c
|
|
+++ b/arch/x86/crypto/serpent_sse2_glue.c
|
|
@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
|
|
|
|
MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("serpent");
|
|
+MODULE_ALIAS_CRYPTO("serpent");
|
|
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
|
|
index 4a11a9d..29e1060 100644
|
|
--- a/arch/x86/crypto/sha1_ssse3_glue.c
|
|
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
|
|
@@ -237,4 +237,4 @@ module_exit(sha1_ssse3_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
|
|
|
|
-MODULE_ALIAS("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1");
|
|
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
|
|
index f248546..4dc100d 100644
|
|
--- a/arch/x86/crypto/sha256_ssse3_glue.c
|
|
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
|
|
@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
|
|
|
|
-MODULE_ALIAS("sha256");
|
|
-MODULE_ALIAS("sha224");
|
|
+MODULE_ALIAS_CRYPTO("sha256");
|
|
+MODULE_ALIAS_CRYPTO("sha224");
|
|
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
|
|
index 8626b03..26a5898 100644
|
|
--- a/arch/x86/crypto/sha512_ssse3_glue.c
|
|
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
|
|
@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
|
|
|
|
-MODULE_ALIAS("sha512");
|
|
-MODULE_ALIAS("sha384");
|
|
+MODULE_ALIAS_CRYPTO("sha512");
|
|
+MODULE_ALIAS_CRYPTO("sha384");
|
|
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
|
|
index 4e3c665..1ac531e 100644
|
|
--- a/arch/x86/crypto/twofish_avx_glue.c
|
|
+++ b/arch/x86/crypto/twofish_avx_glue.c
|
|
@@ -579,4 +579,4 @@ module_exit(twofish_exit);
|
|
|
|
MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("twofish");
|
|
+MODULE_ALIAS_CRYPTO("twofish");
|
|
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
|
|
index 0a52023..77e06c2 100644
|
|
--- a/arch/x86/crypto/twofish_glue.c
|
|
+++ b/arch/x86/crypto/twofish_glue.c
|
|
@@ -96,5 +96,5 @@ module_exit(fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
|
|
-MODULE_ALIAS("twofish");
|
|
-MODULE_ALIAS("twofish-asm");
|
|
+MODULE_ALIAS_CRYPTO("twofish");
|
|
+MODULE_ALIAS_CRYPTO("twofish-asm");
|
|
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
|
|
index 13e63b3..56d8a08 100644
|
|
--- a/arch/x86/crypto/twofish_glue_3way.c
|
|
+++ b/arch/x86/crypto/twofish_glue_3way.c
|
|
@@ -495,5 +495,5 @@ module_exit(fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
|
|
-MODULE_ALIAS("twofish");
|
|
-MODULE_ALIAS("twofish-asm");
|
|
+MODULE_ALIAS_CRYPTO("twofish");
|
|
+MODULE_ALIAS_CRYPTO("twofish-asm");
|
|
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
|
|
index 4299eb0..92a2e93 100644
|
|
--- a/arch/x86/ia32/ia32entry.S
|
|
+++ b/arch/x86/ia32/ia32entry.S
|
|
@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
|
|
1: movl (%rbp),%ebp
|
|
_ASM_EXTABLE(1b,ia32_badarg)
|
|
ASM_CLAC
|
|
+
|
|
+ /*
|
|
+ * Sysenter doesn't filter flags, so we need to clear NT
|
|
+ * ourselves. To save a few cycles, we can check whether
|
|
+ * NT was set instead of doing an unconditional popfq.
|
|
+ */
|
|
+ testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
|
|
+ jnz sysenter_fix_flags
|
|
+sysenter_flags_fixed:
|
|
+
|
|
orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
CFI_REMEMBER_STATE
|
|
@@ -184,6 +194,8 @@ sysexit_from_sys_call:
|
|
TRACE_IRQS_ON
|
|
ENABLE_INTERRUPTS_SYSEXIT32
|
|
|
|
+ CFI_RESTORE_STATE
|
|
+
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
.macro auditsys_entry_common
|
|
movl %esi,%r9d /* 6th arg: 4th syscall arg */
|
|
@@ -226,7 +238,6 @@ sysexit_from_sys_call:
|
|
.endm
|
|
|
|
sysenter_auditsys:
|
|
- CFI_RESTORE_STATE
|
|
auditsys_entry_common
|
|
movl %ebp,%r9d /* reload 6th syscall arg */
|
|
jmp sysenter_dispatch
|
|
@@ -235,6 +246,11 @@ sysexit_audit:
|
|
auditsys_exit sysexit_from_sys_call
|
|
#endif
|
|
|
|
+sysenter_fix_flags:
|
|
+ pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
|
|
+ popfq_cfi
|
|
+ jmp sysenter_flags_fixed
|
|
+
|
|
sysenter_tracesys:
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
|
|
index 5f12968..1717156 100644
|
|
--- a/arch/x86/include/asm/cpufeature.h
|
|
+++ b/arch/x86/include/asm/cpufeature.h
|
|
@@ -203,6 +203,7 @@
|
|
#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
|
|
#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
|
|
#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
|
|
+#define X86_FEATURE_VMMCALL (8*32+15) /* Prefer vmmcall to vmcall */
|
|
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
|
|
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
|
|
index 50d033a..a94b82e 100644
|
|
--- a/arch/x86/include/asm/desc.h
|
|
+++ b/arch/x86/include/asm/desc.h
|
|
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
|
|
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
|
|
}
|
|
|
|
-#define _LDT_empty(info) \
|
|
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
|
|
+#define LDT_empty(info) \
|
|
((info)->base_addr == 0 && \
|
|
(info)->limit == 0 && \
|
|
(info)->contents == 0 && \
|
|
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
|
|
(info)->seg_not_present == 1 && \
|
|
(info)->useable == 0)
|
|
|
|
-#ifdef CONFIG_X86_64
|
|
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
|
|
-#else
|
|
-#define LDT_empty(info) (_LDT_empty(info))
|
|
-#endif
|
|
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
|
|
+static inline bool LDT_zero(const struct user_desc *info)
|
|
+{
|
|
+ return (info->base_addr == 0 &&
|
|
+ info->limit == 0 &&
|
|
+ info->contents == 0 &&
|
|
+ info->read_exec_only == 0 &&
|
|
+ info->seg_32bit == 0 &&
|
|
+ info->limit_in_pages == 0 &&
|
|
+ info->seg_not_present == 0 &&
|
|
+ info->useable == 0);
|
|
+}
|
|
|
|
static inline void clear_LDT(void)
|
|
{
|
|
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
|
|
index 9c999c1..01f15b2 100644
|
|
--- a/arch/x86/include/asm/elf.h
|
|
+++ b/arch/x86/include/asm/elf.h
|
|
@@ -155,8 +155,9 @@ do { \
|
|
#define elf_check_arch(x) \
|
|
((x)->e_machine == EM_X86_64)
|
|
|
|
-#define compat_elf_check_arch(x) \
|
|
- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
|
|
+#define compat_elf_check_arch(x) \
|
|
+ (elf_check_arch_ia32(x) || \
|
|
+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
|
|
|
|
#if __USER32_DS != __USER_DS
|
|
# error "The following code assumes __USER32_DS == __USER_DS"
|
|
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
|
|
index 7252cd3..6762a55 100644
|
|
--- a/arch/x86/include/asm/fixmap.h
|
|
+++ b/arch/x86/include/asm/fixmap.h
|
|
@@ -123,14 +123,14 @@ enum fixed_addresses {
|
|
__end_of_permanent_fixed_addresses,
|
|
|
|
/*
|
|
- * 256 temporary boot-time mappings, used by early_ioremap(),
|
|
+ * 512 temporary boot-time mappings, used by early_ioremap(),
|
|
* before ioremap() is functional.
|
|
*
|
|
- * If necessary we round it up to the next 256 pages boundary so
|
|
+ * If necessary we round it up to the next 512 pages boundary so
|
|
* that we can have a single pgd entry and a single pte table:
|
|
*/
|
|
#define NR_FIX_BTMAPS 64
|
|
-#define FIX_BTMAPS_SLOTS 4
|
|
+#define FIX_BTMAPS_SLOTS 8
|
|
#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
|
|
FIX_BTMAP_END =
|
|
(__end_of_permanent_fixed_addresses ^
|
|
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
|
|
index cea1c76..1ac1c00 100644
|
|
--- a/arch/x86/include/asm/fpu-internal.h
|
|
+++ b/arch/x86/include/asm/fpu-internal.h
|
|
@@ -368,7 +368,7 @@ static inline void drop_fpu(struct task_struct *tsk)
|
|
preempt_disable();
|
|
tsk->thread.fpu_counter = 0;
|
|
__drop_fpu(tsk);
|
|
- clear_used_math();
|
|
+ clear_stopped_child_used_math(tsk);
|
|
preempt_enable();
|
|
}
|
|
|
|
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
|
|
index 8e71c79..57995f0 100644
|
|
--- a/arch/x86/include/asm/iosf_mbi.h
|
|
+++ b/arch/x86/include/asm/iosf_mbi.h
|
|
@@ -50,6 +50,32 @@
|
|
#define BT_MBI_PCIE_READ 0x00
|
|
#define BT_MBI_PCIE_WRITE 0x01
|
|
|
|
+/* Quark available units */
|
|
+#define QRK_MBI_UNIT_HBA 0x00
|
|
+#define QRK_MBI_UNIT_HB 0x03
|
|
+#define QRK_MBI_UNIT_RMU 0x04
|
|
+#define QRK_MBI_UNIT_MM 0x05
|
|
+#define QRK_MBI_UNIT_MMESRAM 0x05
|
|
+#define QRK_MBI_UNIT_SOC 0x31
|
|
+
|
|
+/* Quark read/write opcodes */
|
|
+#define QRK_MBI_HBA_READ 0x10
|
|
+#define QRK_MBI_HBA_WRITE 0x11
|
|
+#define QRK_MBI_HB_READ 0x10
|
|
+#define QRK_MBI_HB_WRITE 0x11
|
|
+#define QRK_MBI_RMU_READ 0x10
|
|
+#define QRK_MBI_RMU_WRITE 0x11
|
|
+#define QRK_MBI_MM_READ 0x10
|
|
+#define QRK_MBI_MM_WRITE 0x11
|
|
+#define QRK_MBI_MMESRAM_READ 0x12
|
|
+#define QRK_MBI_MMESRAM_WRITE 0x13
|
|
+#define QRK_MBI_SOC_READ 0x06
|
|
+#define QRK_MBI_SOC_WRITE 0x07
|
|
+
|
|
+#if IS_ENABLED(CONFIG_IOSF_MBI)
|
|
+
|
|
+bool iosf_mbi_available(void);
|
|
+
|
|
/**
|
|
* iosf_mbi_read() - MailBox Interface read command
|
|
* @port: port indicating subunit being accessed
|
|
@@ -87,4 +113,33 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
|
|
*/
|
|
int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
|
|
|
|
+#else /* CONFIG_IOSF_MBI is not enabled */
|
|
+static inline
|
|
+bool iosf_mbi_available(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static inline
|
|
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
|
|
+{
|
|
+ WARN(1, "IOSF_MBI driver not available");
|
|
+ return -EPERM;
|
|
+}
|
|
+
|
|
+static inline
|
|
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
|
|
+{
|
|
+ WARN(1, "IOSF_MBI driver not available");
|
|
+ return -EPERM;
|
|
+}
|
|
+
|
|
+static inline
|
|
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
|
|
+{
|
|
+ WARN(1, "IOSF_MBI driver not available");
|
|
+ return -EPERM;
|
|
+}
|
|
+#endif /* CONFIG_IOSF_MBI */
|
|
+
|
|
#endif /* IOSF_MBI_SYMS_H */
|
|
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
|
|
index bba3cf8..0a8b519 100644
|
|
--- a/arch/x86/include/asm/irqflags.h
|
|
+++ b/arch/x86/include/asm/irqflags.h
|
|
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
|
|
|
|
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
|
|
|
|
-#define INTERRUPT_RETURN iretq
|
|
+#define INTERRUPT_RETURN jmp native_iret
|
|
#define USERGS_SYSRET64 \
|
|
swapgs; \
|
|
sysretq;
|
|
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
|
|
index 3092300..ac03bd7 100644
|
|
--- a/arch/x86/include/asm/kvm_host.h
|
|
+++ b/arch/x86/include/asm/kvm_host.h
|
|
@@ -99,7 +99,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
|
|
#define KVM_REFILL_PAGES 25
|
|
#define KVM_MAX_CPUID_ENTRIES 80
|
|
#define KVM_NR_FIXED_MTRR_REGION 88
|
|
-#define KVM_NR_VAR_MTRR 10
|
|
+#define KVM_NR_VAR_MTRR 8
|
|
|
|
#define ASYNC_PF_PER_VCPU 64
|
|
|
|
@@ -480,6 +480,7 @@ struct kvm_vcpu_arch {
|
|
u64 mmio_gva;
|
|
unsigned access;
|
|
gfn_t mmio_gfn;
|
|
+ u64 mmio_gen;
|
|
|
|
struct kvm_pmu pmu;
|
|
|
|
@@ -570,7 +571,7 @@ struct kvm_arch {
|
|
struct kvm_pic *vpic;
|
|
struct kvm_ioapic *vioapic;
|
|
struct kvm_pit *vpit;
|
|
- int vapics_in_nmi_mode;
|
|
+ atomic_t vapics_in_nmi_mode;
|
|
struct mutex apic_map_lock;
|
|
struct kvm_apic_map *apic_map;
|
|
|
|
@@ -983,6 +984,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
|
|
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
|
|
}
|
|
|
|
+static inline u64 get_canonical(u64 la)
|
|
+{
|
|
+ return ((int64_t)la << 16) >> 16;
|
|
+}
|
|
+
|
|
+static inline bool is_noncanonical_address(u64 la)
|
|
+{
|
|
+#ifdef CONFIG_X86_64
|
|
+ return get_canonical(la) != la;
|
|
+#else
|
|
+ return false;
|
|
+#endif
|
|
+}
|
|
+
|
|
#define TSS_IOPB_BASE_OFFSET 0x66
|
|
#define TSS_BASE_SIZE 0x68
|
|
#define TSS_IOPB_SIZE (65536 / 8)
|
|
@@ -1041,7 +1056,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
|
|
void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_define_shared_msr(unsigned index, u32 msr);
|
|
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
|
|
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
|
|
|
|
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
|
|
|
|
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
|
|
index c7678e4..e62cf89 100644
|
|
--- a/arch/x86/include/asm/kvm_para.h
|
|
+++ b/arch/x86/include/asm/kvm_para.h
|
|
@@ -2,6 +2,7 @@
|
|
#define _ASM_X86_KVM_PARA_H
|
|
|
|
#include <asm/processor.h>
|
|
+#include <asm/alternative.h>
|
|
#include <uapi/asm/kvm_para.h>
|
|
|
|
extern void kvmclock_init(void);
|
|
@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void)
|
|
}
|
|
#endif /* CONFIG_KVM_GUEST */
|
|
|
|
-/* This instruction is vmcall. On non-VT architectures, it will generate a
|
|
- * trap that we will then rewrite to the appropriate instruction.
|
|
+#ifdef CONFIG_DEBUG_RODATA
|
|
+#define KVM_HYPERCALL \
|
|
+ ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
|
|
+#else
|
|
+/* On AMD processors, vmcall will generate a trap that we will
|
|
+ * then rewrite to the appropriate instruction.
|
|
*/
|
|
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
|
|
+#endif
|
|
|
|
/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
|
|
* instruction. The hypervisor may replace it with something else but only the
|
|
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
|
|
index 1da25a5..3ba047c 100644
|
|
--- a/arch/x86/include/asm/mwait.h
|
|
+++ b/arch/x86/include/asm/mwait.h
|
|
@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
|
|
:: "a" (eax), "c" (ecx));
|
|
}
|
|
|
|
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
|
|
+{
|
|
+ trace_hardirqs_on();
|
|
+ /* "mwait %eax, %ecx;" */
|
|
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
|
|
+ :: "a" (eax), "c" (ecx));
|
|
+}
|
|
+
|
|
/*
|
|
* This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
|
|
* which can obviate IPI to trigger checking of need_resched.
|
|
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
|
|
index f48b17d..3a52ee0 100644
|
|
--- a/arch/x86/include/asm/page_32_types.h
|
|
+++ b/arch/x86/include/asm/page_32_types.h
|
|
@@ -20,7 +20,6 @@
|
|
#define THREAD_SIZE_ORDER 1
|
|
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
|
|
|
|
-#define STACKFAULT_STACK 0
|
|
#define DOUBLEFAULT_STACK 1
|
|
#define NMI_STACK 0
|
|
#define DEBUG_STACK 0
|
|
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
|
|
index 8de6d9c..d54d1ee 100644
|
|
--- a/arch/x86/include/asm/page_64_types.h
|
|
+++ b/arch/x86/include/asm/page_64_types.h
|
|
@@ -14,12 +14,11 @@
|
|
#define IRQ_STACK_ORDER 2
|
|
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
|
|
|
|
-#define STACKFAULT_STACK 1
|
|
-#define DOUBLEFAULT_STACK 2
|
|
-#define NMI_STACK 3
|
|
-#define DEBUG_STACK 4
|
|
-#define MCE_STACK 5
|
|
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
|
|
+#define DOUBLEFAULT_STACK 1
|
|
+#define NMI_STACK 2
|
|
+#define DEBUG_STACK 3
|
|
+#define MCE_STACK 4
|
|
+#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
|
|
|
|
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
|
|
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
|
|
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
|
|
index e22c1db..d869931 100644
|
|
--- a/arch/x86/include/asm/pgtable_64.h
|
|
+++ b/arch/x86/include/asm/pgtable_64.h
|
|
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
|
|
extern pmd_t level2_kernel_pgt[512];
|
|
extern pmd_t level2_fixmap_pgt[512];
|
|
extern pmd_t level2_ident_pgt[512];
|
|
+extern pte_t level1_fixmap_pgt[512];
|
|
extern pgd_t init_level4_pgt[];
|
|
|
|
#define swapper_pg_dir init_level4_pgt
|
|
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
|
|
index c883bf7..7166e25 100644
|
|
--- a/arch/x86/include/asm/pgtable_64_types.h
|
|
+++ b/arch/x86/include/asm/pgtable_64_types.h
|
|
@@ -61,6 +61,8 @@ typedef struct { pteval_t pte; } pte_t;
|
|
#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
|
|
#define MODULES_END _AC(0xffffffffff000000, UL)
|
|
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
|
|
+#define ESPFIX_PGD_ENTRY _AC(-2, UL)
|
|
+#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
|
|
|
|
#define EARLY_DYNAMIC_PAGE_TABLES 64
|
|
|
|
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
|
|
index 6f1c3a8..bcc9a2f 100644
|
|
--- a/arch/x86/include/asm/segment.h
|
|
+++ b/arch/x86/include/asm/segment.h
|
|
@@ -212,10 +212,21 @@
|
|
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
|
|
|
|
#ifdef __KERNEL__
|
|
+
|
|
+/*
|
|
+ * early_idt_handler_array is an array of entry points referenced in the
|
|
+ * early IDT. For simplicity, it's a real array with one entry point
|
|
+ * every nine bytes. That leaves room for an optional 'push $0' if the
|
|
+ * vector has no error code (two bytes), a 'push $vector_number' (two
|
|
+ * bytes), and a jump to the common entry code (up to five bytes).
|
|
+ */
|
|
+#define EARLY_IDT_HANDLER_SIZE 9
|
|
+
|
|
#ifndef __ASSEMBLY__
|
|
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
|
|
+
|
|
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
|
|
#ifdef CONFIG_TRACING
|
|
-#define trace_early_idt_handlers early_idt_handlers
|
|
+# define trace_early_idt_handler_array early_idt_handler_array
|
|
#endif
|
|
|
|
/*
|
|
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
|
|
index d62c9f8..75b14ca 100644
|
|
--- a/arch/x86/include/asm/setup.h
|
|
+++ b/arch/x86/include/asm/setup.h
|
|
@@ -65,6 +65,8 @@ static inline void x86_ce4100_early_setup(void) { }
|
|
|
|
#ifndef _SETUP
|
|
|
|
+#include <asm/espfix.h>
|
|
+
|
|
/*
|
|
* This is set up by the setup-routine at boot-time
|
|
*/
|
|
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
|
|
index e1940c0..e870ea9 100644
|
|
--- a/arch/x86/include/asm/thread_info.h
|
|
+++ b/arch/x86/include/asm/thread_info.h
|
|
@@ -144,7 +144,7 @@ struct thread_info {
|
|
/* Only used for 64 bit */
|
|
#define _TIF_DO_NOTIFY_MASK \
|
|
(_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \
|
|
- _TIF_USER_RETURN_NOTIFY)
|
|
+ _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
|
|
|
|
/* flags to check in __switch_to() */
|
|
#define _TIF_WORK_CTXSW \
|
|
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
|
|
index 58d66fe..b409b17 100644
|
|
--- a/arch/x86/include/asm/traps.h
|
|
+++ b/arch/x86/include/asm/traps.h
|
|
@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
|
|
|
|
#ifdef CONFIG_TRACING
|
|
asmlinkage void trace_page_fault(void);
|
|
+#define trace_stack_segment stack_segment
|
|
#define trace_divide_error divide_error
|
|
#define trace_bounds bounds
|
|
#define trace_invalid_op invalid_op
|
|
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
|
|
index 2a46ca7..2874be9 100644
|
|
--- a/arch/x86/include/asm/vsyscall.h
|
|
+++ b/arch/x86/include/asm/vsyscall.h
|
|
@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
|
|
native_read_tscp(&p);
|
|
} else {
|
|
/* Load per CPU data from GDT */
|
|
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
|
|
+ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
|
|
}
|
|
|
|
return p;
|
|
diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
|
|
index 46727eb..6e1aaf7 100644
|
|
--- a/arch/x86/include/uapi/asm/ldt.h
|
|
+++ b/arch/x86/include/uapi/asm/ldt.h
|
|
@@ -28,6 +28,13 @@ struct user_desc {
|
|
unsigned int seg_not_present:1;
|
|
unsigned int useable:1;
|
|
#ifdef __x86_64__
|
|
+ /*
|
|
+ * Because this bit is not present in 32-bit user code, user
|
|
+ * programs can pass uninitialized values here. Therefore, in
|
|
+ * any context in which a user_desc comes from a 32-bit program,
|
|
+ * the kernel must act as though lm == 0, regardless of the
|
|
+ * actual value.
|
|
+ */
|
|
unsigned int lm:1;
|
|
#endif
|
|
};
|
|
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
|
|
index 0e79420..990a2fe 100644
|
|
--- a/arch/x86/include/uapi/asm/vmx.h
|
|
+++ b/arch/x86/include/uapi/asm/vmx.h
|
|
@@ -67,6 +67,7 @@
|
|
#define EXIT_REASON_EPT_MISCONFIG 49
|
|
#define EXIT_REASON_INVEPT 50
|
|
#define EXIT_REASON_PREEMPTION_TIMER 52
|
|
+#define EXIT_REASON_INVVPID 53
|
|
#define EXIT_REASON_WBINVD 54
|
|
#define EXIT_REASON_XSETBV 55
|
|
#define EXIT_REASON_APIC_WRITE 56
|
|
@@ -114,6 +115,7 @@
|
|
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
|
|
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
|
|
{ EXIT_REASON_INVD, "INVD" }, \
|
|
+ { EXIT_REASON_INVVPID, "INVVPID" }, \
|
|
{ EXIT_REASON_INVPCID, "INVPCID" }
|
|
|
|
#endif /* _UAPIVMX_H */
|
|
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
|
|
index cb648c8..56bac86 100644
|
|
--- a/arch/x86/kernel/Makefile
|
|
+++ b/arch/x86/kernel/Makefile
|
|
@@ -29,6 +29,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
|
|
obj-y += syscall_$(BITS).o
|
|
obj-$(CONFIG_X86_64) += vsyscall_64.o
|
|
obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
|
|
+obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
|
|
obj-$(CONFIG_SYSFS) += ksysfs.o
|
|
obj-y += bootflag.o e820.o
|
|
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
|
|
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
|
|
index 7f26c9a..523f147 100644
|
|
--- a/arch/x86/kernel/apic/apic.c
|
|
+++ b/arch/x86/kernel/apic/apic.c
|
|
@@ -1290,7 +1290,7 @@ void setup_local_APIC(void)
|
|
unsigned int value, queued;
|
|
int i, j, acked = 0;
|
|
unsigned long long tsc = 0, ntsc;
|
|
- long long max_loops = cpu_khz;
|
|
+ long long max_loops = cpu_khz ? cpu_khz : 1000000;
|
|
|
|
if (cpu_has_tsc)
|
|
rdtscll(tsc);
|
|
@@ -1387,7 +1387,7 @@ void setup_local_APIC(void)
|
|
break;
|
|
}
|
|
if (queued) {
|
|
- if (cpu_has_tsc) {
|
|
+ if (cpu_has_tsc && cpu_khz) {
|
|
rdtscll(ntsc);
|
|
max_loops = (cpu_khz << 10) - (ntsc - tsc);
|
|
} else
|
|
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
|
|
index c67ffa6..c005fdd 100644
|
|
--- a/arch/x86/kernel/cpu/amd.c
|
|
+++ b/arch/x86/kernel/cpu/amd.c
|
|
@@ -508,6 +508,13 @@ static void early_init_amd(struct cpuinfo_x86 *c)
|
|
}
|
|
#endif
|
|
|
|
+ /*
|
|
+ * This is only needed to tell the kernel whether to use VMCALL
|
|
+ * and VMMCALL. VMMCALL is never executed except under virt, so
|
|
+ * we can set it unconditionally.
|
|
+ */
|
|
+ set_cpu_cap(c, X86_FEATURE_VMMCALL);
|
|
+
|
|
/* F16h erratum 793, CVE-2013-6885 */
|
|
if (c->x86 == 0x16 && c->x86_model <= 0xf) {
|
|
u64 val;
|
|
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
|
index 8e28bf2..e6bddd5 100644
|
|
--- a/arch/x86/kernel/cpu/common.c
|
|
+++ b/arch/x86/kernel/cpu/common.c
|
|
@@ -144,6 +144,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
|
|
|
|
static int __init x86_xsave_setup(char *s)
|
|
{
|
|
+ if (strlen(s))
|
|
+ return 0;
|
|
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
|
|
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
|
|
setup_clear_cpu_cap(X86_FEATURE_AVX);
|
|
@@ -1141,7 +1143,7 @@ void syscall_init(void)
|
|
/* Flags to clear on syscall */
|
|
wrmsrl(MSR_SYSCALL_MASK,
|
|
X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
|
|
- X86_EFLAGS_IOPL|X86_EFLAGS_AC);
|
|
+ X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
|
|
index 5cd9bfa..66746a8 100644
|
|
--- a/arch/x86/kernel/cpu/intel.c
|
|
+++ b/arch/x86/kernel/cpu/intel.c
|
|
@@ -153,6 +153,21 @@ static void early_init_intel(struct cpuinfo_x86 *c)
|
|
setup_clear_cpu_cap(X86_FEATURE_ERMS);
|
|
}
|
|
}
|
|
+
|
|
+ /*
|
|
+ * Intel Quark Core DevMan_001.pdf section 6.4.11
|
|
+ * "The operating system also is required to invalidate (i.e., flush)
|
|
+ * the TLB when any changes are made to any of the page table entries.
|
|
+ * The operating system must reload CR3 to cause the TLB to be flushed"
|
|
+ *
|
|
+ * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
|
|
+ * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
|
|
+ * to be modified
|
|
+ */
|
|
+ if (c->x86 == 5 && c->x86_model == 9) {
|
|
+ pr_info("Disabling PGE capability bit\n");
|
|
+ setup_clear_cpu_cap(X86_FEATURE_PGE);
|
|
+ }
|
|
}
|
|
|
|
#ifdef CONFIG_X86_32
|
|
@@ -368,6 +383,13 @@ static void init_intel(struct cpuinfo_x86 *c)
|
|
detect_extended_topology(c);
|
|
|
|
l2 = init_intel_cacheinfo(c);
|
|
+
|
|
+ /* Detect legacy cache sizes if init_intel_cacheinfo did not */
|
|
+ if (l2 == 0) {
|
|
+ cpu_detect_cache_sizes(c);
|
|
+ l2 = c->x86_cache_size;
|
|
+ }
|
|
+
|
|
if (c->cpuid_level > 9) {
|
|
unsigned eax = cpuid_eax(10);
|
|
/* Check for version and the number of counters */
|
|
@@ -482,6 +504,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
|
|
*/
|
|
if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
|
|
size = 256;
|
|
+
|
|
+ /*
|
|
+ * Intel Quark SoC X1000 contains a 4-way set associative
|
|
+ * 16K cache with a 16 byte cache line and 256 lines per tag
|
|
+ */
|
|
+ if ((c->x86 == 5) && (c->x86_model == 9))
|
|
+ size = 16;
|
|
return size;
|
|
}
|
|
#endif
|
|
@@ -709,7 +738,8 @@ static const struct cpu_dev intel_cpu_dev = {
|
|
[3] = "OverDrive PODP5V83",
|
|
[4] = "Pentium MMX",
|
|
[7] = "Mobile Pentium 75 - 200",
|
|
- [8] = "Mobile Pentium MMX"
|
|
+ [8] = "Mobile Pentium MMX",
|
|
+ [9] = "Quark SoC X1000",
|
|
}
|
|
},
|
|
{ .family = 6, .model_names =
|
|
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
|
|
index 617a9e2..b63773b 100644
|
|
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
|
|
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
|
|
@@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size)
|
|
* load_microcode_amd() to save equivalent cpu table and microcode patches in
|
|
* kernel heap memory.
|
|
*/
|
|
-static void apply_ucode_in_initrd(void *ucode, size_t size)
|
|
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
|
|
{
|
|
struct equiv_cpu_entry *eq;
|
|
size_t *cont_sz;
|
|
u32 *header;
|
|
u8 *data, **cont;
|
|
+ u8 (*patch)[PATCH_MAX_SIZE];
|
|
u16 eq_id = 0;
|
|
int offset, left;
|
|
u32 rev, eax, ebx, ecx, edx;
|
|
@@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
|
|
new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
|
|
cont_sz = (size_t *)__pa_nodebug(&container_size);
|
|
cont = (u8 **)__pa_nodebug(&container);
|
|
+ patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
|
|
#else
|
|
new_rev = &ucode_new_rev;
|
|
cont_sz = &container_size;
|
|
cont = &container;
|
|
+ patch = &amd_ucode_patch;
|
|
#endif
|
|
|
|
data = ucode;
|
|
@@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
|
|
rev = mc->hdr.patch_id;
|
|
*new_rev = rev;
|
|
|
|
- /* save ucode patch */
|
|
- memcpy(amd_ucode_patch, mc,
|
|
- min_t(u32, header[1], PATCH_MAX_SIZE));
|
|
+ if (save_patch)
|
|
+ memcpy(patch, mc,
|
|
+ min_t(u32, header[1], PATCH_MAX_SIZE));
|
|
}
|
|
}
|
|
|
|
@@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void)
|
|
*data = cp.data;
|
|
*size = cp.size;
|
|
|
|
- apply_ucode_in_initrd(cp.data, cp.size);
|
|
+ apply_ucode_in_initrd(cp.data, cp.size, true);
|
|
}
|
|
|
|
#ifdef CONFIG_X86_32
|
|
@@ -263,7 +266,7 @@ void load_ucode_amd_ap(void)
|
|
size_t *usize;
|
|
void **ucode;
|
|
|
|
- mc = (struct microcode_amd *)__pa(amd_ucode_patch);
|
|
+ mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
|
|
if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
|
|
__apply_microcode_amd(mc);
|
|
return;
|
|
@@ -275,7 +278,7 @@ void load_ucode_amd_ap(void)
|
|
if (!*ucode || !*usize)
|
|
return;
|
|
|
|
- apply_ucode_in_initrd(*ucode, *usize);
|
|
+ apply_ucode_in_initrd(*ucode, *usize, false);
|
|
}
|
|
|
|
static void __init collect_cpu_sig_on_bsp(void *arg)
|
|
@@ -339,7 +342,7 @@ void load_ucode_amd_ap(void)
|
|
* AP has a different equivalence ID than BSP, looks like
|
|
* mixed-steppings silicon so go through the ucode blob anew.
|
|
*/
|
|
- apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
|
|
+ apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
|
|
}
|
|
}
|
|
#endif
|
|
@@ -347,7 +350,9 @@ void load_ucode_amd_ap(void)
|
|
int __init save_microcode_in_initrd_amd(void)
|
|
{
|
|
unsigned long cont;
|
|
+ int retval = 0;
|
|
enum ucode_state ret;
|
|
+ u8 *cont_va;
|
|
u32 eax;
|
|
|
|
if (!container)
|
|
@@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void)
|
|
|
|
#ifdef CONFIG_X86_32
|
|
get_bsp_sig();
|
|
- cont = (unsigned long)container;
|
|
+ cont = (unsigned long)container;
|
|
+ cont_va = __va(container);
|
|
#else
|
|
/*
|
|
* We need the physical address of the container for both bitness since
|
|
* boot_params.hdr.ramdisk_image is a physical address.
|
|
*/
|
|
- cont = __pa(container);
|
|
+ cont = __pa(container);
|
|
+ cont_va = container;
|
|
#endif
|
|
|
|
/*
|
|
@@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void)
|
|
if (relocated_ramdisk)
|
|
container = (u8 *)(__va(relocated_ramdisk) +
|
|
(cont - boot_params.hdr.ramdisk_image));
|
|
+ else
|
|
+ container = cont_va;
|
|
|
|
if (ucode_new_rev)
|
|
pr_info("microcode: updated early to new patch_level=0x%08x\n",
|
|
@@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void)
|
|
|
|
ret = load_microcode_amd(eax, container, container_size);
|
|
if (ret != UCODE_OK)
|
|
- return -EINVAL;
|
|
+ retval = -EINVAL;
|
|
|
|
/*
|
|
* This will be freed any msec now, stash patches for the current
|
|
@@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void)
|
|
container = NULL;
|
|
container_size = 0;
|
|
|
|
- return 0;
|
|
+ return retval;
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
|
|
index 18f7391..43a07bf 100644
|
|
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
|
|
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
|
|
@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
|
|
unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
|
|
int i;
|
|
|
|
- while (leftover) {
|
|
+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
|
|
mc_header = (struct microcode_header_intel *)ucode_ptr;
|
|
|
|
mc_size = get_totalsize(mc_header);
|
|
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
|
|
index 832d05a..317c811 100644
|
|
--- a/arch/x86/kernel/cpu/mshyperv.c
|
|
+++ b/arch/x86/kernel/cpu/mshyperv.c
|
|
@@ -67,6 +67,7 @@ static struct clocksource hyperv_cs = {
|
|
.rating = 400, /* use this when running on Hyperv*/
|
|
.read = read_hv_clock,
|
|
.mask = CLOCKSOURCE_MASK(64),
|
|
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
|
};
|
|
|
|
static void __init ms_hyperv_init_platform(void)
|
|
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
|
|
index 79f9f84..fb345c4 100644
|
|
--- a/arch/x86/kernel/cpu/perf_event.c
|
|
+++ b/arch/x86/kernel/cpu/perf_event.c
|
|
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
|
|
continue;
|
|
if (event->attr.config1 & ~er->valid_mask)
|
|
return -EINVAL;
|
|
+ /* Check if the extra msrs can be safely accessed*/
|
|
+ if (!er->extra_msr_access)
|
|
+ return -ENXIO;
|
|
|
|
reg->idx = er->idx;
|
|
reg->config = event->attr.config1;
|
|
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
|
|
index 4972c24..7876c34 100644
|
|
--- a/arch/x86/kernel/cpu/perf_event.h
|
|
+++ b/arch/x86/kernel/cpu/perf_event.h
|
|
@@ -293,14 +293,16 @@ struct extra_reg {
|
|
u64 config_mask;
|
|
u64 valid_mask;
|
|
int idx; /* per_xxx->regs[] reg index */
|
|
+ bool extra_msr_access;
|
|
};
|
|
|
|
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
|
|
- .event = (e), \
|
|
- .msr = (ms), \
|
|
- .config_mask = (m), \
|
|
- .valid_mask = (vm), \
|
|
- .idx = EXTRA_REG_##i, \
|
|
+ .event = (e), \
|
|
+ .msr = (ms), \
|
|
+ .config_mask = (m), \
|
|
+ .valid_mask = (vm), \
|
|
+ .idx = EXTRA_REG_##i, \
|
|
+ .extra_msr_access = true, \
|
|
}
|
|
|
|
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
|
|
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
|
|
index 1340ebf..d4c0a0e 100644
|
|
--- a/arch/x86/kernel/cpu/perf_event_intel.c
|
|
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
|
|
@@ -2183,6 +2183,41 @@ static void intel_snb_check_microcode(void)
|
|
}
|
|
}
|
|
|
|
+/*
|
|
+ * Under certain circumstances, access certain MSR may cause #GP.
|
|
+ * The function tests if the input MSR can be safely accessed.
|
|
+ */
|
|
+static bool check_msr(unsigned long msr, u64 mask)
|
|
+{
|
|
+ u64 val_old, val_new, val_tmp;
|
|
+
|
|
+ /*
|
|
+ * Read the current value, change it and read it back to see if it
|
|
+ * matches, this is needed to detect certain hardware emulators
|
|
+ * (qemu/kvm) that don't trap on the MSR access and always return 0s.
|
|
+ */
|
|
+ if (rdmsrl_safe(msr, &val_old))
|
|
+ return false;
|
|
+
|
|
+ /*
|
|
+ * Only change the bits which can be updated by wrmsrl.
|
|
+ */
|
|
+ val_tmp = val_old ^ mask;
|
|
+ if (wrmsrl_safe(msr, val_tmp) ||
|
|
+ rdmsrl_safe(msr, &val_new))
|
|
+ return false;
|
|
+
|
|
+ if (val_new != val_tmp)
|
|
+ return false;
|
|
+
|
|
+ /* Here it's sure that the MSR can be safely accessed.
|
|
+ * Restore the old value and return.
|
|
+ */
|
|
+ wrmsrl(msr, val_old);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
static __init void intel_sandybridge_quirk(void)
|
|
{
|
|
x86_pmu.check_microcode = intel_snb_check_microcode;
|
|
@@ -2272,7 +2307,8 @@ __init int intel_pmu_init(void)
|
|
union cpuid10_ebx ebx;
|
|
struct event_constraint *c;
|
|
unsigned int unused;
|
|
- int version;
|
|
+ struct extra_reg *er;
|
|
+ int version, i;
|
|
|
|
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
|
|
switch (boot_cpu_data.x86) {
|
|
@@ -2475,6 +2511,9 @@ __init int intel_pmu_init(void)
|
|
case 62: /* IvyBridge EP */
|
|
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
|
|
sizeof(hw_cache_event_ids));
|
|
+ /* dTLB-load-misses on IVB is different than SNB */
|
|
+ hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
|
|
+
|
|
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
|
|
sizeof(hw_cache_extra_regs));
|
|
|
|
@@ -2575,6 +2614,34 @@ __init int intel_pmu_init(void)
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * Access LBR MSR may cause #GP under certain circumstances.
|
|
+ * E.g. KVM doesn't support LBR MSR
|
|
+ * Check all LBT MSR here.
|
|
+ * Disable LBR access if any LBR MSRs can not be accessed.
|
|
+ */
|
|
+ if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
|
|
+ x86_pmu.lbr_nr = 0;
|
|
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
|
|
+ if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
|
|
+ check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
|
|
+ x86_pmu.lbr_nr = 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Access extra MSR may cause #GP under certain circumstances.
|
|
+ * E.g. KVM doesn't support offcore event
|
|
+ * Check all extra_regs here.
|
|
+ */
|
|
+ if (x86_pmu.extra_regs) {
|
|
+ for (er = x86_pmu.extra_regs; er->msr; er++) {
|
|
+ er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
|
|
+ /* Disable LBR select mapping */
|
|
+ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
|
|
+ x86_pmu.lbr_sel_map = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
/* Support full width counters using alternative MSR range */
|
|
if (x86_pmu.intel_cap.full_width_write) {
|
|
x86_pmu.max_period = x86_pmu.cntval_mask;
|
|
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
|
|
index 5ad35ad..95700e5 100644
|
|
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
|
|
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
|
|
@@ -511,6 +511,7 @@ static int rapl_cpu_prepare(int cpu)
|
|
struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
|
|
int phys_id = topology_physical_package_id(cpu);
|
|
u64 ms;
|
|
+ u64 msr_rapl_power_unit_bits;
|
|
|
|
if (pmu)
|
|
return 0;
|
|
@@ -518,6 +519,9 @@ static int rapl_cpu_prepare(int cpu)
|
|
if (phys_id < 0)
|
|
return -1;
|
|
|
|
+ if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
|
|
+ return -1;
|
|
+
|
|
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
|
|
if (!pmu)
|
|
return -1;
|
|
@@ -531,8 +535,7 @@ static int rapl_cpu_prepare(int cpu)
|
|
*
|
|
* we cache in local PMU instance
|
|
*/
|
|
- rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
|
|
- pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
|
|
+ pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
|
|
pmu->pmu = &rapl_pmu_class;
|
|
|
|
/*
|
|
@@ -649,7 +652,9 @@ static int __init rapl_pmu_init(void)
|
|
get_online_cpus();
|
|
|
|
for_each_online_cpu(cpu) {
|
|
- rapl_cpu_prepare(cpu);
|
|
+ ret = rapl_cpu_prepare(cpu);
|
|
+ if (ret)
|
|
+ goto out;
|
|
rapl_cpu_init(cpu);
|
|
}
|
|
|
|
@@ -672,6 +677,7 @@ static int __init rapl_pmu_init(void)
|
|
hweight32(rapl_cntr_mask),
|
|
ktime_to_ms(pmu->timer_interval));
|
|
|
|
+out:
|
|
put_online_cpus();
|
|
|
|
return 0;
|
|
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
|
|
index 047f540..2f98588 100644
|
|
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
|
|
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
|
|
@@ -2886,6 +2886,17 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
|
|
return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
|
|
}
|
|
|
|
+/*
|
|
+ * Using uncore_pmu_event_init pmu event_init callback
|
|
+ * as a detection point for uncore events.
|
|
+ */
|
|
+static int uncore_pmu_event_init(struct perf_event *event);
|
|
+
|
|
+static bool is_uncore_event(struct perf_event *event)
|
|
+{
|
|
+ return event->pmu->event_init == uncore_pmu_event_init;
|
|
+}
|
|
+
|
|
static int
|
|
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
|
|
{
|
|
@@ -2900,13 +2911,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
|
|
return -EINVAL;
|
|
|
|
n = box->n_events;
|
|
- box->event_list[n] = leader;
|
|
- n++;
|
|
+
|
|
+ if (is_uncore_event(leader)) {
|
|
+ box->event_list[n] = leader;
|
|
+ n++;
|
|
+ }
|
|
+
|
|
if (!dogrp)
|
|
return n;
|
|
|
|
list_for_each_entry(event, &leader->sibling_list, group_entry) {
|
|
- if (event->state <= PERF_EVENT_STATE_OFF)
|
|
+ if (!is_uncore_event(event) ||
|
|
+ event->state <= PERF_EVENT_STATE_OFF)
|
|
continue;
|
|
|
|
if (n >= max_count)
|
|
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
|
|
index addb207..66e274a 100644
|
|
--- a/arch/x86/kernel/dumpstack_64.c
|
|
+++ b/arch/x86/kernel/dumpstack_64.c
|
|
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
|
|
[ DEBUG_STACK-1 ] = "#DB",
|
|
[ NMI_STACK-1 ] = "NMI",
|
|
[ DOUBLEFAULT_STACK-1 ] = "#DF",
|
|
- [ STACKFAULT_STACK-1 ] = "#SS",
|
|
[ MCE_STACK-1 ] = "#MC",
|
|
#if DEBUG_STKSZ > EXCEPTION_STKSZ
|
|
[ N_EXCEPTION_STACKS ...
|
|
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
|
|
index 6491353..c5a9cb9 100644
|
|
--- a/arch/x86/kernel/entry_32.S
|
|
+++ b/arch/x86/kernel/entry_32.S
|
|
@@ -433,8 +433,8 @@ sysenter_do_call:
|
|
cmpl $(NR_syscalls), %eax
|
|
jae sysenter_badsys
|
|
call *sys_call_table(,%eax,4)
|
|
- movl %eax,PT_EAX(%esp)
|
|
sysenter_after_call:
|
|
+ movl %eax,PT_EAX(%esp)
|
|
LOCKDEP_SYS_EXIT
|
|
DISABLE_INTERRUPTS(CLBR_ANY)
|
|
TRACE_IRQS_OFF
|
|
@@ -514,6 +514,7 @@ ENTRY(system_call)
|
|
jae syscall_badsys
|
|
syscall_call:
|
|
call *sys_call_table(,%eax,4)
|
|
+syscall_after_call:
|
|
movl %eax,PT_EAX(%esp) # store the return value
|
|
syscall_exit:
|
|
LOCKDEP_SYS_EXIT
|
|
@@ -528,6 +529,7 @@ syscall_exit:
|
|
restore_all:
|
|
TRACE_IRQS_IRET
|
|
restore_all_notrace:
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
|
|
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
|
|
# are returning to the kernel.
|
|
@@ -538,6 +540,7 @@ restore_all_notrace:
|
|
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
|
|
CFI_REMEMBER_STATE
|
|
je ldt_ss # returning to user-space with LDT SS
|
|
+#endif
|
|
restore_nocheck:
|
|
RESTORE_REGS 4 # skip orig_eax/error_code
|
|
irq_return:
|
|
@@ -550,6 +553,7 @@ ENTRY(iret_exc)
|
|
.previous
|
|
_ASM_EXTABLE(irq_return,iret_exc)
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
CFI_RESTORE_STATE
|
|
ldt_ss:
|
|
#ifdef CONFIG_PARAVIRT
|
|
@@ -593,6 +597,7 @@ ldt_ss:
|
|
lss (%esp), %esp /* switch to espfix segment */
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
jmp restore_nocheck
|
|
+#endif
|
|
CFI_ENDPROC
|
|
ENDPROC(system_call)
|
|
|
|
@@ -683,12 +688,12 @@ syscall_fault:
|
|
END(syscall_fault)
|
|
|
|
syscall_badsys:
|
|
- movl $-ENOSYS,PT_EAX(%esp)
|
|
- jmp syscall_exit
|
|
+ movl $-ENOSYS,%eax
|
|
+ jmp syscall_after_call
|
|
END(syscall_badsys)
|
|
|
|
sysenter_badsys:
|
|
- movl $-ENOSYS,PT_EAX(%esp)
|
|
+ movl $-ENOSYS,%eax
|
|
jmp sysenter_after_call
|
|
END(syscall_badsys)
|
|
CFI_ENDPROC
|
|
@@ -705,6 +710,7 @@ END(syscall_badsys)
|
|
* the high word of the segment base from the GDT and swiches to the
|
|
* normal stack and adjusts ESP with the matching offset.
|
|
*/
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
/* fixup the stack */
|
|
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
|
|
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
|
|
@@ -714,8 +720,10 @@ END(syscall_badsys)
|
|
pushl_cfi %eax
|
|
lss (%esp), %esp /* switch to the normal stack segment */
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
+#endif
|
|
.endm
|
|
.macro UNWIND_ESPFIX_STACK
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
movl %ss, %eax
|
|
/* see if on espfix stack */
|
|
cmpw $__ESPFIX_SS, %ax
|
|
@@ -726,6 +734,7 @@ END(syscall_badsys)
|
|
/* switch to normal stack */
|
|
FIXUP_ESPFIX_STACK
|
|
27:
|
|
+#endif
|
|
.endm
|
|
|
|
/*
|
|
@@ -1356,11 +1365,13 @@ END(debug)
|
|
ENTRY(nmi)
|
|
RING0_INT_FRAME
|
|
ASM_CLAC
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
pushl_cfi %eax
|
|
movl %ss, %eax
|
|
cmpw $__ESPFIX_SS, %ax
|
|
popl_cfi %eax
|
|
je nmi_espfix_stack
|
|
+#endif
|
|
cmpl $ia32_sysenter_target,(%esp)
|
|
je nmi_stack_fixup
|
|
pushl_cfi %eax
|
|
@@ -1400,6 +1411,7 @@ nmi_debug_stack_check:
|
|
FIX_STACK 24, nmi_stack_correct, 1
|
|
jmp nmi_stack_correct
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
nmi_espfix_stack:
|
|
/* We have a RING0_INT_FRAME here.
|
|
*
|
|
@@ -1421,6 +1433,7 @@ nmi_espfix_stack:
|
|
lss 12+4(%esp), %esp # back to espfix stack
|
|
CFI_ADJUST_CFA_OFFSET -24
|
|
jmp irq_return
|
|
+#endif
|
|
CFI_ENDPROC
|
|
END(nmi)
|
|
|
|
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
|
|
index 1e96c36..06469ee 100644
|
|
--- a/arch/x86/kernel/entry_64.S
|
|
+++ b/arch/x86/kernel/entry_64.S
|
|
@@ -58,6 +58,7 @@
|
|
#include <asm/asm.h>
|
|
#include <asm/context_tracking.h>
|
|
#include <asm/smap.h>
|
|
+#include <asm/pgtable_types.h>
|
|
#include <linux/err.h>
|
|
|
|
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
|
|
@@ -541,11 +542,14 @@ ENTRY(ret_from_fork)
|
|
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
|
|
jz 1f
|
|
|
|
- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
|
|
- jnz int_ret_from_sys_call
|
|
-
|
|
- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
|
|
- jmp ret_from_sys_call # go to the SYSRET fastpath
|
|
+ /*
|
|
+ * By the time we get here, we have no idea whether our pt_regs,
|
|
+ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
|
|
+ * the slow path, or one of the ia32entry paths.
|
|
+ * Use int_ret_from_sys_call to return, since it can safely handle
|
|
+ * all of the above.
|
|
+ */
|
|
+ jmp int_ret_from_sys_call
|
|
|
|
1:
|
|
subq $REST_SKIP, %rsp # leave space for volatiles
|
|
@@ -1041,32 +1045,52 @@ restore_args:
|
|
|
|
irq_return:
|
|
INTERRUPT_RETURN
|
|
- _ASM_EXTABLE(irq_return, bad_iret)
|
|
|
|
-#ifdef CONFIG_PARAVIRT
|
|
ENTRY(native_iret)
|
|
- iretq
|
|
- _ASM_EXTABLE(native_iret, bad_iret)
|
|
+ /*
|
|
+ * Are we returning to a stack segment from the LDT? Note: in
|
|
+ * 64-bit mode SS:RSP on the exception stack is always valid.
|
|
+ */
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
+ testb $4,(SS-RIP)(%rsp)
|
|
+ jnz native_irq_return_ldt
|
|
#endif
|
|
|
|
- .section .fixup,"ax"
|
|
-bad_iret:
|
|
+.global native_irq_return_iret
|
|
+native_irq_return_iret:
|
|
/*
|
|
- * The iret traps when the %cs or %ss being restored is bogus.
|
|
- * We've lost the original trap vector and error code.
|
|
- * #GPF is the most likely one to get for an invalid selector.
|
|
- * So pretend we completed the iret and took the #GPF in user mode.
|
|
- *
|
|
- * We are now running with the kernel GS after exception recovery.
|
|
- * But error_entry expects us to have user GS to match the user %cs,
|
|
- * so swap back.
|
|
+ * This may fault. Non-paranoid faults on return to userspace are
|
|
+ * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
|
|
+ * Double-faults due to espfix64 are handled in do_double_fault.
|
|
+ * Other faults here are fatal.
|
|
*/
|
|
- pushq $0
|
|
+ iretq
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
+native_irq_return_ldt:
|
|
+ pushq_cfi %rax
|
|
+ pushq_cfi %rdi
|
|
SWAPGS
|
|
- jmp general_protection
|
|
-
|
|
- .previous
|
|
+ movq PER_CPU_VAR(espfix_waddr),%rdi
|
|
+ movq %rax,(0*8)(%rdi) /* RAX */
|
|
+ movq (2*8)(%rsp),%rax /* RIP */
|
|
+ movq %rax,(1*8)(%rdi)
|
|
+ movq (3*8)(%rsp),%rax /* CS */
|
|
+ movq %rax,(2*8)(%rdi)
|
|
+ movq (4*8)(%rsp),%rax /* RFLAGS */
|
|
+ movq %rax,(3*8)(%rdi)
|
|
+ movq (6*8)(%rsp),%rax /* SS */
|
|
+ movq %rax,(5*8)(%rdi)
|
|
+ movq (5*8)(%rsp),%rax /* RSP */
|
|
+ movq %rax,(4*8)(%rdi)
|
|
+ andl $0xffff0000,%eax
|
|
+ popq_cfi %rdi
|
|
+ orq PER_CPU_VAR(espfix_stack),%rax
|
|
+ SWAPGS
|
|
+ movq %rax,%rsp
|
|
+ popq_cfi %rax
|
|
+ jmp native_irq_return_iret
|
|
+#endif
|
|
|
|
/* edi: workmask, edx: work */
|
|
retint_careful:
|
|
@@ -1110,9 +1134,9 @@ ENTRY(retint_kernel)
|
|
call preempt_schedule_irq
|
|
jmp exit_intr
|
|
#endif
|
|
-
|
|
CFI_ENDPROC
|
|
END(common_interrupt)
|
|
+
|
|
/*
|
|
* End of kprobes section
|
|
*/
|
|
@@ -1484,7 +1508,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
|
|
|
|
paranoidzeroentry_ist debug do_debug DEBUG_STACK
|
|
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
|
|
-paranoiderrorentry stack_segment do_stack_segment
|
|
+errorentry stack_segment do_stack_segment
|
|
#ifdef CONFIG_XEN
|
|
zeroentry xen_debug do_debug
|
|
zeroentry xen_int3 do_int3
|
|
@@ -1594,16 +1618,15 @@ error_sti:
|
|
|
|
/*
|
|
* There are two places in the kernel that can potentially fault with
|
|
- * usergs. Handle them here. The exception handlers after iret run with
|
|
- * kernel gs again, so don't set the user space flag. B stepping K8s
|
|
- * sometimes report an truncated RIP for IRET exceptions returning to
|
|
- * compat mode. Check for these here too.
|
|
+ * usergs. Handle them here. B stepping K8s sometimes report a
|
|
+ * truncated RIP for IRET exceptions returning to compat mode. Check
|
|
+ * for these here too.
|
|
*/
|
|
error_kernelspace:
|
|
incl %ebx
|
|
- leaq irq_return(%rip),%rcx
|
|
+ leaq native_irq_return_iret(%rip),%rcx
|
|
cmpq %rcx,RIP+8(%rsp)
|
|
- je error_swapgs
|
|
+ je error_bad_iret
|
|
movl %ecx,%eax /* zero extend */
|
|
cmpq %rax,RIP+8(%rsp)
|
|
je bstep_iret
|
|
@@ -1614,7 +1637,15 @@ error_kernelspace:
|
|
bstep_iret:
|
|
/* Fix truncated RIP */
|
|
movq %rcx,RIP+8(%rsp)
|
|
- jmp error_swapgs
|
|
+ /* fall through */
|
|
+
|
|
+error_bad_iret:
|
|
+ SWAPGS
|
|
+ mov %rsp,%rdi
|
|
+ call fixup_bad_iret
|
|
+ mov %rax,%rsp
|
|
+ decl %ebx /* Return to usergs */
|
|
+ jmp error_sti
|
|
CFI_ENDPROC
|
|
END(error_entry)
|
|
|
|
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
|
|
index 85126cc..5fc4ac7 100644
|
|
--- a/arch/x86/kernel/head64.c
|
|
+++ b/arch/x86/kernel/head64.c
|
|
@@ -162,7 +162,7 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
|
|
clear_bss();
|
|
|
|
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
|
|
- set_intr_gate(i, early_idt_handlers[i]);
|
|
+ set_intr_gate(i, early_idt_handler_array[i]);
|
|
load_idt((const struct desc_ptr *)&idt_descr);
|
|
|
|
copy_bootdata(__va(real_mode_data));
|
|
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
|
|
index f36bd42..30a2aa3 100644
|
|
--- a/arch/x86/kernel/head_32.S
|
|
+++ b/arch/x86/kernel/head_32.S
|
|
@@ -477,21 +477,22 @@ is486:
|
|
__INIT
|
|
setup_once:
|
|
/*
|
|
- * Set up a idt with 256 entries pointing to ignore_int,
|
|
- * interrupt gates. It doesn't actually load idt - that needs
|
|
- * to be done on each CPU. Interrupts are enabled elsewhere,
|
|
- * when we can be relatively sure everything is ok.
|
|
+ * Set up a idt with 256 interrupt gates that push zero if there
|
|
+ * is no error code and then jump to early_idt_handler_common.
|
|
+ * It doesn't actually load the idt - that needs to be done on
|
|
+ * each CPU. Interrupts are enabled elsewhere, when we can be
|
|
+ * relatively sure everything is ok.
|
|
*/
|
|
|
|
movl $idt_table,%edi
|
|
- movl $early_idt_handlers,%eax
|
|
+ movl $early_idt_handler_array,%eax
|
|
movl $NUM_EXCEPTION_VECTORS,%ecx
|
|
1:
|
|
movl %eax,(%edi)
|
|
movl %eax,4(%edi)
|
|
/* interrupt gate, dpl=0, present */
|
|
movl $(0x8E000000 + __KERNEL_CS),2(%edi)
|
|
- addl $9,%eax
|
|
+ addl $EARLY_IDT_HANDLER_SIZE,%eax
|
|
addl $8,%edi
|
|
loop 1b
|
|
|
|
@@ -523,26 +524,28 @@ setup_once:
|
|
andl $0,setup_once_ref /* Once is enough, thanks */
|
|
ret
|
|
|
|
-ENTRY(early_idt_handlers)
|
|
+ENTRY(early_idt_handler_array)
|
|
# 36(%esp) %eflags
|
|
# 32(%esp) %cs
|
|
# 28(%esp) %eip
|
|
# 24(%rsp) error code
|
|
i = 0
|
|
.rept NUM_EXCEPTION_VECTORS
|
|
- .if (EXCEPTION_ERRCODE_MASK >> i) & 1
|
|
- ASM_NOP2
|
|
- .else
|
|
+ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
|
|
pushl $0 # Dummy error code, to make stack frame uniform
|
|
.endif
|
|
pushl $i # 20(%esp) Vector number
|
|
- jmp early_idt_handler
|
|
+ jmp early_idt_handler_common
|
|
i = i + 1
|
|
+ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
|
|
.endr
|
|
-ENDPROC(early_idt_handlers)
|
|
+ENDPROC(early_idt_handler_array)
|
|
|
|
- /* This is global to keep gas from relaxing the jumps */
|
|
-ENTRY(early_idt_handler)
|
|
+early_idt_handler_common:
|
|
+ /*
|
|
+ * The stack is the hardware frame, an error code or zero, and the
|
|
+ * vector number.
|
|
+ */
|
|
cld
|
|
|
|
cmpl $2,(%esp) # X86_TRAP_NMI
|
|
@@ -602,7 +605,7 @@ ex_entry:
|
|
is_nmi:
|
|
addl $8,%esp /* drop vector number and error code */
|
|
iret
|
|
-ENDPROC(early_idt_handler)
|
|
+ENDPROC(early_idt_handler_common)
|
|
|
|
/* This is the default interrupt "handler" :-) */
|
|
ALIGN
|
|
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
|
|
index a468c0a..a2dc0ad 100644
|
|
--- a/arch/x86/kernel/head_64.S
|
|
+++ b/arch/x86/kernel/head_64.S
|
|
@@ -321,26 +321,28 @@ bad_address:
|
|
jmp bad_address
|
|
|
|
__INIT
|
|
- .globl early_idt_handlers
|
|
-early_idt_handlers:
|
|
+ENTRY(early_idt_handler_array)
|
|
# 104(%rsp) %rflags
|
|
# 96(%rsp) %cs
|
|
# 88(%rsp) %rip
|
|
# 80(%rsp) error code
|
|
i = 0
|
|
.rept NUM_EXCEPTION_VECTORS
|
|
- .if (EXCEPTION_ERRCODE_MASK >> i) & 1
|
|
- ASM_NOP2
|
|
- .else
|
|
+ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
|
|
pushq $0 # Dummy error code, to make stack frame uniform
|
|
.endif
|
|
pushq $i # 72(%rsp) Vector number
|
|
- jmp early_idt_handler
|
|
+ jmp early_idt_handler_common
|
|
i = i + 1
|
|
+ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
|
|
.endr
|
|
+ENDPROC(early_idt_handler_array)
|
|
|
|
-/* This is global to keep gas from relaxing the jumps */
|
|
-ENTRY(early_idt_handler)
|
|
+early_idt_handler_common:
|
|
+ /*
|
|
+ * The stack is the hardware frame, an error code or zero, and the
|
|
+ * vector number.
|
|
+ */
|
|
cld
|
|
|
|
cmpl $2,(%rsp) # X86_TRAP_NMI
|
|
@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
|
|
is_nmi:
|
|
addq $16,%rsp # drop vector number and error code
|
|
INTERRUPT_RETURN
|
|
-ENDPROC(early_idt_handler)
|
|
+ENDPROC(early_idt_handler_common)
|
|
|
|
__INITDATA
|
|
|
|
diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
|
|
index c3aae66..2e97b3c 100644
|
|
--- a/arch/x86/kernel/iosf_mbi.c
|
|
+++ b/arch/x86/kernel/iosf_mbi.c
|
|
@@ -25,6 +25,10 @@
|
|
|
|
#include <asm/iosf_mbi.h>
|
|
|
|
+#define PCI_DEVICE_ID_BAYTRAIL 0x0F00
|
|
+#define PCI_DEVICE_ID_BRASWELL 0x2280
|
|
+#define PCI_DEVICE_ID_QUARK_X1000 0x0958
|
|
+
|
|
static DEFINE_SPINLOCK(iosf_mbi_lock);
|
|
|
|
static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
|
|
@@ -177,6 +181,13 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
|
|
}
|
|
EXPORT_SYMBOL(iosf_mbi_modify);
|
|
|
|
+bool iosf_mbi_available(void)
|
|
+{
|
|
+ /* Mbi isn't hot-pluggable. No remove routine is provided */
|
|
+ return mbi_pdev;
|
|
+}
|
|
+EXPORT_SYMBOL(iosf_mbi_available);
|
|
+
|
|
static int iosf_mbi_probe(struct pci_dev *pdev,
|
|
const struct pci_device_id *unused)
|
|
{
|
|
@@ -193,7 +204,9 @@ static int iosf_mbi_probe(struct pci_dev *pdev,
|
|
}
|
|
|
|
static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
|
|
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
|
|
{ 0, },
|
|
};
|
|
MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
|
|
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
|
|
index 79a3f96..490fee1 100644
|
|
--- a/arch/x86/kernel/kprobes/core.c
|
|
+++ b/arch/x86/kernel/kprobes/core.c
|
|
@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
|
|
{
|
|
struct insn insn;
|
|
kprobe_opcode_t buf[MAX_INSN_SIZE];
|
|
+ int length;
|
|
|
|
kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
|
|
insn_get_length(&insn);
|
|
+ length = insn.length;
|
|
+
|
|
/* Another subsystem puts a breakpoint, failed to recover */
|
|
if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
|
|
return 0;
|
|
- memcpy(dest, insn.kaddr, insn.length);
|
|
+ memcpy(dest, insn.kaddr, length);
|
|
|
|
#ifdef CONFIG_X86_64
|
|
if (insn_rip_relative(&insn)) {
|
|
@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
|
|
*(s32 *) disp = (s32) newdisp;
|
|
}
|
|
#endif
|
|
- return insn.length;
|
|
+ return length;
|
|
}
|
|
|
|
static int __kprobes arch_copy_kprobe(struct kprobe *p)
|
|
@@ -1017,6 +1020,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
|
|
regs->flags &= ~X86_EFLAGS_IF;
|
|
trace_hardirqs_off();
|
|
regs->ip = (unsigned long)(jp->entry);
|
|
+
|
|
+ /*
|
|
+ * jprobes use jprobe_return() which skips the normal return
|
|
+ * path of the function, and this messes up the accounting of the
|
|
+ * function graph tracer to get messed up.
|
|
+ *
|
|
+ * Pause function graph tracing while performing the jprobe function.
|
|
+ */
|
|
+ pause_graph_tracing();
|
|
return 1;
|
|
}
|
|
|
|
@@ -1042,24 +1054,25 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
|
|
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
|
u8 *addr = (u8 *) (regs->ip - 1);
|
|
struct jprobe *jp = container_of(p, struct jprobe, kp);
|
|
+ void *saved_sp = kcb->jprobe_saved_sp;
|
|
|
|
if ((addr > (u8 *) jprobe_return) &&
|
|
(addr < (u8 *) jprobe_return_end)) {
|
|
- if (stack_addr(regs) != kcb->jprobe_saved_sp) {
|
|
+ if (stack_addr(regs) != saved_sp) {
|
|
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
|
|
printk(KERN_ERR
|
|
"current sp %p does not match saved sp %p\n",
|
|
- stack_addr(regs), kcb->jprobe_saved_sp);
|
|
+ stack_addr(regs), saved_sp);
|
|
printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
|
|
show_regs(saved_regs);
|
|
printk(KERN_ERR "Current registers\n");
|
|
show_regs(regs);
|
|
BUG();
|
|
}
|
|
+ /* It's OK to start function graph tracing again */
|
|
+ unpause_graph_tracing();
|
|
*regs = kcb->jprobe_saved_regs;
|
|
- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
|
|
- kcb->jprobes_stack,
|
|
- MIN_STACK_SIZE(kcb->jprobe_saved_sp));
|
|
+ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
|
|
preempt_enable_no_resched();
|
|
return 1;
|
|
}
|
|
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
|
|
index 713f1b3..0b1e1d5 100644
|
|
--- a/arch/x86/kernel/kvm.c
|
|
+++ b/arch/x86/kernel/kvm.c
|
|
@@ -280,7 +280,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
|
|
static void __init paravirt_ops_setup(void)
|
|
{
|
|
pv_info.name = "KVM";
|
|
- pv_info.paravirt_enabled = 1;
|
|
+
|
|
+ /*
|
|
+ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
|
|
+ * guest kernel works like a bare metal kernel with additional
|
|
+ * features, and paravirt_enabled is about features that are
|
|
+ * missing.
|
|
+ */
|
|
+ pv_info.paravirt_enabled = 0;
|
|
|
|
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
|
|
pv_cpu_ops.io_delay = kvm_io_delay;
|
|
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
|
|
index e604109..c8e98cd 100644
|
|
--- a/arch/x86/kernel/kvmclock.c
|
|
+++ b/arch/x86/kernel/kvmclock.c
|
|
@@ -263,7 +263,6 @@ void __init kvmclock_init(void)
|
|
#endif
|
|
kvm_get_preset_lpj();
|
|
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
|
|
- pv_info.paravirt_enabled = 1;
|
|
pv_info.name = "KVM";
|
|
|
|
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
|
|
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
|
|
index dcbbaa1..c37886d 100644
|
|
--- a/arch/x86/kernel/ldt.c
|
|
+++ b/arch/x86/kernel/ldt.c
|
|
@@ -20,8 +20,6 @@
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/syscalls.h>
|
|
|
|
-int sysctl_ldt16 = 0;
|
|
-
|
|
#ifdef CONFIG_SMP
|
|
static void flush_ldt(void *current_mm)
|
|
{
|
|
@@ -231,16 +229,10 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
|
|
}
|
|
}
|
|
|
|
- /*
|
|
- * On x86-64 we do not support 16-bit segments due to
|
|
- * IRET leaking the high bits of the kernel stack address.
|
|
- */
|
|
-#ifdef CONFIG_X86_64
|
|
- if (!ldt_info.seg_32bit && !sysctl_ldt16) {
|
|
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
|
|
error = -EINVAL;
|
|
goto out_unlock;
|
|
}
|
|
-#endif
|
|
|
|
fill_ldt(&ldt, &ldt_info);
|
|
if (oldmode)
|
|
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
|
|
index 3f08f34..a1da673 100644
|
|
--- a/arch/x86/kernel/paravirt_patch_64.c
|
|
+++ b/arch/x86/kernel/paravirt_patch_64.c
|
|
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
|
|
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
|
|
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
|
|
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
|
|
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
|
|
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
|
|
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
|
|
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
|
|
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
|
PATCH_SITE(pv_irq_ops, save_fl);
|
|
PATCH_SITE(pv_irq_ops, irq_enable);
|
|
PATCH_SITE(pv_irq_ops, irq_disable);
|
|
- PATCH_SITE(pv_cpu_ops, iret);
|
|
PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
|
|
PATCH_SITE(pv_cpu_ops, usergs_sysret32);
|
|
PATCH_SITE(pv_cpu_ops, usergs_sysret64);
|
|
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
|
|
index 3fb8d95..1a1ff42 100644
|
|
--- a/arch/x86/kernel/process.c
|
|
+++ b/arch/x86/kernel/process.c
|
|
@@ -28,6 +28,7 @@
|
|
#include <asm/fpu-internal.h>
|
|
#include <asm/debugreg.h>
|
|
#include <asm/nmi.h>
|
|
+#include <asm/mwait.h>
|
|
|
|
/*
|
|
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
|
@@ -398,6 +399,52 @@ static void amd_e400_idle(void)
|
|
default_idle();
|
|
}
|
|
|
|
+/*
|
|
+ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
|
|
+ * We can't rely on cpuidle installing MWAIT, because it will not load
|
|
+ * on systems that support only C1 -- so the boot default must be MWAIT.
|
|
+ *
|
|
+ * Some AMD machines are the opposite, they depend on using HALT.
|
|
+ *
|
|
+ * So for default C1, which is used during boot until cpuidle loads,
|
|
+ * use MWAIT-C1 on Intel HW that has it, else use HALT.
|
|
+ */
|
|
+static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
|
|
+{
|
|
+ if (c->x86_vendor != X86_VENDOR_INTEL)
|
|
+ return 0;
|
|
+
|
|
+ if (!cpu_has(c, X86_FEATURE_MWAIT))
|
|
+ return 0;
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * MONITOR/MWAIT with no hints, used for default default C1 state.
|
|
+ * This invokes MWAIT with interrutps enabled and no flags,
|
|
+ * which is backwards compatible with the original MWAIT implementation.
|
|
+ */
|
|
+
|
|
+static void mwait_idle(void)
|
|
+{
|
|
+ if (!current_set_polling_and_test()) {
|
|
+ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
|
|
+ mb();
|
|
+ clflush((void *)¤t_thread_info()->flags);
|
|
+ mb();
|
|
+ }
|
|
+
|
|
+ __monitor((void *)¤t_thread_info()->flags, 0, 0);
|
|
+ if (!need_resched())
|
|
+ __sti_mwait(0, 0);
|
|
+ else
|
|
+ local_irq_enable();
|
|
+ } else
|
|
+ local_irq_enable();
|
|
+ current_clr_polling();
|
|
+}
|
|
+
|
|
void select_idle_routine(const struct cpuinfo_x86 *c)
|
|
{
|
|
#ifdef CONFIG_SMP
|
|
@@ -411,6 +458,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
|
|
/* E400: APIC timer interrupt does not wake up CPU from C1e */
|
|
pr_info("using AMD E400 aware idle routine\n");
|
|
x86_idle = amd_e400_idle;
|
|
+ } else if (prefer_mwait_c1_over_halt(c)) {
|
|
+ pr_info("using mwait in idle threads\n");
|
|
+ x86_idle = mwait_idle;
|
|
} else
|
|
x86_idle = default_idle;
|
|
}
|
|
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
|
|
index 9c0280f..e2d26ce 100644
|
|
--- a/arch/x86/kernel/process_64.c
|
|
+++ b/arch/x86/kernel/process_64.c
|
|
@@ -286,24 +286,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
|
|
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
|
|
|
|
- /*
|
|
- * Reload esp0, LDT and the page table pointer:
|
|
- */
|
|
+ /* Reload esp0 and ss1. */
|
|
load_sp0(tss, next);
|
|
|
|
- /*
|
|
- * Switch DS and ES.
|
|
- * This won't pick up thread selector changes, but I guess that is ok.
|
|
- */
|
|
- savesegment(es, prev->es);
|
|
- if (unlikely(next->es | prev->es))
|
|
- loadsegment(es, next->es);
|
|
-
|
|
- savesegment(ds, prev->ds);
|
|
- if (unlikely(next->ds | prev->ds))
|
|
- loadsegment(ds, next->ds);
|
|
-
|
|
-
|
|
/* We must save %fs and %gs before load_TLS() because
|
|
* %fs and %gs may be cleared by load_TLS().
|
|
*
|
|
@@ -312,41 +297,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
savesegment(fs, fsindex);
|
|
savesegment(gs, gsindex);
|
|
|
|
+ /*
|
|
+ * Load TLS before restoring any segments so that segment loads
|
|
+ * reference the correct GDT entries.
|
|
+ */
|
|
load_TLS(next, cpu);
|
|
|
|
/*
|
|
- * Leave lazy mode, flushing any hypercalls made here.
|
|
- * This must be done before restoring TLS segments so
|
|
- * the GDT and LDT are properly updated, and must be
|
|
- * done before math_state_restore, so the TS bit is up
|
|
- * to date.
|
|
+ * Leave lazy mode, flushing any hypercalls made here. This
|
|
+ * must be done after loading TLS entries in the GDT but before
|
|
+ * loading segments that might reference them, and and it must
|
|
+ * be done before math_state_restore, so the TS bit is up to
|
|
+ * date.
|
|
*/
|
|
arch_end_context_switch(next_p);
|
|
|
|
+ /* Switch DS and ES.
|
|
+ *
|
|
+ * Reading them only returns the selectors, but writing them (if
|
|
+ * nonzero) loads the full descriptor from the GDT or LDT. The
|
|
+ * LDT for next is loaded in switch_mm, and the GDT is loaded
|
|
+ * above.
|
|
+ *
|
|
+ * We therefore need to write new values to the segment
|
|
+ * registers on every context switch unless both the new and old
|
|
+ * values are zero.
|
|
+ *
|
|
+ * Note that we don't need to do anything for CS and SS, as
|
|
+ * those are saved and restored as part of pt_regs.
|
|
+ */
|
|
+ savesegment(es, prev->es);
|
|
+ if (unlikely(next->es | prev->es))
|
|
+ loadsegment(es, next->es);
|
|
+
|
|
+ savesegment(ds, prev->ds);
|
|
+ if (unlikely(next->ds | prev->ds))
|
|
+ loadsegment(ds, next->ds);
|
|
+
|
|
/*
|
|
* Switch FS and GS.
|
|
*
|
|
- * Segment register != 0 always requires a reload. Also
|
|
- * reload when it has changed. When prev process used 64bit
|
|
- * base always reload to avoid an information leak.
|
|
+ * These are even more complicated than FS and GS: they have
|
|
+ * 64-bit bases are that controlled by arch_prctl. Those bases
|
|
+ * only differ from the values in the GDT or LDT if the selector
|
|
+ * is 0.
|
|
+ *
|
|
+ * Loading the segment register resets the hidden base part of
|
|
+ * the register to 0 or the value from the GDT / LDT. If the
|
|
+ * next base address zero, writing 0 to the segment register is
|
|
+ * much faster than using wrmsr to explicitly zero the base.
|
|
+ *
|
|
+ * The thread_struct.fs and thread_struct.gs values are 0
|
|
+ * if the fs and gs bases respectively are not overridden
|
|
+ * from the values implied by fsindex and gsindex. They
|
|
+ * are nonzero, and store the nonzero base addresses, if
|
|
+ * the bases are overridden.
|
|
+ *
|
|
+ * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
|
|
+ * be impossible.
|
|
+ *
|
|
+ * Therefore we need to reload the segment registers if either
|
|
+ * the old or new selector is nonzero, and we need to override
|
|
+ * the base address if next thread expects it to be overridden.
|
|
+ *
|
|
+ * This code is unnecessarily slow in the case where the old and
|
|
+ * new indexes are zero and the new base is nonzero -- it will
|
|
+ * unnecessarily write 0 to the selector before writing the new
|
|
+ * base address.
|
|
+ *
|
|
+ * Note: This all depends on arch_prctl being the only way that
|
|
+ * user code can override the segment base. Once wrfsbase and
|
|
+ * wrgsbase are enabled, most of this code will need to change.
|
|
*/
|
|
if (unlikely(fsindex | next->fsindex | prev->fs)) {
|
|
loadsegment(fs, next->fsindex);
|
|
+
|
|
/*
|
|
- * Check if the user used a selector != 0; if yes
|
|
- * clear 64bit base, since overloaded base is always
|
|
- * mapped to the Null selector
|
|
+ * If user code wrote a nonzero value to FS, then it also
|
|
+ * cleared the overridden base address.
|
|
+ *
|
|
+ * XXX: if user code wrote 0 to FS and cleared the base
|
|
+ * address itself, we won't notice and we'll incorrectly
|
|
+ * restore the prior base address next time we reschdule
|
|
+ * the process.
|
|
*/
|
|
if (fsindex)
|
|
prev->fs = 0;
|
|
}
|
|
- /* when next process has a 64bit base use it */
|
|
if (next->fs)
|
|
wrmsrl(MSR_FS_BASE, next->fs);
|
|
prev->fsindex = fsindex;
|
|
|
|
if (unlikely(gsindex | next->gsindex | prev->gs)) {
|
|
load_gs_index(next->gsindex);
|
|
+
|
|
+ /* This works (and fails) the same way as fsindex above. */
|
|
if (gsindex)
|
|
prev->gs = 0;
|
|
}
|
|
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
|
|
index 7461f50..0686fe3 100644
|
|
--- a/arch/x86/kernel/ptrace.c
|
|
+++ b/arch/x86/kernel/ptrace.c
|
|
@@ -1441,15 +1441,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
|
|
force_sig_info(SIGTRAP, &info, tsk);
|
|
}
|
|
|
|
-
|
|
-#ifdef CONFIG_X86_32
|
|
-# define IS_IA32 1
|
|
-#elif defined CONFIG_IA32_EMULATION
|
|
-# define IS_IA32 is_compat_task()
|
|
-#else
|
|
-# define IS_IA32 0
|
|
-#endif
|
|
-
|
|
/*
|
|
* We must return the syscall number to actually look up in the table.
|
|
* This can be -1L to skip running any syscall at all.
|
|
@@ -1487,7 +1478,7 @@ long syscall_trace_enter(struct pt_regs *regs)
|
|
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
|
|
trace_sys_enter(regs, regs->orig_ax);
|
|
|
|
- if (IS_IA32)
|
|
+ if (is_ia32_task())
|
|
audit_syscall_entry(AUDIT_ARCH_I386,
|
|
regs->orig_ax,
|
|
regs->bx, regs->cx,
|
|
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
|
|
index c752cb4..a6aa91f 100644
|
|
--- a/arch/x86/kernel/reboot.c
|
|
+++ b/arch/x86/kernel/reboot.c
|
|
@@ -181,6 +181,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
|
|
},
|
|
},
|
|
|
|
+ /* ASRock */
|
|
+ { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
|
|
+ .callback = set_pci_reboot,
|
|
+ .ident = "ASRock Q1900DC-ITX",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
|
|
+ },
|
|
+ },
|
|
+
|
|
/* ASUS */
|
|
{ /* Handle problems with rebooting on ASUS P4S800 */
|
|
.callback = set_bios_reboot,
|
|
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
|
|
index 2a26819..80eab01 100644
|
|
--- a/arch/x86/kernel/resource.c
|
|
+++ b/arch/x86/kernel/resource.c
|
|
@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail)
|
|
|
|
void arch_remove_reservations(struct resource *avail)
|
|
{
|
|
- /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
|
|
+ /*
|
|
+ * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
|
|
+ * the low 1MB unconditionally, as this area is needed for some ISA
|
|
+ * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
|
|
+ */
|
|
if (avail->flags & IORESOURCE_MEM) {
|
|
- if (avail->start < BIOS_END)
|
|
- avail->start = BIOS_END;
|
|
resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
|
|
|
|
remove_e820_regions(avail);
|
|
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
|
|
index 9e5de68..b88fc86 100644
|
|
--- a/arch/x86/kernel/signal.c
|
|
+++ b/arch/x86/kernel/signal.c
|
|
@@ -673,6 +673,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
|
|
* handler too.
|
|
*/
|
|
regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
|
|
+ /*
|
|
+ * Ensure the signal handler starts with the new fpu state.
|
|
+ */
|
|
+ if (used_math())
|
|
+ drop_init_fpu(current);
|
|
}
|
|
signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
|
|
}
|
|
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
|
|
index a32da80..6828765 100644
|
|
--- a/arch/x86/kernel/smpboot.c
|
|
+++ b/arch/x86/kernel/smpboot.c
|
|
@@ -243,6 +243,13 @@ static void notrace start_secondary(void *unused)
|
|
check_tsc_sync_target();
|
|
|
|
/*
|
|
+ * Enable the espfix hack for this CPU
|
|
+ */
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
+ init_espfix_ap();
|
|
+#endif
|
|
+
|
|
+ /*
|
|
* We need to hold vector_lock so there the set of online cpus
|
|
* does not change while we are assigning vectors to cpus. Holding
|
|
* this lock ensures we don't half assign or remove an irq from a cpu.
|
|
@@ -1280,6 +1287,9 @@ static void remove_siblinginfo(int cpu)
|
|
|
|
for_each_cpu(sibling, cpu_sibling_mask(cpu))
|
|
cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
|
|
+ for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
|
|
+ cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
|
|
+ cpumask_clear(cpu_llc_shared_mask(cpu));
|
|
cpumask_clear(cpu_sibling_mask(cpu));
|
|
cpumask_clear(cpu_core_mask(cpu));
|
|
c->phys_proc_id = 0;
|
|
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
|
|
index f7fec09..7fc5e84 100644
|
|
--- a/arch/x86/kernel/tls.c
|
|
+++ b/arch/x86/kernel/tls.c
|
|
@@ -27,6 +27,58 @@ static int get_free_idx(void)
|
|
return -ESRCH;
|
|
}
|
|
|
|
+static bool tls_desc_okay(const struct user_desc *info)
|
|
+{
|
|
+ /*
|
|
+ * For historical reasons (i.e. no one ever documented how any
|
|
+ * of the segmentation APIs work), user programs can and do
|
|
+ * assume that a struct user_desc that's all zeros except for
|
|
+ * entry_number means "no segment at all". This never actually
|
|
+ * worked. In fact, up to Linux 3.19, a struct user_desc like
|
|
+ * this would create a 16-bit read-write segment with base and
|
|
+ * limit both equal to zero.
|
|
+ *
|
|
+ * That was close enough to "no segment at all" until we
|
|
+ * hardened this function to disallow 16-bit TLS segments. Fix
|
|
+ * it up by interpreting these zeroed segments the way that they
|
|
+ * were almost certainly intended to be interpreted.
|
|
+ *
|
|
+ * The correct way to ask for "no segment at all" is to specify
|
|
+ * a user_desc that satisfies LDT_empty. To keep everything
|
|
+ * working, we accept both.
|
|
+ *
|
|
+ * Note that there's a similar kludge in modify_ldt -- look at
|
|
+ * the distinction between modes 1 and 0x11.
|
|
+ */
|
|
+ if (LDT_empty(info) || LDT_zero(info))
|
|
+ return true;
|
|
+
|
|
+ /*
|
|
+ * espfix is required for 16-bit data segments, but espfix
|
|
+ * only works for LDT segments.
|
|
+ */
|
|
+ if (!info->seg_32bit)
|
|
+ return false;
|
|
+
|
|
+ /* Only allow data segments in the TLS array. */
|
|
+ if (info->contents > 1)
|
|
+ return false;
|
|
+
|
|
+ /*
|
|
+ * Non-present segments with DPL 3 present an interesting attack
|
|
+ * surface. The kernel should handle such segments correctly,
|
|
+ * but TLS is very difficult to protect in a sandbox, so prevent
|
|
+ * such segments from being created.
|
|
+ *
|
|
+ * If userspace needs to remove a TLS entry, it can still delete
|
|
+ * it outright.
|
|
+ */
|
|
+ if (info->seg_not_present)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
static void set_tls_desc(struct task_struct *p, int idx,
|
|
const struct user_desc *info, int n)
|
|
{
|
|
@@ -40,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
|
|
cpu = get_cpu();
|
|
|
|
while (n-- > 0) {
|
|
- if (LDT_empty(info))
|
|
+ if (LDT_empty(info) || LDT_zero(info))
|
|
desc->a = desc->b = 0;
|
|
else
|
|
fill_ldt(desc, info);
|
|
@@ -66,6 +118,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
|
|
if (copy_from_user(&info, u_info, sizeof(info)))
|
|
return -EFAULT;
|
|
|
|
+ if (!tls_desc_okay(&info))
|
|
+ return -EINVAL;
|
|
+
|
|
if (idx == -1)
|
|
idx = info.entry_number;
|
|
|
|
@@ -192,6 +247,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
|
|
{
|
|
struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
|
|
const struct user_desc *info;
|
|
+ int i;
|
|
|
|
if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
|
|
(pos % sizeof(struct user_desc)) != 0 ||
|
|
@@ -205,6 +261,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
|
|
else
|
|
info = infobuf;
|
|
|
|
+ for (i = 0; i < count / sizeof(struct user_desc); i++)
|
|
+ if (!tls_desc_okay(info + i))
|
|
+ return -EINVAL;
|
|
+
|
|
set_tls_desc(target,
|
|
GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
|
|
info, count / sizeof(struct user_desc));
|
|
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
|
|
index 57409f6..b1d9002 100644
|
|
--- a/arch/x86/kernel/traps.c
|
|
+++ b/arch/x86/kernel/traps.c
|
|
@@ -218,32 +218,40 @@ DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL
|
|
DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun )
|
|
DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS )
|
|
DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present )
|
|
-#ifdef CONFIG_X86_32
|
|
DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment )
|
|
-#endif
|
|
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 )
|
|
|
|
#ifdef CONFIG_X86_64
|
|
/* Runs on IST stack */
|
|
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
|
|
-{
|
|
- enum ctx_state prev_state;
|
|
-
|
|
- prev_state = exception_enter();
|
|
- if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
|
|
- X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
|
|
- preempt_conditional_sti(regs);
|
|
- do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
|
|
- preempt_conditional_cli(regs);
|
|
- }
|
|
- exception_exit(prev_state);
|
|
-}
|
|
-
|
|
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
|
|
{
|
|
static const char str[] = "double fault";
|
|
struct task_struct *tsk = current;
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
+ extern unsigned char native_irq_return_iret[];
|
|
+
|
|
+ /*
|
|
+ * If IRET takes a non-IST fault on the espfix64 stack, then we
|
|
+ * end up promoting it to a doublefault. In that case, modify
|
|
+ * the stack to make it look like we just entered the #GP
|
|
+ * handler from user space, similar to bad_iret.
|
|
+ */
|
|
+ if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
|
|
+ regs->cs == __KERNEL_CS &&
|
|
+ regs->ip == (unsigned long)native_irq_return_iret)
|
|
+ {
|
|
+ struct pt_regs *normal_regs = task_pt_regs(current);
|
|
+
|
|
+ /* Fake a #GP(0) from userspace. */
|
|
+ memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
|
|
+ normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
|
|
+ regs->ip = (unsigned long)general_protection;
|
|
+ regs->sp = (unsigned long)&normal_regs->orig_ax;
|
|
+ return;
|
|
+ }
|
|
+#endif
|
|
+
|
|
exception_enter();
|
|
/* Return not checked because double check cannot be ignored */
|
|
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
|
|
@@ -357,7 +365,7 @@ exit:
|
|
* for scheduling or signal handling. The actual stack switch is done in
|
|
* entry.S
|
|
*/
|
|
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
|
+asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
|
{
|
|
struct pt_regs *regs = eregs;
|
|
/* Did already sync */
|
|
@@ -376,6 +384,35 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
|
*regs = *eregs;
|
|
return regs;
|
|
}
|
|
+
|
|
+struct bad_iret_stack {
|
|
+ void *error_entry_ret;
|
|
+ struct pt_regs regs;
|
|
+};
|
|
+
|
|
+asmlinkage __visible notrace __kprobes
|
|
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
|
|
+{
|
|
+ /*
|
|
+ * This is called from entry_64.S early in handling a fault
|
|
+ * caused by a bad iret to user mode. To handle the fault
|
|
+ * correctly, we want move our stack frame to task_pt_regs
|
|
+ * and we want to pretend that the exception came from the
|
|
+ * iret target.
|
|
+ */
|
|
+ struct bad_iret_stack *new_stack =
|
|
+ container_of(task_pt_regs(current),
|
|
+ struct bad_iret_stack, regs);
|
|
+
|
|
+ /* Copy the IRET target to the new stack. */
|
|
+ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
|
|
+
|
|
+ /* Copy the remainder of the stack from the current stack. */
|
|
+ memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
|
|
+
|
|
+ BUG_ON(!user_mode_vm(&new_stack->regs));
|
|
+ return new_stack;
|
|
+}
|
|
#endif
|
|
|
|
/*
|
|
@@ -748,7 +785,7 @@ void __init trap_init(void)
|
|
set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
|
|
set_intr_gate(X86_TRAP_TS, invalid_TSS);
|
|
set_intr_gate(X86_TRAP_NP, segment_not_present);
|
|
- set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
|
|
+ set_intr_gate(X86_TRAP_SS, stack_segment);
|
|
set_intr_gate(X86_TRAP_GP, general_protection);
|
|
set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
|
|
set_intr_gate(X86_TRAP_MF, coprocessor_error);
|
|
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
|
|
index e0d1d7a..b20bced 100644
|
|
--- a/arch/x86/kernel/tsc.c
|
|
+++ b/arch/x86/kernel/tsc.c
|
|
@@ -618,7 +618,7 @@ static unsigned long quick_pit_calibrate(void)
|
|
goto success;
|
|
}
|
|
}
|
|
- pr_err("Fast TSC calibration failed\n");
|
|
+ pr_info("Fast TSC calibration failed\n");
|
|
return 0;
|
|
|
|
success:
|
|
@@ -1173,14 +1173,17 @@ void __init tsc_init(void)
|
|
|
|
x86_init.timers.tsc_pre_init();
|
|
|
|
- if (!cpu_has_tsc)
|
|
+ if (!cpu_has_tsc) {
|
|
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
|
|
return;
|
|
+ }
|
|
|
|
tsc_khz = x86_platform.calibrate_tsc();
|
|
cpu_khz = tsc_khz;
|
|
|
|
if (!tsc_khz) {
|
|
mark_tsc_unstable("could not calculate TSC khz");
|
|
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
|
|
return;
|
|
}
|
|
|
|
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
|
|
index 1f96f93..09ce23a 100644
|
|
--- a/arch/x86/kernel/vsyscall_64.c
|
|
+++ b/arch/x86/kernel/vsyscall_64.c
|
|
@@ -125,10 +125,10 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
|
|
if (!show_unhandled_signals)
|
|
return;
|
|
|
|
- pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
|
|
- level, current->comm, task_pid_nr(current),
|
|
- message, regs->ip, regs->cs,
|
|
- regs->sp, regs->ax, regs->si, regs->di);
|
|
+ printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
|
|
+ level, current->comm, task_pid_nr(current),
|
|
+ message, regs->ip, regs->cs,
|
|
+ regs->sp, regs->ax, regs->si, regs->di);
|
|
}
|
|
|
|
static int addr_to_vsyscall_nr(unsigned long addr)
|
|
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
|
|
index a4b451c..7a09aca 100644
|
|
--- a/arch/x86/kernel/xsave.c
|
|
+++ b/arch/x86/kernel/xsave.c
|
|
@@ -268,8 +268,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
|
|
if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
|
|
return -1;
|
|
|
|
- drop_init_fpu(tsk); /* trigger finit */
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -377,7 +375,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
|
|
* thread's fpu state, reconstruct fxstate from the fsave
|
|
* header. Sanitize the copied state etc.
|
|
*/
|
|
- struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
|
|
+ struct fpu *fpu = &tsk->thread.fpu;
|
|
struct user_i387_ia32_struct env;
|
|
int err = 0;
|
|
|
|
@@ -391,16 +389,20 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
|
|
*/
|
|
drop_fpu(tsk);
|
|
|
|
- if (__copy_from_user(xsave, buf_fx, state_size) ||
|
|
+ if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
|
|
__copy_from_user(&env, buf, sizeof(env))) {
|
|
+ fpu_finit(fpu);
|
|
err = -1;
|
|
} else {
|
|
sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
|
|
- set_used_math();
|
|
}
|
|
|
|
- if (use_eager_fpu())
|
|
+ set_used_math();
|
|
+ if (use_eager_fpu()) {
|
|
+ preempt_disable();
|
|
math_state_restore();
|
|
+ preempt_enable();
|
|
+ }
|
|
|
|
return err;
|
|
} else {
|
|
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
|
|
index 07ffca0..cf1eeea 100644
|
|
--- a/arch/x86/kvm/emulate.c
|
|
+++ b/arch/x86/kvm/emulate.c
|
|
@@ -498,11 +498,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
|
|
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
|
|
}
|
|
|
|
-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
|
|
-{
|
|
- register_address_increment(ctxt, &ctxt->_eip, rel);
|
|
-}
|
|
-
|
|
static u32 desc_limit_scaled(struct desc_struct *desc)
|
|
{
|
|
u32 limit = get_desc_limit(desc);
|
|
@@ -576,6 +571,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
|
|
return emulate_exception(ctxt, NM_VECTOR, 0, false);
|
|
}
|
|
|
|
+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
|
|
+ int cs_l)
|
|
+{
|
|
+ switch (ctxt->op_bytes) {
|
|
+ case 2:
|
|
+ ctxt->_eip = (u16)dst;
|
|
+ break;
|
|
+ case 4:
|
|
+ ctxt->_eip = (u32)dst;
|
|
+ break;
|
|
+ case 8:
|
|
+ if ((cs_l && is_noncanonical_address(dst)) ||
|
|
+ (!cs_l && (dst & ~(u32)-1)))
|
|
+ return emulate_gp(ctxt, 0);
|
|
+ ctxt->_eip = dst;
|
|
+ break;
|
|
+ default:
|
|
+ WARN(1, "unsupported eip assignment size\n");
|
|
+ }
|
|
+ return X86EMUL_CONTINUE;
|
|
+}
|
|
+
|
|
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
|
|
+{
|
|
+ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
|
|
+}
|
|
+
|
|
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
|
|
+{
|
|
+ return assign_eip_near(ctxt, ctxt->_eip + rel);
|
|
+}
|
|
+
|
|
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
|
|
{
|
|
u16 selector;
|
|
@@ -1958,13 +1985,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
|
|
case 2: /* call near abs */ {
|
|
long int old_eip;
|
|
old_eip = ctxt->_eip;
|
|
- ctxt->_eip = ctxt->src.val;
|
|
+ rc = assign_eip_near(ctxt, ctxt->src.val);
|
|
+ if (rc != X86EMUL_CONTINUE)
|
|
+ break;
|
|
ctxt->src.val = old_eip;
|
|
rc = em_push(ctxt);
|
|
break;
|
|
}
|
|
case 4: /* jmp abs */
|
|
- ctxt->_eip = ctxt->src.val;
|
|
+ rc = assign_eip_near(ctxt, ctxt->src.val);
|
|
break;
|
|
case 5: /* jmp far */
|
|
rc = em_jmp_far(ctxt);
|
|
@@ -1996,16 +2025,21 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
|
|
|
|
static int em_ret(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
- ctxt->dst.type = OP_REG;
|
|
- ctxt->dst.addr.reg = &ctxt->_eip;
|
|
- ctxt->dst.bytes = ctxt->op_bytes;
|
|
- return em_pop(ctxt);
|
|
+ int rc;
|
|
+ unsigned long eip;
|
|
+
|
|
+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
|
|
+ if (rc != X86EMUL_CONTINUE)
|
|
+ return rc;
|
|
+
|
|
+ return assign_eip_near(ctxt, eip);
|
|
}
|
|
|
|
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
int rc;
|
|
unsigned long cs;
|
|
+ int cpl = ctxt->ops->cpl(ctxt);
|
|
|
|
rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
|
|
if (rc != X86EMUL_CONTINUE)
|
|
@@ -2015,6 +2049,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
|
|
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
|
|
if (rc != X86EMUL_CONTINUE)
|
|
return rc;
|
|
+ /* Outer-privilege level return is not implemented */
|
|
+ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
|
|
+ return X86EMUL_UNHANDLEABLE;
|
|
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
|
|
return rc;
|
|
}
|
|
@@ -2221,7 +2258,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
|
|
* Not recognized on AMD in compat mode (but is recognized in legacy
|
|
* mode).
|
|
*/
|
|
- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
|
|
+ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
|
|
&& !vendor_intel(ctxt))
|
|
return emulate_ud(ctxt);
|
|
|
|
@@ -2234,25 +2271,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
|
|
setup_syscalls_segments(ctxt, &cs, &ss);
|
|
|
|
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
|
|
- switch (ctxt->mode) {
|
|
- case X86EMUL_MODE_PROT32:
|
|
- if ((msr_data & 0xfffc) == 0x0)
|
|
- return emulate_gp(ctxt, 0);
|
|
- break;
|
|
- case X86EMUL_MODE_PROT64:
|
|
- if (msr_data == 0x0)
|
|
- return emulate_gp(ctxt, 0);
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
+ if ((msr_data & 0xfffc) == 0x0)
|
|
+ return emulate_gp(ctxt, 0);
|
|
|
|
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
|
|
- cs_sel = (u16)msr_data;
|
|
- cs_sel &= ~SELECTOR_RPL_MASK;
|
|
+ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
|
|
ss_sel = cs_sel + 8;
|
|
- ss_sel &= ~SELECTOR_RPL_MASK;
|
|
- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
|
|
+ if (efer & EFER_LMA) {
|
|
cs.d = 0;
|
|
cs.l = 1;
|
|
}
|
|
@@ -2261,10 +2286,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
|
|
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
|
|
|
|
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
|
|
- ctxt->_eip = msr_data;
|
|
+ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
|
|
|
|
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
|
|
- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
|
|
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
|
|
+ (u32)msr_data;
|
|
|
|
return X86EMUL_CONTINUE;
|
|
}
|
|
@@ -2273,7 +2299,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
const struct x86_emulate_ops *ops = ctxt->ops;
|
|
struct desc_struct cs, ss;
|
|
- u64 msr_data;
|
|
+ u64 msr_data, rcx, rdx;
|
|
int usermode;
|
|
u16 cs_sel = 0, ss_sel = 0;
|
|
|
|
@@ -2289,6 +2315,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
|
else
|
|
usermode = X86EMUL_MODE_PROT32;
|
|
|
|
+ rcx = reg_read(ctxt, VCPU_REGS_RCX);
|
|
+ rdx = reg_read(ctxt, VCPU_REGS_RDX);
|
|
+
|
|
cs.dpl = 3;
|
|
ss.dpl = 3;
|
|
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
|
|
@@ -2306,6 +2335,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
|
ss_sel = cs_sel + 8;
|
|
cs.d = 0;
|
|
cs.l = 1;
|
|
+ if (is_noncanonical_address(rcx) ||
|
|
+ is_noncanonical_address(rdx))
|
|
+ return emulate_gp(ctxt, 0);
|
|
break;
|
|
}
|
|
cs_sel |= SELECTOR_RPL_MASK;
|
|
@@ -2314,8 +2346,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
|
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
|
|
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
|
|
|
|
- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
|
|
- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
|
|
+ ctxt->_eip = rdx;
|
|
+ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
|
|
|
|
return X86EMUL_CONTINUE;
|
|
}
|
|
@@ -2854,10 +2886,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
|
|
|
|
static int em_call(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
+ int rc;
|
|
long rel = ctxt->src.val;
|
|
|
|
ctxt->src.val = (unsigned long)ctxt->_eip;
|
|
- jmp_rel(ctxt, rel);
|
|
+ rc = jmp_rel(ctxt, rel);
|
|
+ if (rc != X86EMUL_CONTINUE)
|
|
+ return rc;
|
|
return em_push(ctxt);
|
|
}
|
|
|
|
@@ -2889,11 +2924,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
|
|
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
int rc;
|
|
+ unsigned long eip;
|
|
|
|
- ctxt->dst.type = OP_REG;
|
|
- ctxt->dst.addr.reg = &ctxt->_eip;
|
|
- ctxt->dst.bytes = ctxt->op_bytes;
|
|
- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
|
|
+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
|
|
+ if (rc != X86EMUL_CONTINUE)
|
|
+ return rc;
|
|
+ rc = assign_eip_near(ctxt, eip);
|
|
if (rc != X86EMUL_CONTINUE)
|
|
return rc;
|
|
rsp_increment(ctxt, ctxt->src.val);
|
|
@@ -3223,20 +3259,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
|
|
|
|
static int em_loop(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
+ int rc = X86EMUL_CONTINUE;
|
|
+
|
|
register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
|
|
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
|
|
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
|
|
- jmp_rel(ctxt, ctxt->src.val);
|
|
+ rc = jmp_rel(ctxt, ctxt->src.val);
|
|
|
|
- return X86EMUL_CONTINUE;
|
|
+ return rc;
|
|
}
|
|
|
|
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
|
|
{
|
|
+ int rc = X86EMUL_CONTINUE;
|
|
+
|
|
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
|
|
- jmp_rel(ctxt, ctxt->src.val);
|
|
+ rc = jmp_rel(ctxt, ctxt->src.val);
|
|
|
|
- return X86EMUL_CONTINUE;
|
|
+ return rc;
|
|
}
|
|
|
|
static int em_in(struct x86_emulate_ctxt *ctxt)
|
|
@@ -4595,7 +4635,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
|
|
if (rc != X86EMUL_CONTINUE)
|
|
goto done;
|
|
}
|
|
- ctxt->dst.orig_val = ctxt->dst.val;
|
|
+ /* Copy full 64-bit value for CMPXCHG8B. */
|
|
+ ctxt->dst.orig_val64 = ctxt->dst.val64;
|
|
|
|
special_insn:
|
|
|
|
@@ -4633,7 +4674,7 @@ special_insn:
|
|
break;
|
|
case 0x70 ... 0x7f: /* jcc (short) */
|
|
if (test_cc(ctxt->b, ctxt->eflags))
|
|
- jmp_rel(ctxt, ctxt->src.val);
|
|
+ rc = jmp_rel(ctxt, ctxt->src.val);
|
|
break;
|
|
case 0x8d: /* lea r16/r32, m */
|
|
ctxt->dst.val = ctxt->src.addr.mem.ea;
|
|
@@ -4662,7 +4703,7 @@ special_insn:
|
|
break;
|
|
case 0xe9: /* jmp rel */
|
|
case 0xeb: /* jmp rel short */
|
|
- jmp_rel(ctxt, ctxt->src.val);
|
|
+ rc = jmp_rel(ctxt, ctxt->src.val);
|
|
ctxt->dst.type = OP_NONE; /* Disable writeback. */
|
|
break;
|
|
case 0xf4: /* hlt */
|
|
@@ -4782,7 +4823,7 @@ twobyte_insn:
|
|
break;
|
|
case 0x80 ... 0x8f: /* jnz rel, etc*/
|
|
if (test_cc(ctxt->b, ctxt->eflags))
|
|
- jmp_rel(ctxt, ctxt->src.val);
|
|
+ rc = jmp_rel(ctxt, ctxt->src.val);
|
|
break;
|
|
case 0x90 ... 0x9f: /* setcc r/m8 */
|
|
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
|
|
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
|
|
index 518d864..1406ffd 100644
|
|
--- a/arch/x86/kvm/i8254.c
|
|
+++ b/arch/x86/kvm/i8254.c
|
|
@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
|
|
return;
|
|
|
|
timer = &pit->pit_state.timer;
|
|
+ mutex_lock(&pit->pit_state.lock);
|
|
if (hrtimer_cancel(timer))
|
|
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
|
|
+ mutex_unlock(&pit->pit_state.lock);
|
|
}
|
|
|
|
static void destroy_pit_timer(struct kvm_pit *pit)
|
|
@@ -303,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
|
|
* LVT0 to NMI delivery. Other PIC interrupts are just sent to
|
|
* VCPU0, and only if its LVT0 is in EXTINT mode.
|
|
*/
|
|
- if (kvm->arch.vapics_in_nmi_mode > 0)
|
|
+ if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
|
|
kvm_for_each_vcpu(i, vcpu, kvm)
|
|
kvm_apic_nmi_wd_deliver(vcpu);
|
|
}
|
|
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
|
|
index 484bc87..3ec38cb 100644
|
|
--- a/arch/x86/kvm/irq.c
|
|
+++ b/arch/x86/kvm/irq.c
|
|
@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
|
|
|
|
vector = kvm_cpu_get_extint(v);
|
|
|
|
- if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
|
|
+ if (vector != -1)
|
|
return vector; /* PIC */
|
|
|
|
return kvm_get_apic_interrupt(v); /* APIC */
|
|
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
|
|
index 0069118..6456734 100644
|
|
--- a/arch/x86/kvm/lapic.c
|
|
+++ b/arch/x86/kvm/lapic.c
|
|
@@ -352,25 +352,46 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
|
|
|
|
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
|
|
{
|
|
- apic->irr_pending = false;
|
|
+ struct kvm_vcpu *vcpu;
|
|
+
|
|
+ vcpu = apic->vcpu;
|
|
+
|
|
apic_clear_vector(vec, apic->regs + APIC_IRR);
|
|
- if (apic_search_irr(apic) != -1)
|
|
- apic->irr_pending = true;
|
|
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
|
|
+ /* try to update RVI */
|
|
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
|
|
+ else {
|
|
+ vec = apic_search_irr(apic);
|
|
+ apic->irr_pending = (vec != -1);
|
|
+ }
|
|
}
|
|
|
|
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
|
|
{
|
|
- /* Note that we never get here with APIC virtualization enabled. */
|
|
+ struct kvm_vcpu *vcpu;
|
|
+
|
|
+ if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
|
|
+ return;
|
|
+
|
|
+ vcpu = apic->vcpu;
|
|
|
|
- if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
|
|
- ++apic->isr_count;
|
|
- BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
|
|
/*
|
|
- * ISR (in service register) bit is set when injecting an interrupt.
|
|
- * The highest vector is injected. Thus the latest bit set matches
|
|
- * the highest bit in ISR.
|
|
+ * With APIC virtualization enabled, all caching is disabled
|
|
+ * because the processor can modify ISR under the hood. Instead
|
|
+ * just set SVI.
|
|
*/
|
|
- apic->highest_isr_cache = vec;
|
|
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
|
|
+ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
|
|
+ else {
|
|
+ ++apic->isr_count;
|
|
+ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
|
|
+ /*
|
|
+ * ISR (in service register) bit is set when injecting an interrupt.
|
|
+ * The highest vector is injected. Thus the latest bit set matches
|
|
+ * the highest bit in ISR.
|
|
+ */
|
|
+ apic->highest_isr_cache = vec;
|
|
+ }
|
|
}
|
|
|
|
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
|
|
@@ -1088,10 +1109,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
|
|
if (!nmi_wd_enabled) {
|
|
apic_debug("Receive NMI setting on APIC_LVT0 "
|
|
"for cpu %d\n", apic->vcpu->vcpu_id);
|
|
- apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
|
|
+ atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
|
|
}
|
|
} else if (nmi_wd_enabled)
|
|
- apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
|
|
+ atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
|
|
}
|
|
|
|
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
|
|
@@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
|
|
int vector = kvm_apic_has_interrupt(vcpu);
|
|
struct kvm_lapic *apic = vcpu->arch.apic;
|
|
|
|
- /* Note that we never get here with APIC virtualization enabled. */
|
|
-
|
|
if (vector == -1)
|
|
return -1;
|
|
|
|
+ /*
|
|
+ * We get here even with APIC virtualization enabled, if doing
|
|
+ * nested virtualization and L1 runs with the "acknowledge interrupt
|
|
+ * on exit" mode. Then we cannot inject the interrupt via RVI,
|
|
+ * because the process would deliver it through the IDT.
|
|
+ */
|
|
+
|
|
apic_set_isr(vector, apic);
|
|
apic_update_ppr(apic);
|
|
apic_clear_irr(vector, apic);
|
|
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
|
|
index 6a11845..7205173 100644
|
|
--- a/arch/x86/kvm/lapic.h
|
|
+++ b/arch/x86/kvm/lapic.h
|
|
@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
|
|
|
|
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
|
|
{
|
|
- return vcpu->arch.apic->pending_events;
|
|
+ return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
|
|
}
|
|
|
|
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
|
|
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
|
|
index 9b53135..dba56fb 100644
|
|
--- a/arch/x86/kvm/mmu.c
|
|
+++ b/arch/x86/kvm/mmu.c
|
|
@@ -198,16 +198,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
|
|
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
|
|
|
|
/*
|
|
- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
|
|
- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
|
|
- * number.
|
|
+ * the low bit of the generation number is always presumed to be zero.
|
|
+ * This disables mmio caching during memslot updates. The concept is
|
|
+ * similar to a seqcount but instead of retrying the access we just punt
|
|
+ * and ignore the cache.
|
|
+ *
|
|
+ * spte bits 3-11 are used as bits 1-9 of the generation number,
|
|
+ * the bits 52-61 are used as bits 10-19 of the generation number.
|
|
*/
|
|
-#define MMIO_SPTE_GEN_LOW_SHIFT 3
|
|
+#define MMIO_SPTE_GEN_LOW_SHIFT 2
|
|
#define MMIO_SPTE_GEN_HIGH_SHIFT 52
|
|
|
|
-#define MMIO_GEN_SHIFT 19
|
|
-#define MMIO_GEN_LOW_SHIFT 9
|
|
-#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 1)
|
|
+#define MMIO_GEN_SHIFT 20
|
|
+#define MMIO_GEN_LOW_SHIFT 10
|
|
+#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
|
|
#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
|
|
#define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1)
|
|
|
|
@@ -377,12 +381,6 @@ static u64 __get_spte_lockless(u64 *sptep)
|
|
{
|
|
return ACCESS_ONCE(*sptep);
|
|
}
|
|
-
|
|
-static bool __check_direct_spte_mmio_pf(u64 spte)
|
|
-{
|
|
- /* It is valid if the spte is zapped. */
|
|
- return spte == 0ull;
|
|
-}
|
|
#else
|
|
union split_spte {
|
|
struct {
|
|
@@ -498,23 +496,6 @@ retry:
|
|
|
|
return spte.spte;
|
|
}
|
|
-
|
|
-static bool __check_direct_spte_mmio_pf(u64 spte)
|
|
-{
|
|
- union split_spte sspte = (union split_spte)spte;
|
|
- u32 high_mmio_mask = shadow_mmio_mask >> 32;
|
|
-
|
|
- /* It is valid if the spte is zapped. */
|
|
- if (spte == 0ull)
|
|
- return true;
|
|
-
|
|
- /* It is valid if the spte is being zapped. */
|
|
- if (sspte.spte_low == 0ull &&
|
|
- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
|
|
- return true;
|
|
-
|
|
- return false;
|
|
-}
|
|
#endif
|
|
|
|
static bool spte_is_locklessly_modifiable(u64 spte)
|
|
@@ -3157,7 +3138,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
|
|
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
|
|
return;
|
|
|
|
- vcpu_clear_mmio_info(vcpu, ~0ul);
|
|
+ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
|
|
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
|
|
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
|
|
hpa_t root = vcpu->arch.mmu.root_hpa;
|
|
@@ -3211,21 +3192,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
|
|
return vcpu_match_mmio_gva(vcpu, addr);
|
|
}
|
|
|
|
-
|
|
-/*
|
|
- * On direct hosts, the last spte is only allows two states
|
|
- * for mmio page fault:
|
|
- * - It is the mmio spte
|
|
- * - It is zapped or it is being zapped.
|
|
- *
|
|
- * This function completely checks the spte when the last spte
|
|
- * is not the mmio spte.
|
|
- */
|
|
-static bool check_direct_spte_mmio_pf(u64 spte)
|
|
-{
|
|
- return __check_direct_spte_mmio_pf(spte);
|
|
-}
|
|
-
|
|
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
|
|
{
|
|
struct kvm_shadow_walk_iterator iterator;
|
|
@@ -3268,13 +3234,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
|
|
}
|
|
|
|
/*
|
|
- * It's ok if the gva is remapped by other cpus on shadow guest,
|
|
- * it's a BUG if the gfn is not a mmio page.
|
|
- */
|
|
- if (direct && !check_direct_spte_mmio_pf(spte))
|
|
- return RET_MMIO_PF_BUG;
|
|
-
|
|
- /*
|
|
* If the page table is zapped by other cpus, let CPU fault again on
|
|
* the address.
|
|
*/
|
|
@@ -4074,7 +4033,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
|
|
++vcpu->kvm->stat.mmu_pte_write;
|
|
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
|
|
|
|
- mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
|
|
+ mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
|
|
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
|
|
if (detect_write_misaligned(sp, gpa, bytes) ||
|
|
detect_write_flooding(sp)) {
|
|
@@ -4379,8 +4338,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
|
|
* The very rare case: if the generation-number is round,
|
|
* zap all shadow pages.
|
|
*/
|
|
- if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
|
|
- printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
|
|
+ if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
|
|
+ printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
|
|
kvm_mmu_invalidate_zap_all_pages(kvm);
|
|
}
|
|
}
|
|
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
|
|
index 2de1bc0..0746334 100644
|
|
--- a/arch/x86/kvm/svm.c
|
|
+++ b/arch/x86/kvm/svm.c
|
|
@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct vcpu_svm *svm = to_svm(vcpu);
|
|
|
|
- if (svm->vmcb->control.next_rip != 0)
|
|
+ if (svm->vmcb->control.next_rip != 0) {
|
|
+ WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
|
|
svm->next_rip = svm->vmcb->control.next_rip;
|
|
+ }
|
|
|
|
if (!svm->next_rip) {
|
|
if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
|
|
@@ -3213,7 +3215,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
|
|
msr.host_initiated = false;
|
|
|
|
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
|
|
- if (svm_set_msr(&svm->vcpu, &msr)) {
|
|
+ if (kvm_set_msr(&svm->vcpu, &msr)) {
|
|
trace_kvm_msr_write_ex(ecx, data);
|
|
kvm_inject_gp(&svm->vcpu, 0);
|
|
} else {
|
|
@@ -3495,9 +3497,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
|
|
|
|
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|
|
|| !svm_exit_handlers[exit_code]) {
|
|
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
|
|
- kvm_run->hw.hardware_exit_reason = exit_code;
|
|
- return 0;
|
|
+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
|
|
+ kvm_queue_exception(vcpu, UD_VECTOR);
|
|
+ return 1;
|
|
}
|
|
|
|
return svm_exit_handlers[exit_code](svm);
|
|
@@ -4246,7 +4248,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
|
|
break;
|
|
}
|
|
|
|
- vmcb->control.next_rip = info->next_rip;
|
|
+ /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
|
|
+ if (static_cpu_has(X86_FEATURE_NRIPS))
|
|
+ vmcb->control.next_rip = info->next_rip;
|
|
vmcb->control.exit_code = icpt_info.exit_code;
|
|
vmexit = nested_svm_exit_handled(svm);
|
|
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index 3927528..80c22a3 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -441,6 +441,7 @@ struct vcpu_vmx {
|
|
#endif
|
|
int gs_ldt_reload_needed;
|
|
int fs_reload_needed;
|
|
+ unsigned long vmcs_host_cr4; /* May not match real cr4 */
|
|
} host_state;
|
|
struct {
|
|
int vm86_active;
|
|
@@ -2320,12 +2321,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
|
|
nested_vmx_secondary_ctls_low = 0;
|
|
nested_vmx_secondary_ctls_high &=
|
|
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
SECONDARY_EXEC_WBINVD_EXITING;
|
|
|
|
if (enable_ept) {
|
|
/* nested EPT: emulate EPT also to L1 */
|
|
- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
|
|
+ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
|
|
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
|
|
nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
|
|
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
|
|
VMX_EPT_INVEPT_BIT;
|
|
@@ -2582,12 +2583,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|
default:
|
|
msr = find_msr_entry(vmx, msr_index);
|
|
if (msr) {
|
|
+ u64 old_msr_data = msr->data;
|
|
msr->data = data;
|
|
if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
|
|
preempt_disable();
|
|
- kvm_set_shared_msr(msr->index, msr->data,
|
|
- msr->mask);
|
|
+ ret = kvm_set_shared_msr(msr->index, msr->data,
|
|
+ msr->mask);
|
|
preempt_enable();
|
|
+ if (ret)
|
|
+ msr->data = old_msr_data;
|
|
}
|
|
break;
|
|
}
|
|
@@ -4162,11 +4166,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
|
|
u32 low32, high32;
|
|
unsigned long tmpl;
|
|
struct desc_ptr dt;
|
|
+ unsigned long cr4;
|
|
|
|
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
|
|
- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
|
|
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
|
|
|
|
+ /* Save the most likely value for this task's CR4 in the VMCS. */
|
|
+ cr4 = read_cr4();
|
|
+ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
|
|
+ vmx->host_state.vmcs_host_cr4 = cr4;
|
|
+
|
|
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
|
|
#ifdef CONFIG_X86_64
|
|
/*
|
|
@@ -5169,7 +5178,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
|
|
msr.data = data;
|
|
msr.index = ecx;
|
|
msr.host_initiated = false;
|
|
- if (vmx_set_msr(vcpu, &msr) != 0) {
|
|
+ if (kvm_set_msr(vcpu, &msr) != 0) {
|
|
trace_kvm_msr_write_ex(ecx, data);
|
|
kvm_inject_gp(vcpu, 0);
|
|
return 1;
|
|
@@ -6441,6 +6450,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
|
|
return 1;
|
|
}
|
|
|
|
+static int handle_invvpid(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ kvm_queue_exception(vcpu, UD_VECTOR);
|
|
+ return 1;
|
|
+}
|
|
+
|
|
/*
|
|
* The exit handlers return 1 if the exit was handled fully and guest execution
|
|
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
|
|
@@ -6486,6 +6501,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
|
|
[EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
|
|
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
|
|
[EXIT_REASON_INVEPT] = handle_invept,
|
|
+ [EXIT_REASON_INVVPID] = handle_invvpid,
|
|
};
|
|
|
|
static const int kvm_vmx_max_exit_handlers =
|
|
@@ -6719,7 +6735,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
|
|
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
|
|
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
|
|
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
|
|
- case EXIT_REASON_INVEPT:
|
|
+ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
|
|
/*
|
|
* VMX instructions trap unconditionally. This allows L1 to
|
|
* emulate them for its L2 guest, i.e., allows 3-level nesting!
|
|
@@ -6884,10 +6900,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
|
|
&& kvm_vmx_exit_handlers[exit_reason])
|
|
return kvm_vmx_exit_handlers[exit_reason](vcpu);
|
|
else {
|
|
- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
|
|
- vcpu->run->hw.hardware_exit_reason = exit_reason;
|
|
+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
|
|
+ kvm_queue_exception(vcpu, UD_VECTOR);
|
|
+ return 1;
|
|
}
|
|
- return 0;
|
|
}
|
|
|
|
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
|
|
@@ -7186,7 +7202,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
|
|
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
- unsigned long debugctlmsr;
|
|
+ unsigned long debugctlmsr, cr4;
|
|
|
|
/* Record the guest's net vcpu time for enforced NMI injections. */
|
|
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
|
|
@@ -7207,6 +7223,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
|
|
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
|
|
|
|
+ cr4 = read_cr4();
|
|
+ if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
|
|
+ vmcs_writel(HOST_CR4, cr4);
|
|
+ vmx->host_state.vmcs_host_cr4 = cr4;
|
|
+ }
|
|
+
|
|
/* When single-stepping over STI and MOV SS, we must clear the
|
|
* corresponding interruptibility bits in the guest state. Otherwise
|
|
* vmentry fails as it then expects bit 14 (BS) in pending debug
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index 8fbd1a7..1777f89 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -225,20 +225,25 @@ static void kvm_shared_msr_cpu_online(void)
|
|
shared_msr_update(i, shared_msrs_global.msrs[i]);
|
|
}
|
|
|
|
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
|
|
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
|
|
{
|
|
unsigned int cpu = smp_processor_id();
|
|
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
|
|
+ int err;
|
|
|
|
if (((value ^ smsr->values[slot].curr) & mask) == 0)
|
|
- return;
|
|
+ return 0;
|
|
smsr->values[slot].curr = value;
|
|
- wrmsrl(shared_msrs_global.msrs[slot], value);
|
|
+ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
|
|
+ if (err)
|
|
+ return 1;
|
|
+
|
|
if (!smsr->registered) {
|
|
smsr->urn.on_user_return = kvm_on_user_return;
|
|
user_return_notifier_register(&smsr->urn);
|
|
smsr->registered = true;
|
|
}
|
|
+ return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
|
|
|
|
@@ -946,7 +951,6 @@ void kvm_enable_efer_bits(u64 mask)
|
|
}
|
|
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
|
|
|
|
-
|
|
/*
|
|
* Writes msr value into into the appropriate "register".
|
|
* Returns 0 on success, non-0 otherwise.
|
|
@@ -954,8 +958,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
|
|
*/
|
|
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|
{
|
|
+ switch (msr->index) {
|
|
+ case MSR_FS_BASE:
|
|
+ case MSR_GS_BASE:
|
|
+ case MSR_KERNEL_GS_BASE:
|
|
+ case MSR_CSTAR:
|
|
+ case MSR_LSTAR:
|
|
+ if (is_noncanonical_address(msr->data))
|
|
+ return 1;
|
|
+ break;
|
|
+ case MSR_IA32_SYSENTER_EIP:
|
|
+ case MSR_IA32_SYSENTER_ESP:
|
|
+ /*
|
|
+ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
|
|
+ * non-canonical address is written on Intel but not on
|
|
+ * AMD (which ignores the top 32-bits, because it does
|
|
+ * not implement 64-bit SYSENTER).
|
|
+ *
|
|
+ * 64-bit code should hence be able to write a non-canonical
|
|
+ * value on AMD. Making the address canonical ensures that
|
|
+ * vmentry does not fail on Intel after writing a non-canonical
|
|
+ * value, and that something deterministic happens if the guest
|
|
+ * invokes 64-bit SYSENTER.
|
|
+ */
|
|
+ msr->data = get_canonical(msr->data);
|
|
+ }
|
|
return kvm_x86_ops->set_msr(vcpu, msr);
|
|
}
|
|
+EXPORT_SYMBOL_GPL(kvm_set_msr);
|
|
|
|
/*
|
|
* Adapt set_msr() to msr_io()'s calling convention
|
|
@@ -1177,21 +1207,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
|
|
{
|
|
#ifdef CONFIG_X86_64
|
|
bool vcpus_matched;
|
|
- bool do_request = false;
|
|
struct kvm_arch *ka = &vcpu->kvm->arch;
|
|
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
|
|
|
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
|
|
atomic_read(&vcpu->kvm->online_vcpus));
|
|
|
|
- if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
|
|
- if (!ka->use_master_clock)
|
|
- do_request = 1;
|
|
-
|
|
- if (!vcpus_matched && ka->use_master_clock)
|
|
- do_request = 1;
|
|
-
|
|
- if (do_request)
|
|
+ /*
|
|
+ * Once the masterclock is enabled, always perform request in
|
|
+ * order to update it.
|
|
+ *
|
|
+ * In order to enable masterclock, the host clocksource must be TSC
|
|
+ * and the vcpus need to have matched TSCs. When that happens,
|
|
+ * perform request to enable masterclock.
|
|
+ */
|
|
+ if (ka->use_master_clock ||
|
|
+ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
|
|
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
|
|
|
|
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
|
|
@@ -4881,7 +4912,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
|
|
|
|
++vcpu->stat.insn_emulation_fail;
|
|
trace_kvm_emulate_insn_failed(vcpu);
|
|
- if (!is_guest_mode(vcpu)) {
|
|
+ if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
|
|
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
|
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
|
|
vcpu->run->internal.ndata = 0;
|
|
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
|
|
index 8da5823..21ea4fc 100644
|
|
--- a/arch/x86/kvm/x86.h
|
|
+++ b/arch/x86/kvm/x86.h
|
|
@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
|
|
vcpu->arch.mmio_gva = gva & PAGE_MASK;
|
|
vcpu->arch.access = access;
|
|
vcpu->arch.mmio_gfn = gfn;
|
|
+ vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
|
|
+}
|
|
+
|
|
+static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
|
|
}
|
|
|
|
/*
|
|
- * Clear the mmio cache info for the given gva,
|
|
- * specially, if gva is ~0ul, we clear all mmio cache info.
|
|
+ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
|
|
+ * clear all mmio cache info.
|
|
*/
|
|
+#define MMIO_GVA_ANY (~(gva_t)0)
|
|
+
|
|
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
|
|
{
|
|
- if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
|
|
+ if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
|
|
return;
|
|
|
|
vcpu->arch.mmio_gva = 0;
|
|
@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
|
|
|
|
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
|
|
{
|
|
- if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
|
|
+ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
|
|
+ vcpu->arch.mmio_gva == (gva & PAGE_MASK))
|
|
return true;
|
|
|
|
return false;
|
|
@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
|
|
|
|
static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
|
|
{
|
|
- if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
|
|
+ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
|
|
+ vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
|
|
return true;
|
|
|
|
return false;
|
|
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
|
|
index 0002a3a..3620928 100644
|
|
--- a/arch/x86/mm/dump_pagetables.c
|
|
+++ b/arch/x86/mm/dump_pagetables.c
|
|
@@ -30,11 +30,13 @@ struct pg_state {
|
|
unsigned long start_address;
|
|
unsigned long current_address;
|
|
const struct addr_marker *marker;
|
|
+ unsigned long lines;
|
|
};
|
|
|
|
struct addr_marker {
|
|
unsigned long start_address;
|
|
const char *name;
|
|
+ unsigned long max_lines;
|
|
};
|
|
|
|
/* indices for address_markers; keep sync'd w/ address_markers below */
|
|
@@ -45,6 +47,7 @@ enum address_markers_idx {
|
|
LOW_KERNEL_NR,
|
|
VMALLOC_START_NR,
|
|
VMEMMAP_START_NR,
|
|
+ ESPFIX_START_NR,
|
|
HIGH_KERNEL_NR,
|
|
MODULES_VADDR_NR,
|
|
MODULES_END_NR,
|
|
@@ -67,6 +70,7 @@ static struct addr_marker address_markers[] = {
|
|
{ PAGE_OFFSET, "Low Kernel Mapping" },
|
|
{ VMALLOC_START, "vmalloc() Area" },
|
|
{ VMEMMAP_START, "Vmemmap" },
|
|
+ { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
|
|
{ __START_KERNEL_map, "High Kernel Mapping" },
|
|
{ MODULES_VADDR, "Modules" },
|
|
{ MODULES_END, "End Modules" },
|
|
@@ -163,7 +167,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
|
|
pgprot_t new_prot, int level)
|
|
{
|
|
pgprotval_t prot, cur;
|
|
- static const char units[] = "KMGTPE";
|
|
+ static const char units[] = "BKMGTPE";
|
|
|
|
/*
|
|
* If we have a "break" in the series, we need to flush the state that
|
|
@@ -178,6 +182,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
|
|
st->current_prot = new_prot;
|
|
st->level = level;
|
|
st->marker = address_markers;
|
|
+ st->lines = 0;
|
|
seq_printf(m, "---[ %s ]---\n", st->marker->name);
|
|
} else if (prot != cur || level != st->level ||
|
|
st->current_address >= st->marker[1].start_address) {
|
|
@@ -188,17 +193,21 @@ static void note_page(struct seq_file *m, struct pg_state *st,
|
|
/*
|
|
* Now print the actual finished series
|
|
*/
|
|
- seq_printf(m, "0x%0*lx-0x%0*lx ",
|
|
- width, st->start_address,
|
|
- width, st->current_address);
|
|
-
|
|
- delta = (st->current_address - st->start_address) >> 10;
|
|
- while (!(delta & 1023) && unit[1]) {
|
|
- delta >>= 10;
|
|
- unit++;
|
|
+ if (!st->marker->max_lines ||
|
|
+ st->lines < st->marker->max_lines) {
|
|
+ seq_printf(m, "0x%0*lx-0x%0*lx ",
|
|
+ width, st->start_address,
|
|
+ width, st->current_address);
|
|
+
|
|
+ delta = (st->current_address - st->start_address) >> 10;
|
|
+ while (!(delta & 1023) && unit[1]) {
|
|
+ delta >>= 10;
|
|
+ unit++;
|
|
+ }
|
|
+ seq_printf(m, "%9lu%c ", delta, *unit);
|
|
+ printk_prot(m, st->current_prot, st->level);
|
|
}
|
|
- seq_printf(m, "%9lu%c ", delta, *unit);
|
|
- printk_prot(m, st->current_prot, st->level);
|
|
+ st->lines++;
|
|
|
|
/*
|
|
* We print markers for special areas of address space,
|
|
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
|
|
index a10c8c7..ebc551c 100644
|
|
--- a/arch/x86/mm/fault.c
|
|
+++ b/arch/x86/mm/fault.c
|
|
@@ -833,11 +833,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
|
|
unsigned int fault)
|
|
{
|
|
struct task_struct *tsk = current;
|
|
- struct mm_struct *mm = tsk->mm;
|
|
int code = BUS_ADRERR;
|
|
|
|
- up_read(&mm->mmap_sem);
|
|
-
|
|
/* Kernel mode? Handle exceptions or die: */
|
|
if (!(error_code & PF_USER)) {
|
|
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
|
|
@@ -868,7 +865,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
|
|
unsigned long address, unsigned int fault)
|
|
{
|
|
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
|
|
- up_read(¤t->mm->mmap_sem);
|
|
no_context(regs, error_code, address, 0, 0);
|
|
return;
|
|
}
|
|
@@ -876,14 +872,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
|
|
if (fault & VM_FAULT_OOM) {
|
|
/* Kernel mode? Handle exceptions or die: */
|
|
if (!(error_code & PF_USER)) {
|
|
- up_read(¤t->mm->mmap_sem);
|
|
no_context(regs, error_code, address,
|
|
SIGSEGV, SEGV_MAPERR);
|
|
return;
|
|
}
|
|
|
|
- up_read(¤t->mm->mmap_sem);
|
|
-
|
|
/*
|
|
* We ran out of memory, call the OOM killer, and return the
|
|
* userspace (which will retry the fault, or kill us if we got
|
|
@@ -894,6 +887,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
|
|
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
|
|
VM_FAULT_HWPOISON_LARGE))
|
|
do_sigbus(regs, error_code, address, fault);
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ bad_area_nosemaphore(regs, error_code, address);
|
|
else
|
|
BUG();
|
|
}
|
|
@@ -1216,6 +1211,7 @@ good_area:
|
|
return;
|
|
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
+ up_read(&mm->mmap_sem);
|
|
mm_fault_error(regs, error_code, address, fault);
|
|
return;
|
|
}
|
|
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
|
|
index 207d9aef..448ee89 100644
|
|
--- a/arch/x86/mm/gup.c
|
|
+++ b/arch/x86/mm/gup.c
|
|
@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
|
|
*/
|
|
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
|
|
return 0;
|
|
- if (unlikely(pmd_large(pmd))) {
|
|
+ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
|
|
/*
|
|
* NUMA hinting faults need to be handled in the GUP
|
|
* slowpath for accounting purposes and so that they
|
|
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
|
|
index 8b977eb..006cc91 100644
|
|
--- a/arch/x86/mm/hugetlbpage.c
|
|
+++ b/arch/x86/mm/hugetlbpage.c
|
|
@@ -66,9 +66,15 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
+/*
|
|
+ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
|
|
+ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
|
|
+ * Otherwise, returns 0.
|
|
+ */
|
|
int pmd_huge(pmd_t pmd)
|
|
{
|
|
- return !!(pmd_val(pmd) & _PAGE_PSE);
|
|
+ return !pmd_none(pmd) &&
|
|
+ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
|
|
}
|
|
|
|
int pud_huge(pud_t pud)
|
|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
|
|
index f35c66c..2308a40 100644
|
|
--- a/arch/x86/mm/init_64.c
|
|
+++ b/arch/x86/mm/init_64.c
|
|
@@ -1110,7 +1110,7 @@ void mark_rodata_ro(void)
|
|
unsigned long end = (unsigned long) &__end_rodata_hpage_align;
|
|
unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
|
|
unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
|
|
- unsigned long all_end = PFN_ALIGN(&_end);
|
|
+ unsigned long all_end;
|
|
|
|
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
|
|
(end - start) >> 10);
|
|
@@ -1121,7 +1121,16 @@ void mark_rodata_ro(void)
|
|
/*
|
|
* The rodata/data/bss/brk section (but not the kernel text!)
|
|
* should also be not-executable.
|
|
+ *
|
|
+ * We align all_end to PMD_SIZE because the existing mapping
|
|
+ * is a full PMD. If we would align _brk_end to PAGE_SIZE we
|
|
+ * split the PMD and the reminder between _brk_end and the end
|
|
+ * of the PMD will remain mapped executable.
|
|
+ *
|
|
+ * Any PMD which was setup after the one which covers _brk_end
|
|
+ * has been zapped already via cleanup_highmem().
|
|
*/
|
|
+ all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
|
|
set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
|
|
|
|
rodata_test();
|
|
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
|
|
index 25e7e13..3601ff2 100644
|
|
--- a/arch/x86/mm/mmap.c
|
|
+++ b/arch/x86/mm/mmap.c
|
|
@@ -35,12 +35,12 @@ struct __read_mostly va_alignment va_align = {
|
|
.flags = -1,
|
|
};
|
|
|
|
-static unsigned int stack_maxrandom_size(void)
|
|
+static unsigned long stack_maxrandom_size(void)
|
|
{
|
|
- unsigned int max = 0;
|
|
+ unsigned long max = 0;
|
|
if ((current->flags & PF_RANDOMIZE) &&
|
|
!(current->personality & ADDR_NO_RANDOMIZE)) {
|
|
- max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
|
|
+ max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
|
|
}
|
|
|
|
return max;
|
|
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
|
|
index a348868..fed892d 100644
|
|
--- a/arch/x86/mm/pageattr.c
|
|
+++ b/arch/x86/mm/pageattr.c
|
|
@@ -405,7 +405,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
|
|
psize = page_level_size(level);
|
|
pmask = page_level_mask(level);
|
|
offset = virt_addr & ~pmask;
|
|
- phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
|
|
+ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
|
|
return (phys_addr | offset);
|
|
}
|
|
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
|
|
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
|
|
index c96314a..0004ac7 100644
|
|
--- a/arch/x86/mm/pgtable.c
|
|
+++ b/arch/x86/mm/pgtable.c
|
|
@@ -399,13 +399,20 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
|
|
int ptep_clear_flush_young(struct vm_area_struct *vma,
|
|
unsigned long address, pte_t *ptep)
|
|
{
|
|
- int young;
|
|
-
|
|
- young = ptep_test_and_clear_young(vma, address, ptep);
|
|
- if (young)
|
|
- flush_tlb_page(vma, address);
|
|
-
|
|
- return young;
|
|
+ /*
|
|
+ * On x86 CPUs, clearing the accessed bit without a TLB flush
|
|
+ * doesn't cause data corruption. [ It could cause incorrect
|
|
+ * page aging and the (mistaken) reclaim of hot pages, but the
|
|
+ * chance of that should be relatively low. ]
|
|
+ *
|
|
+ * So as a performance optimization don't flush the TLB when
|
|
+ * clearing the accessed bit, it will eventually be flushed by
|
|
+ * a context switch or a VM operation anyway. [ In the rare
|
|
+ * event of it not getting flushed for a long time the delay
|
|
+ * shouldn't really matter because there's no real memory
|
|
+ * pressure for swapout to react to. ]
|
|
+ */
|
|
+ return ptep_test_and_clear_young(vma, address, ptep);
|
|
}
|
|
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
|
|
index af2d431..1fed139 100644
|
|
--- a/arch/x86/net/bpf_jit_comp.c
|
|
+++ b/arch/x86/net/bpf_jit_comp.c
|
|
@@ -211,7 +211,12 @@ void bpf_jit_compile(struct sk_filter *fp)
|
|
}
|
|
cleanup_addr = proglen; /* epilogue address */
|
|
|
|
- for (pass = 0; pass < 10; pass++) {
|
|
+ /* JITed image shrinks with every pass and the loop iterates
|
|
+ * until the image stops shrinking. Very large bpf programs
|
|
+ * may converge on the last pass. In such case do one more
|
|
+ * pass to emit the final image
|
|
+ */
|
|
+ for (pass = 0; pass < 10 || image; pass++) {
|
|
u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
|
|
/* no prologue/epilogue for trivial filters (RET something) */
|
|
proglen = 0;
|
|
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
|
|
index 4f25ec0..bf00138 100644
|
|
--- a/arch/x86/pci/acpi.c
|
|
+++ b/arch/x86/pci/acpi.c
|
|
@@ -84,6 +84,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
|
|
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
|
|
},
|
|
},
|
|
+ /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
|
|
+ /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
|
|
+ {
|
|
+ .callback = set_use_crs,
|
|
+ .ident = "Foxconn K8M890-8237A",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
|
|
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
|
|
+ },
|
|
+ },
|
|
|
|
/* Now for the blacklist.. */
|
|
|
|
@@ -124,8 +135,10 @@ void __init pci_acpi_crs_quirks(void)
|
|
{
|
|
int year;
|
|
|
|
- if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
|
|
- pci_use_crs = false;
|
|
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
|
|
+ if (iomem_resource.end <= 0xffffffff)
|
|
+ pci_use_crs = false;
|
|
+ }
|
|
|
|
dmi_check_system(pci_crs_quirks);
|
|
|
|
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
|
|
index 981c2db..88f143d 100644
|
|
--- a/arch/x86/pci/common.c
|
|
+++ b/arch/x86/pci/common.c
|
|
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
|
|
},
|
|
},
|
|
+ {
|
|
+ .callback = set_scan_all,
|
|
+ .ident = "Stratus/NEC ftServer",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ .callback = set_scan_all,
|
|
+ .ident = "Stratus/NEC ftServer",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
|
|
+ },
|
|
+ },
|
|
{}
|
|
};
|
|
|
|
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
|
|
index db6b1ab..96a159a 100644
|
|
--- a/arch/x86/pci/i386.c
|
|
+++ b/arch/x86/pci/i386.c
|
|
@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res,
|
|
return start;
|
|
if (start & 0x300)
|
|
start = (start + 0x3ff) & ~0x3ff;
|
|
+ } else if (res->flags & IORESOURCE_MEM) {
|
|
+ /* The low 1MB range is reserved for ISA cards */
|
|
+ if (start < BIOS_END)
|
|
+ start = BIOS_END;
|
|
}
|
|
return start;
|
|
}
|
|
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
|
|
index 7d28c88..291226b 100644
|
|
--- a/arch/x86/power/hibernate_32.c
|
|
+++ b/arch/x86/power/hibernate_32.c
|
|
@@ -13,13 +13,11 @@
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/mmzone.h>
|
|
+#include <asm/sections.h>
|
|
|
|
/* Defined in hibernate_asm_32.S */
|
|
extern int restore_image(void);
|
|
|
|
-/* References to section boundaries */
|
|
-extern const void __nosave_begin, __nosave_end;
|
|
-
|
|
/* Pointer to the temporary resume page tables */
|
|
pgd_t *resume_pg_dir;
|
|
|
|
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
|
|
index 304fca2..2276238 100644
|
|
--- a/arch/x86/power/hibernate_64.c
|
|
+++ b/arch/x86/power/hibernate_64.c
|
|
@@ -17,11 +17,9 @@
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/mtrr.h>
|
|
+#include <asm/sections.h>
|
|
#include <asm/suspend.h>
|
|
|
|
-/* References to section boundaries */
|
|
-extern __visible const void __nosave_begin, __nosave_end;
|
|
-
|
|
/* Defined in hibernate_asm_64.S */
|
|
extern asmlinkage int restore_image(void);
|
|
|
|
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
|
|
index 531d426..bd16d6c 100644
|
|
--- a/arch/x86/um/sys_call_table_32.c
|
|
+++ b/arch/x86/um/sys_call_table_32.c
|
|
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
|
|
|
|
extern asmlinkage void sys_ni_syscall(void);
|
|
|
|
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
|
|
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
|
|
/*
|
|
* Smells like a compiler bug -- it doesn't work
|
|
* when the & below is removed.
|
|
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
|
|
index f2f0723..9578308 100644
|
|
--- a/arch/x86/um/sys_call_table_64.c
|
|
+++ b/arch/x86/um/sys_call_table_64.c
|
|
@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void);
|
|
|
|
extern void sys_ni_syscall(void);
|
|
|
|
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
|
|
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
|
|
/*
|
|
* Smells like a compiler bug -- it doesn't work
|
|
* when the & below is removed.
|
|
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
|
|
index f1d633a..d6bfb87 100644
|
|
--- a/arch/x86/vdso/vdso32-setup.c
|
|
+++ b/arch/x86/vdso/vdso32-setup.c
|
|
@@ -41,7 +41,6 @@ enum {
|
|
#ifdef CONFIG_X86_64
|
|
#define vdso_enabled sysctl_vsyscall32
|
|
#define arch_setup_additional_pages syscall32_setup_pages
|
|
-extern int sysctl_ldt16;
|
|
#endif
|
|
|
|
/*
|
|
@@ -381,13 +380,6 @@ static struct ctl_table abi_table2[] = {
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec
|
|
},
|
|
- {
|
|
- .procname = "ldt16",
|
|
- .data = &sysctl_ldt16,
|
|
- .maxlen = sizeof(int),
|
|
- .mode = 0644,
|
|
- .proc_handler = proc_dointvec
|
|
- },
|
|
{}
|
|
};
|
|
|
|
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
|
|
index 31776d0..d7ec4e2 100644
|
|
--- a/arch/x86/vdso/vdso32/sigreturn.S
|
|
+++ b/arch/x86/vdso/vdso32/sigreturn.S
|
|
@@ -17,6 +17,7 @@
|
|
.text
|
|
.globl __kernel_sigreturn
|
|
.type __kernel_sigreturn,@function
|
|
+ nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
|
|
ALIGN
|
|
__kernel_sigreturn:
|
|
.LSTART_sigreturn:
|
|
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
|
|
index 431e875..ab6ba35 100644
|
|
--- a/arch/x86/vdso/vma.c
|
|
+++ b/arch/x86/vdso/vma.c
|
|
@@ -117,30 +117,45 @@ subsys_initcall(init_vdso);
|
|
|
|
struct linux_binprm;
|
|
|
|
-/* Put the vdso above the (randomized) stack with another randomized offset.
|
|
- This way there is no hole in the middle of address space.
|
|
- To save memory make sure it is still in the same PTE as the stack top.
|
|
- This doesn't give that many random bits */
|
|
+/*
|
|
+ * Put the vdso above the (randomized) stack with another randomized
|
|
+ * offset. This way there is no hole in the middle of address space.
|
|
+ * To save memory make sure it is still in the same PTE as the stack
|
|
+ * top. This doesn't give that many random bits.
|
|
+ *
|
|
+ * Note that this algorithm is imperfect: the distribution of the vdso
|
|
+ * start address within a PMD is biased toward the end.
|
|
+ *
|
|
+ * Only used for the 64-bit and x32 vdsos.
|
|
+ */
|
|
static unsigned long vdso_addr(unsigned long start, unsigned len)
|
|
{
|
|
unsigned long addr, end;
|
|
unsigned offset;
|
|
- end = (start + PMD_SIZE - 1) & PMD_MASK;
|
|
+
|
|
+ /*
|
|
+ * Round up the start address. It can start out unaligned as a result
|
|
+ * of stack start randomization.
|
|
+ */
|
|
+ start = PAGE_ALIGN(start);
|
|
+
|
|
+ /* Round the lowest possible end address up to a PMD boundary. */
|
|
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
|
|
if (end >= TASK_SIZE_MAX)
|
|
end = TASK_SIZE_MAX;
|
|
end -= len;
|
|
- /* This loses some more bits than a modulo, but is cheaper */
|
|
- offset = get_random_int() & (PTRS_PER_PTE - 1);
|
|
- addr = start + (offset << PAGE_SHIFT);
|
|
- if (addr >= end)
|
|
- addr = end;
|
|
+
|
|
+ if (end > start) {
|
|
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
|
|
+ addr = start + (offset << PAGE_SHIFT);
|
|
+ } else {
|
|
+ addr = start;
|
|
+ }
|
|
|
|
/*
|
|
- * page-align it here so that get_unmapped_area doesn't
|
|
- * align it wrongfully again to the next page. addr can come in 4K
|
|
- * unaligned here as a result of stack start randomization.
|
|
+ * Forcibly align the final address in case we have a hardware
|
|
+ * issue that requires alignment for performance reasons.
|
|
*/
|
|
- addr = PAGE_ALIGN(addr);
|
|
addr = align_vdso_addr(addr);
|
|
|
|
return addr;
|
|
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
|
|
index 201d09a..2302f10 100644
|
|
--- a/arch/x86/xen/enlighten.c
|
|
+++ b/arch/x86/xen/enlighten.c
|
|
@@ -481,6 +481,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
|
|
pte_t pte;
|
|
unsigned long pfn;
|
|
struct page *page;
|
|
+ unsigned char dummy;
|
|
|
|
ptep = lookup_address((unsigned long)v, &level);
|
|
BUG_ON(ptep == NULL);
|
|
@@ -490,6 +491,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
|
|
|
|
pte = pfn_pte(pfn, prot);
|
|
|
|
+ /*
|
|
+ * Careful: update_va_mapping() will fail if the virtual address
|
|
+ * we're poking isn't populated in the page tables. We don't
|
|
+ * need to worry about the direct map (that's always in the page
|
|
+ * tables), but we need to be careful about vmap space. In
|
|
+ * particular, the top level page table can lazily propagate
|
|
+ * entries between processes, so if we've switched mms since we
|
|
+ * vmapped the target in the first place, we might not have the
|
|
+ * top-level page table entry populated.
|
|
+ *
|
|
+ * We disable preemption because we want the same mm active when
|
|
+ * we probe the target and when we issue the hypercall. We'll
|
|
+ * have the same nominal mm, but if we're a kernel thread, lazy
|
|
+ * mm dropping could change our pgd.
|
|
+ *
|
|
+ * Out of an abundance of caution, this uses __get_user() to fault
|
|
+ * in the target address just in case there's some obscure case
|
|
+ * in which the target address isn't readable.
|
|
+ */
|
|
+
|
|
+ preempt_disable();
|
|
+
|
|
+ pagefault_disable(); /* Avoid warnings due to being atomic. */
|
|
+ __get_user(dummy, (unsigned char __user __force *)v);
|
|
+ pagefault_enable();
|
|
+
|
|
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
|
|
BUG();
|
|
|
|
@@ -501,6 +528,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
|
|
BUG();
|
|
} else
|
|
kmap_flush_unused();
|
|
+
|
|
+ preempt_enable();
|
|
}
|
|
|
|
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
|
|
@@ -508,6 +537,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
|
|
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
|
|
int i;
|
|
|
|
+ /*
|
|
+ * We need to mark the all aliases of the LDT pages RO. We
|
|
+ * don't need to call vm_flush_aliases(), though, since that's
|
|
+ * only responsible for flushing aliases out the TLBs, not the
|
|
+ * page tables, and Xen will flush the TLB for us if needed.
|
|
+ *
|
|
+ * To avoid confusing future readers: none of this is necessary
|
|
+ * to load the LDT. The hypervisor only checks this when the
|
|
+ * LDT is faulted in due to subsequent descriptor access.
|
|
+ */
|
|
+
|
|
for(i = 0; i < entries; i += entries_per_page)
|
|
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
|
|
}
|
|
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
|
|
index c985835..5b406fc 100644
|
|
--- a/arch/x86/xen/grant-table.c
|
|
+++ b/arch/x86/xen/grant-table.c
|
|
@@ -134,6 +134,7 @@ static int __init xlated_setup_gnttab_pages(void)
|
|
{
|
|
struct page **pages;
|
|
xen_pfn_t *pfns;
|
|
+ void *vaddr;
|
|
int rc;
|
|
unsigned int i;
|
|
unsigned long nr_grant_frames = gnttab_max_grant_frames();
|
|
@@ -159,21 +160,20 @@ static int __init xlated_setup_gnttab_pages(void)
|
|
for (i = 0; i < nr_grant_frames; i++)
|
|
pfns[i] = page_to_pfn(pages[i]);
|
|
|
|
- rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
|
|
- &xen_auto_xlat_grant_frames.vaddr);
|
|
-
|
|
- if (rc) {
|
|
+ vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL);
|
|
+ if (!vaddr) {
|
|
pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
|
|
nr_grant_frames, rc);
|
|
free_xenballooned_pages(nr_grant_frames, pages);
|
|
kfree(pages);
|
|
kfree(pfns);
|
|
- return rc;
|
|
+ return -ENOMEM;
|
|
}
|
|
kfree(pages);
|
|
|
|
xen_auto_xlat_grant_frames.pfn = pfns;
|
|
xen_auto_xlat_grant_frames.count = nr_grant_frames;
|
|
+ xen_auto_xlat_grant_frames.vaddr = vaddr;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
|
|
index 2423ef0..c83da6f 100644
|
|
--- a/arch/x86/xen/mmu.c
|
|
+++ b/arch/x86/xen/mmu.c
|
|
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
|
|
*
|
|
* We can construct this by grafting the Xen provided pagetable into
|
|
* head_64.S's preconstructed pagetables. We copy the Xen L2's into
|
|
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
|
|
- * means that only the kernel has a physical mapping to start with -
|
|
- * but that's enough to get __va working. We need to fill in the rest
|
|
- * of the physical mapping once some sort of allocator has been set
|
|
- * up.
|
|
- * NOTE: for PVH, the page tables are native.
|
|
+ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
|
|
+ * kernel has a physical mapping to start with - but that's enough to
|
|
+ * get __va working. We need to fill in the rest of the physical
|
|
+ * mapping once some sort of allocator has been set up. NOTE: for
|
|
+ * PVH, the page tables are native.
|
|
*/
|
|
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
|
|
{
|
|
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
|
|
/* L3_i[0] -> level2_ident_pgt */
|
|
convert_pfn_mfn(level3_ident_pgt);
|
|
/* L3_k[510] -> level2_kernel_pgt
|
|
- * L3_i[511] -> level2_fixmap_pgt */
|
|
+ * L3_k[511] -> level2_fixmap_pgt */
|
|
convert_pfn_mfn(level3_kernel_pgt);
|
|
+
|
|
+ /* L3_k[511][506] -> level1_fixmap_pgt */
|
|
+ convert_pfn_mfn(level2_fixmap_pgt);
|
|
}
|
|
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
|
|
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
|
|
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
|
|
addr[1] = (unsigned long)l3;
|
|
addr[2] = (unsigned long)l2;
|
|
/* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
|
|
- * Both L4[272][0] and L4[511][511] have entries that point to the same
|
|
+ * Both L4[272][0] and L4[511][510] have entries that point to the same
|
|
* L2 (PMD) tables. Meaning that if you modify it in __va space
|
|
* it will be also modified in the __ka space! (But if you just
|
|
* modify the PMD table to point to other PTE's or none, then you
|
|
* are OK - which is what cleanup_highmap does) */
|
|
copy_page(level2_ident_pgt, l2);
|
|
- /* Graft it onto L4[511][511] */
|
|
+ /* Graft it onto L4[511][510] */
|
|
copy_page(level2_kernel_pgt, l2);
|
|
|
|
- /* Get [511][510] and graft that in level2_fixmap_pgt */
|
|
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
|
|
- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
|
|
- copy_page(level2_fixmap_pgt, l2);
|
|
- /* Note that we don't do anything with level1_fixmap_pgt which
|
|
- * we don't need. */
|
|
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
|
|
/* Make pagetable pieces RO */
|
|
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
|
|
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
|
|
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
|
|
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
|
|
|
|
/* Pin down new L4 */
|
|
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
|
|
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
|
|
index 0982233..a6a72ce 100644
|
|
--- a/arch/x86/xen/setup.c
|
|
+++ b/arch/x86/xen/setup.c
|
|
@@ -574,13 +574,7 @@ void xen_enable_syscall(void)
|
|
}
|
|
#endif /* CONFIG_X86_64 */
|
|
}
|
|
-void xen_enable_nmi(void)
|
|
-{
|
|
-#ifdef CONFIG_X86_64
|
|
- if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
|
|
- BUG();
|
|
-#endif
|
|
-}
|
|
+
|
|
void __init xen_pvmmu_arch_setup(void)
|
|
{
|
|
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
|
|
@@ -595,7 +589,6 @@ void __init xen_pvmmu_arch_setup(void)
|
|
|
|
xen_enable_sysenter();
|
|
xen_enable_syscall();
|
|
- xen_enable_nmi();
|
|
}
|
|
|
|
/* This function is not called for HVM domains */
|
|
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
|
|
index 7b78f88..5718b0b 100644
|
|
--- a/arch/x86/xen/time.c
|
|
+++ b/arch/x86/xen/time.c
|
|
@@ -444,7 +444,7 @@ void xen_setup_timer(int cpu)
|
|
|
|
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
|
|
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
|
|
- IRQF_FORCE_RESUME,
|
|
+ IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
|
|
name, NULL);
|
|
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
|
|
|
|
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
|
|
index c87ae7c..8879361 100644
|
|
--- a/arch/xtensa/Kconfig
|
|
+++ b/arch/xtensa/Kconfig
|
|
@@ -336,6 +336,36 @@ menu "Executable file formats"
|
|
|
|
source "fs/Kconfig.binfmt"
|
|
|
|
+config XTFPGA_LCD
|
|
+ bool "Enable XTFPGA LCD driver"
|
|
+ depends on XTENSA_PLATFORM_XTFPGA
|
|
+ default n
|
|
+ help
|
|
+ There's a 2x16 LCD on most of XTFPGA boards, kernel may output
|
|
+ progress messages there during bootup/shutdown. It may be useful
|
|
+ during board bringup.
|
|
+
|
|
+ If unsure, say N.
|
|
+
|
|
+config XTFPGA_LCD_BASE_ADDR
|
|
+ hex "XTFPGA LCD base address"
|
|
+ depends on XTFPGA_LCD
|
|
+ default "0x0d0c0000"
|
|
+ help
|
|
+ Base address of the LCD controller inside KIO region.
|
|
+ Different boards from XTFPGA family have LCD controller at different
|
|
+ addresses. Please consult prototyping user guide for your board for
|
|
+ the correct address. Wrong address here may lead to hardware lockup.
|
|
+
|
|
+config XTFPGA_LCD_8BIT_ACCESS
|
|
+ bool "Use 8-bit access to XTFPGA LCD"
|
|
+ depends on XTFPGA_LCD
|
|
+ default n
|
|
+ help
|
|
+ LCD may be connected with 4- or 8-bit interface, 8-bit access may
|
|
+ only be used with 8-bit interface. Please consult prototyping user
|
|
+ guide for your board for the correct interface width.
|
|
+
|
|
endmenu
|
|
|
|
source "net/Kconfig"
|
|
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
|
|
index 2164462..51230ba 100644
|
|
--- a/arch/xtensa/include/asm/pgtable.h
|
|
+++ b/arch/xtensa/include/asm/pgtable.h
|
|
@@ -67,7 +67,12 @@
|
|
#define VMALLOC_START 0xC0000000
|
|
#define VMALLOC_END 0xC7FEFFFF
|
|
#define TLBTEMP_BASE_1 0xC7FF0000
|
|
-#define TLBTEMP_BASE_2 0xC7FF8000
|
|
+#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
|
|
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
|
|
+#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
|
|
+#else
|
|
+#define TLBTEMP_SIZE ICACHE_WAY_SIZE
|
|
+#endif
|
|
|
|
/*
|
|
* For the Xtensa architecture, the PTE layout is as follows:
|
|
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
|
|
index 677bfcf..28f33a8 100644
|
|
--- a/arch/xtensa/include/asm/traps.h
|
|
+++ b/arch/xtensa/include/asm/traps.h
|
|
@@ -25,30 +25,39 @@ static inline void spill_registers(void)
|
|
{
|
|
#if XCHAL_NUM_AREGS > 16
|
|
__asm__ __volatile__ (
|
|
- " call12 1f\n"
|
|
+ " call8 1f\n"
|
|
" _j 2f\n"
|
|
" retw\n"
|
|
" .align 4\n"
|
|
"1:\n"
|
|
+#if XCHAL_NUM_AREGS == 32
|
|
+ " _entry a1, 32\n"
|
|
+ " addi a8, a0, 3\n"
|
|
+ " _entry a1, 16\n"
|
|
+ " mov a12, a12\n"
|
|
+ " retw\n"
|
|
+#else
|
|
" _entry a1, 48\n"
|
|
- " addi a12, a0, 3\n"
|
|
-#if XCHAL_NUM_AREGS > 32
|
|
- " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
|
|
+ " call12 1f\n"
|
|
+ " retw\n"
|
|
+ " .align 4\n"
|
|
+ "1:\n"
|
|
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
|
|
" _entry a1, 48\n"
|
|
" mov a12, a0\n"
|
|
" .endr\n"
|
|
-#endif
|
|
- " _entry a1, 48\n"
|
|
+ " _entry a1, 16\n"
|
|
#if XCHAL_NUM_AREGS % 12 == 0
|
|
- " mov a8, a8\n"
|
|
-#elif XCHAL_NUM_AREGS % 12 == 4
|
|
" mov a12, a12\n"
|
|
-#elif XCHAL_NUM_AREGS % 12 == 8
|
|
+#elif XCHAL_NUM_AREGS % 12 == 4
|
|
" mov a4, a4\n"
|
|
+#elif XCHAL_NUM_AREGS % 12 == 8
|
|
+ " mov a8, a8\n"
|
|
#endif
|
|
" retw\n"
|
|
+#endif
|
|
"2:\n"
|
|
- : : : "a12", "a13", "memory");
|
|
+ : : : "a8", "a9", "memory");
|
|
#else
|
|
__asm__ __volatile__ (
|
|
" mov a12, a12\n"
|
|
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
|
|
index fd686dc..c7211e7 100644
|
|
--- a/arch/xtensa/include/asm/uaccess.h
|
|
+++ b/arch/xtensa/include/asm/uaccess.h
|
|
@@ -52,7 +52,12 @@
|
|
*/
|
|
.macro get_fs ad, sp
|
|
GET_CURRENT(\ad,\sp)
|
|
+#if THREAD_CURRENT_DS > 1020
|
|
+ addi \ad, \ad, TASK_THREAD
|
|
+ l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
|
|
+#else
|
|
l32i \ad, \ad, THREAD_CURRENT_DS
|
|
+#endif
|
|
.endm
|
|
|
|
/*
|
|
diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
|
|
index b4cb110..a47909f 100644
|
|
--- a/arch/xtensa/include/uapi/asm/ioctls.h
|
|
+++ b/arch/xtensa/include/uapi/asm/ioctls.h
|
|
@@ -28,17 +28,17 @@
|
|
#define TCSETSW 0x5403
|
|
#define TCSETSF 0x5404
|
|
|
|
-#define TCGETA _IOR('t', 23, struct termio)
|
|
-#define TCSETA _IOW('t', 24, struct termio)
|
|
-#define TCSETAW _IOW('t', 25, struct termio)
|
|
-#define TCSETAF _IOW('t', 28, struct termio)
|
|
+#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
|
|
+#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
|
|
+#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
|
|
+#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
|
|
|
|
#define TCSBRK _IO('t', 29)
|
|
#define TCXONC _IO('t', 30)
|
|
#define TCFLSH _IO('t', 31)
|
|
|
|
-#define TIOCSWINSZ _IOW('t', 103, struct winsize)
|
|
-#define TIOCGWINSZ _IOR('t', 104, struct winsize)
|
|
+#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
|
|
+#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
|
|
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
|
|
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
|
|
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
|
|
@@ -88,7 +88,6 @@
|
|
#define TIOCSETD _IOW('T', 35, int)
|
|
#define TIOCGETD _IOR('T', 36, int)
|
|
#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
|
|
-#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
|
|
#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
|
|
#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
|
|
#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
|
|
@@ -114,8 +113,10 @@
|
|
#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
|
|
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
|
|
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
|
|
-#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */
|
|
-#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
|
|
+#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
|
|
+ /* _IOR('T', 90, struct serial_multiport_struct) */
|
|
+#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
|
|
+ /* _IOW('T', 91, struct serial_multiport_struct) */
|
|
|
|
#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
|
|
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
|
|
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
|
|
index b939552..b54fa1b 100644
|
|
--- a/arch/xtensa/include/uapi/asm/unistd.h
|
|
+++ b/arch/xtensa/include/uapi/asm/unistd.h
|
|
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
|
|
#define __NR_pivot_root 175
|
|
__SYSCALL(175, sys_pivot_root, 2)
|
|
#define __NR_umount 176
|
|
-__SYSCALL(176, sys_umount, 2)
|
|
+__SYSCALL(176, sys_oldumount, 1)
|
|
+#define __ARCH_WANT_SYS_OLDUMOUNT
|
|
#define __NR_swapoff 177
|
|
__SYSCALL(177, sys_swapoff, 1)
|
|
#define __NR_sync 178
|
|
@@ -714,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
|
|
__SYSCALL(324, sys_name_to_handle_at, 5)
|
|
#define __NR_open_by_handle_at 325
|
|
__SYSCALL(325, sys_open_by_handle_at, 3)
|
|
-#define __NR_sync_file_range 326
|
|
+#define __NR_sync_file_range2 326
|
|
__SYSCALL(326, sys_sync_file_range2, 6)
|
|
#define __NR_perf_event_open 327
|
|
__SYSCALL(327, sys_perf_event_open, 5)
|
|
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
|
|
index ef7f499..cf8a354 100644
|
|
--- a/arch/xtensa/kernel/entry.S
|
|
+++ b/arch/xtensa/kernel/entry.S
|
|
@@ -568,12 +568,13 @@ user_exception_exit:
|
|
* (if we have restored WSBITS-1 frames).
|
|
*/
|
|
|
|
+2:
|
|
#if XCHAL_HAVE_THREADPTR
|
|
l32i a3, a1, PT_THREADPTR
|
|
wur a3, threadptr
|
|
#endif
|
|
|
|
-2: j common_exception_exit
|
|
+ j common_exception_exit
|
|
|
|
/* This is the kernel exception exit.
|
|
* We avoided to do a MOVSP when we entered the exception, but we
|
|
@@ -1001,9 +1002,8 @@ ENTRY(fast_syscall_xtensa)
|
|
movi a7, 4 # sizeof(unsigned int)
|
|
access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
|
|
|
|
- addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
|
|
- _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
|
|
- _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
|
|
+ _bgeui a6, SYS_XTENSA_COUNT, .Lill
|
|
+ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
|
|
|
|
/* Fall through for ATOMIC_CMP_SWP. */
|
|
|
|
@@ -1015,27 +1015,26 @@ TRY s32i a5, a3, 0 # different, modify value
|
|
l32i a7, a2, PT_AREG7 # restore a7
|
|
l32i a0, a2, PT_AREG0 # restore a0
|
|
movi a2, 1 # and return 1
|
|
- addi a6, a6, 1 # restore a6 (really necessary?)
|
|
rfe
|
|
|
|
1: l32i a7, a2, PT_AREG7 # restore a7
|
|
l32i a0, a2, PT_AREG0 # restore a0
|
|
movi a2, 0 # return 0 (note that we cannot set
|
|
- addi a6, a6, 1 # restore a6 (really necessary?)
|
|
rfe
|
|
|
|
.Lnswp: /* Atomic set, add, and exg_add. */
|
|
|
|
TRY l32i a7, a3, 0 # orig
|
|
+ addi a6, a6, -SYS_XTENSA_ATOMIC_SET
|
|
add a0, a4, a7 # + arg
|
|
moveqz a0, a4, a6 # set
|
|
+ addi a6, a6, SYS_XTENSA_ATOMIC_SET
|
|
TRY s32i a0, a3, 0 # write new value
|
|
|
|
mov a0, a2
|
|
mov a2, a7
|
|
l32i a7, a0, PT_AREG7 # restore a7
|
|
l32i a0, a0, PT_AREG0 # restore a0
|
|
- addi a6, a6, 1 # restore a6 (really necessary?)
|
|
rfe
|
|
|
|
CATCH
|
|
@@ -1044,7 +1043,7 @@ CATCH
|
|
movi a2, -EFAULT
|
|
rfe
|
|
|
|
-.Lill: l32i a7, a2, PT_AREG0 # restore a7
|
|
+.Lill: l32i a7, a2, PT_AREG7 # restore a7
|
|
l32i a0, a2, PT_AREG0 # restore a0
|
|
movi a2, -EINVAL
|
|
rfe
|
|
@@ -1565,7 +1564,7 @@ ENTRY(fast_second_level_miss)
|
|
rsr a0, excvaddr
|
|
bltu a0, a3, 2f
|
|
|
|
- addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
|
|
+ addi a1, a0, -TLBTEMP_SIZE
|
|
bgeu a1, a3, 2f
|
|
|
|
/* Check if we have to restore an ITLB mapping. */
|
|
@@ -1794,7 +1793,7 @@ ENDPROC(system_call)
|
|
mov a12, a0
|
|
.endr
|
|
#endif
|
|
- _entry a1, 48
|
|
+ _entry a1, 16
|
|
#if XCHAL_NUM_AREGS % 12 == 0
|
|
mov a8, a8
|
|
#elif XCHAL_NUM_AREGS % 12 == 4
|
|
@@ -1818,9 +1817,8 @@ ENDPROC(system_call)
|
|
|
|
ENTRY(_switch_to)
|
|
|
|
- entry a1, 16
|
|
+ entry a1, 48
|
|
|
|
- mov a10, a2 # preserve 'prev' (a2)
|
|
mov a11, a3 # and 'next' (a3)
|
|
|
|
l32i a4, a2, TASK_THREAD_INFO
|
|
@@ -1828,8 +1826,14 @@ ENTRY(_switch_to)
|
|
|
|
save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
|
|
|
|
- s32i a0, a10, THREAD_RA # save return address
|
|
- s32i a1, a10, THREAD_SP # save stack pointer
|
|
+#if THREAD_RA > 1020 || THREAD_SP > 1020
|
|
+ addi a10, a2, TASK_THREAD
|
|
+ s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
|
|
+ s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
|
|
+#else
|
|
+ s32i a0, a2, THREAD_RA # save return address
|
|
+ s32i a1, a2, THREAD_SP # save stack pointer
|
|
+#endif
|
|
|
|
/* Disable ints while we manipulate the stack pointer. */
|
|
|
|
@@ -1870,7 +1874,6 @@ ENTRY(_switch_to)
|
|
load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
|
|
|
|
wsr a14, ps
|
|
- mov a2, a10 # return 'prev'
|
|
rsync
|
|
|
|
retw
|
|
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
|
|
index 2d9cc6d..e8b76b8 100644
|
|
--- a/arch/xtensa/kernel/pci-dma.c
|
|
+++ b/arch/xtensa/kernel/pci-dma.c
|
|
@@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
|
|
|
|
/* We currently don't support coherent memory outside KSEG */
|
|
|
|
- if (ret < XCHAL_KSEG_CACHED_VADDR
|
|
- || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
|
|
- BUG();
|
|
+ BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
|
|
+ ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
|
|
|
|
|
|
if (ret != 0) {
|
|
@@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
|
|
void dma_free_coherent(struct device *hwdev, size_t size,
|
|
void *vaddr, dma_addr_t dma_handle)
|
|
{
|
|
- long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
|
|
+ unsigned long addr = (unsigned long)vaddr +
|
|
+ XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
|
|
|
|
- if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
|
|
- BUG();
|
|
+ BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
|
|
+ addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
|
|
|
|
free_pages(addr, get_order(size));
|
|
}
|
|
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
|
|
index f9e1ec3..8453e6e 100644
|
|
--- a/arch/xtensa/kernel/vectors.S
|
|
+++ b/arch/xtensa/kernel/vectors.S
|
|
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
|
|
beqz a2, 1f # if at start of vector, don't restore
|
|
|
|
addi a0, a0, -128
|
|
- bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
|
|
- bbsi a0, 7, 2f
|
|
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
|
|
+
|
|
+ /*
|
|
+ * This fixup handler is for the extremely unlikely case where the
|
|
+ * overflow handler's reference thru a0 gets a hardware TLB refill
|
|
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
|
|
+ * prior references thru a9/a13, and where our reference now thru
|
|
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
|
|
+ */
|
|
+ movi a2, window_overflow_restore_a0_fixup
|
|
+ s32i a2, a3, EXC_TABLE_FIXUP
|
|
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
+ xsr a3, excsave1
|
|
+
|
|
+ bbsi.l a0, 7, 2f
|
|
|
|
/*
|
|
* Restore a0 as saved by _WindowOverflow8().
|
|
- *
|
|
- * FIXME: we really need a fixup handler for this L32E,
|
|
- * for the extremely unlikely case where the overflow handler's
|
|
- * reference thru a0 gets a hardware TLB refill that bumps out
|
|
- * the (distinct, aliasing) TLB entry that mapped its prior
|
|
- * references thru a9, and where our reference now thru a9
|
|
- * gets a 2nd-level miss exception (not hardware TLB refill).
|
|
*/
|
|
|
|
- l32e a2, a9, -16
|
|
- wsr a2, depc # replace the saved a0
|
|
- j 1f
|
|
+ l32e a0, a9, -16
|
|
+ wsr a0, depc # replace the saved a0
|
|
+ j 3f
|
|
|
|
2:
|
|
/*
|
|
* Restore a0 as saved by _WindowOverflow12().
|
|
- *
|
|
- * FIXME: we really need a fixup handler for this L32E,
|
|
- * for the extremely unlikely case where the overflow handler's
|
|
- * reference thru a0 gets a hardware TLB refill that bumps out
|
|
- * the (distinct, aliasing) TLB entry that mapped its prior
|
|
- * references thru a13, and where our reference now thru a13
|
|
- * gets a 2nd-level miss exception (not hardware TLB refill).
|
|
*/
|
|
|
|
- l32e a2, a13, -16
|
|
- wsr a2, depc # replace the saved a0
|
|
+ l32e a0, a13, -16
|
|
+ wsr a0, depc # replace the saved a0
|
|
+3:
|
|
+ xsr a3, excsave1
|
|
+ movi a0, 0
|
|
+ s32i a0, a3, EXC_TABLE_FIXUP
|
|
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
1:
|
|
/*
|
|
* Restore WindowBase while leaving all address registers restored.
|
|
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
|
|
|
|
s32i a0, a2, PT_DEPC
|
|
|
|
+_DoubleExceptionVector_handle_exception:
|
|
addx4 a0, a0, a3
|
|
l32i a0, a0, EXC_TABLE_FAST_USER
|
|
xsr a3, excsave1
|
|
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
|
|
rotw -3
|
|
j 1b
|
|
|
|
- .end literal_prefix
|
|
|
|
ENDPROC(_DoubleExceptionVector)
|
|
|
|
/*
|
|
+ * Fixup handler for TLB miss in double exception handler for window owerflow.
|
|
+ * We get here with windowbase set to the window that was being spilled and
|
|
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
|
|
+ * (bit set) window.
|
|
+ *
|
|
+ * We do the following here:
|
|
+ * - go to the original window retaining a0 value;
|
|
+ * - set up exception stack to return back to appropriate a0 restore code
|
|
+ * (we'll need to rotate window back and there's no place to save this
|
|
+ * information, use different return address for that);
|
|
+ * - handle the exception;
|
|
+ * - go to the window that was being spilled;
|
|
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
|
|
+ * - reload a0;
|
|
+ * - restore the original window;
|
|
+ * - reset the default fixup routine;
|
|
+ * - return to user. By the time we get to this fixup handler all information
|
|
+ * about the conditions of the original double exception that happened in
|
|
+ * the window overflow handler is lost, so we just return to userspace to
|
|
+ * retry overflow from start.
|
|
+ *
|
|
+ * a0: value of depc, original value in depc
|
|
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
|
|
+ * a3: exctable, original value in excsave1
|
|
+ */
|
|
+
|
|
+ENTRY(window_overflow_restore_a0_fixup)
|
|
+
|
|
+ rsr a0, ps
|
|
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
|
|
+ rsr a2, windowbase
|
|
+ sub a0, a2, a0
|
|
+ extui a0, a0, 0, 3
|
|
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
+ xsr a3, excsave1
|
|
+
|
|
+ _beqi a0, 1, .Lhandle_1
|
|
+ _beqi a0, 3, .Lhandle_3
|
|
+
|
|
+ .macro overflow_fixup_handle_exception_pane n
|
|
+
|
|
+ rsr a0, depc
|
|
+ rotw -\n
|
|
+
|
|
+ xsr a3, excsave1
|
|
+ wsr a2, depc
|
|
+ l32i a2, a3, EXC_TABLE_KSTK
|
|
+ s32i a0, a2, PT_AREG0
|
|
+
|
|
+ movi a0, .Lrestore_\n
|
|
+ s32i a0, a2, PT_DEPC
|
|
+ rsr a0, exccause
|
|
+ j _DoubleExceptionVector_handle_exception
|
|
+
|
|
+ .endm
|
|
+
|
|
+ overflow_fixup_handle_exception_pane 2
|
|
+.Lhandle_1:
|
|
+ overflow_fixup_handle_exception_pane 1
|
|
+.Lhandle_3:
|
|
+ overflow_fixup_handle_exception_pane 3
|
|
+
|
|
+ .macro overflow_fixup_restore_a0_pane n
|
|
+
|
|
+ rotw \n
|
|
+ /* Need to preserve a0 value here to be able to handle exception
|
|
+ * that may occur on a0 reload from stack. It may occur because
|
|
+ * TLB miss handler may not be atomic and pointer to page table
|
|
+ * may be lost before we get here. There are no free registers,
|
|
+ * so we need to use EXC_TABLE_DOUBLE_SAVE area.
|
|
+ */
|
|
+ xsr a3, excsave1
|
|
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
+ movi a2, window_overflow_restore_a0_fixup
|
|
+ s32i a2, a3, EXC_TABLE_FIXUP
|
|
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
+ xsr a3, excsave1
|
|
+ bbsi.l a0, 7, 1f
|
|
+ l32e a0, a9, -16
|
|
+ j 2f
|
|
+1:
|
|
+ l32e a0, a13, -16
|
|
+2:
|
|
+ rotw -\n
|
|
+
|
|
+ .endm
|
|
+
|
|
+.Lrestore_2:
|
|
+ overflow_fixup_restore_a0_pane 2
|
|
+
|
|
+.Lset_default_fixup:
|
|
+ xsr a3, excsave1
|
|
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
+ movi a2, 0
|
|
+ s32i a2, a3, EXC_TABLE_FIXUP
|
|
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
|
+ xsr a3, excsave1
|
|
+ rfe
|
|
+
|
|
+.Lrestore_1:
|
|
+ overflow_fixup_restore_a0_pane 1
|
|
+ j .Lset_default_fixup
|
|
+.Lrestore_3:
|
|
+ overflow_fixup_restore_a0_pane 3
|
|
+ j .Lset_default_fixup
|
|
+
|
|
+ENDPROC(window_overflow_restore_a0_fixup)
|
|
+
|
|
+ .end literal_prefix
|
|
+/*
|
|
* Debug interrupt vector
|
|
*
|
|
* There is not much space here, so simply jump to another handler.
|
|
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
|
|
index ee32c00..d16db6d 100644
|
|
--- a/arch/xtensa/kernel/vmlinux.lds.S
|
|
+++ b/arch/xtensa/kernel/vmlinux.lds.S
|
|
@@ -269,13 +269,13 @@ SECTIONS
|
|
.UserExceptionVector.literal)
|
|
SECTION_VECTOR (_DoubleExceptionVector_literal,
|
|
.DoubleExceptionVector.literal,
|
|
- DOUBLEEXC_VECTOR_VADDR - 16,
|
|
+ DOUBLEEXC_VECTOR_VADDR - 40,
|
|
SIZEOF(.UserExceptionVector.text),
|
|
.UserExceptionVector.text)
|
|
SECTION_VECTOR (_DoubleExceptionVector_text,
|
|
.DoubleExceptionVector.text,
|
|
DOUBLEEXC_VECTOR_VADDR,
|
|
- 32,
|
|
+ 40,
|
|
.DoubleExceptionVector.literal)
|
|
|
|
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
|
|
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
|
|
index b57c4f9..9e3571a 100644
|
|
--- a/arch/xtensa/mm/fault.c
|
|
+++ b/arch/xtensa/mm/fault.c
|
|
@@ -117,6 +117,8 @@ good_area:
|
|
if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
if (fault & VM_FAULT_OOM)
|
|
goto out_of_memory;
|
|
+ else if (fault & VM_FAULT_SIGSEGV)
|
|
+ goto bad_area;
|
|
else if (fault & VM_FAULT_SIGBUS)
|
|
goto do_sigbus;
|
|
BUG();
|
|
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
|
|
index d05f8fe..17b1ef3 100644
|
|
--- a/arch/xtensa/platforms/iss/network.c
|
|
+++ b/arch/xtensa/platforms/iss/network.c
|
|
@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
|
|
{
|
|
struct iss_net_private *lp = (struct iss_net_private *)priv;
|
|
|
|
- spin_lock(&lp->lock);
|
|
iss_net_poll();
|
|
+ spin_lock(&lp->lock);
|
|
mod_timer(&lp->timer, jiffies + lp->timer_val);
|
|
spin_unlock(&lp->lock);
|
|
}
|
|
@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
|
|
struct iss_net_private *lp = netdev_priv(dev);
|
|
int err;
|
|
|
|
- spin_lock(&lp->lock);
|
|
+ spin_lock_bh(&lp->lock);
|
|
|
|
err = lp->tp.open(lp);
|
|
if (err < 0)
|
|
@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
|
|
while ((err = iss_net_rx(dev)) > 0)
|
|
;
|
|
|
|
- spin_lock(&opened_lock);
|
|
+ spin_unlock_bh(&lp->lock);
|
|
+ spin_lock_bh(&opened_lock);
|
|
list_add(&lp->opened_list, &opened);
|
|
- spin_unlock(&opened_lock);
|
|
+ spin_unlock_bh(&opened_lock);
|
|
+ spin_lock_bh(&lp->lock);
|
|
|
|
init_timer(&lp->timer);
|
|
lp->timer_val = ISS_NET_TIMER_VALUE;
|
|
@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
|
|
mod_timer(&lp->timer, jiffies + lp->timer_val);
|
|
|
|
out:
|
|
- spin_unlock(&lp->lock);
|
|
+ spin_unlock_bh(&lp->lock);
|
|
return err;
|
|
}
|
|
|
|
@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
|
|
{
|
|
struct iss_net_private *lp = netdev_priv(dev);
|
|
netif_stop_queue(dev);
|
|
- spin_lock(&lp->lock);
|
|
+ spin_lock_bh(&lp->lock);
|
|
|
|
spin_lock(&opened_lock);
|
|
list_del(&opened);
|
|
@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
|
|
|
|
lp->tp.close(lp);
|
|
|
|
- spin_unlock(&lp->lock);
|
|
+ spin_unlock_bh(&lp->lock);
|
|
return 0;
|
|
}
|
|
|
|
static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
{
|
|
struct iss_net_private *lp = netdev_priv(dev);
|
|
- unsigned long flags;
|
|
int len;
|
|
|
|
netif_stop_queue(dev);
|
|
- spin_lock_irqsave(&lp->lock, flags);
|
|
+ spin_lock_bh(&lp->lock);
|
|
|
|
len = lp->tp.write(lp, &skb);
|
|
|
|
@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
|
|
}
|
|
|
|
- spin_unlock_irqrestore(&lp->lock, flags);
|
|
+ spin_unlock_bh(&lp->lock);
|
|
|
|
dev_kfree_skb(skb);
|
|
return NETDEV_TX_OK;
|
|
@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
|
|
|
|
if (!is_valid_ether_addr(hwaddr->sa_data))
|
|
return -EADDRNOTAVAIL;
|
|
- spin_lock(&lp->lock);
|
|
+ spin_lock_bh(&lp->lock);
|
|
memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
|
|
- spin_unlock(&lp->lock);
|
|
+ spin_unlock_bh(&lp->lock);
|
|
return 0;
|
|
}
|
|
|
|
@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
|
|
*lp = (struct iss_net_private) {
|
|
.device_list = LIST_HEAD_INIT(lp->device_list),
|
|
.opened_list = LIST_HEAD_INIT(lp->opened_list),
|
|
- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
|
|
.dev = dev,
|
|
.index = index,
|
|
- };
|
|
+ };
|
|
|
|
+ spin_lock_init(&lp->lock);
|
|
/*
|
|
* If this name ends up conflicting with an existing registered
|
|
* netdevice, that is OK, register_netdev{,ice}() will notice this
|
|
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
|
|
index b9ae206..7839d38 100644
|
|
--- a/arch/xtensa/platforms/xtfpga/Makefile
|
|
+++ b/arch/xtensa/platforms/xtfpga/Makefile
|
|
@@ -6,4 +6,5 @@
|
|
#
|
|
# Note 2! The CFLAGS definitions are in the main makefile...
|
|
|
|
-obj-y = setup.o lcd.o
|
|
+obj-y += setup.o
|
|
+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
|
|
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
|
|
index aeb316b..e8cc86f 100644
|
|
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
|
|
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
|
|
@@ -40,9 +40,6 @@
|
|
|
|
/* UART */
|
|
#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
|
|
-/* LCD instruction and data addresses. */
|
|
-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
|
|
-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
|
|
|
|
/* Misc. */
|
|
#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
|
|
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
|
|
index 0e43564..4c8541e 100644
|
|
--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
|
|
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
|
|
@@ -11,10 +11,25 @@
|
|
#ifndef __XTENSA_XTAVNET_LCD_H
|
|
#define __XTENSA_XTAVNET_LCD_H
|
|
|
|
+#ifdef CONFIG_XTFPGA_LCD
|
|
/* Display string STR at position POS on the LCD. */
|
|
void lcd_disp_at_pos(char *str, unsigned char pos);
|
|
|
|
/* Shift the contents of the LCD display left or right. */
|
|
void lcd_shiftleft(void);
|
|
void lcd_shiftright(void);
|
|
+#else
|
|
+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
|
|
+{
|
|
+}
|
|
+
|
|
+static inline void lcd_shiftleft(void)
|
|
+{
|
|
+}
|
|
+
|
|
+static inline void lcd_shiftright(void)
|
|
+{
|
|
+}
|
|
+#endif
|
|
+
|
|
#endif
|
|
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
|
|
index 2872301..4dc0c1b 100644
|
|
--- a/arch/xtensa/platforms/xtfpga/lcd.c
|
|
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
|
|
@@ -1,50 +1,63 @@
|
|
/*
|
|
- * Driver for the LCD display on the Tensilica LX60 Board.
|
|
+ * Driver for the LCD display on the Tensilica XTFPGA board family.
|
|
+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
|
|
*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*
|
|
* Copyright (C) 2001, 2006 Tensilica Inc.
|
|
+ * Copyright (C) 2015 Cadence Design Systems Inc.
|
|
*/
|
|
|
|
-/*
|
|
- *
|
|
- * FIXME: this code is from the examples from the LX60 user guide.
|
|
- *
|
|
- * The lcd_pause function does busy waiting, which is probably not
|
|
- * great. Maybe the code could be changed to use kernel timers, or
|
|
- * change the hardware to not need to wait.
|
|
- */
|
|
-
|
|
+#include <linux/delay.h>
|
|
#include <linux/init.h>
|
|
#include <linux/io.h>
|
|
|
|
#include <platform/hardware.h>
|
|
#include <platform/lcd.h>
|
|
-#include <linux/delay.h>
|
|
|
|
-#define LCD_PAUSE_ITERATIONS 4000
|
|
+/* LCD instruction and data addresses. */
|
|
+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
|
|
+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
|
|
+
|
|
#define LCD_CLEAR 0x1
|
|
#define LCD_DISPLAY_ON 0xc
|
|
|
|
/* 8bit and 2 lines display */
|
|
#define LCD_DISPLAY_MODE8BIT 0x38
|
|
+#define LCD_DISPLAY_MODE4BIT 0x28
|
|
#define LCD_DISPLAY_POS 0x80
|
|
#define LCD_SHIFT_LEFT 0x18
|
|
#define LCD_SHIFT_RIGHT 0x1c
|
|
|
|
+static void lcd_put_byte(u8 *addr, u8 data)
|
|
+{
|
|
+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
|
|
+ ACCESS_ONCE(*addr) = data;
|
|
+#else
|
|
+ ACCESS_ONCE(*addr) = data & 0xf0;
|
|
+ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
|
|
+#endif
|
|
+}
|
|
+
|
|
static int __init lcd_init(void)
|
|
{
|
|
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
|
|
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
|
|
mdelay(5);
|
|
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
|
|
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
|
|
udelay(200);
|
|
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
|
|
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
|
|
+ udelay(50);
|
|
+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
|
|
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
|
|
+ udelay(50);
|
|
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
|
|
udelay(50);
|
|
- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
|
|
+#endif
|
|
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
|
|
udelay(50);
|
|
- *LCD_INSTR_ADDR = LCD_CLEAR;
|
|
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
|
|
mdelay(10);
|
|
lcd_disp_at_pos("XTENSA LINUX", 0);
|
|
return 0;
|
|
@@ -52,10 +65,10 @@ static int __init lcd_init(void)
|
|
|
|
void lcd_disp_at_pos(char *str, unsigned char pos)
|
|
{
|
|
- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
|
|
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
|
|
udelay(100);
|
|
while (*str != 0) {
|
|
- *LCD_DATA_ADDR = *str;
|
|
+ lcd_put_byte(LCD_DATA_ADDR, *str);
|
|
udelay(200);
|
|
str++;
|
|
}
|
|
@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
|
|
|
|
void lcd_shiftleft(void)
|
|
{
|
|
- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
|
|
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
|
|
udelay(50);
|
|
}
|
|
|
|
void lcd_shiftright(void)
|
|
{
|
|
- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
|
|
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
|
|
udelay(50);
|
|
}
|
|
|
|
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
|
|
index dd0dd2d..a717585 100644
|
|
--- a/block/blk-cgroup.c
|
|
+++ b/block/blk-cgroup.c
|
|
@@ -703,8 +703,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
|
|
return -EINVAL;
|
|
|
|
disk = get_gendisk(MKDEV(major, minor), &part);
|
|
- if (!disk || part)
|
|
+ if (!disk)
|
|
return -EINVAL;
|
|
+ if (part) {
|
|
+ put_disk(disk);
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
rcu_read_lock();
|
|
spin_lock_irq(disk->queue->queue_lock);
|
|
@@ -859,6 +863,13 @@ void blkcg_drain_queue(struct request_queue *q)
|
|
{
|
|
lockdep_assert_held(q->queue_lock);
|
|
|
|
+ /*
|
|
+ * @q could be exiting and already have destroyed all blkgs as
|
|
+ * indicated by NULL root_blkg. If so, don't confuse policies.
|
|
+ */
|
|
+ if (!q->root_blkg)
|
|
+ return;
|
|
+
|
|
blk_throtl_drain(q);
|
|
}
|
|
|
|
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
|
|
index f872127..78d3835 100644
|
|
--- a/block/blk-mq-cpumap.c
|
|
+++ b/block/blk-mq-cpumap.c
|
|
@@ -95,7 +95,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg)
|
|
unsigned int *map;
|
|
|
|
/* If cpus are offline, map them to first hctx */
|
|
- map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
|
|
+ map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
|
|
reg->numa_node);
|
|
if (!map)
|
|
return NULL;
|
|
diff --git a/block/blk-settings.c b/block/blk-settings.c
|
|
index 5d21239..95138e9 100644
|
|
--- a/block/blk-settings.c
|
|
+++ b/block/blk-settings.c
|
|
@@ -553,7 +553,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
|
|
bottom = max(b->physical_block_size, b->io_min) + alignment;
|
|
|
|
/* Verify that top and bottom intervals line up */
|
|
- if (max(top, bottom) & (min(top, bottom) - 1)) {
|
|
+ if (max(top, bottom) % min(top, bottom)) {
|
|
t->misaligned = 1;
|
|
ret = -1;
|
|
}
|
|
@@ -598,7 +598,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
|
|
|
|
/* Find lowest common alignment_offset */
|
|
t->alignment_offset = lcm(t->alignment_offset, alignment)
|
|
- & (max(t->physical_block_size, t->io_min) - 1);
|
|
+ % max(t->physical_block_size, t->io_min);
|
|
|
|
/* Verify that new alignment_offset is on a logical block boundary */
|
|
if (t->alignment_offset & (t->logical_block_size - 1)) {
|
|
diff --git a/block/blk-tag.c b/block/blk-tag.c
|
|
index 3f33d86..a185b86 100644
|
|
--- a/block/blk-tag.c
|
|
+++ b/block/blk-tag.c
|
|
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
|
|
EXPORT_SYMBOL(blk_queue_find_tag);
|
|
|
|
/**
|
|
- * __blk_free_tags - release a given set of tag maintenance info
|
|
+ * blk_free_tags - release a given set of tag maintenance info
|
|
* @bqt: the tag map to free
|
|
*
|
|
- * Tries to free the specified @bqt. Returns true if it was
|
|
- * actually freed and false if there are still references using it
|
|
+ * Drop the reference count on @bqt and frees it when the last reference
|
|
+ * is dropped.
|
|
*/
|
|
-static int __blk_free_tags(struct blk_queue_tag *bqt)
|
|
+void blk_free_tags(struct blk_queue_tag *bqt)
|
|
{
|
|
- int retval;
|
|
-
|
|
- retval = atomic_dec_and_test(&bqt->refcnt);
|
|
- if (retval) {
|
|
+ if (atomic_dec_and_test(&bqt->refcnt)) {
|
|
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
|
|
bqt->max_depth);
|
|
|
|
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
|
|
|
|
kfree(bqt);
|
|
}
|
|
-
|
|
- return retval;
|
|
}
|
|
+EXPORT_SYMBOL(blk_free_tags);
|
|
|
|
/**
|
|
* __blk_queue_free_tags - release tag maintenance info
|
|
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
|
|
if (!bqt)
|
|
return;
|
|
|
|
- __blk_free_tags(bqt);
|
|
+ blk_free_tags(bqt);
|
|
|
|
q->queue_tags = NULL;
|
|
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
|
|
}
|
|
|
|
/**
|
|
- * blk_free_tags - release a given set of tag maintenance info
|
|
- * @bqt: the tag map to free
|
|
- *
|
|
- * For externally managed @bqt frees the map. Callers of this
|
|
- * function must guarantee to have released all the queues that
|
|
- * might have been using this tag map.
|
|
- */
|
|
-void blk_free_tags(struct blk_queue_tag *bqt)
|
|
-{
|
|
- if (unlikely(!__blk_free_tags(bqt)))
|
|
- BUG();
|
|
-}
|
|
-EXPORT_SYMBOL(blk_free_tags);
|
|
-
|
|
-/**
|
|
* blk_queue_free_tags - release tag maintenance info
|
|
* @q: the request queue for the device
|
|
*
|
|
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
|
|
index 1474c3a..1599878 100644
|
|
--- a/block/blk-throttle.c
|
|
+++ b/block/blk-throttle.c
|
|
@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
|
|
struct blkg_rwstat rwstat = { }, tmp;
|
|
int i, cpu;
|
|
|
|
+ if (tg->stats_cpu == NULL)
|
|
+ return 0;
|
|
+
|
|
for_each_possible_cpu(cpu) {
|
|
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
|
|
|
|
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
|
|
index 744833b..d9bba99 100644
|
|
--- a/block/cfq-iosched.c
|
|
+++ b/block/cfq-iosched.c
|
|
@@ -1275,12 +1275,16 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
|
|
static void
|
|
cfq_update_group_weight(struct cfq_group *cfqg)
|
|
{
|
|
- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
|
|
-
|
|
if (cfqg->new_weight) {
|
|
cfqg->weight = cfqg->new_weight;
|
|
cfqg->new_weight = 0;
|
|
}
|
|
+}
|
|
+
|
|
+static void
|
|
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
|
|
+{
|
|
+ BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
|
|
|
|
if (cfqg->new_leaf_weight) {
|
|
cfqg->leaf_weight = cfqg->new_leaf_weight;
|
|
@@ -1299,7 +1303,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
|
|
/* add to the service tree */
|
|
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
|
|
|
|
- cfq_update_group_weight(cfqg);
|
|
+ cfq_update_group_leaf_weight(cfqg);
|
|
__cfq_group_service_tree_add(st, cfqg);
|
|
|
|
/*
|
|
@@ -1323,6 +1327,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
|
|
*/
|
|
while ((parent = cfqg_parent(pos))) {
|
|
if (propagate) {
|
|
+ cfq_update_group_weight(pos);
|
|
propagate = !parent->nr_active++;
|
|
parent->children_weight += pos->weight;
|
|
}
|
|
@@ -3580,6 +3585,11 @@ retry:
|
|
|
|
blkcg = bio_blkcg(bio);
|
|
cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
|
|
+ if (!cfqg) {
|
|
+ cfqq = &cfqd->oom_cfqq;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
cfqq = cic_to_cfqq(cic, is_sync);
|
|
|
|
/*
|
|
@@ -3616,7 +3626,7 @@ retry:
|
|
} else
|
|
cfqq = &cfqd->oom_cfqq;
|
|
}
|
|
-
|
|
+out:
|
|
if (new_cfqq)
|
|
kmem_cache_free(cfq_pool, new_cfqq);
|
|
|
|
@@ -3646,12 +3656,17 @@ static struct cfq_queue *
|
|
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
|
|
struct bio *bio, gfp_t gfp_mask)
|
|
{
|
|
- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
|
|
- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
|
|
+ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
|
|
+ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
|
|
struct cfq_queue **async_cfqq = NULL;
|
|
struct cfq_queue *cfqq = NULL;
|
|
|
|
if (!is_sync) {
|
|
+ if (!ioprio_valid(cic->ioprio)) {
|
|
+ struct task_struct *tsk = current;
|
|
+ ioprio = task_nice_ioprio(tsk);
|
|
+ ioprio_class = task_nice_ioclass(tsk);
|
|
+ }
|
|
async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
|
|
cfqq = *async_cfqq;
|
|
}
|
|
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
|
|
index fbd5a67..a0926a6 100644
|
|
--- a/block/compat_ioctl.c
|
|
+++ b/block/compat_ioctl.c
|
|
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
|
|
case BLKROSET:
|
|
case BLKDISCARD:
|
|
case BLKSECDISCARD:
|
|
+ case BLKZEROOUT:
|
|
/*
|
|
* the ones below are implemented in blkdev_locked_ioctl,
|
|
* but we call blkdev_ioctl, which gets the lock for us
|
|
diff --git a/block/genhd.c b/block/genhd.c
|
|
index 791f419..9316f5f 100644
|
|
--- a/block/genhd.c
|
|
+++ b/block/genhd.c
|
|
@@ -28,10 +28,10 @@ struct kobject *block_depr;
|
|
/* for extended dynamic devt allocation, currently only one major is used */
|
|
#define NR_EXT_DEVT (1 << MINORBITS)
|
|
|
|
-/* For extended devt allocation. ext_devt_mutex prevents look up
|
|
+/* For extended devt allocation. ext_devt_lock prevents look up
|
|
* results from going away underneath its user.
|
|
*/
|
|
-static DEFINE_MUTEX(ext_devt_mutex);
|
|
+static DEFINE_SPINLOCK(ext_devt_lock);
|
|
static DEFINE_IDR(ext_devt_idr);
|
|
|
|
static struct device_type disk_type;
|
|
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
|
|
}
|
|
|
|
/* allocate ext devt */
|
|
- mutex_lock(&ext_devt_mutex);
|
|
- idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
|
|
- mutex_unlock(&ext_devt_mutex);
|
|
+ idr_preload(GFP_KERNEL);
|
|
+
|
|
+ spin_lock_bh(&ext_devt_lock);
|
|
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
|
|
+ spin_unlock_bh(&ext_devt_lock);
|
|
+
|
|
+ idr_preload_end();
|
|
if (idx < 0)
|
|
return idx == -ENOSPC ? -EBUSY : idx;
|
|
|
|
@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
|
|
*/
|
|
void blk_free_devt(dev_t devt)
|
|
{
|
|
- might_sleep();
|
|
-
|
|
if (devt == MKDEV(0, 0))
|
|
return;
|
|
|
|
if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
|
|
- mutex_lock(&ext_devt_mutex);
|
|
+ spin_lock_bh(&ext_devt_lock);
|
|
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
|
|
- mutex_unlock(&ext_devt_mutex);
|
|
+ spin_unlock_bh(&ext_devt_lock);
|
|
}
|
|
}
|
|
|
|
@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
|
|
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
|
|
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
|
|
device_del(disk_to_dev(disk));
|
|
- blk_free_devt(disk_to_dev(disk)->devt);
|
|
}
|
|
EXPORT_SYMBOL(del_gendisk);
|
|
|
|
@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
|
|
} else {
|
|
struct hd_struct *part;
|
|
|
|
- mutex_lock(&ext_devt_mutex);
|
|
+ spin_lock_bh(&ext_devt_lock);
|
|
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
|
|
if (part && get_disk(part_to_disk(part))) {
|
|
*partno = part->partno;
|
|
disk = part_to_disk(part);
|
|
}
|
|
- mutex_unlock(&ext_devt_mutex);
|
|
+ spin_unlock_bh(&ext_devt_lock);
|
|
}
|
|
|
|
return disk;
|
|
@@ -1069,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
|
|
struct disk_part_tbl *old_ptbl = disk->part_tbl;
|
|
struct disk_part_tbl *new_ptbl;
|
|
int len = old_ptbl ? old_ptbl->len : 0;
|
|
- int target = partno + 1;
|
|
+ int i, target;
|
|
size_t size;
|
|
- int i;
|
|
+
|
|
+ /*
|
|
+ * check for int overflow, since we can get here from blkpg_ioctl()
|
|
+ * with a user passed 'partno'.
|
|
+ */
|
|
+ target = partno + 1;
|
|
+ if (target < 0)
|
|
+ return -EINVAL;
|
|
|
|
/* disk_max_parts() is zero during initialization, ignore if so */
|
|
if (disk_max_parts(disk) && target > disk_max_parts(disk))
|
|
@@ -1098,6 +1106,7 @@ static void disk_release(struct device *dev)
|
|
{
|
|
struct gendisk *disk = dev_to_disk(dev);
|
|
|
|
+ blk_free_devt(dev->devt);
|
|
disk_release_events(disk);
|
|
kfree(disk->random);
|
|
disk_replace_part_tbl(disk, NULL);
|
|
diff --git a/block/partition-generic.c b/block/partition-generic.c
|
|
index 789cdea..0d9e5f9 100644
|
|
--- a/block/partition-generic.c
|
|
+++ b/block/partition-generic.c
|
|
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
|
|
static void part_release(struct device *dev)
|
|
{
|
|
struct hd_struct *p = dev_to_part(dev);
|
|
+ blk_free_devt(dev->devt);
|
|
free_part_stats(p);
|
|
free_part_info(p);
|
|
kfree(p);
|
|
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
|
|
rcu_assign_pointer(ptbl->last_lookup, NULL);
|
|
kobject_put(part->holder_dir);
|
|
device_del(part_to_dev(part));
|
|
- blk_free_devt(part_devt(part));
|
|
|
|
hd_struct_put(part);
|
|
}
|
|
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
|
|
index 43be471..0931f51 100644
|
|
--- a/block/partitions/aix.c
|
|
+++ b/block/partitions/aix.c
|
|
@@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitions *state)
|
|
continue;
|
|
}
|
|
lv_ix = be16_to_cpu(p->lv_ix) - 1;
|
|
- if (lv_ix > state->limit) {
|
|
+ if (lv_ix >= state->limit) {
|
|
cur_lv_ix = -1;
|
|
continue;
|
|
}
|
|
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
|
|
index 2648797..4044cf7 100644
|
|
--- a/block/scsi_ioctl.c
|
|
+++ b/block/scsi_ioctl.c
|
|
@@ -489,7 +489,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|
|
|
if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
|
|
err = DRIVER_ERROR << 24;
|
|
- goto out;
|
|
+ goto error;
|
|
}
|
|
|
|
memset(sense, 0, sizeof(sense));
|
|
@@ -499,7 +499,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|
|
|
blk_execute_rq(q, disk, rq, 0);
|
|
|
|
-out:
|
|
err = rq->errors & 0xff; /* only 8 bit SCSI status */
|
|
if (err) {
|
|
if (rq->sense_len && rq->sense) {
|
|
diff --git a/crypto/842.c b/crypto/842.c
|
|
index 65c7a89c..b48f4f1 100644
|
|
--- a/crypto/842.c
|
|
+++ b/crypto/842.c
|
|
@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("842 Compression Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("842");
|
|
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
|
|
index fd0d6b4..3dd1011 100644
|
|
--- a/crypto/aes_generic.c
|
|
+++ b/crypto/aes_generic.c
|
|
@@ -1474,4 +1474,5 @@ module_exit(aes_fini);
|
|
|
|
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
|
|
MODULE_LICENSE("Dual BSD/GPL");
|
|
-MODULE_ALIAS("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes-generic");
|
|
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
|
|
index 966f893..1de4bee 100644
|
|
--- a/crypto/af_alg.c
|
|
+++ b/crypto/af_alg.c
|
|
@@ -21,6 +21,7 @@
|
|
#include <linux/module.h>
|
|
#include <linux/net.h>
|
|
#include <linux/rwsem.h>
|
|
+#include <linux/security.h>
|
|
|
|
struct alg_type_list {
|
|
const struct af_alg_type *type;
|
|
@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
|
|
|
|
sock_init_data(newsock, sk2);
|
|
sock_graft(sk2, newsock);
|
|
+ security_sk_clone(sk, sk2);
|
|
|
|
err = type->accept(ask->private, sk2);
|
|
if (err) {
|
|
@@ -447,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
|
|
{
|
|
struct af_alg_completion *completion = req->data;
|
|
|
|
+ if (err == -EINPROGRESS)
|
|
+ return;
|
|
+
|
|
completion->err = err;
|
|
complete(&completion->completion);
|
|
}
|
|
diff --git a/crypto/algapi.c b/crypto/algapi.c
|
|
index 7a1ae87..00d8d93 100644
|
|
--- a/crypto/algapi.c
|
|
+++ b/crypto/algapi.c
|
|
@@ -495,8 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
|
|
|
|
struct crypto_template *crypto_lookup_template(const char *name)
|
|
{
|
|
- return try_then_request_module(__crypto_lookup_template(name), "%s",
|
|
- name);
|
|
+ return try_then_request_module(__crypto_lookup_template(name),
|
|
+ "crypto-%s", name);
|
|
}
|
|
EXPORT_SYMBOL_GPL(crypto_lookup_template);
|
|
|
|
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
|
|
index a19c027..83187f4 100644
|
|
--- a/crypto/algif_skcipher.c
|
|
+++ b/crypto/algif_skcipher.c
|
|
@@ -49,7 +49,7 @@ struct skcipher_ctx {
|
|
struct ablkcipher_request req;
|
|
};
|
|
|
|
-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
|
|
+#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
|
|
sizeof(struct scatterlist) - 1)
|
|
|
|
static inline int skcipher_sndbuf(struct sock *sk)
|
|
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
|
|
index 666f196..6f5bebc 100644
|
|
--- a/crypto/ansi_cprng.c
|
|
+++ b/crypto/ansi_cprng.c
|
|
@@ -476,4 +476,5 @@ module_param(dbg, int, 0);
|
|
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
|
|
module_init(prng_mod_init);
|
|
module_exit(prng_mod_fini);
|
|
-MODULE_ALIAS("stdrng");
|
|
+MODULE_ALIAS_CRYPTO("stdrng");
|
|
+MODULE_ALIAS_CRYPTO("ansi_cprng");
|
|
diff --git a/crypto/anubis.c b/crypto/anubis.c
|
|
index 008c8a4..4bb187c 100644
|
|
--- a/crypto/anubis.c
|
|
+++ b/crypto/anubis.c
|
|
@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("anubis");
|
|
diff --git a/crypto/api.c b/crypto/api.c
|
|
index a2b39c5..2a81e98 100644
|
|
--- a/crypto/api.c
|
|
+++ b/crypto/api.c
|
|
@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
|
|
|
|
alg = crypto_alg_lookup(name, type, mask);
|
|
if (!alg) {
|
|
- request_module("%s", name);
|
|
+ request_module("crypto-%s", name);
|
|
|
|
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
|
|
CRYPTO_ALG_NEED_FALLBACK))
|
|
- request_module("%s-all", name);
|
|
+ request_module("crypto-%s-all", name);
|
|
|
|
alg = crypto_alg_lookup(name, type, mask);
|
|
}
|
|
diff --git a/crypto/arc4.c b/crypto/arc4.c
|
|
index 5a772c3..f1a8192 100644
|
|
--- a/crypto/arc4.c
|
|
+++ b/crypto/arc4.c
|
|
@@ -166,3 +166,4 @@ module_exit(arc4_exit);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
|
|
MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
|
|
+MODULE_ALIAS_CRYPTO("arc4");
|
|
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
|
|
index 3c562f5..e1bce26 100644
|
|
--- a/crypto/async_tx/async_xor.c
|
|
+++ b/crypto/async_tx/async_xor.c
|
|
@@ -78,8 +78,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
|
|
tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
|
|
xor_src_cnt, unmap->len,
|
|
dma_flags);
|
|
- src_list[0] = tmp;
|
|
-
|
|
|
|
if (unlikely(!tx))
|
|
async_tx_quiesce(&submit->depend_tx);
|
|
@@ -92,6 +90,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
|
|
xor_src_cnt, unmap->len,
|
|
dma_flags);
|
|
}
|
|
+ src_list[0] = tmp;
|
|
|
|
dma_set_unmap(tx, unmap);
|
|
async_tx_submit(chan, tx, submit);
|
|
diff --git a/crypto/authenc.c b/crypto/authenc.c
|
|
index e122355..78fb16c 100644
|
|
--- a/crypto/authenc.c
|
|
+++ b/crypto/authenc.c
|
|
@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
|
|
+MODULE_ALIAS_CRYPTO("authenc");
|
|
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
|
|
index 4be0dd4..024bff2 100644
|
|
--- a/crypto/authencesn.c
|
|
+++ b/crypto/authencesn.c
|
|
@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
|
|
MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
|
|
+MODULE_ALIAS_CRYPTO("authencesn");
|
|
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
|
|
index 8baf544..87b392a 100644
|
|
--- a/crypto/blowfish_generic.c
|
|
+++ b/crypto/blowfish_generic.c
|
|
@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
|
|
-MODULE_ALIAS("blowfish");
|
|
+MODULE_ALIAS_CRYPTO("blowfish");
|
|
+MODULE_ALIAS_CRYPTO("blowfish-generic");
|
|
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
|
|
index 26bcd7a..a02286b 100644
|
|
--- a/crypto/camellia_generic.c
|
|
+++ b/crypto/camellia_generic.c
|
|
@@ -1098,4 +1098,5 @@ module_exit(camellia_fini);
|
|
|
|
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("camellia");
|
|
+MODULE_ALIAS_CRYPTO("camellia");
|
|
+MODULE_ALIAS_CRYPTO("camellia-generic");
|
|
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
|
|
index 5558f63..df5c726 100644
|
|
--- a/crypto/cast5_generic.c
|
|
+++ b/crypto/cast5_generic.c
|
|
@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
|
|
-MODULE_ALIAS("cast5");
|
|
+MODULE_ALIAS_CRYPTO("cast5");
|
|
+MODULE_ALIAS_CRYPTO("cast5-generic");
|
|
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
|
|
index de73252..058c8d7 100644
|
|
--- a/crypto/cast6_generic.c
|
|
+++ b/crypto/cast6_generic.c
|
|
@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
|
|
-MODULE_ALIAS("cast6");
|
|
+MODULE_ALIAS_CRYPTO("cast6");
|
|
+MODULE_ALIAS_CRYPTO("cast6-generic");
|
|
diff --git a/crypto/cbc.c b/crypto/cbc.c
|
|
index 61ac42e..780ee27 100644
|
|
--- a/crypto/cbc.c
|
|
+++ b/crypto/cbc.c
|
|
@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("CBC block cipher algorithm");
|
|
+MODULE_ALIAS_CRYPTO("cbc");
|
|
diff --git a/crypto/ccm.c b/crypto/ccm.c
|
|
index 1df8421..003bbbd 100644
|
|
--- a/crypto/ccm.c
|
|
+++ b/crypto/ccm.c
|
|
@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Counter with CBC MAC");
|
|
-MODULE_ALIAS("ccm_base");
|
|
-MODULE_ALIAS("rfc4309");
|
|
+MODULE_ALIAS_CRYPTO("ccm_base");
|
|
+MODULE_ALIAS_CRYPTO("rfc4309");
|
|
+MODULE_ALIAS_CRYPTO("ccm");
|
|
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
|
|
index 834d8dd..22b7e55 100644
|
|
--- a/crypto/chainiv.c
|
|
+++ b/crypto/chainiv.c
|
|
@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Chain IV Generator");
|
|
+MODULE_ALIAS_CRYPTO("chainiv");
|
|
diff --git a/crypto/cmac.c b/crypto/cmac.c
|
|
index 50880cf..7a8bfbd 100644
|
|
--- a/crypto/cmac.c
|
|
+++ b/crypto/cmac.c
|
|
@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
|
|
+MODULE_ALIAS_CRYPTO("cmac");
|
|
diff --git a/crypto/crc32.c b/crypto/crc32.c
|
|
index 9d1c415..187ded2 100644
|
|
--- a/crypto/crc32.c
|
|
+++ b/crypto/crc32.c
|
|
@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
|
|
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
|
|
MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
|
|
MODULE_LICENSE("GPL");
|
|
+MODULE_ALIAS_CRYPTO("crc32");
|
|
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
|
|
index 06f7018..238f0e6 100644
|
|
--- a/crypto/crc32c.c
|
|
+++ b/crypto/crc32c.c
|
|
@@ -170,3 +170,4 @@ module_exit(crc32c_mod_fini);
|
|
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
|
|
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
|
|
MODULE_LICENSE("GPL");
|
|
+MODULE_ALIAS_CRYPTO("crc32c");
|
|
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
|
|
index 877e711..c1229614 100644
|
|
--- a/crypto/crct10dif_generic.c
|
|
+++ b/crypto/crct10dif_generic.c
|
|
@@ -124,4 +124,5 @@ module_exit(crct10dif_mod_fini);
|
|
MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
|
|
MODULE_DESCRIPTION("T10 DIF CRC calculation.");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("crct10dif");
|
|
+MODULE_ALIAS_CRYPTO("crct10dif");
|
|
+MODULE_ALIAS_CRYPTO("crct10dif-generic");
|
|
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
|
|
index 7bdd61b..75c415d 100644
|
|
--- a/crypto/cryptd.c
|
|
+++ b/crypto/cryptd.c
|
|
@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Software async crypto daemon");
|
|
+MODULE_ALIAS_CRYPTO("cryptd");
|
|
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
|
|
index fee7265..7b39fa3 100644
|
|
--- a/crypto/crypto_null.c
|
|
+++ b/crypto/crypto_null.c
|
|
@@ -149,9 +149,9 @@ static struct crypto_alg null_algs[3] = { {
|
|
.coa_decompress = null_compress } }
|
|
} };
|
|
|
|
-MODULE_ALIAS("compress_null");
|
|
-MODULE_ALIAS("digest_null");
|
|
-MODULE_ALIAS("cipher_null");
|
|
+MODULE_ALIAS_CRYPTO("compress_null");
|
|
+MODULE_ALIAS_CRYPTO("digest_null");
|
|
+MODULE_ALIAS_CRYPTO("cipher_null");
|
|
|
|
static int __init crypto_null_mod_init(void)
|
|
{
|
|
diff --git a/crypto/ctr.c b/crypto/ctr.c
|
|
index f2b94f2..2386f73 100644
|
|
--- a/crypto/ctr.c
|
|
+++ b/crypto/ctr.c
|
|
@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("CTR Counter block mode");
|
|
-MODULE_ALIAS("rfc3686");
|
|
+MODULE_ALIAS_CRYPTO("rfc3686");
|
|
+MODULE_ALIAS_CRYPTO("ctr");
|
|
diff --git a/crypto/cts.c b/crypto/cts.c
|
|
index 042223f..60b9da3 100644
|
|
--- a/crypto/cts.c
|
|
+++ b/crypto/cts.c
|
|
@@ -350,3 +350,4 @@ module_exit(crypto_cts_module_exit);
|
|
|
|
MODULE_LICENSE("Dual BSD/GPL");
|
|
MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
|
|
+MODULE_ALIAS_CRYPTO("cts");
|
|
diff --git a/crypto/deflate.c b/crypto/deflate.c
|
|
index b57d70e..95d8d37 100644
|
|
--- a/crypto/deflate.c
|
|
+++ b/crypto/deflate.c
|
|
@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
|
|
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
|
|
-
|
|
+MODULE_ALIAS_CRYPTO("deflate");
|
|
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
|
|
index f6cf63f..3ec6071 100644
|
|
--- a/crypto/des_generic.c
|
|
+++ b/crypto/des_generic.c
|
|
@@ -971,8 +971,6 @@ static struct crypto_alg des_algs[2] = { {
|
|
.cia_decrypt = des3_ede_decrypt } }
|
|
} };
|
|
|
|
-MODULE_ALIAS("des3_ede");
|
|
-
|
|
static int __init des_generic_mod_init(void)
|
|
{
|
|
return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
|
|
@@ -989,4 +987,7 @@ module_exit(des_generic_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
|
|
MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
|
|
-MODULE_ALIAS("des");
|
|
+MODULE_ALIAS_CRYPTO("des");
|
|
+MODULE_ALIAS_CRYPTO("des-generic");
|
|
+MODULE_ALIAS_CRYPTO("des3_ede");
|
|
+MODULE_ALIAS_CRYPTO("des3_ede-generic");
|
|
diff --git a/crypto/ecb.c b/crypto/ecb.c
|
|
index 935cfef..12011af 100644
|
|
--- a/crypto/ecb.c
|
|
+++ b/crypto/ecb.c
|
|
@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("ECB block cipher algorithm");
|
|
+MODULE_ALIAS_CRYPTO("ecb");
|
|
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
|
|
index 42ce9f5..388f582 100644
|
|
--- a/crypto/eseqiv.c
|
|
+++ b/crypto/eseqiv.c
|
|
@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
|
|
+MODULE_ALIAS_CRYPTO("eseqiv");
|
|
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
|
|
index 021d7fe..77286ea 100644
|
|
--- a/crypto/fcrypt.c
|
|
+++ b/crypto/fcrypt.c
|
|
@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
|
|
MODULE_LICENSE("Dual BSD/GPL");
|
|
MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
|
|
MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
|
|
+MODULE_ALIAS_CRYPTO("fcrypt");
|
|
diff --git a/crypto/gcm.c b/crypto/gcm.c
|
|
index b4f0179..9cea4d0 100644
|
|
--- a/crypto/gcm.c
|
|
+++ b/crypto/gcm.c
|
|
@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Galois/Counter Mode");
|
|
MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
|
|
-MODULE_ALIAS("gcm_base");
|
|
-MODULE_ALIAS("rfc4106");
|
|
-MODULE_ALIAS("rfc4543");
|
|
+MODULE_ALIAS_CRYPTO("gcm_base");
|
|
+MODULE_ALIAS_CRYPTO("rfc4106");
|
|
+MODULE_ALIAS_CRYPTO("rfc4543");
|
|
+MODULE_ALIAS_CRYPTO("gcm");
|
|
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
|
|
index 9d3f0c6..bac7099 100644
|
|
--- a/crypto/ghash-generic.c
|
|
+++ b/crypto/ghash-generic.c
|
|
@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
|
|
-MODULE_ALIAS("ghash");
|
|
+MODULE_ALIAS_CRYPTO("ghash");
|
|
+MODULE_ALIAS_CRYPTO("ghash-generic");
|
|
diff --git a/crypto/hmac.c b/crypto/hmac.c
|
|
index 8d9544c..ade790b 100644
|
|
--- a/crypto/hmac.c
|
|
+++ b/crypto/hmac.c
|
|
@@ -271,3 +271,4 @@ module_exit(hmac_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("HMAC hash algorithm");
|
|
+MODULE_ALIAS_CRYPTO("hmac");
|
|
diff --git a/crypto/khazad.c b/crypto/khazad.c
|
|
index 60e7cd6..873eb5d 100644
|
|
--- a/crypto/khazad.c
|
|
+++ b/crypto/khazad.c
|
|
@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("khazad");
|
|
diff --git a/crypto/krng.c b/crypto/krng.c
|
|
index a2d2b72..0224841 100644
|
|
--- a/crypto/krng.c
|
|
+++ b/crypto/krng.c
|
|
@@ -62,4 +62,5 @@ module_exit(krng_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Kernel Random Number Generator");
|
|
-MODULE_ALIAS("stdrng");
|
|
+MODULE_ALIAS_CRYPTO("stdrng");
|
|
+MODULE_ALIAS_CRYPTO("krng");
|
|
diff --git a/crypto/lrw.c b/crypto/lrw.c
|
|
index ba42acc..6f9908a 100644
|
|
--- a/crypto/lrw.c
|
|
+++ b/crypto/lrw.c
|
|
@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("LRW block cipher mode");
|
|
+MODULE_ALIAS_CRYPTO("lrw");
|
|
diff --git a/crypto/lz4.c b/crypto/lz4.c
|
|
index 4586dd1..53279ab 100644
|
|
--- a/crypto/lz4.c
|
|
+++ b/crypto/lz4.c
|
|
@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("LZ4 Compression Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("lz4");
|
|
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
|
|
index 151ba31..eaec5fa 100644
|
|
--- a/crypto/lz4hc.c
|
|
+++ b/crypto/lz4hc.c
|
|
@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("lz4hc");
|
|
diff --git a/crypto/lzo.c b/crypto/lzo.c
|
|
index 1c2aa69..d1ff694 100644
|
|
--- a/crypto/lzo.c
|
|
+++ b/crypto/lzo.c
|
|
@@ -103,3 +103,4 @@ module_exit(lzo_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("LZO Compression Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("lzo");
|
|
diff --git a/crypto/md4.c b/crypto/md4.c
|
|
index 0477a6a..3515af4 100644
|
|
--- a/crypto/md4.c
|
|
+++ b/crypto/md4.c
|
|
@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
|
|
-
|
|
+MODULE_ALIAS_CRYPTO("md4");
|
|
diff --git a/crypto/md5.c b/crypto/md5.c
|
|
index 7febeaa..36f5e5b 100644
|
|
--- a/crypto/md5.c
|
|
+++ b/crypto/md5.c
|
|
@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
|
|
+MODULE_ALIAS_CRYPTO("md5");
|
|
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
|
|
index 079b761..46195e0 100644
|
|
--- a/crypto/michael_mic.c
|
|
+++ b/crypto/michael_mic.c
|
|
@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
|
|
MODULE_LICENSE("GPL v2");
|
|
MODULE_DESCRIPTION("Michael MIC");
|
|
MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
|
|
+MODULE_ALIAS_CRYPTO("michael_mic");
|
|
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
|
|
index d1b8bdf..f654965 100644
|
|
--- a/crypto/pcbc.c
|
|
+++ b/crypto/pcbc.c
|
|
@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("PCBC block cipher algorithm");
|
|
+MODULE_ALIAS_CRYPTO("pcbc");
|
|
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
|
|
index 309d345..c305d41 100644
|
|
--- a/crypto/pcrypt.c
|
|
+++ b/crypto/pcrypt.c
|
|
@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
|
|
MODULE_DESCRIPTION("Parallel crypto wrapper");
|
|
+MODULE_ALIAS_CRYPTO("pcrypt");
|
|
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
|
|
index 8a0f68b..049486e 100644
|
|
--- a/crypto/rmd128.c
|
|
+++ b/crypto/rmd128.c
|
|
@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
|
|
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
|
|
+MODULE_ALIAS_CRYPTO("rmd128");
|
|
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
|
|
index 525d7bb..de585e5 100644
|
|
--- a/crypto/rmd160.c
|
|
+++ b/crypto/rmd160.c
|
|
@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
|
|
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
|
|
+MODULE_ALIAS_CRYPTO("rmd160");
|
|
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
|
|
index 69293d9..4ec02a7 100644
|
|
--- a/crypto/rmd256.c
|
|
+++ b/crypto/rmd256.c
|
|
@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
|
|
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
|
|
+MODULE_ALIAS_CRYPTO("rmd256");
|
|
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
|
|
index 09f97df..770f2cb 100644
|
|
--- a/crypto/rmd320.c
|
|
+++ b/crypto/rmd320.c
|
|
@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
|
|
MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
|
|
+MODULE_ALIAS_CRYPTO("rmd320");
|
|
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
|
|
index 9a4770c..f550b5d 100644
|
|
--- a/crypto/salsa20_generic.c
|
|
+++ b/crypto/salsa20_generic.c
|
|
@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
|
|
-MODULE_ALIAS("salsa20");
|
|
+MODULE_ALIAS_CRYPTO("salsa20");
|
|
+MODULE_ALIAS_CRYPTO("salsa20-generic");
|
|
diff --git a/crypto/seed.c b/crypto/seed.c
|
|
index 9c904d6..c6ba843 100644
|
|
--- a/crypto/seed.c
|
|
+++ b/crypto/seed.c
|
|
@@ -476,3 +476,4 @@ module_exit(seed_fini);
|
|
MODULE_DESCRIPTION("SEED Cipher Algorithm");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>");
|
|
+MODULE_ALIAS_CRYPTO("seed");
|
|
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
|
|
index f2cba4ed..49a4069 100644
|
|
--- a/crypto/seqiv.c
|
|
+++ b/crypto/seqiv.c
|
|
@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Sequence Number IV Generator");
|
|
+MODULE_ALIAS_CRYPTO("seqiv");
|
|
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
|
|
index 7ddbd7e..94970a7 100644
|
|
--- a/crypto/serpent_generic.c
|
|
+++ b/crypto/serpent_generic.c
|
|
@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
|
|
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
|
|
-MODULE_ALIAS("tnepres");
|
|
-MODULE_ALIAS("serpent");
|
|
+MODULE_ALIAS_CRYPTO("tnepres");
|
|
+MODULE_ALIAS_CRYPTO("serpent");
|
|
+MODULE_ALIAS_CRYPTO("serpent-generic");
|
|
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
|
|
index 4279480..fdf7c00 100644
|
|
--- a/crypto/sha1_generic.c
|
|
+++ b/crypto/sha1_generic.c
|
|
@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
|
|
|
|
-MODULE_ALIAS("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1");
|
|
+MODULE_ALIAS_CRYPTO("sha1-generic");
|
|
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
|
|
index 5433667..136381b 100644
|
|
--- a/crypto/sha256_generic.c
|
|
+++ b/crypto/sha256_generic.c
|
|
@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
|
|
|
|
-MODULE_ALIAS("sha224");
|
|
-MODULE_ALIAS("sha256");
|
|
+MODULE_ALIAS_CRYPTO("sha224");
|
|
+MODULE_ALIAS_CRYPTO("sha224-generic");
|
|
+MODULE_ALIAS_CRYPTO("sha256");
|
|
+MODULE_ALIAS_CRYPTO("sha256-generic");
|
|
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
|
|
index 6ed124f..6c6d901 100644
|
|
--- a/crypto/sha512_generic.c
|
|
+++ b/crypto/sha512_generic.c
|
|
@@ -287,5 +287,7 @@ module_exit(sha512_generic_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
|
|
|
|
-MODULE_ALIAS("sha384");
|
|
-MODULE_ALIAS("sha512");
|
|
+MODULE_ALIAS_CRYPTO("sha384");
|
|
+MODULE_ALIAS_CRYPTO("sha384-generic");
|
|
+MODULE_ALIAS_CRYPTO("sha512");
|
|
+MODULE_ALIAS_CRYPTO("sha512-generic");
|
|
diff --git a/crypto/tea.c b/crypto/tea.c
|
|
index 0a57232..b70b441 100644
|
|
--- a/crypto/tea.c
|
|
+++ b/crypto/tea.c
|
|
@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void)
|
|
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
|
|
}
|
|
|
|
-MODULE_ALIAS("xtea");
|
|
-MODULE_ALIAS("xeta");
|
|
+MODULE_ALIAS_CRYPTO("tea");
|
|
+MODULE_ALIAS_CRYPTO("xtea");
|
|
+MODULE_ALIAS_CRYPTO("xeta");
|
|
|
|
module_init(tea_mod_init);
|
|
module_exit(tea_mod_fini);
|
|
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
|
|
index 8740355..f7ed2fb 100644
|
|
--- a/crypto/tgr192.c
|
|
+++ b/crypto/tgr192.c
|
|
@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void)
|
|
crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
|
|
}
|
|
|
|
-MODULE_ALIAS("tgr160");
|
|
-MODULE_ALIAS("tgr128");
|
|
+MODULE_ALIAS_CRYPTO("tgr192");
|
|
+MODULE_ALIAS_CRYPTO("tgr160");
|
|
+MODULE_ALIAS_CRYPTO("tgr128");
|
|
|
|
module_init(tgr192_mod_init);
|
|
module_exit(tgr192_mod_fini);
|
|
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
|
|
index 2d50005..ebf7a3e 100644
|
|
--- a/crypto/twofish_generic.c
|
|
+++ b/crypto/twofish_generic.c
|
|
@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
|
|
-MODULE_ALIAS("twofish");
|
|
+MODULE_ALIAS_CRYPTO("twofish");
|
|
+MODULE_ALIAS_CRYPTO("twofish-generic");
|
|
diff --git a/crypto/vmac.c b/crypto/vmac.c
|
|
index 2eb11a3..bf2d3a8 100644
|
|
--- a/crypto/vmac.c
|
|
+++ b/crypto/vmac.c
|
|
@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("VMAC hash algorithm");
|
|
+MODULE_ALIAS_CRYPTO("vmac");
|
|
diff --git a/crypto/wp512.c b/crypto/wp512.c
|
|
index 180f1d6..253db94 100644
|
|
--- a/crypto/wp512.c
|
|
+++ b/crypto/wp512.c
|
|
@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void)
|
|
crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
|
|
}
|
|
|
|
-MODULE_ALIAS("wp384");
|
|
-MODULE_ALIAS("wp256");
|
|
+MODULE_ALIAS_CRYPTO("wp512");
|
|
+MODULE_ALIAS_CRYPTO("wp384");
|
|
+MODULE_ALIAS_CRYPTO("wp256");
|
|
|
|
module_init(wp512_mod_init);
|
|
module_exit(wp512_mod_fini);
|
|
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
|
|
index a5fbdf3..df90b33 100644
|
|
--- a/crypto/xcbc.c
|
|
+++ b/crypto/xcbc.c
|
|
@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
|
|
+MODULE_ALIAS_CRYPTO("xcbc");
|
|
diff --git a/crypto/xts.c b/crypto/xts.c
|
|
index ca1608f..f6fd43f 100644
|
|
--- a/crypto/xts.c
|
|
+++ b/crypto/xts.c
|
|
@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("XTS block cipher mode");
|
|
+MODULE_ALIAS_CRYPTO("xts");
|
|
diff --git a/crypto/zlib.c b/crypto/zlib.c
|
|
index 06b62e5..d980788 100644
|
|
--- a/crypto/zlib.c
|
|
+++ b/crypto/zlib.c
|
|
@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_DESCRIPTION("Zlib Compression Algorithm");
|
|
MODULE_AUTHOR("Sony Corporation");
|
|
+MODULE_ALIAS_CRYPTO("zlib");
|
|
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
|
|
index 84190ed..aff69d9 100644
|
|
--- a/drivers/acpi/acpi_cmos_rtc.c
|
|
+++ b/drivers/acpi/acpi_cmos_rtc.c
|
|
@@ -35,7 +35,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
|
|
void *handler_context, void *region_context)
|
|
{
|
|
int i;
|
|
- u8 *value = (u8 *)&value64;
|
|
+ u8 *value = (u8 *)value64;
|
|
|
|
if (address > 0xff || !value64)
|
|
return AE_BAD_PARAMETER;
|
|
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
|
|
index d95ca54..e6ab104 100644
|
|
--- a/drivers/acpi/acpica/aclocal.h
|
|
+++ b/drivers/acpi/acpica/aclocal.h
|
|
@@ -254,6 +254,7 @@ struct acpi_create_field_info {
|
|
u32 field_bit_position;
|
|
u32 field_bit_length;
|
|
u16 resource_length;
|
|
+ u16 pin_number_index;
|
|
u8 field_flags;
|
|
u8 attribute;
|
|
u8 field_type;
|
|
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
|
|
index 2a86c65..97c7a52 100644
|
|
--- a/drivers/acpi/acpica/acmacros.h
|
|
+++ b/drivers/acpi/acpica/acmacros.h
|
|
@@ -63,19 +63,15 @@
|
|
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
|
|
|
|
/*
|
|
- * printf() format helpers
|
|
+ * printf() format helper. This macros is a workaround for the difficulties
|
|
+ * with emitting 64-bit integers and 64-bit pointers with the same code
|
|
+ * for both 32-bit and 64-bit hosts.
|
|
*/
|
|
|
|
/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
|
|
|
|
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
|
|
|
|
-#if ACPI_MACHINE_WIDTH == 64
|
|
-#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
|
|
-#else
|
|
-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i)
|
|
-#endif
|
|
-
|
|
/*
|
|
* Macros for moving data around to/from buffers that are possibly unaligned.
|
|
* If the hardware supports the transfer of unaligned data, just do the store.
|
|
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
|
|
index cc7ab6d..a47cc78f 100644
|
|
--- a/drivers/acpi/acpica/acobject.h
|
|
+++ b/drivers/acpi/acpica/acobject.h
|
|
@@ -263,6 +263,7 @@ struct acpi_object_region_field {
|
|
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
|
|
union acpi_operand_object *region_obj; /* Containing op_region object */
|
|
u8 *resource_buffer; /* resource_template for serial regions/fields */
|
|
+ u16 pin_number_index; /* Index relative to previous Connection/Template */
|
|
};
|
|
|
|
struct acpi_object_bank_field {
|
|
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
|
|
index e7a57c5..9af55bd 100644
|
|
--- a/drivers/acpi/acpica/dsfield.c
|
|
+++ b/drivers/acpi/acpica/dsfield.c
|
|
@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
|
|
*/
|
|
info->resource_buffer = NULL;
|
|
info->connection_node = NULL;
|
|
+ info->pin_number_index = 0;
|
|
|
|
/*
|
|
* A Connection() is either an actual resource descriptor (buffer)
|
|
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
|
|
}
|
|
|
|
info->field_bit_position += info->field_bit_length;
|
|
+ info->pin_number_index++; /* Index relative to previous Connection() */
|
|
break;
|
|
|
|
default:
|
|
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
|
|
index 5205edc..fe79296 100644
|
|
--- a/drivers/acpi/acpica/dsopcode.c
|
|
+++ b/drivers/acpi/acpica/dsopcode.c
|
|
@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
|
|
obj_desc,
|
|
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
|
|
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
|
|
obj_desc->region.length));
|
|
|
|
/* Now the address and length are valid for this opregion */
|
|
@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
|
|
return_ACPI_STATUS(AE_NOT_EXIST);
|
|
}
|
|
|
|
- obj_desc->region.address =
|
|
- (acpi_physical_address) ACPI_TO_INTEGER(table);
|
|
+ obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
|
|
obj_desc->region.length = table->length;
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
|
|
obj_desc,
|
|
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
|
|
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
|
|
obj_desc->region.length));
|
|
|
|
/* Now the address and length are valid for this opregion */
|
|
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
|
|
index 144cbb9..ee8ec4b 100644
|
|
--- a/drivers/acpi/acpica/evregion.c
|
|
+++ b/drivers/acpi/acpica/evregion.c
|
|
@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|
union acpi_operand_object *region_obj2;
|
|
void *region_context = NULL;
|
|
struct acpi_connection_info *context;
|
|
+ acpi_physical_address address;
|
|
|
|
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
|
|
|
|
@@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|
/* We have everything we need, we can invoke the address space handler */
|
|
|
|
handler = handler_desc->address_space.handler;
|
|
-
|
|
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
|
|
- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
|
|
- ®ion_obj->region.handler->address_space, handler,
|
|
- ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
|
|
- region_offset),
|
|
- acpi_ut_get_region_name(region_obj->region.
|
|
- space_id)));
|
|
+ address = (region_obj->region.address + region_offset);
|
|
|
|
/*
|
|
* Special handling for generic_serial_bus and general_purpose_io:
|
|
* There are three extra parameters that must be passed to the
|
|
* handler via the context:
|
|
- * 1) Connection buffer, a resource template from Connection() op.
|
|
- * 2) Length of the above buffer.
|
|
- * 3) Actual access length from the access_as() op.
|
|
+ * 1) Connection buffer, a resource template from Connection() op
|
|
+ * 2) Length of the above buffer
|
|
+ * 3) Actual access length from the access_as() op
|
|
+ *
|
|
+ * In addition, for general_purpose_io, the Address and bit_width fields
|
|
+ * are defined as follows:
|
|
+ * 1) Address is the pin number index of the field (bit offset from
|
|
+ * the previous Connection)
|
|
+ * 2) bit_width is the actual bit length of the field (number of pins)
|
|
*/
|
|
- if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
|
|
- (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
|
|
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
|
|
context && field_obj) {
|
|
|
|
/* Get the Connection (resource_template) buffer */
|
|
@@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|
context->length = field_obj->field.resource_length;
|
|
context->access_length = field_obj->field.access_length;
|
|
}
|
|
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
|
|
+ context && field_obj) {
|
|
+
|
|
+ /* Get the Connection (resource_template) buffer */
|
|
+
|
|
+ context->connection = field_obj->field.resource_buffer;
|
|
+ context->length = field_obj->field.resource_length;
|
|
+ context->access_length = field_obj->field.access_length;
|
|
+ address = field_obj->field.pin_number_index;
|
|
+ bit_width = field_obj->field.bit_length;
|
|
+ }
|
|
+
|
|
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
|
|
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
|
|
+ ®ion_obj->region.handler->address_space, handler,
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
+ acpi_ut_get_region_name(region_obj->region.
|
|
+ space_id)));
|
|
|
|
if (!(handler_desc->address_space.handler_flags &
|
|
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
|
|
@@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|
|
|
/* Call the handler */
|
|
|
|
- status = handler(function,
|
|
- (region_obj->region.address + region_offset),
|
|
- bit_width, value, context,
|
|
+ status = handler(function, address, bit_width, value, context,
|
|
region_obj2->extra.region_context);
|
|
|
|
if (ACPI_FAILURE(status)) {
|
|
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
|
|
index 4d046fa..b64fb68 100644
|
|
--- a/drivers/acpi/acpica/exdump.c
|
|
+++ b/drivers/acpi/acpica/exdump.c
|
|
@@ -622,8 +622,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
|
|
acpi_os_printf("\n");
|
|
} else {
|
|
acpi_os_printf(" base %8.8X%8.8X Length %X\n",
|
|
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
|
|
- address),
|
|
+ ACPI_FORMAT_UINT64(obj_desc->region.
|
|
+ address),
|
|
obj_desc->region.length);
|
|
}
|
|
break;
|
|
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
|
|
index cfd8752..d36894a 100644
|
|
--- a/drivers/acpi/acpica/exfield.c
|
|
+++ b/drivers/acpi/acpica/exfield.c
|
|
@@ -178,6 +178,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
|
|
buffer = &buffer_desc->integer.value;
|
|
}
|
|
|
|
+ if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
|
|
+ (obj_desc->field.region_obj->region.space_id ==
|
|
+ ACPI_ADR_SPACE_GPIO)) {
|
|
+ /*
|
|
+ * For GPIO (general_purpose_io), the Address will be the bit offset
|
|
+ * from the previous Connection() operator, making it effectively a
|
|
+ * pin number index. The bit_length is the length of the field, which
|
|
+ * is thus the number of pins.
|
|
+ */
|
|
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
|
|
+ "GPIO FieldRead [FROM]: Pin %u Bits %u\n",
|
|
+ obj_desc->field.pin_number_index,
|
|
+ obj_desc->field.bit_length));
|
|
+
|
|
+ /* Lock entire transaction if requested */
|
|
+
|
|
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
|
|
+
|
|
+ /* Perform the write */
|
|
+
|
|
+ status = acpi_ex_access_region(obj_desc, 0,
|
|
+ (u64 *)buffer, ACPI_READ);
|
|
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
|
|
+ if (ACPI_FAILURE(status)) {
|
|
+ acpi_ut_remove_reference(buffer_desc);
|
|
+ } else {
|
|
+ *ret_buffer_desc = buffer_desc;
|
|
+ }
|
|
+ return_ACPI_STATUS(status);
|
|
+ }
|
|
+
|
|
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
|
|
"FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
|
|
obj_desc, obj_desc->common.type, buffer,
|
|
@@ -325,6 +356,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
|
|
|
|
*result_desc = buffer_desc;
|
|
return_ACPI_STATUS(status);
|
|
+ } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
|
|
+ (obj_desc->field.region_obj->region.space_id ==
|
|
+ ACPI_ADR_SPACE_GPIO)) {
|
|
+ /*
|
|
+ * For GPIO (general_purpose_io), we will bypass the entire field
|
|
+ * mechanism and handoff the bit address and bit width directly to
|
|
+ * the handler. The Address will be the bit offset
|
|
+ * from the previous Connection() operator, making it effectively a
|
|
+ * pin number index. The bit_length is the length of the field, which
|
|
+ * is thus the number of pins.
|
|
+ */
|
|
+ if (source_desc->common.type != ACPI_TYPE_INTEGER) {
|
|
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
|
+ }
|
|
+
|
|
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
|
|
+ "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n",
|
|
+ acpi_ut_get_type_name(source_desc->common.
|
|
+ type),
|
|
+ source_desc->common.type,
|
|
+ (u32)source_desc->integer.value,
|
|
+ obj_desc->field.pin_number_index,
|
|
+ obj_desc->field.bit_length));
|
|
+
|
|
+ buffer = &source_desc->integer.value;
|
|
+
|
|
+ /* Lock entire transaction if requested */
|
|
+
|
|
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
|
|
+
|
|
+ /* Perform the write */
|
|
+
|
|
+ status = acpi_ex_access_region(obj_desc, 0,
|
|
+ (u64 *)buffer, ACPI_WRITE);
|
|
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
|
|
+ return_ACPI_STATUS(status);
|
|
}
|
|
|
|
/* Get a pointer to the data to be written */
|
|
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
|
|
index 49fb742..98af39f 100644
|
|
--- a/drivers/acpi/acpica/exfldio.c
|
|
+++ b/drivers/acpi/acpica/exfldio.c
|
|
@@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
|
|
}
|
|
|
|
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
|
|
- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
|
|
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
|
|
acpi_ut_get_region_name(rgn_desc->region.
|
|
space_id),
|
|
rgn_desc->region.space_id,
|
|
obj_desc->common_field.access_byte_width,
|
|
obj_desc->common_field.base_byte_offset,
|
|
- field_datum_byte_offset, ACPI_CAST_PTR(void,
|
|
- (rgn_desc->
|
|
- region.
|
|
- address +
|
|
- region_offset))));
|
|
+ field_datum_byte_offset,
|
|
+ ACPI_FORMAT_UINT64(rgn_desc->region.address +
|
|
+ region_offset)));
|
|
|
|
/* Invoke the appropriate address_space/op_region handler */
|
|
|
|
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
|
|
index 5a58861..8c88cfd 100644
|
|
--- a/drivers/acpi/acpica/exprep.c
|
|
+++ b/drivers/acpi/acpica/exprep.c
|
|
@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
|
|
obj_desc->field.resource_length = info->resource_length;
|
|
}
|
|
|
|
+ obj_desc->field.pin_number_index = info->pin_number_index;
|
|
+
|
|
/* Allow full data read from EC address space */
|
|
|
|
if ((obj_desc->field.region_obj->region.space_id ==
|
|
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
|
|
index 9d28867e..cf40223 100644
|
|
--- a/drivers/acpi/acpica/exregion.c
|
|
+++ b/drivers/acpi/acpica/exregion.c
|
|
@@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function,
|
|
if (!mem_info->mapped_logical_address) {
|
|
ACPI_ERROR((AE_INFO,
|
|
"Could not map memory at 0x%8.8X%8.8X, size %u",
|
|
- ACPI_FORMAT_NATIVE_UINT(address),
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
(u32) map_length));
|
|
mem_info->mapped_length = 0;
|
|
return_ACPI_STATUS(AE_NO_MEMORY);
|
|
@@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function,
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
|
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
|
|
- bit_width, function,
|
|
- ACPI_FORMAT_NATIVE_UINT(address)));
|
|
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
|
|
|
|
/*
|
|
* Perform the memory read or write
|
|
@@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function,
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
|
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
|
|
- bit_width, function,
|
|
- ACPI_FORMAT_NATIVE_UINT(address)));
|
|
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
|
|
|
|
/* Decode the function parameter */
|
|
|
|
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
|
|
index eab70d5..fae5758 100644
|
|
--- a/drivers/acpi/acpica/hwvalid.c
|
|
+++ b/drivers/acpi/acpica/hwvalid.c
|
|
@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
|
|
byte_width = ACPI_DIV_8(bit_width);
|
|
last_address = address + byte_width - 1;
|
|
|
|
- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
|
|
- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
|
|
- last_address),
|
|
- byte_width));
|
|
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
|
+ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
+ ACPI_FORMAT_UINT64(last_address), byte_width));
|
|
|
|
/* Maximum 16-bit address in I/O space */
|
|
|
|
if (last_address > ACPI_UINT16_MAX) {
|
|
ACPI_ERROR((AE_INFO,
|
|
- "Illegal I/O port address/length above 64K: %p/0x%X",
|
|
- ACPI_CAST_PTR(void, address), byte_width));
|
|
+ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
|
|
+ ACPI_FORMAT_UINT64(address), byte_width));
|
|
return_ACPI_STATUS(AE_LIMIT);
|
|
}
|
|
|
|
@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
|
|
|
|
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
|
|
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
|
- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
|
|
- ACPI_CAST_PTR(void, address),
|
|
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
byte_width, port_info->name,
|
|
port_info->start,
|
|
port_info->end));
|
|
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
|
|
index 48b9c6f..fc82c53 100644
|
|
--- a/drivers/acpi/acpica/nsdump.c
|
|
+++ b/drivers/acpi/acpica/nsdump.c
|
|
@@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
|
|
switch (type) {
|
|
case ACPI_TYPE_PROCESSOR:
|
|
|
|
- acpi_os_printf("ID %02X Len %02X Addr %p\n",
|
|
+ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
|
|
obj_desc->processor.proc_id,
|
|
obj_desc->processor.length,
|
|
- ACPI_CAST_PTR(void,
|
|
- obj_desc->processor.
|
|
- address));
|
|
+ ACPI_FORMAT_UINT64(obj_desc->processor.
|
|
+ address));
|
|
break;
|
|
|
|
case ACPI_TYPE_DEVICE:
|
|
@@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
|
|
space_id));
|
|
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
|
|
acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
|
|
- ACPI_FORMAT_NATIVE_UINT
|
|
- (obj_desc->region.address),
|
|
+ ACPI_FORMAT_UINT64(obj_desc->
|
|
+ region.
|
|
+ address),
|
|
obj_desc->region.length);
|
|
} else {
|
|
acpi_os_printf
|
|
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
|
|
index 634357d..c4d0977 100644
|
|
--- a/drivers/acpi/acpica/tbinstal.c
|
|
+++ b/drivers/acpi/acpica/tbinstal.c
|
|
@@ -294,8 +294,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
|
|
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
|
|
"%4.4s %p Attempted physical table override failed",
|
|
table_header->signature,
|
|
- ACPI_CAST_PTR(void,
|
|
- table_desc->address)));
|
|
+ ACPI_PHYSADDR_TO_PTR(table_desc->address)));
|
|
return (NULL);
|
|
}
|
|
|
|
@@ -311,7 +310,7 @@ finish_override:
|
|
ACPI_INFO((AE_INFO,
|
|
"%4.4s %p %s table override, new table: %p",
|
|
table_header->signature,
|
|
- ACPI_CAST_PTR(void, table_desc->address),
|
|
+ ACPI_PHYSADDR_TO_PTR(table_desc->address),
|
|
override_type, new_table));
|
|
|
|
/* We can now unmap/delete the original table (if fully mapped) */
|
|
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
|
|
index 6866e76..2957ed5 100644
|
|
--- a/drivers/acpi/acpica/tbprint.c
|
|
+++ b/drivers/acpi/acpica/tbprint.c
|
|
@@ -127,16 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
|
{
|
|
struct acpi_table_header local_header;
|
|
|
|
- /*
|
|
- * The reason that the Address is cast to a void pointer is so that we
|
|
- * can use %p which will work properly on both 32-bit and 64-bit hosts.
|
|
- */
|
|
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
|
|
|
|
/* FACS only has signature and length fields */
|
|
|
|
- ACPI_INFO((AE_INFO, "%4.4s %p %06X",
|
|
- header->signature, ACPI_CAST_PTR(void, address),
|
|
+ ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
|
|
+ header->signature, ACPI_FORMAT_UINT64(address),
|
|
header->length));
|
|
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
|
|
|
|
@@ -147,8 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
|
header)->oem_id, ACPI_OEM_ID_SIZE);
|
|
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
|
|
|
|
- ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
|
|
- ACPI_CAST_PTR(void, address),
|
|
+ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
|
|
revision >
|
|
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
|
|
@@ -162,8 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
|
acpi_tb_cleanup_table_header(&local_header, header);
|
|
|
|
ACPI_INFO((AE_INFO,
|
|
- "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
|
|
- local_header.signature, ACPI_CAST_PTR(void, address),
|
|
+ "%-4.4s 0x%8.8X%8.8X"
|
|
+ " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
|
|
+ local_header.signature, ACPI_FORMAT_UINT64(address),
|
|
local_header.length, local_header.revision,
|
|
local_header.oem_id, local_header.oem_table_id,
|
|
local_header.oem_revision,
|
|
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
|
|
index 1bc879e..4cca6b7 100644
|
|
--- a/drivers/acpi/acpica/tbutils.c
|
|
+++ b/drivers/acpi/acpica/tbutils.c
|
|
@@ -227,8 +227,8 @@ acpi_tb_install_table(acpi_physical_address address,
|
|
table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
|
|
if (!table) {
|
|
ACPI_ERROR((AE_INFO,
|
|
- "Could not map memory for table [%s] at %p",
|
|
- signature, ACPI_CAST_PTR(void, address)));
|
|
+ "Could not map memory for table [%s] at %8.8X%8.8X",
|
|
+ signature, ACPI_FORMAT_UINT64(address)));
|
|
return;
|
|
}
|
|
|
|
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
|
|
index 60b5a87..daad59d 100644
|
|
--- a/drivers/acpi/acpica/tbxfload.c
|
|
+++ b/drivers/acpi/acpica/tbxfload.c
|
|
@@ -184,11 +184,10 @@ static acpi_status acpi_tb_load_namespace(void)
|
|
* be useful for debugging ACPI problems on some machines.
|
|
*/
|
|
if (acpi_gbl_disable_ssdt_table_load) {
|
|
- ACPI_INFO((AE_INFO, "Ignoring %4.4s at %p",
|
|
+ ACPI_INFO((AE_INFO, "Ignoring %4.4s at %8.8X%8.8X",
|
|
acpi_gbl_root_table_list.tables[i].signature.
|
|
- ascii, ACPI_CAST_PTR(void,
|
|
- acpi_gbl_root_table_list.
|
|
- tables[i].address)));
|
|
+ ascii, ACPI_FORMAT_UINT64(acpi_gbl_root_table_list.
|
|
+ tables[i].address)));
|
|
continue;
|
|
}
|
|
|
|
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
|
|
index e4e1468..01bf7eb 100644
|
|
--- a/drivers/acpi/acpica/tbxfroot.c
|
|
+++ b/drivers/acpi/acpica/tbxfroot.c
|
|
@@ -111,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
|
|
*
|
|
******************************************************************************/
|
|
|
|
-acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
|
|
+acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
|
|
{
|
|
u8 *table_ptr;
|
|
u8 *mem_rover;
|
|
@@ -169,7 +169,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
|
|
physical_address +=
|
|
(u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
|
|
|
|
- *table_address = physical_address;
|
|
+ *table_address =
|
|
+ (acpi_physical_address) physical_address;
|
|
return_ACPI_STATUS(AE_OK);
|
|
}
|
|
}
|
|
@@ -202,7 +203,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
|
|
(ACPI_HI_RSDP_WINDOW_BASE +
|
|
ACPI_PTR_DIFF(mem_rover, table_ptr));
|
|
|
|
- *table_address = physical_address;
|
|
+ *table_address = (acpi_physical_address) physical_address;
|
|
return_ACPI_STATUS(AE_OK);
|
|
}
|
|
|
|
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
|
|
index 2c2b6ae..3a02b65 100644
|
|
--- a/drivers/acpi/acpica/utaddress.c
|
|
+++ b/drivers/acpi/acpica/utaddress.c
|
|
@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
|
|
acpi_gbl_address_range_list[space_id] = range_info;
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
|
|
- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
|
|
+ "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
|
|
acpi_ut_get_node_name(range_info->region_node),
|
|
- ACPI_CAST_PTR(void, address),
|
|
- ACPI_CAST_PTR(void, range_info->end_address)));
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
+ ACPI_FORMAT_UINT64(range_info->end_address)));
|
|
|
|
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
|
return_ACPI_STATUS(AE_OK);
|
|
@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
|
|
}
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
|
|
- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
|
|
+ "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
|
|
acpi_ut_get_node_name(range_info->
|
|
region_node),
|
|
- ACPI_CAST_PTR(void,
|
|
- range_info->
|
|
- start_address),
|
|
- ACPI_CAST_PTR(void,
|
|
- range_info->
|
|
- end_address)));
|
|
+ ACPI_FORMAT_UINT64(range_info->
|
|
+ start_address),
|
|
+ ACPI_FORMAT_UINT64(range_info->
|
|
+ end_address)));
|
|
|
|
ACPI_FREE(range_info);
|
|
return_VOID;
|
|
@@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
|
|
region_node);
|
|
|
|
ACPI_WARNING((AE_INFO,
|
|
- "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
|
|
+ "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
|
|
acpi_ut_get_region_name(space_id),
|
|
- ACPI_CAST_PTR(void, address),
|
|
- ACPI_CAST_PTR(void, end_address),
|
|
- ACPI_CAST_PTR(void,
|
|
- range_info->
|
|
- start_address),
|
|
- ACPI_CAST_PTR(void,
|
|
- range_info->
|
|
- end_address),
|
|
+ ACPI_FORMAT_UINT64(address),
|
|
+ ACPI_FORMAT_UINT64(end_address),
|
|
+ ACPI_FORMAT_UINT64(range_info->
|
|
+ start_address),
|
|
+ ACPI_FORMAT_UINT64(range_info->
|
|
+ end_address),
|
|
pathname));
|
|
ACPI_FREE(pathname);
|
|
}
|
|
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
|
|
index edff4e6..c66bca1 100644
|
|
--- a/drivers/acpi/acpica/utcopy.c
|
|
+++ b/drivers/acpi/acpica/utcopy.c
|
|
@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
|
|
status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
|
|
}
|
|
|
|
+ /* Delete the allocated object if copy failed */
|
|
+
|
|
+ if (ACPI_FAILURE(status)) {
|
|
+ acpi_ut_remove_reference(*dest_desc);
|
|
+ }
|
|
+
|
|
return_ACPI_STATUS(status);
|
|
}
|
|
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
|
|
index 246ef68..2c3c578 100644
|
|
--- a/drivers/acpi/acpica/utxfinit.c
|
|
+++ b/drivers/acpi/acpica/utxfinit.c
|
|
@@ -175,10 +175,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
|
|
* Obtain a permanent mapping for the FACS. This is required for the
|
|
* Global Lock and the Firmware Waking Vector
|
|
*/
|
|
- status = acpi_tb_initialize_facs();
|
|
- if (ACPI_FAILURE(status)) {
|
|
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
|
|
- return_ACPI_STATUS(status);
|
|
+ if (!(flags & ACPI_NO_FACS_INIT)) {
|
|
+ status = acpi_tb_initialize_facs();
|
|
+ if (ACPI_FAILURE(status)) {
|
|
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
|
|
+ return_ACPI_STATUS(status);
|
|
+ }
|
|
}
|
|
#endif /* !ACPI_REDUCED_HARDWARE */
|
|
|
|
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
|
|
index b48aefa..60be8d0 100644
|
|
--- a/drivers/acpi/bus.c
|
|
+++ b/drivers/acpi/bus.c
|
|
@@ -450,6 +450,16 @@ static int __init acpi_bus_init_irq(void)
|
|
u8 acpi_gbl_permanent_mmap;
|
|
|
|
|
|
+/**
|
|
+ * acpi_early_init - Initialize ACPICA and populate the ACPI namespace.
|
|
+ *
|
|
+ * The ACPI tables are accessible after this, but the handling of events has not
|
|
+ * been initialized and the global lock is not available yet, so AML should not
|
|
+ * be executed at this point.
|
|
+ *
|
|
+ * Doing this before switching the EFI runtime services to virtual mode allows
|
|
+ * the EfiBootServices memory to be freed slightly earlier on boot.
|
|
+ */
|
|
void __init acpi_early_init(void)
|
|
{
|
|
acpi_status status;
|
|
@@ -510,26 +520,42 @@ void __init acpi_early_init(void)
|
|
acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
|
|
}
|
|
#endif
|
|
+ return;
|
|
+
|
|
+ error0:
|
|
+ disable_acpi();
|
|
+}
|
|
+
|
|
+/**
|
|
+ * acpi_subsystem_init - Finalize the early initialization of ACPI.
|
|
+ *
|
|
+ * Switch over the platform to the ACPI mode (if possible), initialize the
|
|
+ * handling of ACPI events, install the interrupt and global lock handlers.
|
|
+ *
|
|
+ * Doing this too early is generally unsafe, but at the same time it needs to be
|
|
+ * done before all things that really depend on ACPI. The right spot appears to
|
|
+ * be before finalizing the EFI initialization.
|
|
+ */
|
|
+void __init acpi_subsystem_init(void)
|
|
+{
|
|
+ acpi_status status;
|
|
+
|
|
+ if (acpi_disabled)
|
|
+ return;
|
|
|
|
status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
|
|
if (ACPI_FAILURE(status)) {
|
|
printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
|
|
- goto error0;
|
|
+ disable_acpi();
|
|
+ } else {
|
|
+ /*
|
|
+ * If the system is using ACPI then we can be reasonably
|
|
+ * confident that any regulators are managed by the firmware
|
|
+ * so tell the regulator core it has everything it needs to
|
|
+ * know.
|
|
+ */
|
|
+ regulator_has_full_constraints();
|
|
}
|
|
-
|
|
- /*
|
|
- * If the system is using ACPI then we can be reasonably
|
|
- * confident that any regulators are managed by the firmware
|
|
- * so tell the regulator core it has everything it needs to
|
|
- * know.
|
|
- */
|
|
- regulator_has_full_constraints();
|
|
-
|
|
- return;
|
|
-
|
|
- error0:
|
|
- disable_acpi();
|
|
- return;
|
|
}
|
|
|
|
static int __init acpi_bus_init(void)
|
|
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
|
|
index 368f9dd..e4a6f78 100644
|
|
--- a/drivers/acpi/container.c
|
|
+++ b/drivers/acpi/container.c
|
|
@@ -96,6 +96,13 @@ static void container_device_detach(struct acpi_device *adev)
|
|
device_unregister(dev);
|
|
}
|
|
|
|
+static void container_device_online(struct acpi_device *adev)
|
|
+{
|
|
+ struct device *dev = acpi_driver_data(adev);
|
|
+
|
|
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
|
|
+}
|
|
+
|
|
static struct acpi_scan_handler container_handler = {
|
|
.ids = container_device_ids,
|
|
.attach = container_device_attach,
|
|
@@ -103,6 +110,7 @@ static struct acpi_scan_handler container_handler = {
|
|
.hotplug = {
|
|
.enabled = true,
|
|
.demand_offline = true,
|
|
+ .notify_online = container_device_online,
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
|
|
index c14a00d..19f6505 100644
|
|
--- a/drivers/acpi/device_pm.c
|
|
+++ b/drivers/acpi/device_pm.c
|
|
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
|
|
|
|
device->power.state = ACPI_STATE_UNKNOWN;
|
|
if (!acpi_device_is_present(device))
|
|
- return 0;
|
|
+ return -ENXIO;
|
|
|
|
result = acpi_device_get_power(device, &state);
|
|
if (result)
|
|
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
|
|
index fc1aa79..726c969 100644
|
|
--- a/drivers/acpi/osl.c
|
|
+++ b/drivers/acpi/osl.c
|
|
@@ -172,7 +172,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
|
|
request_mem_region(addr, length, desc);
|
|
}
|
|
|
|
-static int __init acpi_reserve_resources(void)
|
|
+static void __init acpi_reserve_resources(void)
|
|
{
|
|
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
|
|
"ACPI PM1a_EVT_BLK");
|
|
@@ -201,10 +201,7 @@ static int __init acpi_reserve_resources(void)
|
|
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
|
|
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
|
|
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
|
|
-
|
|
- return 0;
|
|
}
|
|
-device_initcall(acpi_reserve_resources);
|
|
|
|
void acpi_os_printf(const char *fmt, ...)
|
|
{
|
|
@@ -1792,6 +1789,7 @@ acpi_status __init acpi_os_initialize(void)
|
|
|
|
acpi_status __init acpi_os_initialize1(void)
|
|
{
|
|
+ acpi_reserve_resources();
|
|
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
|
|
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
|
|
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
|
|
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
|
|
index 3dca36d..fd8496a 100644
|
|
--- a/drivers/acpi/processor_idle.c
|
|
+++ b/drivers/acpi/processor_idle.c
|
|
@@ -962,7 +962,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
|
|
return -EINVAL;
|
|
|
|
drv->safe_state_index = -1;
|
|
- for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
|
|
+ for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
|
|
drv->states[i].name[0] = '\0';
|
|
drv->states[i].desc[0] = '\0';
|
|
}
|
|
@@ -1071,9 +1071,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
|
|
|
if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
|
|
|
|
- cpuidle_pause_and_lock();
|
|
/* Protect against cpu-hotplug */
|
|
get_online_cpus();
|
|
+ cpuidle_pause_and_lock();
|
|
|
|
/* Disable all cpuidle devices */
|
|
for_each_online_cpu(cpu) {
|
|
@@ -1100,8 +1100,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
|
cpuidle_enable_device(dev);
|
|
}
|
|
}
|
|
- put_online_cpus();
|
|
cpuidle_resume_and_unlock();
|
|
+ put_online_cpus();
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
|
|
index 57b053f..9498c3d 100644
|
|
--- a/drivers/acpi/scan.c
|
|
+++ b/drivers/acpi/scan.c
|
|
@@ -106,7 +106,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
|
|
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
|
|
count = snprintf(&modalias[len], size, "%s:", id->id);
|
|
if (count < 0)
|
|
- return EINVAL;
|
|
+ return -EINVAL;
|
|
if (count >= size)
|
|
return -ENOMEM;
|
|
len += count;
|
|
@@ -192,7 +192,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
|
|
struct acpi_device_physical_node *pn;
|
|
bool offline = true;
|
|
|
|
- mutex_lock(&adev->physical_node_lock);
|
|
+ /*
|
|
+ * acpi_container_offline() calls this for all of the container's
|
|
+ * children under the container's physical_node_lock lock.
|
|
+ */
|
|
+ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
|
|
|
|
list_for_each_entry(pn, &adev->physical_node_list, node)
|
|
if (device_supports_offline(pn->dev) && !pn->dev->offline) {
|
|
@@ -329,7 +333,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
|
|
unsigned long long sta;
|
|
acpi_status status;
|
|
|
|
- if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) {
|
|
+ if (device->handler && device->handler->hotplug.demand_offline
|
|
+ && !acpi_force_hot_remove) {
|
|
if (!acpi_scan_is_offline(device, true))
|
|
return -EBUSY;
|
|
} else {
|
|
@@ -660,8 +665,14 @@ static ssize_t
|
|
acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
|
|
char *buf) {
|
|
struct acpi_device *acpi_dev = to_acpi_device(dev);
|
|
+ acpi_status status;
|
|
+ unsigned long long sun;
|
|
|
|
- return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
|
|
+ status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
|
|
+ if (ACPI_FAILURE(status))
|
|
+ return -ENODEV;
|
|
+
|
|
+ return sprintf(buf, "%llu\n", sun);
|
|
}
|
|
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
|
|
|
|
@@ -683,7 +694,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
|
|
{
|
|
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
|
|
acpi_status status;
|
|
- unsigned long long sun;
|
|
int result = 0;
|
|
|
|
/*
|
|
@@ -724,14 +734,10 @@ static int acpi_device_setup_files(struct acpi_device *dev)
|
|
if (dev->pnp.unique_id)
|
|
result = device_create_file(&dev->dev, &dev_attr_uid);
|
|
|
|
- status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
|
|
- if (ACPI_SUCCESS(status)) {
|
|
- dev->pnp.sun = (unsigned long)sun;
|
|
+ if (acpi_has_method(dev->handle, "_SUN")) {
|
|
result = device_create_file(&dev->dev, &dev_attr_sun);
|
|
if (result)
|
|
goto end;
|
|
- } else {
|
|
- dev->pnp.sun = (unsigned long)-1;
|
|
}
|
|
|
|
if (acpi_has_method(dev->handle, "_STA")) {
|
|
@@ -863,7 +869,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
|
|
if (device->wakeup.flags.valid)
|
|
acpi_power_resources_list_free(&device->wakeup.resources);
|
|
|
|
- if (!device->flags.power_manageable)
|
|
+ if (!device->power.flags.power_resources)
|
|
return;
|
|
|
|
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
|
|
@@ -915,12 +921,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
|
|
device->driver->ops.notify(device, event);
|
|
}
|
|
|
|
-static acpi_status acpi_device_notify_fixed(void *data)
|
|
+static void acpi_device_notify_fixed(void *data)
|
|
{
|
|
struct acpi_device *device = data;
|
|
|
|
/* Fixed hardware devices have no handles */
|
|
acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
|
|
+}
|
|
+
|
|
+static acpi_status acpi_device_fixed_event(void *data)
|
|
+{
|
|
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
|
|
return AE_OK;
|
|
}
|
|
|
|
@@ -931,12 +942,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
|
|
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
|
|
status =
|
|
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
|
|
- acpi_device_notify_fixed,
|
|
+ acpi_device_fixed_event,
|
|
device);
|
|
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
|
|
status =
|
|
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
|
|
- acpi_device_notify_fixed,
|
|
+ acpi_device_fixed_event,
|
|
device);
|
|
else
|
|
status = acpi_install_notify_handler(device->handle,
|
|
@@ -953,10 +964,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
|
|
{
|
|
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
|
|
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
|
|
- acpi_device_notify_fixed);
|
|
+ acpi_device_fixed_event);
|
|
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
|
|
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
|
|
- acpi_device_notify_fixed);
|
|
+ acpi_device_fixed_event);
|
|
else
|
|
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
|
|
acpi_device_notify);
|
|
@@ -1547,10 +1558,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
|
|
device->power.flags.power_resources)
|
|
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
|
|
|
|
- if (acpi_bus_init_power(device)) {
|
|
- acpi_free_power_resources_lists(device);
|
|
+ if (acpi_bus_init_power(device))
|
|
device->flags.power_manageable = 0;
|
|
- }
|
|
}
|
|
|
|
static void acpi_bus_get_flags(struct acpi_device *device)
|
|
@@ -2036,13 +2045,18 @@ static void acpi_bus_attach(struct acpi_device *device)
|
|
/* Skip devices that are not present. */
|
|
if (!acpi_device_is_present(device)) {
|
|
device->flags.visited = false;
|
|
+ device->flags.power_manageable = 0;
|
|
return;
|
|
}
|
|
if (device->handler)
|
|
goto ok;
|
|
|
|
if (!device->flags.initialized) {
|
|
- acpi_bus_update_power(device, NULL);
|
|
+ device->flags.power_manageable =
|
|
+ device->power.states[ACPI_STATE_D0].flags.valid;
|
|
+ if (acpi_bus_init_power(device))
|
|
+ device->flags.power_manageable = 0;
|
|
+
|
|
device->flags.initialized = true;
|
|
}
|
|
device->flags.visited = false;
|
|
@@ -2061,6 +2075,9 @@ static void acpi_bus_attach(struct acpi_device *device)
|
|
ok:
|
|
list_for_each_entry(child, &device->children, node)
|
|
acpi_bus_attach(child);
|
|
+
|
|
+ if (device->handler && device->handler->hotplug.notify_online)
|
|
+ device->handler->hotplug.notify_online(device);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
|
|
index bb0b904..997540d 100644
|
|
--- a/drivers/acpi/video.c
|
|
+++ b/drivers/acpi/video.c
|
|
@@ -2064,6 +2064,17 @@ EXPORT_SYMBOL(acpi_video_unregister);
|
|
|
|
static int __init acpi_video_init(void)
|
|
{
|
|
+ /*
|
|
+ * Let the module load even if ACPI is disabled (e.g. due to
|
|
+ * a broken BIOS) so that i915.ko can still be loaded on such
|
|
+ * old systems without an AcpiOpRegion.
|
|
+ *
|
|
+ * acpi_video_register() will report -ENODEV later as well due
|
|
+ * to acpi_disabled when i915.ko tries to register itself afterwards.
|
|
+ */
|
|
+ if (acpi_disabled)
|
|
+ return 0;
|
|
+
|
|
dmi_check_system(video_dmi_table);
|
|
|
|
if (intel_opregion_present())
|
|
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
|
|
index 3e23046..47d28b9 100644
|
|
--- a/drivers/ata/ahci.c
|
|
+++ b/drivers/ata/ahci.c
|
|
@@ -60,6 +60,7 @@ enum board_ids {
|
|
/* board IDs by feature in alphabetical order */
|
|
board_ahci,
|
|
board_ahci_ign_iferr,
|
|
+ board_ahci_nomsi,
|
|
board_ahci_noncq,
|
|
board_ahci_nosntf,
|
|
board_ahci_yes_fbs,
|
|
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
|
|
.udma_mask = ATA_UDMA6,
|
|
.port_ops = &ahci_ops,
|
|
},
|
|
+ [board_ahci_nomsi] = {
|
|
+ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
|
|
+ .flags = AHCI_FLAG_COMMON,
|
|
+ .pio_mask = ATA_PIO4,
|
|
+ .udma_mask = ATA_UDMA6,
|
|
+ .port_ops = &ahci_ops,
|
|
+ },
|
|
[board_ahci_noncq] = {
|
|
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
|
|
.flags = AHCI_FLAG_COMMON,
|
|
@@ -305,6 +313,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
|
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
|
|
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
|
|
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
|
|
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
|
|
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
|
|
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
|
|
|
|
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
|
|
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
|
|
@@ -442,6 +466,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
|
|
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
|
|
+ .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
|
|
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
|
|
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
|
|
@@ -456,6 +482,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
|
|
|
/* Promise */
|
|
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
|
|
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
|
|
|
|
/* Asmedia */
|
|
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
|
|
@@ -464,10 +491,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
|
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
|
|
|
|
/*
|
|
- * Samsung SSDs found on some macbooks. NCQ times out.
|
|
- * https://bugzilla.kernel.org/show_bug.cgi?id=60731
|
|
+ * Samsung SSDs found on some macbooks. NCQ times out if MSI is
|
|
+ * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
|
|
*/
|
|
- { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
|
|
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
|
|
+ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
|
|
|
|
/* Enmotus */
|
|
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
|
|
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
|
|
index 6334c8d..39f76b9 100644
|
|
--- a/drivers/ata/ata_piix.c
|
|
+++ b/drivers/ata/ata_piix.c
|
|
@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
|
|
{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
|
|
/* SATA Controller IDE (Coleto Creek) */
|
|
{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
|
|
+ /* SATA Controller IDE (9 Series) */
|
|
+ { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
|
|
+ /* SATA Controller IDE (9 Series) */
|
|
+ { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
|
|
+ /* SATA Controller IDE (9 Series) */
|
|
+ { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
|
|
+ /* SATA Controller IDE (9 Series) */
|
|
+ { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
|
|
|
|
{ } /* terminate list */
|
|
};
|
|
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
|
|
index 5509e8e..0b6cb25 100644
|
|
--- a/drivers/ata/libahci.c
|
|
+++ b/drivers/ata/libahci.c
|
|
@@ -1711,8 +1711,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
|
|
if (unlikely(resetting))
|
|
status &= ~PORT_IRQ_BAD_PMP;
|
|
|
|
- /* if LPM is enabled, PHYRDY doesn't mean anything */
|
|
- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
|
|
+ if (sata_lpm_ignore_phy_events(&ap->link)) {
|
|
status &= ~PORT_IRQ_PHYRDY;
|
|
ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
|
|
}
|
|
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
|
|
index d09fca7..01354dd 100644
|
|
--- a/drivers/ata/libata-core.c
|
|
+++ b/drivers/ata/libata-core.c
|
|
@@ -4173,9 +4173,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
|
|
ATA_HORKAGE_FIRMWARE_WARN },
|
|
|
|
- /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
|
|
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
|
|
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
|
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
|
+ { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
|
|
|
/* Blacklist entries taken from Silicon Image 3124/3132
|
|
Windows driver .inf file - also several Linux problem reports */
|
|
@@ -4227,7 +4228,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
|
{ "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
|
{ "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
|
- { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
|
+ { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
|
+
|
|
+ /* devices that don't properly handle TRIM commands */
|
|
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
|
|
|
|
/*
|
|
* Some WD SATA-I drives spin up and down erratically when the link
|
|
@@ -4533,7 +4537,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
|
|
else /* In the ancient relic department - skip all of this */
|
|
return 0;
|
|
|
|
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
|
|
+ /* On some disks, this command causes spin-up, so we need longer timeout */
|
|
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
|
|
|
|
DPRINTK("EXIT, err_mask=%x\n", err_mask);
|
|
return err_mask;
|
|
@@ -4787,6 +4792,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
|
|
* ata_qc_new - Request an available ATA command, for queueing
|
|
* @ap: target port
|
|
*
|
|
+ * Some ATA host controllers may implement a queue depth which is less
|
|
+ * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
|
|
+ * the hardware limitation.
|
|
+ *
|
|
* LOCKING:
|
|
* None.
|
|
*/
|
|
@@ -4794,14 +4803,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
|
|
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
|
|
{
|
|
struct ata_queued_cmd *qc = NULL;
|
|
+ unsigned int max_queue = ap->host->n_tags;
|
|
unsigned int i, tag;
|
|
|
|
/* no command while frozen */
|
|
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
|
|
return NULL;
|
|
|
|
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
|
|
- tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
|
|
+ for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
|
|
+ tag = tag < max_queue ? tag : 0;
|
|
|
|
/* the last tag is reserved for internal command. */
|
|
if (tag == ATA_TAG_INTERNAL)
|
|
@@ -6103,6 +6113,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
|
|
{
|
|
spin_lock_init(&host->lock);
|
|
mutex_init(&host->eh_mutex);
|
|
+ host->n_tags = ATA_MAX_QUEUE - 1;
|
|
host->dev = dev;
|
|
host->ops = ops;
|
|
}
|
|
@@ -6184,6 +6195,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
|
|
{
|
|
int i, rc;
|
|
|
|
+ host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
|
|
+
|
|
/* host must have been started */
|
|
if (!(host->flags & ATA_HOST_STARTED)) {
|
|
dev_err(host->dev, "BUG: trying to register unstarted host\n");
|
|
@@ -6817,6 +6830,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
|
|
return tmp;
|
|
}
|
|
|
|
+/**
|
|
+ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
|
|
+ * @link: Link receiving the event
|
|
+ *
|
|
+ * Test whether the received PHY event has to be ignored or not.
|
|
+ *
|
|
+ * LOCKING:
|
|
+ * None:
|
|
+ *
|
|
+ * RETURNS:
|
|
+ * True if the event has to be ignored.
|
|
+ */
|
|
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
|
|
+{
|
|
+ unsigned long lpm_timeout = link->last_lpm_change +
|
|
+ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
|
|
+
|
|
+ /* if LPM is enabled, PHYRDY doesn't mean anything */
|
|
+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
|
|
+ return true;
|
|
+
|
|
+ /* ignore the first PHY event after the LPM policy changed
|
|
+ * as it is might be spurious
|
|
+ */
|
|
+ if ((link->flags & ATA_LFLAG_CHANGED) &&
|
|
+ time_before(jiffies, lpm_timeout))
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
|
|
+
|
|
/*
|
|
* Dummy port_ops
|
|
*/
|
|
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
|
|
index 6d87570..c6c77b7 100644
|
|
--- a/drivers/ata/libata-eh.c
|
|
+++ b/drivers/ata/libata-eh.c
|
|
@@ -3488,6 +3488,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
|
|
}
|
|
}
|
|
|
|
+ link->last_lpm_change = jiffies;
|
|
+ link->flags |= ATA_LFLAG_CHANGED;
|
|
+
|
|
return 0;
|
|
|
|
fail:
|
|
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
|
|
index 7ccc084..85aa761 100644
|
|
--- a/drivers/ata/libata-pmp.c
|
|
+++ b/drivers/ata/libata-pmp.c
|
|
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
|
|
ATA_LFLAG_NO_SRST |
|
|
ATA_LFLAG_ASSUME_ATA;
|
|
}
|
|
+ } else if (vendor == 0x11ab && devid == 0x4140) {
|
|
+ /* Marvell 4140 quirks */
|
|
+ ata_for_each_link(link, ap, EDGE) {
|
|
+ /* port 4 is for SEMB device and it doesn't like SRST */
|
|
+ if (link->pmp == 4)
|
|
+ link->flags |= ATA_LFLAG_DISABLED;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
|
|
index ef8567d..6fecf0b 100644
|
|
--- a/drivers/ata/libata-scsi.c
|
|
+++ b/drivers/ata/libata-scsi.c
|
|
@@ -2510,7 +2510,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
|
|
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
|
|
rbuf[15] = lowest_aligned;
|
|
|
|
- if (ata_id_has_trim(args->id)) {
|
|
+ if (ata_id_has_trim(args->id) &&
|
|
+ !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
|
|
rbuf[14] |= 0x80; /* TPE */
|
|
|
|
if (ata_id_has_zero_after_trim(args->id))
|
|
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
|
|
index b603720..136803c 100644
|
|
--- a/drivers/ata/libata-sff.c
|
|
+++ b/drivers/ata/libata-sff.c
|
|
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
|
|
DPRINTK("ENTER\n");
|
|
|
|
cancel_delayed_work_sync(&ap->sff_pio_task);
|
|
+
|
|
+ /*
|
|
+ * We wanna reset the HSM state to IDLE. If we do so without
|
|
+ * grabbing the port lock, critical sections protected by it which
|
|
+ * expect the HSM state to stay stable may get surprised. For
|
|
+ * example, we may set IDLE in between the time
|
|
+ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
|
|
+ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
|
|
+ */
|
|
+ spin_lock_irq(ap->lock);
|
|
ap->hsm_task_state = HSM_ST_IDLE;
|
|
+ spin_unlock_irq(ap->lock);
|
|
+
|
|
ap->sff_pio_task_link = NULL;
|
|
|
|
if (ata_msg_ctl(ap))
|
|
@@ -2008,13 +2020,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
|
|
|
|
DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
|
|
|
|
- /* software reset. causes dev0 to be selected */
|
|
- iowrite8(ap->ctl, ioaddr->ctl_addr);
|
|
- udelay(20); /* FIXME: flush */
|
|
- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
|
|
- udelay(20); /* FIXME: flush */
|
|
- iowrite8(ap->ctl, ioaddr->ctl_addr);
|
|
- ap->last_ctl = ap->ctl;
|
|
+ if (ap->ioaddr.ctl_addr) {
|
|
+ /* software reset. causes dev0 to be selected */
|
|
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
|
|
+ udelay(20); /* FIXME: flush */
|
|
+ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
|
|
+ udelay(20); /* FIXME: flush */
|
|
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
|
|
+ ap->last_ctl = ap->ctl;
|
|
+ }
|
|
|
|
/* wait the port to become ready */
|
|
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
|
|
@@ -2215,10 +2229,6 @@ void ata_sff_error_handler(struct ata_port *ap)
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
- /* ignore ata_sff_softreset if ctl isn't accessible */
|
|
- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
|
|
- softreset = NULL;
|
|
-
|
|
/* ignore built-in hardresets if SCR access is not available */
|
|
if ((hardreset == sata_std_hardreset ||
|
|
hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
|
|
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
|
|
index 83c4ddb..08223cc 100644
|
|
--- a/drivers/ata/pata_octeon_cf.c
|
|
+++ b/drivers/ata/pata_octeon_cf.c
|
|
@@ -1069,7 +1069,7 @@ static struct of_device_id octeon_cf_match[] = {
|
|
},
|
|
{},
|
|
};
|
|
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
|
|
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
|
|
|
|
static struct platform_driver octeon_cf_driver = {
|
|
.probe = octeon_cf_probe,
|
|
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
|
|
index f1f5b5a..f5640ec 100644
|
|
--- a/drivers/ata/pata_scc.c
|
|
+++ b/drivers/ata/pata_scc.c
|
|
@@ -585,7 +585,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
|
|
* Note: Original code is ata_bus_softreset().
|
|
*/
|
|
|
|
-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
|
|
+static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
|
|
unsigned long deadline)
|
|
{
|
|
struct ata_ioports *ioaddr = &ap->ioaddr;
|
|
@@ -599,9 +599,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
|
|
udelay(20);
|
|
out_be32(ioaddr->ctl_addr, ap->ctl);
|
|
|
|
- scc_wait_after_reset(&ap->link, devmask, deadline);
|
|
-
|
|
- return 0;
|
|
+ return scc_wait_after_reset(&ap->link, devmask, deadline);
|
|
}
|
|
|
|
/**
|
|
@@ -618,7 +616,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
|
|
{
|
|
struct ata_port *ap = link->ap;
|
|
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
|
|
- unsigned int devmask = 0, err_mask;
|
|
+ unsigned int devmask = 0;
|
|
+ int rc;
|
|
u8 err;
|
|
|
|
DPRINTK("ENTER\n");
|
|
@@ -634,9 +633,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
|
|
|
|
/* issue bus reset */
|
|
DPRINTK("about to softreset, devmask=%x\n", devmask);
|
|
- err_mask = scc_bus_softreset(ap, devmask, deadline);
|
|
- if (err_mask) {
|
|
- ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
|
|
+ rc = scc_bus_softreset(ap, devmask, deadline);
|
|
+ if (rc) {
|
|
+ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
|
|
return -EIO;
|
|
}
|
|
|
|
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
|
|
index e27f31f..9e79f55 100644
|
|
--- a/drivers/ata/pata_serverworks.c
|
|
+++ b/drivers/ata/pata_serverworks.c
|
|
@@ -251,12 +251,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
|
|
pci_write_config_byte(pdev, 0x54, ultra_cfg);
|
|
}
|
|
|
|
-static struct scsi_host_template serverworks_sht = {
|
|
+static struct scsi_host_template serverworks_osb4_sht = {
|
|
+ ATA_BMDMA_SHT(DRV_NAME),
|
|
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
|
|
+};
|
|
+
|
|
+static struct scsi_host_template serverworks_csb_sht = {
|
|
ATA_BMDMA_SHT(DRV_NAME),
|
|
};
|
|
|
|
static struct ata_port_operations serverworks_osb4_port_ops = {
|
|
.inherits = &ata_bmdma_port_ops,
|
|
+ .qc_prep = ata_bmdma_dumb_qc_prep,
|
|
.cable_detect = serverworks_cable_detect,
|
|
.mode_filter = serverworks_osb4_filter,
|
|
.set_piomode = serverworks_set_piomode,
|
|
@@ -265,6 +271,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
|
|
|
|
static struct ata_port_operations serverworks_csb_port_ops = {
|
|
.inherits = &serverworks_osb4_port_ops,
|
|
+ .qc_prep = ata_bmdma_qc_prep,
|
|
.mode_filter = serverworks_csb_filter,
|
|
};
|
|
|
|
@@ -404,6 +411,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
|
|
}
|
|
};
|
|
const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
|
|
+ struct scsi_host_template *sht = &serverworks_csb_sht;
|
|
int rc;
|
|
|
|
rc = pcim_enable_device(pdev);
|
|
@@ -417,6 +425,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
|
|
/* Select non UDMA capable OSB4 if we can't do fixups */
|
|
if (rc < 0)
|
|
ppi[0] = &info[1];
|
|
+ sht = &serverworks_osb4_sht;
|
|
}
|
|
/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
|
|
else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
|
|
@@ -433,7 +442,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
|
|
ppi[1] = &ata_dummy_port_info;
|
|
}
|
|
|
|
- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
|
|
+ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
|
|
}
|
|
|
|
#ifdef CONFIG_PM
|
|
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
|
|
index 73510d0..113d722 100644
|
|
--- a/drivers/ata/sata_dwc_460ex.c
|
|
+++ b/drivers/ata/sata_dwc_460ex.c
|
|
@@ -798,7 +798,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
|
|
if (err) {
|
|
dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
|
|
" %d\n", __func__, err);
|
|
- goto error_out;
|
|
+ return err;
|
|
}
|
|
|
|
/* Enabe DMA */
|
|
@@ -809,11 +809,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
|
|
sata_dma_regs);
|
|
|
|
return 0;
|
|
-
|
|
-error_out:
|
|
- dma_dwc_exit(hsdev);
|
|
-
|
|
- return err;
|
|
}
|
|
|
|
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
|
|
@@ -1663,7 +1658,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
|
char *ver = (char *)&versionr;
|
|
u8 *base = NULL;
|
|
int err = 0;
|
|
- int irq, rc;
|
|
+ int irq;
|
|
struct ata_host *host;
|
|
struct ata_port_info pi = sata_dwc_port_info[0];
|
|
const struct ata_port_info *ppi[] = { &pi, NULL };
|
|
@@ -1726,7 +1721,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
|
if (irq == NO_IRQ) {
|
|
dev_err(&ofdev->dev, "no SATA DMA irq\n");
|
|
err = -ENODEV;
|
|
- goto error_out;
|
|
+ goto error_iomap;
|
|
}
|
|
|
|
/* Get physical SATA DMA register base address */
|
|
@@ -1735,14 +1730,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
|
dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
|
|
" address\n");
|
|
err = -ENODEV;
|
|
- goto error_out;
|
|
+ goto error_iomap;
|
|
}
|
|
|
|
/* Save dev for later use in dev_xxx() routines */
|
|
host_pvt.dwc_dev = &ofdev->dev;
|
|
|
|
/* Initialize AHB DMAC */
|
|
- dma_dwc_init(hsdev, irq);
|
|
+ err = dma_dwc_init(hsdev, irq);
|
|
+ if (err)
|
|
+ goto error_dma_iomap;
|
|
|
|
/* Enable SATA Interrupts */
|
|
sata_dwc_enable_interrupts(hsdev);
|
|
@@ -1760,9 +1757,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
|
* device discovery process, invoking our port_start() handler &
|
|
* error_handler() to execute a dummy Softreset EH session
|
|
*/
|
|
- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
|
|
-
|
|
- if (rc != 0)
|
|
+ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
|
|
+ if (err)
|
|
dev_err(&ofdev->dev, "failed to activate host");
|
|
|
|
dev_set_drvdata(&ofdev->dev, host);
|
|
@@ -1771,7 +1767,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
|
error_out:
|
|
/* Free SATA DMA resources */
|
|
dma_dwc_exit(hsdev);
|
|
-
|
|
+error_dma_iomap:
|
|
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
|
|
error_iomap:
|
|
iounmap(base);
|
|
error_kmalloc:
|
|
@@ -1792,6 +1789,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
|
|
/* Free SATA DMA resources */
|
|
dma_dwc_exit(hsdev);
|
|
|
|
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
|
|
iounmap(hsdev->reg_base);
|
|
kfree(hsdev);
|
|
kfree(host);
|
|
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
|
|
index fb0b40a..ee2780d 100644
|
|
--- a/drivers/ata/sata_fsl.c
|
|
+++ b/drivers/ata/sata_fsl.c
|
|
@@ -1503,7 +1503,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
|
|
host_priv->csr_base = csr_base;
|
|
|
|
irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
|
|
- if (irq < 0) {
|
|
+ if (!irq) {
|
|
dev_err(&ofdev->dev, "invalid irq from platform\n");
|
|
goto error_exit_with_cleanup;
|
|
}
|
|
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
|
|
index 2b25bd8..c1ea780 100644
|
|
--- a/drivers/ata/sata_rcar.c
|
|
+++ b/drivers/ata/sata_rcar.c
|
|
@@ -146,6 +146,7 @@
|
|
enum sata_rcar_type {
|
|
RCAR_GEN1_SATA,
|
|
RCAR_GEN2_SATA,
|
|
+ RCAR_R8A7790_ES1_SATA,
|
|
};
|
|
|
|
struct sata_rcar_priv {
|
|
@@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host)
|
|
ap->udma_mask = ATA_UDMA6;
|
|
ap->flags |= ATA_FLAG_SATA;
|
|
|
|
+ if (priv->type == RCAR_R8A7790_ES1_SATA)
|
|
+ ap->flags |= ATA_FLAG_NO_DIPM;
|
|
+
|
|
ioaddr->cmd_addr = base + SDATA_REG;
|
|
ioaddr->ctl_addr = base + SSDEVCON_REG;
|
|
ioaddr->scr_addr = base + SCRSSTS_REG;
|
|
@@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
|
|
sata_rcar_gen1_phy_init(priv);
|
|
break;
|
|
case RCAR_GEN2_SATA:
|
|
+ case RCAR_R8A7790_ES1_SATA:
|
|
sata_rcar_gen2_phy_init(priv);
|
|
break;
|
|
default:
|
|
@@ -838,6 +843,10 @@ static struct of_device_id sata_rcar_match[] = {
|
|
.data = (void *)RCAR_GEN2_SATA
|
|
},
|
|
{
|
|
+ .compatible = "renesas,sata-r8a7790-es1",
|
|
+ .data = (void *)RCAR_R8A7790_ES1_SATA
|
|
+ },
|
|
+ {
|
|
.compatible = "renesas,sata-r8a7791",
|
|
.data = (void *)RCAR_GEN2_SATA
|
|
},
|
|
@@ -849,6 +858,7 @@ static const struct platform_device_id sata_rcar_id_table[] = {
|
|
{ "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
|
|
{ "sata-r8a7779", RCAR_GEN1_SATA },
|
|
{ "sata-r8a7790", RCAR_GEN2_SATA },
|
|
+ { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
|
|
{ "sata-r8a7791", RCAR_GEN2_SATA },
|
|
{ },
|
|
};
|
|
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
|
|
index 5b93852..0d75285 100644
|
|
--- a/drivers/auxdisplay/ks0108.c
|
|
+++ b/drivers/auxdisplay/ks0108.c
|
|
@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
|
|
|
|
ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
|
|
NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
|
|
+ parport_put_port(ks0108_parport);
|
|
if (ks0108_pardevice == NULL) {
|
|
printk(KERN_ERR KS0108_NAME ": ERROR: "
|
|
"parport didn't register new device\n");
|
|
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
|
|
index 83e910a..79bc203 100644
|
|
--- a/drivers/base/bus.c
|
|
+++ b/drivers/base/bus.c
|
|
@@ -254,13 +254,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct device *dev;
|
|
+ int err = -EINVAL;
|
|
|
|
dev = bus_find_device_by_name(bus, NULL, buf);
|
|
if (!dev)
|
|
return -ENODEV;
|
|
- if (bus_rescan_devices_helper(dev, NULL) != 0)
|
|
- return -EINVAL;
|
|
- return count;
|
|
+ if (bus_rescan_devices_helper(dev, NULL) == 0)
|
|
+ err = count;
|
|
+ put_device(dev);
|
|
+ return err;
|
|
}
|
|
|
|
static struct device *next_device(struct klist_iter *i)
|
|
@@ -513,11 +515,11 @@ int bus_add_device(struct device *dev)
|
|
goto out_put;
|
|
error = device_add_groups(dev, bus->dev_groups);
|
|
if (error)
|
|
- goto out_groups;
|
|
+ goto out_id;
|
|
error = sysfs_create_link(&bus->p->devices_kset->kobj,
|
|
&dev->kobj, dev_name(dev));
|
|
if (error)
|
|
- goto out_id;
|
|
+ goto out_groups;
|
|
error = sysfs_create_link(&dev->kobj,
|
|
&dev->bus->p->subsys.kobj, "subsystem");
|
|
if (error)
|
|
diff --git a/drivers/base/core.c b/drivers/base/core.c
|
|
index 2b56717..6a8955e 100644
|
|
--- a/drivers/base/core.c
|
|
+++ b/drivers/base/core.c
|
|
@@ -741,12 +741,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
|
|
return &dir->kobj;
|
|
}
|
|
|
|
+static DEFINE_MUTEX(gdp_mutex);
|
|
|
|
static struct kobject *get_device_parent(struct device *dev,
|
|
struct device *parent)
|
|
{
|
|
if (dev->class) {
|
|
- static DEFINE_MUTEX(gdp_mutex);
|
|
struct kobject *kobj = NULL;
|
|
struct kobject *parent_kobj;
|
|
struct kobject *k;
|
|
@@ -810,7 +810,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
|
|
glue_dir->kset != &dev->class->p->glue_dirs)
|
|
return;
|
|
|
|
+ mutex_lock(&gdp_mutex);
|
|
kobject_put(glue_dir);
|
|
+ mutex_unlock(&gdp_mutex);
|
|
}
|
|
|
|
static void cleanup_device_parent(struct device *dev)
|
|
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
|
|
index 545c4de..cbe0b58 100644
|
|
--- a/drivers/base/devres.c
|
|
+++ b/drivers/base/devres.c
|
|
@@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res,
|
|
if (!dr) {
|
|
add_dr(dev, &new_dr->node);
|
|
dr = new_dr;
|
|
- new_dr = NULL;
|
|
+ new_res = NULL;
|
|
}
|
|
spin_unlock_irqrestore(&dev->devres_lock, flags);
|
|
- devres_free(new_dr);
|
|
+ devres_free(new_res);
|
|
|
|
return dr->data;
|
|
}
|
|
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
|
|
index c30df50e..f0c15f9 100644
|
|
--- a/drivers/base/firmware_class.c
|
|
+++ b/drivers/base/firmware_class.c
|
|
@@ -544,10 +544,8 @@ static void fw_dev_release(struct device *dev)
|
|
kfree(fw_priv);
|
|
}
|
|
|
|
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
|
|
{
|
|
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
|
|
-
|
|
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
|
|
return -ENOMEM;
|
|
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
|
|
@@ -558,6 +556,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
return 0;
|
|
}
|
|
|
|
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
+{
|
|
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
|
|
+ int err = 0;
|
|
+
|
|
+ mutex_lock(&fw_lock);
|
|
+ if (fw_priv->buf)
|
|
+ err = do_firmware_uevent(fw_priv, env);
|
|
+ mutex_unlock(&fw_lock);
|
|
+ return err;
|
|
+}
|
|
+
|
|
static struct class firmware_class = {
|
|
.name = "firmware",
|
|
.class_attrs = firmware_class_attrs,
|
|
@@ -1081,6 +1091,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
|
|
if (!firmware_p)
|
|
return -EINVAL;
|
|
|
|
+ if (!name || name[0] == '\0')
|
|
+ return -EINVAL;
|
|
+
|
|
ret = _request_firmware_prepare(&fw, name, device);
|
|
if (ret <= 0) /* error or already assigned */
|
|
goto out;
|
|
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
|
|
index 3c51eb0..57d8f67 100644
|
|
--- a/drivers/base/platform.c
|
|
+++ b/drivers/base/platform.c
|
|
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
|
|
return dev->archdata.irqs[num];
|
|
#else
|
|
struct resource *r;
|
|
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
|
|
- return of_irq_get(dev->dev.of_node, num);
|
|
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
|
|
+ int ret;
|
|
+
|
|
+ ret = of_irq_get(dev->dev.of_node, num);
|
|
+ if (ret >= 0 || ret == -EPROBE_DEFER)
|
|
+ return ret;
|
|
+ }
|
|
|
|
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
|
|
|
|
@@ -349,9 +354,7 @@ int platform_device_add(struct platform_device *pdev)
|
|
|
|
while (--i >= 0) {
|
|
struct resource *r = &pdev->resource[i];
|
|
- unsigned long type = resource_type(r);
|
|
-
|
|
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
|
|
+ if (r->parent)
|
|
release_resource(r);
|
|
}
|
|
|
|
@@ -382,9 +385,7 @@ void platform_device_del(struct platform_device *pdev)
|
|
|
|
for (i = 0; i < pdev->num_resources; i++) {
|
|
struct resource *r = &pdev->resource[i];
|
|
- unsigned long type = resource_type(r);
|
|
-
|
|
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
|
|
+ if (r->parent)
|
|
release_resource(r);
|
|
}
|
|
}
|
|
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
|
|
index 930cad4..f3f7136 100644
|
|
--- a/drivers/base/regmap/regcache-rbtree.c
|
|
+++ b/drivers/base/regmap/regcache-rbtree.c
|
|
@@ -302,18 +302,27 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
|
|
if (!blk)
|
|
return -ENOMEM;
|
|
|
|
- present = krealloc(rbnode->cache_present,
|
|
- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
|
|
- if (!present) {
|
|
- kfree(blk);
|
|
- return -ENOMEM;
|
|
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
|
|
+ present = krealloc(rbnode->cache_present,
|
|
+ BITS_TO_LONGS(blklen) * sizeof(*present),
|
|
+ GFP_KERNEL);
|
|
+ if (!present) {
|
|
+ kfree(blk);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
|
|
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
|
|
+ * sizeof(*present));
|
|
+ } else {
|
|
+ present = rbnode->cache_present;
|
|
}
|
|
|
|
/* insert the register value in the correct place in the rbnode block */
|
|
if (pos == 0) {
|
|
memmove(blk + offset * map->cache_word_size,
|
|
blk, rbnode->blklen * map->cache_word_size);
|
|
- bitmap_shift_right(present, present, offset, blklen);
|
|
+ bitmap_shift_left(present, present, offset, blklen);
|
|
}
|
|
|
|
/* update the rbnode block, its size and the base register */
|
|
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
|
|
index d4dd771..154e7a8 100644
|
|
--- a/drivers/base/regmap/regcache.c
|
|
+++ b/drivers/base/regmap/regcache.c
|
|
@@ -701,7 +701,7 @@ int regcache_sync_block(struct regmap *map, void *block,
|
|
unsigned int block_base, unsigned int start,
|
|
unsigned int end)
|
|
{
|
|
- if (regmap_can_raw_write(map))
|
|
+ if (regmap_can_raw_write(map) && !map->use_single_rw)
|
|
return regcache_sync_block_raw(map, block, cache_present,
|
|
block_base, start, end);
|
|
else
|
|
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
|
|
index c5471cd..d39fd61 100644
|
|
--- a/drivers/base/regmap/regmap-debugfs.c
|
|
+++ b/drivers/base/regmap/regmap-debugfs.c
|
|
@@ -473,6 +473,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
|
|
{
|
|
struct rb_node *next;
|
|
struct regmap_range_node *range_node;
|
|
+ const char *devname = "dummy";
|
|
|
|
/* If we don't have the debugfs root yet, postpone init */
|
|
if (!regmap_debugfs_root) {
|
|
@@ -491,12 +492,15 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
|
|
INIT_LIST_HEAD(&map->debugfs_off_cache);
|
|
mutex_init(&map->cache_lock);
|
|
|
|
+ if (map->dev)
|
|
+ devname = dev_name(map->dev);
|
|
+
|
|
if (name) {
|
|
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
|
|
- dev_name(map->dev), name);
|
|
+ devname, name);
|
|
name = map->debugfs_name;
|
|
} else {
|
|
- name = dev_name(map->dev);
|
|
+ name = devname;
|
|
}
|
|
|
|
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
|
|
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
|
|
index 6a19515..58559d7 100644
|
|
--- a/drivers/base/regmap/regmap.c
|
|
+++ b/drivers/base/regmap/regmap.c
|
|
@@ -105,7 +105,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
|
|
|
|
bool regmap_volatile(struct regmap *map, unsigned int reg)
|
|
{
|
|
- if (!regmap_readable(map, reg))
|
|
+ if (!map->format.format_write && !regmap_readable(map, reg))
|
|
return false;
|
|
|
|
if (map->volatile_reg)
|
|
@@ -808,11 +808,10 @@ EXPORT_SYMBOL_GPL(devm_regmap_init);
|
|
static void regmap_field_init(struct regmap_field *rm_field,
|
|
struct regmap *regmap, struct reg_field reg_field)
|
|
{
|
|
- int field_bits = reg_field.msb - reg_field.lsb + 1;
|
|
rm_field->regmap = regmap;
|
|
rm_field->reg = reg_field.reg;
|
|
rm_field->shift = reg_field.lsb;
|
|
- rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
|
|
+ rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
|
|
rm_field->id_size = reg_field.id_size;
|
|
rm_field->id_offset = reg_field.id_offset;
|
|
}
|
|
@@ -1308,7 +1307,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
|
|
}
|
|
|
|
#ifdef LOG_DEVICE
|
|
- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
|
|
+ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
|
|
dev_info(map->dev, "%x <= %x\n", reg, val);
|
|
#endif
|
|
|
|
@@ -1557,6 +1556,11 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
|
|
} else {
|
|
void *wval;
|
|
|
|
+ if (!val_count) {
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
|
|
if (!wval) {
|
|
ret = -ENOMEM;
|
|
@@ -1739,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
|
|
ret = map->reg_read(context, reg, val);
|
|
if (ret == 0) {
|
|
#ifdef LOG_DEVICE
|
|
- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
|
|
+ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
|
|
dev_info(map->dev, "%x => %x\n", reg, *val);
|
|
#endif
|
|
|
|
@@ -1942,7 +1946,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
|
|
&ival);
|
|
if (ret != 0)
|
|
return ret;
|
|
- memcpy(val + (i * val_bytes), &ival, val_bytes);
|
|
+ map->format.format_val(val + (i * val_bytes), ival, 0);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
|
|
index 89c497c..04a14e0 100644
|
|
--- a/drivers/block/drbd/drbd_interval.c
|
|
+++ b/drivers/block/drbd/drbd_interval.c
|
|
@@ -79,6 +79,7 @@ bool
|
|
drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
|
|
{
|
|
struct rb_node **new = &root->rb_node, *parent = NULL;
|
|
+ sector_t this_end = this->sector + (this->size >> 9);
|
|
|
|
BUG_ON(!IS_ALIGNED(this->size, 512));
|
|
|
|
@@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
|
|
rb_entry(*new, struct drbd_interval, rb);
|
|
|
|
parent = *new;
|
|
+ if (here->end < this_end)
|
|
+ here->end = this_end;
|
|
if (this->sector < here->sector)
|
|
new = &(*new)->rb_left;
|
|
else if (this->sector > here->sector)
|
|
@@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
|
|
return false;
|
|
}
|
|
|
|
+ this->end = this_end;
|
|
rb_link_node(&this->rb, parent, new);
|
|
rb_insert_augmented(&this->rb, root, &augment_callbacks);
|
|
return true;
|
|
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
|
|
index c706d50..8c16c2f 100644
|
|
--- a/drivers/block/drbd/drbd_nl.c
|
|
+++ b/drivers/block/drbd/drbd_nl.c
|
|
@@ -525,6 +525,12 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
|
|
struct task_struct *opa;
|
|
|
|
kref_get(&tconn->kref);
|
|
+ /* We may just have force_sig()'ed this thread
|
|
+ * to get it out of some blocking network function.
|
|
+ * Clear signals; otherwise kthread_run(), which internally uses
|
|
+ * wait_on_completion_killable(), will mistake our pending signal
|
|
+ * for a new fatal signal and fail. */
|
|
+ flush_signals(current);
|
|
opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
|
|
if (IS_ERR(opa)) {
|
|
conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
|
|
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
|
|
index 104a040..6efdbea 100644
|
|
--- a/drivers/block/drbd/drbd_req.c
|
|
+++ b/drivers/block/drbd/drbd_req.c
|
|
@@ -1310,6 +1310,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
|
|
struct request_queue * const b =
|
|
mdev->ldev->backing_bdev->bd_disk->queue;
|
|
if (b->merge_bvec_fn) {
|
|
+ bvm->bi_bdev = mdev->ldev->backing_bdev;
|
|
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
|
|
limit = min(limit, backing_limit);
|
|
}
|
|
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
|
|
index 55298db..d180936 100644
|
|
--- a/drivers/block/nbd.c
|
|
+++ b/drivers/block/nbd.c
|
|
@@ -814,10 +814,6 @@ static int __init nbd_init(void)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
|
|
- if (!nbd_dev)
|
|
- return -ENOMEM;
|
|
-
|
|
part_shift = 0;
|
|
if (max_part > 0) {
|
|
part_shift = fls(max_part);
|
|
@@ -839,6 +835,10 @@ static int __init nbd_init(void)
|
|
if (nbds_max > 1UL << (MINORBITS - part_shift))
|
|
return -EINVAL;
|
|
|
|
+ nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
|
|
+ if (!nbd_dev)
|
|
+ return -ENOMEM;
|
|
+
|
|
for (i = 0; i < nbds_max; i++) {
|
|
struct gendisk *disk = alloc_disk(1 << part_shift);
|
|
if (!disk)
|
|
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
|
|
index 7296c7f..b583773 100644
|
|
--- a/drivers/block/rbd.c
|
|
+++ b/drivers/block/rbd.c
|
|
@@ -508,6 +508,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
|
|
# define rbd_assert(expr) ((void) 0)
|
|
#endif /* !RBD_DEBUG */
|
|
|
|
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
|
|
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
|
|
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
|
|
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
|
|
@@ -1651,6 +1652,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
|
|
obj_request_done_set(obj_request);
|
|
}
|
|
|
|
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
|
|
+{
|
|
+ dout("%s: obj %p\n", __func__, obj_request);
|
|
+
|
|
+ if (obj_request_img_data_test(obj_request))
|
|
+ rbd_osd_copyup_callback(obj_request);
|
|
+ else
|
|
+ obj_request_done_set(obj_request);
|
|
+}
|
|
+
|
|
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
|
|
struct ceph_msg *msg)
|
|
{
|
|
@@ -1689,6 +1700,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
|
|
rbd_osd_stat_callback(obj_request);
|
|
break;
|
|
case CEPH_OSD_OP_CALL:
|
|
+ rbd_osd_call_callback(obj_request);
|
|
+ break;
|
|
case CEPH_OSD_OP_NOTIFY_ACK:
|
|
case CEPH_OSD_OP_WATCH:
|
|
rbd_osd_trivial_callback(obj_request);
|
|
@@ -1826,11 +1839,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
|
|
rbd_assert(obj_request_type_valid(type));
|
|
|
|
size = strlen(object_name) + 1;
|
|
- name = kmalloc(size, GFP_KERNEL);
|
|
+ name = kmalloc(size, GFP_NOIO);
|
|
if (!name)
|
|
return NULL;
|
|
|
|
- obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
|
|
+ obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
|
|
if (!obj_request) {
|
|
kfree(name);
|
|
return NULL;
|
|
@@ -1926,32 +1939,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
|
|
* If an image has a non-zero parent overlap, get a reference to its
|
|
* parent.
|
|
*
|
|
- * We must get the reference before checking for the overlap to
|
|
- * coordinate properly with zeroing the parent overlap in
|
|
- * rbd_dev_v2_parent_info() when an image gets flattened. We
|
|
- * drop it again if there is no overlap.
|
|
- *
|
|
* Returns true if the rbd device has a parent with a non-zero
|
|
* overlap and a reference for it was successfully taken, or
|
|
* false otherwise.
|
|
*/
|
|
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
|
|
{
|
|
- int counter;
|
|
+ int counter = 0;
|
|
|
|
if (!rbd_dev->parent_spec)
|
|
return false;
|
|
|
|
- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
|
|
- if (counter > 0 && rbd_dev->parent_overlap)
|
|
- return true;
|
|
-
|
|
- /* Image was flattened, but parent is not yet torn down */
|
|
+ down_read(&rbd_dev->header_rwsem);
|
|
+ if (rbd_dev->parent_overlap)
|
|
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
|
|
+ up_read(&rbd_dev->header_rwsem);
|
|
|
|
if (counter < 0)
|
|
rbd_warn(rbd_dev, "parent reference overflow\n");
|
|
|
|
- return false;
|
|
+ return counter > 0;
|
|
}
|
|
|
|
/*
|
|
@@ -2090,6 +2097,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
|
|
result, xferred);
|
|
if (!img_request->result)
|
|
img_request->result = result;
|
|
+ /*
|
|
+ * Need to end I/O on the entire obj_request worth of
|
|
+ * bytes in case of error.
|
|
+ */
|
|
+ xferred = obj_request->length;
|
|
}
|
|
|
|
/* Image object requests don't own their page array */
|
|
@@ -2276,13 +2288,15 @@ out_unwind:
|
|
}
|
|
|
|
static void
|
|
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
|
|
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
|
|
{
|
|
struct rbd_img_request *img_request;
|
|
struct rbd_device *rbd_dev;
|
|
struct page **pages;
|
|
u32 page_count;
|
|
|
|
+ dout("%s: obj %p\n", __func__, obj_request);
|
|
+
|
|
rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
|
|
rbd_assert(obj_request_img_data_test(obj_request));
|
|
img_request = obj_request->img_request;
|
|
@@ -2308,9 +2322,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
|
|
if (!obj_request->result)
|
|
obj_request->xferred = obj_request->length;
|
|
|
|
- /* Finish up with the normal image object callback */
|
|
-
|
|
- rbd_img_obj_callback(obj_request);
|
|
+ obj_request_done_set(obj_request);
|
|
}
|
|
|
|
static void
|
|
@@ -2407,7 +2419,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
|
|
|
|
/* All set, send it off. */
|
|
|
|
- orig_request->callback = rbd_img_obj_copyup_callback;
|
|
osdc = &rbd_dev->rbd_client->client->osdc;
|
|
img_result = rbd_obj_request_submit(osdc, orig_request);
|
|
if (!img_result)
|
|
@@ -3217,7 +3228,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
|
|
page_count = (u32) calc_pages_for(offset, length);
|
|
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
|
|
if (IS_ERR(pages))
|
|
- ret = PTR_ERR(pages);
|
|
+ return PTR_ERR(pages);
|
|
|
|
ret = -ENOMEM;
|
|
obj_request = rbd_obj_request_create(object_name, offset, length,
|
|
@@ -3904,7 +3915,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
|
|
*/
|
|
if (rbd_dev->parent_overlap) {
|
|
rbd_dev->parent_overlap = 0;
|
|
- smp_mb();
|
|
rbd_dev_parent_put(rbd_dev);
|
|
pr_info("%s: clone image has been flattened\n",
|
|
rbd_dev->disk->disk_name);
|
|
@@ -3948,7 +3958,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
|
|
* treat it specially.
|
|
*/
|
|
rbd_dev->parent_overlap = overlap;
|
|
- smp_mb();
|
|
if (!overlap) {
|
|
|
|
/* A null parent_spec indicates it's the initial probe */
|
|
@@ -4764,10 +4773,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
|
|
{
|
|
struct rbd_image_header *header;
|
|
|
|
- /* Drop parent reference unless it's already been done (or none) */
|
|
-
|
|
- if (rbd_dev->parent_overlap)
|
|
- rbd_dev_parent_put(rbd_dev);
|
|
+ rbd_dev_parent_put(rbd_dev);
|
|
|
|
/* Free dynamic fields from the header, then zero it out */
|
|
|
|
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
|
|
index 5814deb..0ebadf9 100644
|
|
--- a/drivers/block/sunvdc.c
|
|
+++ b/drivers/block/sunvdc.c
|
|
@@ -9,6 +9,7 @@
|
|
#include <linux/blkdev.h>
|
|
#include <linux/hdreg.h>
|
|
#include <linux/genhd.h>
|
|
+#include <linux/cdrom.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/completion.h>
|
|
@@ -22,8 +23,8 @@
|
|
|
|
#define DRV_MODULE_NAME "sunvdc"
|
|
#define PFX DRV_MODULE_NAME ": "
|
|
-#define DRV_MODULE_VERSION "1.0"
|
|
-#define DRV_MODULE_RELDATE "June 25, 2007"
|
|
+#define DRV_MODULE_VERSION "1.1"
|
|
+#define DRV_MODULE_RELDATE "February 13, 2013"
|
|
|
|
static char version[] =
|
|
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
|
@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_VERSION(DRV_MODULE_VERSION);
|
|
|
|
-#define VDC_TX_RING_SIZE 256
|
|
+#define VDC_TX_RING_SIZE 512
|
|
|
|
#define WAITING_FOR_LINK_UP 0x01
|
|
#define WAITING_FOR_TX_SPACE 0x02
|
|
@@ -65,11 +66,9 @@ struct vdc_port {
|
|
u64 operations;
|
|
u32 vdisk_size;
|
|
u8 vdisk_type;
|
|
+ u8 vdisk_mtype;
|
|
|
|
char disk_name[32];
|
|
-
|
|
- struct vio_disk_geom geom;
|
|
- struct vio_disk_vtoc label;
|
|
};
|
|
|
|
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
|
|
@@ -79,9 +78,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
|
|
|
|
/* Ordered from largest major to lowest */
|
|
static struct vio_version vdc_versions[] = {
|
|
+ { .major = 1, .minor = 1 },
|
|
{ .major = 1, .minor = 0 },
|
|
};
|
|
|
|
+static inline int vdc_version_supported(struct vdc_port *port,
|
|
+ u16 major, u16 minor)
|
|
+{
|
|
+ return port->vio.ver.major == major && port->vio.ver.minor >= minor;
|
|
+}
|
|
+
|
|
#define VDCBLK_NAME "vdisk"
|
|
static int vdc_major;
|
|
#define PARTITION_SHIFT 3
|
|
@@ -94,18 +100,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
|
|
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
|
{
|
|
struct gendisk *disk = bdev->bd_disk;
|
|
- struct vdc_port *port = disk->private_data;
|
|
+ sector_t nsect = get_capacity(disk);
|
|
+ sector_t cylinders = nsect;
|
|
|
|
- geo->heads = (u8) port->geom.num_hd;
|
|
- geo->sectors = (u8) port->geom.num_sec;
|
|
- geo->cylinders = port->geom.num_cyl;
|
|
+ geo->heads = 0xff;
|
|
+ geo->sectors = 0x3f;
|
|
+ sector_div(cylinders, geo->heads * geo->sectors);
|
|
+ geo->cylinders = cylinders;
|
|
+ if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
|
|
+ geo->cylinders = 0xffff;
|
|
|
|
return 0;
|
|
}
|
|
|
|
+/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
|
|
+ * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
|
|
+ * Needed to be able to install inside an ldom from an iso image.
|
|
+ */
|
|
+static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
|
|
+ unsigned command, unsigned long argument)
|
|
+{
|
|
+ int i;
|
|
+ struct gendisk *disk;
|
|
+
|
|
+ switch (command) {
|
|
+ case CDROMMULTISESSION:
|
|
+ pr_debug(PFX "Multisession CDs not supported\n");
|
|
+ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
|
|
+ if (put_user(0, (char __user *)(argument + i)))
|
|
+ return -EFAULT;
|
|
+ return 0;
|
|
+
|
|
+ case CDROM_GET_CAPABILITY:
|
|
+ disk = bdev->bd_disk;
|
|
+
|
|
+ if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
|
|
+ return 0;
|
|
+ return -EINVAL;
|
|
+
|
|
+ default:
|
|
+ pr_debug(PFX "ioctl %08x not supported\n", command);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
static const struct block_device_operations vdc_fops = {
|
|
.owner = THIS_MODULE,
|
|
.getgeo = vdc_getgeo,
|
|
+ .ioctl = vdc_ioctl,
|
|
};
|
|
|
|
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
|
|
@@ -165,9 +207,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
|
|
struct vio_disk_attr_info *pkt = arg;
|
|
|
|
viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
|
|
- "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
|
|
+ "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
|
|
pkt->tag.stype, pkt->operations,
|
|
- pkt->vdisk_size, pkt->vdisk_type,
|
|
+ pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
|
|
pkt->xfer_mode, pkt->vdisk_block_size,
|
|
pkt->max_xfer_size);
|
|
|
|
@@ -192,8 +234,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
|
|
}
|
|
|
|
port->operations = pkt->operations;
|
|
- port->vdisk_size = pkt->vdisk_size;
|
|
port->vdisk_type = pkt->vdisk_type;
|
|
+ if (vdc_version_supported(port, 1, 1)) {
|
|
+ port->vdisk_size = pkt->vdisk_size;
|
|
+ port->vdisk_mtype = pkt->vdisk_mtype;
|
|
+ }
|
|
if (pkt->max_xfer_size < port->max_xfer_size)
|
|
port->max_xfer_size = pkt->max_xfer_size;
|
|
port->vdisk_block_size = pkt->vdisk_block_size;
|
|
@@ -236,7 +281,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
|
|
|
|
__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
|
|
|
|
- if (blk_queue_stopped(port->disk->queue))
|
|
+ /* restart blk queue when ring is half emptied */
|
|
+ if (blk_queue_stopped(port->disk->queue) &&
|
|
+ vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
|
|
blk_start_queue(port->disk->queue);
|
|
}
|
|
|
|
@@ -388,12 +435,6 @@ static int __send_request(struct request *req)
|
|
for (i = 0; i < nsg; i++)
|
|
len += sg[i].length;
|
|
|
|
- if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
|
|
- blk_stop_queue(port->disk->queue);
|
|
- err = -ENOMEM;
|
|
- goto out;
|
|
- }
|
|
-
|
|
desc = vio_dring_cur(dr);
|
|
|
|
err = ldc_map_sg(port->vio.lp, sg, nsg,
|
|
@@ -433,21 +474,32 @@ static int __send_request(struct request *req)
|
|
port->req_id++;
|
|
dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
|
|
}
|
|
-out:
|
|
|
|
return err;
|
|
}
|
|
|
|
-static void do_vdc_request(struct request_queue *q)
|
|
+static void do_vdc_request(struct request_queue *rq)
|
|
{
|
|
- while (1) {
|
|
- struct request *req = blk_fetch_request(q);
|
|
+ struct request *req;
|
|
|
|
- if (!req)
|
|
- break;
|
|
+ while ((req = blk_peek_request(rq)) != NULL) {
|
|
+ struct vdc_port *port;
|
|
+ struct vio_dring_state *dr;
|
|
|
|
- if (__send_request(req) < 0)
|
|
- __blk_end_request_all(req, -EIO);
|
|
+ port = req->rq_disk->private_data;
|
|
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
|
|
+ if (unlikely(vdc_tx_dring_avail(dr) < 1))
|
|
+ goto wait;
|
|
+
|
|
+ blk_start_request(req);
|
|
+
|
|
+ if (__send_request(req) < 0) {
|
|
+ blk_requeue_request(rq, req);
|
|
+wait:
|
|
+ /* Avoid pointless unplugs. */
|
|
+ blk_stop_queue(rq);
|
|
+ break;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -656,25 +708,27 @@ static int probe_disk(struct vdc_port *port)
|
|
if (comp.err)
|
|
return comp.err;
|
|
|
|
- err = generic_request(port, VD_OP_GET_VTOC,
|
|
- &port->label, sizeof(port->label));
|
|
- if (err < 0) {
|
|
- printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
|
|
- return err;
|
|
- }
|
|
-
|
|
- err = generic_request(port, VD_OP_GET_DISKGEOM,
|
|
- &port->geom, sizeof(port->geom));
|
|
- if (err < 0) {
|
|
- printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
|
|
- "error %d\n", err);
|
|
- return err;
|
|
+ if (vdc_version_supported(port, 1, 1)) {
|
|
+ /* vdisk_size should be set during the handshake, if it wasn't
|
|
+ * then the underlying disk is reserved by another system
|
|
+ */
|
|
+ if (port->vdisk_size == -1)
|
|
+ return -ENODEV;
|
|
+ } else {
|
|
+ struct vio_disk_geom geom;
|
|
+
|
|
+ err = generic_request(port, VD_OP_GET_DISKGEOM,
|
|
+ &geom, sizeof(geom));
|
|
+ if (err < 0) {
|
|
+ printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
|
|
+ "error %d\n", err);
|
|
+ return err;
|
|
+ }
|
|
+ port->vdisk_size = ((u64)geom.num_cyl *
|
|
+ (u64)geom.num_hd *
|
|
+ (u64)geom.num_sec);
|
|
}
|
|
|
|
- port->vdisk_size = ((u64)port->geom.num_cyl *
|
|
- (u64)port->geom.num_hd *
|
|
- (u64)port->geom.num_sec);
|
|
-
|
|
q = blk_init_queue(do_vdc_request, &port->vio.lock);
|
|
if (!q) {
|
|
printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
|
|
@@ -691,6 +745,10 @@ static int probe_disk(struct vdc_port *port)
|
|
|
|
port->disk = g;
|
|
|
|
+ /* Each segment in a request is up to an aligned page in size. */
|
|
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
|
|
+ blk_queue_max_segment_size(q, PAGE_SIZE);
|
|
+
|
|
blk_queue_max_segments(q, port->ring_cookies);
|
|
blk_queue_max_hw_sectors(q, port->max_xfer_size);
|
|
g->major = vdc_major;
|
|
@@ -704,9 +762,32 @@ static int probe_disk(struct vdc_port *port)
|
|
|
|
set_capacity(g, port->vdisk_size);
|
|
|
|
- printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
|
|
+ if (vdc_version_supported(port, 1, 1)) {
|
|
+ switch (port->vdisk_mtype) {
|
|
+ case VD_MEDIA_TYPE_CD:
|
|
+ pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
|
|
+ g->flags |= GENHD_FL_CD;
|
|
+ g->flags |= GENHD_FL_REMOVABLE;
|
|
+ set_disk_ro(g, 1);
|
|
+ break;
|
|
+
|
|
+ case VD_MEDIA_TYPE_DVD:
|
|
+ pr_info(PFX "Virtual DVD %s\n", port->disk_name);
|
|
+ g->flags |= GENHD_FL_CD;
|
|
+ g->flags |= GENHD_FL_REMOVABLE;
|
|
+ set_disk_ro(g, 1);
|
|
+ break;
|
|
+
|
|
+ case VD_MEDIA_TYPE_FIXED:
|
|
+ pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
|
|
g->disk_name,
|
|
- port->vdisk_size, (port->vdisk_size >> (20 - 9)));
|
|
+ port->vdisk_size, (port->vdisk_size >> (20 - 9)),
|
|
+ port->vio.ver.major, port->vio.ver.minor);
|
|
|
|
add_disk(g);
|
|
|
|
@@ -765,6 +846,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
|
|
else
|
|
snprintf(port->disk_name, sizeof(port->disk_name),
|
|
VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
|
|
+ port->vdisk_size = -1;
|
|
|
|
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
|
|
vdc_versions, ARRAY_SIZE(vdc_versions),
|
|
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
|
|
index 64c60ed..63fc7f0 100644
|
|
--- a/drivers/block/xen-blkback/blkback.c
|
|
+++ b/drivers/block/xen-blkback/blkback.c
|
|
@@ -763,6 +763,7 @@ again:
|
|
BUG_ON(new_map_idx >= segs_to_map);
|
|
if (unlikely(map[new_map_idx].status != 0)) {
|
|
pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
|
|
+ put_free_pages(blkif, &pages[seg_idx]->page, 1);
|
|
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
|
|
ret |= 1;
|
|
goto next;
|
|
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
|
|
index efe1b47..e88556a 100644
|
|
--- a/drivers/block/xen-blkfront.c
|
|
+++ b/drivers/block/xen-blkfront.c
|
|
@@ -1093,8 +1093,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
|
|
* Add the used indirect page back to the list of
|
|
* available pages for indirect grefs.
|
|
*/
|
|
- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
|
|
- list_add(&indirect_page->lru, &info->indirect_pages);
|
|
+ if (!info->feature_persistent) {
|
|
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
|
|
+ list_add(&indirect_page->lru, &info->indirect_pages);
|
|
+ }
|
|
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
|
|
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
|
|
}
|
|
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
|
|
index 51c557c..d8ddb8e 100644
|
|
--- a/drivers/block/zram/zram_drv.c
|
|
+++ b/drivers/block/zram/zram_drv.c
|
|
@@ -447,7 +447,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
|
|
}
|
|
|
|
if (page_zero_filled(uncmem)) {
|
|
- kunmap_atomic(user_mem);
|
|
+ if (user_mem)
|
|
+ kunmap_atomic(user_mem);
|
|
/* Free memory associated with this sector now. */
|
|
write_lock(&zram->meta->tb_lock);
|
|
zram_free_page(zram, index);
|
|
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
|
|
index b11949c..8ff2b3c 100644
|
|
--- a/drivers/bluetooth/ath3k.c
|
|
+++ b/drivers/bluetooth/ath3k.c
|
|
@@ -62,51 +62,62 @@ static const struct usb_device_id ath3k_table[] = {
|
|
{ USB_DEVICE(0x0CF3, 0x3000) },
|
|
|
|
/* Atheros AR3011 with sflash firmware*/
|
|
+ { USB_DEVICE(0x0489, 0xE027) },
|
|
+ { USB_DEVICE(0x0489, 0xE03D) },
|
|
+ { USB_DEVICE(0x04F2, 0xAFF1) },
|
|
+ { USB_DEVICE(0x0930, 0x0215) },
|
|
{ USB_DEVICE(0x0CF3, 0x3002) },
|
|
{ USB_DEVICE(0x0CF3, 0xE019) },
|
|
{ USB_DEVICE(0x13d3, 0x3304) },
|
|
- { USB_DEVICE(0x0930, 0x0215) },
|
|
- { USB_DEVICE(0x0489, 0xE03D) },
|
|
- { USB_DEVICE(0x0489, 0xE027) },
|
|
|
|
/* Atheros AR9285 Malbec with sflash firmware */
|
|
{ USB_DEVICE(0x03F0, 0x311D) },
|
|
|
|
/* Atheros AR3012 with sflash firmware*/
|
|
- { USB_DEVICE(0x0CF3, 0x0036) },
|
|
- { USB_DEVICE(0x0CF3, 0x3004) },
|
|
- { USB_DEVICE(0x0CF3, 0x3008) },
|
|
- { USB_DEVICE(0x0CF3, 0x311D) },
|
|
- { USB_DEVICE(0x0CF3, 0x817a) },
|
|
- { USB_DEVICE(0x13d3, 0x3375) },
|
|
+ { USB_DEVICE(0x0489, 0xe04d) },
|
|
+ { USB_DEVICE(0x0489, 0xe04e) },
|
|
+ { USB_DEVICE(0x0489, 0xe057) },
|
|
+ { USB_DEVICE(0x0489, 0xe056) },
|
|
+ { USB_DEVICE(0x0489, 0xe05f) },
|
|
+ { USB_DEVICE(0x0489, 0xe076) },
|
|
+ { USB_DEVICE(0x0489, 0xe078) },
|
|
+ { USB_DEVICE(0x04c5, 0x1330) },
|
|
{ USB_DEVICE(0x04CA, 0x3004) },
|
|
{ USB_DEVICE(0x04CA, 0x3005) },
|
|
{ USB_DEVICE(0x04CA, 0x3006) },
|
|
{ USB_DEVICE(0x04CA, 0x3007) },
|
|
{ USB_DEVICE(0x04CA, 0x3008) },
|
|
{ USB_DEVICE(0x04CA, 0x300b) },
|
|
- { USB_DEVICE(0x13d3, 0x3362) },
|
|
- { USB_DEVICE(0x0CF3, 0xE004) },
|
|
- { USB_DEVICE(0x0CF3, 0xE005) },
|
|
+ { USB_DEVICE(0x04CA, 0x3010) },
|
|
{ USB_DEVICE(0x0930, 0x0219) },
|
|
{ USB_DEVICE(0x0930, 0x0220) },
|
|
- { USB_DEVICE(0x0489, 0xe057) },
|
|
- { USB_DEVICE(0x13d3, 0x3393) },
|
|
- { USB_DEVICE(0x0489, 0xe04e) },
|
|
- { USB_DEVICE(0x0489, 0xe056) },
|
|
- { USB_DEVICE(0x0489, 0xe04d) },
|
|
- { USB_DEVICE(0x04c5, 0x1330) },
|
|
- { USB_DEVICE(0x13d3, 0x3402) },
|
|
+ { USB_DEVICE(0x0930, 0x0227) },
|
|
+ { USB_DEVICE(0x0b05, 0x17d0) },
|
|
+ { USB_DEVICE(0x0CF3, 0x0036) },
|
|
+ { USB_DEVICE(0x0CF3, 0x3004) },
|
|
+ { USB_DEVICE(0x0CF3, 0x3008) },
|
|
+ { USB_DEVICE(0x0CF3, 0x311D) },
|
|
+ { USB_DEVICE(0x0CF3, 0x311E) },
|
|
+ { USB_DEVICE(0x0CF3, 0x311F) },
|
|
{ USB_DEVICE(0x0cf3, 0x3121) },
|
|
+ { USB_DEVICE(0x0CF3, 0x817a) },
|
|
{ USB_DEVICE(0x0cf3, 0xe003) },
|
|
- { USB_DEVICE(0x0489, 0xe05f) },
|
|
+ { USB_DEVICE(0x0CF3, 0xE004) },
|
|
+ { USB_DEVICE(0x0CF3, 0xE005) },
|
|
+ { USB_DEVICE(0x13d3, 0x3362) },
|
|
+ { USB_DEVICE(0x13d3, 0x3375) },
|
|
+ { USB_DEVICE(0x13d3, 0x3393) },
|
|
+ { USB_DEVICE(0x13d3, 0x3402) },
|
|
+ { USB_DEVICE(0x13d3, 0x3408) },
|
|
+ { USB_DEVICE(0x13d3, 0x3432) },
|
|
+ { USB_DEVICE(0x13d3, 0x3474) },
|
|
|
|
/* Atheros AR5BBU12 with sflash firmware */
|
|
{ USB_DEVICE(0x0489, 0xE02C) },
|
|
|
|
/* Atheros AR5BBU22 with sflash firmware */
|
|
- { USB_DEVICE(0x0489, 0xE03C) },
|
|
{ USB_DEVICE(0x0489, 0xE036) },
|
|
+ { USB_DEVICE(0x0489, 0xE03C) },
|
|
|
|
{ } /* Terminating entry */
|
|
};
|
|
@@ -119,37 +130,47 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
|
|
static const struct usb_device_id ath3k_blist_tbl[] = {
|
|
|
|
/* Atheros AR3012 with sflash firmware*/
|
|
- { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
|
|
|
|
/* Atheros AR5BBU22 with sflash firmware */
|
|
- { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
|
|
|
|
{ } /* Terminating entry */
|
|
};
|
|
@@ -157,6 +178,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
|
|
#define USB_REQ_DFU_DNLOAD 1
|
|
#define BULK_SIZE 4096
|
|
#define FW_HDR_SIZE 20
|
|
+#define TIMEGAP_USEC_MIN 50
|
|
+#define TIMEGAP_USEC_MAX 100
|
|
|
|
static int ath3k_load_firmware(struct usb_device *udev,
|
|
const struct firmware *firmware)
|
|
@@ -187,6 +210,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
|
|
count -= 20;
|
|
|
|
while (count) {
|
|
+ /* workaround the compatibility issue with xHCI controller*/
|
|
+ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
|
|
+
|
|
size = min_t(uint, count, BULK_SIZE);
|
|
pipe = usb_sndbulkpipe(udev, 0x02);
|
|
memcpy(send_buf, firmware->data + sent, size);
|
|
@@ -283,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
|
|
count -= size;
|
|
|
|
while (count) {
|
|
+ /* workaround the compatibility issue with xHCI controller*/
|
|
+ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
|
|
+
|
|
size = min_t(uint, count, BULK_SIZE);
|
|
pipe = usb_sndbulkpipe(udev, 0x02);
|
|
|
|
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
|
|
index 7399303..9e81a3d0 100644
|
|
--- a/drivers/bluetooth/btmrvl_drv.h
|
|
+++ b/drivers/bluetooth/btmrvl_drv.h
|
|
@@ -66,6 +66,7 @@ struct btmrvl_adapter {
|
|
u8 hs_state;
|
|
u8 wakeup_tries;
|
|
wait_queue_head_t cmd_wait_q;
|
|
+ wait_queue_head_t event_hs_wait_q;
|
|
u8 cmd_complete;
|
|
bool is_suspended;
|
|
};
|
|
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
|
|
index 1e0320a..49d2098 100644
|
|
--- a/drivers/bluetooth/btmrvl_main.c
|
|
+++ b/drivers/bluetooth/btmrvl_main.c
|
|
@@ -112,6 +112,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
|
|
adapter->hs_state = HS_ACTIVATED;
|
|
if (adapter->psmode)
|
|
adapter->ps_state = PS_SLEEP;
|
|
+ wake_up_interruptible(&adapter->event_hs_wait_q);
|
|
BT_DBG("HS ACTIVATED!");
|
|
} else {
|
|
BT_DBG("HS Enable failed");
|
|
@@ -251,11 +252,31 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
|
|
|
|
int btmrvl_enable_hs(struct btmrvl_private *priv)
|
|
{
|
|
+ struct btmrvl_adapter *adapter = priv->adapter;
|
|
int ret;
|
|
|
|
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
BT_ERR("Host sleep enable command failed\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
|
|
+ adapter->hs_state,
|
|
+ msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
|
|
+ if (ret < 0) {
|
|
+ BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
|
|
+ ret, adapter->hs_state, adapter->ps_state,
|
|
+ adapter->wakeup_tries);
|
|
+ } else if (!ret) {
|
|
+ BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state,
|
|
+ adapter->ps_state, adapter->wakeup_tries);
|
|
+ ret = -ETIMEDOUT;
|
|
+ } else {
|
|
+ BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state,
|
|
+ adapter->ps_state, adapter->wakeup_tries);
|
|
+ ret = 0;
|
|
+ }
|
|
|
|
return ret;
|
|
}
|
|
@@ -341,6 +362,7 @@ static void btmrvl_init_adapter(struct btmrvl_private *priv)
|
|
priv->adapter->ps_state = PS_AWAKE;
|
|
|
|
init_waitqueue_head(&priv->adapter->cmd_wait_q);
|
|
+ init_waitqueue_head(&priv->adapter->event_hs_wait_q);
|
|
}
|
|
|
|
static void btmrvl_free_adapter(struct btmrvl_private *priv)
|
|
@@ -648,6 +670,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv)
|
|
hdev = priv->btmrvl_dev.hcidev;
|
|
|
|
wake_up_interruptible(&priv->adapter->cmd_wait_q);
|
|
+ wake_up_interruptible(&priv->adapter->event_hs_wait_q);
|
|
|
|
kthread_stop(priv->main_thread.task);
|
|
|
|
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
|
|
index 1c7b504..c23658e 100644
|
|
--- a/drivers/bluetooth/btusb.c
|
|
+++ b/drivers/bluetooth/btusb.c
|
|
@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
|
|
#define BTUSB_WRONG_SCO_MTU 0x40
|
|
#define BTUSB_ATH3012 0x80
|
|
#define BTUSB_INTEL 0x100
|
|
+#define BTUSB_INTEL_BOOT 0x200
|
|
|
|
static const struct usb_device_id btusb_table[] = {
|
|
/* Generic Bluetooth USB device */
|
|
@@ -101,21 +102,31 @@ static const struct usb_device_id btusb_table[] = {
|
|
{ USB_DEVICE(0x0c10, 0x0000) },
|
|
|
|
/* Broadcom BCM20702A0 */
|
|
+ { USB_DEVICE(0x0489, 0xe042) },
|
|
+ { USB_DEVICE(0x04ca, 0x2003) },
|
|
{ USB_DEVICE(0x0b05, 0x17b5) },
|
|
{ USB_DEVICE(0x0b05, 0x17cb) },
|
|
- { USB_DEVICE(0x04ca, 0x2003) },
|
|
- { USB_DEVICE(0x0489, 0xe042) },
|
|
{ USB_DEVICE(0x413c, 0x8197) },
|
|
|
|
/* Foxconn - Hon Hai */
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
|
|
|
|
- /*Broadcom devices with vendor specific id */
|
|
+ /* Broadcom devices with vendor specific id */
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
|
|
|
|
+ /* ASUSTek Computer - Broadcom based */
|
|
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
|
|
+
|
|
/* Belkin F8065bf - Broadcom based */
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
|
|
|
|
+ /* IMC Networks - Broadcom based */
|
|
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
|
|
+
|
|
+ /* Intel Bluetooth USB Bootloader (RAM module) */
|
|
+ { USB_DEVICE(0x8087, 0x0a5a),
|
|
+ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
|
|
+
|
|
{ } /* Terminating entry */
|
|
};
|
|
|
|
@@ -129,56 +140,67 @@ static const struct usb_device_id blacklist_table[] = {
|
|
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
|
|
|
|
/* Atheros 3011 with sflash firmware */
|
|
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
|
|
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
|
|
+ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
|
|
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
|
|
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
|
|
{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
|
|
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
|
|
- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
|
|
- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
|
|
- { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
|
|
|
|
/* Atheros AR9285 Malbec with sflash firmware */
|
|
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
|
|
|
|
/* Atheros 3012 with sflash firmware */
|
|
- { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
|
|
- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
|
|
|
|
/* Atheros AR5BBU12 with sflash firmware */
|
|
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
|
|
|
|
/* Atheros AR5BBU12 with sflash firmware */
|
|
- { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
|
|
{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
|
|
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
|
|
|
|
/* Broadcom BCM2035 */
|
|
- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
|
|
- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
|
|
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
|
|
+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
|
|
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
|
|
|
|
/* Broadcom BCM2045 */
|
|
{ USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
|
|
@@ -309,6 +331,9 @@ static void btusb_intr_complete(struct urb *urb)
|
|
BT_ERR("%s corrupted event packet", hdev->name);
|
|
hdev->stat.err_rx++;
|
|
}
|
|
+ } else if (urb->status == -ENOENT) {
|
|
+ /* Avoid suspend failed when usb_kill_urb */
|
|
+ return;
|
|
}
|
|
|
|
if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
|
|
@@ -397,6 +422,9 @@ static void btusb_bulk_complete(struct urb *urb)
|
|
BT_ERR("%s corrupted ACL packet", hdev->name);
|
|
hdev->stat.err_rx++;
|
|
}
|
|
+ } else if (urb->status == -ENOENT) {
|
|
+ /* Avoid suspend failed when usb_kill_urb */
|
|
+ return;
|
|
}
|
|
|
|
if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
|
|
@@ -491,6 +519,9 @@ static void btusb_isoc_complete(struct urb *urb)
|
|
hdev->stat.err_rx++;
|
|
}
|
|
}
|
|
+ } else if (urb->status == -ENOENT) {
|
|
+ /* Avoid suspend failed when usb_kill_urb */
|
|
+ return;
|
|
}
|
|
|
|
if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
|
|
@@ -1262,6 +1293,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
|
}
|
|
fw_ptr = fw->data;
|
|
|
|
+ kfree_skb(skb);
|
|
+
|
|
/* This Intel specific command enables the manufacturer mode of the
|
|
* controller.
|
|
*
|
|
@@ -1482,6 +1515,9 @@ static int btusb_probe(struct usb_interface *intf,
|
|
if (id->driver_info & BTUSB_INTEL)
|
|
hdev->setup = btusb_setup_intel;
|
|
|
|
+ if (id->driver_info & BTUSB_INTEL_BOOT)
|
|
+ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
|
|
+
|
|
/* Interface numbers are hardcoded in the specification */
|
|
data->isoc = usb_ifnum_to_if(data->udev, 1);
|
|
|
|
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
|
|
index e36a024..5651992 100644
|
|
--- a/drivers/bluetooth/hci_h5.c
|
|
+++ b/drivers/bluetooth/hci_h5.c
|
|
@@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5)
|
|
break;
|
|
|
|
to_remove--;
|
|
- seq = (seq - 1) % 8;
|
|
+ seq = (seq - 1) & 0x07;
|
|
}
|
|
|
|
if (seq != h5->rx_ack)
|
|
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
|
|
index 372ae72..1aa0130 100644
|
|
--- a/drivers/bus/mvebu-mbus.c
|
|
+++ b/drivers/bus/mvebu-mbus.c
|
|
@@ -181,12 +181,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
|
|
}
|
|
|
|
/* Checks whether the given window number is available */
|
|
+
|
|
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
|
|
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
|
|
+ * isn't currently taking into account this special case, which means
|
|
+ * that when window 13 is actually used, the remap registers are left
|
|
+ * to 0, making the device using this MBus window unavailable. The
|
|
+ * quick fix for stable is to not use window 13. A follow up patch
|
|
+ * will correctly handle this window.
|
|
+*/
|
|
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
|
|
const int win)
|
|
{
|
|
void __iomem *addr = mbus->mbuswins_base +
|
|
mbus->soc->win_cfg_offset(win);
|
|
u32 ctrl = readl(addr + WIN_CTRL_OFF);
|
|
+
|
|
+ if (win == 13)
|
|
+ return false;
|
|
+
|
|
return !(ctrl & WIN_CTRL_ENABLE);
|
|
}
|
|
|
|
@@ -688,7 +701,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
|
|
phys_addr_t sdramwins_phys_base,
|
|
size_t sdramwins_size)
|
|
{
|
|
- struct device_node *np;
|
|
int win;
|
|
|
|
mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
|
|
@@ -701,12 +713,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
|
|
- if (np) {
|
|
- mbus->hw_io_coherency = 1;
|
|
- of_node_put(np);
|
|
- }
|
|
-
|
|
for (win = 0; win < mbus->soc->num_wins; win++)
|
|
mvebu_mbus_disable_window(mbus, win);
|
|
|
|
@@ -876,7 +882,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
|
|
}
|
|
}
|
|
|
|
-int __init mvebu_mbus_dt_init(void)
|
|
+int __init mvebu_mbus_dt_init(bool is_coherent)
|
|
{
|
|
struct resource mbuswins_res, sdramwins_res;
|
|
struct device_node *np, *controller;
|
|
@@ -915,6 +921,8 @@ int __init mvebu_mbus_dt_init(void)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ mbus_state.hw_io_coherency = is_coherent;
|
|
+
|
|
/* Get optional pcie-{mem,io}-aperture properties */
|
|
mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
|
|
&mbus_state.pcie_io_aperture);
|
|
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
|
|
index 5c85350..19e301f 100644
|
|
--- a/drivers/char/agp/intel-gtt.c
|
|
+++ b/drivers/char/agp/intel-gtt.c
|
|
@@ -586,7 +586,7 @@ static inline int needs_ilk_vtd_wa(void)
|
|
/* Query intel_iommu to see if we need the workaround. Presumably that
|
|
* was loaded first.
|
|
*/
|
|
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
|
|
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
|
|
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
|
|
intel_iommu_gfx_mapped)
|
|
return 1;
|
|
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
|
|
index ab7ffde..f38f2c1 100644
|
|
--- a/drivers/char/hw_random/pseries-rng.c
|
|
+++ b/drivers/char/hw_random/pseries-rng.c
|
|
@@ -25,18 +25,21 @@
|
|
#include <asm/vio.h>
|
|
|
|
|
|
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
|
|
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
|
{
|
|
+ u64 buffer[PLPAR_HCALL_BUFSIZE];
|
|
+ size_t size = max < 8 ? max : 8;
|
|
int rc;
|
|
|
|
- rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
|
|
+ rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
|
|
if (rc != H_SUCCESS) {
|
|
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
|
|
return -EIO;
|
|
}
|
|
+ memcpy(data, buffer, size);
|
|
|
|
/* The hypervisor interface returns 64 bits */
|
|
- return 8;
|
|
+ return size;
|
|
}
|
|
|
|
/**
|
|
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
|
|
|
|
static struct hwrng pseries_rng = {
|
|
.name = KBUILD_MODNAME,
|
|
- .data_read = pseries_rng_data_read,
|
|
+ .read = pseries_rng_read,
|
|
};
|
|
|
|
static int __init pseries_rng_probe(struct vio_dev *dev,
|
|
diff --git a/drivers/char/random.c b/drivers/char/random.c
|
|
index 429b75b..8a64dbe 100644
|
|
--- a/drivers/char/random.c
|
|
+++ b/drivers/char/random.c
|
|
@@ -1063,8 +1063,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
|
|
* pool while mixing, and hash one final time.
|
|
*/
|
|
sha_transform(hash.w, extract, workspace);
|
|
- memset(extract, 0, sizeof(extract));
|
|
- memset(workspace, 0, sizeof(workspace));
|
|
+ memzero_explicit(extract, sizeof(extract));
|
|
+ memzero_explicit(workspace, sizeof(workspace));
|
|
|
|
/*
|
|
* In case the hash function has some recognizable output
|
|
@@ -1076,7 +1076,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
|
|
hash.w[2] ^= rol32(hash.w[2], 16);
|
|
|
|
memcpy(out, &hash, EXTRACT_SIZE);
|
|
- memset(&hash, 0, sizeof(hash));
|
|
+ memzero_explicit(&hash, sizeof(hash));
|
|
}
|
|
|
|
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
|
|
@@ -1124,7 +1124,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
|
|
}
|
|
|
|
/* Wipe data just returned from memory */
|
|
- memset(tmp, 0, sizeof(tmp));
|
|
+ memzero_explicit(tmp, sizeof(tmp));
|
|
|
|
return ret;
|
|
}
|
|
@@ -1162,7 +1162,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
|
|
}
|
|
|
|
/* Wipe data just returned from memory */
|
|
- memset(tmp, 0, sizeof(tmp));
|
|
+ memzero_explicit(tmp, sizeof(tmp));
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
|
|
index 62e10fd..cfb9089 100644
|
|
--- a/drivers/char/tpm/tpm-interface.c
|
|
+++ b/drivers/char/tpm/tpm-interface.c
|
|
@@ -491,11 +491,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
|
|
int tpm_get_timeouts(struct tpm_chip *chip)
|
|
{
|
|
struct tpm_cmd_t tpm_cmd;
|
|
- struct timeout_t *timeout_cap;
|
|
+ unsigned long new_timeout[4];
|
|
+ unsigned long old_timeout[4];
|
|
struct duration_t *duration_cap;
|
|
ssize_t rc;
|
|
- u32 timeout;
|
|
- unsigned int scale = 1;
|
|
|
|
tpm_cmd.header.in = tpm_getcap_header;
|
|
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
|
|
@@ -529,25 +528,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
|
|
!= sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
|
|
return -EINVAL;
|
|
|
|
- timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
|
|
- /* Don't overwrite default if value is 0 */
|
|
- timeout = be32_to_cpu(timeout_cap->a);
|
|
- if (timeout && timeout < 1000) {
|
|
- /* timeouts in msec rather usec */
|
|
- scale = 1000;
|
|
- chip->vendor.timeout_adjusted = true;
|
|
+ old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
|
|
+ old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
|
|
+ old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
|
|
+ old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
|
|
+ memcpy(new_timeout, old_timeout, sizeof(new_timeout));
|
|
+
|
|
+ /*
|
|
+ * Provide ability for vendor overrides of timeout values in case
|
|
+ * of misreporting.
|
|
+ */
|
|
+ if (chip->ops->update_timeouts != NULL)
|
|
+ chip->vendor.timeout_adjusted =
|
|
+ chip->ops->update_timeouts(chip, new_timeout);
|
|
+
|
|
+ if (!chip->vendor.timeout_adjusted) {
|
|
+ /* Don't overwrite default if value is 0 */
|
|
+ if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
|
|
+ int i;
|
|
+
|
|
+ /* timeouts in msec rather usec */
|
|
+ for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
|
|
+ new_timeout[i] *= 1000;
|
|
+ chip->vendor.timeout_adjusted = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Report adjusted timeouts */
|
|
+ if (chip->vendor.timeout_adjusted) {
|
|
+ dev_info(chip->dev,
|
|
+ HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
|
|
+ old_timeout[0], new_timeout[0],
|
|
+ old_timeout[1], new_timeout[1],
|
|
+ old_timeout[2], new_timeout[2],
|
|
+ old_timeout[3], new_timeout[3]);
|
|
}
|
|
- if (timeout)
|
|
- chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
|
|
- timeout = be32_to_cpu(timeout_cap->b);
|
|
- if (timeout)
|
|
- chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
|
|
- timeout = be32_to_cpu(timeout_cap->c);
|
|
- if (timeout)
|
|
- chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
|
|
- timeout = be32_to_cpu(timeout_cap->d);
|
|
- if (timeout)
|
|
- chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
|
|
+
|
|
+ chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
|
|
+ chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
|
|
+ chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
|
|
+ chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
|
|
|
|
duration:
|
|
tpm_cmd.header.in = tpm_getcap_header;
|
|
@@ -991,13 +1011,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
|
|
int err, total = 0, retries = 5;
|
|
u8 *dest = out;
|
|
|
|
+ if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
|
|
+ return -EINVAL;
|
|
+
|
|
chip = tpm_chip_find_get(chip_num);
|
|
if (chip == NULL)
|
|
return -ENODEV;
|
|
|
|
- if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
|
|
- return -EINVAL;
|
|
-
|
|
do {
|
|
tpm_cmd.header.in = tpm_getrandom_header;
|
|
tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
|
|
@@ -1016,6 +1036,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
|
|
num_bytes -= recd;
|
|
} while (retries-- && total < max);
|
|
|
|
+ tpm_chip_put(chip);
|
|
return total ? total : -EIO;
|
|
}
|
|
EXPORT_SYMBOL_GPL(tpm_get_random);
|
|
@@ -1095,17 +1116,19 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
|
|
goto del_misc;
|
|
|
|
if (tpm_add_ppi(&dev->kobj))
|
|
- goto del_misc;
|
|
+ goto del_sysfs;
|
|
|
|
chip->bios_dir = tpm_bios_log_setup(chip->devname);
|
|
|
|
/* Make chip available */
|
|
spin_lock(&driver_lock);
|
|
- list_add_rcu(&chip->list, &tpm_chip_list);
|
|
+ list_add_tail_rcu(&chip->list, &tpm_chip_list);
|
|
spin_unlock(&driver_lock);
|
|
|
|
return chip;
|
|
|
|
+del_sysfs:
|
|
+ tpm_sysfs_del_device(chip);
|
|
del_misc:
|
|
tpm_dev_del_device(chip);
|
|
put_device:
|
|
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
|
|
index 7727292..503a85a 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_atmel.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
|
|
@@ -168,6 +168,10 @@ static int i2c_atmel_probe(struct i2c_client *client,
|
|
|
|
chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
|
|
GFP_KERNEL);
|
|
+ if (!chip->vendor.priv) {
|
|
+ rc = -ENOMEM;
|
|
+ goto out_err;
|
|
+ }
|
|
|
|
/* Default timeouts */
|
|
chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
|
|
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
index 7b158ef..23c7b13 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
@@ -538,6 +538,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
|
|
|
|
chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
|
|
GFP_KERNEL);
|
|
+ if (!chip->vendor.priv) {
|
|
+ rc = -ENOMEM;
|
|
+ goto out_err;
|
|
+ }
|
|
+
|
|
init_waitqueue_head(&chip->vendor.read_queue);
|
|
init_waitqueue_head(&chip->vendor.int_queue);
|
|
|
|
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
|
|
index 5b0dd8e..576d111 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
|
|
@@ -488,7 +488,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
|
|
if (burstcnt < 0)
|
|
return burstcnt;
|
|
size = min_t(int, len - i - 1, burstcnt);
|
|
- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
|
|
+ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
|
|
if (ret < 0)
|
|
goto out_err;
|
|
|
|
@@ -715,6 +715,7 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
|
|
}
|
|
|
|
tpm_get_timeouts(chip);
|
|
+ tpm_do_selftest(chip);
|
|
|
|
dev_info(chip->dev, "TPM I2C Initialized\n");
|
|
return 0;
|
|
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
|
|
index af74c57..643bba7 100644
|
|
--- a/drivers/char/tpm/tpm_ibmvtpm.c
|
|
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
|
|
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
{
|
|
struct ibmvtpm_dev *ibmvtpm;
|
|
struct ibmvtpm_crq crq;
|
|
- u64 *word = (u64 *) &crq;
|
|
+ __be64 *word = (__be64 *)&crq;
|
|
int rc;
|
|
|
|
ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
|
|
@@ -145,10 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
|
|
crq.valid = (u8)IBMVTPM_VALID_CMD;
|
|
crq.msg = (u8)VTPM_TPM_COMMAND;
|
|
- crq.len = (u16)count;
|
|
- crq.data = ibmvtpm->rtce_dma_handle;
|
|
+ crq.len = cpu_to_be16(count);
|
|
+ crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
|
|
|
|
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
|
|
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
|
|
+ be64_to_cpu(word[1]));
|
|
if (rc != H_SUCCESS) {
|
|
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
|
|
rc = 0;
|
|
@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
|
|
crq.valid = (u8)IBMVTPM_VALID_CMD;
|
|
crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
|
|
|
|
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
|
|
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
|
|
+ cpu_to_be64(buf[1]));
|
|
if (rc != H_SUCCESS)
|
|
dev_err(ibmvtpm->dev,
|
|
"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
|
|
@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
|
|
crq.valid = (u8)IBMVTPM_VALID_CMD;
|
|
crq.msg = (u8)VTPM_GET_VERSION;
|
|
|
|
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
|
|
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
|
|
+ cpu_to_be64(buf[1]));
|
|
if (rc != H_SUCCESS)
|
|
dev_err(ibmvtpm->dev,
|
|
"ibmvtpm_crq_get_version failed rc=%d\n", rc);
|
|
@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
|
|
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
|
|
{
|
|
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
|
|
+
|
|
+ /* ibmvtpm initializes at probe time, so the data we are
|
|
+ * asking for may not be set yet. Estimate that 4K required
|
|
+ * for TCE-mapped buffer in addition to CRQ.
|
|
+ */
|
|
+ if (!ibmvtpm)
|
|
+ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
|
|
+
|
|
return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
|
|
}
|
|
|
|
@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
|
|
crq.valid = (u8)IBMVTPM_VALID_CMD;
|
|
crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
|
|
|
|
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
|
|
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
|
|
+ cpu_to_be64(buf[1]));
|
|
if (rc != H_SUCCESS)
|
|
dev_err(ibmvtpm->dev,
|
|
"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
|
|
@@ -472,11 +484,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
|
|
case IBMVTPM_VALID_CMD:
|
|
switch (crq->msg) {
|
|
case VTPM_GET_RTCE_BUFFER_SIZE_RES:
|
|
- if (crq->len <= 0) {
|
|
+ if (be16_to_cpu(crq->len) <= 0) {
|
|
dev_err(ibmvtpm->dev, "Invalid rtce size\n");
|
|
return;
|
|
}
|
|
- ibmvtpm->rtce_size = crq->len;
|
|
+ ibmvtpm->rtce_size = be16_to_cpu(crq->len);
|
|
ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
|
|
GFP_KERNEL);
|
|
if (!ibmvtpm->rtce_buf) {
|
|
@@ -497,11 +509,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
|
|
|
|
return;
|
|
case VTPM_GET_VERSION_RES:
|
|
- ibmvtpm->vtpm_version = crq->data;
|
|
+ ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
|
|
return;
|
|
case VTPM_TPM_COMMAND_RES:
|
|
/* len of the data in rtce buffer */
|
|
- ibmvtpm->res_len = crq->len;
|
|
+ ibmvtpm->res_len = be16_to_cpu(crq->len);
|
|
wake_up_interruptible(&ibmvtpm->wq);
|
|
return;
|
|
default:
|
|
@@ -567,6 +579,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
|
|
goto cleanup;
|
|
}
|
|
|
|
+ ibmvtpm->dev = dev;
|
|
+ ibmvtpm->vdev = vio_dev;
|
|
+
|
|
crq_q = &ibmvtpm->crq_queue;
|
|
crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
|
|
if (!crq_q->crq_addr) {
|
|
@@ -611,8 +626,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
|
|
|
|
crq_q->index = 0;
|
|
|
|
- ibmvtpm->dev = dev;
|
|
- ibmvtpm->vdev = vio_dev;
|
|
TPM_VPRIV(chip) = (void *)ibmvtpm;
|
|
|
|
spin_lock_init(&ibmvtpm->rtce_lock);
|
|
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
|
|
index bd82a79..b2c231b 100644
|
|
--- a/drivers/char/tpm/tpm_ibmvtpm.h
|
|
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
|
|
@@ -22,9 +22,9 @@
|
|
struct ibmvtpm_crq {
|
|
u8 valid;
|
|
u8 msg;
|
|
- u16 len;
|
|
- u32 data;
|
|
- u64 reserved;
|
|
+ __be16 len;
|
|
+ __be32 data;
|
|
+ __be64 reserved;
|
|
} __attribute__((packed, aligned(8)));
|
|
|
|
struct ibmvtpm_crq_queue {
|
|
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
|
|
index a9ed227..51350cd 100644
|
|
--- a/drivers/char/tpm/tpm_tis.c
|
|
+++ b/drivers/char/tpm/tpm_tis.c
|
|
@@ -75,6 +75,10 @@ enum tis_defaults {
|
|
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
|
|
#define TPM_RID(l) (0x0F04 | ((l) << 12))
|
|
|
|
+struct priv_data {
|
|
+ bool irq_tested;
|
|
+};
|
|
+
|
|
static LIST_HEAD(tis_chips);
|
|
static DEFINE_MUTEX(tis_lock);
|
|
|
|
@@ -338,12 +342,27 @@ out_err:
|
|
return rc;
|
|
}
|
|
|
|
+static void disable_interrupts(struct tpm_chip *chip)
|
|
+{
|
|
+ u32 intmask;
|
|
+
|
|
+ intmask =
|
|
+ ioread32(chip->vendor.iobase +
|
|
+ TPM_INT_ENABLE(chip->vendor.locality));
|
|
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
|
|
+ iowrite32(intmask,
|
|
+ chip->vendor.iobase +
|
|
+ TPM_INT_ENABLE(chip->vendor.locality));
|
|
+ free_irq(chip->vendor.irq, chip);
|
|
+ chip->vendor.irq = 0;
|
|
+}
|
|
+
|
|
/*
|
|
* If interrupts are used (signaled by an irq set in the vendor structure)
|
|
* tpm.c can skip polling for the data to be available as the interrupt is
|
|
* waited for here
|
|
*/
|
|
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
+static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
{
|
|
int rc;
|
|
u32 ordinal;
|
|
@@ -373,6 +392,60 @@ out_err:
|
|
return rc;
|
|
}
|
|
|
|
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
+{
|
|
+ int rc, irq;
|
|
+ struct priv_data *priv = chip->vendor.priv;
|
|
+
|
|
+ if (!chip->vendor.irq || priv->irq_tested)
|
|
+ return tpm_tis_send_main(chip, buf, len);
|
|
+
|
|
+ /* Verify receipt of the expected IRQ */
|
|
+ irq = chip->vendor.irq;
|
|
+ chip->vendor.irq = 0;
|
|
+ rc = tpm_tis_send_main(chip, buf, len);
|
|
+ chip->vendor.irq = irq;
|
|
+ if (!priv->irq_tested)
|
|
+ msleep(1);
|
|
+ if (!priv->irq_tested) {
|
|
+ disable_interrupts(chip);
|
|
+ dev_err(chip->dev,
|
|
+ FW_BUG "TPM interrupt not working, polling instead\n");
|
|
+ }
|
|
+ priv->irq_tested = true;
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+struct tis_vendor_timeout_override {
|
|
+ u32 did_vid;
|
|
+ unsigned long timeout_us[4];
|
|
+};
|
|
+
|
|
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
|
|
+ /* Atmel 3204 */
|
|
+ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
|
|
+ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
|
|
+};
|
|
+
|
|
+static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
|
|
+ unsigned long *timeout_cap)
|
|
+{
|
|
+ int i;
|
|
+ u32 did_vid;
|
|
+
|
|
+ did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
|
|
+
|
|
+ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
|
|
+ if (vendor_timeout_overrides[i].did_vid != did_vid)
|
|
+ continue;
|
|
+ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
|
|
+ sizeof(vendor_timeout_overrides[i].timeout_us));
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
/*
|
|
* Early probing for iTPM with STS_DATA_EXPECT flaw.
|
|
* Try sending command without itpm flag set and if that
|
|
@@ -437,6 +510,7 @@ static const struct tpm_class_ops tpm_tis = {
|
|
.recv = tpm_tis_recv,
|
|
.send = tpm_tis_send,
|
|
.cancel = tpm_tis_ready,
|
|
+ .update_timeouts = tpm_tis_update_timeouts,
|
|
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
|
|
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
|
|
.req_canceled = tpm_tis_req_canceled,
|
|
@@ -474,6 +548,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
|
|
if (interrupt == 0)
|
|
return IRQ_NONE;
|
|
|
|
+ ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
|
|
if (interrupt & TPM_INTF_DATA_AVAIL_INT)
|
|
wake_up_interruptible(&chip->vendor.read_queue);
|
|
if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
|
|
@@ -503,9 +578,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
|
|
u32 vendor, intfcaps, intmask;
|
|
int rc, i, irq_s, irq_e, probe;
|
|
struct tpm_chip *chip;
|
|
+ struct priv_data *priv;
|
|
|
|
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
|
|
+ if (priv == NULL)
|
|
+ return -ENOMEM;
|
|
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
|
|
return -ENODEV;
|
|
+ chip->vendor.priv = priv;
|
|
|
|
chip->vendor.iobase = ioremap(start, len);
|
|
if (!chip->vendor.iobase) {
|
|
@@ -574,19 +654,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
|
|
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
|
|
dev_dbg(dev, "\tData Avail Int Support\n");
|
|
|
|
- /* get the timeouts before testing for irqs */
|
|
- if (tpm_get_timeouts(chip)) {
|
|
- dev_err(dev, "Could not get TPM timeouts and durations\n");
|
|
- rc = -ENODEV;
|
|
- goto out_err;
|
|
- }
|
|
-
|
|
- if (tpm_do_selftest(chip)) {
|
|
- dev_err(dev, "TPM self test failed\n");
|
|
- rc = -ENODEV;
|
|
- goto out_err;
|
|
- }
|
|
-
|
|
/* INTERRUPT Setup */
|
|
init_waitqueue_head(&chip->vendor.read_queue);
|
|
init_waitqueue_head(&chip->vendor.int_queue);
|
|
@@ -688,6 +755,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
|
|
}
|
|
}
|
|
|
|
+ if (tpm_get_timeouts(chip)) {
|
|
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
|
|
+ rc = -ENODEV;
|
|
+ goto out_err;
|
|
+ }
|
|
+
|
|
+ if (tpm_do_selftest(chip)) {
|
|
+ dev_err(dev, "TPM self test failed\n");
|
|
+ rc = -ENODEV;
|
|
+ goto out_err;
|
|
+ }
|
|
+
|
|
INIT_LIST_HEAD(&chip->vendor.list);
|
|
mutex_lock(&tis_lock);
|
|
list_add(&chip->vendor.list, &tis_chips);
|
|
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
|
|
index 6928d09..b08eadb 100644
|
|
--- a/drivers/char/virtio_console.c
|
|
+++ b/drivers/char/virtio_console.c
|
|
@@ -142,6 +142,7 @@ struct ports_device {
|
|
* notification
|
|
*/
|
|
struct work_struct control_work;
|
|
+ struct work_struct config_work;
|
|
|
|
struct list_head ports;
|
|
|
|
@@ -1832,10 +1833,21 @@ static void config_intr(struct virtio_device *vdev)
|
|
|
|
portdev = vdev->priv;
|
|
|
|
+ if (!use_multiport(portdev))
|
|
+ schedule_work(&portdev->config_work);
|
|
+}
|
|
+
|
|
+static void config_work_handler(struct work_struct *work)
|
|
+{
|
|
+ struct ports_device *portdev;
|
|
+
|
|
+ portdev = container_of(work, struct ports_device, control_work);
|
|
if (!use_multiport(portdev)) {
|
|
+ struct virtio_device *vdev;
|
|
struct port *port;
|
|
u16 rows, cols;
|
|
|
|
+ vdev = portdev->vdev;
|
|
virtio_cread(vdev, struct virtio_console_config, cols, &cols);
|
|
virtio_cread(vdev, struct virtio_console_config, rows, &rows);
|
|
|
|
@@ -2024,12 +2036,14 @@ static int virtcons_probe(struct virtio_device *vdev)
|
|
spin_lock_init(&portdev->ports_lock);
|
|
INIT_LIST_HEAD(&portdev->ports);
|
|
|
|
+ INIT_WORK(&portdev->config_work, &config_work_handler);
|
|
+ INIT_WORK(&portdev->control_work, &control_work_handler);
|
|
+
|
|
if (multiport) {
|
|
unsigned int nr_added_bufs;
|
|
|
|
spin_lock_init(&portdev->c_ivq_lock);
|
|
spin_lock_init(&portdev->c_ovq_lock);
|
|
- INIT_WORK(&portdev->control_work, &control_work_handler);
|
|
|
|
nr_added_bufs = fill_queue(portdev->c_ivq,
|
|
&portdev->c_ivq_lock);
|
|
@@ -2097,6 +2111,8 @@ static void virtcons_remove(struct virtio_device *vdev)
|
|
/* Finish up work that's lined up */
|
|
if (use_multiport(portdev))
|
|
cancel_work_sync(&portdev->control_work);
|
|
+ else
|
|
+ cancel_work_sync(&portdev->config_work);
|
|
|
|
list_for_each_entry_safe(port, port2, &portdev->ports, list)
|
|
unplug_port(port);
|
|
@@ -2148,6 +2164,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
|
|
|
|
virtqueue_disable_cb(portdev->c_ivq);
|
|
cancel_work_sync(&portdev->control_work);
|
|
+ cancel_work_sync(&portdev->config_work);
|
|
/*
|
|
* Once more: if control_work_handler() was running, it would
|
|
* enable the cb as the last step.
|
|
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
|
|
index 4a58c55..797bab9 100644
|
|
--- a/drivers/clk/clk-gate.c
|
|
+++ b/drivers/clk/clk-gate.c
|
|
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
|
|
struct clk_init_data init;
|
|
|
|
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
|
|
- if (bit_idx > 16) {
|
|
+ if (bit_idx > 15) {
|
|
pr_err("gate bit exceeds LOWORD field\n");
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
|
|
index 020400d..8b1e2ef 100644
|
|
--- a/drivers/clk/clk.c
|
|
+++ b/drivers/clk/clk.c
|
|
@@ -1487,6 +1487,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
|
|
static void clk_change_rate(struct clk *clk)
|
|
{
|
|
struct clk *child;
|
|
+ struct hlist_node *tmp;
|
|
unsigned long old_rate;
|
|
unsigned long best_parent_rate = 0;
|
|
bool skip_set_rate = false;
|
|
@@ -1525,7 +1526,11 @@ static void clk_change_rate(struct clk *clk)
|
|
if (clk->notifier_count && old_rate != clk->rate)
|
|
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
|
|
|
|
- hlist_for_each_entry(child, &clk->children, child_node) {
|
|
+ /*
|
|
+ * Use safe iteration, as change_rate can actually swap parents
|
|
+ * for certain clock types.
|
|
+ */
|
|
+ hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
|
|
/* Skip children who will be reparented to another clock */
|
|
if (child->new_parent && child->new_parent != clk)
|
|
continue;
|
|
@@ -2240,14 +2245,17 @@ int __clk_get(struct clk *clk)
|
|
|
|
void __clk_put(struct clk *clk)
|
|
{
|
|
+ struct module *owner;
|
|
+
|
|
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
|
|
return;
|
|
|
|
clk_prepare_lock();
|
|
+ owner = clk->owner;
|
|
kref_put(&clk->ref, __clk_release);
|
|
clk_prepare_unlock();
|
|
|
|
- module_put(clk->owner);
|
|
+ module_put(owner);
|
|
}
|
|
|
|
/*** clk rate change notifiers ***/
|
|
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
|
|
index 0996a3a..a9dd21a 100644
|
|
--- a/drivers/clk/qcom/clk-rcg2.c
|
|
+++ b/drivers/clk/qcom/clk-rcg2.c
|
|
@@ -257,7 +257,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
|
|
mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
|
|
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
|
|
cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
|
|
- if (rcg->mnd_width && f->n)
|
|
+ if (rcg->mnd_width && f->n && (f->m != f->n))
|
|
cfg |= CFG_MODE_DUAL_EDGE;
|
|
ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, mask,
|
|
cfg);
|
|
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
|
|
index 9be47a8..f3c95d6 100644
|
|
--- a/drivers/clk/qcom/mmcc-msm8960.c
|
|
+++ b/drivers/clk/qcom/mmcc-msm8960.c
|
|
@@ -37,6 +37,8 @@
|
|
#define P_PLL2 2
|
|
#define P_PLL3 3
|
|
|
|
+#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
|
|
+
|
|
static u8 mmcc_pxo_pll8_pll2_map[] = {
|
|
[P_PXO] = 0,
|
|
[P_PLL8] = 2,
|
|
@@ -58,8 +60,8 @@ static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {
|
|
|
|
static const char *mmcc_pxo_pll8_pll2_pll3[] = {
|
|
"pxo",
|
|
- "pll2",
|
|
"pll8_vote",
|
|
+ "pll2",
|
|
"pll3",
|
|
};
|
|
|
|
@@ -709,18 +711,18 @@ static struct clk_branch csiphy2_timer_clk = {
|
|
};
|
|
|
|
static struct freq_tbl clk_tbl_gfx2d[] = {
|
|
- { 27000000, P_PXO, 1, 0 },
|
|
- { 48000000, P_PLL8, 1, 8 },
|
|
- { 54857000, P_PLL8, 1, 7 },
|
|
- { 64000000, P_PLL8, 1, 6 },
|
|
- { 76800000, P_PLL8, 1, 5 },
|
|
- { 96000000, P_PLL8, 1, 4 },
|
|
- { 128000000, P_PLL8, 1, 3 },
|
|
- { 145455000, P_PLL2, 2, 11 },
|
|
- { 160000000, P_PLL2, 1, 5 },
|
|
- { 177778000, P_PLL2, 2, 9 },
|
|
- { 200000000, P_PLL2, 1, 4 },
|
|
- { 228571000, P_PLL2, 2, 7 },
|
|
+ F_MN( 27000000, P_PXO, 1, 0),
|
|
+ F_MN( 48000000, P_PLL8, 1, 8),
|
|
+ F_MN( 54857000, P_PLL8, 1, 7),
|
|
+ F_MN( 64000000, P_PLL8, 1, 6),
|
|
+ F_MN( 76800000, P_PLL8, 1, 5),
|
|
+ F_MN( 96000000, P_PLL8, 1, 4),
|
|
+ F_MN(128000000, P_PLL8, 1, 3),
|
|
+ F_MN(145455000, P_PLL2, 2, 11),
|
|
+ F_MN(160000000, P_PLL2, 1, 5),
|
|
+ F_MN(177778000, P_PLL2, 2, 9),
|
|
+ F_MN(200000000, P_PLL2, 1, 4),
|
|
+ F_MN(228571000, P_PLL2, 2, 7),
|
|
{ }
|
|
};
|
|
|
|
@@ -841,22 +843,22 @@ static struct clk_branch gfx2d1_clk = {
|
|
};
|
|
|
|
static struct freq_tbl clk_tbl_gfx3d[] = {
|
|
- { 27000000, P_PXO, 1, 0 },
|
|
- { 48000000, P_PLL8, 1, 8 },
|
|
- { 54857000, P_PLL8, 1, 7 },
|
|
- { 64000000, P_PLL8, 1, 6 },
|
|
- { 76800000, P_PLL8, 1, 5 },
|
|
- { 96000000, P_PLL8, 1, 4 },
|
|
- { 128000000, P_PLL8, 1, 3 },
|
|
- { 145455000, P_PLL2, 2, 11 },
|
|
- { 160000000, P_PLL2, 1, 5 },
|
|
- { 177778000, P_PLL2, 2, 9 },
|
|
- { 200000000, P_PLL2, 1, 4 },
|
|
- { 228571000, P_PLL2, 2, 7 },
|
|
- { 266667000, P_PLL2, 1, 3 },
|
|
- { 300000000, P_PLL3, 1, 4 },
|
|
- { 320000000, P_PLL2, 2, 5 },
|
|
- { 400000000, P_PLL2, 1, 2 },
|
|
+ F_MN( 27000000, P_PXO, 1, 0),
|
|
+ F_MN( 48000000, P_PLL8, 1, 8),
|
|
+ F_MN( 54857000, P_PLL8, 1, 7),
|
|
+ F_MN( 64000000, P_PLL8, 1, 6),
|
|
+ F_MN( 76800000, P_PLL8, 1, 5),
|
|
+ F_MN( 96000000, P_PLL8, 1, 4),
|
|
+ F_MN(128000000, P_PLL8, 1, 3),
|
|
+ F_MN(145455000, P_PLL2, 2, 11),
|
|
+ F_MN(160000000, P_PLL2, 1, 5),
|
|
+ F_MN(177778000, P_PLL2, 2, 9),
|
|
+ F_MN(200000000, P_PLL2, 1, 4),
|
|
+ F_MN(228571000, P_PLL2, 2, 7),
|
|
+ F_MN(266667000, P_PLL2, 1, 3),
|
|
+ F_MN(300000000, P_PLL3, 1, 4),
|
|
+ F_MN(320000000, P_PLL2, 2, 5),
|
|
+ F_MN(400000000, P_PLL2, 1, 2),
|
|
{ }
|
|
};
|
|
|
|
@@ -896,7 +898,7 @@ static struct clk_dyn_rcg gfx3d_src = {
|
|
.hw.init = &(struct clk_init_data){
|
|
.name = "gfx3d_src",
|
|
.parent_names = mmcc_pxo_pll8_pll2_pll3,
|
|
- .num_parents = 3,
|
|
+ .num_parents = 4,
|
|
.ops = &clk_dyn_rcg_ops,
|
|
},
|
|
},
|
|
@@ -994,7 +996,7 @@ static struct clk_rcg jpegd_src = {
|
|
.ns_reg = 0x00ac,
|
|
.p = {
|
|
.pre_div_shift = 12,
|
|
- .pre_div_width = 2,
|
|
+ .pre_div_width = 4,
|
|
},
|
|
.s = {
|
|
.src_sel_shift = 0,
|
|
@@ -1114,7 +1116,7 @@ static struct clk_branch mdp_lut_clk = {
|
|
.enable_reg = 0x016c,
|
|
.enable_mask = BIT(0),
|
|
.hw.init = &(struct clk_init_data){
|
|
- .parent_names = (const char *[]){ "mdp_clk" },
|
|
+ .parent_names = (const char *[]){ "mdp_src" },
|
|
.num_parents = 1,
|
|
.name = "mdp_lut_clk",
|
|
.ops = &clk_branch_ops,
|
|
@@ -1341,15 +1343,15 @@ static struct clk_branch hdmi_app_clk = {
|
|
};
|
|
|
|
static struct freq_tbl clk_tbl_vcodec[] = {
|
|
- { 27000000, P_PXO, 1, 0 },
|
|
- { 32000000, P_PLL8, 1, 12 },
|
|
- { 48000000, P_PLL8, 1, 8 },
|
|
- { 54860000, P_PLL8, 1, 7 },
|
|
- { 96000000, P_PLL8, 1, 4 },
|
|
- { 133330000, P_PLL2, 1, 6 },
|
|
- { 200000000, P_PLL2, 1, 4 },
|
|
- { 228570000, P_PLL2, 2, 7 },
|
|
- { 266670000, P_PLL2, 1, 3 },
|
|
+ F_MN( 27000000, P_PXO, 1, 0),
|
|
+ F_MN( 32000000, P_PLL8, 1, 12),
|
|
+ F_MN( 48000000, P_PLL8, 1, 8),
|
|
+ F_MN( 54860000, P_PLL8, 1, 7),
|
|
+ F_MN( 96000000, P_PLL8, 1, 4),
|
|
+ F_MN(133330000, P_PLL2, 1, 6),
|
|
+ F_MN(200000000, P_PLL2, 1, 4),
|
|
+ F_MN(228570000, P_PLL2, 2, 7),
|
|
+ F_MN(266670000, P_PLL2, 1, 3),
|
|
{ }
|
|
};
|
|
|
|
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
|
|
index 884187f..7f30b94 100644
|
|
--- a/drivers/clk/samsung/clk-exynos-audss.c
|
|
+++ b/drivers/clk/samsung/clk-exynos-audss.c
|
|
@@ -210,6 +210,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
|
|
{
|
|
int i;
|
|
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+ unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
|
|
+#endif
|
|
+
|
|
of_clk_del_provider(pdev->dev.of_node);
|
|
|
|
for (i = 0; i < clk_data.clk_num; i++) {
|
|
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
|
|
index 9e23264..ea4db84 100644
|
|
--- a/drivers/clk/sunxi/clk-factors.c
|
|
+++ b/drivers/clk/sunxi/clk-factors.c
|
|
@@ -62,7 +62,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
|
|
p = FACTOR_GET(config->pshift, config->pwidth, reg);
|
|
|
|
/* Calculate the rate */
|
|
- rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
|
|
+ rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
|
|
|
|
return rate;
|
|
}
|
|
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
|
|
index 02e1a43..d2d0efa 100644
|
|
--- a/drivers/clk/sunxi/clk-factors.h
|
|
+++ b/drivers/clk/sunxi/clk-factors.h
|
|
@@ -15,6 +15,7 @@ struct clk_factors_config {
|
|
u8 mwidth;
|
|
u8 pshift;
|
|
u8 pwidth;
|
|
+ u8 n_start;
|
|
};
|
|
|
|
struct clk_factors {
|
|
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
|
|
index abb6c5a..06a14b8 100644
|
|
--- a/drivers/clk/sunxi/clk-sunxi.c
|
|
+++ b/drivers/clk/sunxi/clk-sunxi.c
|
|
@@ -407,6 +407,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
|
|
.kwidth = 2,
|
|
.mshift = 0,
|
|
.mwidth = 2,
|
|
+ .n_start = 1,
|
|
};
|
|
|
|
static struct clk_factors_config sun4i_pll5_config = {
|
|
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
|
|
index c0a7d77..a90af17 100644
|
|
--- a/drivers/clk/tegra/clk.c
|
|
+++ b/drivers/clk/tegra/clk.c
|
|
@@ -266,7 +266,7 @@ void __init tegra_add_of_provider(struct device_node *np)
|
|
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
|
|
|
|
rst_ctlr.of_node = np;
|
|
- rst_ctlr.nr_resets = clk_num * 32;
|
|
+ rst_ctlr.nr_resets = periph_banks * 32;
|
|
reset_controller_register(&rst_ctlr);
|
|
}
|
|
|
|
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
|
|
index c6e86a9..5122ef2 100644
|
|
--- a/drivers/clk/versatile/clk-sp810.c
|
|
+++ b/drivers/clk/versatile/clk-sp810.c
|
|
@@ -128,8 +128,8 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
|
|
{
|
|
struct clk_sp810 *sp810 = data;
|
|
|
|
- if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
|
|
- ARRAY_SIZE(sp810->timerclken)))
|
|
+ if (WARN_ON(clkspec->args_count != 1 ||
|
|
+ clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken)))
|
|
return NULL;
|
|
|
|
return sp810->timerclken[clkspec->args[0]].clk;
|
|
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
|
|
index 09dd017..5f52f3f 100644
|
|
--- a/drivers/clk/zynq/clkc.c
|
|
+++ b/drivers/clk/zynq/clkc.c
|
|
@@ -300,6 +300,7 @@ static void __init zynq_clk_setup(struct device_node *np)
|
|
clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
|
|
"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
|
|
26, 0, &armclk_lock);
|
|
+ clk_prepare_enable(clks[cpu_2x]);
|
|
|
|
clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
|
|
4 + 2 * tmp);
|
|
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
|
|
index e252939..ddd03f8 100644
|
|
--- a/drivers/clocksource/exynos_mct.c
|
|
+++ b/drivers/clocksource/exynos_mct.c
|
|
@@ -98,8 +98,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
|
|
__raw_writel(value, reg_base + offset);
|
|
|
|
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
|
|
- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
|
|
- switch (offset & EXYNOS4_MCT_L_MASK) {
|
|
+ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
|
|
+ switch (offset & ~EXYNOS4_MCT_L_MASK) {
|
|
case MCT_L_TCON_OFFSET:
|
|
mask = 1 << 3; /* L_TCON write status */
|
|
break;
|
|
@@ -422,15 +422,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
|
|
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
|
|
|
|
if (mct_int_type == MCT_INT_SPI) {
|
|
- evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
|
|
- if (request_irq(evt->irq, exynos4_mct_tick_isr,
|
|
- IRQF_TIMER | IRQF_NOBALANCING,
|
|
- evt->name, mevt)) {
|
|
- pr_err("exynos-mct: cannot register IRQ %d\n",
|
|
- evt->irq);
|
|
+
|
|
+ if (evt->irq == -1)
|
|
return -EIO;
|
|
- }
|
|
- irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
|
|
+
|
|
+ irq_force_affinity(evt->irq, cpumask_of(cpu));
|
|
+ enable_irq(evt->irq);
|
|
} else {
|
|
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
|
|
}
|
|
@@ -443,10 +440,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
|
|
static void exynos4_local_timer_stop(struct clock_event_device *evt)
|
|
{
|
|
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
|
|
- if (mct_int_type == MCT_INT_SPI)
|
|
- free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
|
|
- else
|
|
+ if (mct_int_type == MCT_INT_SPI) {
|
|
+ if (evt->irq != -1)
|
|
+ disable_irq_nosync(evt->irq);
|
|
+ } else {
|
|
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
|
|
+ }
|
|
}
|
|
|
|
static int exynos4_mct_cpu_notify(struct notifier_block *self,
|
|
@@ -478,7 +477,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
|
|
|
|
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
|
|
{
|
|
- int err;
|
|
+ int err, cpu;
|
|
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
|
|
struct clk *mct_clk, *tick_clk;
|
|
|
|
@@ -505,7 +504,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
|
|
WARN(err, "MCT: can't request IRQ %d (%d)\n",
|
|
mct_irqs[MCT_L0_IRQ], err);
|
|
} else {
|
|
- irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
|
|
+ for_each_possible_cpu(cpu) {
|
|
+ int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
|
|
+ struct mct_clock_event_device *pcpu_mevt =
|
|
+ per_cpu_ptr(&percpu_mct_tick, cpu);
|
|
+
|
|
+ pcpu_mevt->evt.irq = -1;
|
|
+
|
|
+ irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
|
|
+ if (request_irq(mct_irq,
|
|
+ exynos4_mct_tick_isr,
|
|
+ IRQF_TIMER | IRQF_NOBALANCING,
|
|
+ pcpu_mevt->name, pcpu_mevt)) {
|
|
+ pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
|
|
+ cpu);
|
|
+
|
|
+ continue;
|
|
+ }
|
|
+ pcpu_mevt->evt.irq = mct_irq;
|
|
+ }
|
|
}
|
|
|
|
err = register_cpu_notifier(&exynos4_mct_cpu_nb);
|
|
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
|
|
index bf497af..7d19f86 100644
|
|
--- a/drivers/clocksource/sun4i_timer.c
|
|
+++ b/drivers/clocksource/sun4i_timer.c
|
|
@@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node)
|
|
/* Make sure timer is stopped before playing with interrupts */
|
|
sun4i_clkevt_time_stop(0);
|
|
|
|
+ sun4i_clockevent.cpumask = cpu_possible_mask;
|
|
+ sun4i_clockevent.irq = irq;
|
|
+
|
|
+ clockevents_config_and_register(&sun4i_clockevent, rate,
|
|
+ TIMER_SYNC_TICKS, 0xffffffff);
|
|
+
|
|
ret = setup_irq(irq, &sun4i_timer_irq);
|
|
if (ret)
|
|
pr_warn("failed to setup irq %d\n", irq);
|
|
@@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node)
|
|
/* Enable timer0 interrupt */
|
|
val = readl(timer_base + TIMER_IRQ_EN_REG);
|
|
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
|
|
-
|
|
- sun4i_clockevent.cpumask = cpu_possible_mask;
|
|
- sun4i_clockevent.irq = irq;
|
|
-
|
|
- clockevents_config_and_register(&sun4i_clockevent, rate,
|
|
- TIMER_SYNC_TICKS, 0xffffffff);
|
|
}
|
|
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
|
|
sun4i_timer_init);
|
|
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
|
|
index 1a6205b..35b0161 100644
|
|
--- a/drivers/clocksource/time-efm32.c
|
|
+++ b/drivers/clocksource/time-efm32.c
|
|
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
|
|
clock_event_ddata.base = base;
|
|
clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
|
|
|
|
- setup_irq(irq, &efm32_clock_event_irq);
|
|
-
|
|
clockevents_config_and_register(&clock_event_ddata.evtdev,
|
|
DIV_ROUND_CLOSEST(rate, 1024),
|
|
0xf, 0xffff);
|
|
|
|
+ setup_irq(irq, &efm32_clock_event_irq);
|
|
+
|
|
return 0;
|
|
|
|
err_get_irq:
|
|
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
|
|
index deebcd6..4f0f361 100644
|
|
--- a/drivers/clocksource/timer-sun5i.c
|
|
+++ b/drivers/clocksource/timer-sun5i.c
|
|
@@ -172,10 +172,6 @@ static void __init sun5i_timer_init(struct device_node *node)
|
|
|
|
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
|
|
|
|
- ret = setup_irq(irq, &sun5i_timer_irq);
|
|
- if (ret)
|
|
- pr_warn("failed to setup irq %d\n", irq);
|
|
-
|
|
/* Enable timer0 interrupt */
|
|
val = readl(timer_base + TIMER_IRQ_EN_REG);
|
|
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
|
|
@@ -185,6 +181,10 @@ static void __init sun5i_timer_init(struct device_node *node)
|
|
|
|
clockevents_config_and_register(&sun5i_clockevent, rate,
|
|
TIMER_SYNC_TICKS, 0xffffffff);
|
|
+
|
|
+ ret = setup_irq(irq, &sun5i_timer_irq);
|
|
+ if (ret)
|
|
+ pr_warn("failed to setup irq %d\n", irq);
|
|
}
|
|
CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
|
|
sun5i_timer_init);
|
|
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
|
|
index 199b52b..ef3b8ad 100644
|
|
--- a/drivers/cpufreq/cpufreq.c
|
|
+++ b/drivers/cpufreq/cpufreq.c
|
|
@@ -460,7 +460,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
|
|
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
|
|
show_one(scaling_min_freq, min);
|
|
show_one(scaling_max_freq, max);
|
|
-show_one(scaling_cur_freq, cur);
|
|
+
|
|
+static ssize_t show_scaling_cur_freq(
|
|
+ struct cpufreq_policy *policy, char *buf)
|
|
+{
|
|
+ ssize_t ret;
|
|
+
|
|
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
|
|
+ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
|
|
+ else
|
|
+ ret = sprintf(buf, "%u\n", policy->cur);
|
|
+ return ret;
|
|
+}
|
|
|
|
static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
|
struct cpufreq_policy *new_policy);
|
|
@@ -854,11 +865,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
|
|
if (ret)
|
|
goto err_out_kobj_put;
|
|
}
|
|
- if (has_target()) {
|
|
- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
|
|
- if (ret)
|
|
- goto err_out_kobj_put;
|
|
- }
|
|
+
|
|
+ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
|
|
+ if (ret)
|
|
+ goto err_out_kobj_put;
|
|
+
|
|
if (cpufreq_driver->bios_limit) {
|
|
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
|
|
if (ret)
|
|
@@ -1089,10 +1100,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
|
|
* the creation of a brand new one. So we need to perform this update
|
|
* by invoking update_policy_cpu().
|
|
*/
|
|
- if (frozen && cpu != policy->cpu)
|
|
+ if (frozen && cpu != policy->cpu) {
|
|
update_policy_cpu(policy, cpu);
|
|
- else
|
|
+ WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
|
|
+ } else {
|
|
policy->cpu = cpu;
|
|
+ }
|
|
|
|
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
|
|
cpumask_copy(policy->cpus, cpumask_of(cpu));
|
|
@@ -1223,6 +1236,8 @@ err_get_freq:
|
|
per_cpu(cpufreq_cpu_data, j) = NULL;
|
|
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
|
|
|
+ up_write(&policy->rwsem);
|
|
+
|
|
if (cpufreq_driver->exit)
|
|
cpufreq_driver->exit(policy);
|
|
err_set_policy_cpu:
|
|
@@ -1350,9 +1365,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
|
|
unsigned long flags;
|
|
struct cpufreq_policy *policy;
|
|
|
|
- read_lock_irqsave(&cpufreq_driver_lock, flags);
|
|
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
|
|
policy = per_cpu(cpufreq_cpu_data, cpu);
|
|
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
|
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
|
|
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
|
|
|
if (!policy) {
|
|
pr_debug("%s: No cpu_data found\n", __func__);
|
|
@@ -1407,7 +1423,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
|
|
}
|
|
}
|
|
|
|
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
|
|
index 0e27844..8089dd2 100644
|
|
--- a/drivers/cpufreq/integrator-cpufreq.c
|
|
+++ b/drivers/cpufreq/integrator-cpufreq.c
|
|
@@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
|
|
return cpufreq_register_driver(&integrator_driver);
|
|
}
|
|
|
|
-static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
|
|
+static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
|
|
{
|
|
- cpufreq_unregister_driver(&integrator_driver);
|
|
+ return cpufreq_unregister_driver(&integrator_driver);
|
|
}
|
|
|
|
static const struct of_device_id integrator_cpufreq_match[] = {
|
|
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
|
|
index ae52c77..fbc693b 100644
|
|
--- a/drivers/cpufreq/intel_pstate.c
|
|
+++ b/drivers/cpufreq/intel_pstate.c
|
|
@@ -55,6 +55,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
|
|
return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
|
|
}
|
|
|
|
+static inline int ceiling_fp(int32_t x)
|
|
+{
|
|
+ int mask, ret;
|
|
+
|
|
+ ret = fp_toint(x);
|
|
+ mask = (1 << FRAC_BITS) - 1;
|
|
+ if (x & mask)
|
|
+ ret += 1;
|
|
+ return ret;
|
|
+}
|
|
+
|
|
struct sample {
|
|
int32_t core_pct_busy;
|
|
u64 aperf;
|
|
@@ -67,6 +78,7 @@ struct pstate_data {
|
|
int current_pstate;
|
|
int min_pstate;
|
|
int max_pstate;
|
|
+ int scaling;
|
|
int turbo_pstate;
|
|
};
|
|
|
|
@@ -118,6 +130,7 @@ struct pstate_funcs {
|
|
int (*get_max)(void);
|
|
int (*get_min)(void);
|
|
int (*get_turbo)(void);
|
|
+ int (*get_scaling)(void);
|
|
void (*set)(struct cpudata*, int pstate);
|
|
void (*get_vid)(struct cpudata *);
|
|
};
|
|
@@ -397,14 +410,30 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
|
|
cpudata->vid.ratio);
|
|
|
|
vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
|
|
- vid = fp_toint(vid_fp);
|
|
+ vid = ceiling_fp(vid_fp);
|
|
|
|
if (pstate > cpudata->pstate.max_pstate)
|
|
vid = cpudata->vid.turbo;
|
|
|
|
val |= vid;
|
|
|
|
- wrmsrl(MSR_IA32_PERF_CTL, val);
|
|
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
|
|
+}
|
|
+
|
|
+#define BYT_BCLK_FREQS 5
|
|
+static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
|
|
+
|
|
+static int byt_get_scaling(void)
|
|
+{
|
|
+ u64 value;
|
|
+ int i;
|
|
+
|
|
+ rdmsrl(MSR_FSB_FREQ, value);
|
|
+ i = value & 0x3;
|
|
+
|
|
+ BUG_ON(i > BYT_BCLK_FREQS);
|
|
+
|
|
+ return byt_freq_table[i] * 100;
|
|
}
|
|
|
|
static void byt_get_vid(struct cpudata *cpudata)
|
|
@@ -451,6 +480,11 @@ static int core_get_turbo_pstate(void)
|
|
return ret;
|
|
}
|
|
|
|
+static inline int core_get_scaling(void)
|
|
+{
|
|
+ return 100000;
|
|
+}
|
|
+
|
|
static void core_set_pstate(struct cpudata *cpudata, int pstate)
|
|
{
|
|
u64 val;
|
|
@@ -475,6 +509,7 @@ static struct cpu_defaults core_params = {
|
|
.get_max = core_get_max_pstate,
|
|
.get_min = core_get_min_pstate,
|
|
.get_turbo = core_get_turbo_pstate,
|
|
+ .get_scaling = core_get_scaling,
|
|
.set = core_set_pstate,
|
|
},
|
|
};
|
|
@@ -493,6 +528,7 @@ static struct cpu_defaults byt_params = {
|
|
.get_min = byt_get_min_pstate,
|
|
.get_turbo = byt_get_turbo_pstate,
|
|
.set = byt_set_pstate,
|
|
+ .get_scaling = byt_get_scaling,
|
|
.get_vid = byt_get_vid,
|
|
},
|
|
};
|
|
@@ -526,7 +562,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
|
|
if (pstate == cpu->pstate.current_pstate)
|
|
return;
|
|
|
|
- trace_cpu_frequency(pstate * 100000, cpu->cpu);
|
|
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
|
|
|
|
cpu->pstate.current_pstate = pstate;
|
|
|
|
@@ -555,6 +591,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
|
|
cpu->pstate.min_pstate = pstate_funcs.get_min();
|
|
cpu->pstate.max_pstate = pstate_funcs.get_max();
|
|
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
|
|
+ cpu->pstate.scaling = pstate_funcs.get_scaling();
|
|
|
|
if (pstate_funcs.get_vid)
|
|
pstate_funcs.get_vid(cpu);
|
|
@@ -574,7 +611,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
|
|
core_pct += 1;
|
|
|
|
sample->freq = fp_toint(
|
|
- mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
|
|
+ mul_fp(int_tofp(
|
|
+ cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
|
|
+ core_pct));
|
|
|
|
sample->core_pct_busy = (int32_t)core_pct;
|
|
}
|
|
@@ -685,10 +724,14 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
|
|
ICPU(0x37, byt_params),
|
|
ICPU(0x3a, core_params),
|
|
ICPU(0x3c, core_params),
|
|
+ ICPU(0x3d, core_params),
|
|
ICPU(0x3e, core_params),
|
|
ICPU(0x3f, core_params),
|
|
ICPU(0x45, core_params),
|
|
ICPU(0x46, core_params),
|
|
+ ICPU(0x4c, byt_params),
|
|
+ ICPU(0x4f, core_params),
|
|
+ ICPU(0x56, core_params),
|
|
{}
|
|
};
|
|
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
|
|
@@ -751,6 +794,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
|
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
|
|
limits.min_perf_pct = 100;
|
|
limits.min_perf = int_tofp(1);
|
|
+ limits.max_policy_pct = 100;
|
|
limits.max_perf_pct = 100;
|
|
limits.max_perf = int_tofp(1);
|
|
limits.no_turbo = limits.turbo_disabled;
|
|
@@ -812,12 +856,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
|
else
|
|
policy->policy = CPUFREQ_POLICY_POWERSAVE;
|
|
|
|
- policy->min = cpu->pstate.min_pstate * 100000;
|
|
- policy->max = cpu->pstate.turbo_pstate * 100000;
|
|
+ policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
|
|
+ policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
|
|
|
|
/* cpuinfo and default policy values */
|
|
- policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
|
|
- policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
|
|
+ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
|
|
+ policy->cpuinfo.max_freq =
|
|
+ cpu->pstate.turbo_pstate * cpu->pstate.scaling;
|
|
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
|
|
cpumask_set_cpu(policy->cpu, policy->cpus);
|
|
|
|
@@ -875,6 +920,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
|
|
pstate_funcs.get_max = funcs->get_max;
|
|
pstate_funcs.get_min = funcs->get_min;
|
|
pstate_funcs.get_turbo = funcs->get_turbo;
|
|
+ pstate_funcs.get_scaling = funcs->get_scaling;
|
|
pstate_funcs.set = funcs->set;
|
|
pstate_funcs.get_vid = funcs->get_vid;
|
|
}
|
|
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
|
|
index 826b8be..82cef00 100644
|
|
--- a/drivers/cpufreq/s3c2416-cpufreq.c
|
|
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
|
|
@@ -263,7 +263,7 @@ out:
|
|
}
|
|
|
|
#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
|
|
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
|
|
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
|
|
{
|
|
int count, v, i, found;
|
|
struct cpufreq_frequency_table *freq;
|
|
@@ -335,7 +335,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
|
|
.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
|
|
};
|
|
|
|
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
|
|
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
|
|
{
|
|
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
|
|
struct cpufreq_frequency_table *freq;
|
|
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
|
|
index 2506974..0eb5b40 100644
|
|
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
|
|
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
|
|
@@ -454,7 +454,7 @@ static struct cpufreq_driver s3c24xx_driver = {
|
|
};
|
|
|
|
|
|
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
|
|
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
|
|
{
|
|
if (!info || !info->name) {
|
|
printk(KERN_ERR "%s: failed to pass valid information\n",
|
|
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
|
|
index 7047821..4ab7a21 100644
|
|
--- a/drivers/cpufreq/speedstep-lib.c
|
|
+++ b/drivers/cpufreq/speedstep-lib.c
|
|
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
|
|
|
|
pr_debug("previous speed is %u\n", prev_speed);
|
|
|
|
+ preempt_disable();
|
|
local_irq_save(flags);
|
|
|
|
/* switch to low state */
|
|
@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
|
|
|
|
out:
|
|
local_irq_restore(flags);
|
|
+ preempt_enable();
|
|
+
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(speedstep_get_freqs);
|
|
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
|
|
index 998c17b..b52d8af 100644
|
|
--- a/drivers/cpufreq/speedstep-smi.c
|
|
+++ b/drivers/cpufreq/speedstep-smi.c
|
|
@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
|
|
return;
|
|
|
|
/* Disable IRQs */
|
|
+ preempt_disable();
|
|
local_irq_save(flags);
|
|
|
|
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
|
|
@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
|
|
|
|
do {
|
|
if (retry) {
|
|
+ /*
|
|
+ * We need to enable interrupts, otherwise the blockage
|
|
+ * won't resolve.
|
|
+ *
|
|
+ * We disable preemption so that other processes don't
|
|
+ * run. If other processes were running, they could
|
|
+ * submit more DMA requests, making the blockage worse.
|
|
+ */
|
|
pr_debug("retry %u, previous result %u, waiting...\n",
|
|
retry, result);
|
|
+ local_irq_enable();
|
|
mdelay(retry * 50);
|
|
+ local_irq_disable();
|
|
}
|
|
retry++;
|
|
__asm__ __volatile__(
|
|
@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
|
|
|
|
/* enable IRQs */
|
|
local_irq_restore(flags);
|
|
+ preempt_enable();
|
|
|
|
if (new_state == state)
|
|
pr_debug("change to %u MHz succeeded after %u tries "
|
|
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
|
|
index a55e68f..1adc039 100644
|
|
--- a/drivers/cpuidle/cpuidle.c
|
|
+++ b/drivers/cpuidle/cpuidle.c
|
|
@@ -131,6 +131,9 @@ int cpuidle_idle_call(void)
|
|
|
|
/* ask the governor for the next state */
|
|
next_state = cpuidle_curr_governor->select(drv, dev);
|
|
+ if (next_state < 0)
|
|
+ return -EBUSY;
|
|
+
|
|
if (need_resched()) {
|
|
dev->last_residency = 0;
|
|
/* give the governor an opportunity to reflect on the outcome */
|
|
@@ -252,9 +255,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
|
|
if (!dev->registered)
|
|
return -EINVAL;
|
|
|
|
- if (!dev->state_count)
|
|
- dev->state_count = drv->state_count;
|
|
-
|
|
ret = cpuidle_add_device_sysfs(dev);
|
|
if (ret)
|
|
return ret;
|
|
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
|
|
index cf7f2f0..027c484 100644
|
|
--- a/drivers/cpuidle/governors/menu.c
|
|
+++ b/drivers/cpuidle/governors/menu.c
|
|
@@ -297,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
|
data->needs_update = 0;
|
|
}
|
|
|
|
- data->last_state_idx = 0;
|
|
+ data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
|
|
data->exit_us = 0;
|
|
|
|
/* Special case when user has set very strict latency requirement */
|
|
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
|
|
index e918b6d..dcaae4c 100644
|
|
--- a/drivers/cpuidle/sysfs.c
|
|
+++ b/drivers/cpuidle/sysfs.c
|
|
@@ -398,7 +398,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
|
|
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
|
|
|
|
/* state statistics */
|
|
- for (i = 0; i < device->state_count; i++) {
|
|
+ for (i = 0; i < drv->state_count; i++) {
|
|
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
|
|
if (!kobj)
|
|
goto error_state;
|
|
@@ -430,9 +430,10 @@ error_state:
|
|
*/
|
|
static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
|
|
{
|
|
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
|
|
int i;
|
|
|
|
- for (i = 0; i < device->state_count; i++)
|
|
+ for (i = 0; i < drv->state_count; i++)
|
|
cpuidle_free_state_kobj(device, i);
|
|
}
|
|
|
|
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
|
|
index 0378328..1489927 100644
|
|
--- a/drivers/crypto/caam/caamhash.c
|
|
+++ b/drivers/crypto/caam/caamhash.c
|
|
@@ -835,8 +835,9 @@ static int ahash_update_ctx(struct ahash_request *req)
|
|
edesc->sec4_sg + sec4_sg_src_index,
|
|
chained);
|
|
if (*next_buflen) {
|
|
- sg_copy_part(next_buf, req->src, to_hash -
|
|
- *buflen, req->nbytes);
|
|
+ scatterwalk_map_and_copy(next_buf, req->src,
|
|
+ to_hash - *buflen,
|
|
+ *next_buflen, 0);
|
|
state->current_buf = !state->current_buf;
|
|
}
|
|
} else {
|
|
@@ -869,7 +870,8 @@ static int ahash_update_ctx(struct ahash_request *req)
|
|
kfree(edesc);
|
|
}
|
|
} else if (*next_buflen) {
|
|
- sg_copy(buf + *buflen, req->src, req->nbytes);
|
|
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
|
|
+ req->nbytes, 0);
|
|
*buflen = *next_buflen;
|
|
*next_buflen = last_buflen;
|
|
}
|
|
@@ -898,13 +900,14 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|
state->buflen_1;
|
|
u32 *sh_desc = ctx->sh_desc_fin, *desc;
|
|
dma_addr_t ptr = ctx->sh_desc_fin_dma;
|
|
- int sec4_sg_bytes;
|
|
+ int sec4_sg_bytes, sec4_sg_src_index;
|
|
int digestsize = crypto_ahash_digestsize(ahash);
|
|
struct ahash_edesc *edesc;
|
|
int ret = 0;
|
|
int sh_len;
|
|
|
|
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
|
|
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
|
|
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
|
|
|
|
/* allocate space for base edesc and hw desc commands, link tables */
|
|
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
|
@@ -931,7 +934,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
|
|
buf, state->buf_dma, buflen,
|
|
last_buflen);
|
|
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
|
|
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
|
|
|
|
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
|
|
LDST_SGF);
|
|
@@ -1216,8 +1219,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|
src_map_to_sec4_sg(jrdev, req->src, src_nents,
|
|
edesc->sec4_sg + 1, chained);
|
|
if (*next_buflen) {
|
|
- sg_copy_part(next_buf, req->src, to_hash - *buflen,
|
|
- req->nbytes);
|
|
+ scatterwalk_map_and_copy(next_buf, req->src,
|
|
+ to_hash - *buflen,
|
|
+ *next_buflen, 0);
|
|
state->current_buf = !state->current_buf;
|
|
}
|
|
|
|
@@ -1248,7 +1252,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|
kfree(edesc);
|
|
}
|
|
} else if (*next_buflen) {
|
|
- sg_copy(buf + *buflen, req->src, req->nbytes);
|
|
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
|
|
+ req->nbytes, 0);
|
|
*buflen = *next_buflen;
|
|
*next_buflen = 0;
|
|
}
|
|
@@ -1348,9 +1353,9 @@ static int ahash_update_first(struct ahash_request *req)
|
|
struct device *jrdev = ctx->jrdev;
|
|
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
|
|
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
|
|
- u8 *next_buf = state->buf_0 + state->current_buf *
|
|
- CAAM_MAX_HASH_BLOCK_SIZE;
|
|
- int *next_buflen = &state->buflen_0 + state->current_buf;
|
|
+ u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
|
|
+ int *next_buflen = state->current_buf ?
|
|
+ &state->buflen_1 : &state->buflen_0;
|
|
int to_hash;
|
|
u32 *sh_desc = ctx->sh_desc_update_first, *desc;
|
|
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
|
|
@@ -1405,7 +1410,8 @@ static int ahash_update_first(struct ahash_request *req)
|
|
}
|
|
|
|
if (*next_buflen)
|
|
- sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
|
|
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
|
|
+ *next_buflen, 0);
|
|
|
|
sh_len = desc_len(sh_desc);
|
|
desc = edesc->hw_desc;
|
|
@@ -1438,7 +1444,8 @@ static int ahash_update_first(struct ahash_request *req)
|
|
state->update = ahash_update_no_ctx;
|
|
state->finup = ahash_finup_no_ctx;
|
|
state->final = ahash_final_no_ctx;
|
|
- sg_copy(next_buf, req->src, req->nbytes);
|
|
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
|
|
+ req->nbytes, 0);
|
|
}
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
|
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
|
|
index 28486b1..ae6dae8 100644
|
|
--- a/drivers/crypto/caam/caamrng.c
|
|
+++ b/drivers/crypto/caam/caamrng.c
|
|
@@ -56,7 +56,7 @@
|
|
|
|
/* Buffer, its dma address and lock */
|
|
struct buf_data {
|
|
- u8 buf[RN_BUF_SIZE];
|
|
+ u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
|
|
dma_addr_t addr;
|
|
struct completion filled;
|
|
u32 hw_desc[DESC_JOB_O_LEN];
|
|
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
|
|
index ea2e406..b872eed 100644
|
|
--- a/drivers/crypto/caam/key_gen.c
|
|
+++ b/drivers/crypto/caam/key_gen.c
|
|
@@ -51,23 +51,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
|
|
u32 *desc;
|
|
struct split_key_result result;
|
|
dma_addr_t dma_addr_in, dma_addr_out;
|
|
- int ret = 0;
|
|
+ int ret = -ENOMEM;
|
|
|
|
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
|
|
if (!desc) {
|
|
dev_err(jrdev, "unable to allocate key input memory\n");
|
|
- return -ENOMEM;
|
|
+ return ret;
|
|
}
|
|
|
|
- init_job_desc(desc, 0);
|
|
-
|
|
dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
|
|
DMA_TO_DEVICE);
|
|
if (dma_mapping_error(jrdev, dma_addr_in)) {
|
|
dev_err(jrdev, "unable to map key input memory\n");
|
|
- kfree(desc);
|
|
- return -ENOMEM;
|
|
+ goto out_free;
|
|
}
|
|
+
|
|
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
|
|
+ DMA_FROM_DEVICE);
|
|
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
|
|
+ dev_err(jrdev, "unable to map key output memory\n");
|
|
+ goto out_unmap_in;
|
|
+ }
|
|
+
|
|
+ init_job_desc(desc, 0);
|
|
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
|
|
|
|
/* Sets MDHA up into an HMAC-INIT */
|
|
@@ -84,13 +90,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
|
|
* FIFO_STORE with the explicit split-key content store
|
|
* (0x26 output type)
|
|
*/
|
|
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
|
|
- DMA_FROM_DEVICE);
|
|
- if (dma_mapping_error(jrdev, dma_addr_out)) {
|
|
- dev_err(jrdev, "unable to map key output memory\n");
|
|
- kfree(desc);
|
|
- return -ENOMEM;
|
|
- }
|
|
append_fifo_store(desc, dma_addr_out, split_key_len,
|
|
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
|
|
|
|
@@ -118,10 +117,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
|
|
|
|
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
|
|
DMA_FROM_DEVICE);
|
|
+out_unmap_in:
|
|
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
|
|
-
|
|
+out_free:
|
|
kfree(desc);
|
|
-
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(gen_split_key);
|
|
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
|
|
index b12ff85..ce28a56 100644
|
|
--- a/drivers/crypto/caam/sg_sw_sec4.h
|
|
+++ b/drivers/crypto/caam/sg_sw_sec4.h
|
|
@@ -116,57 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
|
|
}
|
|
return nents;
|
|
}
|
|
-
|
|
-/* Map SG page in kernel virtual address space and copy */
|
|
-static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
|
|
- int len, int offset)
|
|
-{
|
|
- u8 *mapped_addr;
|
|
-
|
|
- /*
|
|
- * Page here can be user-space pinned using get_user_pages
|
|
- * Same must be kmapped before use and kunmapped subsequently
|
|
- */
|
|
- mapped_addr = kmap_atomic(sg_page(sg));
|
|
- memcpy(dest, mapped_addr + offset, len);
|
|
- kunmap_atomic(mapped_addr);
|
|
-}
|
|
-
|
|
-/* Copy from len bytes of sg to dest, starting from beginning */
|
|
-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
|
|
-{
|
|
- struct scatterlist *current_sg = sg;
|
|
- int cpy_index = 0, next_cpy_index = current_sg->length;
|
|
-
|
|
- while (next_cpy_index < len) {
|
|
- sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
|
|
- current_sg->offset);
|
|
- current_sg = scatterwalk_sg_next(current_sg);
|
|
- cpy_index = next_cpy_index;
|
|
- next_cpy_index += current_sg->length;
|
|
- }
|
|
- if (cpy_index < len)
|
|
- sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
|
|
- current_sg->offset);
|
|
-}
|
|
-
|
|
-/* Copy sg data, from to_skip to end, to dest */
|
|
-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
|
|
- int to_skip, unsigned int end)
|
|
-{
|
|
- struct scatterlist *current_sg = sg;
|
|
- int sg_index, cpy_index, offset;
|
|
-
|
|
- sg_index = current_sg->length;
|
|
- while (sg_index <= to_skip) {
|
|
- current_sg = scatterwalk_sg_next(current_sg);
|
|
- sg_index += current_sg->length;
|
|
- }
|
|
- cpy_index = sg_index - to_skip;
|
|
- offset = current_sg->offset + current_sg->length - cpy_index;
|
|
- sg_map_copy(dest, current_sg, cpy_index, offset);
|
|
- if (end - sg_index) {
|
|
- current_sg = scatterwalk_sg_next(current_sg);
|
|
- sg_copy(dest + cpy_index, current_sg, end - sg_index);
|
|
- }
|
|
-}
|
|
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
|
|
index f757a0f..3beed38 100644
|
|
--- a/drivers/crypto/ixp4xx_crypto.c
|
|
+++ b/drivers/crypto/ixp4xx_crypto.c
|
|
@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
|
|
crypt->mode |= NPE_OP_NOT_IN_PLACE;
|
|
/* This was never tested by Intel
|
|
* for more than one dst buffer, I think. */
|
|
- BUG_ON(req->dst->length < nbytes);
|
|
req_ctx->dst = NULL;
|
|
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
|
|
flags, DMA_FROM_DEVICE))
|
|
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
|
|
index dde41f1d..d522396 100644
|
|
--- a/drivers/crypto/omap-aes.c
|
|
+++ b/drivers/crypto/omap-aes.c
|
|
@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
|
|
return err;
|
|
}
|
|
|
|
-static int omap_aes_check_aligned(struct scatterlist *sg)
|
|
+static int omap_aes_check_aligned(struct scatterlist *sg, int total)
|
|
{
|
|
+ int len = 0;
|
|
+
|
|
while (sg) {
|
|
if (!IS_ALIGNED(sg->offset, 4))
|
|
return -1;
|
|
if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
|
|
return -1;
|
|
+
|
|
+ len += sg->length;
|
|
sg = sg_next(sg);
|
|
}
|
|
+
|
|
+ if (len != total)
|
|
+ return -1;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
|
|
dd->in_sg = req->src;
|
|
dd->out_sg = req->dst;
|
|
|
|
- if (omap_aes_check_aligned(dd->in_sg) ||
|
|
- omap_aes_check_aligned(dd->out_sg)) {
|
|
+ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
|
|
+ omap_aes_check_aligned(dd->out_sg, dd->total)) {
|
|
if (omap_aes_copy_sgs(dd))
|
|
pr_err("Failed to copy SGs for unaligned cases\n");
|
|
dd->sgs_copied = 1;
|
|
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
|
|
index 633ba94..c178ed8 100644
|
|
--- a/drivers/crypto/padlock-aes.c
|
|
+++ b/drivers/crypto/padlock-aes.c
|
|
@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Michal Ludvig");
|
|
|
|
-MODULE_ALIAS("aes");
|
|
+MODULE_ALIAS_CRYPTO("aes");
|
|
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
|
|
index 9266c0e..93d7753 100644
|
|
--- a/drivers/crypto/padlock-sha.c
|
|
+++ b/drivers/crypto/padlock-sha.c
|
|
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Michal Ludvig");
|
|
|
|
-MODULE_ALIAS("sha1-all");
|
|
-MODULE_ALIAS("sha256-all");
|
|
-MODULE_ALIAS("sha1-padlock");
|
|
-MODULE_ALIAS("sha256-padlock");
|
|
+MODULE_ALIAS_CRYPTO("sha1-all");
|
|
+MODULE_ALIAS_CRYPTO("sha256-all");
|
|
+MODULE_ALIAS_CRYPTO("sha1-padlock");
|
|
+MODULE_ALIAS_CRYPTO("sha256-padlock");
|
|
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
|
|
index 5967667..1f35487 100644
|
|
--- a/drivers/crypto/talitos.c
|
|
+++ b/drivers/crypto/talitos.c
|
|
@@ -927,7 +927,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
|
|
sg_count--;
|
|
link_tbl_ptr--;
|
|
}
|
|
- be16_add_cpu(&link_tbl_ptr->len, cryptlen);
|
|
+ link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
|
|
+ + cryptlen);
|
|
|
|
/* tag end of link table */
|
|
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
|
|
@@ -2563,6 +2564,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
|
|
break;
|
|
default:
|
|
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
|
|
+ kfree(t_alg);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
|
|
index a999f53..e4cea7c 100644
|
|
--- a/drivers/crypto/ux500/cryp/cryp_core.c
|
|
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
|
|
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
|
|
static irqreturn_t cryp_interrupt_handler(int irq, void *param)
|
|
{
|
|
struct cryp_ctx *ctx;
|
|
- int i;
|
|
+ int count;
|
|
struct cryp_device_data *device_data;
|
|
|
|
if (param == NULL) {
|
|
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
|
|
if (cryp_pending_irq_src(device_data,
|
|
CRYP_IRQ_SRC_OUTPUT_FIFO)) {
|
|
if (ctx->outlen / ctx->blocksize > 0) {
|
|
- for (i = 0; i < ctx->blocksize / 4; i++) {
|
|
- *(ctx->outdata) = readl_relaxed(
|
|
- &device_data->base->dout);
|
|
- ctx->outdata += 4;
|
|
- ctx->outlen -= 4;
|
|
- }
|
|
+ count = ctx->blocksize / 4;
|
|
+
|
|
+ readsl(&device_data->base->dout, ctx->outdata, count);
|
|
+ ctx->outdata += count;
|
|
+ ctx->outlen -= count;
|
|
|
|
if (ctx->outlen == 0) {
|
|
cryp_disable_irq_src(device_data,
|
|
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
|
|
} else if (cryp_pending_irq_src(device_data,
|
|
CRYP_IRQ_SRC_INPUT_FIFO)) {
|
|
if (ctx->datalen / ctx->blocksize > 0) {
|
|
- for (i = 0 ; i < ctx->blocksize / 4; i++) {
|
|
- writel_relaxed(ctx->indata,
|
|
- &device_data->base->din);
|
|
- ctx->indata += 4;
|
|
- ctx->datalen -= 4;
|
|
- }
|
|
+ count = ctx->blocksize / 4;
|
|
+
|
|
+ writesl(&device_data->base->din, ctx->indata, count);
|
|
+
|
|
+ ctx->indata += count;
|
|
+ ctx->datalen -= count;
|
|
|
|
if (ctx->datalen == 0)
|
|
cryp_disable_irq_src(device_data,
|
|
@@ -1811,7 +1810,7 @@ module_exit(ux500_cryp_mod_fini);
|
|
module_param(cryp_mode, int, 0);
|
|
|
|
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
|
|
-MODULE_ALIAS("aes-all");
|
|
-MODULE_ALIAS("des-all");
|
|
+MODULE_ALIAS_CRYPTO("aes-all");
|
|
+MODULE_ALIAS_CRYPTO("des-all");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
|
|
index 1c73f4f..8e5e018 100644
|
|
--- a/drivers/crypto/ux500/hash/hash_core.c
|
|
+++ b/drivers/crypto/ux500/hash/hash_core.c
|
|
@@ -1995,7 +1995,7 @@ module_exit(ux500_hash_mod_fini);
|
|
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
|
|
MODULE_LICENSE("GPL");
|
|
|
|
-MODULE_ALIAS("sha1-all");
|
|
-MODULE_ALIAS("sha256-all");
|
|
-MODULE_ALIAS("hmac-sha1-all");
|
|
-MODULE_ALIAS("hmac-sha256-all");
|
|
+MODULE_ALIAS_CRYPTO("sha1-all");
|
|
+MODULE_ALIAS_CRYPTO("sha256-all");
|
|
+MODULE_ALIAS_CRYPTO("hmac-sha1-all");
|
|
+MODULE_ALIAS_CRYPTO("hmac-sha256-all");
|
|
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
|
|
index 734ed02..b8045cd 100644
|
|
--- a/drivers/dma/TODO
|
|
+++ b/drivers/dma/TODO
|
|
@@ -7,7 +7,6 @@ TODO for slave dma
|
|
- imx-dma
|
|
- imx-sdma
|
|
- mxs-dma.c
|
|
- - dw_dmac
|
|
- intel_mid_dma
|
|
4. Check other subsystems for dma drivers and merge/move to dmaengine
|
|
5. Remove dma_slave_config's dma direction.
|
|
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
|
|
index 01a200c..b0972b3 100644
|
|
--- a/drivers/dma/dw/core.c
|
|
+++ b/drivers/dma/dw/core.c
|
|
@@ -279,6 +279,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
|
|
channel_set_bit(dw, CH_EN, dwc->mask);
|
|
}
|
|
|
|
+static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
|
|
+{
|
|
+ if (list_empty(&dwc->queue))
|
|
+ return;
|
|
+
|
|
+ list_move(dwc->queue.next, &dwc->active_list);
|
|
+ dwc_dostart(dwc, dwc_first_active(dwc));
|
|
+}
|
|
+
|
|
/*----------------------------------------------------------------------*/
|
|
|
|
static void
|
|
@@ -335,10 +344,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|
* the completed ones.
|
|
*/
|
|
list_splice_init(&dwc->active_list, &list);
|
|
- if (!list_empty(&dwc->queue)) {
|
|
- list_move(dwc->queue.next, &dwc->active_list);
|
|
- dwc_dostart(dwc, dwc_first_active(dwc));
|
|
- }
|
|
+ dwc_dostart_first_queued(dwc);
|
|
|
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
|
|
|
@@ -467,10 +473,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|
/* Try to continue after resetting the channel... */
|
|
dwc_chan_disable(dw, dwc);
|
|
|
|
- if (!list_empty(&dwc->queue)) {
|
|
- list_move(dwc->queue.next, &dwc->active_list);
|
|
- dwc_dostart(dwc, dwc_first_active(dwc));
|
|
- }
|
|
+ dwc_dostart_first_queued(dwc);
|
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
|
}
|
|
|
|
@@ -677,17 +680,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
|
|
* possible, perhaps even appending to those already submitted
|
|
* for DMA. But this is hard to do in a race-free manner.
|
|
*/
|
|
- if (list_empty(&dwc->active_list)) {
|
|
- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
|
|
- desc->txd.cookie);
|
|
- list_add_tail(&desc->desc_node, &dwc->active_list);
|
|
- dwc_dostart(dwc, dwc_first_active(dwc));
|
|
- } else {
|
|
- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
|
|
- desc->txd.cookie);
|
|
|
|
- list_add_tail(&desc->desc_node, &dwc->queue);
|
|
- }
|
|
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
|
|
+ list_add_tail(&desc->desc_node, &dwc->queue);
|
|
|
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
|
|
|
@@ -1092,9 +1087,12 @@ dwc_tx_status(struct dma_chan *chan,
|
|
static void dwc_issue_pending(struct dma_chan *chan)
|
|
{
|
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
|
+ unsigned long flags;
|
|
|
|
- if (!list_empty(&dwc->queue))
|
|
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
|
|
+ spin_lock_irqsave(&dwc->lock, flags);
|
|
+ if (list_empty(&dwc->active_list))
|
|
+ dwc_dostart_first_queued(dwc);
|
|
+ spin_unlock_irqrestore(&dwc->lock, flags);
|
|
}
|
|
|
|
static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
|
|
index 453822c..fe8b0c9 100644
|
|
--- a/drivers/dma/dw/platform.c
|
|
+++ b/drivers/dma/dw/platform.c
|
|
@@ -48,6 +48,8 @@ static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
|
|
return true;
|
|
}
|
|
|
|
+#define DRV_NAME "dw_dmac"
|
|
+
|
|
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
|
|
struct of_dma *ofdma)
|
|
{
|
|
@@ -293,7 +295,7 @@ static struct platform_driver dw_driver = {
|
|
.remove = dw_remove,
|
|
.shutdown = dw_shutdown,
|
|
.driver = {
|
|
- .name = "dw_dmac",
|
|
+ .name = DRV_NAME,
|
|
.pm = &dw_dev_pm_ops,
|
|
.of_match_table = of_match_ptr(dw_dma_of_id_table),
|
|
.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
|
|
@@ -314,3 +316,4 @@ module_exit(dw_exit);
|
|
|
|
MODULE_LICENSE("GPL v2");
|
|
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
|
|
+MODULE_ALIAS("platform:" DRV_NAME);
|
|
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
|
|
index 394cbc5..6b2f01d 100644
|
|
--- a/drivers/dma/mv_xor.c
|
|
+++ b/drivers/dma/mv_xor.c
|
|
@@ -316,7 +316,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
|
|
dma_cookie_t cookie = 0;
|
|
int busy = mv_chan_is_busy(mv_chan);
|
|
u32 current_desc = mv_chan_get_current_desc(mv_chan);
|
|
- int seen_current = 0;
|
|
+ int current_cleaned = 0;
|
|
+ struct mv_xor_desc *hw_desc;
|
|
|
|
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
|
|
dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
|
|
@@ -328,38 +329,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
|
|
|
|
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
|
|
chain_node) {
|
|
- prefetch(_iter);
|
|
- prefetch(&_iter->async_tx);
|
|
|
|
- /* do not advance past the current descriptor loaded into the
|
|
- * hardware channel, subsequent descriptors are either in
|
|
- * process or have not been submitted
|
|
- */
|
|
- if (seen_current)
|
|
- break;
|
|
+ /* clean finished descriptors */
|
|
+ hw_desc = iter->hw_desc;
|
|
+ if (hw_desc->status & XOR_DESC_SUCCESS) {
|
|
+ cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
|
|
+ cookie);
|
|
|
|
- /* stop the search if we reach the current descriptor and the
|
|
- * channel is busy
|
|
- */
|
|
- if (iter->async_tx.phys == current_desc) {
|
|
- seen_current = 1;
|
|
- if (busy)
|
|
+ /* done processing desc, clean slot */
|
|
+ mv_xor_clean_slot(iter, mv_chan);
|
|
+
|
|
+ /* break if we did cleaned the current */
|
|
+ if (iter->async_tx.phys == current_desc) {
|
|
+ current_cleaned = 1;
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ if (iter->async_tx.phys == current_desc) {
|
|
+ current_cleaned = 0;
|
|
break;
|
|
+ }
|
|
}
|
|
-
|
|
- cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
|
|
-
|
|
- if (mv_xor_clean_slot(iter, mv_chan))
|
|
- break;
|
|
}
|
|
|
|
if ((busy == 0) && !list_empty(&mv_chan->chain)) {
|
|
- struct mv_xor_desc_slot *chain_head;
|
|
- chain_head = list_entry(mv_chan->chain.next,
|
|
- struct mv_xor_desc_slot,
|
|
- chain_node);
|
|
-
|
|
- mv_xor_start_new_chain(mv_chan, chain_head);
|
|
+ if (current_cleaned) {
|
|
+ /*
|
|
+ * current descriptor cleaned and removed, run
|
|
+ * from list head
|
|
+ */
|
|
+ iter = list_entry(mv_chan->chain.next,
|
|
+ struct mv_xor_desc_slot,
|
|
+ chain_node);
|
|
+ mv_xor_start_new_chain(mv_chan, iter);
|
|
+ } else {
|
|
+ if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
|
|
+ /*
|
|
+ * descriptors are still waiting after
|
|
+ * current, trigger them
|
|
+ */
|
|
+ iter = list_entry(iter->chain_node.next,
|
|
+ struct mv_xor_desc_slot,
|
|
+ chain_node);
|
|
+ mv_xor_start_new_chain(mv_chan, iter);
|
|
+ } else {
|
|
+ /*
|
|
+ * some descriptors are still waiting
|
|
+ * to be cleaned
|
|
+ */
|
|
+ tasklet_schedule(&mv_chan->irq_tasklet);
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
if (cookie > 0)
|
|
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
|
|
index d074922..5d14e4b 100644
|
|
--- a/drivers/dma/mv_xor.h
|
|
+++ b/drivers/dma/mv_xor.h
|
|
@@ -33,6 +33,7 @@
|
|
#define XOR_OPERATION_MODE_XOR 0
|
|
#define XOR_OPERATION_MODE_MEMCPY 2
|
|
#define XOR_DESCRIPTOR_SWAP BIT(14)
|
|
+#define XOR_DESC_SUCCESS 0x40000000
|
|
|
|
#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
|
|
#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
|
|
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
|
|
index 362e7c4..12f8294 100644
|
|
--- a/drivers/dma/omap-dma.c
|
|
+++ b/drivers/dma/omap-dma.c
|
|
@@ -487,6 +487,7 @@ static int omap_dma_terminate_all(struct omap_chan *c)
|
|
* c->desc is NULL and exit.)
|
|
*/
|
|
if (c->desc) {
|
|
+ omap_dma_desc_free(&c->desc->vd);
|
|
c->desc = NULL;
|
|
/* Avoid stopping the dma twice */
|
|
if (!c->paused)
|
|
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
|
|
index 98e14ee..278603c 100644
|
|
--- a/drivers/edac/amd64_edac.c
|
|
+++ b/drivers/edac/amd64_edac.c
|
|
@@ -2006,14 +2006,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
|
|
|
|
static inline void decode_bus_error(int node_id, struct mce *m)
|
|
{
|
|
- struct mem_ctl_info *mci = mcis[node_id];
|
|
- struct amd64_pvt *pvt = mci->pvt_info;
|
|
+ struct mem_ctl_info *mci;
|
|
+ struct amd64_pvt *pvt;
|
|
u8 ecc_type = (m->status >> 45) & 0x3;
|
|
u8 xec = XEC(m->status, 0x1f);
|
|
u16 ec = EC(m->status);
|
|
u64 sys_addr;
|
|
struct err_info err;
|
|
|
|
+ mci = edac_mc_find(node_id);
|
|
+ if (!mci)
|
|
+ return;
|
|
+
|
|
+ pvt = mci->pvt_info;
|
|
+
|
|
/* Bail out early if this was an 'observed' error */
|
|
if (PP(ec) == NBSL_PP_OBS)
|
|
return;
|
|
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
|
|
index df6575f..682288c 100644
|
|
--- a/drivers/edac/cpc925_edac.c
|
|
+++ b/drivers/edac/cpc925_edac.c
|
|
@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
|
|
|
|
if (apiexcp & UECC_EXCP_DETECTED) {
|
|
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
|
|
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
|
|
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
|
|
pfn, offset, 0,
|
|
csrow, -1, -1,
|
|
mci->ctl_name, "");
|
|
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
|
|
index 3cda79b..ece3aef 100644
|
|
--- a/drivers/edac/e7xxx_edac.c
|
|
+++ b/drivers/edac/e7xxx_edac.c
|
|
@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
|
|
static void process_ce_no_info(struct mem_ctl_info *mci)
|
|
{
|
|
edac_dbg(3, "\n");
|
|
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
|
|
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
|
|
"e7xxx CE log register overflow", "");
|
|
}
|
|
|
|
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
|
|
index fa1326e..ad76f10 100644
|
|
--- a/drivers/edac/i3200_edac.c
|
|
+++ b/drivers/edac/i3200_edac.c
|
|
@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
|
|
-1, -1,
|
|
"i3000 UE", "");
|
|
} else if (log & I3200_ECCERRLOG_CE) {
|
|
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
|
|
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
|
|
0, 0, eccerrlog_syndrome(log),
|
|
eccerrlog_row(channel, log),
|
|
-1, -1,
|
|
- "i3000 UE", "");
|
|
+ "i3000 CE", "");
|
|
}
|
|
}
|
|
}
|
|
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
|
|
index 3382f63..4382343 100644
|
|
--- a/drivers/edac/i82860_edac.c
|
|
+++ b/drivers/edac/i82860_edac.c
|
|
@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
|
|
dimm->location[0], dimm->location[1], -1,
|
|
"i82860 UE", "");
|
|
else
|
|
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
|
|
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
|
|
info->eap, 0, info->derrsyn,
|
|
dimm->location[0], dimm->location[1], -1,
|
|
"i82860 CE", "");
|
|
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
|
|
index ef6b7e0..5c361f3 100644
|
|
--- a/drivers/edac/ppc4xx_edac.c
|
|
+++ b/drivers/edac/ppc4xx_edac.c
|
|
@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
|
|
*/
|
|
|
|
for (row = 0; row < mci->nr_csrows; row++) {
|
|
- struct csrow_info *csi = &mci->csrows[row];
|
|
+ struct csrow_info *csi = mci->csrows[row];
|
|
|
|
/*
|
|
* Get the configuration settings for this
|
|
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
|
|
index 54e2abe..3e623ab 100644
|
|
--- a/drivers/edac/sb_edac.c
|
|
+++ b/drivers/edac/sb_edac.c
|
|
@@ -285,8 +285,9 @@ static const u32 correrrthrsld[] = {
|
|
* sbridge structs
|
|
*/
|
|
|
|
-#define NUM_CHANNELS 4
|
|
-#define MAX_DIMMS 3 /* Max DIMMS per channel */
|
|
+#define NUM_CHANNELS 4
|
|
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
|
|
+#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
|
|
|
|
enum type {
|
|
SANDY_BRIDGE,
|
|
@@ -764,7 +765,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
u32 reg;
|
|
u64 limit, prv = 0;
|
|
u64 tmp_mb;
|
|
- u32 mb, kb;
|
|
+ u32 gb, mb;
|
|
u32 rir_way;
|
|
|
|
/*
|
|
@@ -774,15 +775,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
pvt->tolm = pvt->info.get_tolm(pvt);
|
|
tmp_mb = (1 + pvt->tolm) >> 20;
|
|
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
- edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
+ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
|
|
+ gb, (mb*1000)/1024, (u64)pvt->tolm);
|
|
|
|
/* Address range is already 45:25 */
|
|
pvt->tohm = pvt->info.get_tohm(pvt);
|
|
tmp_mb = (1 + pvt->tohm) >> 20;
|
|
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
|
|
+ gb, (mb*1000)/1024, (u64)pvt->tohm);
|
|
|
|
/*
|
|
* Step 2) Get SAD range and SAD Interleave list
|
|
@@ -804,11 +807,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
break;
|
|
|
|
tmp_mb = (limit + 1) >> 20;
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
|
|
n_sads,
|
|
get_dram_attr(reg),
|
|
- mb, kb,
|
|
+ gb, (mb*1000)/1024,
|
|
((u64)tmp_mb) << 20L,
|
|
INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
|
|
reg);
|
|
@@ -839,9 +842,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
break;
|
|
tmp_mb = (limit + 1) >> 20;
|
|
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
|
|
- n_tads, mb, kb,
|
|
+ n_tads, gb, (mb*1000)/1024,
|
|
((u64)tmp_mb) << 20L,
|
|
(u32)TAD_SOCK(reg),
|
|
(u32)TAD_CH(reg),
|
|
@@ -864,10 +867,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
tad_ch_nilv_offset[j],
|
|
®);
|
|
tmp_mb = TAD_OFFSET(reg) >> 20;
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
|
|
i, j,
|
|
- mb, kb,
|
|
+ gb, (mb*1000)/1024,
|
|
((u64)tmp_mb) << 20L,
|
|
reg);
|
|
}
|
|
@@ -889,10 +892,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
|
|
tmp_mb = RIR_LIMIT(reg) >> 20;
|
|
rir_way = 1 << RIR_WAY(reg);
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
|
|
i, j,
|
|
- mb, kb,
|
|
+ gb, (mb*1000)/1024,
|
|
((u64)tmp_mb) << 20L,
|
|
rir_way,
|
|
reg);
|
|
@@ -903,10 +906,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
|
|
®);
|
|
tmp_mb = RIR_OFFSET(reg) << 6;
|
|
|
|
- mb = div_u64_rem(tmp_mb, 1000, &kb);
|
|
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
|
|
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
|
|
i, j, k,
|
|
- mb, kb,
|
|
+ gb, (mb*1000)/1024,
|
|
((u64)tmp_mb) << 20L,
|
|
(u32)RIR_RNK_TGT(reg),
|
|
reg);
|
|
@@ -944,7 +947,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
|
|
u8 ch_way, sck_way, pkg, sad_ha = 0;
|
|
u32 tad_offset;
|
|
u32 rir_way;
|
|
- u32 mb, kb;
|
|
+ u32 mb, gb;
|
|
u64 ch_addr, offset, limit = 0, prv = 0;
|
|
|
|
|
|
@@ -1182,10 +1185,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
|
|
continue;
|
|
|
|
limit = RIR_LIMIT(reg);
|
|
- mb = div_u64_rem(limit >> 20, 1000, &kb);
|
|
+ gb = div_u64_rem(limit >> 20, 1024, &mb);
|
|
edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
|
|
n_rir,
|
|
- mb, kb,
|
|
+ gb, (mb*1000)/1024,
|
|
limit,
|
|
1 << RIR_WAY(reg));
|
|
if (ch_addr <= limit)
|
|
@@ -1750,6 +1753,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
|
|
|
|
/* FIXME: need support for channel mask */
|
|
|
|
+ if (channel == CHANNEL_UNSPECIFIED)
|
|
+ channel = -1;
|
|
+
|
|
/* Call the helper to output message */
|
|
edac_mc_handle_error(tp_event, mci, core_err_cnt,
|
|
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
|
|
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
|
|
index d7d5c8a..6d44568 100644
|
|
--- a/drivers/firewire/core-cdev.c
|
|
+++ b/drivers/firewire/core-cdev.c
|
|
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
|
|
_IOC_SIZE(cmd) > sizeof(buffer))
|
|
return -ENOTTY;
|
|
|
|
- if (_IOC_DIR(cmd) == _IOC_READ)
|
|
- memset(&buffer, 0, _IOC_SIZE(cmd));
|
|
+ memset(&buffer, 0, sizeof(buffer));
|
|
|
|
if (_IOC_DIR(cmd) & _IOC_WRITE)
|
|
if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
|
|
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
|
|
index 97cdd16..c98b101 100644
|
|
--- a/drivers/firmware/efi/runtime-map.c
|
|
+++ b/drivers/firmware/efi/runtime-map.c
|
|
@@ -170,7 +170,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
|
|
|
|
return 0;
|
|
out_add_entry:
|
|
- for (j = i - 1; j > 0; j--) {
|
|
+ for (j = i - 1; j >= 0; j--) {
|
|
entry = *(map_entries + j);
|
|
kobject_put(&entry->kobj);
|
|
}
|
|
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
|
|
index b22659c..e612552 100644
|
|
--- a/drivers/firmware/efi/vars.c
|
|
+++ b/drivers/firmware/efi/vars.c
|
|
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
|
|
*/
|
|
static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
|
|
{
|
|
- WARN_ON(!spin_is_locked(&__efivars->lock));
|
|
+ lockdep_assert_held(&__efivars->lock);
|
|
|
|
list_del(&entry->list);
|
|
spin_unlock_irq(&__efivars->lock);
|
|
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
|
|
const struct efivar_operations *ops = __efivars->ops;
|
|
efi_status_t status;
|
|
|
|
- WARN_ON(!spin_is_locked(&__efivars->lock));
|
|
+ lockdep_assert_held(&__efivars->lock);
|
|
|
|
status = ops->set_variable(entry->var.VariableName,
|
|
&entry->var.VendorGuid,
|
|
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
|
|
int strsize1, strsize2;
|
|
bool found = false;
|
|
|
|
- WARN_ON(!spin_is_locked(&__efivars->lock));
|
|
+ lockdep_assert_held(&__efivars->lock);
|
|
|
|
list_for_each_entry_safe(entry, n, head, list) {
|
|
strsize1 = ucs2_strsize(name, 1024);
|
|
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
|
|
const struct efivar_operations *ops = __efivars->ops;
|
|
efi_status_t status;
|
|
|
|
- WARN_ON(!spin_is_locked(&__efivars->lock));
|
|
+ lockdep_assert_held(&__efivars->lock);
|
|
|
|
status = ops->get_variable(entry->var.VariableName,
|
|
&entry->var.VendorGuid,
|
|
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
|
|
index c6d8817..fe6d4a1 100644
|
|
--- a/drivers/gpio/gpio-kempld.c
|
|
+++ b/drivers/gpio/gpio-kempld.c
|
|
@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
|
|
= container_of(chip, struct kempld_gpio_data, chip);
|
|
struct kempld_device_data *pld = gpio->pld;
|
|
|
|
- return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
|
|
+ return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
|
|
}
|
|
|
|
static int kempld_gpio_pincount(struct kempld_device_data *pld)
|
|
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
|
|
index 3b1fd1c..e9d8cf6 100644
|
|
--- a/drivers/gpio/gpio-mvebu.c
|
|
+++ b/drivers/gpio/gpio-mvebu.c
|
|
@@ -304,11 +304,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
|
|
{
|
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
|
struct mvebu_gpio_chip *mvchip = gc->private;
|
|
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
u32 mask = 1 << (d->irq - gc->irq_base);
|
|
|
|
irq_gc_lock(gc);
|
|
- gc->mask_cache &= ~mask;
|
|
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
|
|
+ ct->mask_cache_priv &= ~mask;
|
|
+
|
|
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
|
|
irq_gc_unlock(gc);
|
|
}
|
|
|
|
@@ -316,11 +318,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
|
|
{
|
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
|
struct mvebu_gpio_chip *mvchip = gc->private;
|
|
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
+
|
|
u32 mask = 1 << (d->irq - gc->irq_base);
|
|
|
|
irq_gc_lock(gc);
|
|
- gc->mask_cache |= mask;
|
|
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
|
|
+ ct->mask_cache_priv |= mask;
|
|
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
|
|
irq_gc_unlock(gc);
|
|
}
|
|
|
|
@@ -328,11 +332,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
|
|
{
|
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
|
struct mvebu_gpio_chip *mvchip = gc->private;
|
|
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
+
|
|
u32 mask = 1 << (d->irq - gc->irq_base);
|
|
|
|
irq_gc_lock(gc);
|
|
- gc->mask_cache &= ~mask;
|
|
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
|
|
+ ct->mask_cache_priv &= ~mask;
|
|
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
|
|
irq_gc_unlock(gc);
|
|
}
|
|
|
|
@@ -340,11 +346,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
|
|
{
|
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
|
struct mvebu_gpio_chip *mvchip = gc->private;
|
|
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
+
|
|
u32 mask = 1 << (d->irq - gc->irq_base);
|
|
|
|
irq_gc_lock(gc);
|
|
- gc->mask_cache |= mask;
|
|
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
|
|
+ ct->mask_cache_priv |= mask;
|
|
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
|
|
irq_gc_unlock(gc);
|
|
}
|
|
|
|
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
|
|
index 59ee486..6005d26 100644
|
|
--- a/drivers/gpio/gpio-tps65912.c
|
|
+++ b/drivers/gpio/gpio-tps65912.c
|
|
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
|
|
struct gpio_chip gpio_chip;
|
|
};
|
|
|
|
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
|
|
+
|
|
static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
|
|
{
|
|
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
|
|
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
|
|
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
|
|
int val;
|
|
|
|
val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
|
|
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
|
|
static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
|
|
int value)
|
|
{
|
|
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
|
|
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
|
|
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
|
|
|
|
if (value)
|
|
tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
|
|
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
|
|
static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
|
|
int value)
|
|
{
|
|
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
|
|
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
|
|
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
|
|
|
|
/* Set the initial value */
|
|
tps65912_gpio_set(gc, offset, value);
|
|
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
|
|
|
|
static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
|
|
{
|
|
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
|
|
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
|
|
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
|
|
|
|
return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
|
|
GPIO_CFG_MASK);
|
|
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
|
|
index e0a98f5..d26028c 100644
|
|
--- a/drivers/gpio/gpiolib-of.c
|
|
+++ b/drivers/gpio/gpiolib-of.c
|
|
@@ -44,8 +44,15 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
|
|
return false;
|
|
|
|
ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ /* We've found a gpio chip, but the translation failed.
|
|
+ * Store translation error in out_gpio.
|
|
+ * Return false to keep looking, as more than one gpio chip
|
|
+ * could be registered per of-node.
|
|
+ */
|
|
+ gg_data->out_gpio = ERR_PTR(ret);
|
|
return false;
|
|
+ }
|
|
|
|
gg_data->out_gpio = gpio_to_desc(ret + gc->base);
|
|
return true;
|
|
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
|
|
index 50c4922..a03e18f 100644
|
|
--- a/drivers/gpio/gpiolib.c
|
|
+++ b/drivers/gpio/gpiolib.c
|
|
@@ -408,7 +408,7 @@ static ssize_t gpio_value_store(struct device *dev,
|
|
return status;
|
|
}
|
|
|
|
-static const DEVICE_ATTR(value, 0644,
|
|
+static DEVICE_ATTR(value, 0644,
|
|
gpio_value_show, gpio_value_store);
|
|
|
|
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
|
|
@@ -633,18 +633,16 @@ static ssize_t gpio_active_low_store(struct device *dev,
|
|
return status ? : size;
|
|
}
|
|
|
|
-static const DEVICE_ATTR(active_low, 0644,
|
|
+static DEVICE_ATTR(active_low, 0644,
|
|
gpio_active_low_show, gpio_active_low_store);
|
|
|
|
-static const struct attribute *gpio_attrs[] = {
|
|
+static struct attribute *gpio_attrs[] = {
|
|
&dev_attr_value.attr,
|
|
&dev_attr_active_low.attr,
|
|
NULL,
|
|
};
|
|
|
|
-static const struct attribute_group gpio_attr_group = {
|
|
- .attrs = (struct attribute **) gpio_attrs,
|
|
-};
|
|
+ATTRIBUTE_GROUPS(gpio);
|
|
|
|
/*
|
|
* /sys/class/gpio/gpiochipN/
|
|
@@ -680,16 +678,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
|
|
}
|
|
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
|
|
|
|
-static const struct attribute *gpiochip_attrs[] = {
|
|
+static struct attribute *gpiochip_attrs[] = {
|
|
&dev_attr_base.attr,
|
|
&dev_attr_label.attr,
|
|
&dev_attr_ngpio.attr,
|
|
NULL,
|
|
};
|
|
-
|
|
-static const struct attribute_group gpiochip_attr_group = {
|
|
- .attrs = (struct attribute **) gpiochip_attrs,
|
|
-};
|
|
+ATTRIBUTE_GROUPS(gpiochip);
|
|
|
|
/*
|
|
* /sys/class/gpio/export ... write-only
|
|
@@ -805,6 +800,7 @@ static struct class gpio_class = {
|
|
*/
|
|
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
|
|
{
|
|
+ struct gpio_chip *chip;
|
|
unsigned long flags;
|
|
int status;
|
|
const char *ioname = NULL;
|
|
@@ -822,8 +818,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ chip = desc->chip;
|
|
+
|
|
mutex_lock(&sysfs_lock);
|
|
|
|
+ /* check if chip is being removed */
|
|
+ if (!chip || !chip->exported) {
|
|
+ status = -ENODEV;
|
|
+ goto fail_unlock;
|
|
+ }
|
|
+
|
|
spin_lock_irqsave(&gpio_lock, flags);
|
|
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
|
|
test_bit(FLAG_EXPORT, &desc->flags)) {
|
|
@@ -844,18 +848,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
|
|
if (desc->chip->names && desc->chip->names[offset])
|
|
ioname = desc->chip->names[offset];
|
|
|
|
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
|
|
- desc, ioname ? ioname : "gpio%u",
|
|
- desc_to_gpio(desc));
|
|
+ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
|
|
+ MKDEV(0, 0), desc, gpio_groups,
|
|
+ ioname ? ioname : "gpio%u",
|
|
+ desc_to_gpio(desc));
|
|
if (IS_ERR(dev)) {
|
|
status = PTR_ERR(dev);
|
|
goto fail_unlock;
|
|
}
|
|
|
|
- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
|
|
- if (status)
|
|
- goto fail_unregister_device;
|
|
-
|
|
if (direction_may_change) {
|
|
status = device_create_file(dev, &dev_attr_direction);
|
|
if (status)
|
|
@@ -866,13 +867,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
|
|
!test_bit(FLAG_IS_OUT, &desc->flags))) {
|
|
status = device_create_file(dev, &dev_attr_edge);
|
|
if (status)
|
|
- goto fail_unregister_device;
|
|
+ goto fail_remove_attr_direction;
|
|
}
|
|
|
|
set_bit(FLAG_EXPORT, &desc->flags);
|
|
mutex_unlock(&sysfs_lock);
|
|
return 0;
|
|
|
|
+fail_remove_attr_direction:
|
|
+ device_remove_file(dev, &dev_attr_direction);
|
|
fail_unregister_device:
|
|
device_unregister(dev);
|
|
fail_unlock:
|
|
@@ -917,6 +920,7 @@ int gpiod_export_link(struct device *dev, const char *name,
|
|
if (tdev != NULL) {
|
|
status = sysfs_create_link(&dev->kobj, &tdev->kobj,
|
|
name);
|
|
+ put_device(tdev);
|
|
} else {
|
|
status = -ENODEV;
|
|
}
|
|
@@ -964,7 +968,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
|
|
}
|
|
|
|
status = sysfs_set_active_low(desc, dev, value);
|
|
-
|
|
+ put_device(dev);
|
|
unlock:
|
|
mutex_unlock(&sysfs_lock);
|
|
|
|
@@ -1006,6 +1010,8 @@ void gpiod_unexport(struct gpio_desc *desc)
|
|
mutex_unlock(&sysfs_lock);
|
|
|
|
if (dev) {
|
|
+ device_remove_file(dev, &dev_attr_edge);
|
|
+ device_remove_file(dev, &dev_attr_direction);
|
|
device_unregister(dev);
|
|
put_device(dev);
|
|
}
|
|
@@ -1030,13 +1036,13 @@ static int gpiochip_export(struct gpio_chip *chip)
|
|
|
|
/* use chip->base for the ID; it's already known to be unique */
|
|
mutex_lock(&sysfs_lock);
|
|
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
|
|
- "gpiochip%d", chip->base);
|
|
- if (!IS_ERR(dev)) {
|
|
- status = sysfs_create_group(&dev->kobj,
|
|
- &gpiochip_attr_group);
|
|
- } else
|
|
+ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
|
|
+ chip, gpiochip_groups,
|
|
+ "gpiochip%d", chip->base);
|
|
+ if (IS_ERR(dev))
|
|
status = PTR_ERR(dev);
|
|
+ else
|
|
+ status = 0;
|
|
chip->exported = (status == 0);
|
|
mutex_unlock(&sysfs_lock);
|
|
|
|
@@ -1060,12 +1066,15 @@ static void gpiochip_unexport(struct gpio_chip *chip)
|
|
{
|
|
int status;
|
|
struct device *dev;
|
|
+ struct gpio_desc *desc;
|
|
+ unsigned int i;
|
|
|
|
mutex_lock(&sysfs_lock);
|
|
dev = class_find_device(&gpio_class, NULL, chip, match_export);
|
|
if (dev) {
|
|
put_device(dev);
|
|
device_unregister(dev);
|
|
+ /* prevent further gpiod exports */
|
|
chip->exported = false;
|
|
status = 0;
|
|
} else
|
|
@@ -1074,6 +1083,13 @@ static void gpiochip_unexport(struct gpio_chip *chip)
|
|
|
|
if (status)
|
|
chip_dbg(chip, "%s: status %d\n", __func__, status);
|
|
+
|
|
+ /* unregister gpiod class devices owned by sysfs */
|
|
+ for (i = 0; i < chip->ngpio; i++) {
|
|
+ desc = &chip->desc[i];
|
|
+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
|
|
+ gpiod_free(desc);
|
|
+ }
|
|
}
|
|
|
|
static int __init gpiolib_sysfs_init(void)
|
|
@@ -1222,6 +1238,9 @@ int gpiochip_add(struct gpio_chip *chip)
|
|
|
|
spin_unlock_irqrestore(&gpio_lock, flags);
|
|
|
|
+ if (status)
|
|
+ goto fail;
|
|
+
|
|
#ifdef CONFIG_PINCTRL
|
|
INIT_LIST_HEAD(&chip->pin_ranges);
|
|
#endif
|
|
@@ -1229,12 +1248,12 @@ int gpiochip_add(struct gpio_chip *chip)
|
|
of_gpiochip_add(chip);
|
|
acpi_gpiochip_add(chip);
|
|
|
|
- if (status)
|
|
- goto fail;
|
|
-
|
|
status = gpiochip_export(chip);
|
|
- if (status)
|
|
+ if (status) {
|
|
+ acpi_gpiochip_remove(chip);
|
|
+ of_gpiochip_remove(chip);
|
|
goto fail;
|
|
+ }
|
|
|
|
pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
|
|
chip->base, chip->base + chip->ngpio - 1,
|
|
@@ -1265,6 +1284,8 @@ int gpiochip_remove(struct gpio_chip *chip)
|
|
int status = 0;
|
|
unsigned id;
|
|
|
|
+ gpiochip_unexport(chip);
|
|
+
|
|
spin_lock_irqsave(&gpio_lock, flags);
|
|
|
|
gpiochip_remove_pin_ranges(chip);
|
|
@@ -1286,9 +1307,6 @@ int gpiochip_remove(struct gpio_chip *chip)
|
|
|
|
spin_unlock_irqrestore(&gpio_lock, flags);
|
|
|
|
- if (status == 0)
|
|
- gpiochip_unexport(chip);
|
|
-
|
|
return status;
|
|
}
|
|
EXPORT_SYMBOL_GPL(gpiochip_remove);
|
|
@@ -1928,15 +1946,15 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
|
|
* that the GPIO was actually requested.
|
|
*/
|
|
|
|
-static int _gpiod_get_raw_value(const struct gpio_desc *desc)
|
|
+static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
|
|
{
|
|
struct gpio_chip *chip;
|
|
- int value;
|
|
+ bool value;
|
|
int offset;
|
|
|
|
chip = desc->chip;
|
|
offset = gpio_chip_hwgpio(desc);
|
|
- value = chip->get ? chip->get(chip, offset) : 0;
|
|
+ value = chip->get ? chip->get(chip, offset) : false;
|
|
trace_gpio_value(desc_to_gpio(desc), 1, value);
|
|
return value;
|
|
}
|
|
@@ -1992,7 +2010,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
|
|
* @desc: gpio descriptor whose state need to be set.
|
|
* @value: Non-zero for setting it HIGH otherise it will set to LOW.
|
|
*/
|
|
-static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
|
|
+static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
|
|
{
|
|
int err = 0;
|
|
struct gpio_chip *chip = desc->chip;
|
|
@@ -2019,7 +2037,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
|
|
* @desc: gpio descriptor whose state need to be set.
|
|
* @value: Non-zero for setting it HIGH otherise it will set to LOW.
|
|
*/
|
|
-static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
|
|
+static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
|
|
{
|
|
int err = 0;
|
|
struct gpio_chip *chip = desc->chip;
|
|
@@ -2041,7 +2059,7 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
|
|
__func__, err);
|
|
}
|
|
|
|
-static void _gpiod_set_raw_value(struct gpio_desc *desc, int value)
|
|
+static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
|
|
{
|
|
struct gpio_chip *chip;
|
|
|
|
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
|
|
index 50535fd..d830b38 100644
|
|
--- a/drivers/gpu/drm/ast/ast_main.c
|
|
+++ b/drivers/gpu/drm/ast/ast_main.c
|
|
@@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev)
|
|
}
|
|
ast->vga2_clone = false;
|
|
} else {
|
|
- ast->chip = 2000;
|
|
+ ast->chip = AST2000;
|
|
DRM_INFO("AST 2000 detected\n");
|
|
}
|
|
}
|
|
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
|
|
index cca063b..d2e56e9 100644
|
|
--- a/drivers/gpu/drm/ast/ast_mode.c
|
|
+++ b/drivers/gpu/drm/ast/ast_mode.c
|
|
@@ -1012,8 +1012,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
|
|
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
|
|
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
|
|
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
|
|
- data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
|
|
- data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
|
|
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
|
|
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
|
|
|
|
writel(data32.ul, dstxor);
|
|
csum += data32.ul;
|
|
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
|
|
index 08ce520..faa1f42 100644
|
|
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
|
|
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
|
|
@@ -32,6 +32,8 @@ static struct drm_driver driver;
|
|
static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
|
|
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
|
|
0, 0 },
|
|
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
|
|
+ 0x0001, 0, 0, 0 },
|
|
{0,}
|
|
};
|
|
|
|
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
|
|
index 3b7d32d..903db3c 100644
|
|
--- a/drivers/gpu/drm/drm_crtc.c
|
|
+++ b/drivers/gpu/drm/drm_crtc.c
|
|
@@ -2155,8 +2155,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
|
|
if (!drm_core_check_feature(dev, DRIVER_MODESET))
|
|
return -EINVAL;
|
|
|
|
- /* For some reason crtc x/y offsets are signed internally. */
|
|
- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
|
|
+ /*
|
|
+ * Universal plane src offsets are only 16.16, prevent havoc for
|
|
+ * drivers using universal plane code internally.
|
|
+ */
|
|
+ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
|
|
return -ERANGE;
|
|
|
|
drm_modeset_lock_all(dev);
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
|
|
index 3ecb332..de5ab48 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem.c
|
|
@@ -1426,10 +1426,13 @@ unlock:
|
|
out:
|
|
switch (ret) {
|
|
case -EIO:
|
|
- /* If this -EIO is due to a gpu hang, give the reset code a
|
|
- * chance to clean up the mess. Otherwise return the proper
|
|
- * SIGBUS. */
|
|
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
|
|
+ /*
|
|
+ * We eat errors when the gpu is terminally wedged to avoid
|
|
+ * userspace unduly crashing (gl has no provisions for mmaps to
|
|
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
|
|
+ * and so needs to be reported.
|
|
+ */
|
|
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
|
|
ret = VM_FAULT_SIGBUS;
|
|
break;
|
|
}
|
|
@@ -2890,6 +2893,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
|
u32 size = i915_gem_obj_ggtt_size(obj);
|
|
uint64_t val;
|
|
|
|
+ /* Adjust fence size to match tiled area */
|
|
+ if (obj->tiling_mode != I915_TILING_NONE) {
|
|
+ uint32_t row_size = obj->stride *
|
|
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
|
|
+ size = (size / row_size) * row_size;
|
|
+ }
|
|
+
|
|
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
|
|
0xfffff000) << 32;
|
|
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
|
|
@@ -4975,7 +4985,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
|
|
if (!mutex_is_locked(mutex))
|
|
return false;
|
|
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
|
|
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
|
|
return mutex->owner == task;
|
|
#else
|
|
/* Since UP may be pre-empted, we cannot assume that we own the lock */
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
index d278be1..1855cdc 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
@@ -827,6 +827,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
|
|
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
|
|
}
|
|
|
|
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
|
|
+{
|
|
+ if (INTEL_INFO(dev_priv->dev)->gen < 6) {
|
|
+ intel_gtt_chipset_flush();
|
|
+ } else {
|
|
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
|
|
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
|
|
+ }
|
|
+}
|
|
+
|
|
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
|
|
{
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
@@ -843,6 +853,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
|
|
dev_priv->gtt.base.start / PAGE_SIZE,
|
|
dev_priv->gtt.base.total / PAGE_SIZE,
|
|
true);
|
|
+
|
|
+ i915_ggtt_flush(dev_priv);
|
|
}
|
|
|
|
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
|
|
@@ -863,7 +875,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
|
|
i915_gem_gtt_bind_object(obj, obj->cache_level);
|
|
}
|
|
|
|
- i915_gem_chipset_flush(dev);
|
|
+ i915_ggtt_flush(dev_priv);
|
|
}
|
|
|
|
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
|
|
index 3c78b22..800e06c 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
|
|
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
|
|
r = devm_request_mem_region(dev->dev, base + 1,
|
|
dev_priv->gtt.stolen_size - 1,
|
|
"Graphics Stolen Memory");
|
|
- if (r == NULL) {
|
|
+ /*
|
|
+ * GEN3 firmware likes to smash pci bridges into the stolen
|
|
+ * range. Apparently this works.
|
|
+ */
|
|
+ if (r == NULL && !IS_GEN3(dev)) {
|
|
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
|
|
base, base + (uint32_t)dev_priv->gtt.stolen_size);
|
|
base = 0;
|
|
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
|
|
index 0a3b938..5b38bf8 100644
|
|
--- a/drivers/gpu/drm/i915/i915_reg.h
|
|
+++ b/drivers/gpu/drm/i915/i915_reg.h
|
|
@@ -320,6 +320,7 @@
|
|
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
|
|
#define PIPE_CONTROL_CS_STALL (1<<20)
|
|
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
|
|
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
|
|
#define PIPE_CONTROL_QW_WRITE (1<<14)
|
|
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
|
|
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
|
|
@@ -1180,6 +1181,7 @@
|
|
#define GMBUS_CYCLE_INDEX (2<<25)
|
|
#define GMBUS_CYCLE_STOP (4<<25)
|
|
#define GMBUS_BYTE_COUNT_SHIFT 16
|
|
+#define GMBUS_BYTE_COUNT_MAX 256U
|
|
#define GMBUS_SLAVE_INDEX_SHIFT 8
|
|
#define GMBUS_SLAVE_ADDR_SHIFT 1
|
|
#define GMBUS_SLAVE_READ (1<<0)
|
|
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
|
|
index f220419..08105fd 100644
|
|
--- a/drivers/gpu/drm/i915/intel_bios.c
|
|
+++ b/drivers/gpu/drm/i915/intel_bios.c
|
|
@@ -839,7 +839,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
|
|
}
|
|
}
|
|
|
|
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
|
|
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
|
|
{
|
|
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
|
|
"VBIOS ROM for %s\n",
|
|
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
|
|
index b19ddac..8348475 100644
|
|
--- a/drivers/gpu/drm/i915/intel_crt.c
|
|
+++ b/drivers/gpu/drm/i915/intel_crt.c
|
|
@@ -750,7 +750,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
|
|
.destroy = intel_encoder_destroy,
|
|
};
|
|
|
|
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
|
|
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
|
|
{
|
|
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
|
|
return 1;
|
|
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
|
|
index b6fb3eb..c514690 100644
|
|
--- a/drivers/gpu/drm/i915/intel_display.c
|
|
+++ b/drivers/gpu/drm/i915/intel_display.c
|
|
@@ -3817,7 +3817,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
|
|
ironlake_fdi_disable(crtc);
|
|
|
|
ironlake_disable_pch_transcoder(dev_priv, pipe);
|
|
- intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
|
|
|
|
if (HAS_PCH_CPT(dev)) {
|
|
/* disable TRANS_DP_CTL */
|
|
@@ -3883,7 +3882,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
|
|
|
|
if (intel_crtc->config.has_pch_encoder) {
|
|
lpt_disable_pch_transcoder(dev_priv);
|
|
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
|
|
intel_ddi_fdi_disable(crtc);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
|
|
index b2d0887..462307c 100644
|
|
--- a/drivers/gpu/drm/i915/intel_dp.c
|
|
+++ b/drivers/gpu/drm/i915/intel_dp.c
|
|
@@ -481,10 +481,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
|
DP_AUX_CH_CTL_RECEIVE_ERROR))
|
|
continue;
|
|
if (status & DP_AUX_CH_CTL_DONE)
|
|
- break;
|
|
+ goto done;
|
|
}
|
|
- if (status & DP_AUX_CH_CTL_DONE)
|
|
- break;
|
|
}
|
|
|
|
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
|
|
@@ -493,6 +491,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
|
goto out;
|
|
}
|
|
|
|
+done:
|
|
/* Check for timeout or receive error.
|
|
* Timeouts occur when the sink is not connected
|
|
*/
|
|
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
|
|
index d33b61d..81f8ec8 100644
|
|
--- a/drivers/gpu/drm/i915/intel_i2c.c
|
|
+++ b/drivers/gpu/drm/i915/intel_i2c.c
|
|
@@ -324,18 +324,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
|
|
}
|
|
|
|
static int
|
|
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
|
|
- u32 gmbus1_index)
|
|
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
|
|
+ unsigned short addr, u8 *buf, unsigned int len,
|
|
+ u32 gmbus1_index)
|
|
{
|
|
int reg_offset = dev_priv->gpio_mmio_base;
|
|
- u16 len = msg->len;
|
|
- u8 *buf = msg->buf;
|
|
|
|
I915_WRITE(GMBUS1 + reg_offset,
|
|
gmbus1_index |
|
|
GMBUS_CYCLE_WAIT |
|
|
(len << GMBUS_BYTE_COUNT_SHIFT) |
|
|
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
|
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
|
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
|
|
while (len) {
|
|
int ret;
|
|
@@ -357,11 +356,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
|
|
}
|
|
|
|
static int
|
|
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
|
|
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
|
|
+ u32 gmbus1_index)
|
|
{
|
|
- int reg_offset = dev_priv->gpio_mmio_base;
|
|
- u16 len = msg->len;
|
|
u8 *buf = msg->buf;
|
|
+ unsigned int rx_size = msg->len;
|
|
+ unsigned int len;
|
|
+ int ret;
|
|
+
|
|
+ do {
|
|
+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
|
|
+
|
|
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
|
|
+ buf, len, gmbus1_index);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ rx_size -= len;
|
|
+ buf += len;
|
|
+ } while (rx_size != 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
|
|
+ unsigned short addr, u8 *buf, unsigned int len)
|
|
+{
|
|
+ int reg_offset = dev_priv->gpio_mmio_base;
|
|
+ unsigned int chunk_size = len;
|
|
u32 val, loop;
|
|
|
|
val = loop = 0;
|
|
@@ -373,8 +396,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
|
|
I915_WRITE(GMBUS3 + reg_offset, val);
|
|
I915_WRITE(GMBUS1 + reg_offset,
|
|
GMBUS_CYCLE_WAIT |
|
|
- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
|
|
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
|
+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
|
|
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
|
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
|
|
while (len) {
|
|
int ret;
|
|
@@ -391,6 +414,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
|
|
+{
|
|
+ u8 *buf = msg->buf;
|
|
+ unsigned int tx_size = msg->len;
|
|
+ unsigned int len;
|
|
+ int ret;
|
|
+
|
|
+ do {
|
|
+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
|
|
+
|
|
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ buf += len;
|
|
+ tx_size -= len;
|
|
+ } while (tx_size != 0);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -443,7 +489,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
|
|
struct intel_gmbus,
|
|
adapter);
|
|
struct drm_i915_private *dev_priv = bus->dev_priv;
|
|
- int i, reg_offset;
|
|
+ int i = 0, inc, try = 0, reg_offset;
|
|
int ret = 0;
|
|
|
|
intel_aux_display_runtime_get(dev_priv);
|
|
@@ -456,12 +502,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
|
|
|
|
reg_offset = dev_priv->gpio_mmio_base;
|
|
|
|
+retry:
|
|
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
|
|
|
|
- for (i = 0; i < num; i++) {
|
|
+ for (; i < num; i += inc) {
|
|
+ inc = 1;
|
|
if (gmbus_is_index_read(msgs, i, num)) {
|
|
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
|
|
- i += 1; /* set i to the index of the read xfer */
|
|
+ inc = 2; /* an index read is two msgs */
|
|
} else if (msgs[i].flags & I2C_M_RD) {
|
|
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
|
|
} else {
|
|
@@ -533,6 +581,18 @@ clear_err:
|
|
adapter->name, msgs[i].addr,
|
|
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
|
|
|
|
+ /*
|
|
+ * Passive adapters sometimes NAK the first probe. Retry the first
|
|
+ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
|
|
+ * has retries internally. See also the retry loop in
|
|
+ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
|
|
+ */
|
|
+ if (ret == -ENXIO && i == 0 && try++ == 0) {
|
|
+ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
|
|
+ adapter->name);
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
goto out;
|
|
|
|
timeout:
|
|
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
|
|
index eb8f64b..1eb4994 100644
|
|
--- a/drivers/gpu/drm/i915/intel_lvds.c
|
|
+++ b/drivers/gpu/drm/i915/intel_lvds.c
|
|
@@ -544,7 +544,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
|
|
.destroy = intel_encoder_destroy,
|
|
};
|
|
|
|
-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
|
|
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
|
|
{
|
|
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
|
|
return 1;
|
|
@@ -815,12 +815,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
|
|
static const struct dmi_system_id intel_dual_link_lvds[] = {
|
|
{
|
|
.callback = intel_dual_link_lvds_callback,
|
|
- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
|
|
+ .ident = "Apple MacBook Pro 15\" (2010)",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ .callback = intel_dual_link_lvds_callback,
|
|
+ .ident = "Apple MacBook Pro 15\" (2011)",
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
|
|
},
|
|
},
|
|
+ {
|
|
+ .callback = intel_dual_link_lvds_callback,
|
|
+ .ident = "Apple MacBook Pro 15\" (2012)",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
|
|
+ },
|
|
+ },
|
|
{ } /* terminating entry */
|
|
};
|
|
|
|
@@ -905,6 +921,17 @@ void intel_lvds_init(struct drm_device *dev)
|
|
int pipe;
|
|
u8 pin;
|
|
|
|
+ /*
|
|
+ * Unlock registers and just leave them unlocked. Do this before
|
|
+ * checking quirk lists to avoid bogus WARNINGs.
|
|
+ */
|
|
+ if (HAS_PCH_SPLIT(dev)) {
|
|
+ I915_WRITE(PCH_PP_CONTROL,
|
|
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
|
|
+ } else {
|
|
+ I915_WRITE(PP_CONTROL,
|
|
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
|
|
+ }
|
|
if (!intel_lvds_supported(dev))
|
|
return;
|
|
|
|
@@ -1099,17 +1126,6 @@ out:
|
|
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
|
|
lvds_encoder->is_dual_link ? "dual" : "single");
|
|
|
|
- /*
|
|
- * Unlock registers and just
|
|
- * leave them unlocked
|
|
- */
|
|
- if (HAS_PCH_SPLIT(dev)) {
|
|
- I915_WRITE(PCH_PP_CONTROL,
|
|
- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
|
|
- } else {
|
|
- I915_WRITE(PP_CONTROL,
|
|
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
|
|
- }
|
|
lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
|
|
if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
|
|
DRM_DEBUG_KMS("lid notifier registration failed\n");
|
|
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
|
|
index fd98bec..c6d9777 100644
|
|
--- a/drivers/gpu/drm/i915/intel_panel.c
|
|
+++ b/drivers/gpu/drm/i915/intel_panel.c
|
|
@@ -645,7 +645,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
|
|
|
|
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
|
|
if (cpu_ctl2 & BLM_PWM_ENABLE) {
|
|
- WARN(1, "cpu backlight already enabled\n");
|
|
+ DRM_DEBUG_KMS("cpu backlight already enabled\n");
|
|
cpu_ctl2 &= ~BLM_PWM_ENABLE;
|
|
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
|
|
}
|
|
@@ -693,7 +693,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
|
|
|
|
ctl = I915_READ(BLC_PWM_CTL);
|
|
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
|
|
- WARN(1, "backlight already enabled\n");
|
|
+ DRM_DEBUG_KMS("backlight already enabled\n");
|
|
I915_WRITE(BLC_PWM_CTL, 0);
|
|
}
|
|
|
|
@@ -724,7 +724,7 @@ static void i965_enable_backlight(struct intel_connector *connector)
|
|
|
|
ctl2 = I915_READ(BLC_PWM_CTL2);
|
|
if (ctl2 & BLM_PWM_ENABLE) {
|
|
- WARN(1, "backlight already enabled\n");
|
|
+ DRM_DEBUG_KMS("backlight already enabled\n");
|
|
ctl2 &= ~BLM_PWM_ENABLE;
|
|
I915_WRITE(BLC_PWM_CTL2, ctl2);
|
|
}
|
|
@@ -758,7 +758,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
|
|
|
|
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
|
|
if (ctl2 & BLM_PWM_ENABLE) {
|
|
- WARN(1, "backlight already enabled\n");
|
|
+ DRM_DEBUG_KMS("backlight already enabled\n");
|
|
ctl2 &= ~BLM_PWM_ENABLE;
|
|
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
|
|
}
|
|
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
|
|
index 31b36c5..d2af1e1 100644
|
|
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
|
|
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
|
|
@@ -334,12 +334,15 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
|
|
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
|
|
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
|
|
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
|
|
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
|
|
/*
|
|
* TLB invalidate requires a post-sync write.
|
|
*/
|
|
flags |= PIPE_CONTROL_QW_WRITE;
|
|
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
|
|
|
|
+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
|
|
+
|
|
/* Workaround: we must issue a pipe_control with CS-stall bit
|
|
* set before a pipe_control command that has the state cache
|
|
* invalidate bit set. */
|
|
@@ -475,6 +478,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
|
|
}
|
|
}
|
|
|
|
+ /* Enforce ordering by reading HEAD register back */
|
|
+ I915_READ_HEAD(ring);
|
|
+
|
|
/* Initialize the ring. This must happen _after_ we've cleared the ring
|
|
* registers with the above sequence (the readback of the HEAD registers
|
|
* also enforces ordering), otherwise the hw might lose the new ring
|
|
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
|
|
index 365c7c4..9c9606c 100644
|
|
--- a/drivers/gpu/drm/i915/intel_tv.c
|
|
+++ b/drivers/gpu/drm/i915/intel_tv.c
|
|
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
|
|
struct drm_device *dev = encoder->base.dev;
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
|
|
+ /* Prevents vblank waits from timing out in intel_tv_detect_type() */
|
|
+ intel_wait_for_vblank(encoder->base.dev,
|
|
+ to_intel_crtc(encoder->base.crtc)->pipe);
|
|
+
|
|
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
|
|
index c879631..b6c063c 100644
|
|
--- a/drivers/gpu/drm/i915/intel_uncore.c
|
|
+++ b/drivers/gpu/drm/i915/intel_uncore.c
|
|
@@ -451,8 +451,8 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
|
|
static void
|
|
assert_device_not_suspended(struct drm_i915_private *dev_priv)
|
|
{
|
|
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
|
|
- "Device suspended\n");
|
|
+ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
|
|
+ "Device suspended\n");
|
|
}
|
|
|
|
#define REG_READ_HEADER(x) \
|
|
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
|
|
index 9683747..f2511a0 100644
|
|
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
|
|
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
|
|
@@ -1529,6 +1529,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
|
|
return MODE_BANDWIDTH;
|
|
}
|
|
|
|
+ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
|
|
+ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
|
|
+ return MODE_H_ILLEGAL;
|
|
+ }
|
|
+
|
|
if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
|
|
mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
|
|
mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
|
|
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
|
|
index 461df93..4f32b34 100644
|
|
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
|
|
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
|
|
@@ -35,8 +35,6 @@
|
|
A3XX_INT0_CP_AHB_ERROR_HALT | \
|
|
A3XX_INT0_UCHE_OOB_ACCESS)
|
|
|
|
-static struct platform_device *a3xx_pdev;
|
|
-
|
|
static void a3xx_me_init(struct msm_gpu *gpu)
|
|
{
|
|
struct msm_ringbuffer *ring = gpu->rb;
|
|
@@ -311,7 +309,6 @@ static void a3xx_destroy(struct msm_gpu *gpu)
|
|
ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
|
|
#endif
|
|
|
|
- put_device(&a3xx_gpu->pdev->dev);
|
|
kfree(a3xx_gpu);
|
|
}
|
|
|
|
@@ -439,7 +436,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
|
|
struct a3xx_gpu *a3xx_gpu = NULL;
|
|
struct adreno_gpu *adreno_gpu;
|
|
struct msm_gpu *gpu;
|
|
- struct platform_device *pdev = a3xx_pdev;
|
|
+ struct msm_drm_private *priv = dev->dev_private;
|
|
+ struct platform_device *pdev = priv->gpu_pdev;
|
|
struct adreno_platform_config *config;
|
|
int ret;
|
|
|
|
@@ -460,7 +458,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
|
|
adreno_gpu = &a3xx_gpu->base;
|
|
gpu = &adreno_gpu->base;
|
|
|
|
- get_device(&pdev->dev);
|
|
a3xx_gpu->pdev = pdev;
|
|
|
|
gpu->fast_rate = config->fast_rate;
|
|
@@ -522,17 +519,24 @@ fail:
|
|
# include <mach/kgsl.h>
|
|
#endif
|
|
|
|
-static int a3xx_probe(struct platform_device *pdev)
|
|
+static void set_gpu_pdev(struct drm_device *dev,
|
|
+ struct platform_device *pdev)
|
|
+{
|
|
+ struct msm_drm_private *priv = dev->dev_private;
|
|
+ priv->gpu_pdev = pdev;
|
|
+}
|
|
+
|
|
+static int a3xx_bind(struct device *dev, struct device *master, void *data)
|
|
{
|
|
static struct adreno_platform_config config = {};
|
|
#ifdef CONFIG_OF
|
|
- struct device_node *child, *node = pdev->dev.of_node;
|
|
+ struct device_node *child, *node = dev->of_node;
|
|
u32 val;
|
|
int ret;
|
|
|
|
ret = of_property_read_u32(node, "qcom,chipid", &val);
|
|
if (ret) {
|
|
- dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
|
|
+ dev_err(dev, "could not find chipid: %d\n", ret);
|
|
return ret;
|
|
}
|
|
|
|
@@ -548,7 +552,7 @@ static int a3xx_probe(struct platform_device *pdev)
|
|
for_each_child_of_node(child, pwrlvl) {
|
|
ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
|
|
if (ret) {
|
|
- dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
|
|
+ dev_err(dev, "could not find gpu-freq: %d\n", ret);
|
|
return ret;
|
|
}
|
|
config.fast_rate = max(config.fast_rate, val);
|
|
@@ -558,12 +562,12 @@ static int a3xx_probe(struct platform_device *pdev)
|
|
}
|
|
|
|
if (!config.fast_rate) {
|
|
- dev_err(&pdev->dev, "could not find clk rates\n");
|
|
+ dev_err(dev, "could not find clk rates\n");
|
|
return -ENXIO;
|
|
}
|
|
|
|
#else
|
|
- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
|
|
+ struct kgsl_device_platform_data *pdata = dev->platform_data;
|
|
uint32_t version = socinfo_get_version();
|
|
if (cpu_is_apq8064ab()) {
|
|
config.fast_rate = 450000000;
|
|
@@ -609,14 +613,30 @@ static int a3xx_probe(struct platform_device *pdev)
|
|
config.bus_scale_table = pdata->bus_scale_table;
|
|
# endif
|
|
#endif
|
|
- pdev->dev.platform_data = &config;
|
|
- a3xx_pdev = pdev;
|
|
+ dev->platform_data = &config;
|
|
+ set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
|
|
return 0;
|
|
}
|
|
|
|
+static void a3xx_unbind(struct device *dev, struct device *master,
|
|
+ void *data)
|
|
+{
|
|
+ set_gpu_pdev(dev_get_drvdata(master), NULL);
|
|
+}
|
|
+
|
|
+static const struct component_ops a3xx_ops = {
|
|
+ .bind = a3xx_bind,
|
|
+ .unbind = a3xx_unbind,
|
|
+};
|
|
+
|
|
+static int a3xx_probe(struct platform_device *pdev)
|
|
+{
|
|
+ return component_add(&pdev->dev, &a3xx_ops);
|
|
+}
|
|
+
|
|
static int a3xx_remove(struct platform_device *pdev)
|
|
{
|
|
- a3xx_pdev = NULL;
|
|
+ component_del(&pdev->dev, &a3xx_ops);
|
|
return 0;
|
|
}
|
|
|
|
@@ -624,7 +644,6 @@ static const struct of_device_id dt_match[] = {
|
|
{ .compatible = "qcom,kgsl-3d0" },
|
|
{}
|
|
};
|
|
-MODULE_DEVICE_TABLE(of, dt_match);
|
|
|
|
static struct platform_driver a3xx_driver = {
|
|
.probe = a3xx_probe,
|
|
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
|
|
index 6f1588a..8a04a1d 100644
|
|
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
|
|
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
|
|
@@ -17,8 +17,6 @@
|
|
|
|
#include "hdmi.h"
|
|
|
|
-static struct platform_device *hdmi_pdev;
|
|
-
|
|
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
|
|
{
|
|
uint32_t ctrl = 0;
|
|
@@ -75,7 +73,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
|
|
{
|
|
struct hdmi *hdmi = NULL;
|
|
struct msm_drm_private *priv = dev->dev_private;
|
|
- struct platform_device *pdev = hdmi_pdev;
|
|
+ struct platform_device *pdev = priv->hdmi_pdev;
|
|
struct hdmi_platform_config *config;
|
|
int i, ret;
|
|
|
|
@@ -95,8 +93,6 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
|
|
|
|
kref_init(&hdmi->refcount);
|
|
|
|
- get_device(&pdev->dev);
|
|
-
|
|
hdmi->dev = dev;
|
|
hdmi->pdev = pdev;
|
|
hdmi->config = config;
|
|
@@ -249,17 +245,24 @@ fail:
|
|
|
|
#include <linux/of_gpio.h>
|
|
|
|
-static int hdmi_dev_probe(struct platform_device *pdev)
|
|
+static void set_hdmi_pdev(struct drm_device *dev,
|
|
+ struct platform_device *pdev)
|
|
+{
|
|
+ struct msm_drm_private *priv = dev->dev_private;
|
|
+ priv->hdmi_pdev = pdev;
|
|
+}
|
|
+
|
|
+static int hdmi_bind(struct device *dev, struct device *master, void *data)
|
|
{
|
|
static struct hdmi_platform_config config = {};
|
|
#ifdef CONFIG_OF
|
|
- struct device_node *of_node = pdev->dev.of_node;
|
|
+ struct device_node *of_node = dev->of_node;
|
|
|
|
int get_gpio(const char *name)
|
|
{
|
|
int gpio = of_get_named_gpio(of_node, name, 0);
|
|
if (gpio < 0) {
|
|
- dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
|
|
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
|
|
name, gpio);
|
|
gpio = -1;
|
|
}
|
|
@@ -336,14 +339,30 @@ static int hdmi_dev_probe(struct platform_device *pdev)
|
|
config.mux_sel_gpio = -1;
|
|
}
|
|
#endif
|
|
- pdev->dev.platform_data = &config;
|
|
- hdmi_pdev = pdev;
|
|
+ dev->platform_data = &config;
|
|
+ set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
|
|
return 0;
|
|
}
|
|
|
|
+static void hdmi_unbind(struct device *dev, struct device *master,
|
|
+ void *data)
|
|
+{
|
|
+ set_hdmi_pdev(dev_get_drvdata(master), NULL);
|
|
+}
|
|
+
|
|
+static const struct component_ops hdmi_ops = {
|
|
+ .bind = hdmi_bind,
|
|
+ .unbind = hdmi_unbind,
|
|
+};
|
|
+
|
|
+static int hdmi_dev_probe(struct platform_device *pdev)
|
|
+{
|
|
+ return component_add(&pdev->dev, &hdmi_ops);
|
|
+}
|
|
+
|
|
static int hdmi_dev_remove(struct platform_device *pdev)
|
|
{
|
|
- hdmi_pdev = NULL;
|
|
+ component_del(&pdev->dev, &hdmi_ops);
|
|
return 0;
|
|
}
|
|
|
|
@@ -351,7 +370,6 @@ static const struct of_device_id dt_match[] = {
|
|
{ .compatible = "qcom,hdmi-tx" },
|
|
{}
|
|
};
|
|
-MODULE_DEVICE_TABLE(of, dt_match);
|
|
|
|
static struct platform_driver hdmi_driver = {
|
|
.probe = hdmi_dev_probe,
|
|
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
|
|
index e6adafc..e79cfd0 100644
|
|
--- a/drivers/gpu/drm/msm/msm_drv.c
|
|
+++ b/drivers/gpu/drm/msm/msm_drv.c
|
|
@@ -56,6 +56,10 @@ static char *vram;
|
|
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
|
|
module_param(vram, charp, 0);
|
|
|
|
+/*
|
|
+ * Util/helpers:
|
|
+ */
|
|
+
|
|
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
|
|
const char *dbgname)
|
|
{
|
|
@@ -143,6 +147,8 @@ static int msm_unload(struct drm_device *dev)
|
|
priv->vram.paddr, &attrs);
|
|
}
|
|
|
|
+ component_unbind_all(dev->dev, dev);
|
|
+
|
|
dev->dev_private = NULL;
|
|
|
|
kfree(priv);
|
|
@@ -175,6 +181,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
|
|
struct msm_kms *kms;
|
|
int ret;
|
|
|
|
+
|
|
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
|
if (!priv) {
|
|
dev_err(dev->dev, "failed to allocate private data\n");
|
|
@@ -226,6 +233,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
|
|
(uint32_t)(priv->vram.paddr + size));
|
|
}
|
|
|
|
+ platform_set_drvdata(pdev, dev);
|
|
+
|
|
+ /* Bind all our sub-components: */
|
|
+ ret = component_bind_all(dev->dev, dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
switch (get_mdp_ver(pdev)) {
|
|
case 4:
|
|
kms = mdp4_kms_init(dev);
|
|
@@ -281,8 +295,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
|
|
goto fail;
|
|
}
|
|
|
|
- platform_set_drvdata(pdev, dev);
|
|
-
|
|
#ifdef CONFIG_DRM_MSM_FBDEV
|
|
priv->fbdev = msm_fbdev_init(dev);
|
|
#endif
|
|
@@ -819,18 +831,110 @@ static const struct dev_pm_ops msm_pm_ops = {
|
|
};
|
|
|
|
/*
|
|
+ * Componentized driver support:
|
|
+ */
|
|
+
|
|
+#ifdef CONFIG_OF
|
|
+/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
|
|
+ * (or probably any other).. so probably some room for some helpers
|
|
+ */
|
|
+static int compare_of(struct device *dev, void *data)
|
|
+{
|
|
+ return dev->of_node == data;
|
|
+}
|
|
+
|
|
+static int msm_drm_add_components(struct device *master, struct master *m)
|
|
+{
|
|
+ struct device_node *np = master->of_node;
|
|
+ unsigned i;
|
|
+ int ret;
|
|
+
|
|
+ for (i = 0; ; i++) {
|
|
+ struct device_node *node;
|
|
+
|
|
+ node = of_parse_phandle(np, "connectors", i);
|
|
+ if (!node)
|
|
+ break;
|
|
+
|
|
+ ret = component_master_add_child(m, compare_of, node);
|
|
+ of_node_put(node);
|
|
+
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+#else
|
|
+static int compare_dev(struct device *dev, void *data)
|
|
+{
|
|
+ return dev == data;
|
|
+}
|
|
+
|
|
+static int msm_drm_add_components(struct device *master, struct master *m)
|
|
+{
|
|
+ /* For non-DT case, it kinda sucks. We don't actually have a way
|
|
+ * to know whether or not we are waiting for certain devices (or if
|
|
+ * they are simply not present). But for non-DT we only need to
|
|
+ * care about apq8064/apq8060/etc (all mdp4/a3xx):
|
|
+ */
|
|
+ static const char *devnames[] = {
|
|
+ "hdmi_msm.0", "kgsl-3d0.0",
|
|
+ };
|
|
+ int i;
|
|
+
|
|
+ DBG("Adding components..");
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(devnames); i++) {
|
|
+ struct device *dev;
|
|
+ int ret;
|
|
+
|
|
+ dev = bus_find_device_by_name(&platform_bus_type,
|
|
+ NULL, devnames[i]);
|
|
+ if (!dev) {
|
|
+ dev_info(master, "still waiting for %s\n", devnames[i]);
|
|
+ return -EPROBE_DEFER;
|
|
+ }
|
|
+
|
|
+ ret = component_master_add_child(m, compare_dev, dev);
|
|
+ if (ret) {
|
|
+ DBG("could not add child: %d", ret);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int msm_drm_bind(struct device *dev)
|
|
+{
|
|
+ return drm_platform_init(&msm_driver, to_platform_device(dev));
|
|
+}
|
|
+
|
|
+static void msm_drm_unbind(struct device *dev)
|
|
+{
|
|
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
|
|
+}
|
|
+
|
|
+static const struct component_master_ops msm_drm_ops = {
|
|
+ .add_components = msm_drm_add_components,
|
|
+ .bind = msm_drm_bind,
|
|
+ .unbind = msm_drm_unbind,
|
|
+};
|
|
+
|
|
+/*
|
|
* Platform driver:
|
|
*/
|
|
|
|
static int msm_pdev_probe(struct platform_device *pdev)
|
|
{
|
|
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
|
|
- return drm_platform_init(&msm_driver, pdev);
|
|
+ return component_master_add(&pdev->dev, &msm_drm_ops);
|
|
}
|
|
|
|
static int msm_pdev_remove(struct platform_device *pdev)
|
|
{
|
|
- drm_put_dev(platform_get_drvdata(pdev));
|
|
+ component_master_del(&pdev->dev, &msm_drm_ops);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
|
|
index 3d63269..9d10ee0 100644
|
|
--- a/drivers/gpu/drm/msm/msm_drv.h
|
|
+++ b/drivers/gpu/drm/msm/msm_drv.h
|
|
@@ -22,6 +22,7 @@
|
|
#include <linux/clk.h>
|
|
#include <linux/cpufreq.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/component.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/pm.h>
|
|
#include <linux/pm_runtime.h>
|
|
@@ -69,6 +70,9 @@ struct msm_drm_private {
|
|
|
|
struct msm_kms *kms;
|
|
|
|
+ /* subordinate devices, if present: */
|
|
+ struct platform_device *hdmi_pdev, *gpu_pdev;
|
|
+
|
|
/* when we have more than one 'msm_gpu' these need to be an array: */
|
|
struct msm_gpu *gpu;
|
|
struct msm_file_private *lastctx;
|
|
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
|
|
index 2d9b9d7..f3edd28 100644
|
|
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
|
|
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
|
|
@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
|
|
struct dcb_output *outp)
|
|
{
|
|
u16 dcb = dcb_outp(bios, idx, ver, len);
|
|
+ memset(outp, 0x00, sizeof(*outp));
|
|
if (dcb) {
|
|
if (*ver >= 0x20) {
|
|
u32 conn = nv_ro32(bios, dcb + 0x00);
|
|
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
|
|
index a75c35c..165401c 100644
|
|
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
|
|
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
|
|
@@ -24,13 +24,6 @@
|
|
|
|
#include "nv04.h"
|
|
|
|
-static void
|
|
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
|
|
-{
|
|
- struct nv04_mc_priv *priv = (void *)pmc;
|
|
- nv_wr08(priv, 0x088050, 0xff);
|
|
-}
|
|
-
|
|
struct nouveau_oclass *
|
|
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
|
|
.base.handle = NV_SUBDEV(MC, 0x4c),
|
|
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
|
|
.fini = _nouveau_mc_fini,
|
|
},
|
|
.intr = nv04_mc_intr,
|
|
- .msi_rearm = nv4c_mc_msi_rearm,
|
|
}.base;
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
|
|
index 23ca7a5..74ed08a 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
|
|
@@ -10,7 +10,7 @@
|
|
|
|
#define DRIVER_MAJOR 1
|
|
#define DRIVER_MINOR 1
|
|
-#define DRIVER_PATCHLEVEL 1
|
|
+#define DRIVER_PATCHLEVEL 2
|
|
|
|
/*
|
|
* 1.1.1:
|
|
@@ -21,6 +21,8 @@
|
|
* to control registers on the MPs to enable performance counters,
|
|
* and to control the warp error enable mask (OpenGL requires out of
|
|
* bounds access to local memory to be silently ignored / return 0).
|
|
+ * 1.1.2:
|
|
+ * - fixes multiple bugs in flip completion events and timestamping
|
|
*/
|
|
|
|
#include <core/client.h>
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
|
|
index 471347e..a92fb01 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
|
|
@@ -100,7 +100,16 @@ void
|
|
nouveau_vga_fini(struct nouveau_drm *drm)
|
|
{
|
|
struct drm_device *dev = drm->dev;
|
|
+ bool runtime = false;
|
|
+
|
|
+ if (nouveau_runtime_pm == 1)
|
|
+ runtime = true;
|
|
+ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
|
|
+ runtime = true;
|
|
+
|
|
vga_switcheroo_unregister_client(dev->pdev);
|
|
+ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
|
|
+ vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
|
|
vga_client_register(dev->pdev, NULL, NULL, NULL);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
|
|
index f926b4c..56c6055 100644
|
|
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
|
|
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
|
|
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
|
|
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
|
|
struct page **pages, uint32_t npages, uint32_t roll)
|
|
{
|
|
- dma_addr_t pat_pa = 0;
|
|
+ dma_addr_t pat_pa = 0, data_pa = 0;
|
|
uint32_t *data;
|
|
struct pat *pat;
|
|
struct refill_engine *engine = txn->engine_handle;
|
|
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
|
|
.lut_id = engine->tcm->lut_id,
|
|
};
|
|
|
|
- data = alloc_dma(txn, 4*i, &pat->data_pa);
|
|
+ data = alloc_dma(txn, 4*i, &data_pa);
|
|
+ /* FIXME: what if data_pa is more than 32-bit ? */
|
|
+ pat->data_pa = data_pa;
|
|
|
|
while (i--) {
|
|
int n = i + roll;
|
|
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
|
|
index 5aec3e8..5fd3bab 100644
|
|
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
|
|
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
|
|
@@ -791,7 +791,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
|
|
omap_obj->paddr = tiler_ssptr(block);
|
|
omap_obj->block = block;
|
|
|
|
- DBG("got paddr: %08x", omap_obj->paddr);
|
|
+ DBG("got paddr: %pad", &omap_obj->paddr);
|
|
}
|
|
|
|
omap_obj->paddr_cnt++;
|
|
@@ -988,9 +988,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
|
|
|
|
off = drm_vma_node_start(&obj->vma_node);
|
|
|
|
- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
|
|
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
|
|
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
|
|
- off, omap_obj->paddr, omap_obj->paddr_cnt,
|
|
+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
|
|
omap_obj->vaddr, omap_obj->roll);
|
|
|
|
if (omap_obj->flags & OMAP_BO_TILED) {
|
|
@@ -1468,8 +1468,8 @@ void omap_gem_init(struct drm_device *dev)
|
|
entry->paddr = tiler_ssptr(block);
|
|
entry->block = block;
|
|
|
|
- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
|
|
- entry->paddr,
|
|
+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
|
|
+ &entry->paddr,
|
|
usergart[i].stride_pfn << PAGE_SHIFT);
|
|
}
|
|
}
|
|
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
|
|
index 046d5e6..5b62e21 100644
|
|
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
|
|
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
|
|
@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
|
|
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
|
|
info->out_width, info->out_height,
|
|
info->screen_width);
|
|
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
|
|
- info->paddr, info->p_uv_addr);
|
|
+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
|
|
+ &info->paddr, &info->p_uv_addr);
|
|
|
|
/* TODO: */
|
|
ilace = false;
|
|
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
|
|
index eb89653..c5e96a3 100644
|
|
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
|
|
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
|
|
@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
|
|
|
|
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
|
|
cmd->type = QXL_SURFACE_CMD_CREATE;
|
|
+ cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
|
|
cmd->u.surface_create.format = surf->surf.format;
|
|
cmd->u.surface_create.width = surf->surf.width;
|
|
cmd->u.surface_create.height = surf->surf.height;
|
|
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
|
|
index 798bde2..8e3267a 100644
|
|
--- a/drivers/gpu/drm/qxl/qxl_display.c
|
|
+++ b/drivers/gpu/drm/qxl/qxl_display.c
|
|
@@ -136,9 +136,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
|
|
*pwidth = head->width;
|
|
*pheight = head->height;
|
|
drm_mode_probed_add(connector, mode);
|
|
+ /* remember the last custom size for mode validation */
|
|
+ qdev->monitors_config_width = mode->hdisplay;
|
|
+ qdev->monitors_config_height = mode->vdisplay;
|
|
return 1;
|
|
}
|
|
|
|
+static struct mode_size {
|
|
+ int w;
|
|
+ int h;
|
|
+} common_modes[] = {
|
|
+ { 640, 480},
|
|
+ { 720, 480},
|
|
+ { 800, 600},
|
|
+ { 848, 480},
|
|
+ {1024, 768},
|
|
+ {1152, 768},
|
|
+ {1280, 720},
|
|
+ {1280, 800},
|
|
+ {1280, 854},
|
|
+ {1280, 960},
|
|
+ {1280, 1024},
|
|
+ {1440, 900},
|
|
+ {1400, 1050},
|
|
+ {1680, 1050},
|
|
+ {1600, 1200},
|
|
+ {1920, 1080},
|
|
+ {1920, 1200}
|
|
+};
|
|
+
|
|
static int qxl_add_common_modes(struct drm_connector *connector,
|
|
unsigned pwidth,
|
|
unsigned pheight)
|
|
@@ -146,29 +172,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
|
|
struct drm_device *dev = connector->dev;
|
|
struct drm_display_mode *mode = NULL;
|
|
int i;
|
|
- struct mode_size {
|
|
- int w;
|
|
- int h;
|
|
- } common_modes[] = {
|
|
- { 640, 480},
|
|
- { 720, 480},
|
|
- { 800, 600},
|
|
- { 848, 480},
|
|
- {1024, 768},
|
|
- {1152, 768},
|
|
- {1280, 720},
|
|
- {1280, 800},
|
|
- {1280, 854},
|
|
- {1280, 960},
|
|
- {1280, 1024},
|
|
- {1440, 900},
|
|
- {1400, 1050},
|
|
- {1680, 1050},
|
|
- {1600, 1200},
|
|
- {1920, 1080},
|
|
- {1920, 1200}
|
|
- };
|
|
-
|
|
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
|
|
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
|
|
60, false, false, false);
|
|
@@ -523,7 +526,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
|
|
struct qxl_framebuffer *qfb;
|
|
struct qxl_bo *bo, *old_bo = NULL;
|
|
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
|
|
- uint32_t width, height, base_offset;
|
|
bool recreate_primary = false;
|
|
int ret;
|
|
int surf_id;
|
|
@@ -553,9 +555,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
|
|
if (qcrtc->index == 0)
|
|
recreate_primary = true;
|
|
|
|
- width = mode->hdisplay;
|
|
- height = mode->vdisplay;
|
|
- base_offset = 0;
|
|
+ if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
|
|
+ DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
ret = qxl_bo_reserve(bo, false);
|
|
if (ret != 0)
|
|
@@ -569,10 +572,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
|
|
if (recreate_primary) {
|
|
qxl_io_destroy_primary(qdev);
|
|
qxl_io_log(qdev,
|
|
- "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
|
|
- width, height, bo->surf.width,
|
|
- bo->surf.height, bo->surf.stride, bo->surf.format);
|
|
- qxl_io_create_primary(qdev, base_offset, bo);
|
|
+ "recreate primary: %dx%d,%d,%d\n",
|
|
+ bo->surf.width, bo->surf.height,
|
|
+ bo->surf.stride, bo->surf.format);
|
|
+ qxl_io_create_primary(qdev, 0, bo);
|
|
bo->is_primary = true;
|
|
surf_id = 0;
|
|
} else {
|
|
@@ -753,11 +756,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
|
|
static int qxl_conn_mode_valid(struct drm_connector *connector,
|
|
struct drm_display_mode *mode)
|
|
{
|
|
+ struct drm_device *ddev = connector->dev;
|
|
+ struct qxl_device *qdev = ddev->dev_private;
|
|
+ int i;
|
|
+
|
|
/* TODO: is this called for user defined modes? (xrandr --add-mode)
|
|
* TODO: check that the mode fits in the framebuffer */
|
|
- DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
|
|
- mode->vdisplay, mode->status);
|
|
- return MODE_OK;
|
|
+
|
|
+ if(qdev->monitors_config_width == mode->hdisplay &&
|
|
+ qdev->monitors_config_height == mode->vdisplay)
|
|
+ return MODE_OK;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
|
|
+ if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
|
|
+ return MODE_OK;
|
|
+ }
|
|
+ return MODE_BAD;
|
|
}
|
|
|
|
static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
|
|
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
|
|
index 36ed40b..8aa077c 100644
|
|
--- a/drivers/gpu/drm/qxl/qxl_drv.h
|
|
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
|
|
@@ -325,6 +325,8 @@ struct qxl_device {
|
|
struct work_struct fb_work;
|
|
|
|
struct drm_property *hotplug_mode_update_property;
|
|
+ int monitors_config_width;
|
|
+ int monitors_config_height;
|
|
};
|
|
|
|
/* forward declaration for QXL_INFO_IO */
|
|
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
|
|
index 0bb86e6..56a13a9 100644
|
|
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
|
|
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
|
|
@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
|
|
qobj = gem_to_qxl_bo(gobj);
|
|
|
|
ret = qxl_release_list_add(release, qobj);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ drm_gem_object_unreference_unlocked(gobj);
|
|
return NULL;
|
|
+ }
|
|
|
|
return qobj;
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
|
|
index 0cca5f2..0db3e20 100644
|
|
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
|
|
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
|
|
@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
|
|
misc |= ATOM_COMPOSITESYNC;
|
|
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
|
|
misc |= ATOM_INTERLACE;
|
|
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
|
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
|
misc |= ATOM_DOUBLE_CLOCK_MODE;
|
|
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
|
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
|
|
|
|
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
|
|
args.ucCRTC = radeon_crtc->crtc_id;
|
|
@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
|
|
misc |= ATOM_COMPOSITESYNC;
|
|
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
|
|
misc |= ATOM_INTERLACE;
|
|
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
|
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
|
misc |= ATOM_DOUBLE_CLOCK_MODE;
|
|
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
|
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
|
|
|
|
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
|
|
args.ucCRTC = radeon_crtc->crtc_id;
|
|
@@ -1306,6 +1310,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
|
|
(x << 16) | y);
|
|
viewport_w = crtc->mode.hdisplay;
|
|
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
|
|
+ if ((rdev->family >= CHIP_BONAIRE) &&
|
|
+ (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
|
|
+ viewport_h *= 2;
|
|
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
|
|
(viewport_w << 16) | viewport_h);
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
|
|
index 5727dbd..b4dbaded 100644
|
|
--- a/drivers/gpu/drm/radeon/atombios_dp.c
|
|
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
|
|
@@ -576,6 +576,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
|
|
struct radeon_connector_atom_dig *dig_connector;
|
|
int dp_clock;
|
|
|
|
+ if ((mode->clock > 340000) &&
|
|
+ (!radeon_connector_is_dp12_capable(connector)))
|
|
+ return MODE_CLOCK_HIGH;
|
|
+
|
|
if (!radeon_connector->con_priv)
|
|
return MODE_CLOCK_HIGH;
|
|
dig_connector = radeon_connector->con_priv;
|
|
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
|
|
index 5fa854c..c7c2856 100644
|
|
--- a/drivers/gpu/drm/radeon/ci_dpm.c
|
|
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
|
|
@@ -851,6 +851,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
|
|
WREG32_SMC(CG_THERMAL_CTRL, tmp);
|
|
#endif
|
|
|
|
+ rdev->pm.dpm.thermal.min_temp = low_temp;
|
|
+ rdev->pm.dpm.thermal.max_temp = high_temp;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -922,7 +925,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
|
|
pi->vddc_leakage.count = 0;
|
|
pi->vddci_leakage.count = 0;
|
|
|
|
- if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
|
|
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
|
|
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
|
|
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
|
|
+ if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
|
|
+ continue;
|
|
+ if (vddc != 0 && vddc != virtual_voltage_id) {
|
|
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
|
|
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
|
|
+ pi->vddc_leakage.count++;
|
|
+ }
|
|
+ }
|
|
+ } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
|
|
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
|
|
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
|
|
if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
|
|
@@ -4719,7 +4733,7 @@ void ci_dpm_disable(struct radeon_device *rdev)
|
|
ci_enable_spread_spectrum(rdev, false);
|
|
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
|
|
ci_stop_dpm(rdev);
|
|
- ci_enable_ds_master_switch(rdev, true);
|
|
+ ci_enable_ds_master_switch(rdev, false);
|
|
ci_enable_ulv(rdev, false);
|
|
ci_clear_vc(rdev);
|
|
ci_reset_to_default(rdev);
|
|
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
|
|
index 270f68a..2fd2fb3 100644
|
|
--- a/drivers/gpu/drm/radeon/cik.c
|
|
+++ b/drivers/gpu/drm/radeon/cik.c
|
|
@@ -2219,6 +2219,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
|
|
gb_tile_moden = 0;
|
|
break;
|
|
}
|
|
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
|
|
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
|
|
}
|
|
} else if (num_pipe_configs == 8) {
|
|
@@ -3230,6 +3231,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
|
|
(rdev->pdev->device == 0x130B) ||
|
|
(rdev->pdev->device == 0x130E) ||
|
|
(rdev->pdev->device == 0x1315) ||
|
|
+ (rdev->pdev->device == 0x1318) ||
|
|
(rdev->pdev->device == 0x131B)) {
|
|
rdev->config.cik.max_cu_per_sh = 4;
|
|
rdev->config.cik.max_backends_per_se = 1;
|
|
@@ -3556,7 +3558,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
|
|
struct radeon_ring *ring = &rdev->ring[fence->ring];
|
|
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
|
|
|
|
- /* EVENT_WRITE_EOP - flush caches, send int */
|
|
+ /* Workaround for cache flush problems. First send a dummy EOP
|
|
+ * event down the pipe with seq one below.
|
|
+ */
|
|
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
|
|
+ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
|
|
+ EOP_TC_ACTION_EN |
|
|
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
|
|
+ EVENT_INDEX(5)));
|
|
+ radeon_ring_write(ring, addr & 0xfffffffc);
|
|
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
|
|
+ DATA_SEL(1) | INT_SEL(0));
|
|
+ radeon_ring_write(ring, fence->seq - 1);
|
|
+ radeon_ring_write(ring, 0);
|
|
+
|
|
+ /* Then send the real EOP event down the pipe. */
|
|
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
|
|
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
|
|
EOP_TC_ACTION_EN |
|
|
@@ -3934,8 +3950,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
|
|
/* init the CE partitions. CE only used for gfx on CIK */
|
|
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
|
|
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
|
|
- radeon_ring_write(ring, 0xc000);
|
|
- radeon_ring_write(ring, 0xc000);
|
|
+ radeon_ring_write(ring, 0x8000);
|
|
+ radeon_ring_write(ring, 0x8000);
|
|
|
|
/* setup clear context state */
|
|
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
|
|
@@ -4132,6 +4148,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
|
|
WDOORBELL32(ring->doorbell_index, ring->wptr);
|
|
}
|
|
|
|
+static void cik_compute_stop(struct radeon_device *rdev,
|
|
+ struct radeon_ring *ring)
|
|
+{
|
|
+ u32 j, tmp;
|
|
+
|
|
+ cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
|
|
+ /* Disable wptr polling. */
|
|
+ tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
|
|
+ tmp &= ~WPTR_POLL_EN;
|
|
+ WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
|
|
+ /* Disable HQD. */
|
|
+ if (RREG32(CP_HQD_ACTIVE) & 1) {
|
|
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
|
|
+ for (j = 0; j < rdev->usec_timeout; j++) {
|
|
+ if (!(RREG32(CP_HQD_ACTIVE) & 1))
|
|
+ break;
|
|
+ udelay(1);
|
|
+ }
|
|
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
|
|
+ WREG32(CP_HQD_PQ_RPTR, 0);
|
|
+ WREG32(CP_HQD_PQ_WPTR, 0);
|
|
+ }
|
|
+ cik_srbm_select(rdev, 0, 0, 0, 0);
|
|
+}
|
|
+
|
|
/**
|
|
* cik_cp_compute_enable - enable/disable the compute CP MEs
|
|
*
|
|
@@ -4145,6 +4186,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
|
|
if (enable)
|
|
WREG32(CP_MEC_CNTL, 0);
|
|
else {
|
|
+ /*
|
|
+ * To make hibernation reliable we need to clear compute ring
|
|
+ * configuration before halting the compute ring.
|
|
+ */
|
|
+ mutex_lock(&rdev->srbm_mutex);
|
|
+ cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
|
|
+ cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
|
|
+ mutex_unlock(&rdev->srbm_mutex);
|
|
+
|
|
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
|
|
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
|
|
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
|
|
@@ -4390,7 +4440,7 @@ struct bonaire_mqd
|
|
*/
|
|
static int cik_cp_compute_resume(struct radeon_device *rdev)
|
|
{
|
|
- int r, i, idx;
|
|
+ int r, i, j, idx;
|
|
u32 tmp;
|
|
bool use_doorbell = true;
|
|
u64 hqd_gpu_addr;
|
|
@@ -4509,7 +4559,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
|
|
mqd->queue_state.cp_hqd_pq_wptr= 0;
|
|
if (RREG32(CP_HQD_ACTIVE) & 1) {
|
|
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
|
|
- for (i = 0; i < rdev->usec_timeout; i++) {
|
|
+ for (j = 0; j < rdev->usec_timeout; j++) {
|
|
if (!(RREG32(CP_HQD_ACTIVE) & 1))
|
|
break;
|
|
udelay(1);
|
|
@@ -5344,7 +5394,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
|
|
*/
|
|
/* set vm size, must be a multiple of 4 */
|
|
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
|
|
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
|
|
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
|
|
for (i = 1; i < 16; i++) {
|
|
if (i < 8)
|
|
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
|
|
@@ -5543,12 +5593,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
|
|
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
|
{
|
|
struct radeon_ring *ring = &rdev->ring[ridx];
|
|
+ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
|
|
|
|
if (vm == NULL)
|
|
return;
|
|
|
|
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
|
|
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
|
|
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
|
|
WRITE_DATA_DST_SEL(0)));
|
|
if (vm->id < 8) {
|
|
radeon_ring_write(ring,
|
|
@@ -5598,7 +5649,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
|
radeon_ring_write(ring, 1 << vm->id);
|
|
|
|
/* compute doesn't have PFP */
|
|
- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
|
|
+ if (usepfp) {
|
|
/* sync PFP to ME, otherwise we might get invalid PFP reads */
|
|
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
|
|
radeon_ring_write(ring, 0x0);
|
|
@@ -5876,6 +5927,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
|
|
}
|
|
|
|
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
|
|
+ data |= 0x00000001;
|
|
data &= 0xfffffffd;
|
|
if (orig != data)
|
|
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
|
|
@@ -5907,7 +5959,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
|
|
}
|
|
} else {
|
|
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
|
|
- data |= 0x00000002;
|
|
+ data |= 0x00000003;
|
|
if (orig != data)
|
|
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
|
|
|
|
@@ -6805,7 +6857,6 @@ int cik_irq_set(struct radeon_device *rdev)
|
|
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
|
|
u32 grbm_int_cntl = 0;
|
|
u32 dma_cntl, dma_cntl1;
|
|
- u32 thermal_int;
|
|
|
|
if (!rdev->irq.installed) {
|
|
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
|
|
@@ -6842,13 +6893,6 @@ int cik_irq_set(struct radeon_device *rdev)
|
|
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
|
|
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
|
|
|
|
- if (rdev->flags & RADEON_IS_IGP)
|
|
- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
|
|
- ~(THERM_INTH_MASK | THERM_INTL_MASK);
|
|
- else
|
|
- thermal_int = RREG32_SMC(CG_THERMAL_INT) &
|
|
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
|
|
-
|
|
/* enable CP interrupts on all rings */
|
|
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
|
|
DRM_DEBUG("cik_irq_set: sw int gfx\n");
|
|
@@ -7006,14 +7050,6 @@ int cik_irq_set(struct radeon_device *rdev)
|
|
hpd6 |= DC_HPDx_INT_EN;
|
|
}
|
|
|
|
- if (rdev->irq.dpm_thermal) {
|
|
- DRM_DEBUG("dpm thermal\n");
|
|
- if (rdev->flags & RADEON_IS_IGP)
|
|
- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
|
|
- else
|
|
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
|
|
- }
|
|
-
|
|
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
|
|
|
|
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
|
|
@@ -7067,10 +7103,8 @@ int cik_irq_set(struct radeon_device *rdev)
|
|
WREG32(DC_HPD5_INT_CONTROL, hpd5);
|
|
WREG32(DC_HPD6_INT_CONTROL, hpd6);
|
|
|
|
- if (rdev->flags & RADEON_IS_IGP)
|
|
- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
|
|
- else
|
|
- WREG32_SMC(CG_THERMAL_INT, thermal_int);
|
|
+ /* posting read */
|
|
+ RREG32(SRBM_STATUS);
|
|
|
|
return 0;
|
|
}
|
|
@@ -7270,6 +7304,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
|
|
tmp = RREG32(IH_RB_CNTL);
|
|
tmp |= IH_WPTR_OVERFLOW_CLEAR;
|
|
WREG32(IH_RB_CNTL, tmp);
|
|
+ wptr &= ~RB_OVERFLOW;
|
|
}
|
|
return (wptr & rdev->ih.ptr_mask);
|
|
}
|
|
@@ -7776,6 +7811,7 @@ restart_ih:
|
|
static int cik_startup(struct radeon_device *rdev)
|
|
{
|
|
struct radeon_ring *ring;
|
|
+ u32 nop;
|
|
int r;
|
|
|
|
/* enable pcie gen2/3 link */
|
|
@@ -7893,9 +7929,15 @@ static int cik_startup(struct radeon_device *rdev)
|
|
}
|
|
cik_irq_set(rdev);
|
|
|
|
+ if (rdev->family == CHIP_HAWAII) {
|
|
+ nop = RADEON_CP_PACKET2;
|
|
+ } else {
|
|
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
|
|
+ }
|
|
+
|
|
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
|
|
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
|
|
- PACKET3(PACKET3_NOP, 0x3FFF));
|
|
+ nop);
|
|
if (r)
|
|
return r;
|
|
|
|
@@ -7903,7 +7945,7 @@ static int cik_startup(struct radeon_device *rdev)
|
|
/* type-2 packets are deprecated on MEC, use type-3 instead */
|
|
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
|
|
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
|
|
- PACKET3(PACKET3_NOP, 0x3FFF));
|
|
+ nop);
|
|
if (r)
|
|
return r;
|
|
ring->me = 1; /* first MEC */
|
|
@@ -7914,7 +7956,7 @@ static int cik_startup(struct radeon_device *rdev)
|
|
/* type-2 packets are deprecated on MEC, use type-3 instead */
|
|
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
|
|
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
|
|
- PACKET3(PACKET3_NOP, 0x3FFF));
|
|
+ nop);
|
|
if (r)
|
|
return r;
|
|
/* dGPU only have 1 MEC */
|
|
@@ -8882,6 +8924,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
|
|
u32 num_heads = 0, lb_size;
|
|
int i;
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
for (i = 0; i < rdev->num_crtc; i++) {
|
|
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
|
|
index aac8f48..e590aec 100644
|
|
--- a/drivers/gpu/drm/radeon/cik_sdma.c
|
|
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
|
|
@@ -266,6 +266,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
|
|
}
|
|
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
|
|
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
|
|
+
|
|
+ /* FIXME use something else than big hammer but after few days can not
|
|
+ * seem to find good combination so reset SDMA blocks as it seems we
|
|
+ * do not shut them down properly. This fix hibernation and does not
|
|
+ * affect suspend to ram.
|
|
+ */
|
|
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
|
|
+ (void)RREG32(SRBM_SOFT_RESET);
|
|
+ udelay(50);
|
|
+ WREG32(SRBM_SOFT_RESET, 0);
|
|
+ (void)RREG32(SRBM_SOFT_RESET);
|
|
}
|
|
|
|
/**
|
|
@@ -461,13 +472,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
|
|
{
|
|
int r;
|
|
|
|
- /* Reset dma */
|
|
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
|
|
- RREG32(SRBM_SOFT_RESET);
|
|
- udelay(50);
|
|
- WREG32(SRBM_SOFT_RESET, 0);
|
|
- RREG32(SRBM_SOFT_RESET);
|
|
-
|
|
r = cik_sdma_load_microcode(rdev);
|
|
if (r)
|
|
return r;
|
|
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
|
|
index 64108db..ee9f0b4 100644
|
|
--- a/drivers/gpu/drm/radeon/evergreen.c
|
|
+++ b/drivers/gpu/drm/radeon/evergreen.c
|
|
@@ -2362,6 +2362,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
|
|
u32 num_heads = 0, lb_size;
|
|
int i;
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
for (i = 0; i < rdev->num_crtc; i++) {
|
|
@@ -2570,6 +2573,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
|
|
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
|
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
|
|
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
|
|
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
|
}
|
|
} else {
|
|
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
|
|
@@ -4592,6 +4596,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
|
|
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
|
|
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
|
|
|
|
+ /* posting read */
|
|
+ RREG32(SRBM_STATUS);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -4763,6 +4770,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
|
|
tmp = RREG32(IH_RB_CNTL);
|
|
tmp |= IH_WPTR_OVERFLOW_CLEAR;
|
|
WREG32(IH_RB_CNTL, tmp);
|
|
+ wptr &= ~RB_OVERFLOW;
|
|
}
|
|
return (wptr & rdev->ih.ptr_mask);
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
|
|
index 351db36..c7c7bc5 100644
|
|
--- a/drivers/gpu/drm/radeon/kv_dpm.c
|
|
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
|
|
@@ -1121,6 +1121,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
|
|
}
|
|
}
|
|
|
|
+static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
|
|
+{
|
|
+ u32 thermal_int;
|
|
+
|
|
+ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
|
|
+ if (enable)
|
|
+ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
|
|
+ else
|
|
+ thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
|
|
+ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
|
|
+
|
|
+}
|
|
+
|
|
int kv_dpm_enable(struct radeon_device *rdev)
|
|
{
|
|
struct kv_power_info *pi = kv_get_pi(rdev);
|
|
@@ -1232,8 +1245,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev)
|
|
DRM_ERROR("kv_set_thermal_temperature_range failed\n");
|
|
return ret;
|
|
}
|
|
- rdev->irq.dpm_thermal = true;
|
|
- radeon_irq_set(rdev);
|
|
+ kv_enable_thermal_int(rdev, true);
|
|
}
|
|
|
|
/* powerdown unused blocks for now */
|
|
@@ -1261,6 +1273,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
|
|
kv_stop_dpm(rdev);
|
|
kv_enable_ulv(rdev, false);
|
|
kv_reset_am(rdev);
|
|
+ kv_enable_thermal_int(rdev, false);
|
|
|
|
kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
|
|
index bf6300c..392d94b 100644
|
|
--- a/drivers/gpu/drm/radeon/ni.c
|
|
+++ b/drivers/gpu/drm/radeon/ni.c
|
|
@@ -1073,12 +1073,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
|
|
|
if ((rdev->config.cayman.max_backends_per_se == 1) &&
|
|
(rdev->flags & RADEON_IS_IGP)) {
|
|
- if ((disabled_rb_mask & 3) == 1) {
|
|
- /* RB0 disabled, RB1 enabled */
|
|
- tmp = 0x11111111;
|
|
- } else {
|
|
+ if ((disabled_rb_mask & 3) == 2) {
|
|
/* RB1 disabled, RB0 enabled */
|
|
tmp = 0x00000000;
|
|
+ } else {
|
|
+ /* RB0 disabled, RB1 enabled */
|
|
+ tmp = 0x11111111;
|
|
}
|
|
} else {
|
|
tmp = gb_addr_config & NUM_PIPES_MASK;
|
|
@@ -1256,7 +1256,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
|
|
*/
|
|
for (i = 1; i < 8; i++) {
|
|
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
|
|
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
|
|
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
|
|
+ rdev->vm_manager.max_pfn - 1);
|
|
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
|
|
rdev->gart.table_addr >> 12);
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
|
|
index 7cf96b1..94fa49e 100644
|
|
--- a/drivers/gpu/drm/radeon/ni_dma.c
|
|
+++ b/drivers/gpu/drm/radeon/ni_dma.c
|
|
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
|
|
u32 reg_offset, wb_offset;
|
|
int i, r;
|
|
|
|
- /* Reset dma */
|
|
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
|
|
- RREG32(SRBM_SOFT_RESET);
|
|
- udelay(50);
|
|
- WREG32(SRBM_SOFT_RESET, 0);
|
|
-
|
|
for (i = 0; i < 2; i++) {
|
|
if (i == 0) {
|
|
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
|
|
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
|
|
index 3cc78bb..e28de20 100644
|
|
--- a/drivers/gpu/drm/radeon/r100.c
|
|
+++ b/drivers/gpu/drm/radeon/r100.c
|
|
@@ -742,6 +742,10 @@ int r100_irq_set(struct radeon_device *rdev)
|
|
tmp |= RADEON_FP2_DETECT_MASK;
|
|
}
|
|
WREG32(RADEON_GEN_INT_CNTL, tmp);
|
|
+
|
|
+ /* read back to post the write */
|
|
+ RREG32(RADEON_GEN_INT_CNTL);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -3219,6 +3223,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
|
|
uint32_t pixel_bytes1 = 0;
|
|
uint32_t pixel_bytes2 = 0;
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
if (rdev->mode_info.crtcs[0]->base.enabled) {
|
|
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
|
|
index f28ab84..74a8a38 100644
|
|
--- a/drivers/gpu/drm/radeon/r600.c
|
|
+++ b/drivers/gpu/drm/radeon/r600.c
|
|
@@ -3647,6 +3647,9 @@ int r600_irq_set(struct radeon_device *rdev)
|
|
WREG32(RV770_CG_THERMAL_INT, thermal_int);
|
|
}
|
|
|
|
+ /* posting read */
|
|
+ RREG32(R_000E50_SRBM_STATUS);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -3795,6 +3798,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
|
|
tmp = RREG32(IH_RB_CNTL);
|
|
tmp |= IH_WPTR_OVERFLOW_CLEAR;
|
|
WREG32(IH_RB_CNTL, tmp);
|
|
+ wptr &= ~RB_OVERFLOW;
|
|
}
|
|
return (wptr & rdev->ih.ptr_mask);
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
|
|
index b2d4c91..9949551 100644
|
|
--- a/drivers/gpu/drm/radeon/r600_dma.c
|
|
+++ b/drivers/gpu/drm/radeon/r600_dma.c
|
|
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
|
|
u32 rb_bufsz;
|
|
int r;
|
|
|
|
- /* Reset dma */
|
|
- if (rdev->family >= CHIP_RV770)
|
|
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
|
|
- else
|
|
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
|
|
- RREG32(SRBM_SOFT_RESET);
|
|
- udelay(50);
|
|
- WREG32(SRBM_SOFT_RESET, 0);
|
|
-
|
|
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
|
|
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
|
|
index 813db8d..e981082 100644
|
|
--- a/drivers/gpu/drm/radeon/r600_dpm.c
|
|
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
|
|
@@ -187,7 +187,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
|
|
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
|
radeon_crtc = to_radeon_crtc(crtc);
|
|
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
|
|
- vrefresh = radeon_crtc->hw_mode.vrefresh;
|
|
+ vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
|
|
break;
|
|
}
|
|
}
|
|
@@ -1209,7 +1209,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
|
|
(mode_info->atom_context->bios + data_offset +
|
|
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
|
|
rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
|
|
- ppt->usMaximumPowerDeliveryLimit;
|
|
+ le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
|
|
pt = &ppt->power_tune_table;
|
|
} else {
|
|
ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
|
|
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
|
|
index 08e86f9..b837e9f 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon.h
|
|
+++ b/drivers/gpu/drm/radeon/radeon.h
|
|
@@ -294,6 +294,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
|
|
u16 *vddc, u16 *vddci,
|
|
u16 virtual_voltage_id,
|
|
u16 vbios_voltage_id);
|
|
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
|
|
+ u16 virtual_voltage_id,
|
|
+ u16 *voltage);
|
|
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
|
|
u8 voltage_type,
|
|
u16 nominal_voltage,
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
|
|
index 5600d4c..d625a14 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_asic.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
|
|
@@ -335,6 +335,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
|
|
.set_wptr = &r100_gfx_set_wptr,
|
|
};
|
|
|
|
+static struct radeon_asic_ring rv515_gfx_ring = {
|
|
+ .ib_execute = &r100_ring_ib_execute,
|
|
+ .emit_fence = &r300_fence_ring_emit,
|
|
+ .emit_semaphore = &r100_semaphore_ring_emit,
|
|
+ .cs_parse = &r300_cs_parse,
|
|
+ .ring_start = &rv515_ring_start,
|
|
+ .ring_test = &r100_ring_test,
|
|
+ .ib_test = &r100_ib_test,
|
|
+ .is_lockup = &r100_gpu_is_lockup,
|
|
+ .get_rptr = &r100_gfx_get_rptr,
|
|
+ .get_wptr = &r100_gfx_get_wptr,
|
|
+ .set_wptr = &r100_gfx_set_wptr,
|
|
+};
|
|
+
|
|
static struct radeon_asic r300_asic = {
|
|
.init = &r300_init,
|
|
.fini = &r300_fini,
|
|
@@ -756,7 +770,7 @@ static struct radeon_asic rv515_asic = {
|
|
.set_page = &rv370_pcie_gart_set_page,
|
|
},
|
|
.ring = {
|
|
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
|
|
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
|
|
},
|
|
.irq = {
|
|
.set = &rs600_irq_set,
|
|
@@ -823,7 +837,7 @@ static struct radeon_asic r520_asic = {
|
|
.set_page = &rv370_pcie_gart_set_page,
|
|
},
|
|
.ring = {
|
|
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
|
|
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
|
|
},
|
|
.irq = {
|
|
.set = &rs600_irq_set,
|
|
@@ -1159,7 +1173,7 @@ static struct radeon_asic rs780_asic = {
|
|
static struct radeon_asic_ring rv770_uvd_ring = {
|
|
.ib_execute = &uvd_v1_0_ib_execute,
|
|
.emit_fence = &uvd_v2_2_fence_emit,
|
|
- .emit_semaphore = &uvd_v1_0_semaphore_emit,
|
|
+ .emit_semaphore = &uvd_v2_2_semaphore_emit,
|
|
.cs_parse = &radeon_uvd_cs_parse,
|
|
.ring_test = &uvd_v1_0_ring_test,
|
|
.ib_test = &uvd_v1_0_ib_test,
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
|
|
index ae637cf..f6e19ed 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_asic.h
|
|
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
|
|
@@ -853,6 +853,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
|
int uvd_v2_2_resume(struct radeon_device *rdev);
|
|
void uvd_v2_2_fence_emit(struct radeon_device *rdev,
|
|
struct radeon_fence *fence);
|
|
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
|
|
+ struct radeon_ring *ring,
|
|
+ struct radeon_semaphore *semaphore,
|
|
+ bool emit_wait);
|
|
|
|
/* uvd v3.1 */
|
|
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
|
|
index 3084481..2fa3cf6 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
|
|
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
|
|
}
|
|
}
|
|
|
|
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
|
|
+ if ((dev->pdev->device == 0x9805) &&
|
|
+ (dev->pdev->subsystem_vendor == 0x1734) &&
|
|
+ (dev->pdev->subsystem_device == 0x11bd)) {
|
|
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA)
|
|
+ return false;
|
|
+ }
|
|
|
|
return true;
|
|
}
|
|
@@ -1955,7 +1962,7 @@ static const char *thermal_controller_names[] = {
|
|
"adm1032",
|
|
"adm1030",
|
|
"max6649",
|
|
- "lm64",
|
|
+ "lm63", /* lm64 */
|
|
"f75375",
|
|
"asc7xxx",
|
|
};
|
|
@@ -1966,7 +1973,7 @@ static const char *pp_lib_thermal_controller_names[] = {
|
|
"adm1032",
|
|
"adm1030",
|
|
"max6649",
|
|
- "lm64",
|
|
+ "lm63", /* lm64 */
|
|
"f75375",
|
|
"RV6xx",
|
|
"RV770",
|
|
@@ -2273,19 +2280,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
|
|
(controller->ucFanParameters &
|
|
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
|
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
|
|
- } else if ((controller->ucType ==
|
|
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
|
|
- (controller->ucType ==
|
|
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
|
|
- (controller->ucType ==
|
|
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
|
|
- DRM_INFO("Special thermal controller config\n");
|
|
+ } else if (controller->ucType ==
|
|
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
|
|
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
|
|
+ (controller->ucFanParameters &
|
|
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
|
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
|
|
+ } else if (controller->ucType ==
|
|
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
|
|
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
|
|
+ (controller->ucFanParameters &
|
|
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
|
+ rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
|
|
+ } else if (controller->ucType ==
|
|
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
|
|
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
|
|
+ (controller->ucFanParameters &
|
|
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
|
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
|
|
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
|
|
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
|
|
pp_lib_thermal_controller_names[controller->ucType],
|
|
controller->ucI2cAddress >> 1,
|
|
(controller->ucFanParameters &
|
|
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
|
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
|
|
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
|
|
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
|
|
if (rdev->pm.i2c_bus) {
|
|
@@ -3228,6 +3247,42 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
|
|
return 0;
|
|
}
|
|
|
|
+union get_voltage_info {
|
|
+ struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
|
|
+ struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
|
|
+};
|
|
+
|
|
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
|
|
+ u16 virtual_voltage_id,
|
|
+ u16 *voltage)
|
|
+{
|
|
+ int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
|
|
+ u32 entry_id;
|
|
+ u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
|
|
+ union get_voltage_info args;
|
|
+
|
|
+ for (entry_id = 0; entry_id < count; entry_id++) {
|
|
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
|
|
+ virtual_voltage_id)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (entry_id >= count)
|
|
+ return -EINVAL;
|
|
+
|
|
+ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
|
|
+ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
|
|
+ args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
|
|
+ args.in.ulSCLKFreq =
|
|
+ cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
|
|
+
|
|
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
|
+
|
|
+ *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
|
|
u16 voltage_level, u8 voltage_type,
|
|
u32 *gpio_value, u32 *gpio_mask)
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
|
|
index 9ab3097..c43335c 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_bios.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
|
|
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
|
|
|
|
static bool radeon_read_bios(struct radeon_device *rdev)
|
|
{
|
|
- uint8_t __iomem *bios;
|
|
+ uint8_t __iomem *bios, val1, val2;
|
|
size_t size;
|
|
|
|
rdev->bios = NULL;
|
|
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
|
|
return false;
|
|
}
|
|
|
|
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
|
|
+ val1 = readb(&bios[0]);
|
|
+ val2 = readb(&bios[1]);
|
|
+
|
|
+ if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
|
|
pci_unmap_rom(rdev->pdev, bios);
|
|
return false;
|
|
}
|
|
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
|
|
+ rdev->bios = kzalloc(size, GFP_KERNEL);
|
|
if (rdev->bios == NULL) {
|
|
pci_unmap_rom(rdev->pdev, bios);
|
|
return false;
|
|
}
|
|
+ memcpy_fromio(rdev->bios, bios, size);
|
|
pci_unmap_rom(rdev->pdev, bios);
|
|
return true;
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
|
|
index 6651177..79a2669 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_combios.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
|
|
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
|
|
|
|
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
|
|
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
|
|
+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
|
|
+
|
|
+ if (hss > lvds->native_mode.hdisplay)
|
|
+ hss = (10 - 1) * 8;
|
|
+
|
|
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
|
|
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
|
|
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
|
|
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
|
|
+ hss;
|
|
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
|
|
(RBIOS8(tmp + 23) * 8);
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
|
|
index 4d36b9e..17ae621 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
|
|
@@ -71,6 +71,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
|
|
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
|
|
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
|
|
} else if (radeon_dp_needs_link_train(radeon_connector)) {
|
|
+ /* Don't try to start link training before we
|
|
+ * have the dpcd */
|
|
+ if (!radeon_dp_getdpcd(radeon_connector))
|
|
+ return;
|
|
+
|
|
/* set it to OFF so that drm_helper_connector_dpms()
|
|
* won't return immediately since the current state
|
|
* is ON at this point.
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
|
|
index 7f2d6c0..2f2d2ce 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_cs.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
|
|
@@ -179,11 +179,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
|
|
u32 ring = RADEON_CS_RING_GFX;
|
|
s32 priority = 0;
|
|
|
|
+ INIT_LIST_HEAD(&p->validated);
|
|
+
|
|
if (!cs->num_chunks) {
|
|
return 0;
|
|
}
|
|
+
|
|
/* get chunks */
|
|
- INIT_LIST_HEAD(&p->validated);
|
|
p->idx = 0;
|
|
p->ib.sa_bo = NULL;
|
|
p->ib.semaphore = NULL;
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
|
|
index 0bf6f4a..129915e 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_device.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_device.c
|
|
@@ -1314,7 +1314,7 @@ int radeon_device_init(struct radeon_device *rdev,
|
|
|
|
r = radeon_init(rdev);
|
|
if (r)
|
|
- return r;
|
|
+ goto failed;
|
|
|
|
r = radeon_ib_ring_tests(rdev);
|
|
if (r)
|
|
@@ -1334,7 +1334,22 @@ int radeon_device_init(struct radeon_device *rdev,
|
|
radeon_agp_disable(rdev);
|
|
r = radeon_init(rdev);
|
|
if (r)
|
|
- return r;
|
|
+ goto failed;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
|
|
+ * after the CP ring have chew one packet at least. Hence here we stop
|
|
+ * and restart DPM after the radeon_ib_ring_tests().
|
|
+ */
|
|
+ if (rdev->pm.dpm_enabled &&
|
|
+ (rdev->pm.pm_method == PM_METHOD_DPM) &&
|
|
+ (rdev->family == CHIP_TURKS) &&
|
|
+ (rdev->flags & RADEON_IS_MOBILITY)) {
|
|
+ mutex_lock(&rdev->pm.mutex);
|
|
+ radeon_dpm_disable(rdev);
|
|
+ radeon_dpm_enable(rdev);
|
|
+ mutex_unlock(&rdev->pm.mutex);
|
|
}
|
|
|
|
if ((radeon_testing & 1)) {
|
|
@@ -1356,6 +1371,11 @@ int radeon_device_init(struct radeon_device *rdev,
|
|
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
|
|
}
|
|
return 0;
|
|
+
|
|
+failed:
|
|
+ if (runtime)
|
|
+ vga_switcheroo_fini_domain_pm_ops(rdev->dev);
|
|
+ return r;
|
|
}
|
|
|
|
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
|
|
@@ -1376,6 +1396,8 @@ void radeon_device_fini(struct radeon_device *rdev)
|
|
radeon_bo_evict_vram(rdev);
|
|
radeon_fini(rdev);
|
|
vga_switcheroo_unregister_client(rdev->pdev);
|
|
+ if (rdev->flags & RADEON_IS_PX)
|
|
+ vga_switcheroo_fini_domain_pm_ops(rdev->dev);
|
|
vga_client_register(rdev->pdev, NULL, NULL, NULL);
|
|
if (rdev->rio_mem)
|
|
pci_iounmap(rdev->pdev, rdev->rio_mem);
|
|
@@ -1600,7 +1622,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
|
|
radeon_save_bios_scratch_regs(rdev);
|
|
/* block TTM */
|
|
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
|
|
- radeon_pm_suspend(rdev);
|
|
radeon_suspend(rdev);
|
|
|
|
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
|
|
@@ -1646,9 +1667,24 @@ retry:
|
|
}
|
|
}
|
|
|
|
- radeon_pm_resume(rdev);
|
|
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
|
|
+ /* do dpm late init */
|
|
+ r = radeon_pm_late_init(rdev);
|
|
+ if (r) {
|
|
+ rdev->pm.dpm_enabled = false;
|
|
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
|
|
+ }
|
|
+ } else {
|
|
+ /* resume old pm late */
|
|
+ radeon_pm_resume(rdev);
|
|
+ }
|
|
+
|
|
drm_helper_resume_force_mode(rdev->ddev);
|
|
|
|
+ /* set the power state here in case we are a PX system or headless */
|
|
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
|
|
+ radeon_pm_compute_clocks(rdev);
|
|
+
|
|
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
|
|
if (r) {
|
|
/* bad news, how to tell it to userspace ? */
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
|
|
index a8f9b46..e609722 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_gart.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
|
|
@@ -251,8 +251,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
|
|
}
|
|
}
|
|
}
|
|
- mb();
|
|
- radeon_gart_tlb_flush(rdev);
|
|
+ if (rdev->gart.ptr) {
|
|
+ mb();
|
|
+ radeon_gart_tlb_flush(rdev);
|
|
+ }
|
|
}
|
|
|
|
/**
|
|
@@ -294,8 +296,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
|
|
}
|
|
}
|
|
}
|
|
- mb();
|
|
- radeon_gart_tlb_flush(rdev);
|
|
+ if (rdev->gart.ptr) {
|
|
+ mb();
|
|
+ radeon_gart_tlb_flush(rdev);
|
|
+ }
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
|
|
index 089c9ff..f8b20e1 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
|
|
@@ -79,10 +79,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
|
|
struct drm_mode_config *mode_config = &dev->mode_config;
|
|
struct drm_connector *connector;
|
|
|
|
+ mutex_lock(&mode_config->mutex);
|
|
if (mode_config->num_connector) {
|
|
list_for_each_entry(connector, &mode_config->connector_list, head)
|
|
radeon_connector_hotplug(connector);
|
|
}
|
|
+ mutex_unlock(&mode_config->mutex);
|
|
/* Just fire off a uevent and let userspace tell us what to do */
|
|
drm_helper_hpd_irq_event(dev);
|
|
}
|
|
@@ -202,6 +204,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
|
|
if (rdev->flags & RADEON_IS_AGP)
|
|
return false;
|
|
|
|
+ /*
|
|
+ * Older chips have a HW limitation, they can only generate 40 bits
|
|
+ * of address for "64-bit" MSIs which breaks on some platforms, notably
|
|
+ * IBM POWER servers, so we limit them
|
|
+ */
|
|
+ if (rdev->family < CHIP_BONAIRE) {
|
|
+ dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
|
|
+ rdev->pdev->no_64bit_msi = 1;
|
|
+ }
|
|
+
|
|
/* force MSI on */
|
|
if (radeon_msi == 1)
|
|
return true;
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
|
|
index ea34a31..6bffe82 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_kms.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
|
|
@@ -254,7 +254,14 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
|
}
|
|
break;
|
|
case RADEON_INFO_ACCEL_WORKING2:
|
|
- *value = rdev->accel_working;
|
|
+ if (rdev->family == CHIP_HAWAII) {
|
|
+ if (rdev->accel_working)
|
|
+ *value = 2;
|
|
+ else
|
|
+ *value = 0;
|
|
+ } else {
|
|
+ *value = rdev->accel_working;
|
|
+ }
|
|
break;
|
|
case RADEON_INFO_TILING_CONFIG:
|
|
if (rdev->family >= CHIP_BONAIRE)
|
|
@@ -733,6 +740,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
|
|
|
|
/* Get associated drm_crtc: */
|
|
drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
|
|
+ if (!drmcrtc)
|
|
+ return -EINVAL;
|
|
|
|
/* Helper routine in DRM core does all the work: */
|
|
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
|
|
index f77d9d0..0095ee7 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_pm.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
|
|
@@ -458,10 +458,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
|
|
struct radeon_device *rdev = ddev->dev_private;
|
|
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
|
|
|
|
- if ((rdev->flags & RADEON_IS_PX) &&
|
|
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
|
- return snprintf(buf, PAGE_SIZE, "off\n");
|
|
-
|
|
return snprintf(buf, PAGE_SIZE, "%s\n",
|
|
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
|
|
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
|
|
@@ -475,11 +471,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
|
|
struct drm_device *ddev = dev_get_drvdata(dev);
|
|
struct radeon_device *rdev = ddev->dev_private;
|
|
|
|
- /* Can't set dpm state when the card is off */
|
|
- if ((rdev->flags & RADEON_IS_PX) &&
|
|
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
|
- return -EINVAL;
|
|
-
|
|
mutex_lock(&rdev->pm.mutex);
|
|
if (strncmp("battery", buf, strlen("battery")) == 0)
|
|
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
|
|
@@ -493,7 +484,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
|
|
goto fail;
|
|
}
|
|
mutex_unlock(&rdev->pm.mutex);
|
|
- radeon_pm_compute_clocks(rdev);
|
|
+
|
|
+ /* Can't set dpm state when the card is off */
|
|
+ if (!(rdev->flags & RADEON_IS_PX) ||
|
|
+ (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
|
|
+ radeon_pm_compute_clocks(rdev);
|
|
+
|
|
fail:
|
|
return count;
|
|
}
|
|
@@ -1264,8 +1260,39 @@ dpm_failed:
|
|
return ret;
|
|
}
|
|
|
|
+struct radeon_dpm_quirk {
|
|
+ u32 chip_vendor;
|
|
+ u32 chip_device;
|
|
+ u32 subsys_vendor;
|
|
+ u32 subsys_device;
|
|
+};
|
|
+
|
|
+/* cards with dpm stability problems */
|
|
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
|
|
+ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
|
|
+ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
|
|
+ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
|
|
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
|
|
+ { 0, 0, 0, 0 },
|
|
+};
|
|
+
|
|
int radeon_pm_init(struct radeon_device *rdev)
|
|
{
|
|
+ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
|
|
+ bool disable_dpm = false;
|
|
+
|
|
+ /* Apply dpm quirks */
|
|
+ while (p && p->chip_device != 0) {
|
|
+ if (rdev->pdev->vendor == p->chip_vendor &&
|
|
+ rdev->pdev->device == p->chip_device &&
|
|
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
|
|
+ rdev->pdev->subsystem_device == p->subsys_device) {
|
|
+ disable_dpm = true;
|
|
+ break;
|
|
+ }
|
|
+ ++p;
|
|
+ }
|
|
+
|
|
/* enable dpm on rv6xx+ */
|
|
switch (rdev->family) {
|
|
case CHIP_RV610:
|
|
@@ -1276,10 +1303,6 @@ int radeon_pm_init(struct radeon_device *rdev)
|
|
case CHIP_RS780:
|
|
case CHIP_RS880:
|
|
case CHIP_RV770:
|
|
- case CHIP_BARTS:
|
|
- case CHIP_TURKS:
|
|
- case CHIP_CAICOS:
|
|
- case CHIP_CAYMAN:
|
|
/* DPM requires the RLC, RV770+ dGPU requires SMC */
|
|
if (!rdev->rlc_fw)
|
|
rdev->pm.pm_method = PM_METHOD_PROFILE;
|
|
@@ -1303,6 +1326,10 @@ int radeon_pm_init(struct radeon_device *rdev)
|
|
case CHIP_PALM:
|
|
case CHIP_SUMO:
|
|
case CHIP_SUMO2:
|
|
+ case CHIP_BARTS:
|
|
+ case CHIP_TURKS:
|
|
+ case CHIP_CAICOS:
|
|
+ case CHIP_CAYMAN:
|
|
case CHIP_ARUBA:
|
|
case CHIP_TAHITI:
|
|
case CHIP_PITCAIRN:
|
|
@@ -1320,6 +1347,8 @@ int radeon_pm_init(struct radeon_device *rdev)
|
|
(!(rdev->flags & RADEON_IS_IGP)) &&
|
|
(!rdev->smc_fw))
|
|
rdev->pm.pm_method = PM_METHOD_PROFILE;
|
|
+ else if (disable_dpm && (radeon_dpm == -1))
|
|
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
|
|
else if (radeon_dpm == 0)
|
|
rdev->pm.pm_method = PM_METHOD_PROFILE;
|
|
else
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
|
|
index 9006b32..eb7b600 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
|
|
@@ -34,7 +34,7 @@
|
|
int radeon_semaphore_create(struct radeon_device *rdev,
|
|
struct radeon_semaphore **semaphore)
|
|
{
|
|
- uint32_t *cpu_addr;
|
|
+ uint64_t *cpu_addr;
|
|
int i, r;
|
|
|
|
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
index 040a2a1..45a9a03 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
@@ -191,7 +191,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
|
|
rbo = container_of(bo, struct radeon_bo, tbo);
|
|
switch (bo->mem.mem_type) {
|
|
case TTM_PL_VRAM:
|
|
- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
|
|
+ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
|
|
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
|
|
else
|
|
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
|
|
index 414e079..bcfac76 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
|
|
@@ -350,6 +350,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
|
|
return 0;
|
|
}
|
|
|
|
+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
|
|
+ unsigned stream_type)
|
|
+{
|
|
+ switch (stream_type) {
|
|
+ case 0: /* H264 */
|
|
+ case 1: /* VC1 */
|
|
+ /* always supported */
|
|
+ return 0;
|
|
+
|
|
+ case 3: /* MPEG2 */
|
|
+ case 4: /* MPEG4 */
|
|
+ /* only since UVD 3 */
|
|
+ if (p->rdev->family >= CHIP_PALM)
|
|
+ return 0;
|
|
+
|
|
+ /* fall through */
|
|
+ default:
|
|
+ DRM_ERROR("UVD codec not supported by hardware %d!\n",
|
|
+ stream_type);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
|
|
unsigned offset, unsigned buf_sizes[])
|
|
{
|
|
@@ -388,50 +411,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (msg_type == 1) {
|
|
- /* it's a decode msg, calc buffer sizes */
|
|
- r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
|
|
- /* calc image size (width * height) */
|
|
- img_size = msg[6] * msg[7];
|
|
+ switch (msg_type) {
|
|
+ case 0:
|
|
+ /* it's a create msg, calc image size (width * height) */
|
|
+ img_size = msg[7] * msg[8];
|
|
+
|
|
+ r = radeon_uvd_validate_codec(p, msg[4]);
|
|
+ radeon_bo_kunmap(bo);
|
|
+ if (r)
|
|
+ return r;
|
|
+
|
|
+ /* try to alloc a new handle */
|
|
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
|
|
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
|
|
+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
|
|
+ p->rdev->uvd.filp[i] = p->filp;
|
|
+ p->rdev->uvd.img_size[i] = img_size;
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ DRM_ERROR("No more free UVD handles!\n");
|
|
+ return -EINVAL;
|
|
+
|
|
+ case 1:
|
|
+ /* it's a decode msg, validate codec and calc buffer sizes */
|
|
+ r = radeon_uvd_validate_codec(p, msg[4]);
|
|
+ if (!r)
|
|
+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
|
|
radeon_bo_kunmap(bo);
|
|
if (r)
|
|
return r;
|
|
|
|
- } else if (msg_type == 2) {
|
|
+ /* validate the handle */
|
|
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
|
|
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
|
|
+ if (p->rdev->uvd.filp[i] != p->filp) {
|
|
+ DRM_ERROR("UVD handle collision detected!\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
|
|
+ return -ENOENT;
|
|
+
|
|
+ case 2:
|
|
/* it's a destroy msg, free the handle */
|
|
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
|
|
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
|
|
radeon_bo_kunmap(bo);
|
|
return 0;
|
|
- } else {
|
|
- /* it's a create msg, calc image size (width * height) */
|
|
- img_size = msg[7] * msg[8];
|
|
- radeon_bo_kunmap(bo);
|
|
|
|
- if (msg_type != 0) {
|
|
- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- /* it's a create msg, no special handling needed */
|
|
- }
|
|
-
|
|
- /* create or decode, validate the handle */
|
|
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
|
|
- if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
|
|
- return 0;
|
|
- }
|
|
+ default:
|
|
|
|
- /* handle not found try to alloc a new one */
|
|
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
|
|
- if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
|
|
- p->rdev->uvd.filp[i] = p->filp;
|
|
- p->rdev->uvd.img_size[i] = img_size;
|
|
- return 0;
|
|
- }
|
|
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
- DRM_ERROR("No more free UVD handles!\n");
|
|
+ BUG();
|
|
return -EINVAL;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
|
|
index 95b693c..4261b38 100644
|
|
--- a/drivers/gpu/drm/radeon/rs600.c
|
|
+++ b/drivers/gpu/drm/radeon/rs600.c
|
|
@@ -700,6 +700,10 @@ int rs600_irq_set(struct radeon_device *rdev)
|
|
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
|
|
if (ASIC_IS_DCE2(rdev))
|
|
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
|
|
+
|
|
+ /* posting read */
|
|
+ RREG32(R_000040_GEN_INT_CNTL);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -890,6 +894,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
|
|
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
|
|
/* FIXME: implement full support */
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
if (rdev->mode_info.crtcs[0]->base.enabled)
|
|
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
|
|
index 3462b64..0a2d36e 100644
|
|
--- a/drivers/gpu/drm/radeon/rs690.c
|
|
+++ b/drivers/gpu/drm/radeon/rs690.c
|
|
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
|
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
|
|
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
if (rdev->mode_info.crtcs[0]->base.enabled)
|
|
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
|
|
index 237dd29..b49965a 100644
|
|
--- a/drivers/gpu/drm/radeon/rv515.c
|
|
+++ b/drivers/gpu/drm/radeon/rv515.c
|
|
@@ -1276,6 +1276,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
|
|
struct drm_display_mode *mode0 = NULL;
|
|
struct drm_display_mode *mode1 = NULL;
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
if (rdev->mode_info.crtcs[0]->base.enabled)
|
|
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
|
|
index 3cf1e29..9ef2064 100644
|
|
--- a/drivers/gpu/drm/radeon/rv770d.h
|
|
+++ b/drivers/gpu/drm/radeon/rv770d.h
|
|
@@ -989,6 +989,9 @@
|
|
((n) & 0x3FFF) << 16)
|
|
|
|
/* UVD */
|
|
+#define UVD_SEMA_ADDR_LOW 0xef00
|
|
+#define UVD_SEMA_ADDR_HIGH 0xef04
|
|
+#define UVD_SEMA_CMD 0xef08
|
|
#define UVD_GPCOM_VCPU_CMD 0xef0c
|
|
#define UVD_GPCOM_VCPU_DATA0 0xef10
|
|
#define UVD_GPCOM_VCPU_DATA1 0xef14
|
|
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
|
|
index cb7508d..49da9fc 100644
|
|
--- a/drivers/gpu/drm/radeon/si.c
|
|
+++ b/drivers/gpu/drm/radeon/si.c
|
|
@@ -2227,6 +2227,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
|
|
u32 num_heads = 0, lb_size;
|
|
int i;
|
|
|
|
+ if (!rdev->mode_info.mode_config_initialized)
|
|
+ return;
|
|
+
|
|
radeon_update_display_priority(rdev);
|
|
|
|
for (i = 0; i < rdev->num_crtc; i++) {
|
|
@@ -4081,7 +4084,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
|
|
/* empty context1-15 */
|
|
/* set vm size, must be a multiple of 4 */
|
|
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
|
|
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
|
|
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
|
|
/* Assign the pt base to something valid for now; the pts used for
|
|
* the VMs are determined by the application and setup and assigned
|
|
* on the fly in the vm part of radeon_gart.c
|
|
@@ -4810,7 +4813,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
|
|
|
|
/* write new base address */
|
|
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
|
|
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
|
|
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
|
|
WRITE_DATA_DST_SEL(0)));
|
|
|
|
if (vm->id < 8) {
|
|
@@ -5955,6 +5958,9 @@ int si_irq_set(struct radeon_device *rdev)
|
|
|
|
WREG32(CG_THERMAL_INT, thermal_int);
|
|
|
|
+ /* posting read */
|
|
+ RREG32(SRBM_STATUS);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -6098,6 +6104,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
|
|
tmp = RREG32(IH_RB_CNTL);
|
|
tmp |= IH_WPTR_OVERFLOW_CLEAR;
|
|
WREG32(IH_RB_CNTL, tmp);
|
|
+ wptr &= ~RB_OVERFLOW;
|
|
}
|
|
return (wptr & rdev->ih.ptr_mask);
|
|
}
|
|
@@ -6871,8 +6878,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
|
|
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
|
|
|
|
if (!vclk || !dclk) {
|
|
- /* keep the Bypass mode, put PLL to sleep */
|
|
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
|
|
+ /* keep the Bypass mode */
|
|
return 0;
|
|
}
|
|
|
|
@@ -6888,8 +6894,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
|
|
/* set VCO_MODE to 1 */
|
|
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
|
|
|
|
- /* toggle UPLL_SLEEP to 1 then back to 0 */
|
|
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
|
|
+ /* disable sleep mode */
|
|
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
|
|
|
|
/* deassert UPLL_RESET */
|
|
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
|
|
index 0a2f5b4..c9053f7 100644
|
|
--- a/drivers/gpu/drm/radeon/si_dpm.c
|
|
+++ b/drivers/gpu/drm/radeon/si_dpm.c
|
|
@@ -2900,6 +2900,24 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
|
|
return ret;
|
|
}
|
|
|
|
+struct si_dpm_quirk {
|
|
+ u32 chip_vendor;
|
|
+ u32 chip_device;
|
|
+ u32 subsys_vendor;
|
|
+ u32 subsys_device;
|
|
+ u32 max_sclk;
|
|
+ u32 max_mclk;
|
|
+};
|
|
+
|
|
+/* cards with dpm stability problems */
|
|
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
|
|
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
|
|
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
|
|
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
|
|
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
|
|
+ { 0, 0, 0, 0 },
|
|
+};
|
|
+
|
|
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
|
|
struct radeon_ps *rps)
|
|
{
|
|
@@ -2910,7 +2928,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
|
|
u32 mclk, sclk;
|
|
u16 vddc, vddci;
|
|
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
|
|
+ u32 max_sclk = 0, max_mclk = 0;
|
|
int i;
|
|
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
|
|
+
|
|
+ /* Apply dpm quirks */
|
|
+ while (p && p->chip_device != 0) {
|
|
+ if (rdev->pdev->vendor == p->chip_vendor &&
|
|
+ rdev->pdev->device == p->chip_device &&
|
|
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
|
|
+ rdev->pdev->subsystem_device == p->subsys_device) {
|
|
+ max_sclk = p->max_sclk;
|
|
+ max_mclk = p->max_mclk;
|
|
+ break;
|
|
+ }
|
|
+ ++p;
|
|
+ }
|
|
|
|
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
|
|
ni_dpm_vblank_too_short(rdev))
|
|
@@ -2964,6 +2997,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
|
|
if (ps->performance_levels[i].mclk > max_mclk_vddc)
|
|
ps->performance_levels[i].mclk = max_mclk_vddc;
|
|
}
|
|
+ if (max_mclk) {
|
|
+ if (ps->performance_levels[i].mclk > max_mclk)
|
|
+ ps->performance_levels[i].mclk = max_mclk;
|
|
+ }
|
|
+ if (max_sclk) {
|
|
+ if (ps->performance_levels[i].sclk > max_sclk)
|
|
+ ps->performance_levels[i].sclk = max_sclk;
|
|
+ }
|
|
}
|
|
|
|
/* XXX validate the min clocks required for display */
|
|
@@ -6200,7 +6241,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
|
|
if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
|
|
index == 0) {
|
|
/* XXX disable for A0 tahiti */
|
|
- si_pi->ulv.supported = true;
|
|
+ si_pi->ulv.supported = false;
|
|
si_pi->ulv.pl = *pl;
|
|
si_pi->ulv.one_pcie_lane_in_ulv = false;
|
|
si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
|
|
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
|
|
index 2da0e17..d9cfa09b 100644
|
|
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
|
|
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
|
|
@@ -1877,7 +1877,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
|
|
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
|
|
pi->at[i] = TRINITY_AT_DFLT;
|
|
|
|
- pi->enable_bapm = false;
|
|
+ /* There are stability issues reported on with
|
|
+ * bapm enabled when switching between AC and battery
|
|
+ * power. At the same time, some MSI boards hang
|
|
+ * if it's not enabled and dpm is enabled. Just enable
|
|
+ * it for MSI boards right now.
|
|
+ */
|
|
+ if (rdev->pdev->subsystem_vendor == 0x1462)
|
|
+ pi->enable_bapm = true;
|
|
+ else
|
|
+ pi->enable_bapm = false;
|
|
pi->enable_nbps_policy = true;
|
|
pi->enable_sclk_ds = true;
|
|
pi->enable_gfx_power_gating = true;
|
|
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
|
|
index c310a0a..0fa9009 100644
|
|
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
|
|
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
|
|
@@ -365,18 +365,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
|
|
struct radeon_semaphore *semaphore,
|
|
bool emit_wait)
|
|
{
|
|
- uint64_t addr = semaphore->gpu_addr;
|
|
-
|
|
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
|
|
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
|
|
-
|
|
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
|
|
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
|
|
-
|
|
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
|
|
- radeon_ring_write(ring, emit_wait ? 1 : 0);
|
|
-
|
|
- return true;
|
|
+ /* disable semaphores for UVD V1 hardware */
|
|
+ return false;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
|
|
index d177100..19ccb2a 100644
|
|
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
|
|
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
|
|
@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
|
|
}
|
|
|
|
/**
|
|
+ * uvd_v2_2_semaphore_emit - emit semaphore command
|
|
+ *
|
|
+ * @rdev: radeon_device pointer
|
|
+ * @ring: radeon_ring pointer
|
|
+ * @semaphore: semaphore to emit commands for
|
|
+ * @emit_wait: true if we should emit a wait command
|
|
+ *
|
|
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
|
|
+ */
|
|
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
|
|
+ struct radeon_ring *ring,
|
|
+ struct radeon_semaphore *semaphore,
|
|
+ bool emit_wait)
|
|
+{
|
|
+ uint64_t addr = semaphore->gpu_addr;
|
|
+
|
|
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
|
|
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
|
|
+
|
|
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
|
|
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
|
|
+
|
|
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
|
|
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/**
|
|
* uvd_v2_2_resume - memory controller programming
|
|
*
|
|
* @rdev: radeon_device pointer
|
|
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
|
|
index 171a820..52b4711 100644
|
|
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
|
|
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
|
|
@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
|
|
if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
|
|
/* oh nos! */
|
|
dev_err(dev->dev, "no encoders/connectors found\n");
|
|
+ drm_mode_config_cleanup(dev);
|
|
return -ENXIO;
|
|
}
|
|
|
|
@@ -122,6 +123,7 @@ static int tilcdc_unload(struct drm_device *dev)
|
|
struct tilcdc_drm_private *priv = dev->dev_private;
|
|
struct tilcdc_module *mod, *cur;
|
|
|
|
+ drm_fbdev_cma_fini(priv->fbdev);
|
|
drm_kms_helper_poll_fini(dev);
|
|
drm_mode_config_cleanup(dev);
|
|
drm_vblank_cleanup(dev);
|
|
@@ -177,33 +179,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
|
|
dev->dev_private = priv;
|
|
|
|
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
|
|
+ if (!priv->wq) {
|
|
+ ret = -ENOMEM;
|
|
+ goto fail_free_priv;
|
|
+ }
|
|
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
if (!res) {
|
|
dev_err(dev->dev, "failed to get memory resource\n");
|
|
ret = -EINVAL;
|
|
- goto fail;
|
|
+ goto fail_free_wq;
|
|
}
|
|
|
|
priv->mmio = ioremap_nocache(res->start, resource_size(res));
|
|
if (!priv->mmio) {
|
|
dev_err(dev->dev, "failed to ioremap\n");
|
|
ret = -ENOMEM;
|
|
- goto fail;
|
|
+ goto fail_free_wq;
|
|
}
|
|
|
|
priv->clk = clk_get(dev->dev, "fck");
|
|
if (IS_ERR(priv->clk)) {
|
|
dev_err(dev->dev, "failed to get functional clock\n");
|
|
ret = -ENODEV;
|
|
- goto fail;
|
|
+ goto fail_iounmap;
|
|
}
|
|
|
|
priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
|
|
if (IS_ERR(priv->clk)) {
|
|
dev_err(dev->dev, "failed to get display clock\n");
|
|
ret = -ENODEV;
|
|
- goto fail;
|
|
+ goto fail_put_clk;
|
|
}
|
|
|
|
#ifdef CONFIG_CPU_FREQ
|
|
@@ -213,7 +219,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
|
|
CPUFREQ_TRANSITION_NOTIFIER);
|
|
if (ret) {
|
|
dev_err(dev->dev, "failed to register cpufreq notifier\n");
|
|
- goto fail;
|
|
+ goto fail_put_disp_clk;
|
|
}
|
|
#endif
|
|
|
|
@@ -258,13 +264,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
|
|
ret = modeset_init(dev);
|
|
if (ret < 0) {
|
|
dev_err(dev->dev, "failed to initialize mode setting\n");
|
|
- goto fail;
|
|
+ goto fail_cpufreq_unregister;
|
|
}
|
|
|
|
ret = drm_vblank_init(dev, 1);
|
|
if (ret < 0) {
|
|
dev_err(dev->dev, "failed to initialize vblank\n");
|
|
- goto fail;
|
|
+ goto fail_mode_config_cleanup;
|
|
}
|
|
|
|
pm_runtime_get_sync(dev->dev);
|
|
@@ -272,7 +278,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
|
|
pm_runtime_put_sync(dev->dev);
|
|
if (ret < 0) {
|
|
dev_err(dev->dev, "failed to install IRQ handler\n");
|
|
- goto fail;
|
|
+ goto fail_vblank_cleanup;
|
|
}
|
|
|
|
platform_set_drvdata(pdev, dev);
|
|
@@ -288,13 +294,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
|
|
priv->fbdev = drm_fbdev_cma_init(dev, bpp,
|
|
dev->mode_config.num_crtc,
|
|
dev->mode_config.num_connector);
|
|
+ if (IS_ERR(priv->fbdev)) {
|
|
+ ret = PTR_ERR(priv->fbdev);
|
|
+ goto fail_irq_uninstall;
|
|
+ }
|
|
|
|
drm_kms_helper_poll_init(dev);
|
|
|
|
return 0;
|
|
|
|
-fail:
|
|
- tilcdc_unload(dev);
|
|
+fail_irq_uninstall:
|
|
+ pm_runtime_get_sync(dev->dev);
|
|
+ drm_irq_uninstall(dev);
|
|
+ pm_runtime_put_sync(dev->dev);
|
|
+
|
|
+fail_vblank_cleanup:
|
|
+ drm_vblank_cleanup(dev);
|
|
+
|
|
+fail_mode_config_cleanup:
|
|
+ drm_mode_config_cleanup(dev);
|
|
+
|
|
+fail_cpufreq_unregister:
|
|
+ pm_runtime_disable(dev->dev);
|
|
+#ifdef CONFIG_CPU_FREQ
|
|
+ cpufreq_unregister_notifier(&priv->freq_transition,
|
|
+ CPUFREQ_TRANSITION_NOTIFIER);
|
|
+fail_put_disp_clk:
|
|
+ clk_put(priv->disp_clk);
|
|
+#endif
|
|
+
|
|
+fail_put_clk:
|
|
+ clk_put(priv->clk);
|
|
+
|
|
+fail_iounmap:
|
|
+ iounmap(priv->mmio);
|
|
+
|
|
+fail_free_wq:
|
|
+ flush_workqueue(priv->wq);
|
|
+ destroy_workqueue(priv->wq);
|
|
+
|
|
+fail_free_priv:
|
|
+ dev->dev_private = NULL;
|
|
+ kfree(priv);
|
|
return ret;
|
|
}
|
|
|
|
@@ -628,10 +669,10 @@ static int __init tilcdc_drm_init(void)
|
|
static void __exit tilcdc_drm_fini(void)
|
|
{
|
|
DBG("fini");
|
|
- tilcdc_tfp410_fini();
|
|
- tilcdc_slave_fini();
|
|
- tilcdc_panel_fini();
|
|
platform_driver_unregister(&tilcdc_platform_driver);
|
|
+ tilcdc_panel_fini();
|
|
+ tilcdc_slave_fini();
|
|
+ tilcdc_tfp410_fini();
|
|
}
|
|
|
|
late_initcall(tilcdc_drm_init);
|
|
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
|
|
index 86c6732..b085dcc 100644
|
|
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
|
|
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
|
|
@@ -151,6 +151,7 @@ struct panel_connector {
|
|
static void panel_connector_destroy(struct drm_connector *connector)
|
|
{
|
|
struct panel_connector *panel_connector = to_panel_connector(connector);
|
|
+ drm_sysfs_connector_remove(connector);
|
|
drm_connector_cleanup(connector);
|
|
kfree(panel_connector);
|
|
}
|
|
@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
|
|
{
|
|
struct panel_module *panel_mod = to_panel_module(mod);
|
|
|
|
- if (panel_mod->timings) {
|
|
+ if (panel_mod->timings)
|
|
display_timings_release(panel_mod->timings);
|
|
- kfree(panel_mod->timings);
|
|
- }
|
|
|
|
tilcdc_module_cleanup(mod);
|
|
kfree(panel_mod->info);
|
|
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
|
|
index 595068b..2f83ffb 100644
|
|
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
|
|
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
|
|
@@ -166,6 +166,7 @@ struct slave_connector {
|
|
static void slave_connector_destroy(struct drm_connector *connector)
|
|
{
|
|
struct slave_connector *slave_connector = to_slave_connector(connector);
|
|
+ drm_sysfs_connector_remove(connector);
|
|
drm_connector_cleanup(connector);
|
|
kfree(slave_connector);
|
|
}
|
|
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
|
|
index c38b56b..ce75ac8 100644
|
|
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
|
|
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
|
|
@@ -167,6 +167,7 @@ struct tfp410_connector {
|
|
static void tfp410_connector_destroy(struct drm_connector *connector)
|
|
{
|
|
struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
|
|
+ drm_sysfs_connector_remove(connector);
|
|
drm_connector_cleanup(connector);
|
|
kfree(tfp410_connector);
|
|
}
|
|
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
|
|
index 863bef9..76329d2 100644
|
|
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
|
|
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
|
|
@@ -297,9 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
|
|
*
|
|
* @pool: to free the pages from
|
|
* @free_all: If set to true will free all pages in pool
|
|
+ * @use_static: Safe to use static buffer
|
|
**/
|
|
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
|
|
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
|
|
+ bool use_static)
|
|
{
|
|
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
|
|
unsigned long irq_flags;
|
|
struct page *p;
|
|
struct page **pages_to_free;
|
|
@@ -309,8 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
|
|
if (NUM_PAGES_TO_ALLOC < nr_free)
|
|
npages_to_free = NUM_PAGES_TO_ALLOC;
|
|
|
|
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
|
|
- GFP_KERNEL);
|
|
+ if (use_static)
|
|
+ pages_to_free = static_buf;
|
|
+ else
|
|
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
|
|
+ GFP_KERNEL);
|
|
if (!pages_to_free) {
|
|
pr_err("Failed to allocate memory for pool free operation\n");
|
|
return 0;
|
|
@@ -373,7 +379,8 @@ restart:
|
|
if (freed_pages)
|
|
ttm_pages_put(pages_to_free, freed_pages);
|
|
out:
|
|
- kfree(pages_to_free);
|
|
+ if (pages_to_free != static_buf)
|
|
+ kfree(pages_to_free);
|
|
return nr_free;
|
|
}
|
|
|
|
@@ -382,32 +389,33 @@ out:
|
|
*
|
|
* XXX: (dchinner) Deadlock warning!
|
|
*
|
|
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
|
|
- * this can deadlock when called a sc->gfp_mask that is not equal to
|
|
- * GFP_KERNEL.
|
|
- *
|
|
* This code is crying out for a shrinker per pool....
|
|
*/
|
|
static unsigned long
|
|
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
{
|
|
- static atomic_t start_pool = ATOMIC_INIT(0);
|
|
+ static DEFINE_MUTEX(lock);
|
|
+ static unsigned start_pool;
|
|
unsigned i;
|
|
- unsigned pool_offset = atomic_add_return(1, &start_pool);
|
|
+ unsigned pool_offset;
|
|
struct ttm_page_pool *pool;
|
|
int shrink_pages = sc->nr_to_scan;
|
|
unsigned long freed = 0;
|
|
|
|
- pool_offset = pool_offset % NUM_POOLS;
|
|
+ if (!mutex_trylock(&lock))
|
|
+ return SHRINK_STOP;
|
|
+ pool_offset = ++start_pool % NUM_POOLS;
|
|
/* select start pool in round robin fashion */
|
|
for (i = 0; i < NUM_POOLS; ++i) {
|
|
unsigned nr_free = shrink_pages;
|
|
if (shrink_pages == 0)
|
|
break;
|
|
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
|
|
- shrink_pages = ttm_page_pool_free(pool, nr_free);
|
|
+ /* OK to use static buffer since global mutex is held. */
|
|
+ shrink_pages = ttm_page_pool_free(pool, nr_free, true);
|
|
freed += nr_free - shrink_pages;
|
|
}
|
|
+ mutex_unlock(&lock);
|
|
return freed;
|
|
}
|
|
|
|
@@ -706,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
|
}
|
|
spin_unlock_irqrestore(&pool->lock, irq_flags);
|
|
if (npages)
|
|
- ttm_page_pool_free(pool, npages);
|
|
+ ttm_page_pool_free(pool, npages, false);
|
|
}
|
|
|
|
/*
|
|
@@ -845,8 +853,9 @@ void ttm_page_alloc_fini(void)
|
|
pr_info("Finalizing pool allocator\n");
|
|
ttm_pool_mm_shrink_fini(_manager);
|
|
|
|
+ /* OK to use static buffer since global mutex is no longer used. */
|
|
for (i = 0; i < NUM_POOLS; ++i)
|
|
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
|
|
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
|
|
|
|
kobject_put(&_manager->kobj);
|
|
_manager = NULL;
|
|
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
|
|
index fb8259f..3dfa97d 100644
|
|
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
|
|
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
|
|
@@ -411,9 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
|
|
*
|
|
* @pool: to free the pages from
|
|
* @nr_free: If set to true will free all pages in pool
|
|
+ * @use_static: Safe to use static buffer
|
|
**/
|
|
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
|
|
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
|
|
+ bool use_static)
|
|
{
|
|
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
|
|
unsigned long irq_flags;
|
|
struct dma_page *dma_p, *tmp;
|
|
struct page **pages_to_free;
|
|
@@ -430,8 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
|
|
npages_to_free, nr_free);
|
|
}
|
|
#endif
|
|
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
|
|
- GFP_KERNEL);
|
|
+ if (use_static)
|
|
+ pages_to_free = static_buf;
|
|
+ else
|
|
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
|
|
+ GFP_KERNEL);
|
|
|
|
if (!pages_to_free) {
|
|
pr_err("%s: Failed to allocate memory for pool free operation\n",
|
|
@@ -501,7 +507,8 @@ restart:
|
|
if (freed_pages)
|
|
ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
|
|
out:
|
|
- kfree(pages_to_free);
|
|
+ if (pages_to_free != static_buf)
|
|
+ kfree(pages_to_free);
|
|
return nr_free;
|
|
}
|
|
|
|
@@ -530,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
|
|
if (pool->type != type)
|
|
continue;
|
|
/* Takes a spinlock.. */
|
|
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
|
|
+ /* OK to use static buffer since global mutex is held. */
|
|
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
|
|
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
|
|
/* This code path is called after _all_ references to the
|
|
* struct device has been dropped - so nobody should be
|
|
@@ -983,7 +991,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
|
|
|
|
/* shrink pool if necessary (only on !is_cached pools)*/
|
|
if (npages)
|
|
- ttm_dma_page_pool_free(pool, npages);
|
|
+ ttm_dma_page_pool_free(pool, npages, false);
|
|
ttm->state = tt_unpopulated;
|
|
}
|
|
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
|
|
@@ -993,20 +1001,15 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
|
|
*
|
|
* XXX: (dchinner) Deadlock warning!
|
|
*
|
|
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
|
|
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
|
|
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
|
|
- * bad.
|
|
- *
|
|
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
|
|
* shrinkers
|
|
*/
|
|
static unsigned long
|
|
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
{
|
|
- static atomic_t start_pool = ATOMIC_INIT(0);
|
|
+ static unsigned start_pool;
|
|
unsigned idx = 0;
|
|
- unsigned pool_offset = atomic_add_return(1, &start_pool);
|
|
+ unsigned pool_offset;
|
|
unsigned shrink_pages = sc->nr_to_scan;
|
|
struct device_pools *p;
|
|
unsigned long freed = 0;
|
|
@@ -1014,8 +1017,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
if (list_empty(&_manager->pools))
|
|
return SHRINK_STOP;
|
|
|
|
- mutex_lock(&_manager->lock);
|
|
- pool_offset = pool_offset % _manager->npools;
|
|
+ if (!mutex_trylock(&_manager->lock))
|
|
+ return SHRINK_STOP;
|
|
+ if (!_manager->npools)
|
|
+ goto out;
|
|
+ pool_offset = ++start_pool % _manager->npools;
|
|
list_for_each_entry(p, &_manager->pools, pools) {
|
|
unsigned nr_free;
|
|
|
|
@@ -1027,13 +1033,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
if (++idx < pool_offset)
|
|
continue;
|
|
nr_free = shrink_pages;
|
|
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
|
|
+ /* OK to use static buffer since global mutex is held. */
|
|
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
|
|
freed += nr_free - shrink_pages;
|
|
|
|
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
|
|
p->pool->dev_name, p->pool->name, current->pid,
|
|
nr_free, shrink_pages);
|
|
}
|
|
+out:
|
|
mutex_unlock(&_manager->lock);
|
|
return freed;
|
|
}
|
|
@@ -1044,7 +1052,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
|
|
struct device_pools *p;
|
|
unsigned long count = 0;
|
|
|
|
- mutex_lock(&_manager->lock);
|
|
+ if (!mutex_trylock(&_manager->lock))
|
|
+ return 0;
|
|
list_for_each_entry(p, &_manager->pools, pools)
|
|
count += p->pool->npages_free;
|
|
mutex_unlock(&_manager->lock);
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
|
|
index 0083cbf..0771dcb 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
|
|
@@ -688,7 +688,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|
goto out_err0;
|
|
}
|
|
|
|
- if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
|
|
+ /*
|
|
+ * Limit back buffer size to VRAM size. Remove this once
|
|
+ * screen targets are implemented.
|
|
+ */
|
|
+ if (dev_priv->prim_bb_mem > dev_priv->vram_size)
|
|
dev_priv->prim_bb_mem = dev_priv->vram_size;
|
|
|
|
mutex_unlock(&dev_priv->hw_mutex);
|
|
@@ -729,32 +733,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|
goto out_err1;
|
|
}
|
|
|
|
- ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
|
|
- (dev_priv->vram_size >> PAGE_SHIFT));
|
|
- if (unlikely(ret != 0)) {
|
|
- DRM_ERROR("Failed initializing memory manager for VRAM.\n");
|
|
- goto out_err2;
|
|
- }
|
|
-
|
|
- dev_priv->has_gmr = true;
|
|
- if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
|
|
- refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
|
|
- VMW_PL_GMR) != 0) {
|
|
- DRM_INFO("No GMR memory available. "
|
|
- "Graphics memory resources are very limited.\n");
|
|
- dev_priv->has_gmr = false;
|
|
- }
|
|
-
|
|
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
|
|
- dev_priv->has_mob = true;
|
|
- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
|
|
- VMW_PL_MOB) != 0) {
|
|
- DRM_INFO("No MOB memory available. "
|
|
- "3D will be disabled.\n");
|
|
- dev_priv->has_mob = false;
|
|
- }
|
|
- }
|
|
-
|
|
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
|
|
dev_priv->mmio_size);
|
|
|
|
@@ -817,6 +795,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
|
goto out_no_fman;
|
|
}
|
|
|
|
+
|
|
+ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
|
|
+ (dev_priv->vram_size >> PAGE_SHIFT));
|
|
+ if (unlikely(ret != 0)) {
|
|
+ DRM_ERROR("Failed initializing memory manager for VRAM.\n");
|
|
+ goto out_no_vram;
|
|
+ }
|
|
+
|
|
+ dev_priv->has_gmr = true;
|
|
+ if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
|
|
+ refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
|
|
+ VMW_PL_GMR) != 0) {
|
|
+ DRM_INFO("No GMR memory available. "
|
|
+ "Graphics memory resources are very limited.\n");
|
|
+ dev_priv->has_gmr = false;
|
|
+ }
|
|
+
|
|
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
|
|
+ dev_priv->has_mob = true;
|
|
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
|
|
+ VMW_PL_MOB) != 0) {
|
|
+ DRM_INFO("No MOB memory available. "
|
|
+ "3D will be disabled.\n");
|
|
+ dev_priv->has_mob = false;
|
|
+ }
|
|
+ }
|
|
+
|
|
vmw_kms_save_vga(dev_priv);
|
|
|
|
/* Start kms and overlay systems, needs fifo. */
|
|
@@ -842,6 +847,12 @@ out_no_fifo:
|
|
vmw_kms_close(dev_priv);
|
|
out_no_kms:
|
|
vmw_kms_restore_vga(dev_priv);
|
|
+ if (dev_priv->has_mob)
|
|
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
|
|
+ if (dev_priv->has_gmr)
|
|
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
|
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
|
+out_no_vram:
|
|
vmw_fence_manager_takedown(dev_priv->fman);
|
|
out_no_fman:
|
|
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
|
|
@@ -857,12 +868,6 @@ out_err4:
|
|
iounmap(dev_priv->mmio_virt);
|
|
out_err3:
|
|
arch_phys_wc_del(dev_priv->mmio_mtrr);
|
|
- if (dev_priv->has_mob)
|
|
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
|
|
- if (dev_priv->has_gmr)
|
|
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
|
- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
|
-out_err2:
|
|
(void)ttm_bo_device_release(&dev_priv->bdev);
|
|
out_err1:
|
|
vmw_ttm_global_release(dev_priv);
|
|
@@ -892,6 +897,13 @@ static int vmw_driver_unload(struct drm_device *dev)
|
|
}
|
|
vmw_kms_close(dev_priv);
|
|
vmw_overlay_close(dev_priv);
|
|
+
|
|
+ if (dev_priv->has_mob)
|
|
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
|
|
+ if (dev_priv->has_gmr)
|
|
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
|
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
|
+
|
|
vmw_fence_manager_takedown(dev_priv->fman);
|
|
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
|
|
drm_irq_uninstall(dev_priv->dev);
|
|
@@ -903,11 +915,6 @@ static int vmw_driver_unload(struct drm_device *dev)
|
|
ttm_object_device_release(&dev_priv->tdev);
|
|
iounmap(dev_priv->mmio_virt);
|
|
arch_phys_wc_del(dev_priv->mmio_mtrr);
|
|
- if (dev_priv->has_mob)
|
|
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
|
|
- if (dev_priv->has_gmr)
|
|
- (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
|
- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
|
(void)ttm_bo_device_release(&dev_priv->bdev);
|
|
vmw_ttm_global_release(dev_priv);
|
|
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
|
|
index a3480c1..9fe10d1 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
|
|
@@ -2475,7 +2475,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|
|
|
ret = vmw_resources_validate(sw_context);
|
|
if (unlikely(ret != 0))
|
|
- goto out_err;
|
|
+ goto out_err_nores;
|
|
|
|
if (throttle_us) {
|
|
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
|
|
@@ -2511,6 +2511,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|
vmw_resource_relocations_free(&sw_context->res_relocations);
|
|
|
|
vmw_fifo_commit(dev_priv, command_size);
|
|
+ mutex_unlock(&dev_priv->binding_mutex);
|
|
|
|
vmw_query_bo_switch_commit(dev_priv, sw_context);
|
|
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
|
|
@@ -2526,7 +2527,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|
DRM_ERROR("Fence submission error. Syncing.\n");
|
|
|
|
vmw_resource_list_unreserve(&sw_context->resource_list, false);
|
|
- mutex_unlock(&dev_priv->binding_mutex);
|
|
|
|
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
|
|
(void *) fence);
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
|
|
index 436b013..b65272d 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
|
|
@@ -1049,6 +1049,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
|
|
if (ret != 0)
|
|
goto out_no_queue;
|
|
|
|
+ return 0;
|
|
+
|
|
out_no_queue:
|
|
event->base.destroy(&event->base);
|
|
out_no_event:
|
|
@@ -1124,17 +1126,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
|
|
|
|
BUG_ON(fence == NULL);
|
|
|
|
- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
|
|
- ret = vmw_event_fence_action_create(file_priv, fence,
|
|
- arg->flags,
|
|
- arg->user_data,
|
|
- true);
|
|
- else
|
|
- ret = vmw_event_fence_action_create(file_priv, fence,
|
|
- arg->flags,
|
|
- arg->user_data,
|
|
- true);
|
|
-
|
|
+ ret = vmw_event_fence_action_create(file_priv, fence,
|
|
+ arg->flags,
|
|
+ arg->user_data,
|
|
+ true);
|
|
if (unlikely(ret != 0)) {
|
|
if (ret != -ERESTARTSYS)
|
|
DRM_ERROR("Failed to attach event to fence.\n");
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
|
index 6ccd993..6eae14d 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
|
@@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
|
|
|
|
mutex_lock(&dev_priv->hw_mutex);
|
|
|
|
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
|
|
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
|
|
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
|
|
+ ;
|
|
|
|
dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
|
|
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
|
index 8a65041..c8f8ecf 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
|
@@ -1954,6 +1954,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
|
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
|
|
};
|
|
int i;
|
|
+ u32 assumed_bpp = 2;
|
|
+
|
|
+ /*
|
|
+ * If using screen objects, then assume 32-bpp because that's what the
|
|
+ * SVGA device is assuming
|
|
+ */
|
|
+ if (dev_priv->sou_priv)
|
|
+ assumed_bpp = 4;
|
|
|
|
/* Add preferred mode */
|
|
{
|
|
@@ -1964,8 +1972,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
|
mode->vdisplay = du->pref_height;
|
|
vmw_guess_mode_timing(mode);
|
|
|
|
- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
|
|
- mode->vdisplay)) {
|
|
+ if (vmw_kms_validate_mode_vram(dev_priv,
|
|
+ mode->hdisplay * assumed_bpp,
|
|
+ mode->vdisplay)) {
|
|
drm_mode_probed_add(connector, mode);
|
|
} else {
|
|
drm_mode_destroy(dev, mode);
|
|
@@ -1987,7 +1996,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
|
bmode->vdisplay > max_height)
|
|
continue;
|
|
|
|
- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
|
|
+ if (!vmw_kms_validate_mode_vram(dev_priv,
|
|
+ bmode->hdisplay * assumed_bpp,
|
|
bmode->vdisplay))
|
|
continue;
|
|
|
|
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
|
|
index 6866448..37ac7b5 100644
|
|
--- a/drivers/gpu/vga/vga_switcheroo.c
|
|
+++ b/drivers/gpu/vga/vga_switcheroo.c
|
|
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
|
|
}
|
|
EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
|
|
|
|
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
|
|
+{
|
|
+ dev->pm_domain = NULL;
|
|
+}
|
|
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
|
|
+
|
|
static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
|
|
{
|
|
struct pci_dev *pdev = to_pci_dev(dev);
|
|
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
|
|
index 1bdcccc..f745d2c 100644
|
|
--- a/drivers/hid/hid-cherry.c
|
|
+++ b/drivers/hid/hid-cherry.c
|
|
@@ -28,7 +28,7 @@
|
|
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
unsigned int *rsize)
|
|
{
|
|
- if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
|
|
+ if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
|
|
hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
|
|
rdesc[11] = rdesc[16] = 0xff;
|
|
rdesc[12] = rdesc[17] = 0x03;
|
|
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
|
|
index 7cd42ea..d92c7d9 100644
|
|
--- a/drivers/hid/hid-core.c
|
|
+++ b/drivers/hid/hid-core.c
|
|
@@ -1743,6 +1743,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
|
|
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
|
|
index 6e12cd0..4850da3 100644
|
|
--- a/drivers/hid/hid-ids.h
|
|
+++ b/drivers/hid/hid-ids.h
|
|
@@ -292,6 +292,11 @@
|
|
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
|
|
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
|
|
|
|
+#define USB_VENDOR_ID_ELAN 0x04f3
|
|
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
|
|
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
|
|
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
|
|
+
|
|
#define USB_VENDOR_ID_ELECOM 0x056e
|
|
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
|
|
|
|
@@ -506,6 +511,7 @@
|
|
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
|
|
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
|
|
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
|
|
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
|
|
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
|
|
|
|
#define USB_VENDOR_ID_LABTEC 0x1020
|
|
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
|
|
index a713e62..a413f76 100644
|
|
--- a/drivers/hid/hid-input.c
|
|
+++ b/drivers/hid/hid-input.c
|
|
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
|
|
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
|
|
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
|
|
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
|
|
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
|
|
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
|
|
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
|
|
USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
|
|
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
|
|
{}
|
|
@@ -1063,6 +1066,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
|
|
return;
|
|
}
|
|
|
|
+ /*
|
|
+ * Ignore reports for absolute data if the data didn't change. This is
|
|
+ * not only an optimization but also fixes 'dead' key reports. Some
|
|
+ * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
|
|
+ * 0x31 and 0x32) report multiple keys, even though a localized keyboard
|
|
+ * can only have one of them physically available. The 'dead' keys
|
|
+ * report constant 0. As all map to the same keycode, they'd confuse
|
|
+ * the input layer. If we filter the 'dead' keys on the HID level, we
|
|
+ * skip the keycode translation and only forward real events.
|
|
+ */
|
|
+ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
|
|
+ HID_MAIN_ITEM_BUFFERED_BYTE)) &&
|
|
+ (field->flags & HID_MAIN_ITEM_VARIABLE) &&
|
|
+ usage->usage_index < field->maxusage &&
|
|
+ value == field->value[usage->usage_index])
|
|
+ return;
|
|
+
|
|
/* report the usage code as scancode if the key status has changed */
|
|
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
|
|
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
|
|
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
|
|
index e776963..158fcf5 100644
|
|
--- a/drivers/hid/hid-kye.c
|
|
+++ b/drivers/hid/hid-kye.c
|
|
@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
* - change the button usage range to 4-7 for the extra
|
|
* buttons
|
|
*/
|
|
- if (*rsize >= 74 &&
|
|
+ if (*rsize >= 75 &&
|
|
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
|
|
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
|
|
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
|
|
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
}
|
|
break;
|
|
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
|
|
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
|
|
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
|
|
rdesc = mousepen_i608x_rdesc_fixed;
|
|
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
|
|
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
|
switch (id->product) {
|
|
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
|
|
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
|
|
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
|
|
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
|
|
ret = kye_tablet_enable(hdev);
|
|
if (ret) {
|
|
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
|
|
USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
|
|
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
|
|
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
|
|
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
|
|
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
|
|
index 9fe9d4a..b8207e0 100644
|
|
--- a/drivers/hid/hid-lg.c
|
|
+++ b/drivers/hid/hid-lg.c
|
|
@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
struct usb_device_descriptor *udesc;
|
|
__u16 bcdDevice, rev_maj, rev_min;
|
|
|
|
- if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
|
|
+ if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
|
|
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
|
|
hid_info(hdev,
|
|
"fixing up Logitech keyboard report descriptor\n");
|
|
rdesc[84] = rdesc[89] = 0x4d;
|
|
rdesc[85] = rdesc[90] = 0x10;
|
|
}
|
|
- if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
|
|
+ if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
|
|
rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
|
|
rdesc[49] == 0x81 && rdesc[50] == 0x06) {
|
|
hid_info(hdev,
|
|
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
|
|
index f45279c..5da115a 100644
|
|
--- a/drivers/hid/hid-logitech-dj.c
|
|
+++ b/drivers/hid/hid-logitech-dj.c
|
|
@@ -237,13 +237,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
|
|
return;
|
|
}
|
|
|
|
- if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
|
|
- (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
|
|
- dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
|
|
- __func__, dj_report->device_index);
|
|
- return;
|
|
- }
|
|
-
|
|
if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
|
|
/* The device is already known. No need to reallocate it. */
|
|
dbg_hid("%s: device is already known\n", __func__);
|
|
@@ -694,7 +687,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
|
|
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
|
|
struct dj_report *dj_report = (struct dj_report *) data;
|
|
unsigned long flags;
|
|
- bool report_processed = false;
|
|
|
|
dbg_hid("%s, size:%d\n", __func__, size);
|
|
|
|
@@ -722,27 +714,41 @@ static int logi_dj_raw_event(struct hid_device *hdev,
|
|
* anything else with it.
|
|
*/
|
|
|
|
+ /* case 1) */
|
|
+ if (data[0] != REPORT_ID_DJ_SHORT)
|
|
+ return false;
|
|
+
|
|
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
|
|
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
|
|
+ /*
|
|
+ * Device index is wrong, bail out.
|
|
+ * This driver can ignore safely the receiver notifications,
|
|
+ * so ignore those reports too.
|
|
+ */
|
|
+ if (dj_report->device_index != DJ_RECEIVER_INDEX)
|
|
+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
|
|
+ __func__, dj_report->device_index);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
spin_lock_irqsave(&djrcv_dev->lock, flags);
|
|
- if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
|
|
- switch (dj_report->report_type) {
|
|
- case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
|
|
- case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
|
|
- logi_dj_recv_queue_notification(djrcv_dev, dj_report);
|
|
- break;
|
|
- case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
|
|
- if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
|
|
- STATUS_LINKLOSS) {
|
|
- logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
|
|
- }
|
|
- break;
|
|
- default:
|
|
- logi_dj_recv_forward_report(djrcv_dev, dj_report);
|
|
+ switch (dj_report->report_type) {
|
|
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
|
|
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
|
|
+ logi_dj_recv_queue_notification(djrcv_dev, dj_report);
|
|
+ break;
|
|
+ case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
|
|
+ if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
|
|
+ STATUS_LINKLOSS) {
|
|
+ logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
|
|
}
|
|
- report_processed = true;
|
|
+ break;
|
|
+ default:
|
|
+ logi_dj_recv_forward_report(djrcv_dev, dj_report);
|
|
}
|
|
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
|
|
|
|
- return report_processed;
|
|
+ return true;
|
|
}
|
|
|
|
static int logi_dj_probe(struct hid_device *hdev,
|
|
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
|
|
index 4a40003..daeb0aa 100644
|
|
--- a/drivers/hid/hid-logitech-dj.h
|
|
+++ b/drivers/hid/hid-logitech-dj.h
|
|
@@ -27,6 +27,7 @@
|
|
|
|
#define DJ_MAX_PAIRED_DEVICES 6
|
|
#define DJ_MAX_NUMBER_NOTIFICATIONS 8
|
|
+#define DJ_RECEIVER_INDEX 0
|
|
#define DJ_DEVICE_INDEX_MIN 1
|
|
#define DJ_DEVICE_INDEX_MAX 6
|
|
|
|
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
|
|
index 3b43d1c..991ba79 100644
|
|
--- a/drivers/hid/hid-magicmouse.c
|
|
+++ b/drivers/hid/hid-magicmouse.c
|
|
@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
|
|
if (size < 4 || ((size - 4) % 9) != 0)
|
|
return 0;
|
|
npoints = (size - 4) / 9;
|
|
+ if (npoints > 15) {
|
|
+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
|
|
+ size);
|
|
+ return 0;
|
|
+ }
|
|
msc->ntouches = 0;
|
|
for (ii = 0; ii < npoints; ii++)
|
|
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
|
|
@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
|
|
if (size < 6 || ((size - 6) % 8) != 0)
|
|
return 0;
|
|
npoints = (size - 6) / 8;
|
|
+ if (npoints > 15) {
|
|
+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
|
|
+ size);
|
|
+ return 0;
|
|
+ }
|
|
msc->ntouches = 0;
|
|
for (ii = 0; ii < npoints; ii++)
|
|
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
|
|
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
|
|
index 9e14c00..25daf28 100644
|
|
--- a/drivers/hid/hid-monterey.c
|
|
+++ b/drivers/hid/hid-monterey.c
|
|
@@ -24,7 +24,7 @@
|
|
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
unsigned int *rsize)
|
|
{
|
|
- if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
|
|
+ if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
|
|
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
|
|
rdesc[30] = 0x0c;
|
|
}
|
|
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
|
|
index 736b250..6aca4f2 100644
|
|
--- a/drivers/hid/hid-petalynx.c
|
|
+++ b/drivers/hid/hid-petalynx.c
|
|
@@ -25,7 +25,7 @@
|
|
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
unsigned int *rsize)
|
|
{
|
|
- if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
|
|
+ if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
|
|
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
|
|
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
|
|
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
|
|
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
|
|
index acbb0210..020df3c 100644
|
|
--- a/drivers/hid/hid-picolcd_core.c
|
|
+++ b/drivers/hid/hid-picolcd_core.c
|
|
@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
|
|
if (!data)
|
|
return 1;
|
|
|
|
+ if (size > 64) {
|
|
+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
|
|
+ size);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
if (report->id == REPORT_KEY_STATE) {
|
|
if (data->input_keys)
|
|
ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
|
|
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
|
|
index 1a07e07..47d7e74 100644
|
|
--- a/drivers/hid/hid-roccat-pyra.c
|
|
+++ b/drivers/hid/hid-roccat-pyra.c
|
|
@@ -35,6 +35,8 @@ static struct class *pyra_class;
|
|
static void profile_activated(struct pyra_device *pyra,
|
|
unsigned int new_profile)
|
|
{
|
|
+ if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
|
|
+ return;
|
|
pyra->actual_profile = new_profile;
|
|
pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
|
|
}
|
|
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
|
|
if (off != 0 || count != PYRA_SIZE_SETTINGS)
|
|
return -EINVAL;
|
|
|
|
- mutex_lock(&pyra->pyra_lock);
|
|
-
|
|
settings = (struct pyra_settings const *)buf;
|
|
+ if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
|
|
+ return -EINVAL;
|
|
+
|
|
+ mutex_lock(&pyra->pyra_lock);
|
|
|
|
retval = pyra_set_settings(usb_dev, settings);
|
|
if (retval) {
|
|
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
|
|
index 87fc91e..91072fa 100644
|
|
--- a/drivers/hid/hid-sunplus.c
|
|
+++ b/drivers/hid/hid-sunplus.c
|
|
@@ -24,7 +24,7 @@
|
|
static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|
unsigned int *rsize)
|
|
{
|
|
- if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
|
|
+ if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
|
|
rdesc[106] == 0x03) {
|
|
hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
|
|
rdesc[105] = rdesc[110] = 0x03;
|
|
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
|
|
index 42eebd1..17be889 100644
|
|
--- a/drivers/hid/i2c-hid/i2c-hid.c
|
|
+++ b/drivers/hid/i2c-hid/i2c-hid.c
|
|
@@ -136,6 +136,7 @@ struct i2c_hid {
|
|
* descriptor. */
|
|
unsigned int bufsize; /* i2c buffer size */
|
|
char *inbuf; /* Input buffer */
|
|
+ char *rawbuf; /* Raw Input buffer */
|
|
char *cmdbuf; /* Command buffer */
|
|
char *argsbuf; /* Command arguments buffer */
|
|
|
|
@@ -357,6 +358,9 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
|
|
int ret, ret_size;
|
|
int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
|
|
|
|
+ if (size > ihid->bufsize)
|
|
+ size = ihid->bufsize;
|
|
+
|
|
ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
|
|
if (ret != size) {
|
|
if (ret < 0)
|
|
@@ -482,9 +486,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
|
|
static void i2c_hid_free_buffers(struct i2c_hid *ihid)
|
|
{
|
|
kfree(ihid->inbuf);
|
|
+ kfree(ihid->rawbuf);
|
|
kfree(ihid->argsbuf);
|
|
kfree(ihid->cmdbuf);
|
|
ihid->inbuf = NULL;
|
|
+ ihid->rawbuf = NULL;
|
|
ihid->cmdbuf = NULL;
|
|
ihid->argsbuf = NULL;
|
|
ihid->bufsize = 0;
|
|
@@ -500,10 +506,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
|
|
report_size; /* report */
|
|
|
|
ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
|
|
+ ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
|
|
ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
|
|
ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
|
|
|
|
- if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
|
|
+ if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
|
|
i2c_hid_free_buffers(ihid);
|
|
return -ENOMEM;
|
|
}
|
|
@@ -530,12 +537,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
|
|
|
|
ret = i2c_hid_get_report(client,
|
|
report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
|
|
- report_number, ihid->inbuf, ask_count);
|
|
+ report_number, ihid->rawbuf, ask_count);
|
|
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
- ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
|
|
+ ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
|
|
|
|
if (ret_count <= 2)
|
|
return 0;
|
|
@@ -544,7 +551,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
|
|
|
|
/* The query buffer contains the size, dropping it in the reply */
|
|
count = min(count, ret_count - 2);
|
|
- memcpy(buf, ihid->inbuf + 2, count);
|
|
+ memcpy(buf, ihid->rawbuf + 2, count);
|
|
|
|
return count;
|
|
}
|
|
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
|
|
index 44df131..deabd2c 100644
|
|
--- a/drivers/hid/usbhid/hid-core.c
|
|
+++ b/drivers/hid/usbhid/hid-core.c
|
|
@@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid)
|
|
struct usbhid_device *usbhid = hid->driver_data;
|
|
|
|
spin_lock_irqsave(&usbhid->lock, flags);
|
|
- if (hid->open > 0 &&
|
|
+ if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) &&
|
|
!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
|
|
!test_bit(HID_SUSPENDED, &usbhid->iofl) &&
|
|
!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
|
|
@@ -180,7 +180,7 @@ static void hid_io_error(struct hid_device *hid)
|
|
if (time_after(jiffies, usbhid->stop_retry)) {
|
|
|
|
/* Retries failed, so do a port reset unless we lack bandwidth*/
|
|
- if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
|
|
+ if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
|
|
&& !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
|
|
|
|
schedule_work(&usbhid->reset_work);
|
|
@@ -292,6 +292,8 @@ static void hid_irq_in(struct urb *urb)
|
|
case 0: /* success */
|
|
usbhid_mark_busy(usbhid);
|
|
usbhid->retry_delay = 0;
|
|
+ if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
|
|
+ break;
|
|
hid_input_report(urb->context, HID_INPUT_REPORT,
|
|
urb->transfer_buffer,
|
|
urb->actual_length, 1);
|
|
@@ -734,8 +736,10 @@ void usbhid_close(struct hid_device *hid)
|
|
if (!--hid->open) {
|
|
spin_unlock_irq(&usbhid->lock);
|
|
hid_cancel_delayed_stuff(usbhid);
|
|
- usb_kill_urb(usbhid->urbin);
|
|
- usbhid->intf->needs_remote_wakeup = 0;
|
|
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
|
|
+ usb_kill_urb(usbhid->urbin);
|
|
+ usbhid->intf->needs_remote_wakeup = 0;
|
|
+ }
|
|
} else {
|
|
spin_unlock_irq(&usbhid->lock);
|
|
}
|
|
@@ -1119,6 +1123,19 @@ static int usbhid_start(struct hid_device *hid)
|
|
|
|
set_bit(HID_STARTED, &usbhid->iofl);
|
|
|
|
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
|
|
+ ret = usb_autopm_get_interface(usbhid->intf);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+ usbhid->intf->needs_remote_wakeup = 1;
|
|
+ ret = hid_start_in(hid);
|
|
+ if (ret) {
|
|
+ dev_err(&hid->dev,
|
|
+ "failed to start in urb: %d\n", ret);
|
|
+ }
|
|
+ usb_autopm_put_interface(usbhid->intf);
|
|
+ }
|
|
+
|
|
/* Some keyboards don't work until their LEDs have been set.
|
|
* Since BIOSes do set the LEDs, it must be safe for any device
|
|
* that supports the keyboard boot protocol.
|
|
@@ -1151,6 +1168,9 @@ static void usbhid_stop(struct hid_device *hid)
|
|
if (WARN_ON(!usbhid))
|
|
return;
|
|
|
|
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
|
|
+ usbhid->intf->needs_remote_wakeup = 0;
|
|
+
|
|
clear_bit(HID_STARTED, &usbhid->iofl);
|
|
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
|
|
set_bit(HID_DISCONNECTED, &usbhid->iofl);
|
|
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
|
|
index 8e4ddb3..473c0c4 100644
|
|
--- a/drivers/hid/usbhid/hid-quirks.c
|
|
+++ b/drivers/hid/usbhid/hid-quirks.c
|
|
@@ -69,6 +69,9 @@ static const struct hid_blacklist {
|
|
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
|
|
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
|
|
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
|
|
+ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
|
|
+ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
|
|
+ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
|
|
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
|
|
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
|
|
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
|
|
@@ -113,6 +116,7 @@ static const struct hid_blacklist {
|
|
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
|
|
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
|
|
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
|
|
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
|
|
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
|
|
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
|
|
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
|
|
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
|
|
index 69ea36f..356f22f 100644
|
|
--- a/drivers/hv/channel.c
|
|
+++ b/drivers/hv/channel.c
|
|
@@ -134,7 +134,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
|
|
GFP_KERNEL);
|
|
if (!open_info) {
|
|
err = -ENOMEM;
|
|
- goto error0;
|
|
+ goto error_gpadl;
|
|
}
|
|
|
|
init_completion(&open_info->waitevent);
|
|
@@ -150,7 +150,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
|
|
|
|
if (userdatalen > MAX_USER_DEFINED_BYTES) {
|
|
err = -EINVAL;
|
|
- goto error0;
|
|
+ goto error_gpadl;
|
|
}
|
|
|
|
if (userdatalen)
|
|
@@ -164,8 +164,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
|
|
ret = vmbus_post_msg(open_msg,
|
|
sizeof(struct vmbus_channel_open_channel));
|
|
|
|
- if (ret != 0)
|
|
+ if (ret != 0) {
|
|
+ err = ret;
|
|
goto error1;
|
|
+ }
|
|
|
|
t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
|
|
if (t == 0) {
|
|
@@ -192,6 +194,9 @@ error1:
|
|
list_del(&open_info->msglistentry);
|
|
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
|
|
|
|
+error_gpadl:
|
|
+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
|
|
+
|
|
error0:
|
|
free_pages((unsigned long)out,
|
|
get_order(send_ringbuffer_size + recv_ringbuffer_size));
|
|
@@ -362,7 +367,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
|
|
u32 next_gpadl_handle;
|
|
unsigned long flags;
|
|
int ret = 0;
|
|
- int t;
|
|
|
|
next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
|
|
atomic_inc(&vmbus_connection.next_gpadl_handle);
|
|
@@ -409,9 +413,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
|
|
|
|
}
|
|
}
|
|
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
|
|
- BUG_ON(t == 0);
|
|
-
|
|
+ wait_for_completion(&msginfo->waitevent);
|
|
|
|
/* At this point, we received the gpadl created msg */
|
|
*gpadl_handle = gpadlmsg->gpadl;
|
|
@@ -434,7 +436,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
|
|
struct vmbus_channel_gpadl_teardown *msg;
|
|
struct vmbus_channel_msginfo *info;
|
|
unsigned long flags;
|
|
- int ret, t;
|
|
+ int ret;
|
|
|
|
info = kmalloc(sizeof(*info) +
|
|
sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
|
|
@@ -456,11 +458,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
|
|
ret = vmbus_post_msg(msg,
|
|
sizeof(struct vmbus_channel_gpadl_teardown));
|
|
|
|
- BUG_ON(ret != 0);
|
|
- t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
|
|
- BUG_ON(t == 0);
|
|
+ if (ret)
|
|
+ goto post_msg_err;
|
|
+
|
|
+ wait_for_completion(&info->waitevent);
|
|
|
|
- /* Received a torndown response */
|
|
+post_msg_err:
|
|
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
|
|
list_del(&info->msglistentry);
|
|
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
|
|
@@ -470,7 +473,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
|
|
}
|
|
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
|
|
|
|
-static void vmbus_close_internal(struct vmbus_channel *channel)
|
|
+static int vmbus_close_internal(struct vmbus_channel *channel)
|
|
{
|
|
struct vmbus_channel_close_channel *msg;
|
|
int ret;
|
|
@@ -492,11 +495,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
|
|
|
|
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
|
|
|
|
- BUG_ON(ret != 0);
|
|
+ if (ret) {
|
|
+ pr_err("Close failed: close post msg return is %d\n", ret);
|
|
+ /*
|
|
+ * If we failed to post the close msg,
|
|
+ * it is perhaps better to leak memory.
|
|
+ */
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
/* Tear down the gpadl for the channel's ring buffer */
|
|
- if (channel->ringbuffer_gpadlhandle)
|
|
- vmbus_teardown_gpadl(channel,
|
|
- channel->ringbuffer_gpadlhandle);
|
|
+ if (channel->ringbuffer_gpadlhandle) {
|
|
+ ret = vmbus_teardown_gpadl(channel,
|
|
+ channel->ringbuffer_gpadlhandle);
|
|
+ if (ret) {
|
|
+ pr_err("Close failed: teardown gpadl return %d\n", ret);
|
|
+ /*
|
|
+ * If we failed to teardown gpadl,
|
|
+ * it is perhaps better to leak memory.
|
|
+ */
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
|
|
/* Cleanup the ring buffers for this channel */
|
|
hv_ringbuffer_cleanup(&channel->outbound);
|
|
@@ -505,7 +525,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
|
|
free_pages((unsigned long)channel->ringbuffer_pages,
|
|
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
|
|
|
|
-
|
|
+ return ret;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
|
|
index fa92046..8c24881 100644
|
|
--- a/drivers/hv/channel_mgmt.c
|
|
+++ b/drivers/hv/channel_mgmt.c
|
|
@@ -202,9 +202,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
|
|
unsigned long flags;
|
|
struct vmbus_channel *primary_channel;
|
|
struct vmbus_channel_relid_released msg;
|
|
+ struct device *dev;
|
|
+
|
|
+ if (channel->device_obj) {
|
|
+ dev = get_device(&channel->device_obj->device);
|
|
+ if (dev) {
|
|
+ vmbus_device_unregister(channel->device_obj);
|
|
+ put_device(dev);
|
|
+ }
|
|
+ }
|
|
|
|
- if (channel->device_obj)
|
|
- vmbus_device_unregister(channel->device_obj);
|
|
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
|
|
msg.child_relid = channel->offermsg.child_relid;
|
|
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
|
|
@@ -709,7 +716,7 @@ int vmbus_request_offers(void)
|
|
{
|
|
struct vmbus_channel_message_header *msg;
|
|
struct vmbus_channel_msginfo *msginfo;
|
|
- int ret, t;
|
|
+ int ret;
|
|
|
|
msginfo = kmalloc(sizeof(*msginfo) +
|
|
sizeof(struct vmbus_channel_message_header),
|
|
@@ -717,8 +724,6 @@ int vmbus_request_offers(void)
|
|
if (!msginfo)
|
|
return -ENOMEM;
|
|
|
|
- init_completion(&msginfo->waitevent);
|
|
-
|
|
msg = (struct vmbus_channel_message_header *)msginfo->msg;
|
|
|
|
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
|
|
@@ -732,14 +737,6 @@ int vmbus_request_offers(void)
|
|
goto cleanup;
|
|
}
|
|
|
|
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
|
|
- if (t == 0) {
|
|
- ret = -ETIMEDOUT;
|
|
- goto cleanup;
|
|
- }
|
|
-
|
|
-
|
|
-
|
|
cleanup:
|
|
kfree(msginfo);
|
|
|
|
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
|
|
index ce5a9f2..d8fd95c 100644
|
|
--- a/drivers/hv/connection.c
|
|
+++ b/drivers/hv/connection.c
|
|
@@ -408,10 +408,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
|
|
* insufficient resources. Retry the operation a couple of
|
|
* times before giving up.
|
|
*/
|
|
- while (retries < 3) {
|
|
- ret = hv_post_message(conn_id, 1, buffer, buflen);
|
|
- if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
|
|
+ while (retries < 10) {
|
|
+ ret = hv_post_message(conn_id, 1, buffer, buflen);
|
|
+
|
|
+ switch (ret) {
|
|
+ case HV_STATUS_INSUFFICIENT_BUFFERS:
|
|
+ ret = -ENOMEM;
|
|
+ case -ENOMEM:
|
|
+ break;
|
|
+ case HV_STATUS_SUCCESS:
|
|
return ret;
|
|
+ default:
|
|
+ pr_err("hv_post_msg() failed; error code:%d\n", ret);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
retries++;
|
|
msleep(100);
|
|
}
|
|
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
|
|
index 7f9dc2f..1265164 100644
|
|
--- a/drivers/hwmon/ads1015.c
|
|
+++ b/drivers/hwmon/ads1015.c
|
|
@@ -198,7 +198,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
|
|
}
|
|
|
|
channel = be32_to_cpup(property);
|
|
- if (channel > ADS1015_CHANNELS) {
|
|
+ if (channel >= ADS1015_CHANNELS) {
|
|
dev_err(&client->dev,
|
|
"invalid channel index %d on %s\n",
|
|
channel, node->full_name);
|
|
@@ -212,6 +212,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
|
|
dev_err(&client->dev,
|
|
"invalid gain on %s\n",
|
|
node->full_name);
|
|
+ return -EINVAL;
|
|
}
|
|
}
|
|
|
|
@@ -222,6 +223,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
|
|
dev_err(&client->dev,
|
|
"invalid data_rate on %s\n",
|
|
node->full_name);
|
|
+ return -EINVAL;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
|
|
index 9f2be3d..8a67ec6 100644
|
|
--- a/drivers/hwmon/amc6821.c
|
|
+++ b/drivers/hwmon/amc6821.c
|
|
@@ -360,11 +360,13 @@ static ssize_t set_pwm1_enable(
|
|
if (config)
|
|
return config;
|
|
|
|
+ mutex_lock(&data->update_lock);
|
|
config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
|
|
if (config < 0) {
|
|
dev_err(&client->dev,
|
|
"Error reading configuration register, aborting.\n");
|
|
- return config;
|
|
+ count = config;
|
|
+ goto unlock;
|
|
}
|
|
|
|
switch (val) {
|
|
@@ -381,14 +383,15 @@ static ssize_t set_pwm1_enable(
|
|
config |= AMC6821_CONF1_FDRC1;
|
|
break;
|
|
default:
|
|
- return -EINVAL;
|
|
+ count = -EINVAL;
|
|
+ goto unlock;
|
|
}
|
|
- mutex_lock(&data->update_lock);
|
|
if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
|
|
dev_err(&client->dev,
|
|
"Configuration register write error, aborting.\n");
|
|
count = -EIO;
|
|
}
|
|
+unlock:
|
|
mutex_unlock(&data->update_lock);
|
|
return count;
|
|
}
|
|
@@ -493,8 +496,9 @@ static ssize_t set_temp_auto_point_temp(
|
|
return -EINVAL;
|
|
}
|
|
|
|
- data->valid = 0;
|
|
mutex_lock(&data->update_lock);
|
|
+ data->valid = 0;
|
|
+
|
|
switch (ix) {
|
|
case 0:
|
|
ptemp[0] = clamp_val(val / 1000, 0,
|
|
@@ -658,13 +662,14 @@ static ssize_t set_fan1_div(
|
|
if (config)
|
|
return config;
|
|
|
|
+ mutex_lock(&data->update_lock);
|
|
config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
|
|
if (config < 0) {
|
|
dev_err(&client->dev,
|
|
"Error reading configuration register, aborting.\n");
|
|
- return config;
|
|
+ count = config;
|
|
+ goto EXIT;
|
|
}
|
|
- mutex_lock(&data->update_lock);
|
|
switch (val) {
|
|
case 2:
|
|
config &= ~AMC6821_CONF4_PSPR;
|
|
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
|
|
index 4ae3fff..bea0a34 100644
|
|
--- a/drivers/hwmon/dme1737.c
|
|
+++ b/drivers/hwmon/dme1737.c
|
|
@@ -247,8 +247,8 @@ struct dme1737_data {
|
|
u8 pwm_acz[3];
|
|
u8 pwm_freq[6];
|
|
u8 pwm_rr[2];
|
|
- u8 zone_low[3];
|
|
- u8 zone_abs[3];
|
|
+ s8 zone_low[3];
|
|
+ s8 zone_abs[3];
|
|
u8 zone_hyst[2];
|
|
u32 alarms;
|
|
};
|
|
@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
|
|
return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2));
|
|
}
|
|
|
|
-static inline int IN_TO_REG(int val, int nominal)
|
|
+static inline int IN_TO_REG(long val, int nominal)
|
|
{
|
|
return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
|
|
}
|
|
@@ -293,7 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
|
|
return (reg * 1000) >> (res - 8);
|
|
}
|
|
|
|
-static inline int TEMP_TO_REG(int val)
|
|
+static inline int TEMP_TO_REG(long val)
|
|
{
|
|
return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
|
|
}
|
|
@@ -308,7 +308,7 @@ static inline int TEMP_RANGE_FROM_REG(int reg)
|
|
return TEMP_RANGE[(reg >> 4) & 0x0f];
|
|
}
|
|
|
|
-static int TEMP_RANGE_TO_REG(int val, int reg)
|
|
+static int TEMP_RANGE_TO_REG(long val, int reg)
|
|
{
|
|
int i;
|
|
|
|
@@ -331,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
|
|
return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
|
|
}
|
|
|
|
-static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
|
|
+static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
|
|
{
|
|
int hyst = clamp_val((val + 500) / 1000, 0, 15);
|
|
|
|
@@ -347,7 +347,7 @@ static inline int FAN_FROM_REG(int reg, int tpc)
|
|
return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg;
|
|
}
|
|
|
|
-static inline int FAN_TO_REG(int val, int tpc)
|
|
+static inline int FAN_TO_REG(long val, int tpc)
|
|
{
|
|
if (tpc) {
|
|
return clamp_val(val / tpc, 0, 0xffff);
|
|
@@ -379,7 +379,7 @@ static inline int FAN_TYPE_FROM_REG(int reg)
|
|
return (edge > 0) ? 1 << (edge - 1) : 0;
|
|
}
|
|
|
|
-static inline int FAN_TYPE_TO_REG(int val, int reg)
|
|
+static inline int FAN_TYPE_TO_REG(long val, int reg)
|
|
{
|
|
int edge = (val == 4) ? 3 : val;
|
|
|
|
@@ -402,7 +402,7 @@ static int FAN_MAX_FROM_REG(int reg)
|
|
return 1000 + i * 500;
|
|
}
|
|
|
|
-static int FAN_MAX_TO_REG(int val)
|
|
+static int FAN_MAX_TO_REG(long val)
|
|
{
|
|
int i;
|
|
|
|
@@ -460,7 +460,7 @@ static inline int PWM_ACZ_FROM_REG(int reg)
|
|
return acz[(reg >> 5) & 0x07];
|
|
}
|
|
|
|
-static inline int PWM_ACZ_TO_REG(int val, int reg)
|
|
+static inline int PWM_ACZ_TO_REG(long val, int reg)
|
|
{
|
|
int acz = (val == 4) ? 2 : val - 1;
|
|
|
|
@@ -476,7 +476,7 @@ static inline int PWM_FREQ_FROM_REG(int reg)
|
|
return PWM_FREQ[reg & 0x0f];
|
|
}
|
|
|
|
-static int PWM_FREQ_TO_REG(int val, int reg)
|
|
+static int PWM_FREQ_TO_REG(long val, int reg)
|
|
{
|
|
int i;
|
|
|
|
@@ -510,7 +510,7 @@ static inline int PWM_RR_FROM_REG(int reg, int ix)
|
|
return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
|
|
}
|
|
|
|
-static int PWM_RR_TO_REG(int val, int ix, int reg)
|
|
+static int PWM_RR_TO_REG(long val, int ix, int reg)
|
|
{
|
|
int i;
|
|
|
|
@@ -528,7 +528,7 @@ static inline int PWM_RR_EN_FROM_REG(int reg, int ix)
|
|
return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
|
|
}
|
|
|
|
-static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
|
|
+static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg)
|
|
{
|
|
int en = (ix == 1) ? 0x80 : 0x08;
|
|
|
|
@@ -1481,13 +1481,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct dme1737_data *data = dev_get_drvdata(dev);
|
|
- long val;
|
|
+ unsigned long val;
|
|
int err;
|
|
|
|
- err = kstrtol(buf, 10, &val);
|
|
+ err = kstrtoul(buf, 10, &val);
|
|
if (err)
|
|
return err;
|
|
|
|
+ if (val > 255)
|
|
+ return -EINVAL;
|
|
+
|
|
data->vrm = val;
|
|
return count;
|
|
}
|
|
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
|
|
index fc6f5d5..8890870 100644
|
|
--- a/drivers/hwmon/ds1621.c
|
|
+++ b/drivers/hwmon/ds1621.c
|
|
@@ -309,6 +309,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
|
|
data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT);
|
|
i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf);
|
|
data->update_interval = ds1721_convrates[resol];
|
|
+ data->zbits = 7 - resol;
|
|
mutex_unlock(&data->update_lock);
|
|
|
|
return count;
|
|
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
|
|
index 73181be..814f7ee 100644
|
|
--- a/drivers/hwmon/gpio-fan.c
|
|
+++ b/drivers/hwmon/gpio-fan.c
|
|
@@ -173,7 +173,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
|
|
return -ENODEV;
|
|
}
|
|
|
|
-static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
|
|
+static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
|
|
{
|
|
struct gpio_fan_speed *speed = fan_data->speed;
|
|
int i;
|
|
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
|
|
index 9efadfc..c1eb464 100644
|
|
--- a/drivers/hwmon/lm78.c
|
|
+++ b/drivers/hwmon/lm78.c
|
|
@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
|
|
* TEMP: mC (-128C to +127C)
|
|
* REG: 1C/bit, two's complement
|
|
*/
|
|
-static inline s8 TEMP_TO_REG(int val)
|
|
+static inline s8 TEMP_TO_REG(long val)
|
|
{
|
|
int nval = clamp_val(val, -128000, 127000) ;
|
|
return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
|
|
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
|
|
index bed4af35..21d6cc8 100644
|
|
--- a/drivers/hwmon/lm85.c
|
|
+++ b/drivers/hwmon/lm85.c
|
|
@@ -158,7 +158,7 @@ static inline u16 FAN_TO_REG(unsigned long val)
|
|
|
|
/* Temperature is reported in .001 degC increments */
|
|
#define TEMP_TO_REG(val) \
|
|
- clamp_val(SCALE(val, 1000, 1), -127, 127)
|
|
+ DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000)
|
|
#define TEMPEXT_FROM_REG(val, ext) \
|
|
SCALE(((val) << 4) + (ext), 16, 1000)
|
|
#define TEMP_FROM_REG(val) ((val) * 1000)
|
|
@@ -192,7 +192,7 @@ static const int lm85_range_map[] = {
|
|
13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000
|
|
};
|
|
|
|
-static int RANGE_TO_REG(int range)
|
|
+static int RANGE_TO_REG(long range)
|
|
{
|
|
int i;
|
|
|
|
@@ -214,7 +214,7 @@ static const int adm1027_freq_map[8] = { /* 1 Hz */
|
|
11, 15, 22, 29, 35, 44, 59, 88
|
|
};
|
|
|
|
-static int FREQ_TO_REG(const int *map, int freq)
|
|
+static int FREQ_TO_REG(const int *map, unsigned long freq)
|
|
{
|
|
int i;
|
|
|
|
@@ -463,6 +463,9 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
|
|
if (err)
|
|
return err;
|
|
|
|
+ if (val > 255)
|
|
+ return -EINVAL;
|
|
+
|
|
data->vrm = val;
|
|
return count;
|
|
}
|
|
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
|
|
index d219c06..972444a 100644
|
|
--- a/drivers/hwmon/mcp3021.c
|
|
+++ b/drivers/hwmon/mcp3021.c
|
|
@@ -31,14 +31,11 @@
|
|
/* output format */
|
|
#define MCP3021_SAR_SHIFT 2
|
|
#define MCP3021_SAR_MASK 0x3ff
|
|
-
|
|
#define MCP3021_OUTPUT_RES 10 /* 10-bit resolution */
|
|
-#define MCP3021_OUTPUT_SCALE 4
|
|
|
|
#define MCP3221_SAR_SHIFT 0
|
|
#define MCP3221_SAR_MASK 0xfff
|
|
#define MCP3221_OUTPUT_RES 12 /* 12-bit resolution */
|
|
-#define MCP3221_OUTPUT_SCALE 1
|
|
|
|
enum chips {
|
|
mcp3021,
|
|
@@ -54,7 +51,6 @@ struct mcp3021_data {
|
|
u16 sar_shift;
|
|
u16 sar_mask;
|
|
u8 output_res;
|
|
- u8 output_scale;
|
|
};
|
|
|
|
static int mcp3021_read16(struct i2c_client *client)
|
|
@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
|
|
|
|
static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
|
|
{
|
|
- if (val == 0)
|
|
- return 0;
|
|
-
|
|
- val = val * data->output_scale - data->output_scale / 2;
|
|
-
|
|
- return val * DIV_ROUND_CLOSEST(data->vdd,
|
|
- (1 << data->output_res) * data->output_scale);
|
|
+ return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
|
|
}
|
|
|
|
static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
|
|
@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
|
|
data->sar_shift = MCP3021_SAR_SHIFT;
|
|
data->sar_mask = MCP3021_SAR_MASK;
|
|
data->output_res = MCP3021_OUTPUT_RES;
|
|
- data->output_scale = MCP3021_OUTPUT_SCALE;
|
|
break;
|
|
|
|
case mcp3221:
|
|
data->sar_shift = MCP3221_SAR_SHIFT;
|
|
data->sar_mask = MCP3221_SAR_MASK;
|
|
data->output_res = MCP3221_OUTPUT_RES;
|
|
- data->output_scale = MCP3221_OUTPUT_SCALE;
|
|
break;
|
|
}
|
|
|
|
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
|
|
index 38d5a63..20b69bf 100644
|
|
--- a/drivers/hwmon/nct6775.c
|
|
+++ b/drivers/hwmon/nct6775.c
|
|
@@ -986,6 +986,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
|
|
(*t)->dev_attr.attr.name, tg->base + i);
|
|
if ((*t)->s2) {
|
|
a2 = &su->u.a2;
|
|
+ sysfs_attr_init(&a2->dev_attr.attr);
|
|
a2->dev_attr.attr.name = su->name;
|
|
a2->nr = (*t)->u.s.nr + i;
|
|
a2->index = (*t)->u.s.index;
|
|
@@ -996,6 +997,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
|
|
*attrs = &a2->dev_attr.attr;
|
|
} else {
|
|
a = &su->u.a1;
|
|
+ sysfs_attr_init(&a->dev_attr.attr);
|
|
a->dev_attr.attr.name = su->name;
|
|
a->index = (*t)->u.index + i;
|
|
a->dev_attr.attr.mode =
|
|
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
|
|
index e76feb86..3660cb6 100644
|
|
--- a/drivers/hwmon/ntc_thermistor.c
|
|
+++ b/drivers/hwmon/ntc_thermistor.c
|
|
@@ -181,8 +181,10 @@ static struct ntc_thermistor_platform_data *
|
|
ntc_thermistor_parse_dt(struct platform_device *pdev)
|
|
{
|
|
struct iio_channel *chan;
|
|
+ enum iio_chan_type type;
|
|
struct device_node *np = pdev->dev.of_node;
|
|
struct ntc_thermistor_platform_data *pdata;
|
|
+ int ret;
|
|
|
|
if (!np)
|
|
return NULL;
|
|
@@ -195,6 +197,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
|
|
if (IS_ERR(chan))
|
|
return ERR_CAST(chan);
|
|
|
|
+ ret = iio_get_channel_type(chan, &type);
|
|
+ if (ret < 0)
|
|
+ return ERR_PTR(ret);
|
|
+
|
|
+ if (type != IIO_VOLTAGE)
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
|
|
return ERR_PTR(-ENODEV);
|
|
if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
|
|
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
|
|
index 3532026..bf1d789 100644
|
|
--- a/drivers/hwmon/sis5595.c
|
|
+++ b/drivers/hwmon/sis5595.c
|
|
@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
|
|
{
|
|
return val * 830 + 52120;
|
|
}
|
|
-static inline s8 TEMP_TO_REG(int val)
|
|
+static inline s8 TEMP_TO_REG(long val)
|
|
{
|
|
int nval = clamp_val(val, -54120, 157530) ;
|
|
return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
|
|
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
|
|
index efee4c5..34b9a60 100644
|
|
--- a/drivers/hwmon/smsc47m192.c
|
|
+++ b/drivers/hwmon/smsc47m192.c
|
|
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
|
|
*/
|
|
static inline s8 TEMP_TO_REG(int val)
|
|
{
|
|
- return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
|
|
+ return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
|
|
}
|
|
|
|
static inline int TEMP_FROM_REG(s8 val)
|
|
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
|
|
err = kstrtoul(buf, 10, &val);
|
|
if (err)
|
|
return err;
|
|
+ if (val > 255)
|
|
+ return -EINVAL;
|
|
|
|
data->vrm = val;
|
|
return count;
|
|
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
|
|
index 843d012..50862c9 100644
|
|
--- a/drivers/i2c/busses/i2c-at91.c
|
|
+++ b/drivers/i2c/busses/i2c-at91.c
|
|
@@ -62,6 +62,9 @@
|
|
#define AT91_TWI_UNRE 0x0080 /* Underrun Error */
|
|
#define AT91_TWI_NACK 0x0100 /* Not Acknowledged */
|
|
|
|
+#define AT91_TWI_INT_MASK \
|
|
+ (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
|
|
+
|
|
#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
|
|
#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
|
|
#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
|
|
@@ -101,6 +104,7 @@ struct at91_twi_dev {
|
|
unsigned twi_cwgr_reg;
|
|
struct at91_twi_pdata *pdata;
|
|
bool use_dma;
|
|
+ bool recv_len_abort;
|
|
struct at91_twi_dma dma;
|
|
};
|
|
|
|
@@ -116,13 +120,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
|
|
|
|
static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
|
|
{
|
|
- at91_twi_write(dev, AT91_TWI_IDR,
|
|
- AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
|
|
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
|
|
}
|
|
|
|
static void at91_twi_irq_save(struct at91_twi_dev *dev)
|
|
{
|
|
- dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
|
|
+ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
|
|
at91_disable_twi_interrupts(dev);
|
|
}
|
|
|
|
@@ -210,8 +213,16 @@ static void at91_twi_write_data_dma_callback(void *data)
|
|
struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
|
|
|
|
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
|
|
- dev->buf_len, DMA_MEM_TO_DEV);
|
|
+ dev->buf_len, DMA_TO_DEVICE);
|
|
|
|
+ /*
|
|
+ * When this callback is called, THR/TX FIFO is likely not to be empty
|
|
+ * yet. So we have to wait for TXCOMP or NACK bits to be set into the
|
|
+ * Status Register to be sure that the STOP bit has been sent and the
|
|
+ * transfer is completed. The NACK interrupt has already been enabled,
|
|
+ * we just have to enable TXCOMP one.
|
|
+ */
|
|
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
|
|
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
|
|
}
|
|
|
|
@@ -267,12 +278,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
|
|
*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
|
|
--dev->buf_len;
|
|
|
|
+ /* return if aborting, we only needed to read RHR to clear RXRDY*/
|
|
+ if (dev->recv_len_abort)
|
|
+ return;
|
|
+
|
|
/* handle I2C_SMBUS_BLOCK_DATA */
|
|
if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
|
|
- dev->msg->flags &= ~I2C_M_RECV_LEN;
|
|
- dev->buf_len += *dev->buf;
|
|
- dev->msg->len = dev->buf_len + 1;
|
|
- dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
|
|
+ /* ensure length byte is a valid value */
|
|
+ if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
|
|
+ dev->msg->flags &= ~I2C_M_RECV_LEN;
|
|
+ dev->buf_len += *dev->buf;
|
|
+ dev->msg->len = dev->buf_len + 1;
|
|
+ dev_dbg(dev->dev, "received block length %d\n",
|
|
+ dev->buf_len);
|
|
+ } else {
|
|
+ /* abort and send the stop by reading one more byte */
|
|
+ dev->recv_len_abort = true;
|
|
+ dev->buf_len = 1;
|
|
+ }
|
|
}
|
|
|
|
/* send stop if second but last byte has been read */
|
|
@@ -289,12 +312,12 @@ static void at91_twi_read_data_dma_callback(void *data)
|
|
struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
|
|
|
|
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
|
|
- dev->buf_len, DMA_DEV_TO_MEM);
|
|
+ dev->buf_len, DMA_FROM_DEVICE);
|
|
|
|
/* The last two bytes have to be read without using dma */
|
|
dev->buf += dev->buf_len - 2;
|
|
dev->buf_len = 2;
|
|
- at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
|
|
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
|
|
}
|
|
|
|
static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
|
|
@@ -355,7 +378,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
|
|
/* catch error flags */
|
|
dev->transfer_status |= status;
|
|
|
|
- if (irqstatus & AT91_TWI_TXCOMP) {
|
|
+ if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
|
|
at91_disable_twi_interrupts(dev);
|
|
complete(&dev->cmd_complete);
|
|
}
|
|
@@ -368,6 +391,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
|
|
int ret;
|
|
bool has_unre_flag = dev->pdata->has_unre_flag;
|
|
|
|
+ /*
|
|
+ * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
|
|
+ * read flag but shows the state of the transmission at the time the
|
|
+ * Status Register is read. According to the programmer datasheet,
|
|
+ * TXCOMP is set when both holding register and internal shifter are
|
|
+ * empty and STOP condition has been sent.
|
|
+ * Consequently, we should enable NACK interrupt rather than TXCOMP to
|
|
+ * detect transmission failure.
|
|
+ *
|
|
+ * Besides, the TXCOMP bit is already set before the i2c transaction
|
|
+ * has been started. For read transactions, this bit is cleared when
|
|
+ * writing the START bit into the Control Register. So the
|
|
+ * corresponding interrupt can safely be enabled just after.
|
|
+ * However for write transactions managed by the CPU, we first write
|
|
+ * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
|
|
+ * interrupt. If TXCOMP interrupt were enabled before writing into THR,
|
|
+ * the interrupt handler would be called immediately and the i2c command
|
|
+ * would be reported as completed.
|
|
+ * Also when a write transaction is managed by the DMA controller,
|
|
+ * enabling the TXCOMP interrupt in this function may lead to a race
|
|
+ * condition since we don't know whether the TXCOMP interrupt is enabled
|
|
+ * before or after the DMA has started to write into THR. So the TXCOMP
|
|
+ * interrupt is enabled later by at91_twi_write_data_dma_callback().
|
|
+ * Immediately after in that DMA callback, we still need to send the
|
|
+ * STOP condition manually writing the corresponding bit into the
|
|
+ * Control Register.
|
|
+ */
|
|
+
|
|
dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
|
|
(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
|
|
|
|
@@ -398,31 +449,29 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
|
|
* seems to be the best solution.
|
|
*/
|
|
if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
|
|
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
|
|
at91_twi_read_data_dma(dev);
|
|
- /*
|
|
- * It is important to enable TXCOMP irq here because
|
|
- * doing it only when transferring the last two bytes
|
|
- * will mask NACK errors since TXCOMP is set when a
|
|
- * NACK occurs.
|
|
- */
|
|
- at91_twi_write(dev, AT91_TWI_IER,
|
|
- AT91_TWI_TXCOMP);
|
|
- } else
|
|
+ } else {
|
|
at91_twi_write(dev, AT91_TWI_IER,
|
|
- AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
|
|
+ AT91_TWI_TXCOMP |
|
|
+ AT91_TWI_NACK |
|
|
+ AT91_TWI_RXRDY);
|
|
+ }
|
|
} else {
|
|
if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
|
|
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
|
|
at91_twi_write_data_dma(dev);
|
|
- at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
|
|
} else {
|
|
at91_twi_write_next_byte(dev);
|
|
at91_twi_write(dev, AT91_TWI_IER,
|
|
- AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
|
|
+ AT91_TWI_TXCOMP |
|
|
+ AT91_TWI_NACK |
|
|
+ AT91_TWI_TXRDY);
|
|
}
|
|
}
|
|
|
|
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
|
|
- dev->adapter.timeout);
|
|
+ ret = wait_for_completion_timeout(&dev->cmd_complete,
|
|
+ dev->adapter.timeout);
|
|
if (ret == 0) {
|
|
dev_err(dev->dev, "controller timed out\n");
|
|
at91_init_twi_bus(dev);
|
|
@@ -444,6 +493,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
|
|
ret = -EIO;
|
|
goto error;
|
|
}
|
|
+ if (dev->recv_len_abort) {
|
|
+ dev_err(dev->dev, "invalid smbus block length recvd\n");
|
|
+ ret = -EPROTO;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
dev_dbg(dev->dev, "transfer complete\n");
|
|
|
|
return 0;
|
|
@@ -500,6 +555,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
|
|
dev->buf_len = m_start->len;
|
|
dev->buf = m_start->buf;
|
|
dev->msg = m_start;
|
|
+ dev->recv_len_abort = false;
|
|
|
|
ret = at91_do_twi_transfer(dev);
|
|
|
|
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
|
|
index af0b583..e3c6a96 100644
|
|
--- a/drivers/i2c/busses/i2c-davinci.c
|
|
+++ b/drivers/i2c/busses/i2c-davinci.c
|
|
@@ -411,11 +411,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
|
|
if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
|
|
if (msg->flags & I2C_M_IGNORE_NAK)
|
|
return msg->len;
|
|
- if (stop) {
|
|
- w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
|
|
- w |= DAVINCI_I2C_MDR_STP;
|
|
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
|
|
- }
|
|
+ w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
|
|
+ w |= DAVINCI_I2C_MDR_STP;
|
|
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
|
|
return -EREMOTEIO;
|
|
}
|
|
return -EIO;
|
|
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
|
|
index 8ce4f51..6e932d1 100644
|
|
--- a/drivers/i2c/busses/i2c-ismt.c
|
|
+++ b/drivers/i2c/busses/i2c-ismt.c
|
|
@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
|
|
desc->wr_len_cmd = dma_size;
|
|
desc->control |= ISMT_DESC_BLK;
|
|
priv->dma_buffer[0] = command;
|
|
- memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
|
|
+ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
|
|
} else {
|
|
/* Block Read */
|
|
dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n");
|
|
@@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
|
|
desc->wr_len_cmd = dma_size;
|
|
desc->control |= ISMT_DESC_I2C;
|
|
priv->dma_buffer[0] = command;
|
|
- memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
|
|
+ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
|
|
} else {
|
|
/* i2c Block Read */
|
|
dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n");
|
|
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
|
|
index d52d849..cf89175 100644
|
|
--- a/drivers/i2c/busses/i2c-mv64xxx.c
|
|
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
|
|
@@ -748,8 +748,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
|
|
}
|
|
tclk = clk_get_rate(drv_data->clk);
|
|
|
|
- rc = of_property_read_u32(np, "clock-frequency", &bus_freq);
|
|
- if (rc)
|
|
+ if (of_property_read_u32(np, "clock-frequency", &bus_freq))
|
|
bus_freq = 100000; /* 100kHz by default */
|
|
|
|
if (!mv64xxx_find_baud_factors(bus_freq, tclk,
|
|
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
|
|
index 90dcc2e..a686057 100644
|
|
--- a/drivers/i2c/busses/i2c-omap.c
|
|
+++ b/drivers/i2c/busses/i2c-omap.c
|
|
@@ -926,14 +926,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
|
|
if (stat & OMAP_I2C_STAT_NACK) {
|
|
err |= OMAP_I2C_STAT_NACK;
|
|
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
|
|
- break;
|
|
}
|
|
|
|
if (stat & OMAP_I2C_STAT_AL) {
|
|
dev_err(dev->dev, "Arbitration lost\n");
|
|
err |= OMAP_I2C_STAT_AL;
|
|
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
|
|
- break;
|
|
}
|
|
|
|
/*
|
|
@@ -958,11 +956,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
|
|
if (dev->fifo_size)
|
|
num_bytes = dev->buf_len;
|
|
|
|
- omap_i2c_receive_data(dev, num_bytes, true);
|
|
-
|
|
- if (dev->errata & I2C_OMAP_ERRATA_I207)
|
|
+ if (dev->errata & I2C_OMAP_ERRATA_I207) {
|
|
i2c_omap_errata_i207(dev, stat);
|
|
+ num_bytes = (omap_i2c_read_reg(dev,
|
|
+ OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
|
|
+ }
|
|
|
|
+ omap_i2c_receive_data(dev, num_bytes, true);
|
|
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
|
|
continue;
|
|
}
|
|
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
|
|
index 93ec376..79a2117 100644
|
|
--- a/drivers/i2c/busses/i2c-s3c2410.c
|
|
+++ b/drivers/i2c/busses/i2c-s3c2410.c
|
|
@@ -753,14 +753,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
|
|
int ret;
|
|
|
|
pm_runtime_get_sync(&adap->dev);
|
|
- clk_prepare_enable(i2c->clk);
|
|
+ ret = clk_enable(i2c->clk);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
for (retry = 0; retry < adap->retries; retry++) {
|
|
|
|
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
|
|
|
|
if (ret != -EAGAIN) {
|
|
- clk_disable_unprepare(i2c->clk);
|
|
+ clk_disable(i2c->clk);
|
|
pm_runtime_put(&adap->dev);
|
|
return ret;
|
|
}
|
|
@@ -770,7 +772,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
|
|
udelay(100);
|
|
}
|
|
|
|
- clk_disable_unprepare(i2c->clk);
|
|
+ clk_disable(i2c->clk);
|
|
pm_runtime_put(&adap->dev);
|
|
return -EREMOTEIO;
|
|
}
|
|
@@ -1153,7 +1155,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
|
|
|
|
clk_prepare_enable(i2c->clk);
|
|
ret = s3c24xx_i2c_init(i2c);
|
|
- clk_disable_unprepare(i2c->clk);
|
|
+ clk_disable(i2c->clk);
|
|
if (ret != 0) {
|
|
dev_err(&pdev->dev, "I2C controller init failed\n");
|
|
return ret;
|
|
@@ -1166,6 +1168,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
|
|
i2c->irq = ret = platform_get_irq(pdev, 0);
|
|
if (ret <= 0) {
|
|
dev_err(&pdev->dev, "cannot find IRQ\n");
|
|
+ clk_unprepare(i2c->clk);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1174,6 +1177,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
|
|
|
|
if (ret != 0) {
|
|
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
|
|
+ clk_unprepare(i2c->clk);
|
|
return ret;
|
|
}
|
|
}
|
|
@@ -1181,6 +1185,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
|
|
ret = s3c24xx_i2c_register_cpufreq(i2c);
|
|
if (ret < 0) {
|
|
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
|
|
+ clk_unprepare(i2c->clk);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1197,6 +1202,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
|
|
if (ret < 0) {
|
|
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
|
|
s3c24xx_i2c_deregister_cpufreq(i2c);
|
|
+ clk_unprepare(i2c->clk);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1218,6 +1224,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
|
|
{
|
|
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
|
|
|
|
+ clk_unprepare(i2c->clk);
|
|
+
|
|
pm_runtime_disable(&i2c->adap.dev);
|
|
pm_runtime_disable(&pdev->dev);
|
|
|
|
@@ -1246,10 +1254,13 @@ static int s3c24xx_i2c_resume(struct device *dev)
|
|
{
|
|
struct platform_device *pdev = to_platform_device(dev);
|
|
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
|
|
+ int ret;
|
|
|
|
- clk_prepare_enable(i2c->clk);
|
|
+ ret = clk_enable(i2c->clk);
|
|
+ if (ret)
|
|
+ return ret;
|
|
s3c24xx_i2c_init(i2c);
|
|
- clk_disable_unprepare(i2c->clk);
|
|
+ clk_disable(i2c->clk);
|
|
i2c->suspended = 0;
|
|
|
|
return 0;
|
|
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
|
|
index 5fb80b8..43fe15a 100644
|
|
--- a/drivers/i2c/i2c-core.c
|
|
+++ b/drivers/i2c/i2c-core.c
|
|
@@ -217,6 +217,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
|
|
adap->bus_recovery_info->set_scl(adap, 1);
|
|
return i2c_generic_recovery(adap);
|
|
}
|
|
+EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
|
|
|
|
int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
|
|
{
|
|
@@ -231,6 +232,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
|
|
|
|
return ret;
|
|
}
|
|
+EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
|
|
|
|
int i2c_recover_bus(struct i2c_adapter *adap)
|
|
{
|
|
@@ -240,6 +242,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
|
|
dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
|
|
return adap->bus_recovery_info->recover_bus(adap);
|
|
}
|
|
+EXPORT_SYMBOL_GPL(i2c_recover_bus);
|
|
|
|
static int i2c_device_probe(struct device *dev)
|
|
{
|
|
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
|
|
index bfec313..6f039c3 100644
|
|
--- a/drivers/iio/accel/bma180.c
|
|
+++ b/drivers/iio/accel/bma180.c
|
|
@@ -68,13 +68,13 @@
|
|
/* Defaults values */
|
|
#define BMA180_DEF_PMODE 0
|
|
#define BMA180_DEF_BW 20
|
|
-#define BMA180_DEF_SCALE 250
|
|
+#define BMA180_DEF_SCALE 2452
|
|
|
|
/* Available values for sysfs */
|
|
#define BMA180_FLP_FREQ_AVAILABLE \
|
|
"10 20 40 75 150 300"
|
|
#define BMA180_SCALE_AVAILABLE \
|
|
- "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
|
|
+ "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
|
|
|
|
struct bma180_data {
|
|
struct i2c_client *client;
|
|
@@ -94,7 +94,7 @@ enum bma180_axis {
|
|
};
|
|
|
|
static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
|
|
-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
|
|
+static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
|
|
|
|
static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
|
|
{
|
|
@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
|
|
mutex_unlock(&data->mutex);
|
|
return ret;
|
|
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
|
|
+ if (val2)
|
|
+ return -EINVAL;
|
|
mutex_lock(&data->mutex);
|
|
ret = bma180_set_bw(data, val);
|
|
mutex_unlock(&data->mutex);
|
|
@@ -569,7 +571,7 @@ static int bma180_probe(struct i2c_client *client,
|
|
trig->ops = &bma180_trigger_ops;
|
|
iio_trigger_set_drvdata(trig, indio_dev);
|
|
data->trig = trig;
|
|
- indio_dev->trig = trig;
|
|
+ indio_dev->trig = iio_trigger_get(trig);
|
|
|
|
ret = iio_trigger_register(trig);
|
|
if (ret)
|
|
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
|
|
index 9a4e0e3..eb799a4 100644
|
|
--- a/drivers/iio/adc/ad_sigma_delta.c
|
|
+++ b/drivers/iio/adc/ad_sigma_delta.c
|
|
@@ -472,7 +472,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
|
|
goto error_free_irq;
|
|
|
|
/* select default trigger */
|
|
- indio_dev->trig = sigma_delta->trig;
|
|
+ indio_dev->trig = iio_trigger_get(sigma_delta->trig);
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
|
|
index e6bf77d..ed4e45f 100644
|
|
--- a/drivers/iio/adc/at91_adc.c
|
|
+++ b/drivers/iio/adc/at91_adc.c
|
|
@@ -58,7 +58,7 @@ struct at91_adc_caps {
|
|
u8 ts_pen_detect_sensitivity;
|
|
|
|
/* startup time calculate function */
|
|
- u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
|
|
+ u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
|
|
|
|
u8 num_channels;
|
|
struct at91_adc_reg_desc registers;
|
|
@@ -82,7 +82,7 @@ struct at91_adc_state {
|
|
u8 num_channels;
|
|
void __iomem *reg_base;
|
|
struct at91_adc_reg_desc *registers;
|
|
- u8 startup_time;
|
|
+ u32 startup_time;
|
|
u8 sample_hold_time;
|
|
bool sleep_mode;
|
|
struct iio_trigger **trig;
|
|
@@ -590,7 +590,7 @@ ret:
|
|
return ret;
|
|
}
|
|
|
|
-static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
|
|
+static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
|
|
{
|
|
/*
|
|
* Number of ticks needed to cover the startup time of the ADC
|
|
@@ -601,7 +601,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
|
|
return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
|
|
}
|
|
|
|
-static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
|
|
+static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
|
|
{
|
|
/*
|
|
* For sama5d3x and at91sam9x5, the formula changes to:
|
|
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
|
|
index 47dcb34..3a615f3 100644
|
|
--- a/drivers/iio/adc/mcp3422.c
|
|
+++ b/drivers/iio/adc/mcp3422.c
|
|
@@ -57,20 +57,11 @@
|
|
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
|
|
}
|
|
|
|
-/* LSB is in nV to eliminate floating point */
|
|
-static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
|
|
-
|
|
-/*
|
|
- * scales calculated as:
|
|
- * rates_to_lsb[sample_rate] / (1 << pga);
|
|
- * pga is 1 for 0, 2
|
|
- */
|
|
-
|
|
static const int mcp3422_scales[4][4] = {
|
|
- { 1000000, 250000, 62500, 15625 },
|
|
- { 500000 , 125000, 31250, 7812 },
|
|
- { 250000 , 62500 , 15625, 3906 },
|
|
- { 125000 , 31250 , 7812 , 1953 } };
|
|
+ { 1000000, 500000, 250000, 125000 },
|
|
+ { 250000 , 125000, 62500 , 31250 },
|
|
+ { 62500 , 31250 , 15625 , 7812 },
|
|
+ { 15625 , 7812 , 3906 , 1953 } };
|
|
|
|
/* Constant msleep times for data acquisitions */
|
|
static const int mcp3422_read_times[4] = {
|
|
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
|
|
index 53a24eb..779dac5 100644
|
|
--- a/drivers/iio/adc/twl6030-gpadc.c
|
|
+++ b/drivers/iio/adc/twl6030-gpadc.c
|
|
@@ -1003,7 +1003,7 @@ static struct platform_driver twl6030_gpadc_driver = {
|
|
|
|
module_platform_driver(twl6030_gpadc_driver);
|
|
|
|
-MODULE_ALIAS("platform: " DRIVER_NAME);
|
|
+MODULE_ALIAS("platform:" DRIVER_NAME);
|
|
MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
|
|
MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
|
|
MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
|
|
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
|
|
index 7dcf839..1be235b 100644
|
|
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
|
|
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
|
|
@@ -99,7 +99,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
|
|
dev_err(&indio_dev->dev, "Trigger Register Failed\n");
|
|
goto error_free_trig;
|
|
}
|
|
- indio_dev->trig = attrb->trigger = trig;
|
|
+ attrb->trigger = trig;
|
|
+ indio_dev->trig = iio_trigger_get(trig);
|
|
|
|
return ret;
|
|
|
|
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
|
|
index 1665c8e..e18bc67 100644
|
|
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
|
|
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
|
|
@@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
|
|
goto st_sensors_free_memory;
|
|
}
|
|
|
|
- for (i = 0; i < n * num_data_channels; i++) {
|
|
+ for (i = 0; i < n * byte_for_channel; i++) {
|
|
if (i < n)
|
|
buf[i] = rx_array[i];
|
|
else
|
|
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
|
|
index 8fc3a97..8d8ca6f 100644
|
|
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
|
|
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
|
|
@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
|
|
dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
|
|
goto iio_trigger_register_error;
|
|
}
|
|
- indio_dev->trig = sdata->trig;
|
|
+ indio_dev->trig = iio_trigger_get(sdata->trig);
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
|
|
index e8199cc..1e66651 100644
|
|
--- a/drivers/iio/dac/ad5624r_spi.c
|
|
+++ b/drivers/iio/dac/ad5624r_spi.c
|
|
@@ -22,7 +22,7 @@
|
|
#include "ad5624r.h"
|
|
|
|
static int ad5624r_spi_write(struct spi_device *spi,
|
|
- u8 cmd, u8 addr, u16 val, u8 len)
|
|
+ u8 cmd, u8 addr, u16 val, u8 shift)
|
|
{
|
|
u32 data;
|
|
u8 msg[3];
|
|
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
|
|
* 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
|
|
* for the AD5664R, AD5644R, and AD5624R, respectively.
|
|
*/
|
|
- data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
|
|
+ data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
|
|
msg[0] = data >> 16;
|
|
msg[1] = data >> 8;
|
|
msg[2] = data;
|
|
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
|
|
index 17aca4d..861ba3d 100644
|
|
--- a/drivers/iio/dac/ad5686.c
|
|
+++ b/drivers/iio/dac/ad5686.c
|
|
@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
|
|
st = iio_priv(indio_dev);
|
|
spi_set_drvdata(spi, indio_dev);
|
|
|
|
- st->reg = devm_regulator_get(&spi->dev, "vcc");
|
|
+ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
|
|
if (!IS_ERR(st->reg)) {
|
|
ret = regulator_enable(st->reg);
|
|
if (ret)
|
|
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
|
|
index ac2d69e..6f64c5c 100644
|
|
--- a/drivers/iio/gyro/Kconfig
|
|
+++ b/drivers/iio/gyro/Kconfig
|
|
@@ -93,7 +93,8 @@ config IIO_ST_GYRO_SPI_3AXIS
|
|
config ITG3200
|
|
tristate "InvenSense ITG3200 Digital 3-Axis Gyroscope I2C driver"
|
|
depends on I2C
|
|
- select IIO_TRIGGERED_BUFFER if IIO_BUFFER
|
|
+ select IIO_BUFFER
|
|
+ select IIO_TRIGGERED_BUFFER
|
|
help
|
|
Say yes here to add support for the InvenSense ITG3200 digital
|
|
3-axis gyroscope sensor.
|
|
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
|
|
index e3b3c50..eef50e9 100644
|
|
--- a/drivers/iio/gyro/itg3200_buffer.c
|
|
+++ b/drivers/iio/gyro/itg3200_buffer.c
|
|
@@ -132,7 +132,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
|
|
goto error_free_irq;
|
|
|
|
/* select default trigger */
|
|
- indio_dev->trig = st->trig;
|
|
+ indio_dev->trig = iio_trigger_get(st->trig);
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
|
|
index 0916bf6..1e8fd2e 100644
|
|
--- a/drivers/iio/imu/adis16400.h
|
|
+++ b/drivers/iio/imu/adis16400.h
|
|
@@ -165,6 +165,7 @@ struct adis16400_state {
|
|
int filt_int;
|
|
|
|
struct adis adis;
|
|
+ unsigned long avail_scan_mask[2];
|
|
};
|
|
|
|
/* At the moment triggers are only used for ring buffer
|
|
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
|
|
index 7c582f7..ccfaf3a 100644
|
|
--- a/drivers/iio/imu/adis16400_core.c
|
|
+++ b/drivers/iio/imu/adis16400_core.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/list.h>
|
|
#include <linux/module.h>
|
|
#include <linux/debugfs.h>
|
|
+#include <linux/bitops.h>
|
|
|
|
#include <linux/iio/iio.h>
|
|
#include <linux/iio/sysfs.h>
|
|
@@ -437,6 +438,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
|
|
*val = st->variant->temp_scale_nano / 1000000;
|
|
*val2 = (st->variant->temp_scale_nano % 1000000);
|
|
return IIO_VAL_INT_PLUS_MICRO;
|
|
+ case IIO_PRESSURE:
|
|
+ /* 20 uBar = 0.002kPascal */
|
|
+ *val = 0;
|
|
+ *val2 = 2000;
|
|
+ return IIO_VAL_INT_PLUS_MICRO;
|
|
default:
|
|
return -EINVAL;
|
|
}
|
|
@@ -447,7 +453,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
|
|
mutex_unlock(&indio_dev->mlock);
|
|
if (ret)
|
|
return ret;
|
|
- val16 = ((val16 & 0xFFF) << 4) >> 4;
|
|
+ val16 = sign_extend32(val16, 11);
|
|
*val = val16;
|
|
return IIO_VAL_INT;
|
|
case IIO_CHAN_INFO_OFFSET:
|
|
@@ -479,10 +485,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
|
|
}
|
|
}
|
|
|
|
-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
|
|
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
|
|
.type = IIO_VOLTAGE, \
|
|
.indexed = 1, \
|
|
- .channel = 0, \
|
|
+ .channel = chn, \
|
|
.extend_name = name, \
|
|
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
|
|
BIT(IIO_CHAN_INFO_SCALE), \
|
|
@@ -498,10 +504,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
|
|
}
|
|
|
|
#define ADIS16400_SUPPLY_CHAN(addr, bits) \
|
|
- ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
|
|
+ ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
|
|
|
|
#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
|
|
- ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
|
|
+ ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
|
|
|
|
#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
|
|
.type = IIO_ANGL_VEL, \
|
|
@@ -818,11 +824,6 @@ static const struct iio_info adis16400_info = {
|
|
.debugfs_reg_access = adis_debugfs_reg_access,
|
|
};
|
|
|
|
-static const unsigned long adis16400_burst_scan_mask[] = {
|
|
- ~0UL,
|
|
- 0,
|
|
-};
|
|
-
|
|
static const char * const adis16400_status_error_msgs[] = {
|
|
[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
|
|
[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
|
|
@@ -870,6 +871,20 @@ static const struct adis_data adis16400_data = {
|
|
BIT(ADIS16400_DIAG_STAT_POWER_LOW),
|
|
};
|
|
|
|
+static void adis16400_setup_chan_mask(struct adis16400_state *st)
|
|
+{
|
|
+ const struct adis16400_chip_info *chip_info = st->variant;
|
|
+ unsigned i;
|
|
+
|
|
+ for (i = 0; i < chip_info->num_channels; i++) {
|
|
+ const struct iio_chan_spec *ch = &chip_info->channels[i];
|
|
+
|
|
+ if (ch->scan_index >= 0 &&
|
|
+ ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
|
|
+ st->avail_scan_mask[0] |= BIT(ch->scan_index);
|
|
+ }
|
|
+}
|
|
+
|
|
static int adis16400_probe(struct spi_device *spi)
|
|
{
|
|
struct adis16400_state *st;
|
|
@@ -893,8 +908,10 @@ static int adis16400_probe(struct spi_device *spi)
|
|
indio_dev->info = &adis16400_info;
|
|
indio_dev->modes = INDIO_DIRECT_MODE;
|
|
|
|
- if (!(st->variant->flags & ADIS16400_NO_BURST))
|
|
- indio_dev->available_scan_masks = adis16400_burst_scan_mask;
|
|
+ if (!(st->variant->flags & ADIS16400_NO_BURST)) {
|
|
+ adis16400_setup_chan_mask(st);
|
|
+ indio_dev->available_scan_masks = st->avail_scan_mask;
|
|
+ }
|
|
|
|
ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
|
|
if (ret)
|
|
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
|
|
index dd4206c..5e1b117 100644
|
|
--- a/drivers/iio/imu/adis16480.c
|
|
+++ b/drivers/iio/imu/adis16480.c
|
|
@@ -110,6 +110,10 @@
|
|
struct adis16480_chip_info {
|
|
unsigned int num_channels;
|
|
const struct iio_chan_spec *channels;
|
|
+ unsigned int gyro_max_val;
|
|
+ unsigned int gyro_max_scale;
|
|
+ unsigned int accel_max_val;
|
|
+ unsigned int accel_max_scale;
|
|
};
|
|
|
|
struct adis16480 {
|
|
@@ -533,19 +537,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
|
|
static int adis16480_read_raw(struct iio_dev *indio_dev,
|
|
const struct iio_chan_spec *chan, int *val, int *val2, long info)
|
|
{
|
|
+ struct adis16480 *st = iio_priv(indio_dev);
|
|
+
|
|
switch (info) {
|
|
case IIO_CHAN_INFO_RAW:
|
|
return adis_single_conversion(indio_dev, chan, 0, val);
|
|
case IIO_CHAN_INFO_SCALE:
|
|
switch (chan->type) {
|
|
case IIO_ANGL_VEL:
|
|
- *val = 0;
|
|
- *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
|
|
- return IIO_VAL_INT_PLUS_MICRO;
|
|
+ *val = st->chip_info->gyro_max_scale;
|
|
+ *val2 = st->chip_info->gyro_max_val;
|
|
+ return IIO_VAL_FRACTIONAL;
|
|
case IIO_ACCEL:
|
|
- *val = 0;
|
|
- *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
|
|
- return IIO_VAL_INT_PLUS_MICRO;
|
|
+ *val = st->chip_info->accel_max_scale;
|
|
+ *val2 = st->chip_info->accel_max_val;
|
|
+ return IIO_VAL_FRACTIONAL;
|
|
case IIO_MAGN:
|
|
*val = 0;
|
|
*val2 = 100; /* 0.0001 gauss */
|
|
@@ -702,18 +708,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
|
|
[ADIS16375] = {
|
|
.channels = adis16485_channels,
|
|
.num_channels = ARRAY_SIZE(adis16485_channels),
|
|
+ /*
|
|
+ * storing the value in rad/degree and the scale in degree
|
|
+ * gives us the result in rad and better precession than
|
|
+ * storing the scale directly in rad.
|
|
+ */
|
|
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
|
|
+ .gyro_max_scale = 300,
|
|
+ .accel_max_val = IIO_M_S_2_TO_G(21973),
|
|
+ .accel_max_scale = 18,
|
|
},
|
|
[ADIS16480] = {
|
|
.channels = adis16480_channels,
|
|
.num_channels = ARRAY_SIZE(adis16480_channels),
|
|
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
|
|
+ .gyro_max_scale = 450,
|
|
+ .accel_max_val = IIO_M_S_2_TO_G(12500),
|
|
+ .accel_max_scale = 5,
|
|
},
|
|
[ADIS16485] = {
|
|
.channels = adis16485_channels,
|
|
.num_channels = ARRAY_SIZE(adis16485_channels),
|
|
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
|
|
+ .gyro_max_scale = 450,
|
|
+ .accel_max_val = IIO_M_S_2_TO_G(20000),
|
|
+ .accel_max_scale = 5,
|
|
},
|
|
[ADIS16488] = {
|
|
.channels = adis16480_channels,
|
|
.num_channels = ARRAY_SIZE(adis16480_channels),
|
|
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
|
|
+ .gyro_max_scale = 450,
|
|
+ .accel_max_val = IIO_M_S_2_TO_G(22500),
|
|
+ .accel_max_scale = 18,
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
|
|
index e0017c2..f53e9a8 100644
|
|
--- a/drivers/iio/imu/adis_trigger.c
|
|
+++ b/drivers/iio/imu/adis_trigger.c
|
|
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
|
|
iio_trigger_set_drvdata(adis->trig, adis);
|
|
ret = iio_trigger_register(adis->trig);
|
|
|
|
- indio_dev->trig = adis->trig;
|
|
+ indio_dev->trig = iio_trigger_get(adis->trig);
|
|
if (ret)
|
|
goto error_free_irq;
|
|
|
|
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
|
|
index 4295171..30fce67 100644
|
|
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
|
|
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
|
|
@@ -25,6 +25,16 @@
|
|
#include <linux/poll.h>
|
|
#include "inv_mpu_iio.h"
|
|
|
|
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
|
|
+{
|
|
+ unsigned long flags;
|
|
+
|
|
+ /* take the spin lock sem to avoid interrupt kick in */
|
|
+ spin_lock_irqsave(&st->time_stamp_lock, flags);
|
|
+ kfifo_reset(&st->timestamps);
|
|
+ spin_unlock_irqrestore(&st->time_stamp_lock, flags);
|
|
+}
|
|
+
|
|
int inv_reset_fifo(struct iio_dev *indio_dev)
|
|
{
|
|
int result;
|
|
@@ -51,6 +61,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
|
|
INV_MPU6050_BIT_FIFO_RST);
|
|
if (result)
|
|
goto reset_fifo_fail;
|
|
+
|
|
+ /* clear timestamps fifo */
|
|
+ inv_clear_kfifo(st);
|
|
+
|
|
/* enable interrupt */
|
|
if (st->chip_config.accl_fifo_enable ||
|
|
st->chip_config.gyro_fifo_enable) {
|
|
@@ -84,16 +98,6 @@ reset_fifo_fail:
|
|
return result;
|
|
}
|
|
|
|
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
|
|
-{
|
|
- unsigned long flags;
|
|
-
|
|
- /* take the spin lock sem to avoid interrupt kick in */
|
|
- spin_lock_irqsave(&st->time_stamp_lock, flags);
|
|
- kfifo_reset(&st->timestamps);
|
|
- spin_unlock_irqrestore(&st->time_stamp_lock, flags);
|
|
-}
|
|
-
|
|
/**
|
|
* inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
|
|
*/
|
|
@@ -185,7 +189,6 @@ end_session:
|
|
flush_fifo:
|
|
/* Flush HW and SW FIFOs. */
|
|
inv_reset_fifo(indio_dev);
|
|
- inv_clear_kfifo(st);
|
|
mutex_unlock(&indio_dev->mlock);
|
|
iio_trigger_notify_done(indio_dev->trig);
|
|
|
|
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
|
|
index 03b9372..926fcce 100644
|
|
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
|
|
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
|
|
@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
|
|
ret = iio_trigger_register(st->trig);
|
|
if (ret)
|
|
goto error_free_irq;
|
|
- indio_dev->trig = st->trig;
|
|
+ indio_dev->trig = iio_trigger_get(st->trig);
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
|
|
index fe25042..37b52bd 100644
|
|
--- a/drivers/iio/industrialio-buffer.c
|
|
+++ b/drivers/iio/industrialio-buffer.c
|
|
@@ -96,7 +96,7 @@ unsigned int iio_buffer_poll(struct file *filp,
|
|
struct iio_buffer *rb = indio_dev->buffer;
|
|
|
|
if (!indio_dev->info)
|
|
- return -ENODEV;
|
|
+ return 0;
|
|
|
|
poll_wait(filp, &rb->pollq, wait);
|
|
if (iio_buffer_data_available(rb))
|
|
@@ -953,7 +953,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
|
|
|
|
/* Now we have the two masks, work from least sig and build up sizes */
|
|
for_each_set_bit(out_ind,
|
|
- indio_dev->active_scan_mask,
|
|
+ buffer->scan_mask,
|
|
indio_dev->masklength) {
|
|
in_ind = find_next_bit(indio_dev->active_scan_mask,
|
|
indio_dev->masklength,
|
|
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
|
|
index f9360f4..05b5994 100644
|
|
--- a/drivers/iio/industrialio-event.c
|
|
+++ b/drivers/iio/industrialio-event.c
|
|
@@ -83,7 +83,7 @@ static unsigned int iio_event_poll(struct file *filep,
|
|
unsigned int events = 0;
|
|
|
|
if (!indio_dev->info)
|
|
- return -ENODEV;
|
|
+ return events;
|
|
|
|
poll_wait(filep, &ev_int->wait, wait);
|
|
|
|
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
|
|
index 1e8e94d..4fc88e6 100644
|
|
--- a/drivers/iio/inkern.c
|
|
+++ b/drivers/iio/inkern.c
|
|
@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
|
|
index = of_property_match_string(np, "io-channel-names",
|
|
name);
|
|
chan = of_iio_channel_get(np, index);
|
|
- if (!IS_ERR(chan))
|
|
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
|
|
break;
|
|
else if (name && index >= 0) {
|
|
pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
|
|
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
|
|
index 52bbcfa..476aa13 100644
|
|
--- a/drivers/iio/magnetometer/st_magn_core.c
|
|
+++ b/drivers/iio/magnetometer/st_magn_core.c
|
|
@@ -42,7 +42,8 @@
|
|
#define ST_MAGN_FS_AVL_5600MG 5600
|
|
#define ST_MAGN_FS_AVL_8000MG 8000
|
|
#define ST_MAGN_FS_AVL_8100MG 8100
|
|
-#define ST_MAGN_FS_AVL_10000MG 10000
|
|
+#define ST_MAGN_FS_AVL_12000MG 12000
|
|
+#define ST_MAGN_FS_AVL_16000MG 16000
|
|
|
|
/* CUSTOM VALUES FOR SENSOR 1 */
|
|
#define ST_MAGN_1_WAI_EXP 0x3c
|
|
@@ -69,20 +70,20 @@
|
|
#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
|
|
#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
|
|
#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
|
|
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100
|
|
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855
|
|
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670
|
|
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450
|
|
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400
|
|
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330
|
|
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230
|
|
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980
|
|
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760
|
|
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600
|
|
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400
|
|
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355
|
|
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295
|
|
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205
|
|
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
|
|
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
|
|
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
|
|
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
|
|
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
|
|
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
|
|
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
|
|
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
|
|
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
|
|
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
|
|
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
|
|
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
|
|
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
|
|
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
|
|
#define ST_MAGN_1_MULTIREAD_BIT false
|
|
|
|
/* CUSTOM VALUES FOR SENSOR 2 */
|
|
@@ -105,10 +106,12 @@
|
|
#define ST_MAGN_2_FS_MASK 0x60
|
|
#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
|
|
#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
|
|
-#define ST_MAGN_2_FS_AVL_10000_VAL 0x02
|
|
-#define ST_MAGN_2_FS_AVL_4000_GAIN 430
|
|
-#define ST_MAGN_2_FS_AVL_8000_GAIN 230
|
|
-#define ST_MAGN_2_FS_AVL_10000_GAIN 230
|
|
+#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
|
|
+#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
|
|
+#define ST_MAGN_2_FS_AVL_4000_GAIN 146
|
|
+#define ST_MAGN_2_FS_AVL_8000_GAIN 292
|
|
+#define ST_MAGN_2_FS_AVL_12000_GAIN 438
|
|
+#define ST_MAGN_2_FS_AVL_16000_GAIN 584
|
|
#define ST_MAGN_2_MULTIREAD_BIT false
|
|
#define ST_MAGN_2_OUT_X_L_ADDR 0x28
|
|
#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
|
|
@@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = {
|
|
.gain = ST_MAGN_2_FS_AVL_8000_GAIN,
|
|
},
|
|
[2] = {
|
|
- .num = ST_MAGN_FS_AVL_10000MG,
|
|
- .value = ST_MAGN_2_FS_AVL_10000_VAL,
|
|
- .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
|
|
+ .num = ST_MAGN_FS_AVL_12000MG,
|
|
+ .value = ST_MAGN_2_FS_AVL_12000_VAL,
|
|
+ .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
|
|
+ },
|
|
+ [3] = {
|
|
+ .num = ST_MAGN_FS_AVL_16000MG,
|
|
+ .value = ST_MAGN_2_FS_AVL_16000_VAL,
|
|
+ .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
|
|
},
|
|
},
|
|
},
|
|
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
|
|
index 84a0789..7a80509 100644
|
|
--- a/drivers/iio/temperature/tmp006.c
|
|
+++ b/drivers/iio/temperature/tmp006.c
|
|
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
|
|
struct tmp006_data *data = iio_priv(indio_dev);
|
|
int i;
|
|
|
|
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
|
|
+ return -EINVAL;
|
|
+
|
|
for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
|
|
if ((val == tmp006_freqs[i][0]) &&
|
|
(val2 == tmp006_freqs[i][1])) {
|
|
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
|
|
index 42c3058..dbd7d66 100644
|
|
--- a/drivers/infiniband/core/cma.c
|
|
+++ b/drivers/infiniband/core/cma.c
|
|
@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
|
|
memcpy(&ib->sib_addr, &path->dgid, 16);
|
|
}
|
|
|
|
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
|
|
+{
|
|
+ if (ss->ss_family == AF_INET)
|
|
+ return ((struct sockaddr_in *)ss)->sin_port;
|
|
+ else if (ss->ss_family == AF_INET6)
|
|
+ return ((struct sockaddr_in6 *)ss)->sin6_port;
|
|
+ BUG();
|
|
+}
|
|
+
|
|
static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
|
|
struct cma_hdr *hdr)
|
|
{
|
|
- struct sockaddr_in *listen4, *ip4;
|
|
+ struct sockaddr_in *ip4;
|
|
|
|
- listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
|
|
ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
|
|
- ip4->sin_family = listen4->sin_family;
|
|
+ ip4->sin_family = AF_INET;
|
|
ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
|
|
- ip4->sin_port = listen4->sin_port;
|
|
+ ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
|
|
|
|
ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
|
|
- ip4->sin_family = listen4->sin_family;
|
|
+ ip4->sin_family = AF_INET;
|
|
ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
|
|
ip4->sin_port = hdr->port;
|
|
}
|
|
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
|
|
static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
|
|
struct cma_hdr *hdr)
|
|
{
|
|
- struct sockaddr_in6 *listen6, *ip6;
|
|
+ struct sockaddr_in6 *ip6;
|
|
|
|
- listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
|
|
ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
|
|
- ip6->sin6_family = listen6->sin6_family;
|
|
+ ip6->sin6_family = AF_INET6;
|
|
ip6->sin6_addr = hdr->dst_addr.ip6;
|
|
- ip6->sin6_port = listen6->sin6_port;
|
|
+ ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
|
|
|
|
ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
|
|
- ip6->sin6_family = listen6->sin6_family;
|
|
+ ip6->sin6_family = AF_INET6;
|
|
ip6->sin6_addr = hdr->src_addr.ip6;
|
|
ip6->sin6_port = hdr->port;
|
|
}
|
|
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
|
|
index 3d2e489..ff9163d 100644
|
|
--- a/drivers/infiniband/core/iwcm.c
|
|
+++ b/drivers/infiniband/core/iwcm.c
|
|
@@ -46,6 +46,7 @@
|
|
#include <linux/completion.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/sysctl.h>
|
|
|
|
#include <rdma/iw_cm.h>
|
|
#include <rdma/ib_addr.h>
|
|
@@ -65,6 +66,20 @@ struct iwcm_work {
|
|
struct list_head free_list;
|
|
};
|
|
|
|
+static unsigned int default_backlog = 256;
|
|
+
|
|
+static struct ctl_table_header *iwcm_ctl_table_hdr;
|
|
+static struct ctl_table iwcm_ctl_table[] = {
|
|
+ {
|
|
+ .procname = "default_backlog",
|
|
+ .data = &default_backlog,
|
|
+ .maxlen = sizeof(default_backlog),
|
|
+ .mode = 0644,
|
|
+ .proc_handler = proc_dointvec,
|
|
+ },
|
|
+ { }
|
|
+};
|
|
+
|
|
/*
|
|
* The following services provide a mechanism for pre-allocating iwcm_work
|
|
* elements. The design pre-allocates them based on the cm_id type:
|
|
@@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
|
|
|
|
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
|
|
|
|
+ if (!backlog)
|
|
+ backlog = default_backlog;
|
|
+
|
|
ret = alloc_work_entries(cm_id_priv, backlog);
|
|
if (ret)
|
|
return ret;
|
|
@@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
|
|
if (!iwcm_wq)
|
|
return -ENOMEM;
|
|
|
|
+ iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
|
|
+ iwcm_ctl_table);
|
|
+ if (!iwcm_ctl_table_hdr) {
|
|
+ pr_err("iw_cm: couldn't register sysctl paths\n");
|
|
+ destroy_workqueue(iwcm_wq);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
static void __exit iw_cm_cleanup(void)
|
|
{
|
|
+ unregister_net_sysctl_table(iwcm_ctl_table_hdr);
|
|
destroy_workqueue(iwcm_wq);
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
|
|
index 56a4b7c..45d67e9 100644
|
|
--- a/drivers/infiniband/core/ucma.c
|
|
+++ b/drivers/infiniband/core/ucma.c
|
|
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
|
|
if (!optlen)
|
|
return -EINVAL;
|
|
|
|
+ memset(&sa_path, 0, sizeof(sa_path));
|
|
+ sa_path.vlan_id = 0xffff;
|
|
+
|
|
ib_sa_unpack_path(path_data->path_rec, &sa_path);
|
|
ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
|
|
if (ret)
|
|
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
|
|
index a841123..c1fef27 100644
|
|
--- a/drivers/infiniband/core/umem.c
|
|
+++ b/drivers/infiniband/core/umem.c
|
|
@@ -94,6 +94,17 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
if (dmasync)
|
|
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
|
|
|
|
+ if (!size)
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ /*
|
|
+ * If the combination of the addr and size requested for this memory
|
|
+ * region causes an integer overflow, return error.
|
|
+ */
|
|
+ if (((addr + size) < addr) ||
|
|
+ PAGE_ALIGN(addr + size) < (addr + size))
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
if (!can_do_mlock())
|
|
return ERR_PTR(-EPERM);
|
|
|
|
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
|
|
index ea6203e..2adc143 100644
|
|
--- a/drivers/infiniband/core/uverbs_cmd.c
|
|
+++ b/drivers/infiniband/core/uverbs_cmd.c
|
|
@@ -1964,20 +1964,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
|
|
if (qp->real_qp == qp) {
|
|
ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
|
|
if (ret)
|
|
- goto out;
|
|
+ goto release_qp;
|
|
ret = qp->device->modify_qp(qp, attr,
|
|
modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
|
|
} else {
|
|
ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
|
|
}
|
|
|
|
- put_qp_read(qp);
|
|
-
|
|
if (ret)
|
|
- goto out;
|
|
+ goto release_qp;
|
|
|
|
ret = in_len;
|
|
|
|
+release_qp:
|
|
+ put_qp_read(qp);
|
|
+
|
|
out:
|
|
kfree(attr);
|
|
|
|
@@ -2425,6 +2426,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
|
attr.grh.sgid_index = cmd.attr.grh.sgid_index;
|
|
attr.grh.hop_limit = cmd.attr.grh.hop_limit;
|
|
attr.grh.traffic_class = cmd.attr.grh.traffic_class;
|
|
+ attr.vlan_id = 0;
|
|
+ memset(&attr.dmac, 0, sizeof(attr.dmac));
|
|
memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
|
|
|
|
ah = ib_create_ah(pd, &attr);
|
|
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
|
|
index 08219fb..7a515c8 100644
|
|
--- a/drivers/infiniband/core/uverbs_main.c
|
|
+++ b/drivers/infiniband/core/uverbs_main.c
|
|
@@ -476,6 +476,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
|
|
|
|
entry->desc.async.element = element;
|
|
entry->desc.async.event_type = event;
|
|
+ entry->desc.async.reserved = 0;
|
|
entry->counter = counter;
|
|
|
|
list_add_tail(&entry->list, &file->async_file->event_list);
|
|
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
|
|
index e7bee46..abd9724 100644
|
|
--- a/drivers/infiniband/core/uverbs_marshall.c
|
|
+++ b/drivers/infiniband/core/uverbs_marshall.c
|
|
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
|
|
dst->packet_life_time = src->packet_life_time;
|
|
dst->preference = src->preference;
|
|
dst->packet_life_time_selector = src->packet_life_time_selector;
|
|
+
|
|
+ memset(dst->smac, 0, sizeof(dst->smac));
|
|
+ memset(dst->dmac, 0, sizeof(dst->dmac));
|
|
+ dst->vlan_id = 0xffff;
|
|
}
|
|
EXPORT_SYMBOL(ib_copy_path_rec_from_user);
|
|
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
|
|
index f2a3f48..2592ab5 100644
|
|
--- a/drivers/infiniband/hw/mlx4/mad.c
|
|
+++ b/drivers/infiniband/hw/mlx4/mad.c
|
|
@@ -64,6 +64,14 @@ enum {
|
|
#define GUID_TBL_BLK_NUM_ENTRIES 8
|
|
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
|
|
|
|
+/* Counters should be saturate once they reach their maximum value */
|
|
+#define ASSIGN_32BIT_COUNTER(counter, value) do {\
|
|
+ if ((value) > U32_MAX) \
|
|
+ counter = cpu_to_be32(U32_MAX); \
|
|
+ else \
|
|
+ counter = cpu_to_be32(value); \
|
|
+} while (0)
|
|
+
|
|
struct mlx4_mad_rcv_buf {
|
|
struct ib_grh grh;
|
|
u8 payload[256];
|
|
@@ -730,10 +738,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
|
|
static void edit_counter(struct mlx4_counter *cnt,
|
|
struct ib_pma_portcounters *pma_cnt)
|
|
{
|
|
- pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
|
|
- pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
|
|
- pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
|
|
- pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
|
|
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
|
|
+ (be64_to_cpu(cnt->tx_bytes) >> 2));
|
|
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
|
|
+ (be64_to_cpu(cnt->rx_bytes) >> 2));
|
|
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
|
|
+ be64_to_cpu(cnt->tx_frames));
|
|
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
|
|
+ be64_to_cpu(cnt->rx_frames));
|
|
}
|
|
|
|
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
|
|
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
|
|
index f9c12e9..1a3d924 100644
|
|
--- a/drivers/infiniband/hw/mlx4/main.c
|
|
+++ b/drivers/infiniband/hw/mlx4/main.c
|
|
@@ -1161,8 +1161,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
|
|
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
|
|
u64 reg_id;
|
|
struct mlx4_ib_steering *ib_steering = NULL;
|
|
- enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
|
|
- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
|
|
+ enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
|
|
|
|
if (mdev->dev->caps.steering_mode ==
|
|
MLX4_STEERING_MODE_DEVICE_MANAGED) {
|
|
@@ -1175,8 +1174,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
|
|
!!(mqp->flags &
|
|
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
|
|
prot, ®_id);
|
|
- if (err)
|
|
+ if (err) {
|
|
+ pr_err("multicast attach op failed, err %d\n", err);
|
|
goto err_malloc;
|
|
+ }
|
|
|
|
err = add_gid_entry(ibqp, gid);
|
|
if (err)
|
|
@@ -1224,8 +1225,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
|
|
struct net_device *ndev;
|
|
struct mlx4_ib_gid_entry *ge;
|
|
u64 reg_id = 0;
|
|
- enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
|
|
- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
|
|
+ enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
|
|
|
|
if (mdev->dev->caps.steering_mode ==
|
|
MLX4_STEERING_MODE_DEVICE_MANAGED) {
|
|
@@ -1622,6 +1622,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
|
|
struct inet6_dev *in6_dev;
|
|
union ib_gid *pgid;
|
|
struct inet6_ifaddr *ifp;
|
|
+ union ib_gid default_gid;
|
|
#endif
|
|
union ib_gid gid;
|
|
|
|
@@ -1642,12 +1643,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
|
|
in_dev_put(in_dev);
|
|
}
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
+ mlx4_make_default_gid(dev, &default_gid);
|
|
/* IPv6 gids */
|
|
in6_dev = in6_dev_get(dev);
|
|
if (in6_dev) {
|
|
read_lock_bh(&in6_dev->lock);
|
|
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
|
|
pgid = (union ib_gid *)&ifp->addr;
|
|
+ if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
|
|
+ continue;
|
|
update_gid_table(ibdev, port, pgid, 0, 0);
|
|
}
|
|
read_unlock_bh(&in6_dev->lock);
|
|
@@ -1723,31 +1727,34 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
|
|
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
|
|
IB_PORT_ACTIVE : IB_PORT_DOWN;
|
|
mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
|
|
- } else {
|
|
- reset_gid_table(ibdev, port);
|
|
- }
|
|
- /* if using bonding/team and a slave port is down, we don't the bond IP
|
|
- * based gids in the table since flows that select port by gid may get
|
|
- * the down port.
|
|
- */
|
|
- if (curr_master && (port_state == IB_PORT_DOWN)) {
|
|
- reset_gid_table(ibdev, port);
|
|
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
|
|
- }
|
|
- /* if bonding is used it is possible that we add it to masters
|
|
- * only after IP address is assigned to the net bonding
|
|
- * interface.
|
|
- */
|
|
- if (curr_master && (old_master != curr_master)) {
|
|
- reset_gid_table(ibdev, port);
|
|
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
|
|
- mlx4_ib_get_dev_addr(curr_master, ibdev, port);
|
|
- }
|
|
+ /* if using bonding/team and a slave port is down, we
|
|
+ * don't the bond IP based gids in the table since
|
|
+ * flows that select port by gid may get the down port.
|
|
+ */
|
|
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
|
|
+ reset_gid_table(ibdev, port);
|
|
+ mlx4_ib_set_default_gid(ibdev,
|
|
+ curr_netdev, port);
|
|
+ }
|
|
+ /* if bonding is used it is possible that we add it to
|
|
+ * masters only after IP address is assigned to the
|
|
+ * net bonding interface.
|
|
+ */
|
|
+ if (curr_master && (old_master != curr_master)) {
|
|
+ reset_gid_table(ibdev, port);
|
|
+ mlx4_ib_set_default_gid(ibdev,
|
|
+ curr_netdev, port);
|
|
+ mlx4_ib_get_dev_addr(curr_master, ibdev, port);
|
|
+ }
|
|
|
|
- if (!curr_master && (old_master != curr_master)) {
|
|
+ if (!curr_master && (old_master != curr_master)) {
|
|
+ reset_gid_table(ibdev, port);
|
|
+ mlx4_ib_set_default_gid(ibdev,
|
|
+ curr_netdev, port);
|
|
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
|
|
+ }
|
|
+ } else {
|
|
reset_gid_table(ibdev, port);
|
|
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
|
|
- mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
|
|
index d8f4d1f..8d7cd98 100644
|
|
--- a/drivers/infiniband/hw/mlx4/qp.c
|
|
+++ b/drivers/infiniband/hw/mlx4/qp.c
|
|
@@ -2274,8 +2274,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
|
|
|
|
memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
|
|
|
|
- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
|
|
- wr->wr.ud.hlen);
|
|
+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
|
|
*lso_seg_len = halign;
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
|
|
index 1946101..675d3c7 100644
|
|
--- a/drivers/infiniband/hw/qib/qib.h
|
|
+++ b/drivers/infiniband/hw/qib/qib.h
|
|
@@ -1080,12 +1080,6 @@ struct qib_devdata {
|
|
/* control high-level access to EEPROM */
|
|
struct mutex eep_lock;
|
|
uint64_t traffic_wds;
|
|
- /* active time is kept in seconds, but logged in hours */
|
|
- atomic_t active_time;
|
|
- /* Below are nominal shadow of EEPROM, new since last EEPROM update */
|
|
- uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
|
|
- uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
|
|
- uint16_t eep_hrs;
|
|
/*
|
|
* masks for which bits of errs, hwerrs that cause
|
|
* each of the counters to increment.
|
|
@@ -1307,8 +1301,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
|
|
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
|
|
const void *buffer, int len);
|
|
void qib_get_eeprom_info(struct qib_devdata *);
|
|
-int qib_update_eeprom_log(struct qib_devdata *dd);
|
|
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
|
|
+#define qib_inc_eeprom_err(dd, eidx, incr)
|
|
void qib_dump_lookup_output_queue(struct qib_devdata *);
|
|
void qib_force_pio_avail_update(struct qib_devdata *);
|
|
void qib_clear_symerror_on_linkup(unsigned long opaque);
|
|
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
|
|
index 799a0c3..6abd3ed 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
|
|
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
|
|
struct qib_qp_iter *iter;
|
|
loff_t n = *pos;
|
|
|
|
+ rcu_read_lock();
|
|
iter = qib_qp_iter_init(s->private);
|
|
if (!iter)
|
|
return NULL;
|
|
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
|
|
|
|
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
|
|
{
|
|
- /* nothing for now */
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
|
|
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
|
|
index 4d5d71a..e2280b0 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
|
|
@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
|
|
"Board SN %s did not pass functional test: %s\n",
|
|
dd->serial, ifp->if_comment);
|
|
|
|
- memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
|
|
- /*
|
|
- * Power-on (actually "active") hours are kept as little-endian value
|
|
- * in EEPROM, but as seconds in a (possibly as small as 24-bit)
|
|
- * atomic_t while running.
|
|
- */
|
|
- atomic_set(&dd->active_time, 0);
|
|
- dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
|
|
-
|
|
done:
|
|
vfree(buf);
|
|
|
|
bail:;
|
|
}
|
|
|
|
-/**
|
|
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
|
|
- * @dd: the qlogic_ib device
|
|
- *
|
|
- * Although the time is kept as seconds in the qib_devdata struct, it is
|
|
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
|
|
- * First-cut code reads whole (expected) struct qib_flash, modifies,
|
|
- * re-writes. Future direction: read/write only what we need, assuming
|
|
- * that the EEPROM had to have been "good enough" for driver init, and
|
|
- * if not, we aren't making it worse.
|
|
- *
|
|
- */
|
|
-int qib_update_eeprom_log(struct qib_devdata *dd)
|
|
-{
|
|
- void *buf;
|
|
- struct qib_flash *ifp;
|
|
- int len, hi_water;
|
|
- uint32_t new_time, new_hrs;
|
|
- u8 csum;
|
|
- int ret, idx;
|
|
- unsigned long flags;
|
|
-
|
|
- /* first, check if we actually need to do anything. */
|
|
- ret = 0;
|
|
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
|
|
- if (dd->eep_st_new_errs[idx]) {
|
|
- ret = 1;
|
|
- break;
|
|
- }
|
|
- }
|
|
- new_time = atomic_read(&dd->active_time);
|
|
-
|
|
- if (ret == 0 && new_time < 3600)
|
|
- goto bail;
|
|
-
|
|
- /*
|
|
- * The quick-check above determined that there is something worthy
|
|
- * of logging, so get current contents and do a more detailed idea.
|
|
- * read full flash, not just currently used part, since it may have
|
|
- * been written with a newer definition
|
|
- */
|
|
- len = sizeof(struct qib_flash);
|
|
- buf = vmalloc(len);
|
|
- ret = 1;
|
|
- if (!buf) {
|
|
- qib_dev_err(dd,
|
|
- "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
|
|
- len);
|
|
- goto bail;
|
|
- }
|
|
-
|
|
- /* Grab semaphore and read current EEPROM. If we get an
|
|
- * error, let go, but if not, keep it until we finish write.
|
|
- */
|
|
- ret = mutex_lock_interruptible(&dd->eep_lock);
|
|
- if (ret) {
|
|
- qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
|
|
- goto free_bail;
|
|
- }
|
|
- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
|
|
- if (ret) {
|
|
- mutex_unlock(&dd->eep_lock);
|
|
- qib_dev_err(dd, "Unable read EEPROM for logging\n");
|
|
- goto free_bail;
|
|
- }
|
|
- ifp = (struct qib_flash *)buf;
|
|
-
|
|
- csum = flash_csum(ifp, 0);
|
|
- if (csum != ifp->if_csum) {
|
|
- mutex_unlock(&dd->eep_lock);
|
|
- qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
|
|
- csum, ifp->if_csum);
|
|
- ret = 1;
|
|
- goto free_bail;
|
|
- }
|
|
- hi_water = 0;
|
|
- spin_lock_irqsave(&dd->eep_st_lock, flags);
|
|
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
|
|
- int new_val = dd->eep_st_new_errs[idx];
|
|
- if (new_val) {
|
|
- /*
|
|
- * If we have seen any errors, add to EEPROM values
|
|
- * We need to saturate at 0xFF (255) and we also
|
|
- * would need to adjust the checksum if we were
|
|
- * trying to minimize EEPROM traffic
|
|
- * Note that we add to actual current count in EEPROM,
|
|
- * in case it was altered while we were running.
|
|
- */
|
|
- new_val += ifp->if_errcntp[idx];
|
|
- if (new_val > 0xFF)
|
|
- new_val = 0xFF;
|
|
- if (ifp->if_errcntp[idx] != new_val) {
|
|
- ifp->if_errcntp[idx] = new_val;
|
|
- hi_water = offsetof(struct qib_flash,
|
|
- if_errcntp) + idx;
|
|
- }
|
|
- /*
|
|
- * update our shadow (used to minimize EEPROM
|
|
- * traffic), to match what we are about to write.
|
|
- */
|
|
- dd->eep_st_errs[idx] = new_val;
|
|
- dd->eep_st_new_errs[idx] = 0;
|
|
- }
|
|
- }
|
|
- /*
|
|
- * Now update active-time. We would like to round to the nearest hour
|
|
- * but unless atomic_t are sure to be proper signed ints we cannot,
|
|
- * because we need to account for what we "transfer" to EEPROM and
|
|
- * if we log an hour at 31 minutes, then we would need to set
|
|
- * active_time to -29 to accurately count the _next_ hour.
|
|
- */
|
|
- if (new_time >= 3600) {
|
|
- new_hrs = new_time / 3600;
|
|
- atomic_sub((new_hrs * 3600), &dd->active_time);
|
|
- new_hrs += dd->eep_hrs;
|
|
- if (new_hrs > 0xFFFF)
|
|
- new_hrs = 0xFFFF;
|
|
- dd->eep_hrs = new_hrs;
|
|
- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
|
|
- ifp->if_powerhour[0] = new_hrs & 0xFF;
|
|
- hi_water = offsetof(struct qib_flash, if_powerhour);
|
|
- }
|
|
- if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
|
|
- ifp->if_powerhour[1] = new_hrs >> 8;
|
|
- hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
|
|
- }
|
|
- }
|
|
- /*
|
|
- * There is a tiny possibility that we could somehow fail to write
|
|
- * the EEPROM after updating our shadows, but problems from holding
|
|
- * the spinlock too long are a much bigger issue.
|
|
- */
|
|
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
|
|
- if (hi_water) {
|
|
- /* we made some change to the data, uopdate cksum and write */
|
|
- csum = flash_csum(ifp, 1);
|
|
- ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
|
|
- }
|
|
- mutex_unlock(&dd->eep_lock);
|
|
- if (ret)
|
|
- qib_dev_err(dd, "Failed updating EEPROM\n");
|
|
-
|
|
-free_bail:
|
|
- vfree(buf);
|
|
-bail:
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/**
|
|
- * qib_inc_eeprom_err - increment one of the four error counters
|
|
- * that are logged to EEPROM.
|
|
- * @dd: the qlogic_ib device
|
|
- * @eidx: 0..3, the counter to increment
|
|
- * @incr: how much to add
|
|
- *
|
|
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
|
|
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
|
|
- * is called, but it can only be called in a context that allows sleep.
|
|
- * This function can be called even at interrupt level.
|
|
- */
|
|
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
|
|
-{
|
|
- uint new_val;
|
|
- unsigned long flags;
|
|
-
|
|
- spin_lock_irqsave(&dd->eep_st_lock, flags);
|
|
- new_val = dd->eep_st_new_errs[eidx] + incr;
|
|
- if (new_val > 255)
|
|
- new_val = 255;
|
|
- dd->eep_st_new_errs[eidx] = new_val;
|
|
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
|
|
-}
|
|
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
|
|
index 84e593d..295f631 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
|
|
@@ -2682,8 +2682,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
|
|
spin_lock_irqsave(&dd->eep_st_lock, flags);
|
|
traffic_wds -= dd->traffic_wds;
|
|
dd->traffic_wds += traffic_wds;
|
|
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
|
|
- atomic_add(5, &dd->active_time); /* S/B #define */
|
|
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
|
|
|
|
qib_chk_6120_errormask(dd);
|
|
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
|
|
index 454c2e7..c86e71b 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
|
|
@@ -3299,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
|
|
spin_lock_irqsave(&dd->eep_st_lock, flags);
|
|
traffic_wds -= dd->traffic_wds;
|
|
dd->traffic_wds += traffic_wds;
|
|
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
|
|
- atomic_add(5, &dd->active_time); /* S/B #define */
|
|
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
|
|
done:
|
|
mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
|
|
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
|
|
index d1bd213..0f8d1f0 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
|
|
@@ -5191,8 +5191,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
|
|
spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
|
|
traffic_wds -= ppd->dd->traffic_wds;
|
|
ppd->dd->traffic_wds += traffic_wds;
|
|
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
|
|
- atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
|
|
spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
|
|
if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
|
|
QIB_IB_QDR) &&
|
|
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
|
|
index 76c3e17..8c9bb6c 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_init.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_init.c
|
|
@@ -922,7 +922,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
|
|
}
|
|
}
|
|
|
|
- qib_update_eeprom_log(dd);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
|
|
index 0cad0c4..6a71b2b 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_qp.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_qp.c
|
|
@@ -1324,7 +1324,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
|
|
struct qib_qp *pqp = iter->qp;
|
|
struct qib_qp *qp;
|
|
|
|
- rcu_read_lock();
|
|
for (; n < dev->qp_table_size; n++) {
|
|
if (pqp)
|
|
qp = rcu_dereference(pqp->next);
|
|
@@ -1332,18 +1331,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
|
|
qp = rcu_dereference(dev->qp_table[n]);
|
|
pqp = qp;
|
|
if (qp) {
|
|
- if (iter->qp)
|
|
- atomic_dec(&iter->qp->refcount);
|
|
- atomic_inc(&qp->refcount);
|
|
- rcu_read_unlock();
|
|
iter->qp = qp;
|
|
iter->n = n;
|
|
return 0;
|
|
}
|
|
}
|
|
- rcu_read_unlock();
|
|
- if (iter->qp)
|
|
- atomic_dec(&iter->qp->refcount);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
|
|
index 3c8e4e3..b9ccbda 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
|
|
@@ -611,28 +611,6 @@ bail:
|
|
return ret < 0 ? ret : count;
|
|
}
|
|
|
|
-static ssize_t show_logged_errs(struct device *device,
|
|
- struct device_attribute *attr, char *buf)
|
|
-{
|
|
- struct qib_ibdev *dev =
|
|
- container_of(device, struct qib_ibdev, ibdev.dev);
|
|
- struct qib_devdata *dd = dd_from_dev(dev);
|
|
- int idx, count;
|
|
-
|
|
- /* force consistency with actual EEPROM */
|
|
- if (qib_update_eeprom_log(dd) != 0)
|
|
- return -ENXIO;
|
|
-
|
|
- count = 0;
|
|
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
|
|
- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
|
|
- dd->eep_st_errs[idx],
|
|
- idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
|
|
- }
|
|
-
|
|
- return count;
|
|
-}
|
|
-
|
|
/*
|
|
* Dump tempsense regs. in decimal, to ease shell-scripts.
|
|
*/
|
|
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
|
|
static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
|
|
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
|
|
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
|
|
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
|
|
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
|
|
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
|
|
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
|
|
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
|
|
&dev_attr_nfreectxts,
|
|
&dev_attr_serial,
|
|
&dev_attr_boardversion,
|
|
- &dev_attr_logged_errors,
|
|
&dev_attr_tempsense,
|
|
&dev_attr_localbus_info,
|
|
&dev_attr_chip_reset,
|
|
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
|
|
index 156205a..dd2b610 100644
|
|
--- a/drivers/infiniband/ulp/isert/ib_isert.c
|
|
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
|
|
@@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex);
|
|
static LIST_HEAD(device_list);
|
|
static struct workqueue_struct *isert_rx_wq;
|
|
static struct workqueue_struct *isert_comp_wq;
|
|
+static struct workqueue_struct *isert_release_wq;
|
|
|
|
static void
|
|
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
|
|
@@ -52,6 +53,13 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
|
|
static int
|
|
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|
struct isert_rdma_wr *wr);
|
|
+static int
|
|
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
|
|
+static int
|
|
+isert_rdma_accept(struct isert_conn *isert_conn);
|
|
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
|
|
+
|
|
+static void isert_release_work(struct work_struct *work);
|
|
|
|
static void
|
|
isert_qp_event_callback(struct ib_event *e, void *context)
|
|
@@ -112,9 +120,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
|
|
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
|
|
/*
|
|
* FIXME: Use devattr.max_sge - 2 for max_send_sge as
|
|
- * work-around for RDMA_READ..
|
|
+ * work-around for RDMA_READs with ConnectX-2.
|
|
+ *
|
|
+ * Also, still make sure to have at least two SGEs for
|
|
+ * outgoing control PDU responses.
|
|
*/
|
|
- attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
|
|
+ attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
|
|
isert_conn->max_sge = attr.cap.max_send_sge;
|
|
|
|
attr.cap.max_recv_sge = 1;
|
|
@@ -129,12 +140,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
|
|
ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
|
|
if (ret) {
|
|
pr_err("rdma_create_qp failed for cma_id %d\n", ret);
|
|
- return ret;
|
|
+ goto err;
|
|
}
|
|
isert_conn->conn_qp = cma_id->qp;
|
|
pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
|
|
|
|
return 0;
|
|
+err:
|
|
+ mutex_lock(&device_list_mutex);
|
|
+ device->cq_active_qps[min_index]--;
|
|
+ mutex_unlock(&device_list_mutex);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static void
|
|
@@ -191,7 +208,7 @@ fail:
|
|
static void
|
|
isert_free_rx_descriptors(struct isert_conn *isert_conn)
|
|
{
|
|
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
|
|
+ struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
|
|
struct iser_rx_desc *rx_desc;
|
|
int i;
|
|
|
|
@@ -220,12 +237,16 @@ isert_create_device_ib_res(struct isert_device *device)
|
|
struct isert_cq_desc *cq_desc;
|
|
struct ib_device_attr *dev_attr;
|
|
int ret = 0, i, j;
|
|
+ int max_rx_cqe, max_tx_cqe;
|
|
|
|
dev_attr = &device->dev_attr;
|
|
ret = isert_query_device(ib_dev, dev_attr);
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
|
|
+ max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
|
|
+
|
|
/* asign function handlers */
|
|
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
|
|
device->use_fastreg = 1;
|
|
@@ -261,7 +282,7 @@ isert_create_device_ib_res(struct isert_device *device)
|
|
isert_cq_rx_callback,
|
|
isert_cq_event_callback,
|
|
(void *)&cq_desc[i],
|
|
- ISER_MAX_RX_CQ_LEN, i);
|
|
+ max_rx_cqe, i);
|
|
if (IS_ERR(device->dev_rx_cq[i])) {
|
|
ret = PTR_ERR(device->dev_rx_cq[i]);
|
|
device->dev_rx_cq[i] = NULL;
|
|
@@ -273,7 +294,7 @@ isert_create_device_ib_res(struct isert_device *device)
|
|
isert_cq_tx_callback,
|
|
isert_cq_event_callback,
|
|
(void *)&cq_desc[i],
|
|
- ISER_MAX_TX_CQ_LEN, i);
|
|
+ max_tx_cqe, i);
|
|
if (IS_ERR(device->dev_tx_cq[i])) {
|
|
ret = PTR_ERR(device->dev_tx_cq[i]);
|
|
device->dev_tx_cq[i] = NULL;
|
|
@@ -482,8 +503,8 @@ err:
|
|
static int
|
|
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|
{
|
|
- struct iscsi_np *np = cma_id->context;
|
|
- struct isert_np *isert_np = np->np_context;
|
|
+ struct isert_np *isert_np = cma_id->context;
|
|
+ struct iscsi_np *np = isert_np->np;
|
|
struct isert_conn *isert_conn;
|
|
struct isert_device *device;
|
|
struct ib_device *ib_dev = cma_id->device;
|
|
@@ -508,15 +529,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|
isert_conn->state = ISER_CONN_INIT;
|
|
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
|
|
init_completion(&isert_conn->conn_login_comp);
|
|
+ init_completion(&isert_conn->login_req_comp);
|
|
init_completion(&isert_conn->conn_wait);
|
|
init_completion(&isert_conn->conn_wait_comp_err);
|
|
kref_init(&isert_conn->conn_kref);
|
|
- kref_get(&isert_conn->conn_kref);
|
|
mutex_init(&isert_conn->conn_mutex);
|
|
spin_lock_init(&isert_conn->conn_lock);
|
|
INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
|
|
+ INIT_WORK(&isert_conn->release_work, isert_release_work);
|
|
|
|
- cma_id->context = isert_conn;
|
|
isert_conn->conn_cm_id = cma_id;
|
|
isert_conn->responder_resources = event->param.conn.responder_resources;
|
|
isert_conn->initiator_depth = event->param.conn.initiator_depth;
|
|
@@ -590,6 +611,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|
if (ret)
|
|
goto out_conn_dev;
|
|
|
|
+ ret = isert_rdma_post_recvl(isert_conn);
|
|
+ if (ret)
|
|
+ goto out_conn_dev;
|
|
+
|
|
+ ret = isert_rdma_accept(isert_conn);
|
|
+ if (ret)
|
|
+ goto out_conn_dev;
|
|
+
|
|
mutex_lock(&isert_np->np_accept_mutex);
|
|
list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
|
|
mutex_unlock(&isert_np->np_accept_mutex);
|
|
@@ -614,33 +643,37 @@ out_login_buf:
|
|
kfree(isert_conn->login_buf);
|
|
out:
|
|
kfree(isert_conn);
|
|
+ rdma_reject(cma_id, NULL, 0);
|
|
return ret;
|
|
}
|
|
|
|
static void
|
|
isert_connect_release(struct isert_conn *isert_conn)
|
|
{
|
|
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
|
|
struct isert_device *device = isert_conn->conn_device;
|
|
int cq_index;
|
|
+ struct ib_device *ib_dev = device->ib_device;
|
|
|
|
pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
|
|
|
|
if (device && device->use_fastreg)
|
|
isert_conn_free_fastreg_pool(isert_conn);
|
|
|
|
+ isert_free_rx_descriptors(isert_conn);
|
|
+ if (isert_conn->conn_cm_id)
|
|
+ rdma_destroy_id(isert_conn->conn_cm_id);
|
|
+
|
|
if (isert_conn->conn_qp) {
|
|
cq_index = ((struct isert_cq_desc *)
|
|
isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
|
|
pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
|
|
+ mutex_lock(&device_list_mutex);
|
|
isert_conn->conn_device->cq_active_qps[cq_index]--;
|
|
+ mutex_unlock(&device_list_mutex);
|
|
|
|
- rdma_destroy_qp(isert_conn->conn_cm_id);
|
|
+ ib_destroy_qp(isert_conn->conn_qp);
|
|
}
|
|
|
|
- isert_free_rx_descriptors(isert_conn);
|
|
- rdma_destroy_id(isert_conn->conn_cm_id);
|
|
-
|
|
ib_dereg_mr(isert_conn->conn_mr);
|
|
ib_dealloc_pd(isert_conn->conn_pd);
|
|
|
|
@@ -663,7 +696,19 @@ isert_connect_release(struct isert_conn *isert_conn)
|
|
static void
|
|
isert_connected_handler(struct rdma_cm_id *cma_id)
|
|
{
|
|
- return;
|
|
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
|
|
+
|
|
+ pr_info("conn %p\n", isert_conn);
|
|
+
|
|
+ if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
|
|
+ pr_warn("conn %p connect_release is running\n", isert_conn);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&isert_conn->conn_mutex);
|
|
+ if (isert_conn->state != ISER_CONN_FULL_FEATURE)
|
|
+ isert_conn->state = ISER_CONN_UP;
|
|
+ mutex_unlock(&isert_conn->conn_mutex);
|
|
}
|
|
|
|
static void
|
|
@@ -684,55 +729,125 @@ isert_put_conn(struct isert_conn *isert_conn)
|
|
kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
|
|
}
|
|
|
|
+/**
|
|
+ * isert_conn_terminate() - Initiate connection termination
|
|
+ * @isert_conn: isert connection struct
|
|
+ *
|
|
+ * Notes:
|
|
+ * In case the connection state is FULL_FEATURE, move state
|
|
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
|
|
+ * In case the connection state is UP, complete flush as well.
|
|
+ *
|
|
+ * This routine must be called with conn_mutex held. Thus it is
|
|
+ * safe to call multiple times.
|
|
+ */
|
|
static void
|
|
-isert_disconnect_work(struct work_struct *work)
|
|
+isert_conn_terminate(struct isert_conn *isert_conn)
|
|
{
|
|
- struct isert_conn *isert_conn = container_of(work,
|
|
- struct isert_conn, conn_logout_work);
|
|
+ int err;
|
|
|
|
- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
|
|
- mutex_lock(&isert_conn->conn_mutex);
|
|
- if (isert_conn->state == ISER_CONN_UP)
|
|
+ switch (isert_conn->state) {
|
|
+ case ISER_CONN_TERMINATING:
|
|
+ break;
|
|
+ case ISER_CONN_UP:
|
|
+ /*
|
|
+ * No flush completions will occur as we didn't
|
|
+ * get to ISER_CONN_FULL_FEATURE yet, complete
|
|
+ * to allow teardown progress.
|
|
+ */
|
|
+ complete(&isert_conn->conn_wait_comp_err);
|
|
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
|
|
+ pr_info("Terminating conn %p state %d\n",
|
|
+ isert_conn, isert_conn->state);
|
|
isert_conn->state = ISER_CONN_TERMINATING;
|
|
-
|
|
- if (isert_conn->post_recv_buf_count == 0 &&
|
|
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
|
|
- mutex_unlock(&isert_conn->conn_mutex);
|
|
- goto wake_up;
|
|
- }
|
|
- if (!isert_conn->conn_cm_id) {
|
|
- mutex_unlock(&isert_conn->conn_mutex);
|
|
- isert_put_conn(isert_conn);
|
|
- return;
|
|
+ err = rdma_disconnect(isert_conn->conn_cm_id);
|
|
+ if (err)
|
|
+ pr_warn("Failed rdma_disconnect isert_conn %p\n",
|
|
+ isert_conn);
|
|
+ break;
|
|
+ default:
|
|
+ pr_warn("conn %p teminating in state %d\n",
|
|
+ isert_conn, isert_conn->state);
|
|
}
|
|
+}
|
|
+
|
|
+static int
|
|
+isert_np_cma_handler(struct isert_np *isert_np,
|
|
+ enum rdma_cm_event_type event)
|
|
+{
|
|
+ pr_debug("isert np %p, handling event %d\n", isert_np, event);
|
|
|
|
- if (isert_conn->disconnect) {
|
|
- /* Send DREQ/DREP towards our initiator */
|
|
- rdma_disconnect(isert_conn->conn_cm_id);
|
|
+ switch (event) {
|
|
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
|
|
+ isert_np->np_cm_id = NULL;
|
|
+ break;
|
|
+ case RDMA_CM_EVENT_ADDR_CHANGE:
|
|
+ isert_np->np_cm_id = isert_setup_id(isert_np);
|
|
+ if (IS_ERR(isert_np->np_cm_id)) {
|
|
+ pr_err("isert np %p setup id failed: %ld\n",
|
|
+ isert_np, PTR_ERR(isert_np->np_cm_id));
|
|
+ isert_np->np_cm_id = NULL;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ pr_err("isert np %p Unexpected event %d\n",
|
|
+ isert_np, event);
|
|
}
|
|
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int
|
|
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
|
|
+ enum rdma_cm_event_type event)
|
|
+{
|
|
+ struct isert_np *isert_np = cma_id->context;
|
|
+ struct isert_conn *isert_conn;
|
|
+ bool terminating = false;
|
|
+
|
|
+ if (isert_np->np_cm_id == cma_id)
|
|
+ return isert_np_cma_handler(cma_id->context, event);
|
|
+
|
|
+ isert_conn = cma_id->qp->qp_context;
|
|
+
|
|
+ mutex_lock(&isert_conn->conn_mutex);
|
|
+ terminating = (isert_conn->state == ISER_CONN_TERMINATING);
|
|
+ isert_conn_terminate(isert_conn);
|
|
mutex_unlock(&isert_conn->conn_mutex);
|
|
|
|
-wake_up:
|
|
+ pr_info("conn %p completing conn_wait\n", isert_conn);
|
|
complete(&isert_conn->conn_wait);
|
|
- isert_put_conn(isert_conn);
|
|
+
|
|
+ if (terminating)
|
|
+ goto out;
|
|
+
|
|
+ mutex_lock(&isert_np->np_accept_mutex);
|
|
+ if (!list_empty(&isert_conn->conn_accept_node)) {
|
|
+ list_del_init(&isert_conn->conn_accept_node);
|
|
+ isert_put_conn(isert_conn);
|
|
+ queue_work(isert_release_wq, &isert_conn->release_work);
|
|
+ }
|
|
+ mutex_unlock(&isert_np->np_accept_mutex);
|
|
+
|
|
+out:
|
|
+ return 0;
|
|
}
|
|
|
|
-static void
|
|
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
|
|
+static int
|
|
+isert_connect_error(struct rdma_cm_id *cma_id)
|
|
{
|
|
- struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
|
|
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
|
|
+
|
|
+ isert_conn->conn_cm_id = NULL;
|
|
+ isert_put_conn(isert_conn);
|
|
|
|
- isert_conn->disconnect = disconnect;
|
|
- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
|
|
- schedule_work(&isert_conn->conn_logout_work);
|
|
+ return -1;
|
|
}
|
|
|
|
static int
|
|
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|
{
|
|
int ret = 0;
|
|
- bool disconnect = false;
|
|
|
|
pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
|
|
event->event, event->status, cma_id->context, cma_id);
|
|
@@ -740,6 +855,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|
switch (event->event) {
|
|
case RDMA_CM_EVENT_CONNECT_REQUEST:
|
|
ret = isert_connect_request(cma_id, event);
|
|
+ if (ret)
|
|
+ pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
|
|
+ event->event, ret);
|
|
break;
|
|
case RDMA_CM_EVENT_ESTABLISHED:
|
|
isert_connected_handler(cma_id);
|
|
@@ -747,22 +865,19 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|
case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
|
|
case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
|
|
case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
|
|
- disconnect = true;
|
|
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
|
|
- isert_disconnected_handler(cma_id, disconnect);
|
|
+ ret = isert_disconnected_handler(cma_id, event->event);
|
|
break;
|
|
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
|
|
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
|
|
case RDMA_CM_EVENT_CONNECT_ERROR:
|
|
+ ret = isert_connect_error(cma_id);
|
|
+ break;
|
|
default:
|
|
pr_err("Unhandled RDMA CMA event: %d\n", event->event);
|
|
break;
|
|
}
|
|
|
|
- if (ret != 0) {
|
|
- pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
|
|
- event->event, ret);
|
|
- dump_stack();
|
|
- }
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
@@ -891,7 +1006,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
|
|
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
|
|
*/
|
|
mutex_lock(&isert_conn->conn_mutex);
|
|
- if (coalesce && isert_conn->state == ISER_CONN_UP &&
|
|
+ if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
|
|
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
|
|
tx_desc->llnode_active = true;
|
|
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
|
|
@@ -970,7 +1085,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
|
|
}
|
|
if (!login->login_failed) {
|
|
if (login->login_complete) {
|
|
- if (isert_conn->conn_device->use_fastreg) {
|
|
+ if (!conn->sess->sess_ops->SessionType &&
|
|
+ isert_conn->conn_device->use_fastreg) {
|
|
ret = isert_conn_create_fastreg_pool(isert_conn);
|
|
if (ret) {
|
|
pr_err("Conn: %p failed to create"
|
|
@@ -987,7 +1103,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- isert_conn->state = ISER_CONN_UP;
|
|
+ /* Now we are in FULL_FEATURE phase */
|
|
+ mutex_lock(&isert_conn->conn_mutex);
|
|
+ isert_conn->state = ISER_CONN_FULL_FEATURE;
|
|
+ mutex_unlock(&isert_conn->conn_mutex);
|
|
goto post_send;
|
|
}
|
|
|
|
@@ -1004,18 +1123,17 @@ post_send:
|
|
}
|
|
|
|
static void
|
|
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
|
|
- struct isert_conn *isert_conn)
|
|
+isert_rx_login_req(struct isert_conn *isert_conn)
|
|
{
|
|
+ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
|
|
+ int rx_buflen = isert_conn->login_req_len;
|
|
struct iscsi_conn *conn = isert_conn->conn;
|
|
struct iscsi_login *login = conn->conn_login;
|
|
int size;
|
|
|
|
- if (!login) {
|
|
- pr_err("conn->conn_login is NULL\n");
|
|
- dump_stack();
|
|
- return;
|
|
- }
|
|
+ pr_info("conn %p\n", isert_conn);
|
|
+
|
|
+ WARN_ON_ONCE(!login);
|
|
|
|
if (login->first_request) {
|
|
struct iscsi_login_req *login_req =
|
|
@@ -1378,11 +1496,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
|
|
hdr->opcode, hdr->itt, hdr->flags,
|
|
(int)(xfer_len - ISER_HEADERS_LEN));
|
|
|
|
- if ((char *)desc == isert_conn->login_req_buf)
|
|
- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
|
|
- isert_conn);
|
|
- else
|
|
+ if ((char *)desc == isert_conn->login_req_buf) {
|
|
+ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
|
|
+ if (isert_conn->conn) {
|
|
+ struct iscsi_login *login = isert_conn->conn->conn_login;
|
|
+
|
|
+ if (login && !login->first_request)
|
|
+ isert_rx_login_req(isert_conn);
|
|
+ }
|
|
+ mutex_lock(&isert_conn->conn_mutex);
|
|
+ complete(&isert_conn->login_req_comp);
|
|
+ mutex_unlock(&isert_conn->conn_mutex);
|
|
+ } else {
|
|
isert_rx_do_work(desc, isert_conn);
|
|
+ }
|
|
|
|
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
|
|
DMA_FROM_DEVICE);
|
|
@@ -1783,7 +1910,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
|
|
msleep(3000);
|
|
|
|
mutex_lock(&isert_conn->conn_mutex);
|
|
- isert_conn->state = ISER_CONN_DOWN;
|
|
+ isert_conn_terminate(isert_conn);
|
|
mutex_unlock(&isert_conn->conn_mutex);
|
|
|
|
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
|
|
@@ -1937,7 +2064,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|
isert_cmd->tx_desc.num_sge = 2;
|
|
}
|
|
|
|
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
|
|
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
|
|
|
|
pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
|
|
|
@@ -2456,7 +2583,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|
&isert_cmd->tx_desc.iscsi_header);
|
|
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
|
|
isert_init_send_wr(isert_conn, isert_cmd,
|
|
- &isert_cmd->tx_desc.send_wr, true);
|
|
+ &isert_cmd->tx_desc.send_wr, false);
|
|
|
|
atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
|
|
|
|
@@ -2563,13 +2690,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
|
|
return ret;
|
|
}
|
|
|
|
+struct rdma_cm_id *
|
|
+isert_setup_id(struct isert_np *isert_np)
|
|
+{
|
|
+ struct iscsi_np *np = isert_np->np;
|
|
+ struct rdma_cm_id *id;
|
|
+ struct sockaddr *sa;
|
|
+ int ret;
|
|
+
|
|
+ sa = (struct sockaddr *)&np->np_sockaddr;
|
|
+ pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
|
|
+
|
|
+ id = rdma_create_id(isert_cma_handler, isert_np,
|
|
+ RDMA_PS_TCP, IB_QPT_RC);
|
|
+ if (IS_ERR(id)) {
|
|
+ pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
|
|
+ ret = PTR_ERR(id);
|
|
+ goto out;
|
|
+ }
|
|
+ pr_debug("id %p context %p\n", id, id->context);
|
|
+
|
|
+ ret = rdma_bind_addr(id, sa);
|
|
+ if (ret) {
|
|
+ pr_err("rdma_bind_addr() failed: %d\n", ret);
|
|
+ goto out_id;
|
|
+ }
|
|
+
|
|
+ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
|
|
+ if (ret) {
|
|
+ pr_err("rdma_listen() failed: %d\n", ret);
|
|
+ goto out_id;
|
|
+ }
|
|
+
|
|
+ return id;
|
|
+out_id:
|
|
+ rdma_destroy_id(id);
|
|
+out:
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
static int
|
|
isert_setup_np(struct iscsi_np *np,
|
|
struct __kernel_sockaddr_storage *ksockaddr)
|
|
{
|
|
struct isert_np *isert_np;
|
|
struct rdma_cm_id *isert_lid;
|
|
- struct sockaddr *sa;
|
|
int ret;
|
|
|
|
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
|
|
@@ -2581,9 +2746,8 @@ isert_setup_np(struct iscsi_np *np,
|
|
mutex_init(&isert_np->np_accept_mutex);
|
|
INIT_LIST_HEAD(&isert_np->np_accept_list);
|
|
init_completion(&isert_np->np_login_comp);
|
|
+ isert_np->np = np;
|
|
|
|
- sa = (struct sockaddr *)ksockaddr;
|
|
- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
|
|
/*
|
|
* Setup the np->np_sockaddr from the passed sockaddr setup
|
|
* in iscsi_target_configfs.c code..
|
|
@@ -2591,37 +2755,20 @@ isert_setup_np(struct iscsi_np *np,
|
|
memcpy(&np->np_sockaddr, ksockaddr,
|
|
sizeof(struct __kernel_sockaddr_storage));
|
|
|
|
- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
|
|
- IB_QPT_RC);
|
|
+ isert_lid = isert_setup_id(isert_np);
|
|
if (IS_ERR(isert_lid)) {
|
|
- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
|
|
- PTR_ERR(isert_lid));
|
|
ret = PTR_ERR(isert_lid);
|
|
goto out;
|
|
}
|
|
|
|
- ret = rdma_bind_addr(isert_lid, sa);
|
|
- if (ret) {
|
|
- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
|
|
- goto out_lid;
|
|
- }
|
|
-
|
|
- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
|
|
- if (ret) {
|
|
- pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
|
|
- goto out_lid;
|
|
- }
|
|
-
|
|
isert_np->np_cm_id = isert_lid;
|
|
np->np_context = isert_np;
|
|
- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
|
|
|
|
return 0;
|
|
|
|
-out_lid:
|
|
- rdma_destroy_id(isert_lid);
|
|
out:
|
|
kfree(isert_np);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -2657,7 +2804,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
|
|
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
|
|
int ret;
|
|
|
|
- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
|
|
+ pr_info("before login_req comp conn: %p\n", isert_conn);
|
|
+ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
|
|
+ if (ret) {
|
|
+ pr_err("isert_conn %p interrupted before got login req\n",
|
|
+ isert_conn);
|
|
+ return ret;
|
|
+ }
|
|
+ reinit_completion(&isert_conn->login_req_comp);
|
|
+
|
|
/*
|
|
* For login requests after the first PDU, isert_rx_login_req() will
|
|
* kick schedule_delayed_work(&conn->login_work) as the packet is
|
|
@@ -2667,11 +2822,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
|
|
if (!login->first_request)
|
|
return 0;
|
|
|
|
+ isert_rx_login_req(isert_conn);
|
|
+
|
|
+ pr_info("before conn_login_comp conn: %p\n", conn);
|
|
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
|
|
+ pr_info("processing login->req: %p\n", login->req);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -2749,17 +2908,10 @@ accept_wait:
|
|
isert_conn->conn = conn;
|
|
max_accept = 0;
|
|
|
|
- ret = isert_rdma_post_recvl(isert_conn);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = isert_rdma_accept(isert_conn);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
isert_set_conn_info(np, conn, isert_conn);
|
|
|
|
- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
|
|
+ pr_debug("Processing isert_conn: %p\n", isert_conn);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -2768,12 +2920,31 @@ isert_free_np(struct iscsi_np *np)
|
|
{
|
|
struct isert_np *isert_np = (struct isert_np *)np->np_context;
|
|
|
|
- rdma_destroy_id(isert_np->np_cm_id);
|
|
+ if (isert_np->np_cm_id)
|
|
+ rdma_destroy_id(isert_np->np_cm_id);
|
|
|
|
np->np_context = NULL;
|
|
kfree(isert_np);
|
|
}
|
|
|
|
+static void isert_release_work(struct work_struct *work)
|
|
+{
|
|
+ struct isert_conn *isert_conn = container_of(work,
|
|
+ struct isert_conn,
|
|
+ release_work);
|
|
+
|
|
+ pr_info("Starting release conn %p\n", isert_conn);
|
|
+
|
|
+ wait_for_completion(&isert_conn->conn_wait);
|
|
+
|
|
+ mutex_lock(&isert_conn->conn_mutex);
|
|
+ isert_conn->state = ISER_CONN_DOWN;
|
|
+ mutex_unlock(&isert_conn->conn_mutex);
|
|
+
|
|
+ pr_info("Destroying conn %p\n", isert_conn);
|
|
+ isert_put_conn(isert_conn);
|
|
+}
|
|
+
|
|
static void isert_wait_conn(struct iscsi_conn *conn)
|
|
{
|
|
struct isert_conn *isert_conn = conn->context;
|
|
@@ -2781,10 +2952,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
|
|
pr_debug("isert_wait_conn: Starting \n");
|
|
|
|
mutex_lock(&isert_conn->conn_mutex);
|
|
- if (isert_conn->conn_cm_id) {
|
|
- pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
|
|
- rdma_disconnect(isert_conn->conn_cm_id);
|
|
- }
|
|
/*
|
|
* Only wait for conn_wait_comp_err if the isert_conn made it
|
|
* into full feature phase..
|
|
@@ -2793,13 +2960,12 @@ static void isert_wait_conn(struct iscsi_conn *conn)
|
|
mutex_unlock(&isert_conn->conn_mutex);
|
|
return;
|
|
}
|
|
- if (isert_conn->state == ISER_CONN_UP)
|
|
- isert_conn->state = ISER_CONN_TERMINATING;
|
|
+ isert_conn_terminate(isert_conn);
|
|
mutex_unlock(&isert_conn->conn_mutex);
|
|
|
|
wait_for_completion(&isert_conn->conn_wait_comp_err);
|
|
|
|
- wait_for_completion(&isert_conn->conn_wait);
|
|
+ queue_work(isert_release_wq, &isert_conn->release_work);
|
|
}
|
|
|
|
static void isert_free_conn(struct iscsi_conn *conn)
|
|
@@ -2845,10 +3011,21 @@ static int __init isert_init(void)
|
|
goto destroy_rx_wq;
|
|
}
|
|
|
|
+ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
|
|
+ WQ_UNBOUND_MAX_ACTIVE);
|
|
+ if (!isert_release_wq) {
|
|
+ pr_err("Unable to allocate isert_release_wq\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto destroy_comp_wq;
|
|
+ }
|
|
+
|
|
iscsit_register_transport(&iser_target_transport);
|
|
- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
|
|
+ pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
|
|
+
|
|
return 0;
|
|
|
|
+destroy_comp_wq:
|
|
+ destroy_workqueue(isert_comp_wq);
|
|
destroy_rx_wq:
|
|
destroy_workqueue(isert_rx_wq);
|
|
return ret;
|
|
@@ -2857,6 +3034,7 @@ destroy_rx_wq:
|
|
static void __exit isert_exit(void)
|
|
{
|
|
flush_scheduled_work();
|
|
+ destroy_workqueue(isert_release_wq);
|
|
destroy_workqueue(isert_comp_wq);
|
|
destroy_workqueue(isert_rx_wq);
|
|
iscsit_unregister_transport(&iser_target_transport);
|
|
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
|
|
index cbecaab..1178c5b 100644
|
|
--- a/drivers/infiniband/ulp/isert/ib_isert.h
|
|
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
|
|
@@ -23,6 +23,7 @@ enum iser_ib_op_code {
|
|
enum iser_conn_state {
|
|
ISER_CONN_INIT,
|
|
ISER_CONN_UP,
|
|
+ ISER_CONN_FULL_FEATURE,
|
|
ISER_CONN_TERMINATING,
|
|
ISER_CONN_DOWN,
|
|
};
|
|
@@ -102,6 +103,7 @@ struct isert_conn {
|
|
char *login_req_buf;
|
|
char *login_rsp_buf;
|
|
u64 login_req_dma;
|
|
+ int login_req_len;
|
|
u64 login_rsp_dma;
|
|
unsigned int conn_rx_desc_head;
|
|
struct iser_rx_desc *conn_rx_descs;
|
|
@@ -109,13 +111,13 @@ struct isert_conn {
|
|
struct iscsi_conn *conn;
|
|
struct list_head conn_accept_node;
|
|
struct completion conn_login_comp;
|
|
+ struct completion login_req_comp;
|
|
struct iser_tx_desc conn_login_tx_desc;
|
|
struct rdma_cm_id *conn_cm_id;
|
|
struct ib_pd *conn_pd;
|
|
struct ib_mr *conn_mr;
|
|
struct ib_qp *conn_qp;
|
|
struct isert_device *conn_device;
|
|
- struct work_struct conn_logout_work;
|
|
struct mutex conn_mutex;
|
|
struct completion conn_wait;
|
|
struct completion conn_wait_comp_err;
|
|
@@ -124,10 +126,10 @@ struct isert_conn {
|
|
int conn_fr_pool_size;
|
|
/* lock to protect fastreg pool */
|
|
spinlock_t conn_lock;
|
|
+ struct work_struct release_work;
|
|
#define ISERT_COMP_BATCH_COUNT 8
|
|
int conn_comp_batch;
|
|
struct llist_head conn_comp_llist;
|
|
- bool disconnect;
|
|
};
|
|
|
|
#define ISERT_MAX_CQ 64
|
|
@@ -158,6 +160,7 @@ struct isert_device {
|
|
};
|
|
|
|
struct isert_np {
|
|
+ struct iscsi_np *np;
|
|
struct semaphore np_sem;
|
|
struct rdma_cm_id *np_cm_id;
|
|
struct mutex np_accept_mutex;
|
|
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
index e96c07e..ca0bc6c 100644
|
|
--- a/drivers/infiniband/ulp/srp/ib_srp.c
|
|
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
@@ -120,6 +120,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
|
|
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
|
|
|
|
static struct scsi_transport_template *ib_srp_transport_template;
|
|
+static struct workqueue_struct *srp_remove_wq;
|
|
|
|
static struct ib_client srp_client = {
|
|
.name = "srp",
|
|
@@ -539,7 +540,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
|
|
spin_unlock_irq(&target->lock);
|
|
|
|
if (changed)
|
|
- queue_work(system_long_wq, &target->remove_work);
|
|
+ queue_work(srp_remove_wq, &target->remove_work);
|
|
|
|
return changed;
|
|
}
|
|
@@ -2886,9 +2887,10 @@ static void srp_remove_one(struct ib_device *device)
|
|
spin_unlock(&host->target_lock);
|
|
|
|
/*
|
|
- * Wait for target port removal tasks.
|
|
+ * Wait for tl_err and target port removal tasks.
|
|
*/
|
|
flush_workqueue(system_long_wq);
|
|
+ flush_workqueue(srp_remove_wq);
|
|
|
|
kfree(host);
|
|
}
|
|
@@ -2940,16 +2942,22 @@ static int __init srp_init_module(void)
|
|
indirect_sg_entries = cmd_sg_entries;
|
|
}
|
|
|
|
+ srp_remove_wq = create_workqueue("srp_remove");
|
|
+ if (IS_ERR(srp_remove_wq)) {
|
|
+ ret = PTR_ERR(srp_remove_wq);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ ret = -ENOMEM;
|
|
ib_srp_transport_template =
|
|
srp_attach_transport(&ib_srp_transport_functions);
|
|
if (!ib_srp_transport_template)
|
|
- return -ENOMEM;
|
|
+ goto destroy_wq;
|
|
|
|
ret = class_register(&srp_class);
|
|
if (ret) {
|
|
pr_err("couldn't register class infiniband_srp\n");
|
|
- srp_release_transport(ib_srp_transport_template);
|
|
- return ret;
|
|
+ goto release_tr;
|
|
}
|
|
|
|
ib_sa_register_client(&srp_sa_client);
|
|
@@ -2957,13 +2965,22 @@ static int __init srp_init_module(void)
|
|
ret = ib_register_client(&srp_client);
|
|
if (ret) {
|
|
pr_err("couldn't register IB client\n");
|
|
- srp_release_transport(ib_srp_transport_template);
|
|
- ib_sa_unregister_client(&srp_sa_client);
|
|
- class_unregister(&srp_class);
|
|
- return ret;
|
|
+ goto unreg_sa;
|
|
}
|
|
|
|
- return 0;
|
|
+out:
|
|
+ return ret;
|
|
+
|
|
+unreg_sa:
|
|
+ ib_sa_unregister_client(&srp_sa_client);
|
|
+ class_unregister(&srp_class);
|
|
+
|
|
+release_tr:
|
|
+ srp_release_transport(ib_srp_transport_template);
|
|
+
|
|
+destroy_wq:
|
|
+ destroy_workqueue(srp_remove_wq);
|
|
+ goto out;
|
|
}
|
|
|
|
static void __exit srp_cleanup_module(void)
|
|
@@ -2972,6 +2989,7 @@ static void __exit srp_cleanup_module(void)
|
|
ib_sa_unregister_client(&srp_sa_client);
|
|
class_unregister(&srp_class);
|
|
srp_release_transport(ib_srp_transport_template);
|
|
+ destroy_workqueue(srp_remove_wq);
|
|
}
|
|
|
|
module_init(srp_init_module);
|
|
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
index d1078ce..0097b8d 100644
|
|
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
@@ -2091,6 +2091,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
|
|
if (!qp_init)
|
|
goto out;
|
|
|
|
+retry:
|
|
ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
|
|
ch->rq_size + srp_sq_size, 0);
|
|
if (IS_ERR(ch->cq)) {
|
|
@@ -2114,6 +2115,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
|
|
ch->qp = ib_create_qp(sdev->pd, qp_init);
|
|
if (IS_ERR(ch->qp)) {
|
|
ret = PTR_ERR(ch->qp);
|
|
+ if (ret == -ENOMEM) {
|
|
+ srp_sq_size /= 2;
|
|
+ if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
|
|
+ ib_destroy_cq(ch->cq);
|
|
+ goto retry;
|
|
+ }
|
|
+ }
|
|
printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
|
|
goto err_destroy_cq;
|
|
}
|
|
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
|
|
index ce953d8..fb787c3 100644
|
|
--- a/drivers/input/evdev.c
|
|
+++ b/drivers/input/evdev.c
|
|
@@ -757,20 +757,23 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
|
|
*/
|
|
static int evdev_handle_get_val(struct evdev_client *client,
|
|
struct input_dev *dev, unsigned int type,
|
|
- unsigned long *bits, unsigned int max,
|
|
- unsigned int size, void __user *p, int compat)
|
|
+ unsigned long *bits, unsigned int maxbit,
|
|
+ unsigned int maxlen, void __user *p,
|
|
+ int compat)
|
|
{
|
|
int ret;
|
|
unsigned long *mem;
|
|
+ size_t len;
|
|
|
|
- mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL);
|
|
+ len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
|
|
+ mem = kmalloc(len, GFP_KERNEL);
|
|
if (!mem)
|
|
return -ENOMEM;
|
|
|
|
spin_lock_irq(&dev->event_lock);
|
|
spin_lock(&client->buffer_lock);
|
|
|
|
- memcpy(mem, bits, sizeof(unsigned long) * max);
|
|
+ memcpy(mem, bits, len);
|
|
|
|
spin_unlock(&dev->event_lock);
|
|
|
|
@@ -778,7 +781,7 @@ static int evdev_handle_get_val(struct evdev_client *client,
|
|
|
|
spin_unlock_irq(&client->buffer_lock);
|
|
|
|
- ret = bits_to_user(mem, max, size, p, compat);
|
|
+ ret = bits_to_user(mem, maxbit, maxlen, p, compat);
|
|
if (ret < 0)
|
|
evdev_queue_syn_dropped(client);
|
|
|
|
diff --git a/drivers/input/input.c b/drivers/input/input.c
|
|
index 1c4c0db..29ca0bb 100644
|
|
--- a/drivers/input/input.c
|
|
+++ b/drivers/input/input.c
|
|
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
|
|
}
|
|
|
|
static int input_get_disposition(struct input_dev *dev,
|
|
- unsigned int type, unsigned int code, int value)
|
|
+ unsigned int type, unsigned int code, int *pval)
|
|
{
|
|
int disposition = INPUT_IGNORE_EVENT;
|
|
+ int value = *pval;
|
|
|
|
switch (type) {
|
|
|
|
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
|
|
break;
|
|
}
|
|
|
|
+ *pval = value;
|
|
return disposition;
|
|
}
|
|
|
|
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
|
|
{
|
|
int disposition;
|
|
|
|
- disposition = input_get_disposition(dev, type, code, value);
|
|
+ disposition = input_get_disposition(dev, type, code, &value);
|
|
|
|
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
|
|
dev->event(dev, type, code, value);
|
|
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
|
|
index 603fe0d..517829f 100644
|
|
--- a/drivers/input/joystick/xpad.c
|
|
+++ b/drivers/input/joystick/xpad.c
|
|
@@ -1003,9 +1003,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
|
|
}
|
|
|
|
ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
|
|
- usb_fill_bulk_urb(xpad->bulk_out, udev,
|
|
- usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
|
|
- xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
|
|
+ if (usb_endpoint_is_bulk_out(ep_irq_in)) {
|
|
+ usb_fill_bulk_urb(xpad->bulk_out, udev,
|
|
+ usb_sndbulkpipe(udev,
|
|
+ ep_irq_in->bEndpointAddress),
|
|
+ xpad->bdata, XPAD_PKT_LEN,
|
|
+ xpad_bulk_out, xpad);
|
|
+ } else {
|
|
+ usb_fill_int_urb(xpad->bulk_out, udev,
|
|
+ usb_sndintpipe(udev,
|
|
+ ep_irq_in->bEndpointAddress),
|
|
+ xpad->bdata, XPAD_PKT_LEN,
|
|
+ xpad_bulk_out, xpad, 0);
|
|
+ }
|
|
|
|
/*
|
|
* Submit the int URB immediately rather than waiting for open
|
|
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
|
|
index 2dd1d0d..6f5d795 100644
|
|
--- a/drivers/input/keyboard/atkbd.c
|
|
+++ b/drivers/input/keyboard/atkbd.c
|
|
@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
|
|
{
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
|
|
- DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
|
|
- },
|
|
- .callback = atkbd_deactivate_fixup,
|
|
- },
|
|
- {
|
|
- .matches = {
|
|
- DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
|
|
- DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
|
|
},
|
|
.callback = atkbd_deactivate_fixup,
|
|
},
|
|
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
|
|
index fb15c64..4979b00 100644
|
|
--- a/drivers/input/mouse/alps.c
|
|
+++ b/drivers/input/mouse/alps.c
|
|
@@ -1047,7 +1047,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
|
|
{
|
|
struct alps_data *priv = psmouse->private;
|
|
|
|
- if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
|
|
+ /*
|
|
+ * Check if we are dealing with a bare PS/2 packet, presumably from
|
|
+ * a device connected to the external PS/2 port. Because bare PS/2
|
|
+ * protocol does not have enough constant bits to self-synchronize
|
|
+ * properly we only do this if the device is fully synchronized.
|
|
+ */
|
|
+ if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
|
|
if (psmouse->pktcnt == 3) {
|
|
alps_report_bare_ps2_packet(psmouse, psmouse->packet,
|
|
true);
|
|
@@ -1071,12 +1077,27 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
|
|
}
|
|
|
|
/* Bytes 2 - pktsize should have 0 in the highest bit */
|
|
- if ((priv->proto_version < ALPS_PROTO_V5) &&
|
|
+ if (priv->proto_version < ALPS_PROTO_V5 &&
|
|
psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
|
|
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
|
|
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
|
|
psmouse->pktcnt - 1,
|
|
psmouse->packet[psmouse->pktcnt - 1]);
|
|
+
|
|
+ if (priv->proto_version == ALPS_PROTO_V3 &&
|
|
+ psmouse->pktcnt == psmouse->pktsize) {
|
|
+ /*
|
|
+ * Some Dell boxes, such as Latitude E6440 or E7440
|
|
+ * with closed lid, quite often smash last byte of
|
|
+ * otherwise valid packet with 0xff. Given that the
|
|
+ * next packet is very likely to be valid let's
|
|
+ * report PSMOUSE_FULL_PACKET but not process data,
|
|
+ * rather than reporting PSMOUSE_BAD_DATA and
|
|
+ * filling the logs.
|
|
+ */
|
|
+ return PSMOUSE_FULL_PACKET;
|
|
+ }
|
|
+
|
|
return PSMOUSE_BAD_DATA;
|
|
}
|
|
|
|
@@ -2148,6 +2169,9 @@ int alps_init(struct psmouse *psmouse)
|
|
/* We are having trouble resyncing ALPS touchpads so disable it for now */
|
|
psmouse->resync_time = 0;
|
|
|
|
+ /* Allow 2 invalid packets without resetting device */
|
|
+ psmouse->resetafter = psmouse->pktsize * 2;
|
|
+
|
|
return 0;
|
|
|
|
init_fail:
|
|
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
|
|
index 233516a..94eaaf0 100644
|
|
--- a/drivers/input/mouse/elantech.c
|
|
+++ b/drivers/input/mouse/elantech.c
|
|
@@ -314,7 +314,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
|
|
unsigned int x2, unsigned int y2)
|
|
{
|
|
elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
|
|
- elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
|
|
+ elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
|
|
}
|
|
|
|
/*
|
|
@@ -814,6 +814,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
|
|
}
|
|
|
|
/*
|
|
+ * This writes the reg_07 value again to the hardware at the end of every
|
|
+ * set_rate call because the register loses its value. reg_07 allows setting
|
|
+ * absolute mode on v4 hardware
|
|
+ */
|
|
+static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
|
|
+ unsigned int rate)
|
|
+{
|
|
+ struct elantech_data *etd = psmouse->private;
|
|
+
|
|
+ etd->original_set_rate(psmouse, rate);
|
|
+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
|
|
+ psmouse_err(psmouse, "restoring reg_07 failed\n");
|
|
+}
|
|
+
|
|
+/*
|
|
* Put the touchpad into absolute mode
|
|
*/
|
|
static int elantech_set_absolute_mode(struct psmouse *psmouse)
|
|
@@ -1015,6 +1030,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
|
|
* Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
|
|
* Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
|
|
* Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
|
|
+ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
|
|
+ * Asus X750JN 0x381f17 10, 14, 0e clickpad
|
|
* Asus UX31 0x361f00 20, 15, 0e clickpad
|
|
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
|
|
* Avatar AVIU-145A2 0x361f00 ? clickpad
|
|
@@ -1253,6 +1270,14 @@ static bool elantech_is_signature_valid(const unsigned char *param)
|
|
if (param[1] == 0)
|
|
return true;
|
|
|
|
+ /*
|
|
+ * Some hw_version >= 4 models have a revision higher then 20. Meaning
|
|
+ * that param[2] may be 10 or 20, skip the rates check for these.
|
|
+ */
|
|
+ if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
|
|
+ param[2] < 40)
|
|
+ return true;
|
|
+
|
|
for (i = 0; i < ARRAY_SIZE(rates); i++)
|
|
if (param[2] == rates[i])
|
|
return false;
|
|
@@ -1483,6 +1508,11 @@ int elantech_init(struct psmouse *psmouse)
|
|
goto init_fail;
|
|
}
|
|
|
|
+ if (etd->fw_version == 0x381f17) {
|
|
+ etd->original_set_rate = psmouse->set_rate;
|
|
+ psmouse->set_rate = elantech_set_rate_restore_reg_07;
|
|
+ }
|
|
+
|
|
if (elantech_set_input_params(psmouse)) {
|
|
psmouse_err(psmouse, "failed to query touchpad range.\n");
|
|
goto init_fail;
|
|
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
|
|
index 9e0e2a1..59263a3 100644
|
|
--- a/drivers/input/mouse/elantech.h
|
|
+++ b/drivers/input/mouse/elantech.h
|
|
@@ -139,6 +139,7 @@ struct elantech_data {
|
|
struct finger_pos mt[ETP_MAX_FINGERS];
|
|
unsigned char parity[256];
|
|
int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
|
|
+ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
|
|
};
|
|
|
|
#ifdef CONFIG_MOUSE_PS2_ELANTECH
|
|
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
|
|
index ec772d9..53f09a8 100644
|
|
--- a/drivers/input/mouse/synaptics.c
|
|
+++ b/drivers/input/mouse/synaptics.c
|
|
@@ -132,13 +132,23 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
|
|
1232, 5710, 1156, 4696
|
|
},
|
|
{
|
|
- (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
|
|
+ (const char * const []){"LEN0034", "LEN0036", "LEN0037",
|
|
+ "LEN0039", "LEN2002", "LEN2004",
|
|
+ NULL},
|
|
1024, 5112, 2024, 4832
|
|
},
|
|
{
|
|
+ (const char * const []){"LEN2000", NULL},
|
|
+ 1024, 5113, 2021, 4832
|
|
+ },
|
|
+ {
|
|
(const char * const []){"LEN2001", NULL},
|
|
1024, 5022, 2508, 4832
|
|
},
|
|
+ {
|
|
+ (const char * const []){"LEN2006", NULL},
|
|
+ 1264, 5675, 1171, 4688
|
|
+ },
|
|
{ }
|
|
};
|
|
|
|
@@ -157,8 +167,9 @@ static const char * const topbuttonpad_pnp_ids[] = {
|
|
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
|
|
"LEN0035", /* X240 */
|
|
"LEN0036", /* T440 */
|
|
- "LEN0037",
|
|
+ "LEN0037", /* X1 Carbon 2nd */
|
|
"LEN0038",
|
|
+ "LEN0039", /* T440s */
|
|
"LEN0041",
|
|
"LEN0042", /* Yoga */
|
|
"LEN0045",
|
|
@@ -166,9 +177,9 @@ static const char * const topbuttonpad_pnp_ids[] = {
|
|
"LEN0047",
|
|
"LEN0048",
|
|
"LEN0049",
|
|
- "LEN2000",
|
|
+ "LEN2000", /* S540 */
|
|
"LEN2001", /* Edge E431 */
|
|
- "LEN2002",
|
|
+ "LEN2002", /* Edge E531 */
|
|
"LEN2003",
|
|
"LEN2004", /* L440 */
|
|
"LEN2005",
|
|
@@ -625,10 +636,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
|
|
((buf[0] & 0x04) >> 1) |
|
|
((buf[3] & 0x04) >> 2));
|
|
|
|
+ if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
|
|
+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
|
|
+ hw->w == 2) {
|
|
+ synaptics_parse_agm(buf, priv, hw);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ hw->x = (((buf[3] & 0x10) << 8) |
|
|
+ ((buf[1] & 0x0f) << 8) |
|
|
+ buf[4]);
|
|
+ hw->y = (((buf[3] & 0x20) << 7) |
|
|
+ ((buf[1] & 0xf0) << 4) |
|
|
+ buf[5]);
|
|
+ hw->z = buf[2];
|
|
+
|
|
hw->left = (buf[0] & 0x01) ? 1 : 0;
|
|
hw->right = (buf[0] & 0x02) ? 1 : 0;
|
|
|
|
- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
|
|
+ if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
|
|
+ /*
|
|
+ * ForcePads, like Clickpads, use middle button
|
|
+ * bits to report primary button clicks.
|
|
+ * Unfortunately they report primary button not
|
|
+ * only when user presses on the pad above certain
|
|
+ * threshold, but also when there are more than one
|
|
+ * finger on the touchpad, which interferes with
|
|
+ * out multi-finger gestures.
|
|
+ */
|
|
+ if (hw->z == 0) {
|
|
+ /* No contacts */
|
|
+ priv->press = priv->report_press = false;
|
|
+ } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
|
|
+ /*
|
|
+ * Single-finger touch with pressure above
|
|
+ * the threshold. If pressure stays long
|
|
+ * enough, we'll start reporting primary
|
|
+ * button. We rely on the device continuing
|
|
+ * sending data even if finger does not
|
|
+ * move.
|
|
+ */
|
|
+ if (!priv->press) {
|
|
+ priv->press_start = jiffies;
|
|
+ priv->press = true;
|
|
+ } else if (time_after(jiffies,
|
|
+ priv->press_start +
|
|
+ msecs_to_jiffies(50))) {
|
|
+ priv->report_press = true;
|
|
+ }
|
|
+ } else {
|
|
+ priv->press = false;
|
|
+ }
|
|
+
|
|
+ hw->left = priv->report_press;
|
|
+
|
|
+ } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
|
|
/*
|
|
* Clickpad's button is transmitted as middle button,
|
|
* however, since it is primary button, we will report
|
|
@@ -647,21 +709,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
|
|
hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
|
|
}
|
|
|
|
- if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
|
|
- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
|
|
- hw->w == 2) {
|
|
- synaptics_parse_agm(buf, priv, hw);
|
|
- return 1;
|
|
- }
|
|
-
|
|
- hw->x = (((buf[3] & 0x10) << 8) |
|
|
- ((buf[1] & 0x0f) << 8) |
|
|
- buf[4]);
|
|
- hw->y = (((buf[3] & 0x20) << 7) |
|
|
- ((buf[1] & 0xf0) << 4) |
|
|
- buf[5]);
|
|
- hw->z = buf[2];
|
|
-
|
|
if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
|
|
((buf[0] ^ buf[3]) & 0x02)) {
|
|
switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
|
|
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
|
|
index e594af0..fb2e076 100644
|
|
--- a/drivers/input/mouse/synaptics.h
|
|
+++ b/drivers/input/mouse/synaptics.h
|
|
@@ -78,6 +78,11 @@
|
|
* 2 0x08 image sensor image sensor tracks 5 fingers, but only
|
|
* reports 2.
|
|
* 2 0x20 report min query 0x0f gives min coord reported
|
|
+ * 2 0x80 forcepad forcepad is a variant of clickpad that
|
|
+ * does not have physical buttons but rather
|
|
+ * uses pressure above certain threshold to
|
|
+ * report primary clicks. Forcepads also have
|
|
+ * clickpad bit set.
|
|
*/
|
|
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
|
|
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
|
|
@@ -86,6 +91,7 @@
|
|
#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
|
|
#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
|
|
#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
|
|
+#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
|
|
|
|
/* synaptics modes query bits */
|
|
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
|
|
@@ -177,6 +183,11 @@ struct synaptics_data {
|
|
*/
|
|
struct synaptics_hw_state agm;
|
|
bool agm_pending; /* new AGM packet received */
|
|
+
|
|
+ /* ForcePad handling */
|
|
+ unsigned long press_start;
|
|
+ bool press;
|
|
+ bool report_press;
|
|
};
|
|
|
|
void synaptics_module_init(void);
|
|
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
|
|
index 381b20d..dd6d14d 100644
|
|
--- a/drivers/input/serio/i8042-x86ia64io.h
|
|
+++ b/drivers/input/serio/i8042-x86ia64io.h
|
|
@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
|
|
},
|
|
{
|
|
.matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ .matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
|
|
DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
|
|
DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
|
|
@@ -146,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
|
|
},
|
|
},
|
|
{
|
|
+ /* Medion Akoya E7225 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
|
|
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
/* Blue FB5601 */
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
|
|
@@ -402,6 +416,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
|
|
},
|
|
},
|
|
{
|
|
+ /* Acer Aspire 7738 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
/* Gericom Bellagio */
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
|
|
@@ -458,6 +479,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
|
|
},
|
|
},
|
|
+ {
|
|
+ /* Avatar AVIU-145A6 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
|
|
+ },
|
|
+ },
|
|
{ }
|
|
};
|
|
|
|
@@ -601,6 +629,30 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
|
|
},
|
|
},
|
|
+ {
|
|
+ /* Fujitsu A544 laptop */
|
|
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Fujitsu AH544 laptop */
|
|
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Fujitsu U574 laptop */
|
|
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
|
|
+ },
|
|
+ },
|
|
{ }
|
|
};
|
|
|
|
@@ -684,6 +736,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
|
|
{ }
|
|
};
|
|
|
|
+/*
|
|
+ * Some laptops need keyboard reset before probing for the trackpad to get
|
|
+ * it detected, initialised & finally work.
|
|
+ */
|
|
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
|
|
+ {
|
|
+ /* Gigabyte P35 v2 - Elantech touchpad */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Gigabyte P34 - Elantech touchpad */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
|
|
+ },
|
|
+ },
|
|
+ { }
|
|
+};
|
|
+
|
|
#endif /* CONFIG_X86 */
|
|
|
|
#ifdef CONFIG_PNP
|
|
@@ -979,6 +1060,9 @@ static int __init i8042_platform_init(void)
|
|
if (dmi_check_system(i8042_dmi_dritek_table))
|
|
i8042_dritek = true;
|
|
|
|
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
|
|
+ i8042_kbdreset = true;
|
|
+
|
|
/*
|
|
* A20 was already enabled during early kernel init. But some buggy
|
|
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
|
|
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
|
|
index 3807c3e..eb796ff 100644
|
|
--- a/drivers/input/serio/i8042.c
|
|
+++ b/drivers/input/serio/i8042.c
|
|
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
|
|
module_param_named(notimeout, i8042_notimeout, bool, 0);
|
|
MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
|
|
|
|
+static bool i8042_kbdreset;
|
|
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
|
|
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
|
|
+
|
|
#ifdef CONFIG_X86
|
|
static bool i8042_dritek;
|
|
module_param_named(dritek, i8042_dritek, bool, 0);
|
|
@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
|
|
return -1;
|
|
|
|
/*
|
|
+ * Reset keyboard (needed on some laptops to successfully detect
|
|
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
|
|
+ * touchpads).
|
|
+ */
|
|
+ if (i8042_kbdreset) {
|
|
+ pr_warn("Attempting to reset device connected to KBD port\n");
|
|
+ i8042_kbd_write(NULL, (unsigned char) 0xff);
|
|
+ }
|
|
+
|
|
+/*
|
|
* Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
|
|
* used it for a PCI card or somethig else.
|
|
*/
|
|
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
|
|
index 0cb7ef5..69175b8 100644
|
|
--- a/drivers/input/serio/serport.c
|
|
+++ b/drivers/input/serio/serport.c
|
|
@@ -21,6 +21,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/serio.h>
|
|
#include <linux/tty.h>
|
|
+#include <linux/compat.h>
|
|
|
|
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
|
|
MODULE_DESCRIPTION("Input device TTY line discipline");
|
|
@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
|
|
return 0;
|
|
}
|
|
|
|
+static void serport_set_type(struct tty_struct *tty, unsigned long type)
|
|
+{
|
|
+ struct serport *serport = tty->disc_data;
|
|
+
|
|
+ serport->id.proto = type & 0x000000ff;
|
|
+ serport->id.id = (type & 0x0000ff00) >> 8;
|
|
+ serport->id.extra = (type & 0x00ff0000) >> 16;
|
|
+}
|
|
+
|
|
/*
|
|
* serport_ldisc_ioctl() allows to set the port protocol, and device ID
|
|
*/
|
|
|
|
-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
|
|
+static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
|
|
+ unsigned int cmd, unsigned long arg)
|
|
{
|
|
- struct serport *serport = (struct serport*) tty->disc_data;
|
|
- unsigned long type;
|
|
-
|
|
if (cmd == SPIOCSTYPE) {
|
|
+ unsigned long type;
|
|
+
|
|
if (get_user(type, (unsigned long __user *) arg))
|
|
return -EFAULT;
|
|
|
|
- serport->id.proto = type & 0x000000ff;
|
|
- serport->id.id = (type & 0x0000ff00) >> 8;
|
|
- serport->id.extra = (type & 0x00ff0000) >> 16;
|
|
+ serport_set_type(tty, type);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_COMPAT
|
|
+#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
|
|
+static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
|
|
+ struct file *file,
|
|
+ unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ if (cmd == COMPAT_SPIOCSTYPE) {
|
|
+ void __user *uarg = compat_ptr(arg);
|
|
+ compat_ulong_t compat_type;
|
|
+
|
|
+ if (get_user(compat_type, (compat_ulong_t __user *)uarg))
|
|
+ return -EFAULT;
|
|
|
|
+ serport_set_type(tty, compat_type);
|
|
return 0;
|
|
}
|
|
|
|
return -EINVAL;
|
|
}
|
|
+#endif
|
|
|
|
static void serport_ldisc_write_wakeup(struct tty_struct * tty)
|
|
{
|
|
@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = {
|
|
.close = serport_ldisc_close,
|
|
.read = serport_ldisc_read,
|
|
.ioctl = serport_ldisc_ioctl,
|
|
+#ifdef CONFIG_COMPAT
|
|
+ .compat_ioctl = serport_ldisc_compat_ioctl,
|
|
+#endif
|
|
.receive_buf = serport_ldisc_receive,
|
|
.write_wakeup = serport_ldisc_write_wakeup
|
|
};
|
|
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
|
|
index 05f371d..d4b0a31 100644
|
|
--- a/drivers/input/tablet/wacom_wac.c
|
|
+++ b/drivers/input/tablet/wacom_wac.c
|
|
@@ -700,6 +700,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
|
|
input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */
|
|
input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */
|
|
input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */
|
|
+
|
|
+ if (data[4] | (data[3] & 0x01)) {
|
|
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
|
|
+ } else {
|
|
+ input_report_abs(input, ABS_MISC, 0);
|
|
+ }
|
|
} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
|
|
int i;
|
|
|
|
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
|
|
index a096633..c6f7e91 100644
|
|
--- a/drivers/input/touchscreen/usbtouchscreen.c
|
|
+++ b/drivers/input/touchscreen/usbtouchscreen.c
|
|
@@ -625,6 +625,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
|
|
goto err_out;
|
|
}
|
|
|
|
+ /* TSC-25 data sheet specifies a delay after the RESET command */
|
|
+ msleep(150);
|
|
+
|
|
/* set coordinate output rate */
|
|
buf[0] = buf[1] = 0xFF;
|
|
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
|
|
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
|
|
index 71776ff..9359740 100644
|
|
--- a/drivers/iommu/amd_iommu.c
|
|
+++ b/drivers/iommu/amd_iommu.c
|
|
@@ -1922,9 +1922,15 @@ static void free_pt_##LVL (unsigned long __pt) \
|
|
pt = (u64 *)__pt; \
|
|
\
|
|
for (i = 0; i < 512; ++i) { \
|
|
+ /* PTE present? */ \
|
|
if (!IOMMU_PTE_PRESENT(pt[i])) \
|
|
continue; \
|
|
\
|
|
+ /* Large PTE? */ \
|
|
+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
|
|
+ PM_PTE_LEVEL(pt[i]) == 7) \
|
|
+ continue; \
|
|
+ \
|
|
p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
|
|
FN(p); \
|
|
} \
|
|
@@ -3227,14 +3233,16 @@ free_domains:
|
|
|
|
static void cleanup_domain(struct protection_domain *domain)
|
|
{
|
|
- struct iommu_dev_data *dev_data, *next;
|
|
+ struct iommu_dev_data *entry;
|
|
unsigned long flags;
|
|
|
|
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
|
|
|
|
- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
|
|
- __detach_device(dev_data);
|
|
- atomic_set(&dev_data->bind, 0);
|
|
+ while (!list_empty(&domain->dev_list)) {
|
|
+ entry = list_first_entry(&domain->dev_list,
|
|
+ struct iommu_dev_data, list);
|
|
+ __detach_device(entry);
|
|
+ atomic_set(&entry->bind, 0);
|
|
}
|
|
|
|
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
|
|
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
|
|
index 1d9ab39..2ecac46 100644
|
|
--- a/drivers/iommu/arm-smmu.c
|
|
+++ b/drivers/iommu/arm-smmu.c
|
|
@@ -794,8 +794,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
|
|
reg |= TTBCR_EAE |
|
|
(TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
|
|
(TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
|
|
- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
|
|
- (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
|
|
+ (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
|
|
+
|
|
+ if (!stage1)
|
|
+ reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
|
|
+
|
|
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
|
|
|
|
/* MAIR0 (stage-1 only) */
|
|
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
|
|
index 40f6b47..8855ecb 100644
|
|
--- a/drivers/iommu/intel-iommu.c
|
|
+++ b/drivers/iommu/intel-iommu.c
|
|
@@ -1768,7 +1768,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
|
struct dma_pte *first_pte = NULL, *pte = NULL;
|
|
phys_addr_t uninitialized_var(pteval);
|
|
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
|
|
- unsigned long sg_res;
|
|
+ unsigned long sg_res = 0;
|
|
unsigned int largepage_lvl = 0;
|
|
unsigned long lvl_pages = 0;
|
|
|
|
@@ -1779,10 +1779,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
|
|
|
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
|
|
|
|
- if (sg)
|
|
- sg_res = 0;
|
|
- else {
|
|
- sg_res = nr_pages + 1;
|
|
+ if (!sg) {
|
|
+ sg_res = nr_pages;
|
|
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
|
|
}
|
|
|
|
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
|
|
index f37d63c..825545c 100644
|
|
--- a/drivers/leds/led-class.c
|
|
+++ b/drivers/leds/led-class.c
|
|
@@ -178,6 +178,7 @@ void led_classdev_resume(struct led_classdev *led_cdev)
|
|
}
|
|
EXPORT_SYMBOL_GPL(led_classdev_resume);
|
|
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
static int led_suspend(struct device *dev)
|
|
{
|
|
struct led_classdev *led_cdev = dev_get_drvdata(dev);
|
|
@@ -197,11 +198,9 @@ static int led_resume(struct device *dev)
|
|
|
|
return 0;
|
|
}
|
|
+#endif
|
|
|
|
-static const struct dev_pm_ops leds_class_dev_pm_ops = {
|
|
- .suspend = led_suspend,
|
|
- .resume = led_resume,
|
|
-};
|
|
+static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
|
|
|
|
/**
|
|
* led_classdev_register - register a new object of led_classdev class.
|
|
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
|
|
index 0bf1e4e..19da222 100644
|
|
--- a/drivers/lguest/core.c
|
|
+++ b/drivers/lguest/core.c
|
|
@@ -176,7 +176,7 @@ static void unmap_switcher(void)
|
|
bool lguest_address_ok(const struct lguest *lg,
|
|
unsigned long addr, unsigned long len)
|
|
{
|
|
- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
|
|
+ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
|
|
index 5f9c2a6..fbcb622 100644
|
|
--- a/drivers/md/bcache/btree.c
|
|
+++ b/drivers/md/bcache/btree.c
|
|
@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b)
|
|
struct bset *i = btree_bset_first(b);
|
|
struct btree_iter *iter;
|
|
|
|
- iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
|
|
+ iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
|
|
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
|
|
iter->used = 0;
|
|
|
|
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
|
|
index 4195a01..cc3dc0c 100644
|
|
--- a/drivers/md/bitmap.c
|
|
+++ b/drivers/md/bitmap.c
|
|
@@ -564,6 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
|
|
if (err)
|
|
return err;
|
|
|
|
+ err = -EINVAL;
|
|
+
|
|
sb = kmap_atomic(sb_page);
|
|
|
|
chunksize = le32_to_cpu(sb->chunksize);
|
|
@@ -883,7 +885,6 @@ void bitmap_unplug(struct bitmap *bitmap)
|
|
{
|
|
unsigned long i;
|
|
int dirty, need_write;
|
|
- int wait = 0;
|
|
|
|
if (!bitmap || !bitmap->storage.filemap ||
|
|
test_bit(BITMAP_STALE, &bitmap->flags))
|
|
@@ -901,16 +902,13 @@ void bitmap_unplug(struct bitmap *bitmap)
|
|
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
|
|
write_page(bitmap, bitmap->storage.filemap[i], 0);
|
|
}
|
|
- if (dirty)
|
|
- wait = 1;
|
|
- }
|
|
- if (wait) { /* if any writes were performed, we need to wait on them */
|
|
- if (bitmap->storage.file)
|
|
- wait_event(bitmap->write_wait,
|
|
- atomic_read(&bitmap->pending_writes)==0);
|
|
- else
|
|
- md_super_wait(bitmap->mddev);
|
|
}
|
|
+ if (bitmap->storage.file)
|
|
+ wait_event(bitmap->write_wait,
|
|
+ atomic_read(&bitmap->pending_writes)==0);
|
|
+ else
|
|
+ md_super_wait(bitmap->mddev);
|
|
+
|
|
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
|
|
bitmap_file_kick(bitmap);
|
|
}
|
|
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
|
|
index 66c5d13..03c872f 100644
|
|
--- a/drivers/md/dm-bufio.c
|
|
+++ b/drivers/md/dm-bufio.c
|
|
@@ -465,6 +465,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
|
|
c->n_buffers[dirty]++;
|
|
b->list_mode = dirty;
|
|
list_move(&b->lru_list, &c->lru[dirty]);
|
|
+ b->last_accessed = jiffies;
|
|
}
|
|
|
|
/*----------------------------------------------------------------
|
|
@@ -531,6 +532,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
|
|
end_io(&b->bio, r);
|
|
}
|
|
|
|
+static void inline_endio(struct bio *bio, int error)
|
|
+{
|
|
+ bio_end_io_t *end_fn = bio->bi_private;
|
|
+
|
|
+ /*
|
|
+ * Reset the bio to free any attached resources
|
|
+ * (e.g. bio integrity profiles).
|
|
+ */
|
|
+ bio_reset(bio);
|
|
+
|
|
+ end_fn(bio, error);
|
|
+}
|
|
+
|
|
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
|
|
bio_end_io_t *end_io)
|
|
{
|
|
@@ -542,7 +556,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
|
|
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
|
|
b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
|
|
b->bio.bi_bdev = b->c->bdev;
|
|
- b->bio.bi_end_io = end_io;
|
|
+ b->bio.bi_end_io = inline_endio;
|
|
+ /*
|
|
+ * Use of .bi_private isn't a problem here because
|
|
+ * the dm_buffer's inline bio is local to bufio.
|
|
+ */
|
|
+ b->bio.bi_private = end_io;
|
|
|
|
/*
|
|
* We assume that if len >= PAGE_SIZE ptr is page-aligned.
|
|
@@ -1447,9 +1466,9 @@ static void drop_buffers(struct dm_bufio_client *c)
|
|
|
|
/*
|
|
* Test if the buffer is unused and too old, and commit it.
|
|
- * At if noio is set, we must not do any I/O because we hold
|
|
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
|
|
- * different bufio client.
|
|
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
|
|
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
|
|
+ * rerouted to different bufio client.
|
|
*/
|
|
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
|
|
unsigned long max_jiffies)
|
|
@@ -1457,7 +1476,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
|
|
if (jiffies - b->last_accessed < max_jiffies)
|
|
return 0;
|
|
|
|
- if (!(gfp & __GFP_IO)) {
|
|
+ if (!(gfp & __GFP_FS)) {
|
|
if (test_bit(B_READING, &b->state) ||
|
|
test_bit(B_WRITING, &b->state) ||
|
|
test_bit(B_DIRTY, &b->state))
|
|
@@ -1485,9 +1504,9 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
|
|
list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
|
|
freed += __cleanup_old_buffer(b, gfp_mask, 0);
|
|
if (!--nr_to_scan)
|
|
- break;
|
|
+ return freed;
|
|
+ dm_bufio_cond_resched();
|
|
}
|
|
- dm_bufio_cond_resched();
|
|
}
|
|
return freed;
|
|
}
|
|
@@ -1499,7 +1518,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
unsigned long freed;
|
|
|
|
c = container_of(shrink, struct dm_bufio_client, shrinker);
|
|
- if (sc->gfp_mask & __GFP_IO)
|
|
+ if (sc->gfp_mask & __GFP_FS)
|
|
dm_bufio_lock(c);
|
|
else if (!dm_bufio_trylock(c))
|
|
return SHRINK_STOP;
|
|
@@ -1516,7 +1535,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
|
|
unsigned long count;
|
|
|
|
c = container_of(shrink, struct dm_bufio_client, shrinker);
|
|
- if (sc->gfp_mask & __GFP_IO)
|
|
+ if (sc->gfp_mask & __GFP_FS)
|
|
dm_bufio_lock(c);
|
|
else if (!dm_bufio_trylock(c))
|
|
return 0;
|
|
@@ -1541,7 +1560,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
|
|
BUG_ON(block_size < 1 << SECTOR_SHIFT ||
|
|
(block_size & (block_size - 1)));
|
|
|
|
- c = kmalloc(sizeof(*c), GFP_KERNEL);
|
|
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
|
|
if (!c) {
|
|
r = -ENOMEM;
|
|
goto bad_client;
|
|
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
|
|
index a87d3fa..b950a80 100644
|
|
--- a/drivers/md/dm-cache-metadata.c
|
|
+++ b/drivers/md/dm-cache-metadata.c
|
|
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
|
|
} __packed;
|
|
|
|
struct dm_cache_metadata {
|
|
+ atomic_t ref_count;
|
|
+ struct list_head list;
|
|
+
|
|
struct block_device *bdev;
|
|
struct dm_block_manager *bm;
|
|
struct dm_space_map *metadata_sm;
|
|
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
|
|
|
|
/*----------------------------------------------------------------*/
|
|
|
|
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
|
|
- sector_t data_block_size,
|
|
- bool may_format_device,
|
|
- size_t policy_hint_size)
|
|
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
|
|
+ sector_t data_block_size,
|
|
+ bool may_format_device,
|
|
+ size_t policy_hint_size)
|
|
{
|
|
int r;
|
|
struct dm_cache_metadata *cmd;
|
|
@@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
|
|
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
|
|
if (!cmd) {
|
|
DMERR("could not allocate metadata struct");
|
|
- return NULL;
|
|
+ return ERR_PTR(-ENOMEM);
|
|
}
|
|
|
|
+ atomic_set(&cmd->ref_count, 1);
|
|
init_rwsem(&cmd->root_lock);
|
|
cmd->bdev = bdev;
|
|
cmd->data_block_size = data_block_size;
|
|
@@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
|
|
return cmd;
|
|
}
|
|
|
|
+/*
|
|
+ * We keep a little list of ref counted metadata objects to prevent two
|
|
+ * different target instances creating separate bufio instances. This is
|
|
+ * an issue if a table is reloaded before the suspend.
|
|
+ */
|
|
+static DEFINE_MUTEX(table_lock);
|
|
+static LIST_HEAD(table);
|
|
+
|
|
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
|
|
+{
|
|
+ struct dm_cache_metadata *cmd;
|
|
+
|
|
+ list_for_each_entry(cmd, &table, list)
|
|
+ if (cmd->bdev == bdev) {
|
|
+ atomic_inc(&cmd->ref_count);
|
|
+ return cmd;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
|
|
+ sector_t data_block_size,
|
|
+ bool may_format_device,
|
|
+ size_t policy_hint_size)
|
|
+{
|
|
+ struct dm_cache_metadata *cmd, *cmd2;
|
|
+
|
|
+ mutex_lock(&table_lock);
|
|
+ cmd = lookup(bdev);
|
|
+ mutex_unlock(&table_lock);
|
|
+
|
|
+ if (cmd)
|
|
+ return cmd;
|
|
+
|
|
+ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
|
|
+ if (!IS_ERR(cmd)) {
|
|
+ mutex_lock(&table_lock);
|
|
+ cmd2 = lookup(bdev);
|
|
+ if (cmd2) {
|
|
+ mutex_unlock(&table_lock);
|
|
+ __destroy_persistent_data_objects(cmd);
|
|
+ kfree(cmd);
|
|
+ return cmd2;
|
|
+ }
|
|
+ list_add(&cmd->list, &table);
|
|
+ mutex_unlock(&table_lock);
|
|
+ }
|
|
+
|
|
+ return cmd;
|
|
+}
|
|
+
|
|
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
|
|
+{
|
|
+ if (cmd->data_block_size != data_block_size) {
|
|
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
|
|
+ (unsigned long long) data_block_size,
|
|
+ (unsigned long long) cmd->data_block_size);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
|
|
+ sector_t data_block_size,
|
|
+ bool may_format_device,
|
|
+ size_t policy_hint_size)
|
|
+{
|
|
+ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
|
|
+ may_format_device, policy_hint_size);
|
|
+
|
|
+ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
|
|
+ dm_cache_metadata_close(cmd);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
+ return cmd;
|
|
+}
|
|
+
|
|
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
|
|
{
|
|
- __destroy_persistent_data_objects(cmd);
|
|
- kfree(cmd);
|
|
+ if (atomic_dec_and_test(&cmd->ref_count)) {
|
|
+ mutex_lock(&table_lock);
|
|
+ list_del(&cmd->list);
|
|
+ mutex_unlock(&table_lock);
|
|
+
|
|
+ __destroy_persistent_data_objects(cmd);
|
|
+ kfree(cmd);
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
|
|
index c0ad90d..c10dec0 100644
|
|
--- a/drivers/md/dm-cache-target.c
|
|
+++ b/drivers/md/dm-cache-target.c
|
|
@@ -222,7 +222,13 @@ struct cache {
|
|
struct list_head need_commit_migrations;
|
|
sector_t migration_threshold;
|
|
wait_queue_head_t migration_wait;
|
|
- atomic_t nr_migrations;
|
|
+ atomic_t nr_allocated_migrations;
|
|
+
|
|
+ /*
|
|
+ * The number of in flight migrations that are performing
|
|
+ * background io. eg, promotion, writeback.
|
|
+ */
|
|
+ atomic_t nr_io_migrations;
|
|
|
|
wait_queue_head_t quiescing_wait;
|
|
atomic_t quiescing;
|
|
@@ -231,7 +237,7 @@ struct cache {
|
|
/*
|
|
* cache_size entries, dirty if set
|
|
*/
|
|
- dm_cblock_t nr_dirty;
|
|
+ atomic_t nr_dirty;
|
|
unsigned long *dirty_bitset;
|
|
|
|
/*
|
|
@@ -259,7 +265,6 @@ struct cache {
|
|
struct dm_deferred_set *all_io_ds;
|
|
|
|
mempool_t *migration_pool;
|
|
- struct dm_cache_migration *next_migration;
|
|
|
|
struct dm_cache_policy *policy;
|
|
unsigned policy_nr_args;
|
|
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
|
|
dm_bio_prison_free_cell(cache->prison, cell);
|
|
}
|
|
|
|
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
|
|
+{
|
|
+ struct dm_cache_migration *mg;
|
|
+
|
|
+ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
|
|
+ if (mg) {
|
|
+ mg->cache = cache;
|
|
+ atomic_inc(&mg->cache->nr_allocated_migrations);
|
|
+ }
|
|
+
|
|
+ return mg;
|
|
+}
|
|
+
|
|
+static void free_migration(struct dm_cache_migration *mg)
|
|
+{
|
|
+ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
|
|
+ wake_up(&mg->cache->migration_wait);
|
|
+
|
|
+ mempool_free(mg, mg->cache->migration_pool);
|
|
+}
|
|
+
|
|
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
|
|
{
|
|
if (!p->mg) {
|
|
- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
|
|
+ p->mg = alloc_migration(cache);
|
|
if (!p->mg)
|
|
return -ENOMEM;
|
|
}
|
|
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
|
|
free_prison_cell(cache, p->cell1);
|
|
|
|
if (p->mg)
|
|
- mempool_free(p->mg, cache->migration_pool);
|
|
+ free_migration(p->mg);
|
|
}
|
|
|
|
static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
|
|
@@ -493,7 +519,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
|
|
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
|
|
{
|
|
if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
|
|
- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
|
|
+ atomic_inc(&cache->nr_dirty);
|
|
policy_set_dirty(cache->policy, oblock);
|
|
}
|
|
}
|
|
@@ -502,8 +528,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
|
|
{
|
|
if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
|
|
policy_clear_dirty(cache->policy, oblock);
|
|
- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
|
|
- if (!from_cblock(cache->nr_dirty))
|
|
+ if (atomic_dec_return(&cache->nr_dirty) == 0)
|
|
dm_table_event(cache->ti->table);
|
|
}
|
|
}
|
|
@@ -813,24 +838,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
|
|
* Migration covers moving data from the origin device to the cache, or
|
|
* vice versa.
|
|
*--------------------------------------------------------------*/
|
|
-static void free_migration(struct dm_cache_migration *mg)
|
|
+static void inc_io_migrations(struct cache *cache)
|
|
{
|
|
- mempool_free(mg, mg->cache->migration_pool);
|
|
+ atomic_inc(&cache->nr_io_migrations);
|
|
}
|
|
|
|
-static void inc_nr_migrations(struct cache *cache)
|
|
+static void dec_io_migrations(struct cache *cache)
|
|
{
|
|
- atomic_inc(&cache->nr_migrations);
|
|
-}
|
|
-
|
|
-static void dec_nr_migrations(struct cache *cache)
|
|
-{
|
|
- atomic_dec(&cache->nr_migrations);
|
|
-
|
|
- /*
|
|
- * Wake the worker in case we're suspending the target.
|
|
- */
|
|
- wake_up(&cache->migration_wait);
|
|
+ atomic_dec(&cache->nr_io_migrations);
|
|
}
|
|
|
|
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
|
|
@@ -853,11 +868,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
|
|
wake_worker(cache);
|
|
}
|
|
|
|
-static void cleanup_migration(struct dm_cache_migration *mg)
|
|
+static void free_io_migration(struct dm_cache_migration *mg)
|
|
{
|
|
- struct cache *cache = mg->cache;
|
|
+ dec_io_migrations(mg->cache);
|
|
free_migration(mg);
|
|
- dec_nr_migrations(cache);
|
|
}
|
|
|
|
static void migration_failure(struct dm_cache_migration *mg)
|
|
@@ -882,7 +896,7 @@ static void migration_failure(struct dm_cache_migration *mg)
|
|
cell_defer(cache, mg->new_ocell, true);
|
|
}
|
|
|
|
- cleanup_migration(mg);
|
|
+ free_io_migration(mg);
|
|
}
|
|
|
|
static void migration_success_pre_commit(struct dm_cache_migration *mg)
|
|
@@ -891,9 +905,9 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
|
|
struct cache *cache = mg->cache;
|
|
|
|
if (mg->writeback) {
|
|
- cell_defer(cache, mg->old_ocell, false);
|
|
clear_dirty(cache, mg->old_oblock, mg->cblock);
|
|
- cleanup_migration(mg);
|
|
+ cell_defer(cache, mg->old_ocell, false);
|
|
+ free_io_migration(mg);
|
|
return;
|
|
|
|
} else if (mg->demote) {
|
|
@@ -903,14 +917,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
|
|
mg->old_oblock);
|
|
if (mg->promote)
|
|
cell_defer(cache, mg->new_ocell, true);
|
|
- cleanup_migration(mg);
|
|
+ free_io_migration(mg);
|
|
return;
|
|
}
|
|
} else {
|
|
if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
|
|
DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
|
|
policy_remove_mapping(cache->policy, mg->new_oblock);
|
|
- cleanup_migration(mg);
|
|
+ free_io_migration(mg);
|
|
return;
|
|
}
|
|
}
|
|
@@ -943,18 +957,22 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
|
|
} else {
|
|
if (mg->invalidate)
|
|
policy_remove_mapping(cache->policy, mg->old_oblock);
|
|
- cleanup_migration(mg);
|
|
+ free_io_migration(mg);
|
|
}
|
|
|
|
} else {
|
|
- if (mg->requeue_holder)
|
|
+ if (mg->requeue_holder) {
|
|
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
|
|
cell_defer(cache, mg->new_ocell, true);
|
|
- else {
|
|
+ } else {
|
|
+ /*
|
|
+ * The block was promoted via an overwrite, so it's dirty.
|
|
+ */
|
|
+ set_dirty(cache, mg->new_oblock, mg->cblock);
|
|
bio_endio(mg->new_ocell->holder, 0);
|
|
cell_defer(cache, mg->new_ocell, false);
|
|
}
|
|
- clear_dirty(cache, mg->new_oblock, mg->cblock);
|
|
- cleanup_migration(mg);
|
|
+ free_io_migration(mg);
|
|
}
|
|
}
|
|
|
|
@@ -1061,7 +1079,8 @@ static void issue_copy(struct dm_cache_migration *mg)
|
|
|
|
avoid = is_discarded_oblock(cache, mg->new_oblock);
|
|
|
|
- if (!avoid && bio_writes_complete_block(cache, bio)) {
|
|
+ if (writeback_mode(&cache->features) &&
|
|
+ !avoid && bio_writes_complete_block(cache, bio)) {
|
|
issue_overwrite(mg, bio);
|
|
return;
|
|
}
|
|
@@ -1165,7 +1184,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
|
|
mg->new_ocell = cell;
|
|
mg->start_jiffies = jiffies;
|
|
|
|
- inc_nr_migrations(cache);
|
|
+ inc_io_migrations(cache);
|
|
quiesce_migration(mg);
|
|
}
|
|
|
|
@@ -1188,7 +1207,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
|
|
mg->new_ocell = NULL;
|
|
mg->start_jiffies = jiffies;
|
|
|
|
- inc_nr_migrations(cache);
|
|
+ inc_io_migrations(cache);
|
|
quiesce_migration(mg);
|
|
}
|
|
|
|
@@ -1214,7 +1233,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
|
|
mg->new_ocell = new_ocell;
|
|
mg->start_jiffies = jiffies;
|
|
|
|
- inc_nr_migrations(cache);
|
|
+ inc_io_migrations(cache);
|
|
quiesce_migration(mg);
|
|
}
|
|
|
|
@@ -1241,7 +1260,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
|
|
mg->new_ocell = NULL;
|
|
mg->start_jiffies = jiffies;
|
|
|
|
- inc_nr_migrations(cache);
|
|
+ inc_io_migrations(cache);
|
|
quiesce_migration(mg);
|
|
}
|
|
|
|
@@ -1302,7 +1321,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
|
|
|
|
static bool spare_migration_bandwidth(struct cache *cache)
|
|
{
|
|
- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
|
|
+ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
|
|
cache->sectors_per_block;
|
|
return current_volume < cache->migration_threshold;
|
|
}
|
|
@@ -1657,7 +1676,7 @@ static void stop_quiescing(struct cache *cache)
|
|
|
|
static void wait_for_migrations(struct cache *cache)
|
|
{
|
|
- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
|
|
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
|
|
}
|
|
|
|
static void stop_worker(struct cache *cache)
|
|
@@ -1768,9 +1787,6 @@ static void destroy(struct cache *cache)
|
|
{
|
|
unsigned i;
|
|
|
|
- if (cache->next_migration)
|
|
- mempool_free(cache->next_migration, cache->migration_pool);
|
|
-
|
|
if (cache->migration_pool)
|
|
mempool_destroy(cache->migration_pool);
|
|
|
|
@@ -2278,7 +2294,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
|
|
INIT_LIST_HEAD(&cache->quiesced_migrations);
|
|
INIT_LIST_HEAD(&cache->completed_migrations);
|
|
INIT_LIST_HEAD(&cache->need_commit_migrations);
|
|
- atomic_set(&cache->nr_migrations, 0);
|
|
+ atomic_set(&cache->nr_allocated_migrations, 0);
|
|
+ atomic_set(&cache->nr_io_migrations, 0);
|
|
init_waitqueue_head(&cache->migration_wait);
|
|
|
|
init_waitqueue_head(&cache->quiescing_wait);
|
|
@@ -2286,7 +2303,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
|
|
atomic_set(&cache->quiescing_ack, 0);
|
|
|
|
r = -ENOMEM;
|
|
- cache->nr_dirty = 0;
|
|
+ atomic_set(&cache->nr_dirty, 0);
|
|
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
|
|
if (!cache->dirty_bitset) {
|
|
*error = "could not allocate dirty bitset";
|
|
@@ -2338,8 +2355,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
|
|
goto bad;
|
|
}
|
|
|
|
- cache->next_migration = NULL;
|
|
-
|
|
cache->need_tick_bio = true;
|
|
cache->sized = false;
|
|
cache->invalidate = false;
|
|
@@ -2828,7 +2843,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
|
|
|
|
residency = policy_residency(cache->policy);
|
|
|
|
- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
|
|
+ DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
|
|
(unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
|
|
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
|
|
(unsigned long long)nr_blocks_metadata,
|
|
@@ -2841,7 +2856,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
|
|
(unsigned) atomic_read(&cache->stats.write_miss),
|
|
(unsigned) atomic_read(&cache->stats.demotion),
|
|
(unsigned) atomic_read(&cache->stats.promotion),
|
|
- (unsigned long long) from_cblock(cache->nr_dirty));
|
|
+ (unsigned long) atomic_read(&cache->nr_dirty));
|
|
|
|
if (writethrough_mode(&cache->features))
|
|
DMEMIT("1 writethrough ");
|
|
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
|
|
index 53b2132..4a8d19d 100644
|
|
--- a/drivers/md/dm-crypt.c
|
|
+++ b/drivers/md/dm-crypt.c
|
|
@@ -709,7 +709,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
|
|
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
|
|
crypto_xor(data + i * 8, buf, 8);
|
|
out:
|
|
- memset(buf, 0, sizeof(buf));
|
|
+ memzero_explicit(buf, sizeof(buf));
|
|
return r;
|
|
}
|
|
|
|
@@ -1681,6 +1681,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
unsigned int key_size, opt_params;
|
|
unsigned long long tmpll;
|
|
int ret;
|
|
+ size_t iv_size_padding;
|
|
struct dm_arg_set as;
|
|
const char *opt_string;
|
|
char dummy;
|
|
@@ -1717,12 +1718,23 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
|
|
cc->dmreq_start = sizeof(struct ablkcipher_request);
|
|
cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
|
|
- cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
|
|
- cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
|
|
- ~(crypto_tfm_ctx_alignment() - 1);
|
|
+ cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
|
|
+
|
|
+ if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
|
|
+ /* Allocate the padding exactly */
|
|
+ iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
|
|
+ & crypto_ablkcipher_alignmask(any_tfm(cc));
|
|
+ } else {
|
|
+ /*
|
|
+ * If the cipher requires greater alignment than kmalloc
|
|
+ * alignment, we don't know the exact position of the
|
|
+ * initialization vector. We must assume worst case.
|
|
+ */
|
|
+ iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
|
|
+ }
|
|
|
|
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
|
|
- sizeof(struct dm_crypt_request) + cc->iv_size);
|
|
+ sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
|
|
if (!cc->req_pool) {
|
|
ti->error = "Cannot allocate crypt request mempool";
|
|
goto bad;
|
|
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
|
|
index db404a0..080e767 100644
|
|
--- a/drivers/md/dm-io.c
|
|
+++ b/drivers/md/dm-io.c
|
|
@@ -291,6 +291,19 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
|
|
struct request_queue *q = bdev_get_queue(where->bdev);
|
|
unsigned short logical_block_size = queue_logical_block_size(q);
|
|
sector_t num_sectors;
|
|
+ unsigned int uninitialized_var(special_cmd_max_sectors);
|
|
+
|
|
+ /*
|
|
+ * Reject unsupported discard and write same requests.
|
|
+ */
|
|
+ if (rw & REQ_DISCARD)
|
|
+ special_cmd_max_sectors = q->limits.max_discard_sectors;
|
|
+ else if (rw & REQ_WRITE_SAME)
|
|
+ special_cmd_max_sectors = q->limits.max_write_same_sectors;
|
|
+ if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
|
|
+ dec_count(io, region, -EOPNOTSUPP);
|
|
+ return;
|
|
+ }
|
|
|
|
/*
|
|
* where->count may be zero if rw holds a flush and we need to
|
|
@@ -313,7 +326,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
|
|
store_io_and_region_in_bio(bio, io, region);
|
|
|
|
if (rw & REQ_DISCARD) {
|
|
- num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
|
|
+ num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
|
|
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
|
|
remaining -= num_sectors;
|
|
} else if (rw & REQ_WRITE_SAME) {
|
|
@@ -322,7 +335,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
|
|
*/
|
|
dp->get_page(dp, &page, &len, &offset);
|
|
bio_add_page(bio, page, logical_block_size, offset);
|
|
- num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
|
|
+ num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
|
|
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
|
|
|
|
offset = 0;
|
|
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
|
|
index 08d9a20..c69d0b7 100644
|
|
--- a/drivers/md/dm-log-userspace-transfer.c
|
|
+++ b/drivers/md/dm-log-userspace-transfer.c
|
|
@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
|
|
|
|
r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
|
|
if (r) {
|
|
- cn_del_callback(&ulog_cn_id);
|
|
+ kfree(prealloced_cn_msg);
|
|
return r;
|
|
}
|
|
|
|
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
|
|
index 4880b69..5971538 100644
|
|
--- a/drivers/md/dm-raid.c
|
|
+++ b/drivers/md/dm-raid.c
|
|
@@ -785,8 +785,7 @@ struct dm_raid_superblock {
|
|
__le32 layout;
|
|
__le32 stripe_sectors;
|
|
|
|
- __u8 pad[452]; /* Round struct to 512 bytes. */
|
|
- /* Always set to 0 when writing. */
|
|
+ /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
|
|
} __packed;
|
|
|
|
static int read_disk_sb(struct md_rdev *rdev, int size)
|
|
@@ -823,7 +822,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
|
|
test_bit(Faulty, &(rs->dev[i].rdev.flags)))
|
|
failed_devices |= (1ULL << i);
|
|
|
|
- memset(sb, 0, sizeof(*sb));
|
|
+ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
|
|
|
|
sb->magic = cpu_to_le32(DM_RAID_MAGIC);
|
|
sb->features = cpu_to_le32(0); /* No features yet */
|
|
@@ -858,7 +857,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
|
|
uint64_t events_sb, events_refsb;
|
|
|
|
rdev->sb_start = 0;
|
|
- rdev->sb_size = sizeof(*sb);
|
|
+ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
|
|
+ if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
|
|
+ DMERR("superblock size of a logical block is no longer valid");
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
ret = read_disk_sb(rdev, rdev->sb_size);
|
|
if (ret)
|
|
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
|
|
index 7dfdb5c..089d627 100644
|
|
--- a/drivers/md/dm-raid1.c
|
|
+++ b/drivers/md/dm-raid1.c
|
|
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
|
|
return;
|
|
}
|
|
|
|
+ /*
|
|
+ * If the bio is discard, return an error, but do not
|
|
+ * degrade the array.
|
|
+ */
|
|
+ if (bio->bi_rw & REQ_DISCARD) {
|
|
+ bio_endio(bio, -EOPNOTSUPP);
|
|
+ return;
|
|
+ }
|
|
+
|
|
for (i = 0; i < ms->nr_mirrors; i++)
|
|
if (test_bit(i, &error))
|
|
fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
|
|
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
|
|
index ebddef5..c356a10 100644
|
|
--- a/drivers/md/dm-snap.c
|
|
+++ b/drivers/md/dm-snap.c
|
|
@@ -1440,8 +1440,6 @@ out:
|
|
full_bio->bi_private = pe->full_bio_private;
|
|
atomic_inc(&full_bio->bi_remaining);
|
|
}
|
|
- free_pending_exception(pe);
|
|
-
|
|
increment_pending_exceptions_done_count();
|
|
|
|
up_write(&s->lock);
|
|
@@ -1458,6 +1456,8 @@ out:
|
|
}
|
|
|
|
retry_origin_bios(s, origin_bios);
|
|
+
|
|
+ free_pending_exception(pe);
|
|
}
|
|
|
|
static void commit_callback(void *context, int success)
|
|
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
|
|
index 28a9012..b3b0697 100644
|
|
--- a/drivers/md/dm-stats.c
|
|
+++ b/drivers/md/dm-stats.c
|
|
@@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md,
|
|
return -EINVAL;
|
|
|
|
if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
|
|
+ if (!divisor)
|
|
+ return -EINVAL;
|
|
step = end - start;
|
|
if (do_div(step, divisor))
|
|
step++;
|
|
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
|
|
index e9d33ad..3412b86 100644
|
|
--- a/drivers/md/dm-thin-metadata.c
|
|
+++ b/drivers/md/dm-thin-metadata.c
|
|
@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
|
|
return r;
|
|
|
|
disk_super = dm_block_data(copy);
|
|
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
|
|
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
|
|
+ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
|
|
+ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
|
|
dm_sm_dec_block(pmd->metadata_sm, held_root);
|
|
|
|
return dm_tm_unlock(pmd->tm, copy);
|
|
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
|
|
index 359af3a..c1120eb 100644
|
|
--- a/drivers/md/dm-thin.c
|
|
+++ b/drivers/md/dm-thin.c
|
|
@@ -916,6 +916,24 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
|
|
}
|
|
}
|
|
|
|
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
|
|
+
|
|
+static void check_for_space(struct pool *pool)
|
|
+{
|
|
+ int r;
|
|
+ dm_block_t nr_free;
|
|
+
|
|
+ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
|
|
+ return;
|
|
+
|
|
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
|
|
+ if (r)
|
|
+ return;
|
|
+
|
|
+ if (nr_free)
|
|
+ set_pool_mode(pool, PM_WRITE);
|
|
+}
|
|
+
|
|
/*
|
|
* A non-zero return indicates read_only or fail_io mode.
|
|
* Many callers don't care about the return value.
|
|
@@ -930,6 +948,8 @@ static int commit(struct pool *pool)
|
|
r = dm_pool_commit_metadata(pool->pmd);
|
|
if (r)
|
|
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
|
|
+ else
|
|
+ check_for_space(pool);
|
|
|
|
return r;
|
|
}
|
|
@@ -948,8 +968,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
|
|
}
|
|
}
|
|
|
|
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
|
|
-
|
|
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
|
|
{
|
|
int r;
|
|
@@ -1592,7 +1610,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
|
|
pool->process_bio = process_bio_read_only;
|
|
pool->process_discard = process_discard;
|
|
pool->process_prepared_mapping = process_prepared_mapping;
|
|
- pool->process_prepared_discard = process_prepared_discard_passdown;
|
|
+ pool->process_prepared_discard = process_prepared_discard;
|
|
|
|
if (!pool->pf.error_if_no_space && no_space_timeout)
|
|
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
|
|
@@ -1704,6 +1722,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
|
|
return DM_MAPIO_SUBMITTED;
|
|
}
|
|
|
|
+ /*
|
|
+ * We must hold the virtual cell before doing the lookup, otherwise
|
|
+ * there's a race with discard.
|
|
+ */
|
|
+ build_virtual_key(tc->td, block, &key);
|
|
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
|
|
+ return DM_MAPIO_SUBMITTED;
|
|
+
|
|
r = dm_thin_find_block(td, block, 0, &result);
|
|
|
|
/*
|
|
@@ -1727,13 +1753,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
|
|
* shared flag will be set in their case.
|
|
*/
|
|
thin_defer_bio(tc, bio);
|
|
+ cell_defer_no_holder_no_free(tc, &cell1);
|
|
return DM_MAPIO_SUBMITTED;
|
|
}
|
|
|
|
- build_virtual_key(tc->td, block, &key);
|
|
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
|
|
- return DM_MAPIO_SUBMITTED;
|
|
-
|
|
build_data_key(tc->td, result.block, &key);
|
|
if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
|
|
cell_defer_no_holder_no_free(tc, &cell1);
|
|
@@ -1754,6 +1777,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
|
|
* of doing so.
|
|
*/
|
|
handle_unserviceable_bio(tc->pool, bio);
|
|
+ cell_defer_no_holder_no_free(tc, &cell1);
|
|
return DM_MAPIO_SUBMITTED;
|
|
}
|
|
/* fall through */
|
|
@@ -1764,6 +1788,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
|
|
* provide the hint to load the metadata into cache.
|
|
*/
|
|
thin_defer_bio(tc, bio);
|
|
+ cell_defer_no_holder_no_free(tc, &cell1);
|
|
return DM_MAPIO_SUBMITTED;
|
|
|
|
default:
|
|
@@ -1773,6 +1798,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
|
|
* pool is switched to fail-io mode.
|
|
*/
|
|
bio_io_error(bio);
|
|
+ cell_defer_no_holder_no_free(tc, &cell1);
|
|
return DM_MAPIO_SUBMITTED;
|
|
}
|
|
}
|
|
@@ -2718,6 +2744,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
|
|
struct pool_c *pt = ti->private;
|
|
struct pool *pool = pt->pool;
|
|
|
|
+ if (get_pool_mode(pool) >= PM_READ_ONLY) {
|
|
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
|
|
+ dm_device_name(pool->pool_md));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
if (!strcasecmp(argv[0], "create_thin"))
|
|
r = process_create_thin_mesg(argc, argv, pool);
|
|
|
|
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
|
|
index 65ee3a0..e400591 100644
|
|
--- a/drivers/md/dm.c
|
|
+++ b/drivers/md/dm.c
|
|
@@ -2288,7 +2288,7 @@ int dm_setup_md_queue(struct mapped_device *md)
|
|
return 0;
|
|
}
|
|
|
|
-static struct mapped_device *dm_find_md(dev_t dev)
|
|
+struct mapped_device *dm_get_md(dev_t dev)
|
|
{
|
|
struct mapped_device *md;
|
|
unsigned minor = MINOR(dev);
|
|
@@ -2299,12 +2299,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
|
|
spin_lock(&_minor_lock);
|
|
|
|
md = idr_find(&_minor_idr, minor);
|
|
- if (md && (md == MINOR_ALLOCED ||
|
|
- (MINOR(disk_devt(dm_disk(md))) != minor) ||
|
|
- dm_deleting_md(md) ||
|
|
- test_bit(DMF_FREEING, &md->flags))) {
|
|
- md = NULL;
|
|
- goto out;
|
|
+ if (md) {
|
|
+ if ((md == MINOR_ALLOCED ||
|
|
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
|
|
+ dm_deleting_md(md) ||
|
|
+ test_bit(DMF_FREEING, &md->flags))) {
|
|
+ md = NULL;
|
|
+ goto out;
|
|
+ }
|
|
+ dm_get(md);
|
|
}
|
|
|
|
out:
|
|
@@ -2312,16 +2315,6 @@ out:
|
|
|
|
return md;
|
|
}
|
|
-
|
|
-struct mapped_device *dm_get_md(dev_t dev)
|
|
-{
|
|
- struct mapped_device *md = dm_find_md(dev);
|
|
-
|
|
- if (md)
|
|
- dm_get(md);
|
|
-
|
|
- return md;
|
|
-}
|
|
EXPORT_SYMBOL_GPL(dm_get_md);
|
|
|
|
void *dm_get_mdptr(struct mapped_device *md)
|
|
@@ -2359,10 +2352,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
|
|
set_bit(DMF_FREEING, &md->flags);
|
|
spin_unlock(&_minor_lock);
|
|
|
|
+ /*
|
|
+ * Take suspend_lock so that presuspend and postsuspend methods
|
|
+ * do not race with internal suspend.
|
|
+ */
|
|
+ mutex_lock(&md->suspend_lock);
|
|
if (!dm_suspended_md(md)) {
|
|
dm_table_presuspend_targets(map);
|
|
dm_table_postsuspend_targets(map);
|
|
}
|
|
+ mutex_unlock(&md->suspend_lock);
|
|
|
|
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
|
|
dm_put_live_table(md, srcu_idx);
|
|
diff --git a/drivers/md/md.c b/drivers/md/md.c
|
|
index 73aedcb..2ffd277 100644
|
|
--- a/drivers/md/md.c
|
|
+++ b/drivers/md/md.c
|
|
@@ -5333,6 +5333,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
|
|
printk("md: %s still in use.\n",mdname(mddev));
|
|
if (did_freeze) {
|
|
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
|
|
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
|
md_wakeup_thread(mddev->thread);
|
|
}
|
|
err = -EBUSY;
|
|
@@ -5347,6 +5348,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
|
|
mddev->ro = 1;
|
|
set_disk_ro(mddev->gendisk, 1);
|
|
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
|
|
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
|
+ md_wakeup_thread(mddev->thread);
|
|
sysfs_notify_dirent_safe(mddev->sysfs_state);
|
|
err = 0;
|
|
}
|
|
@@ -5390,6 +5393,7 @@ static int do_md_stop(struct mddev * mddev, int mode,
|
|
mutex_unlock(&mddev->open_mutex);
|
|
if (did_freeze) {
|
|
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
|
|
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
|
md_wakeup_thread(mddev->thread);
|
|
}
|
|
return -EBUSY;
|
|
@@ -5641,8 +5645,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
|
|
char *ptr, *buf = NULL;
|
|
int err = -ENOMEM;
|
|
|
|
- file = kmalloc(sizeof(*file), GFP_NOIO);
|
|
-
|
|
+ file = kzalloc(sizeof(*file), GFP_NOIO);
|
|
if (!file)
|
|
goto out;
|
|
|
|
@@ -6228,7 +6231,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
|
|
mddev->ctime != info->ctime ||
|
|
mddev->level != info->level ||
|
|
/* mddev->layout != info->layout || */
|
|
- !mddev->persistent != info->not_persistent||
|
|
+ mddev->persistent != !info->not_persistent ||
|
|
mddev->chunk_sectors != info->chunk_size >> 9 ||
|
|
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
|
|
((state^info->state) & 0xfffffe00)
|
|
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
|
|
index 37d367b..bf2b80d 100644
|
|
--- a/drivers/md/persistent-data/dm-btree-internal.h
|
|
+++ b/drivers/md/persistent-data/dm-btree-internal.h
|
|
@@ -42,6 +42,12 @@ struct btree_node {
|
|
} __packed;
|
|
|
|
|
|
+/*
|
|
+ * Locks a block using the btree node validator.
|
|
+ */
|
|
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
|
|
+ struct dm_block **result);
|
|
+
|
|
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
|
|
struct dm_btree_value_type *vt);
|
|
|
|
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
|
|
index b88757c..a03178e 100644
|
|
--- a/drivers/md/persistent-data/dm-btree-remove.c
|
|
+++ b/drivers/md/persistent-data/dm-btree-remove.c
|
|
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
|
|
|
|
if (s < 0 && nr_center < -s) {
|
|
/* not enough in central node */
|
|
- shift(left, center, nr_center);
|
|
- s = nr_center - target;
|
|
+ shift(left, center, -nr_center);
|
|
+ s += nr_center;
|
|
shift(left, right, s);
|
|
nr_right += s;
|
|
} else
|
|
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
|
|
if (s > 0 && nr_center < s) {
|
|
/* not enough in central node */
|
|
shift(center, right, nr_center);
|
|
- s = target - nr_center;
|
|
+ s -= nr_center;
|
|
shift(left, right, s);
|
|
nr_left -= s;
|
|
} else
|
|
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
|
|
index cf9fd67..1b5e13e 100644
|
|
--- a/drivers/md/persistent-data/dm-btree-spine.c
|
|
+++ b/drivers/md/persistent-data/dm-btree-spine.c
|
|
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
|
|
|
|
/*----------------------------------------------------------------*/
|
|
|
|
-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
|
|
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
|
|
struct dm_block **result)
|
|
{
|
|
return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
|
|
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
|
|
index 416060c..fdd3793 100644
|
|
--- a/drivers/md/persistent-data/dm-btree.c
|
|
+++ b/drivers/md/persistent-data/dm-btree.c
|
|
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
|
|
int r;
|
|
struct del_stack *s;
|
|
|
|
- s = kmalloc(sizeof(*s), GFP_KERNEL);
|
|
+ s = kmalloc(sizeof(*s), GFP_NOIO);
|
|
if (!s)
|
|
return -ENOMEM;
|
|
s->info = info;
|
|
@@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
|
|
* FIXME: We shouldn't use a recursive algorithm when we have limited stack
|
|
* space. Also this only works for single level trees.
|
|
*/
|
|
-static int walk_node(struct ro_spine *s, dm_block_t block,
|
|
+static int walk_node(struct dm_btree_info *info, dm_block_t block,
|
|
int (*fn)(void *context, uint64_t *keys, void *leaf),
|
|
void *context)
|
|
{
|
|
int r;
|
|
unsigned i, nr;
|
|
+ struct dm_block *node;
|
|
struct btree_node *n;
|
|
uint64_t keys;
|
|
|
|
- r = ro_step(s, block);
|
|
- n = ro_node(s);
|
|
+ r = bn_read_lock(info, block, &node);
|
|
+ if (r)
|
|
+ return r;
|
|
+
|
|
+ n = dm_block_data(node);
|
|
|
|
nr = le32_to_cpu(n->header.nr_entries);
|
|
for (i = 0; i < nr; i++) {
|
|
if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
|
|
- r = walk_node(s, value64(n, i), fn, context);
|
|
+ r = walk_node(info, value64(n, i), fn, context);
|
|
if (r)
|
|
goto out;
|
|
} else {
|
|
@@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
|
|
}
|
|
|
|
out:
|
|
- ro_pop(s);
|
|
+ dm_tm_unlock(info->tm, node);
|
|
return r;
|
|
}
|
|
|
|
@@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
|
|
int (*fn)(void *context, uint64_t *keys, void *leaf),
|
|
void *context)
|
|
{
|
|
- int r;
|
|
- struct ro_spine spine;
|
|
-
|
|
BUG_ON(info->levels > 1);
|
|
-
|
|
- init_ro_spine(&spine, info);
|
|
- r = walk_node(&spine, root, fn, context);
|
|
- exit_ro_spine(&spine);
|
|
-
|
|
- return r;
|
|
+ return walk_node(info, root, fn, context);
|
|
}
|
|
EXPORT_SYMBOL_GPL(dm_btree_walk);
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
|
|
index 786b689..199c9cc 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
|
|
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
|
|
@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
|
|
smm->recursion_count++;
|
|
}
|
|
|
|
+static int apply_bops(struct sm_metadata *smm)
|
|
+{
|
|
+ int r = 0;
|
|
+
|
|
+ while (!brb_empty(&smm->uncommitted)) {
|
|
+ struct block_op bop;
|
|
+
|
|
+ r = brb_pop(&smm->uncommitted, &bop);
|
|
+ if (r) {
|
|
+ DMERR("bug in bop ring buffer");
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ r = commit_bop(smm, &bop);
|
|
+ if (r)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return r;
|
|
+}
|
|
+
|
|
static int out(struct sm_metadata *smm)
|
|
{
|
|
int r = 0;
|
|
@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- if (smm->recursion_count == 1) {
|
|
- while (!brb_empty(&smm->uncommitted)) {
|
|
- struct block_op bop;
|
|
-
|
|
- r = brb_pop(&smm->uncommitted, &bop);
|
|
- if (r) {
|
|
- DMERR("bug in bop ring buffer");
|
|
- break;
|
|
- }
|
|
-
|
|
- r = commit_bop(smm, &bop);
|
|
- if (r)
|
|
- break;
|
|
- }
|
|
- }
|
|
+ if (smm->recursion_count == 1)
|
|
+ apply_bops(smm);
|
|
|
|
smm->recursion_count--;
|
|
|
|
@@ -564,7 +572,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
|
|
{
|
|
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
|
|
|
|
- return smm->ll.nr_blocks;
|
|
+ *count = smm->ll.nr_blocks;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
|
|
@@ -700,6 +710,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
|
|
}
|
|
old_len = smm->begin;
|
|
|
|
+ r = apply_bops(smm);
|
|
+ if (r) {
|
|
+ DMERR("%s: apply_bops failed", __func__);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
r = sm_ll_commit(&smm->ll);
|
|
if (r)
|
|
goto out;
|
|
@@ -769,6 +785,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
|
|
if (r)
|
|
return r;
|
|
|
|
+ r = apply_bops(smm);
|
|
+ if (r) {
|
|
+ DMERR("%s: apply_bops failed", __func__);
|
|
+ return r;
|
|
+ }
|
|
+
|
|
return sm_metadata_commit(sm);
|
|
}
|
|
|
|
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
|
|
index 407a99e..9afd00b 100644
|
|
--- a/drivers/md/raid0.c
|
|
+++ b/drivers/md/raid0.c
|
|
@@ -320,7 +320,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
|
|
|
|
/*
|
|
* remaps the bio to the target device. we separate two flows.
|
|
- * power 2 flow and a general flow for the sake of perfromance
|
|
+ * power 2 flow and a general flow for the sake of performance
|
|
*/
|
|
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
|
|
sector_t sector, sector_t *sector_offset)
|
|
@@ -531,6 +531,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
|
|
? (sector & (chunk_sects-1))
|
|
: sector_div(sector, chunk_sects));
|
|
|
|
+ /* Restore due to sector_div */
|
|
+ sector = bio->bi_iter.bi_sector;
|
|
+
|
|
if (sectors < bio_sectors(bio)) {
|
|
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
|
|
bio_chain(split, bio);
|
|
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
|
|
index 56e24c0..47b7c31 100644
|
|
--- a/drivers/md/raid1.c
|
|
+++ b/drivers/md/raid1.c
|
|
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
|
if (r1_bio->mddev->degraded == conf->raid_disks ||
|
|
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
|
|
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
|
|
+ test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
|
|
uptodate = 1;
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
}
|
|
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
|
|
has_nonrot_disk = 0;
|
|
choose_next_idle = 0;
|
|
|
|
- if (conf->mddev->recovery_cp < MaxSector &&
|
|
- (this_sector + sectors >= conf->next_resync))
|
|
- choose_first = 1;
|
|
- else
|
|
- choose_first = 0;
|
|
+ choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
|
|
|
|
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
|
|
sector_t dist;
|
|
@@ -565,7 +561,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
|
|
if (test_bit(WriteMostly, &rdev->flags)) {
|
|
/* Don't balance among write-mostly, just
|
|
* use the first as a last resort */
|
|
- if (best_disk < 0) {
|
|
+ if (best_dist_disk < 0) {
|
|
if (is_badblock(rdev, this_sector, sectors,
|
|
&first_bad, &bad_sectors)) {
|
|
if (first_bad < this_sector)
|
|
@@ -574,7 +570,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
|
|
best_good_sectors = first_bad - this_sector;
|
|
} else
|
|
best_good_sectors = sectors;
|
|
- best_disk = disk;
|
|
+ best_dist_disk = disk;
|
|
+ best_pending_disk = disk;
|
|
}
|
|
continue;
|
|
}
|
|
@@ -831,7 +828,7 @@ static void flush_pending_writes(struct r1conf *conf)
|
|
* there is no normal IO happeing. It must arrange to call
|
|
* lower_barrier when the particular background IO completes.
|
|
*/
|
|
-static void raise_barrier(struct r1conf *conf)
|
|
+static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
|
|
{
|
|
spin_lock_irq(&conf->resync_lock);
|
|
|
|
@@ -841,6 +838,7 @@ static void raise_barrier(struct r1conf *conf)
|
|
|
|
/* block any new IO from starting */
|
|
conf->barrier++;
|
|
+ conf->next_resync = sector_nr;
|
|
|
|
/* For these conditions we must wait:
|
|
* A: while the array is in frozen state
|
|
@@ -849,14 +847,17 @@ static void raise_barrier(struct r1conf *conf)
|
|
* C: next_resync + RESYNC_SECTORS > start_next_window, meaning
|
|
* next resync will reach to the window which normal bios are
|
|
* handling.
|
|
+ * D: while there are any active requests in the current window.
|
|
*/
|
|
wait_event_lock_irq(conf->wait_barrier,
|
|
!conf->array_frozen &&
|
|
conf->barrier < RESYNC_DEPTH &&
|
|
+ conf->current_window_requests == 0 &&
|
|
(conf->start_next_window >=
|
|
conf->next_resync + RESYNC_SECTORS),
|
|
conf->resync_lock);
|
|
|
|
+ conf->nr_pending++;
|
|
spin_unlock_irq(&conf->resync_lock);
|
|
}
|
|
|
|
@@ -866,6 +867,7 @@ static void lower_barrier(struct r1conf *conf)
|
|
BUG_ON(conf->barrier <= 0);
|
|
spin_lock_irqsave(&conf->resync_lock, flags);
|
|
conf->barrier--;
|
|
+ conf->nr_pending--;
|
|
spin_unlock_irqrestore(&conf->resync_lock, flags);
|
|
wake_up(&conf->wait_barrier);
|
|
}
|
|
@@ -877,12 +879,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
|
|
if (conf->array_frozen || !bio)
|
|
wait = true;
|
|
else if (conf->barrier && bio_data_dir(bio) == WRITE) {
|
|
- if (conf->next_resync < RESYNC_WINDOW_SECTORS)
|
|
- wait = true;
|
|
- else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
|
|
- >= bio_end_sector(bio)) ||
|
|
- (conf->next_resync + NEXT_NORMALIO_DISTANCE
|
|
- <= bio->bi_iter.bi_sector))
|
|
+ if ((conf->mddev->curr_resync_completed
|
|
+ >= bio_end_sector(bio)) ||
|
|
+ (conf->next_resync + NEXT_NORMALIO_DISTANCE
|
|
+ <= bio->bi_iter.bi_sector))
|
|
wait = false;
|
|
else
|
|
wait = true;
|
|
@@ -919,8 +919,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
|
|
}
|
|
|
|
if (bio && bio_data_dir(bio) == WRITE) {
|
|
- if (conf->next_resync + NEXT_NORMALIO_DISTANCE
|
|
- <= bio->bi_iter.bi_sector) {
|
|
+ if (bio->bi_iter.bi_sector >=
|
|
+ conf->mddev->curr_resync_completed) {
|
|
if (conf->start_next_window == MaxSector)
|
|
conf->start_next_window =
|
|
conf->next_resync +
|
|
@@ -1186,6 +1186,7 @@ read_again:
|
|
atomic_read(&bitmap->behind_writes) == 0);
|
|
}
|
|
r1_bio->read_disk = rdisk;
|
|
+ r1_bio->start_next_window = 0;
|
|
|
|
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
|
|
bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
|
|
@@ -1476,6 +1477,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
|
|
{
|
|
char b[BDEVNAME_SIZE];
|
|
struct r1conf *conf = mddev->private;
|
|
+ unsigned long flags;
|
|
|
|
/*
|
|
* If it is not operational, then we have already marked it as dead
|
|
@@ -1495,18 +1497,17 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
|
|
return;
|
|
}
|
|
set_bit(Blocked, &rdev->flags);
|
|
+ spin_lock_irqsave(&conf->device_lock, flags);
|
|
if (test_and_clear_bit(In_sync, &rdev->flags)) {
|
|
- unsigned long flags;
|
|
- spin_lock_irqsave(&conf->device_lock, flags);
|
|
mddev->degraded++;
|
|
set_bit(Faulty, &rdev->flags);
|
|
- spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
- /*
|
|
- * if recovery is running, make sure it aborts.
|
|
- */
|
|
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
|
} else
|
|
set_bit(Faulty, &rdev->flags);
|
|
+ spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
+ /*
|
|
+ * if recovery is running, make sure it aborts.
|
|
+ */
|
|
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
|
set_bit(MD_CHANGE_DEVS, &mddev->flags);
|
|
printk(KERN_ALERT
|
|
"md/raid1:%s: Disk failure on %s, disabling device.\n"
|
|
@@ -1548,8 +1549,13 @@ static void close_sync(struct r1conf *conf)
|
|
mempool_destroy(conf->r1buf_pool);
|
|
conf->r1buf_pool = NULL;
|
|
|
|
+ spin_lock_irq(&conf->resync_lock);
|
|
conf->next_resync = 0;
|
|
conf->start_next_window = MaxSector;
|
|
+ conf->current_window_requests +=
|
|
+ conf->next_window_requests;
|
|
+ conf->next_window_requests = 0;
|
|
+ spin_unlock_irq(&conf->resync_lock);
|
|
}
|
|
|
|
static int raid1_spare_active(struct mddev *mddev)
|
|
@@ -1563,7 +1569,10 @@ static int raid1_spare_active(struct mddev *mddev)
|
|
* Find all failed disks within the RAID1 configuration
|
|
* and mark them readable.
|
|
* Called under mddev lock, so rcu protection not needed.
|
|
+ * device_lock used to avoid races with raid1_end_read_request
|
|
+ * which expects 'In_sync' flags and ->degraded to be consistent.
|
|
*/
|
|
+ spin_lock_irqsave(&conf->device_lock, flags);
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
|
struct md_rdev *rdev = conf->mirrors[i].rdev;
|
|
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
|
|
@@ -1593,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
|
|
sysfs_notify_dirent_safe(rdev->sysfs_state);
|
|
}
|
|
}
|
|
- spin_lock_irqsave(&conf->device_lock, flags);
|
|
mddev->degraded -= count;
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
|
|
@@ -2150,7 +2158,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
|
|
d--;
|
|
rdev = conf->mirrors[d].rdev;
|
|
if (rdev &&
|
|
- test_bit(In_sync, &rdev->flags))
|
|
+ !test_bit(Faulty, &rdev->flags))
|
|
r1_sync_page_io(rdev, sect, s,
|
|
conf->tmppage, WRITE);
|
|
}
|
|
@@ -2162,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
|
|
d--;
|
|
rdev = conf->mirrors[d].rdev;
|
|
if (rdev &&
|
|
- test_bit(In_sync, &rdev->flags)) {
|
|
+ !test_bit(Faulty, &rdev->flags)) {
|
|
if (r1_sync_page_io(rdev, sect, s,
|
|
conf->tmppage, READ)) {
|
|
atomic_add(s, &rdev->corrected_errors);
|
|
@@ -2541,9 +2549,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
|
|
|
|
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
|
|
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
|
|
- raise_barrier(conf);
|
|
|
|
- conf->next_resync = sector_nr;
|
|
+ raise_barrier(conf, sector_nr);
|
|
|
|
rcu_read_lock();
|
|
/*
|
|
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
|
|
index cb882aa..a46124e 100644
|
|
--- a/drivers/md/raid10.c
|
|
+++ b/drivers/md/raid10.c
|
|
@@ -1684,13 +1684,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
return;
|
|
}
|
|
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
|
|
+ if (test_and_clear_bit(In_sync, &rdev->flags))
|
|
mddev->degraded++;
|
|
- /*
|
|
- * if recovery is running, make sure it aborts.
|
|
- */
|
|
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
|
- }
|
|
+ /*
|
|
+ * If recovery is running, make sure it aborts.
|
|
+ */
|
|
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
|
set_bit(Blocked, &rdev->flags);
|
|
set_bit(Faulty, &rdev->flags);
|
|
set_bit(MD_CHANGE_DEVS, &mddev->flags);
|
|
@@ -2954,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
*/
|
|
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
|
|
end_reshape(conf);
|
|
+ close_sync(conf);
|
|
return 0;
|
|
}
|
|
|
|
@@ -4411,7 +4411,7 @@ read_more:
|
|
read_bio->bi_private = r10_bio;
|
|
read_bio->bi_end_io = end_sync_read;
|
|
read_bio->bi_rw = READ;
|
|
- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
|
|
+ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
|
|
read_bio->bi_flags |= 1 << BIO_UPTODATE;
|
|
read_bio->bi_vcnt = 0;
|
|
read_bio->bi_iter.bi_size = 0;
|
|
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
|
|
index 16f5c21..b98c70e 100644
|
|
--- a/drivers/md/raid5.c
|
|
+++ b/drivers/md/raid5.c
|
|
@@ -64,6 +64,10 @@
|
|
#define cpu_to_group(cpu) cpu_to_node(cpu)
|
|
#define ANY_GROUP NUMA_NO_NODE
|
|
|
|
+static bool devices_handle_discard_safely = false;
|
|
+module_param(devices_handle_discard_safely, bool, 0644);
|
|
+MODULE_PARM_DESC(devices_handle_discard_safely,
|
|
+ "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
|
|
static struct workqueue_struct *raid5_wq;
|
|
/*
|
|
* Stripe cache
|
|
@@ -1910,7 +1914,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
|
|
|
|
conf->slab_cache = sc;
|
|
conf->active_name = 1-conf->active_name;
|
|
- conf->pool_size = newsize;
|
|
+ if (!err)
|
|
+ conf->pool_size = newsize;
|
|
return err;
|
|
}
|
|
|
|
@@ -2892,7 +2897,8 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
|
|
(s->failed >= 2 && fdev[1]->toread) ||
|
|
(sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
|
|
!test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
|
|
- (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
|
|
+ ((sh->raid_conf->level == 6 || sh->sector >= sh->raid_conf->mddev->recovery_cp)
|
|
+ && s->failed && s->to_write))) {
|
|
/* we would like to get this block, possibly by computing it,
|
|
* otherwise read it if the backing disk is insync
|
|
*/
|
|
@@ -3066,7 +3072,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
|
|
* generate correct data from the parity.
|
|
*/
|
|
if (conf->max_degraded == 2 ||
|
|
- (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
|
|
+ (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
|
|
+ s->failed == 0)) {
|
|
/* Calculate the real rcw later - for now make it
|
|
* look like rcw is cheaper
|
|
*/
|
|
@@ -3779,6 +3786,8 @@ static void handle_stripe(struct stripe_head *sh)
|
|
set_bit(R5_Wantwrite, &dev->flags);
|
|
if (prexor)
|
|
continue;
|
|
+ if (s.failed > 1)
|
|
+ continue;
|
|
if (!test_bit(R5_Insync, &dev->flags) ||
|
|
((i == sh->pd_idx || i == sh->qd_idx) &&
|
|
s.failed == 0))
|
|
@@ -6115,7 +6124,7 @@ static int run(struct mddev *mddev)
|
|
mddev->queue->limits.discard_granularity = stripe;
|
|
/*
|
|
* unaligned part of discard request will be ignored, so can't
|
|
- * guarantee discard_zerors_data
|
|
+ * guarantee discard_zeroes_data
|
|
*/
|
|
mddev->queue->limits.discard_zeroes_data = 0;
|
|
|
|
@@ -6140,6 +6149,18 @@ static int run(struct mddev *mddev)
|
|
!bdev_get_queue(rdev->bdev)->
|
|
limits.discard_zeroes_data)
|
|
discard_supported = false;
|
|
+ /* Unfortunately, discard_zeroes_data is not currently
|
|
+ * a guarantee - just a hint. So we only allow DISCARD
|
|
+ * if the sysadmin has confirmed that only safe devices
|
|
+ * are in use by setting a module parameter.
|
|
+ */
|
|
+ if (!devices_handle_discard_safely) {
|
|
+ if (discard_supported) {
|
|
+ pr_info("md/raid456: discard support disabled due to uncertainty.\n");
|
|
+ pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
|
|
+ }
|
|
+ discard_supported = false;
|
|
+ }
|
|
}
|
|
|
|
if (discard_supported &&
|
|
diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
|
|
index f953d33..4bfbd5f 100644
|
|
--- a/drivers/media/common/siano/Kconfig
|
|
+++ b/drivers/media/common/siano/Kconfig
|
|
@@ -22,8 +22,7 @@ config SMS_SIANO_DEBUGFS
|
|
bool "Enable debugfs for smsdvb"
|
|
depends on SMS_SIANO_MDTV
|
|
depends on DEBUG_FS
|
|
- depends on SMS_USB_DRV
|
|
- depends on CONFIG_SMS_USB_DRV = CONFIG_SMS_SDIO_DRV
|
|
+ depends on SMS_USB_DRV = SMS_SDIO_DRV
|
|
|
|
---help---
|
|
Choose Y to enable visualizing a dump of the frontend
|
|
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
|
|
index 80643ef..fabe2fc 100644
|
|
--- a/drivers/media/dvb-core/dvb-usb-ids.h
|
|
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
|
|
@@ -279,6 +279,8 @@
|
|
#define USB_PID_PCTV_400E 0x020f
|
|
#define USB_PID_PCTV_450E 0x0222
|
|
#define USB_PID_PCTV_452E 0x021f
|
|
+#define USB_PID_PCTV_78E 0x025a
|
|
+#define USB_PID_PCTV_79E 0x0262
|
|
#define USB_PID_REALTEK_RTL2831U 0x2831
|
|
#define USB_PID_REALTEK_RTL2832U 0x2832
|
|
#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
|
|
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
|
|
index fb504f1..5930aee 100644
|
|
--- a/drivers/media/dvb-frontends/af9013.c
|
|
+++ b/drivers/media/dvb-frontends/af9013.c
|
|
@@ -606,6 +606,10 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
|
|
}
|
|
}
|
|
|
|
+ /* Return an error if can't find bandwidth or the right clock */
|
|
+ if (i == ARRAY_SIZE(coeff_lut))
|
|
+ return -EINVAL;
|
|
+
|
|
ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
|
|
sizeof(coeff_lut[i].val));
|
|
}
|
|
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
|
|
index 2916d7c..7bc68b3 100644
|
|
--- a/drivers/media/dvb-frontends/cx24116.c
|
|
+++ b/drivers/media/dvb-frontends/cx24116.c
|
|
@@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
|
|
struct cx24116_state *state = fe->demodulator_priv;
|
|
int i, ret;
|
|
|
|
+ /* Validate length */
|
|
+ if (d->msg_len > sizeof(d->msg))
|
|
+ return -EINVAL;
|
|
+
|
|
/* Dump DiSEqC message */
|
|
if (debug) {
|
|
printk(KERN_INFO "cx24116: %s(", __func__);
|
|
@@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
|
|
printk(") toneburst=%d\n", toneburst);
|
|
}
|
|
|
|
- /* Validate length */
|
|
- if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
|
|
- return -EINVAL;
|
|
-
|
|
/* DiSEqC message */
|
|
for (i = 0; i < d->msg_len; i++)
|
|
state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
|
|
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
|
|
index a6c3c9e..d2eab06 100644
|
|
--- a/drivers/media/dvb-frontends/cx24117.c
|
|
+++ b/drivers/media/dvb-frontends/cx24117.c
|
|
@@ -1043,7 +1043,7 @@ static int cx24117_send_diseqc_msg(struct dvb_frontend *fe,
|
|
dev_dbg(&state->priv->i2c->dev, ")\n");
|
|
|
|
/* Validate length */
|
|
- if (d->msg_len > 15)
|
|
+ if (d->msg_len > sizeof(d->msg))
|
|
return -EINVAL;
|
|
|
|
/* DiSEqC message */
|
|
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
|
|
index 1e344b0..22e8c20 100644
|
|
--- a/drivers/media/dvb-frontends/ds3000.c
|
|
+++ b/drivers/media/dvb-frontends/ds3000.c
|
|
@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
|
|
memcpy(&state->frontend.ops, &ds3000_ops,
|
|
sizeof(struct dvb_frontend_ops));
|
|
state->frontend.demodulator_priv = state;
|
|
+
|
|
+ /*
|
|
+ * Some devices like T480 starts with voltage on. Be sure
|
|
+ * to turn voltage off during init, as this can otherwise
|
|
+ * interfere with Unicable SCR systems.
|
|
+ */
|
|
+ ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
|
|
return &state->frontend;
|
|
|
|
error3:
|
|
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
|
|
index 93eeaf7..0b4f8fe 100644
|
|
--- a/drivers/media/dvb-frontends/s5h1420.c
|
|
+++ b/drivers/media/dvb-frontends/s5h1420.c
|
|
@@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
|
|
int result = 0;
|
|
|
|
dprintk("enter %s\n", __func__);
|
|
- if (cmd->msg_len > 8)
|
|
+ if (cmd->msg_len > sizeof(cmd->msg))
|
|
return -EINVAL;
|
|
|
|
/* setup for DISEQC */
|
|
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
|
|
index 8ad3a57..287b977 100644
|
|
--- a/drivers/media/dvb-frontends/tda10071.c
|
|
+++ b/drivers/media/dvb-frontends/tda10071.c
|
|
@@ -667,6 +667,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
|
|
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
|
|
int ret, i;
|
|
u8 mode, rolloff, pilot, inversion, div;
|
|
+ fe_modulation_t modulation;
|
|
|
|
dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \
|
|
"frequency=%d symbol_rate=%d inversion=%d pilot=%d " \
|
|
@@ -701,10 +702,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
|
|
|
|
switch (c->delivery_system) {
|
|
case SYS_DVBS:
|
|
+ modulation = QPSK;
|
|
rolloff = 0;
|
|
pilot = 2;
|
|
break;
|
|
case SYS_DVBS2:
|
|
+ modulation = c->modulation;
|
|
+
|
|
switch (c->rolloff) {
|
|
case ROLLOFF_20:
|
|
rolloff = 2;
|
|
@@ -749,7 +753,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
|
|
|
|
for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
|
|
if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
|
|
- c->modulation == TDA10071_MODCOD[i].modulation &&
|
|
+ modulation == TDA10071_MODCOD[i].modulation &&
|
|
c->fec_inner == TDA10071_MODCOD[i].fec) {
|
|
mode = TDA10071_MODCOD[i].val;
|
|
dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
|
|
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
|
|
index 71c8570..112394d 100644
|
|
--- a/drivers/media/i2c/adv7604.c
|
|
+++ b/drivers/media/i2c/adv7604.c
|
|
@@ -1984,7 +1984,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
|
|
v4l2_info(sd, "HDCP keys read: %s%s\n",
|
|
(hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
|
|
(hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
|
|
- if (!is_hdmi(sd)) {
|
|
+ if (is_hdmi(sd)) {
|
|
bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
|
|
bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
|
|
bool audio_mute = io_read(sd, 0x65) & 0x40;
|
|
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
|
|
index 36c504b..008ac87 100644
|
|
--- a/drivers/media/i2c/mt9v032.c
|
|
+++ b/drivers/media/i2c/mt9v032.c
|
|
@@ -305,8 +305,8 @@ mt9v032_update_hblank(struct mt9v032 *mt9v032)
|
|
|
|
if (mt9v032->version->version == MT9V034_CHIP_ID_REV1)
|
|
min_hblank += (mt9v032->hratio - 1) * 10;
|
|
- min_hblank = max_t(unsigned int, (int)mt9v032->model->data->min_row_time - crop->width,
|
|
- (int)min_hblank);
|
|
+ min_hblank = max_t(int, mt9v032->model->data->min_row_time - crop->width,
|
|
+ min_hblank);
|
|
hblank = max_t(unsigned int, mt9v032->hblank, min_hblank);
|
|
|
|
return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING, hblank);
|
|
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
|
|
index 2335529..ab5d9a3 100644
|
|
--- a/drivers/media/i2c/smiapp-pll.c
|
|
+++ b/drivers/media/i2c/smiapp-pll.c
|
|
@@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
|
|
{
|
|
dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div);
|
|
dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier);
|
|
- if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
|
|
+ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
|
|
dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
|
|
dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
|
|
}
|
|
@@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
|
|
dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
|
|
dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
|
|
dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
|
|
- if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
|
|
+ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
|
|
dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
|
|
pll->op_sys_clk_freq_hz);
|
|
dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
|
|
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
|
|
index 8741cae..873d062 100644
|
|
--- a/drivers/media/i2c/smiapp/smiapp-core.c
|
|
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
|
|
@@ -2138,7 +2138,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
|
|
ret = smiapp_set_compose(subdev, fh, sel);
|
|
break;
|
|
default:
|
|
- BUG();
|
|
+ ret = -EINVAL;
|
|
}
|
|
|
|
mutex_unlock(&sensor->mutex);
|
|
@@ -2624,7 +2624,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
|
|
pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
|
|
pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
|
|
|
|
+ mutex_lock(&sensor->mutex);
|
|
rval = smiapp_update_mode(sensor);
|
|
+ mutex_unlock(&sensor->mutex);
|
|
if (rval) {
|
|
dev_err(&client->dev, "update mode failed\n");
|
|
goto out_nvm_release;
|
|
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
|
|
index 72af644..cf93021 100644
|
|
--- a/drivers/media/i2c/tda7432.c
|
|
+++ b/drivers/media/i2c/tda7432.c
|
|
@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
|
|
if (t->mute->val) {
|
|
lf |= TDA7432_MUTE;
|
|
lr |= TDA7432_MUTE;
|
|
- lf |= TDA7432_MUTE;
|
|
+ rf |= TDA7432_MUTE;
|
|
rr |= TDA7432_MUTE;
|
|
}
|
|
/* Mute & update balance*/
|
|
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
|
|
index 703560f..88c1606 100644
|
|
--- a/drivers/media/media-device.c
|
|
+++ b/drivers/media/media-device.c
|
|
@@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
|
|
if (ent->name) {
|
|
strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
|
|
u_ent.name[sizeof(u_ent.name) - 1] = '\0';
|
|
- } else {
|
|
- memset(u_ent.name, 0, sizeof(u_ent.name));
|
|
}
|
|
u_ent.type = ent->type;
|
|
u_ent.revision = ent->revision;
|
|
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
|
|
index 716bdc5..83f5074 100644
|
|
--- a/drivers/media/pci/cx18/cx18-driver.c
|
|
+++ b/drivers/media/pci/cx18/cx18-driver.c
|
|
@@ -1091,6 +1091,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
|
|
setup.addr = ADDR_UNSET;
|
|
setup.type = cx->options.tuner;
|
|
setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
|
|
+ setup.config = NULL;
|
|
if (cx->options.radio > 0)
|
|
setup.mode_mask |= T_RADIO;
|
|
setup.tuner_callback = (setup.type == TUNER_XC2028) ?
|
|
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
|
|
index f723f1f..ab85127 100644
|
|
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
|
|
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
|
|
@@ -30,7 +30,7 @@
|
|
|
|
/* Offset base used to differentiate between CAPTURE and OUTPUT
|
|
* while mmaping */
|
|
-#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
|
|
+#define DST_QUEUE_OFF_BASE (1 << 30)
|
|
|
|
#define MFC_BANK1_ALLOC_CTX 0
|
|
#define MFC_BANK2_ALLOC_CTX 1
|
|
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
|
|
index 744e43b..f698e32 100644
|
|
--- a/drivers/media/platform/sh_veu.c
|
|
+++ b/drivers/media/platform/sh_veu.c
|
|
@@ -1183,6 +1183,7 @@ static int sh_veu_probe(struct platform_device *pdev)
|
|
}
|
|
|
|
*vdev = sh_veu_videodev;
|
|
+ vdev->v4l2_dev = &veu->v4l2_dev;
|
|
spin_lock_init(&veu->lock);
|
|
mutex_init(&veu->fop_lock);
|
|
vdev->lock = &veu->fop_lock;
|
|
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
|
|
index b4687a8..7245cca 100644
|
|
--- a/drivers/media/platform/vsp1/vsp1_video.c
|
|
+++ b/drivers/media/platform/vsp1/vsp1_video.c
|
|
@@ -635,8 +635,6 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
|
|
if (vb->num_planes < format->num_planes)
|
|
return -EINVAL;
|
|
|
|
- buf->video = video;
|
|
-
|
|
for (i = 0; i < vb->num_planes; ++i) {
|
|
buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
|
|
buf->length[i] = vb2_plane_size(vb, i);
|
|
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
|
|
index d8612a3..47b7a8a 100644
|
|
--- a/drivers/media/platform/vsp1/vsp1_video.h
|
|
+++ b/drivers/media/platform/vsp1/vsp1_video.h
|
|
@@ -89,7 +89,6 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
|
|
}
|
|
|
|
struct vsp1_video_buffer {
|
|
- struct vsp1_video *video;
|
|
struct vb2_buffer buf;
|
|
struct list_head queue;
|
|
|
|
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
|
|
index ed2c8a1..98893a8 100644
|
|
--- a/drivers/media/rc/ir-lirc-codec.c
|
|
+++ b/drivers/media/rc/ir-lirc-codec.c
|
|
@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
|
|
return -EINVAL;
|
|
|
|
/* Packet start */
|
|
- if (ev.reset)
|
|
- return 0;
|
|
+ if (ev.reset) {
|
|
+ /* Userspace expects a long space event before the start of
|
|
+ * the signal to use as a sync. This may be done with repeat
|
|
+ * packets and normal samples. But if a reset has been sent
|
|
+ * then we assume that a long time has passed, so we send a
|
|
+ * space with the maximum time value. */
|
|
+ sample = LIRC_SPACE(LIRC_VALUE_MASK);
|
|
+ IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
|
|
|
|
/* Carrier reports */
|
|
- if (ev.carrier_report) {
|
|
+ } else if (ev.carrier_report) {
|
|
sample = LIRC_FREQUENCY(ev.carrier);
|
|
IR_dprintk(2, "carrier report (freq: %d)\n", sample);
|
|
|
|
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
|
|
index 40c42de..7a62097 100644
|
|
--- a/drivers/media/tuners/m88ts2022.c
|
|
+++ b/drivers/media/tuners/m88ts2022.c
|
|
@@ -314,7 +314,7 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
|
|
div_min = gdiv28 * 78 / 100;
|
|
div_max = clamp_val(div_max, 0U, 63U);
|
|
|
|
- f_3db_hz = c->symbol_rate * 135UL / 200UL;
|
|
+ f_3db_hz = mult_frac(c->symbol_rate, 135, 200);
|
|
f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
|
|
f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
|
|
|
|
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
|
|
index 2018bef..e71decb 100644
|
|
--- a/drivers/media/tuners/xc4000.c
|
|
+++ b/drivers/media/tuners/xc4000.c
|
|
@@ -93,7 +93,7 @@ struct xc4000_priv {
|
|
struct firmware_description *firm;
|
|
int firm_size;
|
|
u32 if_khz;
|
|
- u32 freq_hz;
|
|
+ u32 freq_hz, freq_offset;
|
|
u32 bandwidth;
|
|
u8 video_standard;
|
|
u8 rf_mode;
|
|
@@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
|
|
case SYS_ATSC:
|
|
dprintk(1, "%s() VSB modulation\n", __func__);
|
|
priv->rf_mode = XC_RF_MODE_AIR;
|
|
- priv->freq_hz = c->frequency - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
priv->video_standard = XC4000_DTV6;
|
|
type = DTV6;
|
|
break;
|
|
case SYS_DVBC_ANNEX_B:
|
|
dprintk(1, "%s() QAM modulation\n", __func__);
|
|
priv->rf_mode = XC_RF_MODE_CABLE;
|
|
- priv->freq_hz = c->frequency - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
priv->video_standard = XC4000_DTV6;
|
|
type = DTV6;
|
|
break;
|
|
@@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
|
|
dprintk(1, "%s() OFDM\n", __func__);
|
|
if (bw == 0) {
|
|
if (c->frequency < 400000000) {
|
|
- priv->freq_hz = c->frequency - 2250000;
|
|
+ priv->freq_offset = 2250000;
|
|
} else {
|
|
- priv->freq_hz = c->frequency - 2750000;
|
|
+ priv->freq_offset = 2750000;
|
|
}
|
|
priv->video_standard = XC4000_DTV7_8;
|
|
type = DTV78;
|
|
} else if (bw <= 6000000) {
|
|
priv->video_standard = XC4000_DTV6;
|
|
- priv->freq_hz = c->frequency - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
type = DTV6;
|
|
} else if (bw <= 7000000) {
|
|
priv->video_standard = XC4000_DTV7;
|
|
- priv->freq_hz = c->frequency - 2250000;
|
|
+ priv->freq_offset = 2250000;
|
|
type = DTV7;
|
|
} else {
|
|
priv->video_standard = XC4000_DTV8;
|
|
- priv->freq_hz = c->frequency - 2750000;
|
|
+ priv->freq_offset = 2750000;
|
|
type = DTV8;
|
|
}
|
|
priv->rf_mode = XC_RF_MODE_AIR;
|
|
@@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
|
|
goto fail;
|
|
}
|
|
|
|
+ priv->freq_hz = c->frequency - priv->freq_offset;
|
|
+
|
|
dprintk(1, "%s() frequency=%d (compensated)\n",
|
|
__func__, priv->freq_hz);
|
|
|
|
@@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
|
|
{
|
|
struct xc4000_priv *priv = fe->tuner_priv;
|
|
|
|
- *freq = priv->freq_hz;
|
|
+ *freq = priv->freq_hz + priv->freq_offset;
|
|
|
|
if (debug) {
|
|
mutex_lock(&priv->lock);
|
|
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
|
|
index 5cd09a6..b2d9e9c 100644
|
|
--- a/drivers/media/tuners/xc5000.c
|
|
+++ b/drivers/media/tuners/xc5000.c
|
|
@@ -55,7 +55,7 @@ struct xc5000_priv {
|
|
|
|
u32 if_khz;
|
|
u16 xtal_khz;
|
|
- u32 freq_hz;
|
|
+ u32 freq_hz, freq_offset;
|
|
u32 bandwidth;
|
|
u8 video_standard;
|
|
u8 rf_mode;
|
|
@@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
|
|
case SYS_ATSC:
|
|
dprintk(1, "%s() VSB modulation\n", __func__);
|
|
priv->rf_mode = XC_RF_MODE_AIR;
|
|
- priv->freq_hz = freq - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
priv->video_standard = DTV6;
|
|
break;
|
|
case SYS_DVBC_ANNEX_B:
|
|
dprintk(1, "%s() QAM modulation\n", __func__);
|
|
priv->rf_mode = XC_RF_MODE_CABLE;
|
|
- priv->freq_hz = freq - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
priv->video_standard = DTV6;
|
|
break;
|
|
case SYS_ISDBT:
|
|
@@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
|
|
switch (bw) {
|
|
case 6000000:
|
|
priv->video_standard = DTV6;
|
|
- priv->freq_hz = freq - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
break;
|
|
case 7000000:
|
|
priv->video_standard = DTV7;
|
|
- priv->freq_hz = freq - 2250000;
|
|
+ priv->freq_offset = 2250000;
|
|
break;
|
|
case 8000000:
|
|
priv->video_standard = DTV8;
|
|
- priv->freq_hz = freq - 2750000;
|
|
+ priv->freq_offset = 2750000;
|
|
break;
|
|
default:
|
|
printk(KERN_ERR "xc5000 bandwidth not set!\n");
|
|
@@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
|
|
priv->rf_mode = XC_RF_MODE_CABLE;
|
|
if (bw <= 6000000) {
|
|
priv->video_standard = DTV6;
|
|
- priv->freq_hz = freq - 1750000;
|
|
+ priv->freq_offset = 1750000;
|
|
b = 6;
|
|
} else if (bw <= 7000000) {
|
|
priv->video_standard = DTV7;
|
|
- priv->freq_hz = freq - 2250000;
|
|
+ priv->freq_offset = 2250000;
|
|
b = 7;
|
|
} else {
|
|
priv->video_standard = DTV7_8;
|
|
- priv->freq_hz = freq - 2750000;
|
|
+ priv->freq_offset = 2750000;
|
|
b = 8;
|
|
}
|
|
dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
|
|
@@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ priv->freq_hz = freq - priv->freq_offset;
|
|
+
|
|
dprintk(1, "%s() frequency=%d (compensated to %d)\n",
|
|
__func__, freq, priv->freq_hz);
|
|
|
|
@@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
|
|
{
|
|
struct xc5000_priv *priv = fe->tuner_priv;
|
|
dprintk(1, "%s()\n", __func__);
|
|
- *freq = priv->freq_hz;
|
|
+ *freq = priv->freq_hz + priv->freq_offset;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
|
|
index dd32dec..1d4b110 100644
|
|
--- a/drivers/media/usb/au0828/au0828-cards.c
|
|
+++ b/drivers/media/usb/au0828/au0828-cards.c
|
|
@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
|
|
au0828_clear(dev, REG_000, 0x10);
|
|
}
|
|
|
|
+/*
|
|
+ * WARNING: There's a quirks table at sound/usb/quirks-table.h
|
|
+ * that should also be updated every time a new device with V4L2 support
|
|
+ * is added here.
|
|
+ */
|
|
struct au0828_board au0828_boards[] = {
|
|
[AU0828_BOARD_UNKNOWN] = {
|
|
.name = "Unknown board",
|
|
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
|
|
index f615454..7ed75ef 100644
|
|
--- a/drivers/media/usb/au0828/au0828-video.c
|
|
+++ b/drivers/media/usb/au0828/au0828-video.c
|
|
@@ -787,11 +787,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
|
|
|
|
/*
|
|
* Auvitek au0828 analog stream enable
|
|
- * Please set interface0 to AS5 before enable the stream
|
|
*/
|
|
static int au0828_analog_stream_enable(struct au0828_dev *d)
|
|
{
|
|
+ struct usb_interface *iface;
|
|
+ int ret;
|
|
+
|
|
dprintk(1, "au0828_analog_stream_enable called\n");
|
|
+
|
|
+ iface = usb_ifnum_to_if(d->usbdev, 0);
|
|
+ if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
|
|
+ dprintk(1, "Changing intf#0 to alt 5\n");
|
|
+ /* set au0828 interface0 to AS5 here again */
|
|
+ ret = usb_set_interface(d->usbdev, 0, 5);
|
|
+ if (ret < 0) {
|
|
+ printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
|
|
+ return -EBUSY;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* FIXME: size should be calculated using d->width, d->height */
|
|
+
|
|
au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
|
|
au0828_writereg(d, 0x106, 0x00);
|
|
/* set x position */
|
|
@@ -1002,15 +1018,6 @@ static int au0828_v4l2_open(struct file *filp)
|
|
return -ERESTARTSYS;
|
|
}
|
|
if (dev->users == 0) {
|
|
- /* set au0828 interface0 to AS5 here again */
|
|
- ret = usb_set_interface(dev->usbdev, 0, 5);
|
|
- if (ret < 0) {
|
|
- mutex_unlock(&dev->lock);
|
|
- printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
|
|
- kfree(fh);
|
|
- return -EBUSY;
|
|
- }
|
|
-
|
|
au0828_analog_stream_enable(dev);
|
|
au0828_analog_stream_reset(dev);
|
|
|
|
@@ -1252,13 +1259,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
|
|
}
|
|
}
|
|
|
|
- /* set au0828 interface0 to AS5 here again */
|
|
- ret = usb_set_interface(dev->usbdev, 0, 5);
|
|
- if (ret < 0) {
|
|
- printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
|
|
- return -EBUSY;
|
|
- }
|
|
-
|
|
au0828_analog_stream_enable(dev);
|
|
|
|
return 0;
|
|
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
|
|
index 8ede8ea..88228f7 100644
|
|
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
|
|
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
|
|
@@ -1541,6 +1541,10 @@ static const struct usb_device_id af9035_id_table[] = {
|
|
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
|
|
{ DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
|
|
&af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
|
|
+ { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E,
|
|
+ &af9035_props, "PCTV 78e", RC_MAP_IT913X_V1) },
|
|
+ { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E,
|
|
+ &af9035_props, "PCTV 79e", RC_MAP_IT913X_V2) },
|
|
{ }
|
|
};
|
|
MODULE_DEVICE_TABLE(usb, af9035_id_table);
|
|
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
|
|
index f674dc0..d2a4e6d 100644
|
|
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
|
|
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
|
|
@@ -350,6 +350,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
|
|
{
|
|
struct dvb_usb_device *d = adap_to_d(adap);
|
|
struct lme2510_state *lme_int = adap_to_priv(adap);
|
|
+ struct usb_host_endpoint *ep;
|
|
|
|
lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
|
|
|
|
@@ -371,6 +372,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
|
|
adap,
|
|
8);
|
|
|
|
+ /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
|
|
+ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
|
|
+
|
|
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
|
|
+ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
|
|
+
|
|
lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
|
|
|
|
usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
|
|
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
|
|
index af176b6..e6d3561 100644
|
|
--- a/drivers/media/usb/dvb-usb/af9005.c
|
|
+++ b/drivers/media/usb/dvb-usb/af9005.c
|
|
@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
|
|
err("usb_register failed. (%d)", result);
|
|
return result;
|
|
}
|
|
+#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
|
|
+ /* FIXME: convert to todays kernel IR infrastructure */
|
|
rc_decode = symbol_request(af9005_rc_decode);
|
|
rc_keys = symbol_request(rc_map_af9005_table);
|
|
rc_keys_size = symbol_request(rc_map_af9005_table_size);
|
|
+#endif
|
|
if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
|
|
err("af9005_rc_decode function not found, disabling remote");
|
|
af9005_properties.rc.legacy.rc_query = NULL;
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
|
|
index dfdfa77..c39f7d3 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-audio.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
|
|
@@ -814,7 +814,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
|
|
if (urb_size > ep_size * npackets)
|
|
npackets = DIV_ROUND_UP(urb_size, ep_size);
|
|
|
|
- em28xx_info("Number of URBs: %d, with %d packets and %d size",
|
|
+ em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
|
|
num_urb, npackets, urb_size);
|
|
|
|
/* Estimate the bytes per period */
|
|
@@ -974,7 +974,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
|
|
return 0;
|
|
}
|
|
|
|
- em28xx_info("Closing audio extension");
|
|
+ em28xx_info("Closing audio extension\n");
|
|
|
|
if (dev->adev.sndcard) {
|
|
snd_card_disconnect(dev->adev.sndcard);
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
|
|
index 4d97a76..c1a3f8f 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-cards.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
|
|
@@ -2993,16 +2993,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
|
|
}
|
|
}
|
|
|
|
- if (dev->chip_id == CHIP_ID_EM2870 ||
|
|
- dev->chip_id == CHIP_ID_EM2874 ||
|
|
- dev->chip_id == CHIP_ID_EM28174 ||
|
|
- dev->chip_id == CHIP_ID_EM28178) {
|
|
- /* Digital only device - don't load any alsa module */
|
|
- dev->audio_mode.has_audio = false;
|
|
- dev->has_audio_class = false;
|
|
- dev->has_alsa_audio = false;
|
|
- }
|
|
-
|
|
if (chip_name != default_chip_name)
|
|
printk(KERN_INFO DRIVER_NAME
|
|
": chip ID is %s\n", chip_name);
|
|
@@ -3272,7 +3262,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
|
|
dev->alt = -1;
|
|
dev->is_audio_only = has_audio && !(has_video || has_dvb);
|
|
dev->has_alsa_audio = has_audio;
|
|
- dev->audio_mode.has_audio = has_audio;
|
|
dev->has_video = has_video;
|
|
dev->ifnum = ifnum;
|
|
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
|
|
index 898fb9b..97fd881 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-core.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-core.c
|
|
@@ -506,8 +506,18 @@ int em28xx_audio_setup(struct em28xx *dev)
|
|
int vid1, vid2, feat, cfg;
|
|
u32 vid;
|
|
|
|
- if (!dev->audio_mode.has_audio)
|
|
+ if (dev->chip_id == CHIP_ID_EM2870 ||
|
|
+ dev->chip_id == CHIP_ID_EM2874 ||
|
|
+ dev->chip_id == CHIP_ID_EM28174 ||
|
|
+ dev->chip_id == CHIP_ID_EM28178) {
|
|
+ /* Digital only device - don't load any alsa module */
|
|
+ dev->audio_mode.has_audio = false;
|
|
+ dev->has_audio_class = false;
|
|
+ dev->has_alsa_audio = false;
|
|
return 0;
|
|
+ }
|
|
+
|
|
+ dev->audio_mode.has_audio = true;
|
|
|
|
/* See how this device is configured */
|
|
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
|
|
index 1373cfa..ec2ebe9 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
|
|
@@ -1468,7 +1468,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
|
|
return 0;
|
|
}
|
|
|
|
- em28xx_info("Closing DVB extension");
|
|
+ em28xx_info("Closing DVB extension\n");
|
|
|
|
if (dev->dvb) {
|
|
struct em28xx_dvb *dvb = dev->dvb;
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
|
|
index 18f65d8..dd59c00 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-input.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-input.c
|
|
@@ -810,7 +810,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
|
|
return 0;
|
|
}
|
|
|
|
- em28xx_info("Closing input extension");
|
|
+ em28xx_info("Closing input extension\n");
|
|
|
|
em28xx_shutdown_buttons(dev);
|
|
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
|
|
index c3c9289..0e8d085 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-video.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-video.c
|
|
@@ -953,13 +953,16 @@ static int em28xx_stop_streaming(struct vb2_queue *vq)
|
|
}
|
|
|
|
spin_lock_irqsave(&dev->slock, flags);
|
|
+ if (dev->usb_ctl.vid_buf != NULL) {
|
|
+ vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
|
|
+ dev->usb_ctl.vid_buf = NULL;
|
|
+ }
|
|
while (!list_empty(&vidq->active)) {
|
|
struct em28xx_buffer *buf;
|
|
buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
|
|
list_del(&buf->list);
|
|
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
|
|
}
|
|
- dev->usb_ctl.vid_buf = NULL;
|
|
spin_unlock_irqrestore(&dev->slock, flags);
|
|
|
|
return 0;
|
|
@@ -981,13 +984,16 @@ int em28xx_stop_vbi_streaming(struct vb2_queue *vq)
|
|
}
|
|
|
|
spin_lock_irqsave(&dev->slock, flags);
|
|
+ if (dev->usb_ctl.vbi_buf != NULL) {
|
|
+ vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
|
|
+ dev->usb_ctl.vbi_buf = NULL;
|
|
+ }
|
|
while (!list_empty(&vbiq->active)) {
|
|
struct em28xx_buffer *buf;
|
|
buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
|
|
list_del(&buf->list);
|
|
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
|
|
}
|
|
- dev->usb_ctl.vbi_buf = NULL;
|
|
spin_unlock_irqrestore(&dev->slock, flags);
|
|
|
|
return 0;
|
|
@@ -1894,7 +1900,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
|
|
return 0;
|
|
}
|
|
|
|
- em28xx_info("Closing video extension");
|
|
+ em28xx_info("Closing video extension\n");
|
|
|
|
mutex_lock(&dev->lock);
|
|
|
|
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
|
|
index 0500c417..6bce01a 100644
|
|
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
|
|
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
|
|
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
|
|
}
|
|
|
|
/*=========================================================================*/
|
|
-/* bufffer bits */
|
|
+/* buffer bits */
|
|
|
|
/* function expects dev->io_mutex to be hold by caller */
|
|
int hdpvr_cancel_queue(struct hdpvr_device *dev)
|
|
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
|
|
case V4L2_CID_MPEG_AUDIO_ENCODING:
|
|
if (dev->flags & HDPVR_FLAG_AC3_CAP) {
|
|
opt->audio_codec = ctrl->val;
|
|
- return hdpvr_set_audio(dev, opt->audio_input,
|
|
+ return hdpvr_set_audio(dev, opt->audio_input + 1,
|
|
opt->audio_codec);
|
|
}
|
|
return 0;
|
|
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
|
|
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
|
|
V4L2_CID_MPEG_AUDIO_ENCODING,
|
|
ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
|
|
- 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
|
|
+ 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
|
|
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
|
|
V4L2_CID_MPEG_VIDEO_ENCODING,
|
|
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
|
|
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
|
|
index c45c988..4572530 100644
|
|
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
|
|
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
|
|
@@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
|
|
if (mutex_lock_interruptible(&dev->v4l_lock))
|
|
return -ERESTARTSYS;
|
|
|
|
+ /*
|
|
+ * Once URBs are cancelled, the URB complete handler
|
|
+ * won't be running. This is required to safely release the
|
|
+ * current buffer (dev->isoc_ctl.buf).
|
|
+ */
|
|
stk1160_cancel_isoc(dev);
|
|
|
|
/*
|
|
@@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
|
|
stk1160_info("buffer [%p/%d] aborted\n",
|
|
buf, buf->vb.v4l2_buf.index);
|
|
}
|
|
- /* It's important to clear current buffer */
|
|
- dev->isoc_ctl.buf = NULL;
|
|
+
|
|
+ /* It's important to release the current buffer */
|
|
+ if (dev->isoc_ctl.buf) {
|
|
+ buf = dev->isoc_ctl.buf;
|
|
+ dev->isoc_ctl.buf = NULL;
|
|
+
|
|
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
|
|
+ stk1160_info("buffer [%p/%d] aborted\n",
|
|
+ buf, buf->vb.v4l2_buf.index);
|
|
+ }
|
|
spin_unlock_irqrestore(&dev->buf_lock, flags);
|
|
}
|
|
|
|
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
|
|
index 5c45c9d..9c29552 100644
|
|
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
|
|
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
|
|
@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
|
|
0x00, 0x00, 0x00, 0x00,
|
|
0x00, 0x00 };
|
|
|
|
+ if (cmd->msg_len > sizeof(b) - 4)
|
|
+ return -EINVAL;
|
|
+
|
|
memcpy(&b[4], cmd->msg, cmd->msg_len);
|
|
|
|
state->config->send_command(fe, 0x72,
|
|
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
|
|
index c3bb250..4531441 100644
|
|
--- a/drivers/media/usb/uvc/uvc_driver.c
|
|
+++ b/drivers/media/usb/uvc/uvc_driver.c
|
|
@@ -1603,12 +1603,12 @@ static void uvc_delete(struct uvc_device *dev)
|
|
{
|
|
struct list_head *p, *n;
|
|
|
|
- usb_put_intf(dev->intf);
|
|
- usb_put_dev(dev->udev);
|
|
-
|
|
uvc_status_cleanup(dev);
|
|
uvc_ctrl_cleanup_device(dev);
|
|
|
|
+ usb_put_intf(dev->intf);
|
|
+ usb_put_dev(dev->udev);
|
|
+
|
|
if (dev->vdev.dev)
|
|
v4l2_device_unregister(&dev->vdev);
|
|
#ifdef CONFIG_MEDIA_CONTROLLER
|
|
@@ -2210,6 +2210,15 @@ static struct usb_device_id uvc_ids[] = {
|
|
.bInterfaceSubClass = 1,
|
|
.bInterfaceProtocol = 0,
|
|
.driver_info = UVC_QUIRK_PROBE_DEF },
|
|
+ /* Dell XPS M1330 (OmniVision OV7670 webcam) */
|
|
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
|
|
+ | USB_DEVICE_ID_MATCH_INT_INFO,
|
|
+ .idVendor = 0x05a9,
|
|
+ .idProduct = 0x7670,
|
|
+ .bInterfaceClass = USB_CLASS_VIDEO,
|
|
+ .bInterfaceSubClass = 1,
|
|
+ .bInterfaceProtocol = 0,
|
|
+ .driver_info = UVC_QUIRK_PROBE_DEF },
|
|
/* Apple Built-In iSight */
|
|
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
|
|
| USB_DEVICE_ID_MATCH_INT_INFO,
|
|
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
|
|
index 433d6d7..c5521ce 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-common.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-common.c
|
|
@@ -431,16 +431,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
|
|
/* Bits that must be zero to be aligned */
|
|
unsigned int mask = ~((1 << align) - 1);
|
|
|
|
+ /* Clamp to aligned min and max */
|
|
+ x = clamp(x, (min + ~mask) & mask, max & mask);
|
|
+
|
|
/* Round to nearest aligned value */
|
|
if (align)
|
|
x = (x + (1 << (align - 1))) & mask;
|
|
|
|
- /* Clamp to aligned value of min and max */
|
|
- if (x < min)
|
|
- x = (min + ~mask) & mask;
|
|
- else if (x > max)
|
|
- x = max & mask;
|
|
-
|
|
return x;
|
|
}
|
|
|
|
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
|
|
index a2e2579..78d99b1 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
|
|
@@ -595,10 +595,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
|
|
aspect.denominator = 9;
|
|
} else if (ratio == 34) {
|
|
aspect.numerator = 4;
|
|
- aspect.numerator = 3;
|
|
+ aspect.denominator = 3;
|
|
} else if (ratio == 68) {
|
|
aspect.numerator = 15;
|
|
- aspect.numerator = 9;
|
|
+ aspect.denominator = 9;
|
|
} else {
|
|
aspect.numerator = hor_landscape + 99;
|
|
aspect.denominator = 100;
|
|
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
|
|
index a127925..06faea4 100644
|
|
--- a/drivers/media/v4l2-core/videobuf2-core.c
|
|
+++ b/drivers/media/v4l2-core/videobuf2-core.c
|
|
@@ -745,6 +745,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
|
|
* to the userspace.
|
|
*/
|
|
req->count = allocated_buffers;
|
|
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
|
|
|
|
return 0;
|
|
}
|
|
@@ -793,6 +794,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
|
|
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
|
|
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
|
|
q->memory = create->memory;
|
|
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
|
|
}
|
|
|
|
num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
|
|
@@ -1447,6 +1449,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
|
|
* dequeued in dqbuf.
|
|
*/
|
|
list_add_tail(&vb->queued_entry, &q->queued_list);
|
|
+ q->waiting_for_buffers = false;
|
|
vb->state = VB2_BUF_STATE_QUEUED;
|
|
|
|
/*
|
|
@@ -1841,6 +1844,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
|
|
* and videobuf, effectively returning control over them to userspace.
|
|
*/
|
|
__vb2_queue_cancel(q);
|
|
+ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
|
|
|
|
dprintk(3, "Streamoff successful\n");
|
|
return 0;
|
|
@@ -2150,9 +2154,16 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
|
|
}
|
|
|
|
/*
|
|
- * There is nothing to wait for if no buffers have already been queued.
|
|
+ * There is nothing to wait for if the queue isn't streaming.
|
|
*/
|
|
- if (list_empty(&q->queued_list))
|
|
+ if (!vb2_is_streaming(q))
|
|
+ return res | POLLERR;
|
|
+ /*
|
|
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
|
|
+ * return POLLERR as well. This only affects capture queues, output
|
|
+ * queues will always initialize waiting_for_buffers to false.
|
|
+ */
|
|
+ if (q->waiting_for_buffers)
|
|
return res | POLLERR;
|
|
|
|
if (list_empty(&q->done_list))
|
|
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
|
|
index fc145d2..922a750 100644
|
|
--- a/drivers/memstick/core/mspro_block.c
|
|
+++ b/drivers/memstick/core/mspro_block.c
|
|
@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
|
|
|
|
if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
|
|
if (msb->data_dir == READ) {
|
|
- for (cnt = 0; cnt < msb->current_seg; cnt++)
|
|
+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
|
|
t_len += msb->req_sg[cnt].length
|
|
/ msb->page_size;
|
|
|
|
@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
|
|
t_len += msb->current_page - 1;
|
|
|
|
t_len *= msb->page_size;
|
|
+ }
|
|
}
|
|
} else
|
|
t_len = blk_rq_bytes(msb->block_req);
|
|
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
|
|
index 5653e50..424f51d 100644
|
|
--- a/drivers/message/fusion/mptspi.c
|
|
+++ b/drivers/message/fusion/mptspi.c
|
|
@@ -1422,6 +1422,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
goto out_mptspi_probe;
|
|
}
|
|
|
|
+ /* VMWare emulation doesn't properly implement WRITE_SAME
|
|
+ */
|
|
+ if (pdev->subsystem_vendor == 0x15AD)
|
|
+ sh->no_write_same = 1;
|
|
+
|
|
spin_lock_irqsave(&ioc->FreeQlock, flags);
|
|
|
|
/* Attach the SCSI Host to the IOC structure
|
|
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
|
|
index 38917a8..2df3cbc 100644
|
|
--- a/drivers/mfd/kempld-core.c
|
|
+++ b/drivers/mfd/kempld-core.c
|
|
@@ -629,7 +629,7 @@ static int __init kempld_init(void)
|
|
if (force_device_id[0]) {
|
|
for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++)
|
|
if (strstr(id->ident, force_device_id))
|
|
- if (id->callback && id->callback(id))
|
|
+ if (id->callback && !id->callback(id))
|
|
break;
|
|
if (id->matches[0].slot == DMI_NONE)
|
|
return -ENODEV;
|
|
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
|
|
index 90b630c..0aefe50 100644
|
|
--- a/drivers/mfd/omap-usb-host.c
|
|
+++ b/drivers/mfd/omap-usb-host.c
|
|
@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
|
|
|
|
for (i = 0; i < omap->nports; i++) {
|
|
if (is_ehci_phy_mode(pdata->port_mode[i])) {
|
|
- reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
|
|
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
|
|
index 1d15735..89b4c42 100644
|
|
--- a/drivers/mfd/rtsx_pcr.c
|
|
+++ b/drivers/mfd/rtsx_pcr.c
|
|
@@ -1177,7 +1177,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
|
|
pcr->msi_en = msi_en;
|
|
if (pcr->msi_en) {
|
|
ret = pci_enable_msi(pcidev);
|
|
- if (ret < 0)
|
|
+ if (ret)
|
|
pcr->msi_en = false;
|
|
}
|
|
|
|
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
|
|
index 11c19e5..48579e5 100644
|
|
--- a/drivers/mfd/tc6393xb.c
|
|
+++ b/drivers/mfd/tc6393xb.c
|
|
@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
|
|
return 0;
|
|
}
|
|
|
|
+static int tc6393xb_ohci_suspend(struct platform_device *dev)
|
|
+{
|
|
+ struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
|
|
+
|
|
+ /* We can't properly store/restore OHCI state, so fail here */
|
|
+ if (tcpd->resume_restore)
|
|
+ return -EBUSY;
|
|
+
|
|
+ return tc6393xb_ohci_disable(dev);
|
|
+}
|
|
+
|
|
static int tc6393xb_fb_enable(struct platform_device *dev)
|
|
{
|
|
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
|
|
@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
|
|
.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
|
|
.resources = tc6393xb_ohci_resources,
|
|
.enable = tc6393xb_ohci_enable,
|
|
- .suspend = tc6393xb_ohci_disable,
|
|
+ .suspend = tc6393xb_ohci_suspend,
|
|
.resume = tc6393xb_ohci_enable,
|
|
.disable = tc6393xb_ohci_disable,
|
|
},
|
|
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
|
|
index d4e8604..e87a248 100644
|
|
--- a/drivers/mfd/ti_am335x_tscadc.c
|
|
+++ b/drivers/mfd/ti_am335x_tscadc.c
|
|
@@ -54,11 +54,11 @@ void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&tsadc->reg_lock, flags);
|
|
- tsadc->reg_se_cache = val;
|
|
+ tsadc->reg_se_cache |= val;
|
|
if (tsadc->adc_waiting)
|
|
wake_up(&tsadc->reg_se_wait);
|
|
else if (!tsadc->adc_in_use)
|
|
- tscadc_writel(tsadc, REG_SE, val);
|
|
+ tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
|
|
|
|
spin_unlock_irqrestore(&tsadc->reg_lock, flags);
|
|
}
|
|
@@ -97,6 +97,7 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
|
|
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
|
|
{
|
|
spin_lock_irq(&tsadc->reg_lock);
|
|
+ tsadc->reg_se_cache |= val;
|
|
am335x_tscadc_need_adc(tsadc);
|
|
|
|
tscadc_writel(tsadc, REG_SE, val);
|
|
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
|
|
index 6b1a6ef..0c3a647 100644
|
|
--- a/drivers/misc/genwqe/card_utils.c
|
|
+++ b/drivers/misc/genwqe/card_utils.c
|
|
@@ -490,6 +490,8 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
|
|
m->nr_pages,
|
|
1, /* write by caller */
|
|
m->page_list); /* ptrs to pages */
|
|
+ if (rc < 0)
|
|
+ goto fail_get_user_pages;
|
|
|
|
/* assumption: get_user_pages can be killed by signals. */
|
|
if (rc < m->nr_pages) {
|
|
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
|
|
index 4bc7d62..9a07bba 100644
|
|
--- a/drivers/misc/mei/bus.c
|
|
+++ b/drivers/misc/mei/bus.c
|
|
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
|
|
|
|
dev_dbg(dev, "Device probe\n");
|
|
|
|
- strncpy(id.name, dev_name(dev), sizeof(id.name));
|
|
+ strlcpy(id.name, dev_name(dev), sizeof(id.name));
|
|
|
|
return driver->probe(device, &id);
|
|
}
|
|
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
|
|
index 5a9bfa7..540fe11 100644
|
|
--- a/drivers/misc/mei/client.c
|
|
+++ b/drivers/misc/mei/client.c
|
|
@@ -459,6 +459,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
|
|
cl_err(dev, cl, "failed to disconnect.\n");
|
|
goto free;
|
|
}
|
|
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
|
|
mdelay(10); /* Wait for hardware disconnection ready */
|
|
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
|
|
} else {
|
|
@@ -563,6 +564,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
|
|
cl->timer_count = MEI_CONNECT_TIMEOUT;
|
|
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
|
|
} else {
|
|
+ cl->state = MEI_FILE_INITIALIZING;
|
|
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
|
|
}
|
|
|
|
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
|
|
index cdd31c2..b296538 100644
|
|
--- a/drivers/misc/mei/init.c
|
|
+++ b/drivers/misc/mei/init.c
|
|
@@ -275,6 +275,8 @@ void mei_stop(struct mei_device *dev)
|
|
|
|
dev->dev_state = MEI_DEV_POWER_DOWN;
|
|
mei_reset(dev);
|
|
+ /* move device to disabled state unconditionally */
|
|
+ dev->dev_state = MEI_DEV_DISABLED;
|
|
|
|
mutex_unlock(&dev->device_lock);
|
|
|
|
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
|
|
index a58320c..3114901 100644
|
|
--- a/drivers/misc/mei/nfc.c
|
|
+++ b/drivers/misc/mei/nfc.c
|
|
@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
|
|
ndev = (struct mei_nfc_dev *) cldev->priv_data;
|
|
dev = ndev->cl->dev;
|
|
|
|
+ err = -ENOMEM;
|
|
mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
|
|
if (!mei_buf)
|
|
- return -ENOMEM;
|
|
+ goto out;
|
|
|
|
hdr = (struct mei_nfc_hci_hdr *) mei_buf;
|
|
hdr->cmd = MEI_NFC_CMD_HCI_SEND;
|
|
@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
|
|
hdr->data_size = length;
|
|
|
|
memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
|
|
-
|
|
err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
|
|
if (err < 0)
|
|
- return err;
|
|
-
|
|
- kfree(mei_buf);
|
|
+ goto out;
|
|
|
|
if (!wait_event_interruptible_timeout(ndev->send_wq,
|
|
ndev->recv_req_id == ndev->req_id, HZ)) {
|
|
@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
|
|
} else {
|
|
ndev->req_id++;
|
|
}
|
|
-
|
|
+out:
|
|
+ kfree(mei_buf);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
|
|
index 7b5424f..d71f5ef 100644
|
|
--- a/drivers/mmc/card/block.c
|
|
+++ b/drivers/mmc/card/block.c
|
|
@@ -205,6 +205,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
|
|
|
|
ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
|
|
|
|
+ mmc_blk_put(md);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -260,7 +262,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
|
|
int ret;
|
|
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
|
|
|
|
- ret = snprintf(buf, PAGE_SIZE, "%d",
|
|
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
|
|
get_disk_ro(dev_to_disk(dev)) ^
|
|
md->read_only);
|
|
mmc_blk_put(md);
|
|
@@ -951,6 +953,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
|
|
md->reset_done &= ~type;
|
|
}
|
|
|
|
+int mmc_access_rpmb(struct mmc_queue *mq)
|
|
+{
|
|
+ struct mmc_blk_data *md = mq->data;
|
|
+ /*
|
|
+	 * If this is an RPMB partition access, return true
|
|
+ */
|
|
+ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
|
|
{
|
|
struct mmc_blk_data *md = mq->data;
|
|
@@ -1849,9 +1863,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
|
|
break;
|
|
case MMC_BLK_CMD_ERR:
|
|
ret = mmc_blk_cmd_err(md, card, brq, req, ret);
|
|
- if (!mmc_blk_reset(md, card->host, type))
|
|
- break;
|
|
- goto cmd_abort;
|
|
+ if (mmc_blk_reset(md, card->host, type))
|
|
+ goto cmd_abort;
|
|
+ if (!ret)
|
|
+ goto start_new_req;
|
|
+ break;
|
|
case MMC_BLK_RETRY:
|
|
if (retry++ < 5)
|
|
break;
|
|
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
|
|
index 3e049c1..6ceede0 100644
|
|
--- a/drivers/mmc/card/queue.c
|
|
+++ b/drivers/mmc/card/queue.c
|
|
@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
|
|
return BLKPREP_KILL;
|
|
}
|
|
|
|
- if (mq && mmc_card_removed(mq->card))
|
|
+ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
|
|
return BLKPREP_KILL;
|
|
|
|
req->cmd_flags |= REQ_DONTPREP;
|
|
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
|
|
index 5752d50..99e6521 100644
|
|
--- a/drivers/mmc/card/queue.h
|
|
+++ b/drivers/mmc/card/queue.h
|
|
@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
|
|
extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
|
|
extern void mmc_packed_clean(struct mmc_queue *);
|
|
|
|
+extern int mmc_access_rpmb(struct mmc_queue *);
|
|
+
|
|
#endif
|
|
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
|
|
index c43e6c8..daca58b 100644
|
|
--- a/drivers/mmc/core/core.c
|
|
+++ b/drivers/mmc/core/core.c
|
|
@@ -2699,6 +2699,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
|
|
switch (mode) {
|
|
case PM_HIBERNATION_PREPARE:
|
|
case PM_SUSPEND_PREPARE:
|
|
+ case PM_RESTORE_PREPARE:
|
|
spin_lock_irqsave(&host->lock, flags);
|
|
host->rescan_disable = 1;
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
|
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
|
|
index 42706ea..201ce37 100644
|
|
--- a/drivers/mmc/host/atmel-mci.c
|
|
+++ b/drivers/mmc/host/atmel-mci.c
|
|
@@ -1300,7 +1300,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
|
|
if (ios->clock) {
|
|
unsigned int clock_min = ~0U;
|
|
- u32 clkdiv;
|
|
+ int clkdiv;
|
|
|
|
clk_prepare(host->mck);
|
|
unprepare_clk = true;
|
|
@@ -1329,7 +1329,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
/* Calculate clock divider */
|
|
if (host->caps.has_odd_clk_div) {
|
|
clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
|
|
- if (clkdiv > 511) {
|
|
+ if (clkdiv < 0) {
|
|
+ dev_warn(&mmc->class_dev,
|
|
+ "clock %u too fast; using %lu\n",
|
|
+ clock_min, host->bus_hz / 2);
|
|
+ clkdiv = 0;
|
|
+ } else if (clkdiv > 511) {
|
|
dev_warn(&mmc->class_dev,
|
|
"clock %u too slow; using %lu\n",
|
|
clock_min, host->bus_hz / (511 + 2));
|
|
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
|
|
index 054e03d..5d59c92 100644
|
|
--- a/drivers/mmc/host/dw_mmc.c
|
|
+++ b/drivers/mmc/host/dw_mmc.c
|
|
@@ -632,6 +632,13 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
|
|
|
|
WARN_ON(!(data->flags & MMC_DATA_READ));
|
|
|
|
+ /*
|
|
+ * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
|
|
+ * in the FIFO region, so we really shouldn't access it).
|
|
+ */
|
|
+ if (host->verid < DW_MMC_240A)
|
|
+ return;
|
|
+
|
|
if (host->timing != MMC_TIMING_MMC_HS200 &&
|
|
host->timing != MMC_TIMING_UHS_SDR104)
|
|
goto disable;
|
|
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
|
|
index 7e18661..ca297d7 100644
|
|
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
|
|
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
|
|
@@ -342,6 +342,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
|
|
}
|
|
|
|
if (rsp_type == SD_RSP_TYPE_R2) {
|
|
+ /*
|
|
+ * The controller offloads the last byte {CRC-7, end bit 1'b1}
|
|
+ * of response type R2. Assign dummy CRC, 0, and end bit to the
|
|
+ * byte(ptr[16], goes into the LSB of resp[3] later).
|
|
+ */
|
|
+ ptr[16] = 1;
|
|
+
|
|
for (i = 0; i < 4; i++) {
|
|
cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
|
|
dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
|
|
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
|
|
index 5c0f27f..28f61b7 100644
|
|
--- a/drivers/mmc/host/sdhci-esdhc.h
|
|
+++ b/drivers/mmc/host/sdhci-esdhc.h
|
|
@@ -46,6 +46,6 @@
|
|
#define ESDHC_DMA_SYSCTL 0x40c
|
|
#define ESDHC_DMA_SNOOP 0x00000040
|
|
|
|
-#define ESDHC_HOST_CONTROL_RES 0x05
|
|
+#define ESDHC_HOST_CONTROL_RES 0x01
|
|
|
|
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
|
|
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
|
|
index f49666b..257e9ca 100644
|
|
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
|
|
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
|
|
@@ -88,8 +88,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
|
|
return;
|
|
scratch_32 &= ~((1 << 21) | (1 << 30));
|
|
|
|
- /* Set RTD3 function disabled */
|
|
- scratch_32 |= ((1 << 29) | (1 << 28));
|
|
pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
|
|
|
|
/* Set L1 Entrance Timer */
|
|
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
|
|
index ba4eaf0..19de28b 100644
|
|
--- a/drivers/mmc/host/sdhci-pci.c
|
|
+++ b/drivers/mmc/host/sdhci-pci.c
|
|
@@ -103,6 +103,10 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
|
|
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
|
|
};
|
|
|
|
+static const struct sdhci_pci_fixes sdhci_intel_qrk = {
|
|
+ .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
|
|
+};
|
|
+
|
|
static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
|
|
{
|
|
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
|
|
@@ -733,6 +737,14 @@ static const struct pci_device_id pci_ids[] = {
|
|
|
|
{
|
|
.vendor = PCI_VENDOR_ID_INTEL,
|
|
+ .device = PCI_DEVICE_ID_INTEL_QRK_SD,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .driver_data = (kernel_ulong_t)&sdhci_intel_qrk,
|
|
+ },
|
|
+
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_INTEL,
|
|
.device = PCI_DEVICE_ID_INTEL_MRST_SD0,
|
|
.subvendor = PCI_ANY_ID,
|
|
.subdevice = PCI_ANY_ID,
|
|
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
|
|
index 6d71871..c101477 100644
|
|
--- a/drivers/mmc/host/sdhci-pci.h
|
|
+++ b/drivers/mmc/host/sdhci-pci.h
|
|
@@ -17,6 +17,7 @@
|
|
#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
|
|
#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
|
|
#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
|
|
+#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
|
|
|
|
/*
|
|
* PCI registers
|
|
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
|
|
index 4c5e52f..d70b8aa3 100644
|
|
--- a/drivers/mmc/host/sdhci-pxav3.c
|
|
+++ b/drivers/mmc/host/sdhci-pxav3.c
|
|
@@ -204,8 +204,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
|
|
if (!pdata)
|
|
return NULL;
|
|
|
|
- of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
|
|
- if (clk_delay_cycles > 0)
|
|
+ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
|
|
+ &clk_delay_cycles))
|
|
pdata->clk_delay_cycles = clk_delay_cycles;
|
|
|
|
return pdata;
|
|
@@ -260,6 +260,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
|
|
goto err_of_parse;
|
|
sdhci_get_of_property(pdev);
|
|
pdata = pxav3_get_mmc_pdata(dev);
|
|
+ pdev->dev.platform_data = pdata;
|
|
} else if (pdata) {
|
|
/* on-chip device */
|
|
if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
|
|
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
|
|
index 38e1cd4..af7507b 100644
|
|
--- a/drivers/mmc/host/sdhci.c
|
|
+++ b/drivers/mmc/host/sdhci.c
|
|
@@ -1310,6 +1310,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|
|
|
sdhci_runtime_pm_get(host);
|
|
|
|
+ present = mmc_gpio_get_cd(host->mmc);
|
|
+
|
|
spin_lock_irqsave(&host->lock, flags);
|
|
|
|
WARN_ON(host->mrq != NULL);
|
|
@@ -1338,7 +1340,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|
* zero: cd-gpio is used, and card is removed
|
|
* one: cd-gpio is used, and card is present
|
|
*/
|
|
- present = mmc_gpio_get_cd(host->mmc);
|
|
if (present < 0) {
|
|
/* If polling, assume that the card is always present. */
|
|
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
|
|
@@ -2049,15 +2050,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
|
|
{
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
unsigned long flags;
|
|
+ int present;
|
|
|
|
/* First check if client has provided their own card event */
|
|
if (host->ops->card_event)
|
|
host->ops->card_event(host);
|
|
|
|
+ present = sdhci_do_get_cd(host);
|
|
+
|
|
spin_lock_irqsave(&host->lock, flags);
|
|
|
|
/* Check host->mrq first in case we are runtime suspended */
|
|
- if (host->mrq && !sdhci_do_get_cd(host)) {
|
|
+ if (host->mrq && !present) {
|
|
pr_err("%s: Card removed during transfer!\n",
|
|
mmc_hostname(host->mmc));
|
|
pr_err("%s: Resetting controller.\n",
|
|
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
|
|
index 54730f4..9c208fd 100644
|
|
--- a/drivers/mmc/host/sh_mmcif.c
|
|
+++ b/drivers/mmc/host/sh_mmcif.c
|
|
@@ -1401,7 +1401,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
|
|
host = mmc_priv(mmc);
|
|
host->mmc = mmc;
|
|
host->addr = reg;
|
|
- host->timeout = msecs_to_jiffies(1000);
|
|
+ host->timeout = msecs_to_jiffies(10000);
|
|
host->ccs_enable = !pd || !pd->ccs_unsupported;
|
|
host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
|
|
|
|
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
|
|
index 19d6372..71e4f6c 100644
|
|
--- a/drivers/mtd/ftl.c
|
|
+++ b/drivers/mtd/ftl.c
|
|
@@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
|
return;
|
|
}
|
|
|
|
- ftl_freepart(partition);
|
|
kfree(partition);
|
|
}
|
|
|
|
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
|
|
index f8a7dd1..70a3db3 100644
|
|
--- a/drivers/mtd/maps/dc21285.c
|
|
+++ b/drivers/mtd/maps/dc21285.c
|
|
@@ -38,9 +38,9 @@ static void nw_en_write(void)
|
|
* we want to write a bit pattern XXX1 to Xilinx to enable
|
|
* the write gate, which will be open for about the next 2ms.
|
|
*/
|
|
- spin_lock_irqsave(&nw_gpio_lock, flags);
|
|
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
|
|
nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
|
|
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
|
|
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
|
|
|
|
/*
|
|
* let the ISA bus to catch on...
|
|
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
|
|
index 5073cbc..32d5e40 100644
|
|
--- a/drivers/mtd/mtd_blkdevs.c
|
|
+++ b/drivers/mtd/mtd_blkdevs.c
|
|
@@ -199,6 +199,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
|
|
return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
|
|
|
|
mutex_lock(&dev->lock);
|
|
+ mutex_lock(&mtd_table_mutex);
|
|
|
|
if (dev->open)
|
|
goto unlock;
|
|
@@ -222,6 +223,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
|
|
|
|
unlock:
|
|
dev->open++;
|
|
+ mutex_unlock(&mtd_table_mutex);
|
|
mutex_unlock(&dev->lock);
|
|
blktrans_dev_put(dev);
|
|
return ret;
|
|
@@ -232,6 +234,7 @@ error_release:
|
|
error_put:
|
|
module_put(dev->tr->owner);
|
|
kref_put(&dev->ref, blktrans_dev_release);
|
|
+ mutex_unlock(&mtd_table_mutex);
|
|
mutex_unlock(&dev->lock);
|
|
blktrans_dev_put(dev);
|
|
return ret;
|
|
@@ -245,6 +248,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
|
|
return;
|
|
|
|
mutex_lock(&dev->lock);
|
|
+ mutex_lock(&mtd_table_mutex);
|
|
|
|
if (--dev->open)
|
|
goto unlock;
|
|
@@ -258,6 +262,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
|
|
__put_mtd_device(dev->mtd);
|
|
}
|
|
unlock:
|
|
+ mutex_unlock(&mtd_table_mutex);
|
|
mutex_unlock(&dev->lock);
|
|
blktrans_dev_put(dev);
|
|
}
|
|
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
|
|
index 9715a7b..efc542d 100644
|
|
--- a/drivers/mtd/nand/nand_base.c
|
|
+++ b/drivers/mtd/nand/nand_base.c
|
|
@@ -2000,7 +2000,7 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
|
|
oob += chip->ecc.prepad;
|
|
}
|
|
|
|
- chip->read_buf(mtd, oob, eccbytes);
|
|
+ chip->write_buf(mtd, oob, eccbytes);
|
|
oob += eccbytes;
|
|
|
|
if (chip->ecc.postpad) {
|
|
@@ -3063,7 +3063,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
|
|
int *busw)
|
|
{
|
|
struct nand_onfi_params *p = &chip->onfi_params;
|
|
- int i;
|
|
+ int i, j;
|
|
int val;
|
|
|
|
/* Try ONFI for unknown chip or LP */
|
|
@@ -3072,18 +3072,10 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
|
|
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
|
|
return 0;
|
|
|
|
- /*
|
|
- * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
|
|
- * with NAND_BUSWIDTH_16
|
|
- */
|
|
- if (chip->options & NAND_BUSWIDTH_16) {
|
|
- pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
|
|
for (i = 0; i < 3; i++) {
|
|
- chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
|
|
+ for (j = 0; j < sizeof(*p); j++)
|
|
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
|
|
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
|
|
le16_to_cpu(p->crc)) {
|
|
break;
|
|
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
|
|
index 6f55d92..64d8e32 100644
|
|
--- a/drivers/mtd/nand/omap2.c
|
|
+++ b/drivers/mtd/nand/omap2.c
|
|
@@ -933,7 +933,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
|
|
u32 val;
|
|
|
|
val = readl(info->reg.gpmc_ecc_config);
|
|
- if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
|
|
+ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
|
|
return -EINVAL;
|
|
|
|
/* read ecc result */
|
|
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
|
|
index 51e15fd..d058b00 100644
|
|
--- a/drivers/mtd/nand/pxa3xx_nand.c
|
|
+++ b/drivers/mtd/nand/pxa3xx_nand.c
|
|
@@ -481,6 +481,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
|
|
nand_writel(info, NDCR, ndcr | int_mask);
|
|
}
|
|
|
|
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
|
|
+{
|
|
+ if (info->ecc_bch) {
|
|
+ int timeout;
|
|
+
|
|
+ /*
|
|
+ * According to the datasheet, when reading from NDDB
|
|
+ * with BCH enabled, after each 32 bytes reads, we
|
|
+ * have to make sure that the NDSR.RDDREQ bit is set.
|
|
+ *
|
|
+ * Drain the FIFO 8 32 bits reads at a time, and skip
|
|
+ * the polling on the last read.
|
|
+ */
|
|
+ while (len > 8) {
|
|
+ __raw_readsl(info->mmio_base + NDDB, data, 8);
|
|
+
|
|
+ for (timeout = 0;
|
|
+ !(nand_readl(info, NDSR) & NDSR_RDDREQ);
|
|
+ timeout++) {
|
|
+ if (timeout >= 5) {
|
|
+ dev_err(&info->pdev->dev,
|
|
+ "Timeout on RDDREQ while draining the FIFO\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ mdelay(1);
|
|
+ }
|
|
+
|
|
+ data += 32;
|
|
+ len -= 8;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ __raw_readsl(info->mmio_base + NDDB, data, len);
|
|
+}
|
|
+
|
|
static void handle_data_pio(struct pxa3xx_nand_info *info)
|
|
{
|
|
unsigned int do_bytes = min(info->data_size, info->chunk_size);
|
|
@@ -497,14 +533,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
|
|
DIV_ROUND_UP(info->oob_size, 4));
|
|
break;
|
|
case STATE_PIO_READING:
|
|
- __raw_readsl(info->mmio_base + NDDB,
|
|
- info->data_buff + info->data_buff_pos,
|
|
- DIV_ROUND_UP(do_bytes, 4));
|
|
+ drain_fifo(info,
|
|
+ info->data_buff + info->data_buff_pos,
|
|
+ DIV_ROUND_UP(do_bytes, 4));
|
|
|
|
if (info->oob_size > 0)
|
|
- __raw_readsl(info->mmio_base + NDDB,
|
|
- info->oob_buff + info->oob_buff_pos,
|
|
- DIV_ROUND_UP(info->oob_size, 4));
|
|
+ drain_fifo(info,
|
|
+ info->oob_buff + info->oob_buff_pos,
|
|
+ DIV_ROUND_UP(info->oob_size, 4));
|
|
break;
|
|
default:
|
|
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
|
|
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
|
|
index eeab969..b55bc52 100644
|
|
--- a/drivers/mtd/tests/torturetest.c
|
|
+++ b/drivers/mtd/tests/torturetest.c
|
|
@@ -264,7 +264,9 @@ static int __init tort_init(void)
|
|
int i;
|
|
void *patt;
|
|
|
|
- mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
|
|
+ err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
|
|
+ if (err)
|
|
+ goto out;
|
|
|
|
/* Check if the eraseblocks contain only 0xFF bytes */
|
|
if (check) {
|
|
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
|
|
index 6f27d9a..21841fe 100644
|
|
--- a/drivers/mtd/ubi/attach.c
|
|
+++ b/drivers/mtd/ubi/attach.c
|
|
@@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
|
|
second_is_newer = !second_is_newer;
|
|
} else {
|
|
dbg_bld("PEB %d CRC is OK", pnum);
|
|
- bitflips = !!err;
|
|
+ bitflips |= !!err;
|
|
}
|
|
mutex_unlock(&ubi->buf_mutex);
|
|
|
|
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
|
|
index 8ca49f2..4cbbd55 100644
|
|
--- a/drivers/mtd/ubi/cdev.c
|
|
+++ b/drivers/mtd/ubi/cdev.c
|
|
@@ -451,7 +451,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
|
|
/* Validate the request */
|
|
err = -EINVAL;
|
|
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
|
|
- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
|
|
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
|
|
break;
|
|
|
|
err = get_exclusive(desc);
|
|
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
|
|
index 0e11671d..930cf2c 100644
|
|
--- a/drivers/mtd/ubi/eba.c
|
|
+++ b/drivers/mtd/ubi/eba.c
|
|
@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
|
|
* during re-size.
|
|
*/
|
|
ubi_move_aeb_to_list(av, aeb, &ai->erase);
|
|
- vol->eba_tbl[aeb->lnum] = aeb->pnum;
|
|
+ else
|
|
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
|
|
index c5dad65..904b451 100644
|
|
--- a/drivers/mtd/ubi/fastmap.c
|
|
+++ b/drivers/mtd/ubi/fastmap.c
|
|
@@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
|
|
av = tmp_av;
|
|
else {
|
|
ubi_err("orphaned volume in fastmap pool!");
|
|
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
|
|
return UBI_BAD_FASTMAP;
|
|
}
|
|
|
|
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
|
|
index f913d70..c4b1af0 100644
|
|
--- a/drivers/mtd/ubi/misc.c
|
|
+++ b/drivers/mtd/ubi/misc.c
|
|
@@ -74,6 +74,8 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
|
|
for (i = 0; i < vol->used_ebs; i++) {
|
|
int size;
|
|
|
|
+ cond_resched();
|
|
+
|
|
if (i == vol->used_ebs - 1)
|
|
size = vol->last_eb_bytes;
|
|
else
|
|
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
|
|
index ec2c2dc..2a1b6e0 100644
|
|
--- a/drivers/mtd/ubi/upd.c
|
|
+++ b/drivers/mtd/ubi/upd.c
|
|
@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
|
|
ubi_assert(!vol->updating && !vol->changing_leb);
|
|
vol->updating = 1;
|
|
|
|
+ vol->upd_buf = vmalloc(ubi->leb_size);
|
|
+ if (!vol->upd_buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
err = set_update_marker(ubi, vol);
|
|
if (err)
|
|
return err;
|
|
@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
|
|
err = clear_update_marker(ubi, vol, 0);
|
|
if (err)
|
|
return err;
|
|
+
|
|
+ vfree(vol->upd_buf);
|
|
vol->updating = 0;
|
|
return 0;
|
|
}
|
|
|
|
- vol->upd_buf = vmalloc(ubi->leb_size);
|
|
- if (!vol->upd_buf)
|
|
- return -ENOMEM;
|
|
-
|
|
vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
|
|
vol->usable_leb_size);
|
|
vol->upd_bytes = bytes;
|
|
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
|
|
index 02317c1..c6b0b07 100644
|
|
--- a/drivers/mtd/ubi/wl.c
|
|
+++ b/drivers/mtd/ubi/wl.c
|
|
@@ -995,7 +995,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
|
|
int cancel)
|
|
{
|
|
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
|
|
- int vol_id = -1, uninitialized_var(lnum);
|
|
+ int vol_id = -1, lnum = -1;
|
|
#ifdef CONFIG_MTD_UBI_FASTMAP
|
|
int anchor = wrk->anchor;
|
|
#endif
|
|
@@ -1205,7 +1205,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
|
|
|
|
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
|
|
if (err) {
|
|
- kmem_cache_free(ubi_wl_entry_slab, e1);
|
|
if (e2)
|
|
kmem_cache_free(ubi_wl_entry_slab, e2);
|
|
goto out_ro;
|
|
@@ -1219,10 +1218,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
|
|
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
|
|
e2->pnum, vol_id, lnum);
|
|
err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
|
|
- if (err) {
|
|
- kmem_cache_free(ubi_wl_entry_slab, e2);
|
|
+ if (err)
|
|
goto out_ro;
|
|
- }
|
|
}
|
|
|
|
dbg_wl("done");
|
|
@@ -1258,10 +1255,9 @@ out_not_moved:
|
|
|
|
ubi_free_vid_hdr(ubi, vid_hdr);
|
|
err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
|
|
- if (err) {
|
|
- kmem_cache_free(ubi_wl_entry_slab, e2);
|
|
+ if (err)
|
|
goto out_ro;
|
|
- }
|
|
+
|
|
mutex_unlock(&ubi->move_mutex);
|
|
return 0;
|
|
|
|
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
|
|
index 494b888..7e5c6a8 100644
|
|
--- a/drivers/net/Kconfig
|
|
+++ b/drivers/net/Kconfig
|
|
@@ -135,6 +135,7 @@ config MACVLAN
|
|
config MACVTAP
|
|
tristate "MAC-VLAN based tap driver"
|
|
depends on MACVLAN
|
|
+ depends on INET
|
|
help
|
|
This adds a specialized tap character device driver that is based
|
|
on the MAC-VLAN network interface, called macvtap. A macvtap device
|
|
@@ -205,6 +206,7 @@ config RIONET_RX_SIZE
|
|
|
|
config TUN
|
|
tristate "Universal TUN/TAP device driver support"
|
|
+ depends on INET
|
|
select CRC32
|
|
---help---
|
|
TUN/TAP provides packet reception and transmission for user space
|
|
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
|
|
index dcde560..3177498 100644
|
|
--- a/drivers/net/bonding/bond_3ad.c
|
|
+++ b/drivers/net/bonding/bond_3ad.c
|
|
@@ -2479,7 +2479,7 @@ out:
|
|
return NETDEV_TX_OK;
|
|
err_free:
|
|
/* no suitable interface, frame not sent */
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
|
|
index e8f133e..c67bbc9 100644
|
|
--- a/drivers/net/bonding/bond_alb.c
|
|
+++ b/drivers/net/bonding/bond_alb.c
|
|
@@ -1479,7 +1479,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
|
|
}
|
|
|
|
/* no suitable interface, frame not sent */
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
out:
|
|
return NETDEV_TX_OK;
|
|
}
|
|
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
|
|
index 1c5caf5..6f56d07 100644
|
|
--- a/drivers/net/bonding/bond_main.c
|
|
+++ b/drivers/net/bonding/bond_main.c
|
|
@@ -2454,9 +2454,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
|
|
if (!rtnl_trylock())
|
|
goto re_arm;
|
|
|
|
- if (slave_state_changed) {
|
|
+ if (slave_state_changed)
|
|
bond_slave_state_change(bond);
|
|
- } else if (do_failover) {
|
|
+ if (do_failover) {
|
|
/* the bond_select_active_slave must hold RTNL
|
|
* and curr_slave_lock for write.
|
|
*/
|
|
@@ -3572,7 +3572,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
|
|
}
|
|
}
|
|
/* no slave that can tx has been found */
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
}
|
|
|
|
/**
|
|
@@ -3628,8 +3628,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
|
|
else
|
|
bond_xmit_slave_id(bond, skb, 0);
|
|
} else {
|
|
- slave_id = bond_rr_gen_slave_id(bond);
|
|
- bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
|
|
+ int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
|
|
+
|
|
+ if (likely(slave_cnt)) {
|
|
+ slave_id = bond_rr_gen_slave_id(bond);
|
|
+ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
|
|
+ } else {
|
|
+ dev_kfree_skb_any(skb);
|
|
+ }
|
|
}
|
|
|
|
return NETDEV_TX_OK;
|
|
@@ -3648,7 +3654,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
|
|
if (slave)
|
|
bond_dev_queue_xmit(bond, skb, slave->dev);
|
|
else
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
}
|
|
@@ -3660,8 +3666,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
|
|
static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
|
|
{
|
|
struct bonding *bond = netdev_priv(bond_dev);
|
|
+ int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
|
|
|
|
- bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
|
|
+ if (likely(slave_cnt))
|
|
+ bond_xmit_slave_id(bond, skb,
|
|
+ bond_xmit_hash(bond, skb, bond->slave_cnt));
|
|
+ else
|
|
+ dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
}
|
|
@@ -3691,7 +3702,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
|
|
if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
|
|
bond_dev_queue_xmit(bond, skb, slave->dev);
|
|
else
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
}
|
|
@@ -3778,7 +3789,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
|
|
pr_err("%s: Error: Unknown bonding mode %d\n",
|
|
dev->name, bond->params.mode);
|
|
WARN_ON_ONCE(1);
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
}
|
|
@@ -3799,7 +3810,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
if (bond_has_slaves(bond))
|
|
ret = __bond_start_xmit(skb, dev);
|
|
else
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
|
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
|
|
index fc59bc6..84ad2b4 100644
|
|
--- a/drivers/net/can/dev.c
|
|
+++ b/drivers/net/can/dev.c
|
|
@@ -384,7 +384,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
|
|
BUG_ON(idx >= priv->echo_skb_max);
|
|
|
|
if (priv->echo_skb[idx]) {
|
|
- kfree_skb(priv->echo_skb[idx]);
|
|
+ dev_kfree_skb_any(priv->echo_skb[idx]);
|
|
priv->echo_skb[idx] = NULL;
|
|
}
|
|
}
|
|
@@ -502,6 +502,14 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
|
|
skb->pkt_type = PACKET_BROADCAST;
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
+ skb_reset_mac_header(skb);
|
|
+ skb_reset_network_header(skb);
|
|
+ skb_reset_transport_header(skb);
|
|
+
|
|
+ skb_reset_mac_header(skb);
|
|
+ skb_reset_network_header(skb);
|
|
+ skb_reset_transport_header(skb);
|
|
+
|
|
can_skb_reserve(skb);
|
|
can_skb_prv(skb)->ifindex = dev->ifindex;
|
|
|
|
@@ -664,10 +672,14 @@ static int can_changelink(struct net_device *dev,
|
|
if (dev->flags & IFF_UP)
|
|
return -EBUSY;
|
|
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
|
|
- if (cm->flags & ~priv->ctrlmode_supported)
|
|
+
|
|
+ /* check whether changed bits are allowed to be modified */
|
|
+ if (cm->mask & ~priv->ctrlmode_supported)
|
|
return -EOPNOTSUPP;
|
|
+
|
|
+ /* clear bits to be modified and copy the flag values */
|
|
priv->ctrlmode &= ~cm->mask;
|
|
- priv->ctrlmode |= cm->flags;
|
|
+ priv->ctrlmode |= (cm->flags & cm->mask);
|
|
}
|
|
|
|
if (data[IFLA_CAN_RESTART_MS]) {
|
|
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
|
|
index 9250e28..534c0b8 100644
|
|
--- a/drivers/net/can/flexcan.c
|
|
+++ b/drivers/net/can/flexcan.c
|
|
@@ -1102,12 +1102,19 @@ static int flexcan_probe(struct platform_device *pdev)
|
|
const struct flexcan_devtype_data *devtype_data;
|
|
struct net_device *dev;
|
|
struct flexcan_priv *priv;
|
|
+ struct regulator *reg_xceiver;
|
|
struct resource *mem;
|
|
struct clk *clk_ipg = NULL, *clk_per = NULL;
|
|
void __iomem *base;
|
|
int err, irq;
|
|
u32 clock_freq = 0;
|
|
|
|
+ reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
|
|
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
|
|
+ return -EPROBE_DEFER;
|
|
+ else if (IS_ERR(reg_xceiver))
|
|
+ reg_xceiver = NULL;
|
|
+
|
|
if (pdev->dev.of_node)
|
|
of_property_read_u32(pdev->dev.of_node,
|
|
"clock-frequency", &clock_freq);
|
|
@@ -1169,9 +1176,7 @@ static int flexcan_probe(struct platform_device *pdev)
|
|
priv->pdata = dev_get_platdata(&pdev->dev);
|
|
priv->devtype_data = devtype_data;
|
|
|
|
- priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
|
|
- if (IS_ERR(priv->reg_xceiver))
|
|
- priv->reg_xceiver = NULL;
|
|
+ priv->reg_xceiver = reg_xceiver;
|
|
|
|
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
|
|
|
|
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
|
|
index 7fbe859..f34f7fa 100644
|
|
--- a/drivers/net/can/usb/esd_usb2.c
|
|
+++ b/drivers/net/can/usb/esd_usb2.c
|
|
@@ -1141,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
|
|
}
|
|
}
|
|
unlink_all_urbs(dev);
|
|
+ kfree(dev);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
|
|
index e77d110..3d19867 100644
|
|
--- a/drivers/net/can/usb/kvaser_usb.c
|
|
+++ b/drivers/net/can/usb/kvaser_usb.c
|
|
@@ -578,7 +578,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
|
|
usb_sndbulkpipe(dev->udev,
|
|
dev->bulk_out->bEndpointAddress),
|
|
buf, msg->len,
|
|
- kvaser_usb_simple_msg_callback, priv);
|
|
+ kvaser_usb_simple_msg_callback, netdev);
|
|
usb_anchor_urb(urb, &priv->tx_submitted);
|
|
|
|
err = usb_submit_urb(urb, GFP_ATOMIC);
|
|
@@ -653,11 +653,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
|
|
priv = dev->nets[channel];
|
|
stats = &priv->netdev->stats;
|
|
|
|
- if (status & M16C_STATE_BUS_RESET) {
|
|
- kvaser_usb_unlink_tx_urbs(priv);
|
|
- return;
|
|
- }
|
|
-
|
|
skb = alloc_can_err_skb(priv->netdev, &cf);
|
|
if (!skb) {
|
|
stats->rx_dropped++;
|
|
@@ -668,7 +663,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
|
|
|
|
netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
|
|
|
|
- if (status & M16C_STATE_BUS_OFF) {
|
|
+ if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
|
|
cf->can_id |= CAN_ERR_BUSOFF;
|
|
|
|
priv->can.can_stats.bus_off++;
|
|
@@ -694,9 +689,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
|
|
}
|
|
|
|
new_state = CAN_STATE_ERROR_PASSIVE;
|
|
- }
|
|
-
|
|
- if (status == M16C_STATE_BUS_ERROR) {
|
|
+ } else if (status & M16C_STATE_BUS_ERROR) {
|
|
if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
|
|
((txerr >= 96) || (rxerr >= 96))) {
|
|
cf->can_id |= CAN_ERR_CRTL;
|
|
@@ -706,7 +699,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
|
|
|
|
priv->can.can_stats.error_warning++;
|
|
new_state = CAN_STATE_ERROR_WARNING;
|
|
- } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
|
|
+ } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
|
|
+ ((txerr < 96) && (rxerr < 96))) {
|
|
cf->can_id |= CAN_ERR_PROT;
|
|
cf->data[2] = CAN_ERR_PROT_ACTIVE;
|
|
|
|
@@ -1237,6 +1231,9 @@ static int kvaser_usb_close(struct net_device *netdev)
|
|
if (err)
|
|
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
|
|
|
|
+ /* reset tx contexts */
|
|
+ kvaser_usb_unlink_tx_urbs(priv);
|
|
+
|
|
priv->can.state = CAN_STATE_STOPPED;
|
|
close_candev(priv->netdev);
|
|
|
|
@@ -1285,12 +1282,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
|
|
if (!urb) {
|
|
netdev_err(netdev, "No memory left for URBs\n");
|
|
stats->tx_dropped++;
|
|
- goto nourbmem;
|
|
+ dev_kfree_skb(skb);
|
|
+ return NETDEV_TX_OK;
|
|
}
|
|
|
|
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
|
|
if (!buf) {
|
|
stats->tx_dropped++;
|
|
+ dev_kfree_skb(skb);
|
|
goto nobufmem;
|
|
}
|
|
|
|
@@ -1325,6 +1324,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
|
|
}
|
|
}
|
|
|
|
+ /* This should never happen; it implies a flow control bug */
|
|
if (!context) {
|
|
netdev_warn(netdev, "cannot find free context\n");
|
|
ret = NETDEV_TX_BUSY;
|
|
@@ -1355,9 +1355,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
|
|
if (unlikely(err)) {
|
|
can_free_echo_skb(netdev, context->echo_index);
|
|
|
|
- skb = NULL; /* set to NULL to avoid double free in
|
|
- * dev_kfree_skb(skb) */
|
|
-
|
|
atomic_dec(&priv->active_tx_urbs);
|
|
usb_unanchor_urb(urb);
|
|
|
|
@@ -1379,8 +1376,6 @@ releasebuf:
|
|
kfree(buf);
|
|
nobufmem:
|
|
usb_free_urb(urb);
|
|
-nourbmem:
|
|
- dev_kfree_skb(skb);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1492,6 +1487,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
|
|
struct kvaser_usb_net_priv *priv;
|
|
int i, err;
|
|
|
|
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
|
|
if (!netdev) {
|
|
dev_err(&intf->dev, "Cannot alloc candev\n");
|
|
@@ -1577,7 +1576,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
|
|
{
|
|
struct kvaser_usb *dev;
|
|
int err = -ENOMEM;
|
|
- int i;
|
|
+ int i, retry = 3;
|
|
|
|
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
|
|
if (!dev)
|
|
@@ -1595,10 +1594,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
|
|
|
|
usb_set_intfdata(intf, dev);
|
|
|
|
- for (i = 0; i < MAX_NET_DEVICES; i++)
|
|
- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
|
|
+ /* On some x86 laptops, plugging a Kvaser device again after
|
|
+ * an unplug makes the firmware always ignore the very first
|
|
+ * command. For such a case, provide some room for retries
|
|
+ * instead of completely exiting the driver.
|
|
+ */
|
|
+ do {
|
|
+ err = kvaser_usb_get_software_info(dev);
|
|
+ } while (--retry && err == -ETIMEDOUT);
|
|
|
|
- err = kvaser_usb_get_software_info(dev);
|
|
if (err) {
|
|
dev_err(&intf->dev,
|
|
"Cannot get software infos, error %d\n", err);
|
|
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
|
|
index 0b7a4c3..03e7f0c 100644
|
|
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
|
|
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
|
|
@@ -734,7 +734,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
|
|
dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
|
|
if (!dev->cmd_buf) {
|
|
err = -ENOMEM;
|
|
- goto lbl_set_intf_data;
|
|
+ goto lbl_free_candev;
|
|
}
|
|
|
|
dev->udev = usb_dev;
|
|
@@ -773,7 +773,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
|
|
err = register_candev(netdev);
|
|
if (err) {
|
|
dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
|
|
- goto lbl_free_cmd_buf;
|
|
+ goto lbl_restore_intf_data;
|
|
}
|
|
|
|
if (dev->prev_siblings)
|
|
@@ -786,14 +786,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
|
|
if (dev->adapter->dev_init) {
|
|
err = dev->adapter->dev_init(dev);
|
|
if (err)
|
|
- goto lbl_free_cmd_buf;
|
|
+ goto lbl_unregister_candev;
|
|
}
|
|
|
|
/* set bus off */
|
|
if (dev->adapter->dev_set_bus) {
|
|
err = dev->adapter->dev_set_bus(dev, 0);
|
|
if (err)
|
|
- goto lbl_free_cmd_buf;
|
|
+ goto lbl_unregister_candev;
|
|
}
|
|
|
|
/* get device number early */
|
|
@@ -805,11 +805,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
|
|
|
|
return 0;
|
|
|
|
-lbl_free_cmd_buf:
|
|
- kfree(dev->cmd_buf);
|
|
+lbl_unregister_candev:
|
|
+ unregister_candev(netdev);
|
|
|
|
-lbl_set_intf_data:
|
|
+lbl_restore_intf_data:
|
|
usb_set_intfdata(intf, dev->prev_siblings);
|
|
+ kfree(dev->cmd_buf);
|
|
+
|
|
+lbl_free_candev:
|
|
free_candev(netdev);
|
|
|
|
return err;
|
|
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
|
|
index 263dd92..f7f796a 100644
|
|
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
|
|
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
|
|
@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
|
|
if (!(dev->state & PCAN_USB_STATE_CONNECTED))
|
|
return 0;
|
|
|
|
- memset(req_addr, '\0', req_size);
|
|
-
|
|
req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
|
|
|
|
switch (req_id) {
|
|
@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
|
|
default:
|
|
p = usb_rcvctrlpipe(dev->udev, 0);
|
|
req_type |= USB_DIR_IN;
|
|
+ memset(req_addr, '\0', req_size);
|
|
break;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
|
|
index 9339ccc..ad0e71c 100644
|
|
--- a/drivers/net/ethernet/amd/pcnet32.c
|
|
+++ b/drivers/net/ethernet/amd/pcnet32.c
|
|
@@ -1516,7 +1516,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
|
|
{
|
|
struct pcnet32_private *lp;
|
|
int i, media;
|
|
- int fdx, mii, fset, dxsuflo;
|
|
+ int fdx, mii, fset, dxsuflo, sram;
|
|
int chip_version;
|
|
char *chipname;
|
|
struct net_device *dev;
|
|
@@ -1553,7 +1553,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
|
|
}
|
|
|
|
/* initialize variables */
|
|
- fdx = mii = fset = dxsuflo = 0;
|
|
+ fdx = mii = fset = dxsuflo = sram = 0;
|
|
chip_version = (chip_version >> 12) & 0xffff;
|
|
|
|
switch (chip_version) {
|
|
@@ -1586,6 +1586,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
|
|
chipname = "PCnet/FAST III 79C973"; /* PCI */
|
|
fdx = 1;
|
|
mii = 1;
|
|
+ sram = 1;
|
|
break;
|
|
case 0x2626:
|
|
chipname = "PCnet/Home 79C978"; /* PCI */
|
|
@@ -1609,6 +1610,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
|
|
chipname = "PCnet/FAST III 79C975"; /* PCI */
|
|
fdx = 1;
|
|
mii = 1;
|
|
+ sram = 1;
|
|
break;
|
|
case 0x2628:
|
|
chipname = "PCnet/PRO 79C976";
|
|
@@ -1637,6 +1639,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
|
|
dxsuflo = 1;
|
|
}
|
|
|
|
+ /*
|
|
+ * The Am79C973/Am79C975 controllers come with 12K of SRAM
|
|
+ * which we can use for the Tx/Rx buffers but most importantly,
|
|
+ * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
|
|
+ * Tx fifo underflows.
|
|
+ */
|
|
+ if (sram) {
|
|
+ /*
|
|
+ * The SRAM is being configured in two steps. First we
|
|
+ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
|
|
+ * to the datasheet, each bit corresponds to a 512-byte
|
|
+ * page so we can have at most 24 pages. The SRAM_SIZE
|
|
+ * holds the value of the upper 8 bits of the 16-bit SRAM size.
|
|
+ * The low 8-bits start at 0x00 and end at 0xff. So the
|
|
+ * address range is from 0x0000 up to 0x17ff. Therefore,
|
|
+ * the SRAM_SIZE is set to 0x17. The next step is to set
|
|
+ * the BCR26:SRAM_BND midway through so the Tx and Rx
|
|
+ * buffers can share the SRAM equally.
|
|
+ */
|
|
+ a->write_bcr(ioaddr, 25, 0x17);
|
|
+ a->write_bcr(ioaddr, 26, 0xc);
|
|
+ /* And finally enable the NOUFLO bit */
|
|
+ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
|
|
+ }
|
|
+
|
|
dev = alloc_etherdev(sizeof(*lp));
|
|
if (!dev) {
|
|
ret = -ENOMEM;
|
|
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
|
|
index 380d249..3e1d7d2 100644
|
|
--- a/drivers/net/ethernet/atheros/alx/main.c
|
|
+++ b/drivers/net/ethernet/atheros/alx/main.c
|
|
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
|
|
schedule_work(&alx->reset_wk);
|
|
}
|
|
|
|
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
|
|
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
|
|
{
|
|
struct alx_rx_queue *rxq = &alx->rxq;
|
|
struct alx_rrd *rrd;
|
|
struct alx_buffer *rxb;
|
|
struct sk_buff *skb;
|
|
u16 length, rfd_cleaned = 0;
|
|
+ int work = 0;
|
|
|
|
- while (budget > 0) {
|
|
+ while (work < budget) {
|
|
rrd = &rxq->rrd[rxq->rrd_read_idx];
|
|
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
|
|
break;
|
|
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
|
|
ALX_GET_FIELD(le32_to_cpu(rrd->word0),
|
|
RRD_NOR) != 1) {
|
|
alx_schedule_reset(alx);
|
|
- return 0;
|
|
+ return work;
|
|
}
|
|
|
|
rxb = &rxq->bufs[rxq->read_idx];
|
|
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
|
|
}
|
|
|
|
napi_gro_receive(&alx->napi, skb);
|
|
- budget--;
|
|
+ work++;
|
|
|
|
next_pkt:
|
|
if (++rxq->read_idx == alx->rx_ringsz)
|
|
@@ -258,21 +259,22 @@ next_pkt:
|
|
if (rfd_cleaned)
|
|
alx_refill_rx_ring(alx, GFP_ATOMIC);
|
|
|
|
- return budget > 0;
|
|
+ return work;
|
|
}
|
|
|
|
static int alx_poll(struct napi_struct *napi, int budget)
|
|
{
|
|
struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
|
|
struct alx_hw *hw = &alx->hw;
|
|
- bool complete = true;
|
|
unsigned long flags;
|
|
+ bool tx_complete;
|
|
+ int work;
|
|
|
|
- complete = alx_clean_tx_irq(alx) &&
|
|
- alx_clean_rx_irq(alx, budget);
|
|
+ tx_complete = alx_clean_tx_irq(alx);
|
|
+ work = alx_clean_rx_irq(alx, budget);
|
|
|
|
- if (!complete)
|
|
- return 1;
|
|
+ if (!tx_complete || work == budget)
|
|
+ return budget;
|
|
|
|
napi_complete(&alx->napi);
|
|
|
|
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
|
|
|
|
alx_post_write(hw);
|
|
|
|
- return 0;
|
|
+ return work;
|
|
}
|
|
|
|
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
|
|
index 6c9e1c9..0c8a168 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2.c
|
|
@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
|
|
sw_cons = BNX2_NEXT_TX_BD(sw_cons);
|
|
|
|
tx_bytes += skb->len;
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
tx_pkt++;
|
|
if (tx_pkt == budget)
|
|
break;
|
|
@@ -6640,7 +6640,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
|
|
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
@@ -6733,7 +6733,7 @@ dma_error:
|
|
PCI_DMA_TODEVICE);
|
|
}
|
|
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
|
|
index 391f29e..1fbeaa9 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
|
|
@@ -337,6 +337,7 @@ struct sw_tx_bd {
|
|
u8 flags;
|
|
/* Set on the first BD descriptor when there is a split BD */
|
|
#define BNX2X_TSO_SPLIT_BD (1<<0)
|
|
+#define BNX2X_HAS_SECOND_PBD (1<<1)
|
|
};
|
|
|
|
struct sw_rx_page {
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
|
|
index 5ed5124..a830d42 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
|
|
@@ -223,6 +223,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
|
|
--nbd;
|
|
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
|
|
|
|
+ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
|
|
+ /* Skip second parse bd... */
|
|
+ --nbd;
|
|
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
|
|
+ }
|
|
+
|
|
/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
|
|
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
|
|
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
|
|
@@ -3125,7 +3131,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
|
|
}
|
|
#endif
|
|
if (!bnx2x_fp_lock_napi(fp))
|
|
- return work_done;
|
|
+ return budget;
|
|
|
|
for_each_cos_in_tx_queue(fp, cos)
|
|
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
|
|
@@ -3868,6 +3874,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
/* set encapsulation flag in start BD */
|
|
SET_FLAG(tx_start_bd->general_data,
|
|
ETH_TX_START_BD_TUNNEL_EXIST, 1);
|
|
+
|
|
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
|
|
+
|
|
nbd++;
|
|
} else if (xmit_type & XMIT_CSUM) {
|
|
/* Set PBD in checksum offload case w/o encapsulation */
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
index 7d43822..2428740 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
@@ -12395,6 +12395,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
|
|
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
|
|
PCICFG_VENDOR_ID_OFFSET);
|
|
|
|
+ /* Set PCIe reset type to fundamental for EEH recovery */
|
|
+ pdev->needs_freset = 1;
|
|
+
|
|
/* AER (Advanced Error reporting) configuration */
|
|
rc = pci_enable_pcie_error_reporting(pdev);
|
|
if (!rc)
|
|
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
|
|
index 9ab6cc3..1ff5698 100644
|
|
--- a/drivers/net/ethernet/broadcom/tg3.c
|
|
+++ b/drivers/net/ethernet/broadcom/tg3.c
|
|
@@ -6594,7 +6594,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
|
|
pkts_compl++;
|
|
bytes_compl += skb->len;
|
|
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
|
|
if (unlikely(tx_bug)) {
|
|
tg3_tx_recover(tp);
|
|
@@ -6924,8 +6924,9 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
|
|
skb->protocol = eth_type_trans(skb, tp->dev);
|
|
|
|
if (len > (tp->dev->mtu + ETH_HLEN) &&
|
|
- skb->protocol != htons(ETH_P_8021Q)) {
|
|
- dev_kfree_skb(skb);
|
|
+ skb->protocol != htons(ETH_P_8021Q) &&
|
|
+ skb->protocol != htons(ETH_P_8021AD)) {
|
|
+ dev_kfree_skb_any(skb);
|
|
goto drop_it_no_recycle;
|
|
}
|
|
|
|
@@ -7808,7 +7809,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
|
|
PCI_DMA_TODEVICE);
|
|
/* Make sure the mapping succeeded */
|
|
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
|
|
- dev_kfree_skb(new_skb);
|
|
+ dev_kfree_skb_any(new_skb);
|
|
ret = -1;
|
|
} else {
|
|
u32 save_entry = *entry;
|
|
@@ -7823,13 +7824,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
|
|
new_skb->len, base_flags,
|
|
mss, vlan)) {
|
|
tg3_tx_skb_unmap(tnapi, save_entry, -1);
|
|
- dev_kfree_skb(new_skb);
|
|
+ dev_kfree_skb_any(new_skb);
|
|
ret = -1;
|
|
}
|
|
}
|
|
}
|
|
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
*pskb = new_skb;
|
|
return ret;
|
|
}
|
|
@@ -7872,7 +7873,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
|
|
} while (segs);
|
|
|
|
tg3_tso_bug_end:
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
|
|
return NETDEV_TX_OK;
|
|
}
|
|
@@ -7916,8 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
entry = tnapi->tx_prod;
|
|
base_flags = 0;
|
|
- if (skb->ip_summed == CHECKSUM_PARTIAL)
|
|
- base_flags |= TXD_FLAG_TCPUDP_CSUM;
|
|
|
|
mss = skb_shinfo(skb)->gso_size;
|
|
if (mss) {
|
|
@@ -7933,6 +7932,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
|
|
|
|
+ /* HW/FW can not correctly segment packets that have been
|
|
+ * vlan encapsulated.
|
|
+ */
|
|
+ if (skb->protocol == htons(ETH_P_8021Q) ||
|
|
+ skb->protocol == htons(ETH_P_8021AD))
|
|
+ return tg3_tso_bug(tp, skb);
|
|
+
|
|
if (!skb_is_gso_v6(skb)) {
|
|
iph->check = 0;
|
|
iph->tot_len = htons(mss + hdr_len);
|
|
@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
base_flags |= tsflags << 12;
|
|
}
|
|
}
|
|
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
|
+ /* HW/FW can not correctly checksum packets that have been
|
|
+ * vlan encapsulated.
|
|
+ */
|
|
+ if (skb->protocol == htons(ETH_P_8021Q) ||
|
|
+ skb->protocol == htons(ETH_P_8021AD)) {
|
|
+ if (skb_checksum_help(skb))
|
|
+ goto drop;
|
|
+ } else {
|
|
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
|
|
+ }
|
|
}
|
|
|
|
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
|
|
@@ -8094,7 +8111,7 @@ dma_error:
|
|
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
|
|
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
|
|
drop:
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
drop_nofree:
|
|
tp->tx_dropped++;
|
|
return NETDEV_TX_OK;
|
|
@@ -8532,7 +8549,8 @@ static int tg3_init_rings(struct tg3 *tp)
|
|
if (tnapi->rx_rcb)
|
|
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
|
|
|
|
- if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
|
|
+ if (tnapi->prodring.rx_std &&
|
|
+ tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
|
|
tg3_free_rings(tp);
|
|
return -ENOMEM;
|
|
}
|
|
@@ -17714,23 +17732,6 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|
goto err_out_apeunmap;
|
|
}
|
|
|
|
- /*
|
|
- * Reset chip in case UNDI or EFI driver did not shutdown
|
|
- * DMA self test will enable WDMAC and we'll see (spurious)
|
|
- * pending DMA on the PCI bus at that point.
|
|
- */
|
|
- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
|
|
- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
|
|
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
|
|
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
|
|
- }
|
|
-
|
|
- err = tg3_test_dma(tp);
|
|
- if (err) {
|
|
- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
|
|
- goto err_out_apeunmap;
|
|
- }
|
|
-
|
|
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
|
|
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
|
|
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
|
|
@@ -17775,6 +17776,23 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|
sndmbx += 0xc;
|
|
}
|
|
|
|
+ /*
|
|
+ * Reset chip in case UNDI or EFI driver did not shutdown
|
|
+ * DMA self test will enable WDMAC and we'll see (spurious)
|
|
+ * pending DMA on the PCI bus at that point.
|
|
+ */
|
|
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
|
|
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
|
|
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
|
|
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
|
|
+ }
|
|
+
|
|
+ err = tg3_test_dma(tp);
|
|
+ if (err) {
|
|
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
|
|
+ goto err_out_apeunmap;
|
|
+ }
|
|
+
|
|
tg3_init_coal(tp);
|
|
|
|
pci_set_drvdata(pdev, dev);
|
|
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
|
|
index 4ad1187..669eeb4 100644
|
|
--- a/drivers/net/ethernet/brocade/bna/bnad.c
|
|
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
|
|
@@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
|
|
prefetch(bnad->netdev);
|
|
|
|
cq = ccb->sw_q;
|
|
- cmpl = &cq[ccb->producer_index];
|
|
|
|
while (packets < budget) {
|
|
+ cmpl = &cq[ccb->producer_index];
|
|
if (!cmpl->valid)
|
|
break;
|
|
/* The 'valid' field is set by the adapter, only after writing
|
|
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
|
|
index 6c78848..9e8f4be 100644
|
|
--- a/drivers/net/ethernet/cadence/macb.c
|
|
+++ b/drivers/net/ethernet/cadence/macb.c
|
|
@@ -30,7 +30,6 @@
|
|
#include <linux/of_device.h>
|
|
#include <linux/of_mdio.h>
|
|
#include <linux/of_net.h>
|
|
-#include <linux/pinctrl/consumer.h>
|
|
|
|
#include "macb.h"
|
|
|
|
@@ -1800,7 +1799,6 @@ static int __init macb_probe(struct platform_device *pdev)
|
|
struct phy_device *phydev;
|
|
u32 config;
|
|
int err = -ENXIO;
|
|
- struct pinctrl *pinctrl;
|
|
const char *mac;
|
|
|
|
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
@@ -1809,15 +1807,6 @@ static int __init macb_probe(struct platform_device *pdev)
|
|
goto err_out;
|
|
}
|
|
|
|
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
|
|
- if (IS_ERR(pinctrl)) {
|
|
- err = PTR_ERR(pinctrl);
|
|
- if (err == -EPROBE_DEFER)
|
|
- goto err_out;
|
|
-
|
|
- dev_warn(&pdev->dev, "No pinctrl provided\n");
|
|
- }
|
|
-
|
|
err = -ENOMEM;
|
|
dev = alloc_etherdev(sizeof(*bp));
|
|
if (!dev)
|
|
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
index b740bfc..ff9b423 100644
|
|
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
@@ -1044,10 +1044,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
|
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
|
|
}
|
|
|
|
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
|
|
- skb->csum = htons(checksum);
|
|
- skb->ip_summed = CHECKSUM_COMPLETE;
|
|
- }
|
|
+ /* Hardware does not provide whole packet checksum. It only
|
|
+ * provides pseudo checksum. Since hw validates the packet
|
|
+ * checksum but not provide us the checksum value. use
|
|
+ * CHECSUM_UNNECESSARY.
|
|
+ */
|
|
+ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
|
|
+ ipv4_csum_ok)
|
|
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
if (vlan_stripped)
|
|
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
|
|
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
|
|
index 80bfa03..075e7e7 100644
|
|
--- a/drivers/net/ethernet/emulex/benet/be_main.c
|
|
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
|
|
@@ -1883,7 +1883,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
|
|
queue_tail_inc(txq);
|
|
} while (cur_index != last_index);
|
|
|
|
- kfree_skb(sent_skb);
|
|
+ dev_kfree_skb_any(sent_skb);
|
|
return num_wrbs;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
|
|
index 1fc8334..55e3075 100644
|
|
--- a/drivers/net/ethernet/ibm/ibmveth.c
|
|
+++ b/drivers/net/ethernet/ibm/ibmveth.c
|
|
@@ -292,6 +292,18 @@ failure:
|
|
atomic_add(buffers_added, &(pool->available));
|
|
}
|
|
|
|
+/*
|
|
+ * The final 8 bytes of the buffer list is a counter of frames dropped
|
|
+ * because there was not a buffer in the buffer list capable of holding
|
|
+ * the frame.
|
|
+ */
|
|
+static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
|
|
+{
|
|
+ __be64 *p = adapter->buffer_list_addr + 4096 - 8;
|
|
+
|
|
+ adapter->rx_no_buffer = be64_to_cpup(p);
|
|
+}
|
|
+
|
|
/* replenish routine */
|
|
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
|
|
{
|
|
@@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
|
|
ibmveth_replenish_buffer_pool(adapter, pool);
|
|
}
|
|
|
|
- adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
|
|
- 4096 - 8);
|
|
+ ibmveth_update_rx_no_buffer(adapter);
|
|
}
|
|
|
|
/* empty and free ana buffer pool - also used to do cleanup in error paths */
|
|
@@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev)
|
|
|
|
free_irq(netdev->irq, netdev);
|
|
|
|
- adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
|
|
- 4096 - 8);
|
|
+ ibmveth_update_rx_no_buffer(adapter);
|
|
|
|
ibmveth_cleanup(adapter);
|
|
|
|
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
|
|
index 46e6544..b655fe4 100644
|
|
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
|
|
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
|
|
@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
|
|
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
|
|
struct e1000_rx_ring *rx_ring,
|
|
int *work_done, int work_to_do);
|
|
+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
|
|
+ struct e1000_rx_ring *rx_ring,
|
|
+ int cleaned_count)
|
|
+{
|
|
+}
|
|
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
|
|
struct e1000_rx_ring *rx_ring,
|
|
int cleaned_count);
|
|
@@ -3531,8 +3536,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
|
msleep(1);
|
|
/* e1000_down has a dependency on max_frame_size */
|
|
hw->max_frame_size = max_frame;
|
|
- if (netif_running(netdev))
|
|
+ if (netif_running(netdev)) {
|
|
+ /* prevent buffers from being reallocated */
|
|
+ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
|
|
e1000_down(adapter);
|
|
+ }
|
|
|
|
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
|
|
* means we reserve 2 more, this pushes us to allocate from the next
|
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
|
|
index b901371..5d3206d 100644
|
|
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
|
|
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
|
|
@@ -4024,6 +4024,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
|
|
DCB_CAP_DCBX_VER_IEEE;
|
|
pf->flags |= I40E_FLAG_DCB_ENABLED;
|
|
}
|
|
+ } else {
|
|
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
|
|
+ pf->hw.aq.asq_last_status);
|
|
}
|
|
|
|
out:
|
|
@@ -8003,7 +8006,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
if (err) {
|
|
dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
|
|
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
|
|
- goto err_init_dcb;
|
|
+ /* Continue without DCB enabled */
|
|
}
|
|
#endif /* CONFIG_I40E_DCB */
|
|
|
|
@@ -8119,9 +8122,6 @@ err_vsis:
|
|
err_switch_setup:
|
|
i40e_reset_interrupt_capability(pf);
|
|
del_timer_sync(&pf->service_timer);
|
|
-#ifdef CONFIG_I40E_DCB
|
|
-err_init_dcb:
|
|
-#endif /* CONFIG_I40E_DCB */
|
|
err_mac_addr:
|
|
err_configure_lan_hmc:
|
|
(void)i40e_shutdown_lan_hmc(hw);
|
|
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
index 5ca8c47..206e79d 100644
|
|
--- a/drivers/net/ethernet/intel/igb/igb_main.c
|
|
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
@@ -1613,6 +1613,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
|
|
igb_power_up_phy_copper(&adapter->hw);
|
|
else
|
|
igb_power_up_serdes_link_82575(&adapter->hw);
|
|
+
|
|
+ igb_setup_link(&adapter->hw);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
|
|
index 57e390c..f42c201 100644
|
|
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
|
|
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
|
|
@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|
int tso;
|
|
|
|
if (test_bit(__IXGB_DOWN, &adapter->flags)) {
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
if (skb->len <= 0) {
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
tso = ixgb_tso(adapter, skb);
|
|
if (tso < 0) {
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
|
|
index c4c00d9f..96fc7fe 100644
|
|
--- a/drivers/net/ethernet/marvell/mvneta.c
|
|
+++ b/drivers/net/ethernet/marvell/mvneta.c
|
|
@@ -213,7 +213,7 @@
|
|
/* Various constants */
|
|
|
|
/* Coalescing */
|
|
-#define MVNETA_TXDONE_COAL_PKTS 16
|
|
+#define MVNETA_TXDONE_COAL_PKTS 1
|
|
#define MVNETA_RX_COAL_PKTS 32
|
|
#define MVNETA_RX_COAL_USEC 100
|
|
|
|
@@ -1612,6 +1612,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
|
|
u16 txq_id = skb_get_queue_mapping(skb);
|
|
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
|
|
struct mvneta_tx_desc *tx_desc;
|
|
+ int len = skb->len;
|
|
struct netdev_queue *nq;
|
|
int frags = 0;
|
|
u32 tx_cmd;
|
|
@@ -1675,7 +1676,7 @@ out:
|
|
|
|
u64_stats_update_begin(&stats->syncp);
|
|
stats->tx_packets++;
|
|
- stats->tx_bytes += skb->len;
|
|
+ stats->tx_bytes += len;
|
|
u64_stats_update_end(&stats->syncp);
|
|
} else {
|
|
dev->stats.tx_dropped++;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
|
index 2f83f34..8be0f3e 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
|
@@ -2497,13 +2497,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
|
netif_carrier_off(dev);
|
|
mlx4_en_set_default_moderation(priv);
|
|
|
|
- err = register_netdev(dev);
|
|
- if (err) {
|
|
- en_err(priv, "Netdev registration failed for port %d\n", port);
|
|
- goto out;
|
|
- }
|
|
- priv->registered = 1;
|
|
-
|
|
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
|
|
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
|
|
|
|
@@ -2543,6 +2536,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
|
queue_delayed_work(mdev->workqueue, &priv->service_task,
|
|
SERVICE_TASK_DELAY);
|
|
|
|
+ err = register_netdev(dev);
|
|
+ if (err) {
|
|
+ en_err(priv, "Netdev registration failed for port %d\n", port);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ priv->registered = 1;
|
|
+
|
|
return 0;
|
|
|
|
out:
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
|
|
index 1345703..a467261 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
|
|
@@ -325,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
|
|
}
|
|
}
|
|
}
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
return tx_info->nr_txbb;
|
|
}
|
|
|
|
@@ -810,8 +810,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
|
|
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
|
|
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
|
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
|
|
- MLX4_WQE_CTRL_TCP_UDP_CSUM);
|
|
+ if (!skb->encapsulation)
|
|
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
|
|
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
|
|
+ else
|
|
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
|
|
ring->tx_csum++;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
|
|
index 57428a0..1e8a4b4 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
|
|
@@ -1456,7 +1456,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
|
|
|
|
switch (op) {
|
|
case RES_OP_RESERVE:
|
|
- count = get_param_l(&in_param);
|
|
+ count = get_param_l(&in_param) & 0xffffff;
|
|
align = get_param_h(&in_param);
|
|
err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
|
|
if (err)
|
|
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
|
|
index 68026f7..4a474dd 100644
|
|
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
|
|
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
|
|
@@ -872,6 +872,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
|
|
return -ENOMEM;
|
|
dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
|
|
DMA_BIDIRECTIONAL);
|
|
+ if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
|
|
+ __free_page(dmatest_page);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
/* Run a small DMA test.
|
|
* The magic multipliers to the length tell the firmware
|
|
@@ -1293,6 +1297,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
|
|
int bytes, int watchdog)
|
|
{
|
|
struct page *page;
|
|
+ dma_addr_t bus;
|
|
int idx;
|
|
#if MYRI10GE_ALLOC_SIZE > 4096
|
|
int end_offset;
|
|
@@ -1317,11 +1322,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
|
|
rx->watchdog_needed = 1;
|
|
return;
|
|
}
|
|
+
|
|
+ bus = pci_map_page(mgp->pdev, page, 0,
|
|
+ MYRI10GE_ALLOC_SIZE,
|
|
+ PCI_DMA_FROMDEVICE);
|
|
+ if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
|
|
+ __free_pages(page, MYRI10GE_ALLOC_ORDER);
|
|
+ if (rx->fill_cnt - rx->cnt < 16)
|
|
+ rx->watchdog_needed = 1;
|
|
+ return;
|
|
+ }
|
|
+
|
|
rx->page = page;
|
|
rx->page_offset = 0;
|
|
- rx->bus = pci_map_page(mgp->pdev, page, 0,
|
|
- MYRI10GE_ALLOC_SIZE,
|
|
- PCI_DMA_FROMDEVICE);
|
|
+ rx->bus = bus;
|
|
+
|
|
}
|
|
rx->info[idx].page = rx->page;
|
|
rx->info[idx].page_offset = rx->page_offset;
|
|
@@ -2765,6 +2780,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
|
|
mb();
|
|
}
|
|
|
|
+static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
|
|
+ struct myri10ge_tx_buf *tx, int idx)
|
|
+{
|
|
+ unsigned int len;
|
|
+ int last_idx;
|
|
+
|
|
+ /* Free any DMA resources we've alloced and clear out the skb slot */
|
|
+ last_idx = (idx + 1) & tx->mask;
|
|
+ idx = tx->req & tx->mask;
|
|
+ do {
|
|
+ len = dma_unmap_len(&tx->info[idx], len);
|
|
+ if (len) {
|
|
+ if (tx->info[idx].skb != NULL)
|
|
+ pci_unmap_single(mgp->pdev,
|
|
+ dma_unmap_addr(&tx->info[idx],
|
|
+ bus), len,
|
|
+ PCI_DMA_TODEVICE);
|
|
+ else
|
|
+ pci_unmap_page(mgp->pdev,
|
|
+ dma_unmap_addr(&tx->info[idx],
|
|
+ bus), len,
|
|
+ PCI_DMA_TODEVICE);
|
|
+ dma_unmap_len_set(&tx->info[idx], len, 0);
|
|
+ tx->info[idx].skb = NULL;
|
|
+ }
|
|
+ idx = (idx + 1) & tx->mask;
|
|
+ } while (idx != last_idx);
|
|
+}
|
|
+
|
|
/*
|
|
* Transmit a packet. We need to split the packet so that a single
|
|
* segment does not cross myri10ge->tx_boundary, so this makes segment
|
|
@@ -2788,7 +2832,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
|
|
u32 low;
|
|
__be32 high_swapped;
|
|
unsigned int len;
|
|
- int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
|
|
+ int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
|
|
u16 pseudo_hdr_offset, cksum_offset, queue;
|
|
int cum_len, seglen, boundary, rdma_count;
|
|
u8 flags, odd_flag;
|
|
@@ -2885,9 +2929,12 @@ again:
|
|
|
|
/* map the skb for DMA */
|
|
len = skb_headlen(skb);
|
|
+ bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
|
|
+ if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
|
|
+ goto drop;
|
|
+
|
|
idx = tx->req & tx->mask;
|
|
tx->info[idx].skb = skb;
|
|
- bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
|
|
dma_unmap_addr_set(&tx->info[idx], bus, bus);
|
|
dma_unmap_len_set(&tx->info[idx], len, len);
|
|
|
|
@@ -2986,12 +3033,16 @@ again:
|
|
break;
|
|
|
|
/* map next fragment for DMA */
|
|
- idx = (count + tx->req) & tx->mask;
|
|
frag = &skb_shinfo(skb)->frags[frag_idx];
|
|
frag_idx++;
|
|
len = skb_frag_size(frag);
|
|
bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
|
|
DMA_TO_DEVICE);
|
|
+ if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
|
|
+ myri10ge_unmap_tx_dma(mgp, tx, idx);
|
|
+ goto drop;
|
|
+ }
|
|
+ idx = (count + tx->req) & tx->mask;
|
|
dma_unmap_addr_set(&tx->info[idx], bus, bus);
|
|
dma_unmap_len_set(&tx->info[idx], len, len);
|
|
}
|
|
@@ -3022,31 +3073,8 @@ again:
|
|
return NETDEV_TX_OK;
|
|
|
|
abort_linearize:
|
|
- /* Free any DMA resources we've alloced and clear out the skb
|
|
- * slot so as to not trip up assertions, and to avoid a
|
|
- * double-free if linearizing fails */
|
|
+ myri10ge_unmap_tx_dma(mgp, tx, idx);
|
|
|
|
- last_idx = (idx + 1) & tx->mask;
|
|
- idx = tx->req & tx->mask;
|
|
- tx->info[idx].skb = NULL;
|
|
- do {
|
|
- len = dma_unmap_len(&tx->info[idx], len);
|
|
- if (len) {
|
|
- if (tx->info[idx].skb != NULL)
|
|
- pci_unmap_single(mgp->pdev,
|
|
- dma_unmap_addr(&tx->info[idx],
|
|
- bus), len,
|
|
- PCI_DMA_TODEVICE);
|
|
- else
|
|
- pci_unmap_page(mgp->pdev,
|
|
- dma_unmap_addr(&tx->info[idx],
|
|
- bus), len,
|
|
- PCI_DMA_TODEVICE);
|
|
- dma_unmap_len_set(&tx->info[idx], len, 0);
|
|
- tx->info[idx].skb = NULL;
|
|
- }
|
|
- idx = (idx + 1) & tx->mask;
|
|
- } while (idx != last_idx);
|
|
if (skb_is_gso(skb)) {
|
|
netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
|
|
goto drop;
|
|
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
|
|
index 70849de..5fa076f 100644
|
|
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
|
|
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
|
|
@@ -2390,7 +2390,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
|
|
|
|
work_done = netxen_process_rcv_ring(sds_ring, budget);
|
|
|
|
- if ((work_done < budget) && tx_complete) {
|
|
+ if (!tx_complete)
|
|
+ work_done = budget;
|
|
+
|
|
+ if (work_done < budget) {
|
|
napi_complete(&sds_ring->napi);
|
|
if (test_bit(__NX_DEV_UP, &adapter->state))
|
|
netxen_nic_enable_int(sds_ring);
|
|
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
|
|
index 737c1a8..a3c1daa 100644
|
|
--- a/drivers/net/ethernet/realtek/8139cp.c
|
|
+++ b/drivers/net/ethernet/realtek/8139cp.c
|
|
@@ -899,7 +899,7 @@ out_unlock:
|
|
|
|
return NETDEV_TX_OK;
|
|
out_dma_error:
|
|
- kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
cp->dev->stats.tx_dropped++;
|
|
goto out_unlock;
|
|
}
|
|
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
|
|
index da5972e..8cb2f35 100644
|
|
--- a/drivers/net/ethernet/realtek/8139too.c
|
|
+++ b/drivers/net/ethernet/realtek/8139too.c
|
|
@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
|
|
if (len < ETH_ZLEN)
|
|
memset(tp->tx_buf[entry], 0, ETH_ZLEN);
|
|
skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
} else {
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
dev->stats.tx_dropped++;
|
|
return NETDEV_TX_OK;
|
|
}
|
|
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
|
|
index 3ff7bc3..90c14d1 100644
|
|
--- a/drivers/net/ethernet/realtek/r8169.c
|
|
+++ b/drivers/net/ethernet/realtek/r8169.c
|
|
@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
|
|
tp->TxDescArray + entry);
|
|
if (skb) {
|
|
tp->dev->stats.tx_dropped++;
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
tx_skb->skb = NULL;
|
|
}
|
|
}
|
|
@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
|
|
err_dma_1:
|
|
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
|
|
err_dma_0:
|
|
- dev_kfree_skb(skb);
|
|
+ dev_kfree_skb_any(skb);
|
|
err_update_stats:
|
|
dev->stats.tx_dropped++;
|
|
return NETDEV_TX_OK;
|
|
@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
|
|
tp->tx_stats.packets++;
|
|
tp->tx_stats.bytes += tx_skb->skb->len;
|
|
u64_stats_update_end(&tp->tx_stats.syncp);
|
|
- dev_kfree_skb(tx_skb->skb);
|
|
+ dev_kfree_skb_any(tx_skb->skb);
|
|
tx_skb->skb = NULL;
|
|
}
|
|
dirty_tx++;
|
|
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
|
|
index 6382b7c..e10f5ed 100644
|
|
--- a/drivers/net/ethernet/smsc/smsc911x.c
|
|
+++ b/drivers/net/ethernet/smsc/smsc911x.c
|
|
@@ -1341,6 +1341,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
|
|
spin_unlock(&pdata->mac_lock);
|
|
}
|
|
|
|
+static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
|
|
+{
|
|
+ int rc = 0;
|
|
+
|
|
+ if (!pdata->phy_dev)
|
|
+ return rc;
|
|
+
|
|
+ /* If the internal PHY is in General Power-Down mode, all, except the
|
|
+ * management interface, is powered-down and stays in that condition as
|
|
+ * long as Phy register bit 0.11 is HIGH.
|
|
+ *
|
|
+ * In that case, clear the bit 0.11, so the PHY powers up and we can
|
|
+ * access to the phy registers.
|
|
+ */
|
|
+ rc = phy_read(pdata->phy_dev, MII_BMCR);
|
|
+ if (rc < 0) {
|
|
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ /* If the PHY general power-down bit is not set is not necessary to
|
|
+ * disable the general power down-mode.
|
|
+ */
|
|
+ if (rc & BMCR_PDOWN) {
|
|
+ rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
|
|
+ if (rc < 0) {
|
|
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ usleep_range(1000, 1500);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
|
|
{
|
|
int rc = 0;
|
|
@@ -1414,6 +1450,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
|
|
int ret;
|
|
|
|
/*
|
|
+ * Make sure to power-up the PHY chip before doing a reset, otherwise
|
|
+ * the reset fails.
|
|
+ */
|
|
+ ret = smsc911x_phy_general_power_up(pdata);
|
|
+ if (ret) {
|
|
+ SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /*
|
|
* LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
|
|
* are initialized in a Energy Detect Power-Down mode that prevents
|
|
* the MAC chip to be software reseted. So we have to wakeup the PHY
|
|
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
|
|
index fd411d6..03ae9de 100644
|
|
--- a/drivers/net/ethernet/sun/sunvnet.c
|
|
+++ b/drivers/net/ethernet/sun/sunvnet.c
|
|
@@ -656,7 +656,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
spin_lock_irqsave(&port->vio.lock, flags);
|
|
|
|
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
|
|
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
|
|
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
|
|
if (!netif_queue_stopped(dev)) {
|
|
netif_stop_queue(dev);
|
|
|
|
@@ -704,7 +704,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
dev->stats.tx_bytes += skb->len;
|
|
|
|
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
|
|
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
|
|
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
|
|
netif_stop_queue(dev);
|
|
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
|
|
netif_wake_queue(dev);
|
|
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
|
|
index 921b9df..4eb091d 100644
|
|
--- a/drivers/net/ethernet/ti/cpsw.c
|
|
+++ b/drivers/net/ethernet/ti/cpsw.c
|
|
@@ -596,7 +596,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
|
|
|
|
/* Clear all mcast from ALE */
|
|
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
|
|
- priv->host_port);
|
|
+ priv->host_port, -1);
|
|
|
|
/* Flood All Unicast Packets to Host port */
|
|
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
|
|
@@ -620,6 +620,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
|
|
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
|
|
{
|
|
struct cpsw_priv *priv = netdev_priv(ndev);
|
|
+ int vid;
|
|
+
|
|
+ if (priv->data.dual_emac)
|
|
+ vid = priv->slaves[priv->emac_port].port_vlan;
|
|
+ else
|
|
+ vid = priv->data.default_vlan;
|
|
|
|
if (ndev->flags & IFF_PROMISC) {
|
|
/* Enable promiscuous mode */
|
|
@@ -631,7 +637,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
|
|
}
|
|
|
|
/* Clear all mcast from ALE */
|
|
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
|
|
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
|
|
+ vid);
|
|
|
|
if (!netdev_mc_empty(ndev)) {
|
|
struct netdev_hw_addr *ha;
|
|
@@ -716,6 +723,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
|
|
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
|
|
{
|
|
struct cpsw_priv *priv = dev_id;
|
|
+ int value = irq - priv->irqs_table[0];
|
|
+
|
|
+ /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
|
|
+ * is to make sure we will always write the correct value to the EOI
|
|
+ * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
|
|
+ * for TX Interrupt and 3 for MISC Interrupt.
|
|
+ */
|
|
+ cpdma_ctlr_eoi(priv->dma, value);
|
|
|
|
cpsw_intr_disable(priv);
|
|
if (priv->irq_enabled == true) {
|
|
@@ -745,8 +760,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
|
|
int num_tx, num_rx;
|
|
|
|
num_tx = cpdma_chan_process(priv->txch, 128);
|
|
- if (num_tx)
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
|
|
|
|
num_rx = cpdma_chan_process(priv->rxch, budget);
|
|
if (num_rx < budget) {
|
|
@@ -754,7 +767,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
|
|
|
|
napi_complete(napi);
|
|
cpsw_intr_enable(priv);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
|
|
prim_cpsw = cpsw_get_slave_priv(priv, 0);
|
|
if (prim_cpsw->irq_enabled == false) {
|
|
prim_cpsw->irq_enabled = true;
|
|
@@ -1265,8 +1277,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
|
|
napi_enable(&priv->napi);
|
|
cpdma_ctlr_start(priv->dma);
|
|
cpsw_intr_enable(priv);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
|
|
|
|
if (priv->data.dual_emac)
|
|
priv->slaves[priv->emac_port].open_stat = true;
|
|
@@ -1512,9 +1522,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
|
|
cpdma_chan_start(priv->txch);
|
|
cpdma_ctlr_int_ctrl(priv->dma, true);
|
|
cpsw_intr_enable(priv);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
|
|
-
|
|
}
|
|
|
|
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
|
|
@@ -1560,9 +1567,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
|
|
cpsw_interrupt(ndev->irq, priv);
|
|
cpdma_ctlr_int_ctrl(priv->dma, true);
|
|
cpsw_intr_enable(priv);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
|
|
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
|
|
-
|
|
}
|
|
#endif
|
|
|
|
@@ -1606,6 +1610,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
|
|
if (vid == priv->data.default_vlan)
|
|
return 0;
|
|
|
|
+ if (priv->data.dual_emac) {
|
|
+ /* In dual EMAC, reserved VLAN id should not be used for
|
|
+ * creating VLAN interfaces as this can break the dual
|
|
+ * EMAC port separation
|
|
+ */
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < priv->data.slaves; i++) {
|
|
+ if (vid == priv->slaves[i].port_vlan)
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
|
|
return cpsw_add_vlan_ale_entry(priv, vid);
|
|
}
|
|
@@ -1619,6 +1636,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
|
|
if (vid == priv->data.default_vlan)
|
|
return 0;
|
|
|
|
+ if (priv->data.dual_emac) {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < priv->data.slaves; i++) {
|
|
+ if (vid == priv->slaves[i].port_vlan)
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
|
|
ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
|
|
if (ret != 0)
|
|
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
|
|
index 7f89306..4eceb7e 100644
|
|
--- a/drivers/net/ethernet/ti/cpsw_ale.c
|
|
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
|
|
@@ -236,7 +236,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
|
|
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
|
|
}
|
|
|
|
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
|
|
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
|
|
{
|
|
u32 ale_entry[ALE_ENTRY_WORDS];
|
|
int ret, idx;
|
|
@@ -247,6 +247,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
|
|
if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
|
|
continue;
|
|
|
|
+ /* if vid passed is -1 then remove all multicast entry from
|
|
+ * the table irrespective of vlan id, if a valid vlan id is
|
|
+ * passed then remove only multicast added to that vlan id.
|
|
+ * if vlan id doesn't match then move on to next entry.
|
|
+ */
|
|
+ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
|
|
+ continue;
|
|
+
|
|
if (cpsw_ale_get_mcast(ale_entry)) {
|
|
u8 addr[6];
|
|
|
|
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
|
|
index de409c3..e701358 100644
|
|
--- a/drivers/net/ethernet/ti/cpsw_ale.h
|
|
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
|
|
@@ -88,7 +88,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
|
|
|
|
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
|
|
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
|
|
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
|
|
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
|
|
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
|
|
int flags, u16 vid);
|
|
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
|
|
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
|
|
index d6fce97..3c1c33c 100644
|
|
--- a/drivers/net/hyperv/netvsc_drv.c
|
|
+++ b/drivers/net/hyperv/netvsc_drv.c
|
|
@@ -146,6 +146,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
|
|
struct hv_netvsc_packet *packet;
|
|
int ret;
|
|
unsigned int i, num_pages, npg_data;
|
|
+ u32 skb_length = skb->len;
|
|
|
|
/* Add multipages for skb->data and additional 2 for RNDIS */
|
|
npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
|
|
@@ -216,7 +217,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
|
|
ret = rndis_filter_send(net_device_ctx->device_ctx,
|
|
packet);
|
|
if (ret == 0) {
|
|
- net->stats.tx_bytes += skb->len;
|
|
+ net->stats.tx_bytes += skb_length;
|
|
net->stats.tx_packets++;
|
|
} else {
|
|
kfree(packet);
|
|
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
|
|
index bf0d55e..6adbef8 100644
|
|
--- a/drivers/net/ieee802154/fakehard.c
|
|
+++ b/drivers/net/ieee802154/fakehard.c
|
|
@@ -376,17 +376,20 @@ static int ieee802154fake_probe(struct platform_device *pdev)
|
|
|
|
err = wpan_phy_register(phy);
|
|
if (err)
|
|
- goto out;
|
|
+ goto err_phy_reg;
|
|
|
|
err = register_netdev(dev);
|
|
- if (err < 0)
|
|
- goto out;
|
|
+ if (err)
|
|
+ goto err_netdev_reg;
|
|
|
|
dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
|
|
return 0;
|
|
|
|
-out:
|
|
- unregister_netdev(dev);
|
|
+err_netdev_reg:
|
|
+ wpan_phy_unregister(phy);
|
|
+err_phy_reg:
|
|
+ free_netdev(dev);
|
|
+ wpan_phy_free(phy);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
|
|
index 5adecc5..fbf7dcd 100644
|
|
--- a/drivers/net/macvlan.c
|
|
+++ b/drivers/net/macvlan.c
|
|
@@ -548,6 +548,7 @@ static int macvlan_init(struct net_device *dev)
|
|
(lowerdev->state & MACVLAN_STATE_MASK);
|
|
dev->features = lowerdev->features & MACVLAN_FEATURES;
|
|
dev->features |= ALWAYS_ON_FEATURES;
|
|
+ dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
|
|
dev->gso_max_size = lowerdev->gso_max_size;
|
|
dev->iflink = lowerdev->ifindex;
|
|
dev->hard_header_len = lowerdev->hard_header_len;
|
|
@@ -708,6 +709,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
|
|
features,
|
|
mask);
|
|
features |= ALWAYS_ON_FEATURES;
|
|
+ features &= ~NETIF_F_NETNS_LOCAL;
|
|
|
|
return features;
|
|
}
|
|
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
|
|
index 3381c4f..e8c21f9 100644
|
|
--- a/drivers/net/macvtap.c
|
|
+++ b/drivers/net/macvtap.c
|
|
@@ -16,6 +16,7 @@
|
|
#include <linux/idr.h>
|
|
#include <linux/fs.h>
|
|
|
|
+#include <net/ipv6.h>
|
|
#include <net/net_namespace.h>
|
|
#include <net/rtnetlink.h>
|
|
#include <net/sock.h>
|
|
@@ -112,17 +113,15 @@ out:
|
|
return err;
|
|
}
|
|
|
|
+/* Requires RTNL */
|
|
static int macvtap_set_queue(struct net_device *dev, struct file *file,
|
|
struct macvtap_queue *q)
|
|
{
|
|
struct macvlan_dev *vlan = netdev_priv(dev);
|
|
- int err = -EBUSY;
|
|
|
|
- rtnl_lock();
|
|
if (vlan->numqueues == MAX_MACVTAP_QUEUES)
|
|
- goto out;
|
|
+ return -EBUSY;
|
|
|
|
- err = 0;
|
|
rcu_assign_pointer(q->vlan, vlan);
|
|
rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
|
|
sock_hold(&q->sk);
|
|
@@ -136,9 +135,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
|
|
vlan->numvtaps++;
|
|
vlan->numqueues++;
|
|
|
|
-out:
|
|
- rtnl_unlock();
|
|
- return err;
|
|
+ return 0;
|
|
}
|
|
|
|
static int macvtap_disable_queue(struct macvtap_queue *q)
|
|
@@ -454,11 +451,12 @@ static void macvtap_sock_destruct(struct sock *sk)
|
|
static int macvtap_open(struct inode *inode, struct file *file)
|
|
{
|
|
struct net *net = current->nsproxy->net_ns;
|
|
- struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
|
|
+ struct net_device *dev;
|
|
struct macvtap_queue *q;
|
|
- int err;
|
|
+ int err = -ENODEV;
|
|
|
|
- err = -ENODEV;
|
|
+ rtnl_lock();
|
|
+ dev = dev_get_by_macvtap_minor(iminor(inode));
|
|
if (!dev)
|
|
goto out;
|
|
|
|
@@ -498,6 +496,7 @@ out:
|
|
if (dev)
|
|
dev_put(dev);
|
|
|
|
+ rtnl_unlock();
|
|
return err;
|
|
}
|
|
|
|
@@ -572,6 +571,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
|
|
break;
|
|
case VIRTIO_NET_HDR_GSO_UDP:
|
|
gso_type = SKB_GSO_UDP;
|
|
+ if (skb->protocol == htons(ETH_P_IPV6))
|
|
+ ipv6_proxy_select_ident(skb);
|
|
break;
|
|
default:
|
|
return -EINVAL;
|
|
@@ -628,18 +629,23 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
|
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
|
vnet_hdr->csum_start = skb_checksum_start_offset(skb);
|
|
+ if (vlan_tx_tag_present(skb))
|
|
+ vnet_hdr->csum_start += VLAN_HLEN;
|
|
vnet_hdr->csum_offset = skb->csum_offset;
|
|
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
|
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
|
|
} /* else everything is zero */
|
|
}
|
|
|
|
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
|
|
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
|
|
+
|
|
/* Get packet from user space buffer */
|
|
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
|
|
const struct iovec *iv, unsigned long total_len,
|
|
size_t count, int noblock)
|
|
{
|
|
- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
|
|
+ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
|
|
struct sk_buff *skb;
|
|
struct macvlan_dev *vlan;
|
|
unsigned long len = total_len;
|
|
@@ -698,7 +704,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
|
|
linear = vnet_hdr.hdr_len;
|
|
}
|
|
|
|
- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
|
|
+ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
|
|
linear, noblock, &err);
|
|
if (!skb)
|
|
goto err;
|
|
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
|
|
index 98e7cbf..0be3f9d 100644
|
|
--- a/drivers/net/phy/dp83640.c
|
|
+++ b/drivers/net/phy/dp83640.c
|
|
@@ -45,7 +45,7 @@
|
|
#define PSF_TX 0x1000
|
|
#define EXT_EVENT 1
|
|
#define CAL_EVENT 7
|
|
-#define CAL_TRIGGER 7
|
|
+#define CAL_TRIGGER 1
|
|
#define PER_TRIGGER 6
|
|
|
|
#define MII_DP83640_MICR 0x11
|
|
@@ -442,7 +442,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
|
|
else
|
|
evnt |= EVNT_RISE;
|
|
}
|
|
+ mutex_lock(&clock->extreg_lock);
|
|
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
|
|
+ mutex_unlock(&clock->extreg_lock);
|
|
return 0;
|
|
|
|
case PTP_CLK_REQ_PEROUT:
|
|
@@ -463,6 +465,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
|
|
|
|
static void enable_status_frames(struct phy_device *phydev, bool on)
|
|
{
|
|
+ struct dp83640_private *dp83640 = phydev->priv;
|
|
+ struct dp83640_clock *clock = dp83640->clock;
|
|
u16 cfg0 = 0, ver;
|
|
|
|
if (on)
|
|
@@ -470,9 +474,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
|
|
|
|
ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
|
|
|
|
+ mutex_lock(&clock->extreg_lock);
|
|
+
|
|
ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
|
|
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
|
|
|
|
+ mutex_unlock(&clock->extreg_lock);
|
|
+
|
|
if (!phydev->attached_dev) {
|
|
pr_warn("expected to find an attached netdevice\n");
|
|
return;
|
|
@@ -1063,11 +1071,18 @@ static int dp83640_config_init(struct phy_device *phydev)
|
|
|
|
if (clock->chosen && !list_empty(&clock->phylist))
|
|
recalibrate(clock);
|
|
- else
|
|
+ else {
|
|
+ mutex_lock(&clock->extreg_lock);
|
|
enable_broadcast(phydev, clock->page, 1);
|
|
+ mutex_unlock(&clock->extreg_lock);
|
|
+ }
|
|
|
|
enable_status_frames(phydev, true);
|
|
+
|
|
+ mutex_lock(&clock->extreg_lock);
|
|
ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
|
|
+ mutex_unlock(&clock->extreg_lock);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
|
|
index 76d96b9..65cfc5a 100644
|
|
--- a/drivers/net/phy/phy.c
|
|
+++ b/drivers/net/phy/phy.c
|
|
@@ -194,6 +194,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
|
|
}
|
|
|
|
/**
|
|
+ * phy_check_valid - check if there is a valid PHY setting which matches
|
|
+ * speed, duplex, and feature mask
|
|
+ * @speed: speed to match
|
|
+ * @duplex: duplex to match
|
|
+ * @features: A mask of the valid settings
|
|
+ *
|
|
+ * Description: Returns true if there is a valid setting, false otherwise.
|
|
+ */
|
|
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
|
|
+{
|
|
+ unsigned int idx;
|
|
+
|
|
+ idx = phy_find_valid(phy_find_setting(speed, duplex), features);
|
|
+
|
|
+ return settings[idx].speed == speed && settings[idx].duplex == duplex &&
|
|
+ (settings[idx].setting & features);
|
|
+}
|
|
+
|
|
+/**
|
|
* phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
|
|
* @phydev: the target phy_device struct
|
|
*
|
|
@@ -946,16 +965,17 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
|
|
{
|
|
/* According to 802.3az,the EEE is supported only in full duplex-mode.
|
|
* Also EEE feature is active when core is operating with MII, GMII
|
|
- * or RGMII.
|
|
+ * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
|
|
+ * should return an error if they do not support EEE.
|
|
*/
|
|
if ((phydev->duplex == DUPLEX_FULL) &&
|
|
((phydev->interface == PHY_INTERFACE_MODE_MII) ||
|
|
(phydev->interface == PHY_INTERFACE_MODE_GMII) ||
|
|
- (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
|
|
+ (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
|
|
+ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID))) {
|
|
int eee_lp, eee_cap, eee_adv;
|
|
u32 lp, cap, adv;
|
|
int status;
|
|
- unsigned int idx;
|
|
|
|
/* Read phy status to properly get the right settings */
|
|
status = phy_read_status(phydev);
|
|
@@ -987,8 +1007,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
|
|
|
|
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
|
|
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
|
|
- idx = phy_find_setting(phydev->speed, phydev->duplex);
|
|
- if (!(lp & adv & settings[idx].setting))
|
|
+ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
|
|
return -EPROTONOSUPPORT;
|
|
|
|
if (clk_stop_enable) {
|
|
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
|
|
index 701cc2e..db7017c 100644
|
|
--- a/drivers/net/phy/phy_device.c
|
|
+++ b/drivers/net/phy/phy_device.c
|
|
@@ -353,7 +353,7 @@ int phy_device_register(struct phy_device *phydev)
|
|
phydev->bus->phy_map[phydev->addr] = phydev;
|
|
|
|
/* Run all of the fixups for this PHY */
|
|
- err = phy_init_hw(phydev);
|
|
+ err = phy_scan_fixups(phydev);
|
|
if (err) {
|
|
pr_err("PHY %d failed to initialize\n", phydev->addr);
|
|
goto out;
|
|
@@ -765,10 +765,11 @@ static int genphy_config_advert(struct phy_device *phydev)
|
|
if (phydev->supported & (SUPPORTED_1000baseT_Half |
|
|
SUPPORTED_1000baseT_Full)) {
|
|
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
|
|
- if (adv != oldadv)
|
|
- changed = 1;
|
|
}
|
|
|
|
+ if (adv != oldadv)
|
|
+ changed = 1;
|
|
+
|
|
err = phy_write(phydev, MII_CTRL1000, adv);
|
|
if (err < 0)
|
|
return err;
|
|
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
|
|
index 602c625..b5edc7f 100644
|
|
--- a/drivers/net/ppp/ppp_deflate.c
|
|
+++ b/drivers/net/ppp/ppp_deflate.c
|
|
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
|
|
/*
|
|
* See if we managed to reduce the size of the packet.
|
|
*/
|
|
- if (olen < isize) {
|
|
+ if (olen < isize && olen <= osize) {
|
|
state->stats.comp_bytes += olen;
|
|
state->stats.comp_packets++;
|
|
} else {
|
|
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
|
|
index 72ff14b..5a1897d 100644
|
|
--- a/drivers/net/ppp/ppp_generic.c
|
|
+++ b/drivers/net/ppp/ppp_generic.c
|
|
@@ -601,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
if (file == ppp->owner)
|
|
ppp_shutdown_interface(ppp);
|
|
}
|
|
- if (atomic_long_read(&file->f_count) <= 2) {
|
|
+ if (atomic_long_read(&file->f_count) < 2) {
|
|
ppp_release(NULL, file);
|
|
err = 0;
|
|
} else
|
|
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
|
|
index 0180531..1dc628f 100644
|
|
--- a/drivers/net/ppp/pptp.c
|
|
+++ b/drivers/net/ppp/pptp.c
|
|
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
|
|
nf_reset(skb);
|
|
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
- ip_select_ident(skb, &rt->dst, NULL);
|
|
+ ip_select_ident(skb, NULL);
|
|
ip_send_check(iph);
|
|
|
|
ip_local_out(skb);
|
|
@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
|
|
int len = sizeof(struct sockaddr_pppox);
|
|
struct sockaddr_pppox sp;
|
|
|
|
- sp.sa_family = AF_PPPOX;
|
|
+ memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
|
|
+
|
|
+ sp.sa_family = AF_PPPOX;
|
|
sp.sa_protocol = PX_PROTO_PPTP;
|
|
sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
|
|
|
|
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
|
|
index 26d8c29..c28e2da 100644
|
|
--- a/drivers/net/team/team.c
|
|
+++ b/drivers/net/team/team.c
|
|
@@ -42,9 +42,7 @@
|
|
|
|
static struct team_port *team_port_get_rcu(const struct net_device *dev)
|
|
{
|
|
- struct team_port *port = rcu_dereference(dev->rx_handler_data);
|
|
-
|
|
- return team_port_exists(dev) ? port : NULL;
|
|
+ return rcu_dereference(dev->rx_handler_data);
|
|
}
|
|
|
|
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
|
|
@@ -629,6 +627,7 @@ static int team_change_mode(struct team *team, const char *kind)
|
|
static void team_notify_peers_work(struct work_struct *work)
|
|
{
|
|
struct team *team;
|
|
+ int val;
|
|
|
|
team = container_of(work, struct team, notify_peers.dw.work);
|
|
|
|
@@ -636,9 +635,14 @@ static void team_notify_peers_work(struct work_struct *work)
|
|
schedule_delayed_work(&team->notify_peers.dw, 0);
|
|
return;
|
|
}
|
|
+ val = atomic_dec_if_positive(&team->notify_peers.count_pending);
|
|
+ if (val < 0) {
|
|
+ rtnl_unlock();
|
|
+ return;
|
|
+ }
|
|
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
|
|
rtnl_unlock();
|
|
- if (!atomic_dec_and_test(&team->notify_peers.count_pending))
|
|
+ if (val)
|
|
schedule_delayed_work(&team->notify_peers.dw,
|
|
msecs_to_jiffies(team->notify_peers.interval));
|
|
}
|
|
@@ -647,7 +651,7 @@ static void team_notify_peers(struct team *team)
|
|
{
|
|
if (!team->notify_peers.count || !netif_running(team->dev))
|
|
return;
|
|
- atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
|
|
+ atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
|
|
schedule_delayed_work(&team->notify_peers.dw, 0);
|
|
}
|
|
|
|
@@ -669,6 +673,7 @@ static void team_notify_peers_fini(struct team *team)
|
|
static void team_mcast_rejoin_work(struct work_struct *work)
|
|
{
|
|
struct team *team;
|
|
+ int val;
|
|
|
|
team = container_of(work, struct team, mcast_rejoin.dw.work);
|
|
|
|
@@ -676,9 +681,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
|
|
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
|
|
return;
|
|
}
|
|
+ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
|
|
+ if (val < 0) {
|
|
+ rtnl_unlock();
|
|
+ return;
|
|
+ }
|
|
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
|
|
rtnl_unlock();
|
|
- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
|
|
+ if (val)
|
|
schedule_delayed_work(&team->mcast_rejoin.dw,
|
|
msecs_to_jiffies(team->mcast_rejoin.interval));
|
|
}
|
|
@@ -687,7 +697,7 @@ static void team_mcast_rejoin(struct team *team)
|
|
{
|
|
if (!team->mcast_rejoin.count || !netif_running(team->dev))
|
|
return;
|
|
- atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
|
|
+ atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
|
|
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
|
|
}
|
|
|
|
@@ -1713,11 +1723,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
|
|
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
|
|
return -EADDRNOTAVAIL;
|
|
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
|
- rcu_read_lock();
|
|
- list_for_each_entry_rcu(port, &team->port_list, list)
|
|
+ mutex_lock(&team->lock);
|
|
+ list_for_each_entry(port, &team->port_list, list)
|
|
if (team->ops.port_change_dev_addr)
|
|
team->ops.port_change_dev_addr(team, port);
|
|
- rcu_read_unlock();
|
|
+ mutex_unlock(&team->lock);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
|
|
index 26f8635..ec63314 100644
|
|
--- a/drivers/net/tun.c
|
|
+++ b/drivers/net/tun.c
|
|
@@ -65,6 +65,7 @@
|
|
#include <linux/nsproxy.h>
|
|
#include <linux/virtio_net.h>
|
|
#include <linux/rcupdate.h>
|
|
+#include <net/ipv6.h>
|
|
#include <net/net_namespace.h>
|
|
#include <net/netns/generic.h>
|
|
#include <net/rtnetlink.h>
|
|
@@ -1140,6 +1141,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
|
break;
|
|
}
|
|
|
|
+ skb_reset_network_header(skb);
|
|
+
|
|
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
|
|
pr_debug("GSO!\n");
|
|
switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
|
|
@@ -1151,6 +1154,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
|
break;
|
|
case VIRTIO_NET_HDR_GSO_UDP:
|
|
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
|
|
+ if (skb->protocol == htons(ETH_P_IPV6))
|
|
+ ipv6_proxy_select_ident(skb);
|
|
break;
|
|
default:
|
|
tun->dev->stats.rx_frame_errors++;
|
|
@@ -1180,7 +1185,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
|
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
|
|
}
|
|
|
|
- skb_reset_network_header(skb);
|
|
skb_probe_transport_header(skb, 0);
|
|
|
|
rxhash = skb_get_hash(skb);
|
|
@@ -1222,6 +1226,10 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|
struct tun_pi pi = { 0, skb->protocol };
|
|
ssize_t total = 0;
|
|
int vlan_offset = 0, copied;
|
|
+ int vlan_hlen = 0;
|
|
+
|
|
+ if (vlan_tx_tag_present(skb))
|
|
+ vlan_hlen = VLAN_HLEN;
|
|
|
|
if (!(tun->flags & TUN_NO_PI)) {
|
|
if ((len -= sizeof(pi)) < 0)
|
|
@@ -1273,7 +1281,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
|
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
|
- gso.csum_start = skb_checksum_start_offset(skb);
|
|
+ gso.csum_start = skb_checksum_start_offset(skb) +
|
|
+ vlan_hlen;
|
|
gso.csum_offset = skb->csum_offset;
|
|
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
|
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
|
|
@@ -1286,10 +1295,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|
}
|
|
|
|
copied = total;
|
|
- total += skb->len;
|
|
- if (!vlan_tx_tag_present(skb)) {
|
|
- len = min_t(int, skb->len, len);
|
|
- } else {
|
|
+ len = min_t(int, skb->len + vlan_hlen, len);
|
|
+ total += skb->len + vlan_hlen;
|
|
+ if (vlan_hlen) {
|
|
int copy, ret;
|
|
struct {
|
|
__be16 h_vlan_proto;
|
|
@@ -1300,8 +1308,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
|
veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
|
|
|
|
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
|
|
- len = min_t(int, skb->len + VLAN_HLEN, len);
|
|
- total += VLAN_HLEN;
|
|
|
|
copy = min_t(int, vlan_offset, len);
|
|
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
|
|
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
|
|
index 054e59c..8cee173 100644
|
|
--- a/drivers/net/usb/ax88179_178a.c
|
|
+++ b/drivers/net/usb/ax88179_178a.c
|
|
@@ -696,6 +696,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
|
|
{
|
|
struct usbnet *dev = netdev_priv(net);
|
|
struct sockaddr *addr = p;
|
|
+ int ret;
|
|
|
|
if (netif_running(net))
|
|
return -EBUSY;
|
|
@@ -705,8 +706,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
|
|
memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
|
|
|
|
/* Set the MAC address */
|
|
- return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
|
|
+ ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
|
|
ETH_ALEN, net->dev_addr);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static const struct net_device_ops ax88179_netdev_ops = {
|
|
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
|
|
index 3eed708..fe48f4c 100644
|
|
--- a/drivers/net/usb/cx82310_eth.c
|
|
+++ b/drivers/net/usb/cx82310_eth.c
|
|
@@ -300,9 +300,18 @@ static const struct driver_info cx82310_info = {
|
|
.tx_fixup = cx82310_tx_fixup,
|
|
};
|
|
|
|
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
|
|
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
|
|
+ USB_DEVICE_ID_MATCH_DEV_INFO, \
|
|
+ .idVendor = (vend), \
|
|
+ .idProduct = (prod), \
|
|
+ .bDeviceClass = (cl), \
|
|
+ .bDeviceSubClass = (sc), \
|
|
+ .bDeviceProtocol = (pr)
|
|
+
|
|
static const struct usb_device_id products[] = {
|
|
{
|
|
- USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
|
|
+ USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
|
|
.driver_info = (unsigned long) &cx82310_info
|
|
},
|
|
{ },
|
|
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
|
|
index 3d18bb0..1bfe0fc 100644
|
|
--- a/drivers/net/usb/plusb.c
|
|
+++ b/drivers/net/usb/plusb.c
|
|
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
|
|
}, {
|
|
USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
|
|
.driver_info = (unsigned long) &prolific_info,
|
|
+}, {
|
|
+ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
|
|
+ * Host-to-Host Cable
|
|
+ */
|
|
+ .driver_info = (unsigned long) &prolific_info,
|
|
},
|
|
|
|
{ }, // END
|
|
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
|
|
index d510f1d..db21af8 100644
|
|
--- a/drivers/net/usb/qmi_wwan.c
|
|
+++ b/drivers/net/usb/qmi_wwan.c
|
|
@@ -769,6 +769,7 @@ static const struct usb_device_id products[] = {
|
|
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
|
|
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
|
|
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
|
|
+ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
|
|
|
|
/* 4. Gobi 1000 devices */
|
|
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
|
|
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
|
|
index 40ad25d..5988910 100644
|
|
--- a/drivers/net/vxlan.c
|
|
+++ b/drivers/net/vxlan.c
|
|
@@ -279,13 +279,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
|
|
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
|
|
}
|
|
|
|
-/* Find VXLAN socket based on network namespace and UDP port */
|
|
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
|
|
+/* Find VXLAN socket based on network namespace, address family and UDP port */
|
|
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
|
|
+ sa_family_t family, __be16 port)
|
|
{
|
|
struct vxlan_sock *vs;
|
|
|
|
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
|
|
- if (inet_sk(vs->sock->sk)->inet_sport == port)
|
|
+ if (inet_sk(vs->sock->sk)->inet_sport == port &&
|
|
+ inet_sk(vs->sock->sk)->sk.sk_family == family)
|
|
return vs;
|
|
}
|
|
return NULL;
|
|
@@ -304,11 +306,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
|
|
}
|
|
|
|
/* Look up VNI in a per net namespace table */
|
|
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
|
|
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
|
|
+ sa_family_t family, __be16 port)
|
|
{
|
|
struct vxlan_sock *vs;
|
|
|
|
- vs = vxlan_find_sock(net, port);
|
|
+ vs = vxlan_find_sock(net, family, port);
|
|
if (!vs)
|
|
return NULL;
|
|
|
|
@@ -1334,7 +1337,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
|
|
} else if (vxlan->flags & VXLAN_F_L3MISS) {
|
|
union vxlan_addr ipa = {
|
|
.sin.sin_addr.s_addr = tip,
|
|
- .sa.sa_family = AF_INET,
|
|
+ .sin.sin_family = AF_INET,
|
|
};
|
|
|
|
vxlan_ip_miss(dev, &ipa);
|
|
@@ -1447,9 +1450,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
|
|
if (!in6_dev)
|
|
goto out;
|
|
|
|
- if (!pskb_may_pull(skb, skb->len))
|
|
- goto out;
|
|
-
|
|
iphdr = ipv6_hdr(skb);
|
|
saddr = &iphdr->saddr;
|
|
daddr = &iphdr->daddr;
|
|
@@ -1495,7 +1495,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
|
|
} else if (vxlan->flags & VXLAN_F_L3MISS) {
|
|
union vxlan_addr ipa = {
|
|
.sin6.sin6_addr = msg->target,
|
|
- .sa.sa_family = AF_INET6,
|
|
+ .sin6.sin6_family = AF_INET6,
|
|
};
|
|
|
|
vxlan_ip_miss(dev, &ipa);
|
|
@@ -1528,7 +1528,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
|
|
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
|
|
union vxlan_addr ipa = {
|
|
.sin.sin_addr.s_addr = pip->daddr,
|
|
- .sa.sa_family = AF_INET,
|
|
+ .sin.sin_family = AF_INET,
|
|
};
|
|
|
|
vxlan_ip_miss(dev, &ipa);
|
|
@@ -1549,7 +1549,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
|
|
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
|
|
union vxlan_addr ipa = {
|
|
.sin6.sin6_addr = pip6->daddr,
|
|
- .sa.sa_family = AF_INET6,
|
|
+ .sin6.sin6_family = AF_INET6,
|
|
};
|
|
|
|
vxlan_ip_miss(dev, &ipa);
|
|
@@ -1770,6 +1770,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
|
|
struct pcpu_sw_netstats *tx_stats, *rx_stats;
|
|
union vxlan_addr loopback;
|
|
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
|
|
+ struct net_device *dev = skb->dev;
|
|
+ int len = skb->len;
|
|
|
|
tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
|
|
rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
|
|
@@ -1793,16 +1795,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
|
|
|
|
u64_stats_update_begin(&tx_stats->syncp);
|
|
tx_stats->tx_packets++;
|
|
- tx_stats->tx_bytes += skb->len;
|
|
+ tx_stats->tx_bytes += len;
|
|
u64_stats_update_end(&tx_stats->syncp);
|
|
|
|
if (netif_rx(skb) == NET_RX_SUCCESS) {
|
|
u64_stats_update_begin(&rx_stats->syncp);
|
|
rx_stats->rx_packets++;
|
|
- rx_stats->rx_bytes += skb->len;
|
|
+ rx_stats->rx_bytes += len;
|
|
u64_stats_update_end(&rx_stats->syncp);
|
|
} else {
|
|
- skb->dev->stats.rx_dropped++;
|
|
+ dev->stats.rx_dropped++;
|
|
}
|
|
}
|
|
|
|
@@ -1873,7 +1875,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
|
|
struct vxlan_dev *dst_vxlan;
|
|
|
|
ip_rt_put(rt);
|
|
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
|
|
+ dst_vxlan = vxlan_find_vni(dev_net(dev), vni,
|
|
+ dst->sa.sa_family, dst_port);
|
|
if (!dst_vxlan)
|
|
goto tx_error;
|
|
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
|
|
@@ -1926,7 +1929,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
|
|
struct vxlan_dev *dst_vxlan;
|
|
|
|
dst_release(ndst);
|
|
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
|
|
+ dst_vxlan = vxlan_find_vni(dev_net(dev), vni,
|
|
+ dst->sa.sa_family, dst_port);
|
|
if (!dst_vxlan)
|
|
goto tx_error;
|
|
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
|
|
@@ -1977,7 +1981,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
return arp_reduce(dev, skb);
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
|
|
- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
|
|
+ pskb_may_pull(skb, sizeof(struct ipv6hdr)
|
|
+ + sizeof(struct nd_msg)) &&
|
|
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
|
|
struct nd_msg *msg;
|
|
|
|
@@ -1986,6 +1991,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
|
|
return neigh_reduce(dev, skb);
|
|
}
|
|
+ eth = eth_hdr(skb);
|
|
#endif
|
|
}
|
|
|
|
@@ -2082,6 +2088,7 @@ static int vxlan_init(struct net_device *dev)
|
|
{
|
|
struct vxlan_dev *vxlan = netdev_priv(dev);
|
|
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
|
|
+ bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
|
|
struct vxlan_sock *vs;
|
|
int i;
|
|
|
|
@@ -2097,10 +2104,10 @@ static int vxlan_init(struct net_device *dev)
|
|
|
|
|
|
spin_lock(&vn->sock_lock);
|
|
- vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
|
|
- if (vs) {
|
|
+ vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET,
|
|
+ vxlan->dst_port);
|
|
+ if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
|
|
/* If we have a socket with same port already, reuse it */
|
|
- atomic_inc(&vs->refcnt);
|
|
vxlan_vs_add_dev(vs, vxlan);
|
|
} else {
|
|
/* otherwise make new socket outside of RTNL */
|
|
@@ -2565,13 +2572,10 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
|
|
return vs;
|
|
|
|
spin_lock(&vn->sock_lock);
|
|
- vs = vxlan_find_sock(net, port);
|
|
- if (vs) {
|
|
- if (vs->rcv == rcv)
|
|
- atomic_inc(&vs->refcnt);
|
|
- else
|
|
+ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
|
|
+ if (vs && ((vs->rcv != rcv) ||
|
|
+ !atomic_add_unless(&vs->refcnt, 1, 0)))
|
|
vs = ERR_PTR(-EBUSY);
|
|
- }
|
|
spin_unlock(&vn->sock_lock);
|
|
|
|
if (!vs)
|
|
@@ -2711,7 +2715,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
|
|
if (data[IFLA_VXLAN_PORT])
|
|
vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
|
|
|
|
- if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
|
|
+ if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
|
|
+ vxlan->dst_port)) {
|
|
pr_info("duplicate VNI %u\n", vni);
|
|
return -EEXIST;
|
|
}
|
|
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
|
|
index 0583c69..ddaad71 100644
|
|
--- a/drivers/net/wireless/ath/ath5k/qcu.c
|
|
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
|
|
@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
|
|
} else {
|
|
switch (queue_type) {
|
|
case AR5K_TX_QUEUE_DATA:
|
|
- for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
|
|
- ah->ah_txq[queue].tqi_type !=
|
|
- AR5K_TX_QUEUE_INACTIVE; queue++) {
|
|
-
|
|
- if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
|
|
- return -EINVAL;
|
|
- }
|
|
+ queue = queue_info->tqi_subtype;
|
|
break;
|
|
case AR5K_TX_QUEUE_UAPSD:
|
|
queue = AR5K_TX_QUEUE_ID_UAPSD;
|
|
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
|
|
index a3399c4..b9b651e 100644
|
|
--- a/drivers/net/wireless/ath/ath5k/reset.c
|
|
+++ b/drivers/net/wireless/ath/ath5k/reset.c
|
|
@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
|
|
regval = ioread32(reg);
|
|
iowrite32(regval | val, reg);
|
|
regval = ioread32(reg);
|
|
- usleep_range(100, 150);
|
|
+ udelay(100); /* NB: should be atomic */
|
|
|
|
/* Bring BB/MAC out of reset */
|
|
iowrite32(regval & ~val, reg);
|
|
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
|
|
index 09facba..390c2de 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
|
|
@@ -647,6 +647,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
|
|
ah->enabled_cals |= TX_CL_CAL;
|
|
else
|
|
ah->enabled_cals &= ~TX_CL_CAL;
|
|
+
|
|
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
|
|
+ if (ah->is_clk_25mhz) {
|
|
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
|
|
+ REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
|
|
+ REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
|
|
+ } else {
|
|
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
|
|
+ REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
|
|
+ REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
|
|
+ }
|
|
+ udelay(100);
|
|
+ }
|
|
}
|
|
|
|
static void ar9003_hw_prog_ini(struct ath_hw *ah,
|
|
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
|
|
index 9078a6c..dcc1494 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/hw.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/hw.c
|
|
@@ -858,19 +858,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
|
|
udelay(RTC_PLL_SETTLE_DELAY);
|
|
|
|
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
|
|
-
|
|
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
|
|
- if (ah->is_clk_25mhz) {
|
|
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
|
|
- REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
|
|
- REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
|
|
- } else {
|
|
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
|
|
- REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
|
|
- REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
|
|
- }
|
|
- udelay(100);
|
|
- }
|
|
}
|
|
|
|
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
|
|
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
|
|
index 0acd4b5..32ae0a4 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/hw.h
|
|
+++ b/drivers/net/wireless/ath/ath9k/hw.h
|
|
@@ -216,8 +216,8 @@
|
|
#define AH_WOW_BEACON_MISS BIT(3)
|
|
|
|
enum ath_hw_txq_subtype {
|
|
- ATH_TXQ_AC_BE = 0,
|
|
- ATH_TXQ_AC_BK = 1,
|
|
+ ATH_TXQ_AC_BK = 0,
|
|
+ ATH_TXQ_AC_BE = 1,
|
|
ATH_TXQ_AC_VI = 2,
|
|
ATH_TXQ_AC_VO = 3,
|
|
};
|
|
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
|
|
index 5f72758..8f93ed3 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/mac.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/mac.c
|
|
@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
|
|
q = ATH9K_NUM_TX_QUEUES - 3;
|
|
break;
|
|
case ATH9K_TX_QUEUE_DATA:
|
|
- for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
|
|
- if (ah->txq[q].tqi_type ==
|
|
- ATH9K_TX_QUEUE_INACTIVE)
|
|
- break;
|
|
- if (q == ATH9K_NUM_TX_QUEUES) {
|
|
- ath_err(common, "No available TX queue\n");
|
|
- return -1;
|
|
- }
|
|
+ q = qinfo->tqi_subtype;
|
|
break;
|
|
default:
|
|
ath_err(common, "Invalid TX queue type: %u\n", type);
|
|
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
|
|
index a8e6d41..5d75b569 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/main.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/main.c
|
|
@@ -205,11 +205,13 @@ static bool ath_prepare_reset(struct ath_softc *sc)
|
|
ath_stop_ani(sc);
|
|
ath9k_hw_disable_interrupts(ah);
|
|
|
|
- if (!ath_drain_all_txq(sc))
|
|
- ret = false;
|
|
-
|
|
- if (!ath_stoprecv(sc))
|
|
- ret = false;
|
|
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
|
|
+ ret &= ath_stoprecv(sc);
|
|
+ ret &= ath_drain_all_txq(sc);
|
|
+ } else {
|
|
+ ret &= ath_drain_all_txq(sc);
|
|
+ ret &= ath_stoprecv(sc);
|
|
+ }
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
index 0526ddf..0fe7674 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/xmit.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
@@ -890,6 +890,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
|
|
|
|
tx_info = IEEE80211_SKB_CB(skb);
|
|
tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
|
+
|
|
+ /*
|
|
+ * No aggregation session is running, but there may be frames
|
|
+ * from a previous session or a failed attempt in the queue.
|
|
+ * Send them out as normal data frames
|
|
+ */
|
|
+ if (!tid->active)
|
|
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
|
+
|
|
if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
|
|
bf->bf_state.bf_type = 0;
|
|
return bf;
|
|
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
|
|
index 8596aba..237d0cd 100644
|
|
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
|
|
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
|
|
@@ -256,6 +256,7 @@ struct ar9170 {
|
|
atomic_t rx_work_urbs;
|
|
atomic_t rx_pool_urbs;
|
|
kernel_ulong_t features;
|
|
+ bool usb_ep_cmd_is_bulk;
|
|
|
|
/* firmware settings */
|
|
struct completion fw_load_wait;
|
|
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
|
|
index ca115f3..bc931f6 100644
|
|
--- a/drivers/net/wireless/ath/carl9170/usb.c
|
|
+++ b/drivers/net/wireless/ath/carl9170/usb.c
|
|
@@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
|
|
goto err_free;
|
|
}
|
|
|
|
- usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
|
|
- AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
|
|
- carl9170_usb_cmd_complete, ar, 1);
|
|
+ if (ar->usb_ep_cmd_is_bulk)
|
|
+ usb_fill_bulk_urb(urb, ar->udev,
|
|
+ usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
|
|
+ cmd, cmd->hdr.len + 4,
|
|
+ carl9170_usb_cmd_complete, ar);
|
|
+ else
|
|
+ usb_fill_int_urb(urb, ar->udev,
|
|
+ usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
|
|
+ cmd, cmd->hdr.len + 4,
|
|
+ carl9170_usb_cmd_complete, ar, 1);
|
|
|
|
if (free_buf)
|
|
urb->transfer_flags |= URB_FREE_BUFFER;
|
|
@@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
|
|
static int carl9170_usb_probe(struct usb_interface *intf,
|
|
const struct usb_device_id *id)
|
|
{
|
|
+ struct usb_endpoint_descriptor *ep;
|
|
struct ar9170 *ar;
|
|
struct usb_device *udev;
|
|
- int err;
|
|
+ int i, err;
|
|
|
|
err = usb_reset_device(interface_to_usbdev(intf));
|
|
if (err)
|
|
@@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
|
|
ar->intf = intf;
|
|
ar->features = id->driver_info;
|
|
|
|
+ /* We need to remember the type of endpoint 4 because it differs
|
|
+ * between high- and full-speed configuration. The high-speed
|
|
+ * configuration specifies it as interrupt and the full-speed
|
|
+ * configuration as bulk endpoint. This information is required
|
|
+ * later when sending urbs to that endpoint.
|
|
+ */
|
|
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
|
|
+ ep = &intf->cur_altsetting->endpoint[i].desc;
|
|
+
|
|
+ if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
|
|
+ usb_endpoint_dir_out(ep) &&
|
|
+ usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
|
|
+ ar->usb_ep_cmd_is_bulk = true;
|
|
+ }
|
|
+
|
|
usb_set_intfdata(intf, ar);
|
|
SET_IEEE80211_DEV(ar->hw, &intf->dev);
|
|
|
|
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
|
|
index fad77dd..3f9cb89 100644
|
|
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
|
|
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
|
|
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
|
|
ifevent->action, ifevent->ifidx, ifevent->bssidx,
|
|
ifevent->flags, ifevent->role);
|
|
|
|
- if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
|
|
+ /* The P2P Device interface event must not be ignored
|
|
+ * contrary to what firmware tells us. The only way to
|
|
+ * distinguish the P2P Device is by looking at the ifidx
|
|
+ * and bssidx received.
|
|
+ */
|
|
+ if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
|
|
+ (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
|
|
brcmf_dbg(EVENT, "event can be ignored\n");
|
|
return;
|
|
}
|
|
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
|
|
return;
|
|
}
|
|
|
|
- if (ifevent->action == BRCMF_E_IF_CHANGE)
|
|
+ if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
|
|
brcmf_fws_reset_interface(ifp);
|
|
|
|
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
|
|
|
|
- if (ifevent->action == BRCMF_E_IF_DEL) {
|
|
+ if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
|
|
brcmf_fws_del_interface(ifp);
|
|
brcmf_del_if(drvr, ifevent->bssidx);
|
|
}
|
|
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
|
|
index 51b53a7..d26b476 100644
|
|
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
|
|
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
|
|
@@ -167,6 +167,8 @@ enum brcmf_fweh_event_code {
|
|
#define BRCMF_E_IF_ROLE_STA 0
|
|
#define BRCMF_E_IF_ROLE_AP 1
|
|
#define BRCMF_E_IF_ROLE_WDS 2
|
|
+#define BRCMF_E_IF_ROLE_P2P_GO 3
|
|
+#define BRCMF_E_IF_ROLE_P2P_CLIENT 4
|
|
|
|
/**
|
|
* definitions for event packet validation.
|
|
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
|
|
index 3441f70..6e8cdb8 100644
|
|
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
|
|
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
|
|
@@ -708,7 +708,6 @@ struct iwl_priv {
|
|
unsigned long reload_jiffies;
|
|
int reload_count;
|
|
bool ucode_loaded;
|
|
- bool init_ucode_run; /* Don't run init uCode again */
|
|
|
|
u8 plcp_delta_threshold;
|
|
|
|
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
|
|
index c1e3113..503a81e 100644
|
|
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
|
|
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
|
|
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|
/* recalculate basic rates */
|
|
iwl_calc_basic_rates(priv, ctx);
|
|
|
|
+ /*
|
|
+	 * force CTS-to-self frames protection if RTS-CTS is not the preferred
|
|
+	 * aggregation protection method
|
|
+ */
|
|
+ if (!priv->hw_params.use_rts_for_aggregation)
|
|
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
|
|
+
|
|
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
|
|
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
|
|
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
|
|
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
|
else
|
|
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
|
|
|
|
+ if (bss_conf->use_cts_prot)
|
|
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
|
|
+ else
|
|
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
|
|
+
|
|
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
|
|
|
|
if (vif->type == NL80211_IFTYPE_AP ||
|
|
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
|
|
index cf03ef5..8b2dedc 100644
|
|
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
|
|
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
|
|
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
|
|
if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
|
|
return 0;
|
|
|
|
- if (priv->init_ucode_run)
|
|
- return 0;
|
|
-
|
|
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
|
|
calib_complete, ARRAY_SIZE(calib_complete),
|
|
iwlagn_wait_calib, priv);
|
|
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
|
|
*/
|
|
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
|
|
UCODE_CALIB_TIMEOUT);
|
|
- if (!ret)
|
|
- priv->init_ucode_run = true;
|
|
|
|
goto out;
|
|
|
|
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
|
|
index 1ced525..b45d78f 100644
|
|
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
|
|
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
|
|
@@ -119,6 +119,8 @@ enum iwl_led_mode {
|
|
#define IWL_LONG_WD_TIMEOUT 10000
|
|
#define IWL_MAX_WD_TIMEOUT 120000
|
|
|
|
+#define IWL_DEFAULT_MAX_TX_POWER 22
|
|
+
|
|
/* Antenna presence definitions */
|
|
#define ANT_NONE 0x0
|
|
#define ANT_A BIT(0)
|
|
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
|
|
index 725e954..3c3eb78 100644
|
|
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
|
|
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
|
|
@@ -118,8 +118,6 @@ static const u8 iwl_nvm_channels[] = {
|
|
#define LAST_2GHZ_HT_PLUS 9
|
|
#define LAST_5GHZ_HT 161
|
|
|
|
-#define DEFAULT_MAX_TX_POWER 16
|
|
-
|
|
/* rate data (static) */
|
|
static struct ieee80211_rate iwl_cfg80211_rates[] = {
|
|
{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
|
|
@@ -242,7 +240,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
|
* Default value - highest tx power value. max_power
|
|
* is not used in mvm, and is used for backwards compatibility
|
|
*/
|
|
- channel->max_power = DEFAULT_MAX_TX_POWER;
|
|
+ channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
|
|
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
|
|
IWL_DEBUG_EEPROM(dev,
|
|
"Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
|
|
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
|
|
index 1f065cf..d090ed7 100644
|
|
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
|
|
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
|
|
@@ -514,6 +514,7 @@ enum iwl_trans_state {
|
|
* Set during transport allocation.
|
|
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
|
|
* @pm_support: set to true in start_hw if link pm is supported
|
|
+ * @ltr_enabled: set to true if the LTR is enabled
|
|
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
|
|
* The user should use iwl_trans_{alloc,free}_tx_cmd.
|
|
* @dev_cmd_headroom: room needed for the transport's private use before the
|
|
@@ -539,6 +540,7 @@ struct iwl_trans {
|
|
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
|
|
|
|
bool pm_support;
|
|
+ bool ltr_enabled;
|
|
|
|
/* The following fields are internal only */
|
|
struct kmem_cache *dev_cmd_pool;
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
|
|
index 884c087..fa66471 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
|
|
@@ -66,13 +66,46 @@
|
|
|
|
/* Power Management Commands, Responses, Notifications */
|
|
|
|
+/**
|
|
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
|
|
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
|
|
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
|
|
+ * memory access
|
|
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
|
|
+ * reg change
|
|
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
|
|
+ * D0 to D3
|
|
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
|
|
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
|
|
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
|
|
+ */
|
|
+enum iwl_ltr_config_flags {
|
|
+ LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
|
|
+ LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
|
|
+ LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
|
|
+ LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
|
|
+ LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
|
|
+ LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
|
|
+ LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct iwl_ltr_config_cmd - configures the LTR
|
|
+ * @flags: See %enum iwl_ltr_config_flags
|
|
+ */
|
|
+struct iwl_ltr_config_cmd {
|
|
+ __le32 flags;
|
|
+ __le32 static_long;
|
|
+ __le32 static_short;
|
|
+} __packed;
|
|
+
|
|
/* Radio LP RX Energy Threshold measured in dBm */
|
|
#define POWER_LPRX_RSSI_THRESHOLD 75
|
|
#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
|
|
#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
|
|
|
|
/**
|
|
- * enum iwl_scan_flags - masks for power table command flags
|
|
+ * enum iwl_power_flags - masks for power table command flags
|
|
* @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
|
|
* receiver and transmitter. '0' - does not allow.
|
|
* @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
|
|
index 989d7db..60dc387 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
|
|
@@ -142,6 +142,7 @@ enum {
|
|
/* Power - legacy power table command */
|
|
POWER_TABLE_CMD = 0x77,
|
|
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
|
|
+ LTR_CONFIG = 0xee,
|
|
|
|
/* Thermal Throttling*/
|
|
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
|
|
@@ -1393,7 +1394,7 @@ enum iwl_sf_scenario {
|
|
#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
|
|
|
|
/* smart FIFO default values */
|
|
-#define SF_W_MARK_SISO 4096
|
|
+#define SF_W_MARK_SISO 6144
|
|
#define SF_W_MARK_MIMO2 8192
|
|
#define SF_W_MARK_MIMO3 6144
|
|
#define SF_W_MARK_LEGACY 4096
|
|
@@ -1415,14 +1416,14 @@ enum iwl_sf_scenario {
|
|
|
|
/**
|
|
* Smart Fifo configuration command.
|
|
- * @state: smart fifo state, types listed in iwl_sf_sate.
|
|
+ * @state: smart fifo state, types listed in enum %iwl_sf_state.
|
|
* @watermark: Minimum allowed availabe free space in RXF for transient state.
|
|
* @long_delay_timeouts: aging and idle timer values for each scenario
|
|
* in long delay state.
|
|
* @full_on_timeouts: timer values for each scenario in full on state.
|
|
*/
|
|
struct iwl_sf_cfg_cmd {
|
|
- enum iwl_sf_state state;
|
|
+ __le32 state;
|
|
__le32 watermark[SF_TRANSIENT_STATES_NUMBER];
|
|
__le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
|
|
__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
|
|
index c03d395..2ef344f 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
|
|
@@ -439,6 +439,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
|
|
goto error;
|
|
}
|
|
|
|
+ if (mvm->trans->ltr_enabled) {
|
|
+ struct iwl_ltr_config_cmd cmd = {
|
|
+ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
|
|
+ };
|
|
+
|
|
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
|
|
+ sizeof(cmd), &cmd));
|
|
+ }
|
|
+
|
|
ret = iwl_mvm_power_update_device_mode(mvm);
|
|
if (ret)
|
|
goto error;
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
|
|
index d06414e..4a3b8b7 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
|
|
@@ -410,9 +410,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
|
|
mvmvif->uploaded = false;
|
|
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
|
|
|
|
- /* does this make sense at all? */
|
|
- mvmvif->color++;
|
|
-
|
|
spin_lock_bh(&mvm->time_event_lock);
|
|
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
|
|
spin_unlock_bh(&mvm->time_event_lock);
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
|
|
index a3d43de..dbff7f0 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
|
|
@@ -313,6 +313,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
|
|
CMD(REPLY_BEACON_FILTERING_CMD),
|
|
CMD(REPLY_THERMAL_MNG_BACKOFF),
|
|
CMD(MAC_PM_POWER_TABLE),
|
|
+ CMD(LTR_CONFIG),
|
|
CMD(BT_COEX_CI),
|
|
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
|
|
};
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
|
|
index 88809b2..dab8fd1 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
|
|
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
|
|
enum iwl_sf_state new_state)
|
|
{
|
|
struct iwl_sf_cfg_cmd sf_cmd = {
|
|
- .state = new_state,
|
|
+ .state = cpu_to_le32(new_state),
|
|
};
|
|
struct ieee80211_sta *sta;
|
|
int ret = 0;
|
|
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
|
|
index 76ee486..4efcb28 100644
|
|
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
|
|
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
|
|
@@ -835,6 +835,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
|
|
sta_id = ba_notif->sta_id;
|
|
tid = ba_notif->tid;
|
|
|
|
+ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
|
|
+ tid >= IWL_MAX_TID_COUNT,
|
|
+ "sta_id %d tid %d", sta_id, tid))
|
|
+ return 0;
|
|
+
|
|
rcu_read_lock();
|
|
|
|
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
|
|
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
|
|
index df1f5e7..1ac33d9 100644
|
|
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
|
|
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
|
|
@@ -272,6 +272,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
|
|
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
|
|
+ {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
|
|
+ {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
|
|
@@ -315,6 +317,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
|
|
{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
|
|
+ {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
|
|
+ {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
|
|
{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
|
|
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
|
|
index 16be0c0..fb62927 100644
|
|
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
|
|
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
|
|
@@ -94,6 +94,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
|
|
{
|
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
|
u16 lctl;
|
|
+ u16 cap;
|
|
|
|
/*
|
|
* HW bug W/A for instability in PCIe bus L0S->L1 transition.
|
|
@@ -104,16 +105,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
|
|
* power savings, even without L1.
|
|
*/
|
|
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
|
|
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
|
|
- /* L1-ASPM enabled; disable(!) L0S */
|
|
+ if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
|
|
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
|
|
- dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
|
|
- } else {
|
|
- /* L1-ASPM disabled; enable(!) L0S */
|
|
+ else
|
|
iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
|
|
- dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
|
|
- }
|
|
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
|
|
+
|
|
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
|
|
+ trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
|
|
+ dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
|
|
+ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
|
|
+ trans->ltr_enabled ? "En" : "Dis");
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
|
|
index 3d54900..52427fb 100644
|
|
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
|
|
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
|
|
@@ -729,7 +729,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
|
|
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
|
|
trans_pcie->kw.dma >> 4);
|
|
|
|
- iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
|
|
+ /*
|
|
+	 * Send 0 as the scd_base_addr since the device may have been reset
|
|
+ * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
|
|
+ * contain garbage.
|
|
+ */
|
|
+ iwl_pcie_tx_start(trans, 0);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
|
|
index 98718e4..5e836b1 100644
|
|
--- a/drivers/net/wireless/mac80211_hwsim.c
|
|
+++ b/drivers/net/wireless/mac80211_hwsim.c
|
|
@@ -1976,7 +1976,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
|
|
if (err != 0) {
|
|
printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
|
|
err);
|
|
- goto failed_hw;
|
|
+ goto failed_bind;
|
|
}
|
|
|
|
skb_queue_head_init(&data->pending);
|
|
@@ -2159,6 +2159,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
|
|
return idx;
|
|
|
|
failed_hw:
|
|
+ device_release_driver(data->dev);
|
|
+failed_bind:
|
|
device_unregister(data->dev);
|
|
failed_drvdata:
|
|
ieee80211_free_hw(hw);
|
|
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
|
|
index a394a9a..ebd5625 100644
|
|
--- a/drivers/net/wireless/rt2x00/rt2800.h
|
|
+++ b/drivers/net/wireless/rt2x00/rt2800.h
|
|
@@ -52,6 +52,7 @@
|
|
* RF5592 2.4G/5G 2T2R
|
|
* RF3070 2.4G 1T1R
|
|
* RF5360 2.4G 1T1R
|
|
+ * RF5362 2.4G 1T1R
|
|
* RF5370 2.4G 1T1R
|
|
* RF5390 2.4G 1T1R
|
|
*/
|
|
@@ -72,6 +73,7 @@
|
|
#define RF3070 0x3070
|
|
#define RF3290 0x3290
|
|
#define RF5360 0x5360
|
|
+#define RF5362 0x5362
|
|
#define RF5370 0x5370
|
|
#define RF5372 0x5372
|
|
#define RF5390 0x5390
|
|
@@ -2039,7 +2041,7 @@ struct mac_iveiv_entry {
|
|
* 2 - drop tx power by 12dBm,
|
|
* 3 - increase tx power by 6dBm
|
|
*/
|
|
-#define BBP1_TX_POWER_CTRL FIELD8(0x07)
|
|
+#define BBP1_TX_POWER_CTRL FIELD8(0x03)
|
|
#define BBP1_TX_ANTENNA FIELD8(0x18)
|
|
|
|
/*
|
|
@@ -2145,7 +2147,7 @@ struct mac_iveiv_entry {
|
|
/* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
|
|
#define RFCSR3_PA1_BIAS_CCK FIELD8(0x70)
|
|
#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
|
|
-/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
|
|
+/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */
|
|
#define RFCSR3_VCOCAL_EN FIELD8(0x80)
|
|
/* Bits for RF3050 */
|
|
#define RFCSR3_BIT1 FIELD8(0x02)
|
|
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
|
|
index 41d4a81..4e16d4d 100644
|
|
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
|
|
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
|
|
@@ -3142,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
|
|
break;
|
|
case RF3070:
|
|
case RF5360:
|
|
+ case RF5362:
|
|
case RF5370:
|
|
case RF5372:
|
|
case RF5390:
|
|
@@ -3159,6 +3160,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
|
|
rt2x00_rf(rt2x00dev, RF3290) ||
|
|
rt2x00_rf(rt2x00dev, RF3322) ||
|
|
rt2x00_rf(rt2x00dev, RF5360) ||
|
|
+ rt2x00_rf(rt2x00dev, RF5362) ||
|
|
rt2x00_rf(rt2x00dev, RF5370) ||
|
|
rt2x00_rf(rt2x00dev, RF5372) ||
|
|
rt2x00_rf(rt2x00dev, RF5390) ||
|
|
@@ -4273,6 +4275,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
|
|
case RF3070:
|
|
case RF3290:
|
|
case RF5360:
|
|
+ case RF5362:
|
|
case RF5370:
|
|
case RF5372:
|
|
case RF5390:
|
|
@@ -7073,6 +7076,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
|
|
case RF3320:
|
|
case RF3322:
|
|
case RF5360:
|
|
+ case RF5362:
|
|
case RF5370:
|
|
case RF5372:
|
|
case RF5390:
|
|
@@ -7529,6 +7533,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
|
|
case RF3320:
|
|
case RF3322:
|
|
case RF5360:
|
|
+ case RF5362:
|
|
case RF5370:
|
|
case RF5372:
|
|
case RF5390:
|
|
@@ -7658,6 +7663,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
|
|
case RF3070:
|
|
case RF3290:
|
|
case RF5360:
|
|
+ case RF5362:
|
|
case RF5370:
|
|
case RF5372:
|
|
case RF5390:
|
|
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
|
|
index caddc1b..e8abd0f 100644
|
|
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
|
|
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
|
|
@@ -991,6 +991,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
|
|
{ USB_DEVICE(0x07d1, 0x3c17) },
|
|
{ USB_DEVICE(0x2001, 0x3317) },
|
|
{ USB_DEVICE(0x2001, 0x3c1b) },
|
|
+ { USB_DEVICE(0x2001, 0x3c25) },
|
|
/* Draytek */
|
|
{ USB_DEVICE(0x07fa, 0x7712) },
|
|
/* DVICO */
|
|
@@ -1062,6 +1063,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
|
|
/* Ovislink */
|
|
{ USB_DEVICE(0x1b75, 0x3071) },
|
|
{ USB_DEVICE(0x1b75, 0x3072) },
|
|
+ { USB_DEVICE(0x1b75, 0xa200) },
|
|
/* Para */
|
|
{ USB_DEVICE(0x20b8, 0x8888) },
|
|
/* Pegatron */
|
|
@@ -1235,6 +1237,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
|
|
/* Arcadyan */
|
|
{ USB_DEVICE(0x043e, 0x7a12) },
|
|
{ USB_DEVICE(0x043e, 0x7a32) },
|
|
+ /* ASUS */
|
|
+ { USB_DEVICE(0x0b05, 0x17e8) },
|
|
/* Azurewave */
|
|
{ USB_DEVICE(0x13d3, 0x3329) },
|
|
{ USB_DEVICE(0x13d3, 0x3365) },
|
|
@@ -1271,6 +1275,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
|
|
{ USB_DEVICE(0x057c, 0x8501) },
|
|
/* Buffalo */
|
|
{ USB_DEVICE(0x0411, 0x0241) },
|
|
+ { USB_DEVICE(0x0411, 0x0253) },
|
|
/* D-Link */
|
|
{ USB_DEVICE(0x2001, 0x3c1a) },
|
|
{ USB_DEVICE(0x2001, 0x3c21) },
|
|
@@ -1361,6 +1366,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
|
|
{ USB_DEVICE(0x0df6, 0x0053) },
|
|
{ USB_DEVICE(0x0df6, 0x0069) },
|
|
{ USB_DEVICE(0x0df6, 0x006f) },
|
|
+ { USB_DEVICE(0x0df6, 0x0078) },
|
|
/* SMC */
|
|
{ USB_DEVICE(0x083a, 0xa512) },
|
|
{ USB_DEVICE(0x083a, 0xc522) },
|
|
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
|
|
index 5642ccc..22d49d5 100644
|
|
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
|
|
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
|
|
@@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
|
|
skb_trim(skb, frame_length);
|
|
}
|
|
|
|
-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
|
|
+/*
|
|
+ * H/W needs L2 padding between the header and the payload if header size
|
|
+ * is not 4 bytes aligned.
|
|
+ */
|
|
+void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
|
|
{
|
|
- unsigned int payload_length = skb->len - header_length;
|
|
- unsigned int header_align = ALIGN_SIZE(skb, 0);
|
|
- unsigned int payload_align = ALIGN_SIZE(skb, header_length);
|
|
- unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
|
|
+ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
|
|
|
|
- /*
|
|
- * Adjust the header alignment if the payload needs to be moved more
|
|
- * than the header.
|
|
- */
|
|
- if (payload_align > header_align)
|
|
- header_align += 4;
|
|
-
|
|
- /* There is nothing to do if no alignment is needed */
|
|
- if (!header_align)
|
|
+ if (!l2pad)
|
|
return;
|
|
|
|
- /* Reserve the amount of space needed in front of the frame */
|
|
- skb_push(skb, header_align);
|
|
-
|
|
- /*
|
|
- * Move the header.
|
|
- */
|
|
- memmove(skb->data, skb->data + header_align, header_length);
|
|
-
|
|
- /* Move the payload, if present and if required */
|
|
- if (payload_length && payload_align)
|
|
- memmove(skb->data + header_length + l2pad,
|
|
- skb->data + header_length + l2pad + payload_align,
|
|
- payload_length);
|
|
-
|
|
- /* Trim the skb to the correct size */
|
|
- skb_trim(skb, header_length + l2pad + payload_length);
|
|
+ skb_push(skb, l2pad);
|
|
+ memmove(skb->data, skb->data + l2pad, hdr_len);
|
|
}
|
|
|
|
-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
|
|
+void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
|
|
{
|
|
- /*
|
|
- * L2 padding is only present if the skb contains more than just the
|
|
- * IEEE 802.11 header.
|
|
- */
|
|
- unsigned int l2pad = (skb->len > header_length) ?
|
|
- L2PAD_SIZE(header_length) : 0;
|
|
+ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
|
|
|
|
if (!l2pad)
|
|
return;
|
|
|
|
- memmove(skb->data + l2pad, skb->data, header_length);
|
|
+ memmove(skb->data + l2pad, skb->data, hdr_len);
|
|
skb_pull(skb, l2pad);
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
|
|
index c613110..66c92a1 100644
|
|
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
|
|
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
|
|
@@ -314,9 +314,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
|
|
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
|
|
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
|
|
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
|
|
+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
|
|
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
|
|
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
|
|
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
|
|
+ {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
|
|
{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
|
|
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
|
|
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
|
|
@@ -369,6 +371,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
|
|
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
|
|
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
|
|
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
|
|
+ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
|
|
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
|
|
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
|
|
{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
|
|
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
|
|
index 4933f02..bc409ec 100644
|
|
--- a/drivers/net/wireless/rtlwifi/usb.c
|
|
+++ b/drivers/net/wireless/rtlwifi/usb.c
|
|
@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
|
|
|
|
do {
|
|
status = usb_control_msg(udev, pipe, request, reqtype, value,
|
|
- index, pdata, len, 0); /*max. timeout*/
|
|
+ index, pdata, len, 1000);
|
|
if (status < 0) {
|
|
/* firmware download is checksumed, don't retry */
|
|
if ((value >= FW_8192C_START_ADDRESS &&
|
|
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
|
|
index 7f1669c..779dc2b 100644
|
|
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
|
|
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
|
|
@@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
|
|
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
|
|
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
|
|
|
|
-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
|
|
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
|
|
|
|
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
|
|
AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
|
|
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
|
|
index f7381dd..1bce432 100644
|
|
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
|
|
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
|
|
@@ -26,8 +26,8 @@
|
|
|
|
#include "wlcore.h"
|
|
|
|
-int wl1271_format_buffer(char __user *userbuf, size_t count,
|
|
- loff_t *ppos, char *fmt, ...);
|
|
+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
|
|
+ loff_t *ppos, char *fmt, ...);
|
|
|
|
int wl1271_debugfs_init(struct wl1271 *wl);
|
|
void wl1271_debugfs_exit(struct wl1271 *wl);
|
|
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
|
|
index 7a206cf..d18e653 100644
|
|
--- a/drivers/net/xen-netback/xenbus.c
|
|
+++ b/drivers/net/xen-netback/xenbus.c
|
|
@@ -32,6 +32,8 @@ struct backend_info {
|
|
enum xenbus_state frontend_state;
|
|
struct xenbus_watch hotplug_status_watch;
|
|
u8 have_hotplug_status_watch:1;
|
|
+
|
|
+ const char *hotplug_script;
|
|
};
|
|
|
|
static int connect_rings(struct backend_info *);
|
|
@@ -54,6 +56,7 @@ static int netback_remove(struct xenbus_device *dev)
|
|
xenvif_free(be->vif);
|
|
be->vif = NULL;
|
|
}
|
|
+ kfree(be->hotplug_script);
|
|
kfree(be);
|
|
dev_set_drvdata(&dev->dev, NULL);
|
|
return 0;
|
|
@@ -71,6 +74,7 @@ static int netback_probe(struct xenbus_device *dev,
|
|
struct xenbus_transaction xbt;
|
|
int err;
|
|
int sg;
|
|
+ const char *script;
|
|
struct backend_info *be = kzalloc(sizeof(struct backend_info),
|
|
GFP_KERNEL);
|
|
if (!be) {
|
|
@@ -157,6 +161,15 @@ static int netback_probe(struct xenbus_device *dev,
|
|
if (err)
|
|
pr_debug("Error writing feature-split-event-channels\n");
|
|
|
|
+ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
|
|
+ if (IS_ERR(script)) {
|
|
+ err = PTR_ERR(script);
|
|
+ xenbus_dev_fatal(dev, err, "reading script");
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ be->hotplug_script = script;
|
|
+
|
|
err = xenbus_switch_state(dev, XenbusStateInitWait);
|
|
if (err)
|
|
goto fail;
|
|
@@ -187,22 +200,14 @@ static int netback_uevent(struct xenbus_device *xdev,
|
|
struct kobj_uevent_env *env)
|
|
{
|
|
struct backend_info *be = dev_get_drvdata(&xdev->dev);
|
|
- char *val;
|
|
|
|
- val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
|
|
- if (IS_ERR(val)) {
|
|
- int err = PTR_ERR(val);
|
|
- xenbus_dev_fatal(xdev, err, "reading script");
|
|
- return err;
|
|
- } else {
|
|
- if (add_uevent_var(env, "script=%s", val)) {
|
|
- kfree(val);
|
|
- return -ENOMEM;
|
|
- }
|
|
- kfree(val);
|
|
- }
|
|
+ if (!be)
|
|
+ return 0;
|
|
+
|
|
+ if (add_uevent_var(env, "script=%s", be->hotplug_script))
|
|
+ return -ENOMEM;
|
|
|
|
- if (!be || !be->vif)
|
|
+ if (!be->vif)
|
|
return 0;
|
|
|
|
return add_uevent_var(env, "vif=%s", be->vif->dev->name);
|
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
|
index e30d800..19db057 100644
|
|
--- a/drivers/net/xen-netfront.c
|
|
+++ b/drivers/net/xen-netfront.c
|
|
@@ -469,9 +469,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
|
|
len = skb_frag_size(frag);
|
|
offset = frag->page_offset;
|
|
|
|
- /* Data must not cross a page boundary. */
|
|
- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
|
|
-
|
|
/* Skip unused frames from start of page */
|
|
page += offset >> PAGE_SHIFT;
|
|
offset &= ~PAGE_MASK;
|
|
@@ -479,8 +476,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
|
|
while (len > 0) {
|
|
unsigned long bytes;
|
|
|
|
- BUG_ON(offset >= PAGE_SIZE);
|
|
-
|
|
bytes = PAGE_SIZE - offset;
|
|
if (bytes > len)
|
|
bytes = len;
|
|
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
|
|
index f868333..963a4a5 100644
|
|
--- a/drivers/nfc/microread/microread.c
|
|
+++ b/drivers/nfc/microread/microread.c
|
|
@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
|
|
targets->sens_res =
|
|
be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
|
|
targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
|
|
- memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
|
|
- skb->data[MICROREAD_EMCF_A_LEN]);
|
|
targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
|
|
+ if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
|
|
+ r = -EINVAL;
|
|
+ goto exit_free;
|
|
+ }
|
|
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
|
|
+ targets->nfcid1_len);
|
|
break;
|
|
case MICROREAD_GATE_ID_MREAD_ISO_A_3:
|
|
targets->supported_protocols =
|
|
@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
|
|
targets->sens_res =
|
|
be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
|
|
targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
|
|
- memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
|
|
- skb->data[MICROREAD_EMCF_A3_LEN]);
|
|
targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
|
|
+ if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
|
|
+ r = -EINVAL;
|
|
+ goto exit_free;
|
|
+ }
|
|
+ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
|
|
+ targets->nfcid1_len);
|
|
break;
|
|
case MICROREAD_GATE_ID_MREAD_ISO_B:
|
|
targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
|
|
diff --git a/drivers/of/address.c b/drivers/of/address.c
|
|
index 1a54f1f..9eae613 100644
|
|
--- a/drivers/of/address.c
|
|
+++ b/drivers/of/address.c
|
|
@@ -401,6 +401,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
|
|
return NULL;
|
|
}
|
|
|
|
+static int of_empty_ranges_quirk(void)
|
|
+{
|
|
+ if (IS_ENABLED(CONFIG_PPC)) {
|
|
+ /* To save cycles, we cache the result */
|
|
+ static int quirk_state = -1;
|
|
+
|
|
+ if (quirk_state < 0)
|
|
+ quirk_state =
|
|
+ of_machine_is_compatible("Power Macintosh") ||
|
|
+ of_machine_is_compatible("MacRISC");
|
|
+ return quirk_state;
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
static int of_translate_one(struct device_node *parent, struct of_bus *bus,
|
|
struct of_bus *pbus, __be32 *addr,
|
|
int na, int ns, int pna, const char *rprop)
|
|
@@ -426,12 +441,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
|
|
* This code is only enabled on powerpc. --gcl
|
|
*/
|
|
ranges = of_get_property(parent, rprop, &rlen);
|
|
-#if !defined(CONFIG_PPC)
|
|
- if (ranges == NULL) {
|
|
+ if (ranges == NULL && !of_empty_ranges_quirk()) {
|
|
pr_err("OF: no ranges; cannot translate\n");
|
|
return 1;
|
|
}
|
|
-#endif /* !defined(CONFIG_PPC) */
|
|
if (ranges == NULL || rlen == 0) {
|
|
offset = of_read_number(addr, na);
|
|
memset(addr, 0, pna * 4);
|
|
@@ -691,10 +704,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from,
|
|
struct resource res;
|
|
|
|
while (dn) {
|
|
- if (of_address_to_resource(dn, 0, &res))
|
|
- continue;
|
|
- if (res.start == base_address)
|
|
+ if (!of_address_to_resource(dn, 0, &res) &&
|
|
+ res.start == base_address)
|
|
return dn;
|
|
+
|
|
dn = of_find_matching_node(dn, matches);
|
|
}
|
|
|
|
diff --git a/drivers/of/base.c b/drivers/of/base.c
|
|
index 89e888a..e99f329 100644
|
|
--- a/drivers/of/base.c
|
|
+++ b/drivers/of/base.c
|
|
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(of_n_size_cells);
|
|
#ifdef CONFIG_NUMA
|
|
int __weak of_node_to_nid(struct device_node *np)
|
|
{
|
|
- return numa_node_id();
|
|
+ return NUMA_NO_NODE;
|
|
}
|
|
#endif
|
|
|
|
@@ -1117,52 +1117,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
|
|
EXPORT_SYMBOL_GPL(of_property_read_string);
|
|
|
|
/**
|
|
- * of_property_read_string_index - Find and read a string from a multiple
|
|
- * strings property.
|
|
- * @np: device node from which the property value is to be read.
|
|
- * @propname: name of the property to be searched.
|
|
- * @index: index of the string in the list of strings
|
|
- * @out_string: pointer to null terminated return string, modified only if
|
|
- * return value is 0.
|
|
- *
|
|
- * Search for a property in a device tree node and retrieve a null
|
|
- * terminated string value (pointer to data, not a copy) in the list of strings
|
|
- * contained in that property.
|
|
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
|
|
- * property does not have a value, and -EILSEQ if the string is not
|
|
- * null-terminated within the length of the property data.
|
|
- *
|
|
- * The out_string pointer is modified only if a valid string can be decoded.
|
|
- */
|
|
-int of_property_read_string_index(struct device_node *np, const char *propname,
|
|
- int index, const char **output)
|
|
-{
|
|
- struct property *prop = of_find_property(np, propname, NULL);
|
|
- int i = 0;
|
|
- size_t l = 0, total = 0;
|
|
- const char *p;
|
|
-
|
|
- if (!prop)
|
|
- return -EINVAL;
|
|
- if (!prop->value)
|
|
- return -ENODATA;
|
|
- if (strnlen(prop->value, prop->length) >= prop->length)
|
|
- return -EILSEQ;
|
|
-
|
|
- p = prop->value;
|
|
-
|
|
- for (i = 0; total < prop->length; total += l, p += l) {
|
|
- l = strlen(p) + 1;
|
|
- if (i++ == index) {
|
|
- *output = p;
|
|
- return 0;
|
|
- }
|
|
- }
|
|
- return -ENODATA;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(of_property_read_string_index);
|
|
-
|
|
-/**
|
|
* of_property_match_string() - Find string in a list and return index
|
|
* @np: pointer to node containing string list property
|
|
* @propname: string list property name
|
|
@@ -1188,7 +1142,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
|
|
end = p + prop->length;
|
|
|
|
for (i = 0; p < end; i++, p += l) {
|
|
- l = strlen(p) + 1;
|
|
+ l = strnlen(p, end - p) + 1;
|
|
if (p + l > end)
|
|
return -EILSEQ;
|
|
pr_debug("comparing %s with %s\n", string, p);
|
|
@@ -1200,39 +1154,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
|
|
EXPORT_SYMBOL_GPL(of_property_match_string);
|
|
|
|
/**
|
|
- * of_property_count_strings - Find and return the number of strings from a
|
|
- * multiple strings property.
|
|
+ * of_property_read_string_util() - Utility helper for parsing string properties
|
|
* @np: device node from which the property value is to be read.
|
|
* @propname: name of the property to be searched.
|
|
+ * @out_strs: output array of string pointers.
|
|
+ * @sz: number of array elements to read.
|
|
+ * @skip: Number of strings to skip over at beginning of list.
|
|
*
|
|
- * Search for a property in a device tree node and retrieve the number of null
|
|
- * terminated string contain in it. Returns the number of strings on
|
|
- * success, -EINVAL if the property does not exist, -ENODATA if property
|
|
- * does not have a value, and -EILSEQ if the string is not null-terminated
|
|
- * within the length of the property data.
|
|
+ * Don't call this function directly. It is a utility helper for the
|
|
+ * of_property_read_string*() family of functions.
|
|
*/
|
|
-int of_property_count_strings(struct device_node *np, const char *propname)
|
|
+int of_property_read_string_helper(struct device_node *np, const char *propname,
|
|
+ const char **out_strs, size_t sz, int skip)
|
|
{
|
|
struct property *prop = of_find_property(np, propname, NULL);
|
|
- int i = 0;
|
|
- size_t l = 0, total = 0;
|
|
- const char *p;
|
|
+ int l = 0, i = 0;
|
|
+ const char *p, *end;
|
|
|
|
if (!prop)
|
|
return -EINVAL;
|
|
if (!prop->value)
|
|
return -ENODATA;
|
|
- if (strnlen(prop->value, prop->length) >= prop->length)
|
|
- return -EILSEQ;
|
|
-
|
|
p = prop->value;
|
|
+ end = p + prop->length;
|
|
|
|
- for (i = 0; total < prop->length; total += l, p += l, i++)
|
|
- l = strlen(p) + 1;
|
|
-
|
|
- return i;
|
|
+ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
|
|
+ l = strnlen(p, end - p) + 1;
|
|
+ if (p + l > end)
|
|
+ return -EILSEQ;
|
|
+ if (out_strs && i >= skip)
|
|
+ *out_strs++ = p;
|
|
+ }
|
|
+ i -= skip;
|
|
+ return i <= 0 ? -ENODATA : i;
|
|
}
|
|
-EXPORT_SYMBOL_GPL(of_property_count_strings);
|
|
+EXPORT_SYMBOL_GPL(of_property_read_string_helper);
|
|
|
|
void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
|
|
{
|
|
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
|
|
index ca01893..bbff99d 100644
|
|
--- a/drivers/of/irq.c
|
|
+++ b/drivers/of/irq.c
|
|
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
|
|
struct device_node *p;
|
|
const __be32 *intspec, *tmp, *addr;
|
|
u32 intsize, intlen;
|
|
- int i, res = -EINVAL;
|
|
+ int i, res;
|
|
|
|
pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
|
|
|
|
@@ -301,16 +301,17 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
|
|
/* Get the reg property (if any) */
|
|
addr = of_get_property(device, "reg", NULL);
|
|
|
|
+ /* Try the new-style interrupts-extended first */
|
|
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
|
|
+ "#interrupt-cells", index, out_irq);
|
|
+ if (!res)
|
|
+ return of_irq_parse_raw(addr, out_irq);
|
|
+
|
|
/* Get the interrupts property */
|
|
intspec = of_get_property(device, "interrupts", &intlen);
|
|
- if (intspec == NULL) {
|
|
- /* Try the new-style interrupts-extended */
|
|
- res = of_parse_phandle_with_args(device, "interrupts-extended",
|
|
- "#interrupt-cells", index, out_irq);
|
|
- if (res)
|
|
- return -EINVAL;
|
|
- return of_irq_parse_raw(addr, out_irq);
|
|
- }
|
|
+ if (intspec == NULL)
|
|
+ return -EINVAL;
|
|
+
|
|
intlen /= sizeof(*intspec);
|
|
|
|
pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
|
|
@@ -322,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
|
|
|
|
/* Get size of interrupt specifier */
|
|
tmp = of_get_property(p, "#interrupt-cells", NULL);
|
|
- if (tmp == NULL)
|
|
+ if (tmp == NULL) {
|
|
+ res = -EINVAL;
|
|
goto out;
|
|
+ }
|
|
intsize = be32_to_cpu(*tmp);
|
|
|
|
pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
|
|
|
|
/* Check index */
|
|
- if ((index + 1) * intsize > intlen)
|
|
+ if ((index + 1) * intsize > intlen) {
|
|
+ res = -EINVAL;
|
|
goto out;
|
|
+ }
|
|
|
|
/* Copy intspec into irq structure */
|
|
intspec += index * intsize;
|
|
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
|
|
index 6643d19..70c61d7 100644
|
|
--- a/drivers/of/selftest.c
|
|
+++ b/drivers/of/selftest.c
|
|
@@ -132,8 +132,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
|
|
selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
|
|
}
|
|
|
|
-static void __init of_selftest_property_match_string(void)
|
|
+static void __init of_selftest_property_string(void)
|
|
{
|
|
+ const char *strings[4];
|
|
struct device_node *np;
|
|
int rc;
|
|
|
|
@@ -150,13 +151,66 @@ static void __init of_selftest_property_match_string(void)
|
|
rc = of_property_match_string(np, "phandle-list-names", "third");
|
|
selftest(rc == 2, "third expected:0 got:%i\n", rc);
|
|
rc = of_property_match_string(np, "phandle-list-names", "fourth");
|
|
- selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
|
|
+ selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
|
|
rc = of_property_match_string(np, "missing-property", "blah");
|
|
- selftest(rc == -EINVAL, "missing property; rc=%i", rc);
|
|
+ selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
|
|
rc = of_property_match_string(np, "empty-property", "blah");
|
|
- selftest(rc == -ENODATA, "empty property; rc=%i", rc);
|
|
+ selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
|
|
rc = of_property_match_string(np, "unterminated-string", "blah");
|
|
- selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
|
|
+ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
|
|
+
|
|
+ /* of_property_count_strings() tests */
|
|
+ rc = of_property_count_strings(np, "string-property");
|
|
+ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
|
|
+ rc = of_property_count_strings(np, "phandle-list-names");
|
|
+ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
|
|
+ rc = of_property_count_strings(np, "unterminated-string");
|
|
+ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
|
|
+ rc = of_property_count_strings(np, "unterminated-string-list");
|
|
+ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
|
|
+
|
|
+ /* of_property_read_string_index() tests */
|
|
+ rc = of_property_read_string_index(np, "string-property", 0, strings);
|
|
+ selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ strings[0] = NULL;
|
|
+ rc = of_property_read_string_index(np, "string-property", 1, strings);
|
|
+ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
|
|
+ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
|
|
+ selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
|
|
+ selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ strings[0] = NULL;
|
|
+ rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
|
|
+ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ strings[0] = NULL;
|
|
+ rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
|
|
+ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
|
|
+ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ strings[0] = NULL;
|
|
+ rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
|
|
+ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
|
|
+ strings[1] = NULL;
|
|
+
|
|
+ /* of_property_read_string_array() tests */
|
|
+ rc = of_property_read_string_array(np, "string-property", strings, 4);
|
|
+ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
|
|
+ rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
|
|
+ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
|
|
+ rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
|
|
+ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
|
|
+ /* -- An incorrectly formed string should cause a failure */
|
|
+ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
|
|
+ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
|
|
+ /* -- parsing the correctly formed strings should still work: */
|
|
+ strings[2] = NULL;
|
|
+ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
|
|
+ selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
|
|
+ strings[1] = NULL;
|
|
+ rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
|
|
+ selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
|
|
}
|
|
|
|
static void __init of_selftest_parse_interrupts(void)
|
|
@@ -379,7 +433,7 @@ static int __init of_selftest(void)
|
|
|
|
pr_info("start of selftest - you will see error messages\n");
|
|
of_selftest_parse_phandle_with_args();
|
|
- of_selftest_property_match_string();
|
|
+ of_selftest_property_string();
|
|
of_selftest_parse_interrupts();
|
|
of_selftest_parse_interrupts_extended();
|
|
of_selftest_match_node();
|
|
diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
|
|
index 0007d3c..eedee37 100644
|
|
--- a/drivers/of/testcase-data/tests-phandle.dtsi
|
|
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
|
|
@@ -32,7 +32,9 @@
|
|
phandle-list-bad-args = <&provider2 1 0>,
|
|
<&provider3 0>;
|
|
empty-property;
|
|
+ string-property = "foobar";
|
|
unterminated-string = [40 41 42 43];
|
|
+ unterminated-string-list = "first", "second", [40 41 42 43];
|
|
};
|
|
};
|
|
};
|
|
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
|
|
index 2872ece..44333bd 100644
|
|
--- a/drivers/parport/Kconfig
|
|
+++ b/drivers/parport/Kconfig
|
|
@@ -5,6 +5,12 @@
|
|
# Parport configuration.
|
|
#
|
|
|
|
+config ARCH_MIGHT_HAVE_PC_PARPORT
|
|
+ bool
|
|
+ help
|
|
+ Select this config option from the architecture Kconfig if
|
|
+ the architecture might have PC parallel port hardware.
|
|
+
|
|
menuconfig PARPORT
|
|
tristate "Parallel port support"
|
|
depends on HAS_IOMEM
|
|
@@ -31,12 +37,6 @@ menuconfig PARPORT
|
|
|
|
If unsure, say Y.
|
|
|
|
-config ARCH_MIGHT_HAVE_PC_PARPORT
|
|
- bool
|
|
- help
|
|
- Select this config option from the architecture Kconfig if
|
|
- the architecture might have PC parallel port hardware.
|
|
-
|
|
if PARPORT
|
|
|
|
config PARPORT_PC
|
|
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
|
|
index 483d9ad..9773667 100644
|
|
--- a/drivers/pci/host/pci-mvebu.c
|
|
+++ b/drivers/pci/host/pci-mvebu.c
|
|
@@ -855,7 +855,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
|
|
rangesz = pna + na + ns;
|
|
nranges = rlen / sizeof(__be32) / rangesz;
|
|
|
|
- for (i = 0; i < nranges; i++) {
|
|
+ for (i = 0; i < nranges; i++, range += rangesz) {
|
|
u32 flags = of_read_number(range, 1);
|
|
u32 slot = of_read_number(range + 1, 1);
|
|
u64 cpuaddr = of_read_number(range + na, pna);
|
|
@@ -865,14 +865,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
|
|
rtype = IORESOURCE_IO;
|
|
else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
|
|
rtype = IORESOURCE_MEM;
|
|
+ else
|
|
+ continue;
|
|
|
|
if (slot == PCI_SLOT(devfn) && type == rtype) {
|
|
*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
|
|
*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
|
|
return 0;
|
|
}
|
|
-
|
|
- range += rangesz;
|
|
}
|
|
|
|
return -ENOENT;
|
|
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
|
|
index fb02fc2..ced17f2 100644
|
|
--- a/drivers/pci/msi.c
|
|
+++ b/drivers/pci/msi.c
|
|
@@ -599,6 +599,20 @@ error_attrs:
|
|
return ret;
|
|
}
|
|
|
|
+static int msi_verify_entries(struct pci_dev *dev)
|
|
+{
|
|
+ struct msi_desc *entry;
|
|
+
|
|
+ list_for_each_entry(entry, &dev->msi_list, list) {
|
|
+ if (!dev->no_64bit_msi || !entry->msg.address_hi)
|
|
+ continue;
|
|
+ dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
|
|
+ " tried to assign one above 4G\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/**
|
|
* msi_capability_init - configure device's MSI capability structure
|
|
* @dev: pointer to the pci_dev data structure of MSI device function
|
|
@@ -652,6 +666,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
|
|
return ret;
|
|
}
|
|
|
|
+ ret = msi_verify_entries(dev);
|
|
+ if (ret) {
|
|
+ msi_mask_irq(entry, mask, ~mask);
|
|
+ free_msi_irqs(dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
ret = populate_msi_sysfs(dev);
|
|
if (ret) {
|
|
msi_mask_irq(entry, mask, ~mask);
|
|
@@ -767,6 +788,11 @@ static int msix_capability_init(struct pci_dev *dev,
|
|
if (ret)
|
|
goto out_avail;
|
|
|
|
+ /* Check if all MSI entries honor device restrictions */
|
|
+ ret = msi_verify_entries(dev);
|
|
+ if (ret)
|
|
+ goto out_free;
|
|
+
|
|
/*
|
|
* Some devices require MSI-X to be enabled before we can touch the
|
|
* MSI-X registers. We need to mask all the vectors to prevent
|
|
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
|
|
index 25f0bc6..7f41551 100644
|
|
--- a/drivers/pci/pci-driver.c
|
|
+++ b/drivers/pci/pci-driver.c
|
|
@@ -1324,7 +1324,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
|
|
return -ENOMEM;
|
|
|
|
- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
|
|
+ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
|
|
pdev->vendor, pdev->device,
|
|
pdev->subsystem_vendor, pdev->subsystem_device,
|
|
(u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
|
|
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
|
|
index 45113da..e27a3dc 100644
|
|
--- a/drivers/pci/pci-label.c
|
|
+++ b/drivers/pci/pci-label.c
|
|
@@ -168,8 +168,8 @@ enum acpi_attr_enum {
|
|
static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
|
|
{
|
|
int len;
|
|
- len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
|
|
- obj->string.length,
|
|
+ len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
|
|
+ obj->buffer.length,
|
|
UTF16_LITTLE_ENDIAN,
|
|
buf, PAGE_SIZE);
|
|
buf[len] = '\n';
|
|
@@ -194,16 +194,22 @@ dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr)
|
|
tmp = obj->package.elements;
|
|
if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
|
|
tmp[0].type == ACPI_TYPE_INTEGER &&
|
|
- tmp[1].type == ACPI_TYPE_STRING) {
|
|
+ (tmp[1].type == ACPI_TYPE_STRING ||
|
|
+ tmp[1].type == ACPI_TYPE_BUFFER)) {
|
|
/*
|
|
* The second string element is optional even when
|
|
* this _DSM is implemented; when not implemented,
|
|
* this entry must return a null string.
|
|
*/
|
|
- if (attr == ACPI_ATTR_INDEX_SHOW)
|
|
+ if (attr == ACPI_ATTR_INDEX_SHOW) {
|
|
scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
|
|
- else if (attr == ACPI_ATTR_LABEL_SHOW)
|
|
- dsm_label_utf16s_to_utf8s(tmp + 1, buf);
|
|
+ } else if (attr == ACPI_ATTR_LABEL_SHOW) {
|
|
+ if (tmp[1].type == ACPI_TYPE_STRING)
|
|
+ scnprintf(buf, PAGE_SIZE, "%s\n",
|
|
+ tmp[1].string.pointer);
|
|
+ else if (tmp[1].type == ACPI_TYPE_BUFFER)
|
|
+ dsm_label_utf16s_to_utf8s(tmp + 1, buf);
|
|
+ }
|
|
len = strlen(buf) > 0 ? strlen(buf) : -1;
|
|
}
|
|
|
|
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
|
|
index 276ef9c..a943c6c 100644
|
|
--- a/drivers/pci/pci-sysfs.c
|
|
+++ b/drivers/pci/pci-sysfs.c
|
|
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
|
{
|
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
|
|
|
- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
|
|
+ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
|
|
pci_dev->vendor, pci_dev->device,
|
|
pci_dev->subsystem_vendor, pci_dev->subsystem_device,
|
|
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
|
|
@@ -186,9 +186,9 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
|
}
|
|
static DEVICE_ATTR_RO(modalias);
|
|
|
|
-static ssize_t enabled_store(struct device *dev,
|
|
- struct device_attribute *attr, const char *buf,
|
|
- size_t count)
|
|
+static ssize_t enable_store(struct device *dev,
|
|
+ struct device_attribute *attr, const char *buf,
|
|
+ size_t count)
|
|
{
|
|
struct pci_dev *pdev = to_pci_dev(dev);
|
|
unsigned long val;
|
|
@@ -212,15 +212,15 @@ static ssize_t enabled_store(struct device *dev,
|
|
return result < 0 ? result : count;
|
|
}
|
|
|
|
-static ssize_t enabled_show(struct device *dev,
|
|
- struct device_attribute *attr, char *buf)
|
|
+static ssize_t enable_show(struct device *dev,
|
|
+ struct device_attribute *attr, char *buf)
|
|
{
|
|
struct pci_dev *pdev;
|
|
|
|
pdev = to_pci_dev (dev);
|
|
return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
|
|
}
|
|
-static DEVICE_ATTR_RW(enabled);
|
|
+static DEVICE_ATTR_RW(enable);
|
|
|
|
#ifdef CONFIG_NUMA
|
|
static ssize_t
|
|
@@ -526,7 +526,7 @@ static struct attribute *pci_dev_attrs[] = {
|
|
#endif
|
|
&dev_attr_dma_mask_bits.attr,
|
|
&dev_attr_consistent_dma_mask_bits.attr,
|
|
- &dev_attr_enabled.attr,
|
|
+ &dev_attr_enable.attr,
|
|
&dev_attr_broken_parity_status.attr,
|
|
&dev_attr_msi_bus.attr,
|
|
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
|
|
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
|
|
index be36adf..78c65d3 100644
|
|
--- a/drivers/pci/pci.c
|
|
+++ b/drivers/pci/pci.c
|
|
@@ -830,12 +830,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
|
|
|
|
if (!__pci_complete_power_transition(dev, state))
|
|
error = 0;
|
|
- /*
|
|
- * When aspm_policy is "powersave" this call ensures
|
|
- * that ASPM is configured.
|
|
- */
|
|
- if (!error && dev->bus->self)
|
|
- pcie_aspm_powersave_config_link(dev->bus->self);
|
|
|
|
return error;
|
|
}
|
|
@@ -1181,12 +1175,18 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
|
|
static int do_pci_enable_device(struct pci_dev *dev, int bars)
|
|
{
|
|
int err;
|
|
+ struct pci_dev *bridge;
|
|
u16 cmd;
|
|
u8 pin;
|
|
|
|
err = pci_set_power_state(dev, PCI_D0);
|
|
if (err < 0 && err != -EIO)
|
|
return err;
|
|
+
|
|
+ bridge = pci_upstream_bridge(dev);
|
|
+ if (bridge)
|
|
+ pcie_aspm_powersave_config_link(bridge);
|
|
+
|
|
err = pcibios_enable_device(dev, bars);
|
|
if (err < 0)
|
|
return err;
|
|
@@ -3187,7 +3187,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
|
|
{
|
|
struct pci_dev *pdev;
|
|
|
|
- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
|
|
+ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
|
|
+ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
|
|
return -ENOTTY;
|
|
|
|
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
|
|
@@ -3221,7 +3222,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
|
|
{
|
|
struct pci_dev *pdev;
|
|
|
|
- if (dev->subordinate || !dev->slot)
|
|
+ if (dev->subordinate || !dev->slot ||
|
|
+ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
|
|
return -ENOTTY;
|
|
|
|
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
|
|
@@ -3452,6 +3454,20 @@ int pci_try_reset_function(struct pci_dev *dev)
|
|
}
|
|
EXPORT_SYMBOL_GPL(pci_try_reset_function);
|
|
|
|
+/* Do any devices on or below this bus prevent a bus reset? */
|
|
+static bool pci_bus_resetable(struct pci_bus *bus)
|
|
+{
|
|
+ struct pci_dev *dev;
|
|
+
|
|
+ list_for_each_entry(dev, &bus->devices, bus_list) {
|
|
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
|
|
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* Lock devices from the top of the tree down */
|
|
static void pci_bus_lock(struct pci_bus *bus)
|
|
{
|
|
@@ -3502,6 +3518,22 @@ unlock:
|
|
return 0;
|
|
}
|
|
|
|
+/* Do any devices on or below this slot prevent a bus reset? */
|
|
+static bool pci_slot_resetable(struct pci_slot *slot)
|
|
+{
|
|
+ struct pci_dev *dev;
|
|
+
|
|
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
|
|
+ if (!dev->slot || dev->slot != slot)
|
|
+ continue;
|
|
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
|
|
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* Lock devices from the top of the tree down */
|
|
static void pci_slot_lock(struct pci_slot *slot)
|
|
{
|
|
@@ -3623,7 +3655,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
|
|
{
|
|
int rc;
|
|
|
|
- if (!slot)
|
|
+ if (!slot || !pci_slot_resetable(slot))
|
|
return -ENOTTY;
|
|
|
|
if (!probe)
|
|
@@ -3715,7 +3747,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
|
|
|
|
static int pci_bus_reset(struct pci_bus *bus, int probe)
|
|
{
|
|
- if (!bus->self)
|
|
+ if (!bus->self || !pci_bus_resetable(bus))
|
|
return -ENOTTY;
|
|
|
|
if (probe)
|
|
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
|
|
index 34ff702..5d3b456 100644
|
|
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
|
|
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
|
|
@@ -127,16 +127,8 @@ static const char *aer_agent_string[] = {
|
|
static void __print_tlp_header(struct pci_dev *dev,
|
|
struct aer_header_log_regs *t)
|
|
{
|
|
- unsigned char *tlp = (unsigned char *)&t;
|
|
-
|
|
- dev_err(&dev->dev, " TLP Header:"
|
|
- " %02x%02x%02x%02x %02x%02x%02x%02x"
|
|
- " %02x%02x%02x%02x %02x%02x%02x%02x\n",
|
|
- *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
|
|
- *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
|
|
- *(tlp + 11), *(tlp + 10), *(tlp + 9),
|
|
- *(tlp + 8), *(tlp + 15), *(tlp + 14),
|
|
- *(tlp + 13), *(tlp + 12));
|
|
+ dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n",
|
|
+ t->dw0, t->dw1, t->dw2, t->dw3);
|
|
}
|
|
|
|
static void __aer_print_error(struct pci_dev *dev,
|
|
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
|
|
index 6e34498..5b428db 100644
|
|
--- a/drivers/pci/probe.c
|
|
+++ b/drivers/pci/probe.c
|
|
@@ -214,14 +214,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
|
|
res->flags |= IORESOURCE_SIZEALIGN;
|
|
if (res->flags & IORESOURCE_IO) {
|
|
l &= PCI_BASE_ADDRESS_IO_MASK;
|
|
+ sz &= PCI_BASE_ADDRESS_IO_MASK;
|
|
mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
|
|
} else {
|
|
l &= PCI_BASE_ADDRESS_MEM_MASK;
|
|
+ sz &= PCI_BASE_ADDRESS_MEM_MASK;
|
|
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
|
|
}
|
|
} else {
|
|
res->flags |= (l & IORESOURCE_ROM_ENABLE);
|
|
l &= PCI_ROM_ADDRESS_MASK;
|
|
+ sz &= PCI_ROM_ADDRESS_MASK;
|
|
mask = (u32)PCI_ROM_ADDRESS_MASK;
|
|
}
|
|
|
|
@@ -395,15 +398,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
|
|
{
|
|
struct pci_dev *dev = child->self;
|
|
u16 mem_base_lo, mem_limit_lo;
|
|
- unsigned long base, limit;
|
|
+ u64 base64, limit64;
|
|
+ dma_addr_t base, limit;
|
|
struct pci_bus_region region;
|
|
struct resource *res;
|
|
|
|
res = child->resource[2];
|
|
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
|
|
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
|
|
- base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
|
|
- limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
|
|
+ base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
|
|
+ limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
|
|
|
|
if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
|
|
u32 mem_base_hi, mem_limit_hi;
|
|
@@ -417,18 +421,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
|
|
* this, just assume they are not being used.
|
|
*/
|
|
if (mem_base_hi <= mem_limit_hi) {
|
|
-#if BITS_PER_LONG == 64
|
|
- base |= ((unsigned long) mem_base_hi) << 32;
|
|
- limit |= ((unsigned long) mem_limit_hi) << 32;
|
|
-#else
|
|
- if (mem_base_hi || mem_limit_hi) {
|
|
- dev_err(&dev->dev, "can't handle 64-bit "
|
|
- "address space for bridge\n");
|
|
- return;
|
|
- }
|
|
-#endif
|
|
+ base64 |= (u64) mem_base_hi << 32;
|
|
+ limit64 |= (u64) mem_limit_hi << 32;
|
|
}
|
|
}
|
|
+
|
|
+ base = (dma_addr_t) base64;
|
|
+ limit = (dma_addr_t) limit64;
|
|
+
|
|
+ if (base != base64) {
|
|
+ dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
|
|
+ (unsigned long long) base64);
|
|
+ return;
|
|
+ }
|
|
+
|
|
if (base <= limit) {
|
|
res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
|
|
IORESOURCE_MEM | IORESOURCE_PREFETCH;
|
|
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
|
|
index 813f437..2afa480 100644
|
|
--- a/drivers/pci/quirks.c
|
|
+++ b/drivers/pci/quirks.c
|
|
@@ -24,6 +24,7 @@
|
|
#include <linux/ioport.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/ktime.h>
|
|
+#include <linux/mm.h>
|
|
#include <asm/dma.h> /* isa_dma_bridge_buggy */
|
|
#include "pci.h"
|
|
|
|
@@ -287,6 +288,25 @@ static void quirk_citrine(struct pci_dev *dev)
|
|
}
|
|
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
|
|
|
|
+/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
|
|
+static void quirk_extend_bar_to_page(struct pci_dev *dev)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
|
|
+ struct resource *r = &dev->resource[i];
|
|
+
|
|
+ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
|
|
+ r->end = PAGE_SIZE - 1;
|
|
+ r->start = 0;
|
|
+ r->flags |= IORESOURCE_UNSET;
|
|
+ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
|
|
+ i, r);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
|
|
+
|
|
/*
|
|
* S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
|
|
* If it's needed, re-allocate the region.
|
|
@@ -2770,12 +2790,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
|
|
|
|
static void fixup_ti816x_class(struct pci_dev *dev)
|
|
{
|
|
+ u32 class = dev->class;
|
|
+
|
|
/* TI 816x devices do not have class code set when in PCIe boot mode */
|
|
- dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
|
|
- dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
|
|
+ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
|
|
+ dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
|
|
+ class, dev->class);
|
|
}
|
|
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
|
|
- PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
|
|
+ PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
|
|
|
|
/* Some PCIe devices do not work reliably with the claimed maximum
|
|
* payload size supported.
|
|
@@ -2988,6 +3011,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
|
|
DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
|
|
quirk_broken_intx_masking);
|
|
|
|
+static void quirk_no_bus_reset(struct pci_dev *dev)
|
|
+{
|
|
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Atheros AR93xx chips do not behave after a bus reset. The device will
|
|
+ * throw a Link Down error on AER-capable systems and regardless of AER,
|
|
+ * config space of the device is never accessible again and typically
|
|
+ * causes the system to hang or reset when access is attempted.
|
|
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
|
|
+ */
|
|
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
|
|
+
|
|
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
|
|
struct pci_fixup *end)
|
|
{
|
|
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
|
|
index 5d59572..5510c88 100644
|
|
--- a/drivers/pci/rom.c
|
|
+++ b/drivers/pci/rom.c
|
|
@@ -69,6 +69,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
|
|
{
|
|
void __iomem *image;
|
|
int last_image;
|
|
+ unsigned length;
|
|
|
|
image = rom;
|
|
do {
|
|
@@ -91,9 +92,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
|
|
if (readb(pds + 3) != 'R')
|
|
break;
|
|
last_image = readb(pds + 21) & 0x80;
|
|
- /* this length is reliable */
|
|
- image += readw(pds + 16) * 512;
|
|
- } while (!last_image);
|
|
+ length = readw(pds + 16);
|
|
+ image += length * 512;
|
|
+ } while (length && !last_image);
|
|
|
|
/* never return a size larger than the PCI resource window */
|
|
/* there are known ROMs that get the size wrong */
|
|
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
|
|
index 615a45a..582688f 100644
|
|
--- a/drivers/pcmcia/topic.h
|
|
+++ b/drivers/pcmcia/topic.h
|
|
@@ -104,6 +104,9 @@
|
|
#define TOPIC_EXCA_IF_CONTROL 0x3e /* 8 bit */
|
|
#define TOPIC_EXCA_IFC_33V_ENA 0x01
|
|
|
|
+#define TOPIC_PCI_CFG_PPBCN 0x3e /* 16-bit */
|
|
+#define TOPIC_PCI_CFG_PPBCN_WBEN 0x0400
|
|
+
|
|
static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
|
|
{
|
|
struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
|
|
@@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket)
|
|
static int topic95_override(struct yenta_socket *socket)
|
|
{
|
|
u8 fctrl;
|
|
+ u16 ppbcn;
|
|
|
|
/* enable 3.3V support for 16bit cards */
|
|
fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
|
|
@@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket)
|
|
/* tell yenta to use exca registers to power 16bit cards */
|
|
socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
|
|
|
|
+ /* Disable write buffers to prevent lockups under load with numerous
|
|
+ Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the
|
|
+ net. This is not a power-on default according to the datasheet
|
|
+ but some BIOSes seem to set it. */
|
|
+ if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0
|
|
+ && socket->dev->revision <= 7
|
|
+ && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) {
|
|
+ ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN;
|
|
+ pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn);
|
|
+ dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n");
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
|
|
index 34d56f7..86592dd 100644
|
|
--- a/drivers/phy/phy-core.c
|
|
+++ b/drivers/phy/phy-core.c
|
|
@@ -50,7 +50,9 @@ static void devm_phy_consume(struct device *dev, void *res)
|
|
|
|
static int devm_phy_match(struct device *dev, void *res, void *match_data)
|
|
{
|
|
- return res == match_data;
|
|
+ struct phy **phy = res;
|
|
+
|
|
+ return *phy == match_data;
|
|
}
|
|
|
|
static struct phy *phy_lookup(struct device *device, const char *port)
|
|
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
|
|
index c3ace1d..aaac359 100644
|
|
--- a/drivers/phy/phy-twl4030-usb.c
|
|
+++ b/drivers/phy/phy-twl4030-usb.c
|
|
@@ -34,6 +34,7 @@
|
|
#include <linux/delay.h>
|
|
#include <linux/usb/otg.h>
|
|
#include <linux/phy/phy.h>
|
|
+#include <linux/pm_runtime.h>
|
|
#include <linux/usb/musb-omap.h>
|
|
#include <linux/usb/ulpi.h>
|
|
#include <linux/i2c/twl.h>
|
|
@@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
|
|
}
|
|
}
|
|
|
|
-static int twl4030_phy_power_off(struct phy *phy)
|
|
+static int twl4030_usb_runtime_suspend(struct device *dev)
|
|
{
|
|
- struct twl4030_usb *twl = phy_get_drvdata(phy);
|
|
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
|
|
|
|
+ dev_dbg(twl->dev, "%s\n", __func__);
|
|
if (twl->asleep)
|
|
return 0;
|
|
|
|
twl4030_phy_power(twl, 0);
|
|
twl->asleep = 1;
|
|
- dev_dbg(twl->dev, "%s\n", __func__);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
-static void __twl4030_phy_power_on(struct twl4030_usb *twl)
|
|
+static int twl4030_usb_runtime_resume(struct device *dev)
|
|
{
|
|
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
|
|
+
|
|
+ dev_dbg(twl->dev, "%s\n", __func__);
|
|
+ if (!twl->asleep)
|
|
+ return 0;
|
|
+
|
|
twl4030_phy_power(twl, 1);
|
|
- twl4030_i2c_access(twl, 1);
|
|
- twl4030_usb_set_mode(twl, twl->usb_mode);
|
|
- if (twl->usb_mode == T2_USB_MODE_ULPI)
|
|
- twl4030_i2c_access(twl, 0);
|
|
+ twl->asleep = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int twl4030_phy_power_off(struct phy *phy)
|
|
+{
|
|
+ struct twl4030_usb *twl = phy_get_drvdata(phy);
|
|
+
|
|
+ dev_dbg(twl->dev, "%s\n", __func__);
|
|
+ pm_runtime_mark_last_busy(twl->dev);
|
|
+ pm_runtime_put_autosuspend(twl->dev);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static int twl4030_phy_power_on(struct phy *phy)
|
|
{
|
|
struct twl4030_usb *twl = phy_get_drvdata(phy);
|
|
|
|
- if (!twl->asleep)
|
|
- return 0;
|
|
- __twl4030_phy_power_on(twl);
|
|
- twl->asleep = 0;
|
|
dev_dbg(twl->dev, "%s\n", __func__);
|
|
+ pm_runtime_get_sync(twl->dev);
|
|
+ twl4030_i2c_access(twl, 1);
|
|
+ twl4030_usb_set_mode(twl, twl->usb_mode);
|
|
+ if (twl->usb_mode == T2_USB_MODE_ULPI)
|
|
+ twl4030_i2c_access(twl, 0);
|
|
|
|
/*
|
|
* XXX When VBUS gets driven after musb goes to A mode,
|
|
@@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
|
|
* USB_LINK_VBUS state. musb_hdrc won't care until it
|
|
* starts to handle softconnect right.
|
|
*/
|
|
+ if ((status == OMAP_MUSB_VBUS_VALID) ||
|
|
+ (status == OMAP_MUSB_ID_GROUND)) {
|
|
+ if (twl->asleep)
|
|
+ pm_runtime_get_sync(twl->dev);
|
|
+ } else {
|
|
+ if (!twl->asleep) {
|
|
+ pm_runtime_mark_last_busy(twl->dev);
|
|
+ pm_runtime_put_autosuspend(twl->dev);
|
|
+ }
|
|
+ }
|
|
omap_musb_mailbox(status);
|
|
}
|
|
- sysfs_notify(&twl->dev->kobj, NULL, "vbus");
|
|
+
|
|
+ /* don't schedule during sleep - irq works right then */
|
|
+ if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
|
|
+ cancel_delayed_work(&twl->id_workaround_work);
|
|
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
|
|
+ }
|
|
+
|
|
+ if (irq)
|
|
+ sysfs_notify(&twl->dev->kobj, NULL, "vbus");
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
@@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work)
|
|
{
|
|
struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
|
|
id_workaround_work.work);
|
|
- enum omap_musb_vbus_id_status status;
|
|
- bool status_changed = false;
|
|
-
|
|
- status = twl4030_usb_linkstat(twl);
|
|
-
|
|
- spin_lock_irq(&twl->lock);
|
|
- if (status >= 0 && status != twl->linkstat) {
|
|
- twl->linkstat = status;
|
|
- status_changed = true;
|
|
- }
|
|
- spin_unlock_irq(&twl->lock);
|
|
-
|
|
- if (status_changed) {
|
|
- dev_dbg(twl->dev, "handle missing status change to %d\n",
|
|
- status);
|
|
- omap_musb_mailbox(status);
|
|
- }
|
|
|
|
- /* don't schedule during sleep - irq works right then */
|
|
- if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) {
|
|
- cancel_delayed_work(&twl->id_workaround_work);
|
|
- schedule_delayed_work(&twl->id_workaround_work, HZ);
|
|
- }
|
|
+ twl4030_usb_irq(0, twl);
|
|
}
|
|
|
|
static int twl4030_phy_init(struct phy *phy)
|
|
@@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy)
|
|
struct twl4030_usb *twl = phy_get_drvdata(phy);
|
|
enum omap_musb_vbus_id_status status;
|
|
|
|
- /*
|
|
- * Start in sleep state, we'll get called through set_suspend()
|
|
- * callback when musb is runtime resumed and it's time to start.
|
|
- */
|
|
- __twl4030_phy_power(twl, 0);
|
|
- twl->asleep = 1;
|
|
-
|
|
+ pm_runtime_get_sync(twl->dev);
|
|
status = twl4030_usb_linkstat(twl);
|
|
twl->linkstat = status;
|
|
|
|
- if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) {
|
|
+ if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID)
|
|
omap_musb_mailbox(twl->linkstat);
|
|
- twl4030_phy_power_on(phy);
|
|
- }
|
|
|
|
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
|
|
+ pm_runtime_mark_last_busy(twl->dev);
|
|
+ pm_runtime_put_autosuspend(twl->dev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -650,6 +661,11 @@ static const struct phy_ops ops = {
|
|
.owner = THIS_MODULE,
|
|
};
|
|
|
|
+static const struct dev_pm_ops twl4030_usb_pm_ops = {
|
|
+ SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
|
|
+ twl4030_usb_runtime_resume, NULL)
|
|
+};
|
|
+
|
|
static int twl4030_usb_probe(struct platform_device *pdev)
|
|
{
|
|
struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
|
|
@@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
|
|
|
|
ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
|
|
|
|
+ pm_runtime_use_autosuspend(&pdev->dev);
|
|
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
|
|
+ pm_runtime_enable(&pdev->dev);
|
|
+ pm_runtime_get_sync(&pdev->dev);
|
|
+
|
|
/* Our job is to use irqs and status from the power module
|
|
* to keep the transceiver disabled when nothing's connected.
|
|
*
|
|
@@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev)
|
|
return status;
|
|
}
|
|
|
|
+ pm_runtime_mark_last_busy(&pdev->dev);
|
|
+ pm_runtime_put_autosuspend(twl->dev);
|
|
+
|
|
dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
|
|
return 0;
|
|
}
|
|
@@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
|
|
struct twl4030_usb *twl = platform_get_drvdata(pdev);
|
|
int val;
|
|
|
|
+ pm_runtime_get_sync(twl->dev);
|
|
cancel_delayed_work(&twl->id_workaround_work);
|
|
device_remove_file(twl->dev, &dev_attr_vbus);
|
|
|
|
@@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
|
|
|
|
/* disable complete OTG block */
|
|
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
|
|
-
|
|
- if (!twl->asleep)
|
|
- twl4030_phy_power(twl, 0);
|
|
+ pm_runtime_mark_last_busy(twl->dev);
|
|
+ pm_runtime_put(twl->dev);
|
|
|
|
return 0;
|
|
}
|
|
@@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = {
|
|
.remove = twl4030_usb_remove,
|
|
.driver = {
|
|
.name = "twl4030_usb",
|
|
+ .pm = &twl4030_usb_pm_ops,
|
|
.owner = THIS_MODULE,
|
|
.of_match_table = of_match_ptr(twl4030_usb_id_table),
|
|
},
|
|
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
|
|
index c0fe609..a2a79c7 100644
|
|
--- a/drivers/pinctrl/core.c
|
|
+++ b/drivers/pinctrl/core.c
|
|
@@ -1121,7 +1121,7 @@ void devm_pinctrl_put(struct pinctrl *p)
|
|
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
|
|
|
|
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
|
|
- bool dup, bool locked)
|
|
+ bool dup)
|
|
{
|
|
int i, ret;
|
|
struct pinctrl_maps *maps_node;
|
|
@@ -1189,11 +1189,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
|
|
maps_node->maps = maps;
|
|
}
|
|
|
|
- if (!locked)
|
|
- mutex_lock(&pinctrl_maps_mutex);
|
|
+ mutex_lock(&pinctrl_maps_mutex);
|
|
list_add_tail(&maps_node->node, &pinctrl_maps);
|
|
- if (!locked)
|
|
- mutex_unlock(&pinctrl_maps_mutex);
|
|
+ mutex_unlock(&pinctrl_maps_mutex);
|
|
|
|
return 0;
|
|
}
|
|
@@ -1208,7 +1206,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
|
|
int pinctrl_register_mappings(struct pinctrl_map const *maps,
|
|
unsigned num_maps)
|
|
{
|
|
- return pinctrl_register_map(maps, num_maps, true, false);
|
|
+ return pinctrl_register_map(maps, num_maps, true);
|
|
}
|
|
|
|
void pinctrl_unregister_map(struct pinctrl_map const *map)
|
|
@@ -1812,14 +1810,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
|
|
if (pctldev == NULL)
|
|
return;
|
|
|
|
- mutex_lock(&pinctrldev_list_mutex);
|
|
mutex_lock(&pctldev->mutex);
|
|
-
|
|
pinctrl_remove_device_debugfs(pctldev);
|
|
+ mutex_unlock(&pctldev->mutex);
|
|
|
|
if (!IS_ERR(pctldev->p))
|
|
pinctrl_put(pctldev->p);
|
|
|
|
+ mutex_lock(&pinctrldev_list_mutex);
|
|
+ mutex_lock(&pctldev->mutex);
|
|
/* TODO: check that no pinmuxes are still active? */
|
|
list_del(&pctldev->node);
|
|
/* Destroy descriptor tree */
|
|
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
|
|
index 75476b3..b24ea84 100644
|
|
--- a/drivers/pinctrl/core.h
|
|
+++ b/drivers/pinctrl/core.h
|
|
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
|
|
}
|
|
|
|
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
|
|
- bool dup, bool locked);
|
|
+ bool dup);
|
|
void pinctrl_unregister_map(struct pinctrl_map const *map);
|
|
|
|
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
|
|
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
|
|
index a53d15c..243e533 100644
|
|
--- a/drivers/pinctrl/devicetree.c
|
|
+++ b/drivers/pinctrl/devicetree.c
|
|
@@ -93,7 +93,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
|
|
dt_map->num_maps = num_maps;
|
|
list_add_tail(&dt_map->node, &p->dt_maps);
|
|
|
|
- return pinctrl_register_map(map, num_maps, false, true);
|
|
+ return pinctrl_register_map(map, num_maps, false);
|
|
}
|
|
|
|
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
|
|
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
|
|
index ae1f760..bb525b1 100644
|
|
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
|
|
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
|
|
@@ -358,11 +358,11 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
|
|
MPP_MODE(64,
|
|
MPP_FUNCTION(0x0, "gpio", NULL),
|
|
MPP_FUNCTION(0x1, "spi0", "miso"),
|
|
- MPP_FUNCTION(0x2, "spi0-1", "cs1")),
|
|
+ MPP_FUNCTION(0x2, "spi0", "cs1")),
|
|
MPP_MODE(65,
|
|
MPP_FUNCTION(0x0, "gpio", NULL),
|
|
MPP_FUNCTION(0x1, "spi0", "mosi"),
|
|
- MPP_FUNCTION(0x2, "spi0-1", "cs2")),
|
|
+ MPP_FUNCTION(0x2, "spi0", "cs2")),
|
|
};
|
|
|
|
static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
|
|
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
|
|
index 843a51f..d918c51 100644
|
|
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
|
|
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
|
|
@@ -14,10 +14,7 @@
|
|
* available: mv78230, mv78260 and mv78460. From a pin muxing
|
|
* perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460
|
|
* both have 67 MPP pins (more GPIOs and address lines for the memory
|
|
- * bus mainly). The only difference between the mv78260 and the
|
|
- * mv78460 in terms of pin muxing is the addition of two functions on
|
|
- * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two
|
|
- * cores, mv78460 has four cores).
|
|
+ * bus mainly).
|
|
*/
|
|
|
|
#include <linux/err.h>
|
|
@@ -159,20 +156,17 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_MODE(24,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "sata1", "prsnt", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x3, "tdm", "rst", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x4, "lcd", "hsync", V_MV78230_PLUS)),
|
|
MPP_MODE(25,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "sata0", "prsnt", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x3, "tdm", "pclk", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x4, "lcd", "vsync", V_MV78230_PLUS)),
|
|
MPP_MODE(26,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x3, "tdm", "fsync", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x4, "lcd", "clk", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x4, "lcd", "clk", V_MV78230_PLUS)),
|
|
MPP_MODE(27,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "ptp", "trig", V_MV78230_PLUS),
|
|
@@ -187,8 +181,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "ptp", "clk", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x3, "tdm", "int0", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk", V_MV78230_PLUS)),
|
|
MPP_MODE(30,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "sd0", "clk", V_MV78230_PLUS),
|
|
@@ -196,13 +189,11 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_MODE(31,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "sd0", "cmd", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x3, "tdm", "int2", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x3, "tdm", "int2", V_MV78230_PLUS)),
|
|
MPP_MODE(32,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "sd0", "d0", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x3, "tdm", "int3", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x3, "tdm", "int3", V_MV78230_PLUS)),
|
|
MPP_MODE(33,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "sd0", "d1", V_MV78230_PLUS),
|
|
@@ -234,7 +225,6 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "spi", "cs1", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x2, "uart2", "cts", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0", V_MV78230_PLUS)),
|
|
MPP_MODE(41,
|
|
@@ -249,15 +239,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_VAR_FUNCTION(0x1, "uart2", "rxd", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x2, "uart0", "cts", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x3, "tdm", "int7", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x4, "tdm-1", "timer", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x4, "tdm-1", "timer", V_MV78230_PLUS)),
|
|
MPP_MODE(43,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "uart2", "txd", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x2, "uart0", "rts", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x3, "spi", "cs3", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x4, "pcie", "rstout", V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd", V_MV78460)),
|
|
+ MPP_VAR_FUNCTION(0x4, "pcie", "rstout", V_MV78230_PLUS)),
|
|
MPP_MODE(44,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "uart2", "cts", V_MV78230_PLUS),
|
|
@@ -286,7 +274,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3", V_MV78230_PLUS)),
|
|
MPP_MODE(48,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
|
|
- MPP_VAR_FUNCTION(0x1, "tclk", NULL, V_MV78230_PLUS),
|
|
+ MPP_VAR_FUNCTION(0x1, "dev", "clkout", V_MV78230_PLUS),
|
|
MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)),
|
|
MPP_MODE(49,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
|
|
@@ -308,16 +296,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
|
|
MPP_VAR_FUNCTION(0x1, "dev", "ad19", V_MV78260_PLUS)),
|
|
MPP_MODE(55,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
|
|
- MPP_VAR_FUNCTION(0x1, "dev", "ad20", V_MV78260_PLUS),
|
|
- MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd", V_MV78260_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x1, "dev", "ad20", V_MV78260_PLUS)),
|
|
MPP_MODE(56,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
|
|
- MPP_VAR_FUNCTION(0x1, "dev", "ad21", V_MV78260_PLUS),
|
|
- MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd", V_MV78260_PLUS)),
|
|
+ MPP_VAR_FUNCTION(0x1, "dev", "ad21", V_MV78260_PLUS)),
|
|
MPP_MODE(57,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
|
|
- MPP_VAR_FUNCTION(0x1, "dev", "ad22", V_MV78260_PLUS),
|
|
- MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd", V_MV78460)),
|
|
+ MPP_VAR_FUNCTION(0x1, "dev", "ad22", V_MV78260_PLUS)),
|
|
MPP_MODE(58,
|
|
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
|
|
MPP_VAR_FUNCTION(0x1, "dev", "ad23", V_MV78260_PLUS)),
|
|
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
|
|
index 665b96b..eb9f190 100644
|
|
--- a/drivers/pinctrl/pinctrl-baytrail.c
|
|
+++ b/drivers/pinctrl/pinctrl-baytrail.c
|
|
@@ -263,7 +263,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
|
|
spin_lock_irqsave(&vg->lock, flags);
|
|
|
|
reg_val = readl(reg) | BYT_DIR_MASK;
|
|
- reg_val &= ~BYT_OUTPUT_EN;
|
|
+ reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
|
|
|
|
if (value)
|
|
writel(reg_val | BYT_LEVEL, reg);
|
|
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
|
|
index c91f69b3..dcfcaea 100644
|
|
--- a/drivers/platform/x86/acer-wmi.c
|
|
+++ b/drivers/platform/x86/acer-wmi.c
|
|
@@ -570,6 +570,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
|
|
},
|
|
},
|
|
+ {
|
|
+ /*
|
|
+ * Note no video_set_backlight_video_vendor, we must use the
|
|
+ * acer interface, as there is no native backlight interface.
|
|
+ */
|
|
+ .ident = "Acer KAV80",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
|
|
+ },
|
|
+ },
|
|
{}
|
|
};
|
|
|
|
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
|
|
index 7297df2..54407a8 100644
|
|
--- a/drivers/platform/x86/compal-laptop.c
|
|
+++ b/drivers/platform/x86/compal-laptop.c
|
|
@@ -1027,9 +1027,9 @@ static int compal_probe(struct platform_device *pdev)
|
|
if (err)
|
|
return err;
|
|
|
|
- hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
|
|
- DRIVER_NAME, data,
|
|
- compal_hwmon_groups);
|
|
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
|
|
+ DRIVER_NAME, data,
|
|
+ compal_hwmon_groups);
|
|
if (IS_ERR(hwmon_dev)) {
|
|
err = PTR_ERR(hwmon_dev);
|
|
goto remove;
|
|
@@ -1037,7 +1037,9 @@ static int compal_probe(struct platform_device *pdev)
|
|
|
|
/* Power supply */
|
|
initialize_power_supply_data(data);
|
|
- power_supply_register(&compal_device->dev, &data->psy);
|
|
+ err = power_supply_register(&compal_device->dev, &data->psy);
|
|
+ if (err < 0)
|
|
+ goto remove;
|
|
|
|
platform_set_drvdata(pdev, data);
|
|
|
|
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
|
|
index fed4111..1beb232 100644
|
|
--- a/drivers/platform/x86/dell-laptop.c
|
|
+++ b/drivers/platform/x86/dell-laptop.c
|
|
@@ -272,7 +272,6 @@ static struct dmi_system_id dell_quirks[] = {
|
|
};
|
|
|
|
static struct calling_interface_buffer *buffer;
|
|
-static struct page *bufferpage;
|
|
static DEFINE_MUTEX(buffer_mutex);
|
|
|
|
static int hwswitch_state;
|
|
@@ -825,12 +824,11 @@ static int __init dell_init(void)
|
|
* Allocate buffer below 4GB for SMI data--only 32-bit physical addr
|
|
* is passed to SMI handler.
|
|
*/
|
|
- bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
|
|
- if (!bufferpage) {
|
|
+ buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
|
|
+ if (!buffer) {
|
|
ret = -ENOMEM;
|
|
goto fail_buffer;
|
|
}
|
|
- buffer = page_address(bufferpage);
|
|
|
|
ret = dell_setup_rfkill();
|
|
|
|
@@ -892,7 +890,7 @@ fail_backlight:
|
|
cancel_delayed_work_sync(&dell_rfkill_work);
|
|
dell_cleanup_rfkill();
|
|
fail_rfkill:
|
|
- free_page((unsigned long)bufferpage);
|
|
+ free_page((unsigned long)buffer);
|
|
fail_buffer:
|
|
platform_device_del(platform_device);
|
|
fail_platform_device2:
|
|
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
|
|
index 390e8e3..25721bf 100644
|
|
--- a/drivers/platform/x86/dell-wmi.c
|
|
+++ b/drivers/platform/x86/dell-wmi.c
|
|
@@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context)
|
|
const struct key_entry *key;
|
|
int reported_key;
|
|
u16 *buffer_entry = (u16 *)obj->buffer.pointer;
|
|
+ int buffer_size = obj->buffer.length/2;
|
|
|
|
- if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
|
|
+ if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
|
|
pr_info("Received unknown WMI event (0x%x)\n",
|
|
buffer_entry[1]);
|
|
kfree(obj);
|
|
return;
|
|
}
|
|
|
|
- if (dell_new_hk_type || buffer_entry[1] == 0x0)
|
|
+ if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
|
|
reported_key = (int)buffer_entry[2];
|
|
- else
|
|
+ else if (buffer_size >= 2)
|
|
reported_key = (int)buffer_entry[1] & 0xffff;
|
|
+ else {
|
|
+ pr_info("Received unknown WMI event\n");
|
|
+ kfree(obj);
|
|
+ return;
|
|
+ }
|
|
|
|
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
|
|
reported_key);
|
|
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
|
|
index 3dc9344..07fbcb0 100644
|
|
--- a/drivers/platform/x86/hp_accel.c
|
|
+++ b/drivers/platform/x86/hp_accel.c
|
|
@@ -237,6 +237,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
|
|
AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
|
|
AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
|
|
AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
|
|
+ AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
|
|
{ NULL, }
|
|
/* Laptop models without axis info (yet):
|
|
* "NC6910" "HP Compaq 6910"
|
|
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
|
|
index 6dd060a..0d1a5d4 100644
|
|
--- a/drivers/platform/x86/ideapad-laptop.c
|
|
+++ b/drivers/platform/x86/ideapad-laptop.c
|
|
@@ -461,8 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
|
|
static int ideapad_rfk_set(void *data, bool blocked)
|
|
{
|
|
struct ideapad_rfk_priv *priv = data;
|
|
+ int opcode = ideapad_rfk_data[priv->dev].opcode;
|
|
|
|
- return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
|
|
+ return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
|
|
}
|
|
|
|
static struct rfkill_ops ideapad_rfk_ops = {
|
|
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
|
|
index c31aa07..da1c6cb 100644
|
|
--- a/drivers/pnp/pnpacpi/core.c
|
|
+++ b/drivers/pnp/pnpacpi/core.c
|
|
@@ -339,8 +339,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
|
|
struct pnp_dev *pnp = _pnp;
|
|
|
|
/* true means it matched */
|
|
- return !acpi->physical_node_count
|
|
- && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
|
|
+ return pnp->data == acpi;
|
|
}
|
|
|
|
static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
|
|
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
|
|
index de029bb..5ccca87 100644
|
|
--- a/drivers/power/88pm860x_charger.c
|
|
+++ b/drivers/power/88pm860x_charger.c
|
|
@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev)
|
|
return 0;
|
|
|
|
out_irq:
|
|
+ power_supply_unregister(&info->usb);
|
|
while (--i >= 0)
|
|
free_irq(info->irq[i], info);
|
|
out:
|
|
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
|
|
index 79a37f6..1f49986 100644
|
|
--- a/drivers/power/bq2415x_charger.c
|
|
+++ b/drivers/power/bq2415x_charger.c
|
|
@@ -840,8 +840,7 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
|
|
if (bq->automode < 1)
|
|
return NOTIFY_OK;
|
|
|
|
- sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
|
|
- bq2415x_set_mode(bq, bq->reported_mode);
|
|
+ schedule_delayed_work(&bq->work, 0);
|
|
|
|
return NOTIFY_OK;
|
|
}
|
|
@@ -892,6 +891,11 @@ static void bq2415x_timer_work(struct work_struct *work)
|
|
int error;
|
|
int boost;
|
|
|
|
+ if (bq->automode > 0 && (bq->reported_mode != bq->mode)) {
|
|
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
|
|
+ bq2415x_set_mode(bq, bq->reported_mode);
|
|
+ }
|
|
+
|
|
if (!bq->autotimer)
|
|
return;
|
|
|
|
@@ -1575,8 +1579,15 @@ static int bq2415x_probe(struct i2c_client *client,
|
|
if (np) {
|
|
bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection");
|
|
|
|
- if (!bq->notify_psy)
|
|
- return -EPROBE_DEFER;
|
|
+ if (IS_ERR(bq->notify_psy)) {
|
|
+ dev_info(&client->dev,
|
|
+ "no 'ti,usb-charger-detection' property (err=%ld)\n",
|
|
+ PTR_ERR(bq->notify_psy));
|
|
+ bq->notify_psy = NULL;
|
|
+ } else if (!bq->notify_psy) {
|
|
+ ret = -EPROBE_DEFER;
|
|
+ goto error_2;
|
|
+ }
|
|
}
|
|
else if (pdata->notify_device)
|
|
bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
|
|
@@ -1598,27 +1609,27 @@ static int bq2415x_probe(struct i2c_client *client,
|
|
ret = of_property_read_u32(np, "ti,current-limit",
|
|
&bq->init_data.current_limit);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto error_2;
|
|
ret = of_property_read_u32(np, "ti,weak-battery-voltage",
|
|
&bq->init_data.weak_battery_voltage);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto error_2;
|
|
ret = of_property_read_u32(np, "ti,battery-regulation-voltage",
|
|
&bq->init_data.battery_regulation_voltage);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto error_2;
|
|
ret = of_property_read_u32(np, "ti,charge-current",
|
|
&bq->init_data.charge_current);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto error_2;
|
|
ret = of_property_read_u32(np, "ti,termination-current",
|
|
&bq->init_data.termination_current);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto error_2;
|
|
ret = of_property_read_u32(np, "ti,resistor-sense",
|
|
&bq->init_data.resistor_sense);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto error_2;
|
|
} else {
|
|
memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
|
|
}
|
|
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
|
|
index ad3ff8f..e4c95e1 100644
|
|
--- a/drivers/power/bq24190_charger.c
|
|
+++ b/drivers/power/bq24190_charger.c
|
|
@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger)
|
|
charger->properties = bq24190_charger_properties;
|
|
charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
|
|
charger->supplied_to = bq24190_charger_supplied_to;
|
|
- charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
|
|
+ charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
|
|
charger->get_property = bq24190_charger_get_property;
|
|
charger->set_property = bq24190_charger_set_property;
|
|
charger->property_is_writeable = bq24190_charger_property_is_writeable;
|
|
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
|
|
index 9e4dab4..03bfac3 100644
|
|
--- a/drivers/power/charger-manager.c
|
|
+++ b/drivers/power/charger-manager.c
|
|
@@ -97,6 +97,7 @@ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
|
|
static bool is_batt_present(struct charger_manager *cm)
|
|
{
|
|
union power_supply_propval val;
|
|
+ struct power_supply *psy;
|
|
bool present = false;
|
|
int i, ret;
|
|
|
|
@@ -107,16 +108,27 @@ static bool is_batt_present(struct charger_manager *cm)
|
|
case CM_NO_BATTERY:
|
|
break;
|
|
case CM_FUEL_GAUGE:
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
|
|
+ if (!psy)
|
|
+ break;
|
|
+
|
|
+ ret = psy->get_property(psy,
|
|
POWER_SUPPLY_PROP_PRESENT, &val);
|
|
if (ret == 0 && val.intval)
|
|
present = true;
|
|
break;
|
|
case CM_CHARGER_STAT:
|
|
- for (i = 0; cm->charger_stat[i]; i++) {
|
|
- ret = cm->charger_stat[i]->get_property(
|
|
- cm->charger_stat[i],
|
|
- POWER_SUPPLY_PROP_PRESENT, &val);
|
|
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
|
|
+ psy = power_supply_get_by_name(
|
|
+ cm->desc->psy_charger_stat[i]);
|
|
+ if (!psy) {
|
|
+ dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
|
|
+ cm->desc->psy_charger_stat[i]);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT,
|
|
+ &val);
|
|
if (ret == 0 && val.intval) {
|
|
present = true;
|
|
break;
|
|
@@ -139,14 +151,20 @@ static bool is_batt_present(struct charger_manager *cm)
|
|
static bool is_ext_pwr_online(struct charger_manager *cm)
|
|
{
|
|
union power_supply_propval val;
|
|
+ struct power_supply *psy;
|
|
bool online = false;
|
|
int i, ret;
|
|
|
|
/* If at least one of them has one, it's yes. */
|
|
- for (i = 0; cm->charger_stat[i]; i++) {
|
|
- ret = cm->charger_stat[i]->get_property(
|
|
- cm->charger_stat[i],
|
|
- POWER_SUPPLY_PROP_ONLINE, &val);
|
|
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
|
|
+ psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
|
|
+ if (!psy) {
|
|
+ dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
|
|
+ cm->desc->psy_charger_stat[i]);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
|
|
if (ret == 0 && val.intval) {
|
|
online = true;
|
|
break;
|
|
@@ -167,12 +185,14 @@ static bool is_ext_pwr_online(struct charger_manager *cm)
|
|
static int get_batt_uV(struct charger_manager *cm, int *uV)
|
|
{
|
|
union power_supply_propval val;
|
|
+ struct power_supply *fuel_gauge;
|
|
int ret;
|
|
|
|
- if (!cm->fuel_gauge)
|
|
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge)
|
|
return -ENODEV;
|
|
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
|
|
if (ret)
|
|
return ret;
|
|
@@ -189,6 +209,7 @@ static bool is_charging(struct charger_manager *cm)
|
|
{
|
|
int i, ret;
|
|
bool charging = false;
|
|
+ struct power_supply *psy;
|
|
union power_supply_propval val;
|
|
|
|
/* If there is no battery, it cannot be charged */
|
|
@@ -196,17 +217,22 @@ static bool is_charging(struct charger_manager *cm)
|
|
return false;
|
|
|
|
/* If at least one of the charger is charging, return yes */
|
|
- for (i = 0; cm->charger_stat[i]; i++) {
|
|
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
|
|
/* 1. The charger sholuld not be DISABLED */
|
|
if (cm->emergency_stop)
|
|
continue;
|
|
if (!cm->charger_enabled)
|
|
continue;
|
|
|
|
+ psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
|
|
+ if (!psy) {
|
|
+ dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
|
|
+ cm->desc->psy_charger_stat[i]);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
/* 2. The charger should be online (ext-power) */
|
|
- ret = cm->charger_stat[i]->get_property(
|
|
- cm->charger_stat[i],
|
|
- POWER_SUPPLY_PROP_ONLINE, &val);
|
|
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
|
|
if (ret) {
|
|
dev_warn(cm->dev, "Cannot read ONLINE value from %s\n",
|
|
cm->desc->psy_charger_stat[i]);
|
|
@@ -219,9 +245,7 @@ static bool is_charging(struct charger_manager *cm)
|
|
* 3. The charger should not be FULL, DISCHARGING,
|
|
* or NOT_CHARGING.
|
|
*/
|
|
- ret = cm->charger_stat[i]->get_property(
|
|
- cm->charger_stat[i],
|
|
- POWER_SUPPLY_PROP_STATUS, &val);
|
|
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
|
|
if (ret) {
|
|
dev_warn(cm->dev, "Cannot read STATUS value from %s\n",
|
|
cm->desc->psy_charger_stat[i]);
|
|
@@ -248,6 +272,7 @@ static bool is_full_charged(struct charger_manager *cm)
|
|
{
|
|
struct charger_desc *desc = cm->desc;
|
|
union power_supply_propval val;
|
|
+ struct power_supply *fuel_gauge;
|
|
int ret = 0;
|
|
int uV;
|
|
|
|
@@ -255,11 +280,15 @@ static bool is_full_charged(struct charger_manager *cm)
|
|
if (!is_batt_present(cm))
|
|
return false;
|
|
|
|
- if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) {
|
|
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge)
|
|
+ return false;
|
|
+
|
|
+ if (desc->fullbatt_full_capacity > 0) {
|
|
val.intval = 0;
|
|
|
|
/* Not full if capacity of fuel gauge isn't full */
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CHARGE_FULL, &val);
|
|
if (!ret && val.intval > desc->fullbatt_full_capacity)
|
|
return true;
|
|
@@ -273,10 +302,10 @@ static bool is_full_charged(struct charger_manager *cm)
|
|
}
|
|
|
|
/* Full, if the capacity is more than fullbatt_soc */
|
|
- if (cm->fuel_gauge && desc->fullbatt_soc > 0) {
|
|
+ if (desc->fullbatt_soc > 0) {
|
|
val.intval = 0;
|
|
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CAPACITY, &val);
|
|
if (!ret && val.intval >= desc->fullbatt_soc)
|
|
return true;
|
|
@@ -551,6 +580,20 @@ static int check_charging_duration(struct charger_manager *cm)
|
|
return ret;
|
|
}
|
|
|
|
+static int cm_get_battery_temperature_by_psy(struct charger_manager *cm,
|
|
+ int *temp)
|
|
+{
|
|
+ struct power_supply *fuel_gauge;
|
|
+
|
|
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge)
|
|
+ return -ENODEV;
|
|
+
|
|
+ return fuel_gauge->get_property(fuel_gauge,
|
|
+ POWER_SUPPLY_PROP_TEMP,
|
|
+ (union power_supply_propval *)temp);
|
|
+}
|
|
+
|
|
static int cm_get_battery_temperature(struct charger_manager *cm,
|
|
int *temp)
|
|
{
|
|
@@ -560,15 +603,18 @@ static int cm_get_battery_temperature(struct charger_manager *cm,
|
|
return -ENODEV;
|
|
|
|
#ifdef CONFIG_THERMAL
|
|
- ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
|
|
- if (!ret)
|
|
- /* Calibrate temperature unit */
|
|
- *temp /= 100;
|
|
-#else
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
- POWER_SUPPLY_PROP_TEMP,
|
|
- (union power_supply_propval *)temp);
|
|
+ if (cm->tzd_batt) {
|
|
+ ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
|
|
+ if (!ret)
|
|
+ /* Calibrate temperature unit */
|
|
+ *temp /= 100;
|
|
+ } else
|
|
#endif
|
|
+ {
|
|
+ /* if-else continued from CONFIG_THERMAL */
|
|
+ ret = cm_get_battery_temperature_by_psy(cm, temp);
|
|
+ }
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -827,6 +873,7 @@ static int charger_get_property(struct power_supply *psy,
|
|
struct charger_manager *cm = container_of(psy,
|
|
struct charger_manager, charger_psy);
|
|
struct charger_desc *desc = cm->desc;
|
|
+ struct power_supply *fuel_gauge;
|
|
int ret = 0;
|
|
int uV;
|
|
|
|
@@ -857,14 +904,20 @@ static int charger_get_property(struct power_supply *psy,
|
|
ret = get_batt_uV(cm, &val->intval);
|
|
break;
|
|
case POWER_SUPPLY_PROP_CURRENT_NOW:
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge) {
|
|
+ ret = -ENODEV;
|
|
+ break;
|
|
+ }
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CURRENT_NOW, val);
|
|
break;
|
|
case POWER_SUPPLY_PROP_TEMP:
|
|
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
|
|
return cm_get_battery_temperature(cm, &val->intval);
|
|
case POWER_SUPPLY_PROP_CAPACITY:
|
|
- if (!cm->fuel_gauge) {
|
|
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge) {
|
|
ret = -ENODEV;
|
|
break;
|
|
}
|
|
@@ -875,7 +928,7 @@ static int charger_get_property(struct power_supply *psy,
|
|
break;
|
|
}
|
|
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CAPACITY, val);
|
|
if (ret)
|
|
break;
|
|
@@ -924,7 +977,14 @@ static int charger_get_property(struct power_supply *psy,
|
|
break;
|
|
case POWER_SUPPLY_PROP_CHARGE_NOW:
|
|
if (is_charging(cm)) {
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ fuel_gauge = power_supply_get_by_name(
|
|
+ cm->desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge) {
|
|
+ ret = -ENODEV;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CHARGE_NOW,
|
|
val);
|
|
if (ret) {
|
|
@@ -1485,14 +1545,15 @@ err:
|
|
return ret;
|
|
}
|
|
|
|
-static int cm_init_thermal_data(struct charger_manager *cm)
|
|
+static int cm_init_thermal_data(struct charger_manager *cm,
|
|
+ struct power_supply *fuel_gauge)
|
|
{
|
|
struct charger_desc *desc = cm->desc;
|
|
union power_supply_propval val;
|
|
int ret;
|
|
|
|
/* Verify whether fuel gauge provides battery temperature */
|
|
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ ret = fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_TEMP, &val);
|
|
|
|
if (!ret) {
|
|
@@ -1502,8 +1563,6 @@ static int cm_init_thermal_data(struct charger_manager *cm)
|
|
cm->desc->measure_battery_temp = true;
|
|
}
|
|
#ifdef CONFIG_THERMAL
|
|
- cm->tzd_batt = cm->fuel_gauge->tzd;
|
|
-
|
|
if (ret && desc->thermal_zone) {
|
|
cm->tzd_batt =
|
|
thermal_zone_get_zone_by_name(desc->thermal_zone);
|
|
@@ -1666,6 +1725,7 @@ static int charger_manager_probe(struct platform_device *pdev)
|
|
int ret = 0, i = 0;
|
|
int j = 0;
|
|
union power_supply_propval val;
|
|
+ struct power_supply *fuel_gauge;
|
|
|
|
if (g_desc && !rtc_dev && g_desc->rtc_name) {
|
|
rtc_dev = rtc_class_open(g_desc->rtc_name);
|
|
@@ -1720,27 +1780,29 @@ static int charger_manager_probe(struct platform_device *pdev)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (!desc->psy_fuel_gauge) {
|
|
+ dev_err(&pdev->dev, "No fuel gauge power supply defined\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
/* Counting index only */
|
|
while (desc->psy_charger_stat[i])
|
|
i++;
|
|
|
|
- cm->charger_stat = devm_kzalloc(&pdev->dev,
|
|
- sizeof(struct power_supply *) * i, GFP_KERNEL);
|
|
- if (!cm->charger_stat)
|
|
- return -ENOMEM;
|
|
-
|
|
+ /* Check if charger's supplies are present at probe */
|
|
for (i = 0; desc->psy_charger_stat[i]; i++) {
|
|
- cm->charger_stat[i] = power_supply_get_by_name(
|
|
- desc->psy_charger_stat[i]);
|
|
- if (!cm->charger_stat[i]) {
|
|
+ struct power_supply *psy;
|
|
+
|
|
+ psy = power_supply_get_by_name(desc->psy_charger_stat[i]);
|
|
+ if (!psy) {
|
|
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
|
|
desc->psy_charger_stat[i]);
|
|
return -ENODEV;
|
|
}
|
|
}
|
|
|
|
- cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
|
|
- if (!cm->fuel_gauge) {
|
|
+ fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
|
|
+ if (!fuel_gauge) {
|
|
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
|
|
desc->psy_fuel_gauge);
|
|
return -ENODEV;
|
|
@@ -1783,13 +1845,13 @@ static int charger_manager_probe(struct platform_device *pdev)
|
|
cm->charger_psy.num_properties = psy_default.num_properties;
|
|
|
|
/* Find which optional psy-properties are available */
|
|
- if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ if (!fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
|
|
cm->charger_psy.properties[cm->charger_psy.num_properties] =
|
|
POWER_SUPPLY_PROP_CHARGE_NOW;
|
|
cm->charger_psy.num_properties++;
|
|
}
|
|
- if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
|
|
+ if (!fuel_gauge->get_property(fuel_gauge,
|
|
POWER_SUPPLY_PROP_CURRENT_NOW,
|
|
&val)) {
|
|
cm->charger_psy.properties[cm->charger_psy.num_properties] =
|
|
@@ -1797,7 +1859,7 @@ static int charger_manager_probe(struct platform_device *pdev)
|
|
cm->charger_psy.num_properties++;
|
|
}
|
|
|
|
- ret = cm_init_thermal_data(cm);
|
|
+ ret = cm_init_thermal_data(cm, fuel_gauge);
|
|
if (ret) {
|
|
dev_err(&pdev->dev, "Failed to initialize thermal data\n");
|
|
cm->desc->measure_battery_temp = false;
|
|
@@ -2054,8 +2116,8 @@ static bool find_power_supply(struct charger_manager *cm,
|
|
int i;
|
|
bool found = false;
|
|
|
|
- for (i = 0; cm->charger_stat[i]; i++) {
|
|
- if (psy == cm->charger_stat[i]) {
|
|
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
|
|
+ if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) {
|
|
found = true;
|
|
break;
|
|
}
|
|
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
|
|
index a0024b2..86e03c6 100644
|
|
--- a/drivers/power/gpio-charger.c
|
|
+++ b/drivers/power/gpio-charger.c
|
|
@@ -168,7 +168,7 @@ static int gpio_charger_suspend(struct device *dev)
|
|
|
|
if (device_may_wakeup(dev))
|
|
gpio_charger->wakeup_enabled =
|
|
- enable_irq_wake(gpio_charger->irq);
|
|
+ !enable_irq_wake(gpio_charger->irq);
|
|
|
|
return 0;
|
|
}
|
|
@@ -178,7 +178,7 @@ static int gpio_charger_resume(struct device *dev)
|
|
struct platform_device *pdev = to_platform_device(dev);
|
|
struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
|
|
|
|
- if (gpio_charger->wakeup_enabled)
|
|
+ if (device_may_wakeup(dev) && gpio_charger->wakeup_enabled)
|
|
disable_irq_wake(gpio_charger->irq);
|
|
power_supply_changed(&gpio_charger->charger);
|
|
|
|
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
|
|
index ed49b50..72da2a6 100644
|
|
--- a/drivers/power/lp8788-charger.c
|
|
+++ b/drivers/power/lp8788-charger.c
|
|
@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
|
|
pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
|
|
pchg->battery.get_property = lp8788_battery_get_property;
|
|
|
|
- if (power_supply_register(&pdev->dev, &pchg->battery))
|
|
+ if (power_supply_register(&pdev->dev, &pchg->battery)) {
|
|
+ power_supply_unregister(&pchg->charger);
|
|
return -EPERM;
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
|
|
index 7ef445a..cf90760 100644
|
|
--- a/drivers/power/twl4030_madc_battery.c
|
|
+++ b/drivers/power/twl4030_madc_battery.c
|
|
@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
|
|
{
|
|
struct twl4030_madc_battery *twl4030_madc_bat;
|
|
struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
|
|
+ int ret = 0;
|
|
|
|
twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
|
|
if (!twl4030_madc_bat)
|
|
@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
|
|
|
|
twl4030_madc_bat->pdata = pdata;
|
|
platform_set_drvdata(pdev, twl4030_madc_bat);
|
|
- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
|
|
+ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
|
|
+ if (ret < 0)
|
|
+ kfree(twl4030_madc_bat);
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static int twl4030_madc_battery_remove(struct platform_device *pdev)
|
|
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
|
|
index 91245f5..47257b6 100644
|
|
--- a/drivers/rapidio/devices/tsi721_dma.c
|
|
+++ b/drivers/rapidio/devices/tsi721_dma.c
|
|
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
|
|
"desc %p not ACKed\n", tx_desc);
|
|
}
|
|
|
|
+ if (ret == NULL) {
|
|
+ dev_dbg(bdma_chan->dchan.device->dev,
|
|
+ "%s: unable to obtain tx descriptor\n", __func__);
|
|
+ goto err_out;
|
|
+ }
|
|
+
|
|
i = bdma_chan->wr_count_next % bdma_chan->bd_num;
|
|
if (i == bdma_chan->bd_num - 1) {
|
|
i = 0;
|
|
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
|
|
tx_desc->txd.phys = bdma_chan->bd_phys +
|
|
i * sizeof(struct tsi721_dma_desc);
|
|
tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
|
|
-
|
|
+err_out:
|
|
spin_unlock_bh(&bdma_chan->lock);
|
|
|
|
return ret;
|
|
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
|
|
index f0ea4fd..8b963a7 100644
|
|
--- a/drivers/regulator/arizona-ldo1.c
|
|
+++ b/drivers/regulator/arizona-ldo1.c
|
|
@@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
|
|
.map_voltage = regulator_map_voltage_linear,
|
|
.get_voltage_sel = regulator_get_voltage_sel_regmap,
|
|
.set_voltage_sel = regulator_set_voltage_sel_regmap,
|
|
- .get_bypass = regulator_get_bypass_regmap,
|
|
- .set_bypass = regulator_set_bypass_regmap,
|
|
};
|
|
|
|
static const struct regulator_desc arizona_ldo1 = {
|
|
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
|
|
index 8a9f2b0..e4491c5 100644
|
|
--- a/drivers/regulator/core.c
|
|
+++ b/drivers/regulator/core.c
|
|
@@ -775,7 +775,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
|
|
static void print_constraints(struct regulator_dev *rdev)
|
|
{
|
|
struct regulation_constraints *constraints = rdev->constraints;
|
|
- char buf[80] = "";
|
|
+ char buf[160] = "";
|
|
int count = 0;
|
|
int ret;
|
|
|
|
@@ -1481,7 +1481,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
|
|
}
|
|
EXPORT_SYMBOL_GPL(regulator_get_optional);
|
|
|
|
-/* Locks held by regulator_put() */
|
|
+/* regulator_list_mutex lock held by regulator_put() */
|
|
static void _regulator_put(struct regulator *regulator)
|
|
{
|
|
struct regulator_dev *rdev;
|
|
@@ -1496,12 +1496,14 @@ static void _regulator_put(struct regulator *regulator)
|
|
/* remove any sysfs entries */
|
|
if (regulator->dev)
|
|
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
|
|
+ mutex_lock(&rdev->mutex);
|
|
kfree(regulator->supply_name);
|
|
list_del(®ulator->list);
|
|
kfree(regulator);
|
|
|
|
rdev->open_count--;
|
|
rdev->exclusive = 0;
|
|
+ mutex_unlock(&rdev->mutex);
|
|
|
|
module_put(rdev->owner);
|
|
}
|
|
@@ -1767,10 +1769,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
|
|
trace_regulator_enable(rdev_get_name(rdev));
|
|
|
|
if (rdev->ena_pin) {
|
|
- ret = regulator_ena_gpio_ctrl(rdev, true);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
- rdev->ena_gpio_state = 1;
|
|
+ if (!rdev->ena_gpio_state) {
|
|
+ ret = regulator_ena_gpio_ctrl(rdev, true);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ rdev->ena_gpio_state = 1;
|
|
+ }
|
|
} else if (rdev->desc->ops->enable) {
|
|
ret = rdev->desc->ops->enable(rdev);
|
|
if (ret < 0)
|
|
@@ -1902,10 +1906,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
|
|
trace_regulator_disable(rdev_get_name(rdev));
|
|
|
|
if (rdev->ena_pin) {
|
|
- ret = regulator_ena_gpio_ctrl(rdev, false);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
- rdev->ena_gpio_state = 0;
|
|
+ if (rdev->ena_gpio_state) {
|
|
+ ret = regulator_ena_gpio_ctrl(rdev, false);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ rdev->ena_gpio_state = 0;
|
|
+ }
|
|
|
|
} else if (rdev->desc->ops->disable) {
|
|
ret = rdev->desc->ops->disable(rdev);
|
|
@@ -3459,12 +3465,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
|
|
config->ena_gpio, ret);
|
|
goto wash;
|
|
}
|
|
-
|
|
- if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
|
|
- rdev->ena_gpio_state = 1;
|
|
-
|
|
- if (config->ena_gpio_invert)
|
|
- rdev->ena_gpio_state = !rdev->ena_gpio_state;
|
|
}
|
|
|
|
/* set regulator constraints */
|
|
@@ -3636,9 +3636,11 @@ int regulator_suspend_finish(void)
|
|
list_for_each_entry(rdev, ®ulator_list, list) {
|
|
mutex_lock(&rdev->mutex);
|
|
if (rdev->use_count > 0 || rdev->constraints->always_on) {
|
|
- error = _regulator_do_enable(rdev);
|
|
- if (error)
|
|
- ret = error;
|
|
+ if (!_regulator_is_enabled(rdev)) {
|
|
+ error = _regulator_do_enable(rdev);
|
|
+ if (error)
|
|
+ ret = error;
|
|
+ }
|
|
} else {
|
|
if (!have_full_constraints())
|
|
goto unlock;
|
|
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
|
|
index 5fb899f..24c926b 100644
|
|
--- a/drivers/regulator/max77693.c
|
|
+++ b/drivers/regulator/max77693.c
|
|
@@ -232,7 +232,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
|
|
struct max77693_pmic_dev *max77693_pmic;
|
|
struct max77693_regulator_data *rdata = NULL;
|
|
int num_rdata, i;
|
|
- struct regulator_config config;
|
|
+ struct regulator_config config = { };
|
|
|
|
num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
|
|
if (!rdata || num_rdata <= 0) {
|
|
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
|
|
index 7854a65..110eab8 100644
|
|
--- a/drivers/rtc/rtc-isl12057.c
|
|
+++ b/drivers/rtc/rtc-isl12057.c
|
|
@@ -89,7 +89,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
|
|
tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
|
|
|
|
if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
|
|
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f);
|
|
+ tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
|
|
if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
|
|
tm->tm_hour += 12;
|
|
} else { /* 24 hour mode */
|
|
@@ -98,7 +98,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
|
|
|
|
tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
|
|
tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
|
|
- tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */
|
|
+ tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
|
|
tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
|
|
}
|
|
|
|
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
|
|
index 3eb3642..d2b1ab3 100644
|
|
--- a/drivers/rtc/rtc-sirfsoc.c
|
|
+++ b/drivers/rtc/rtc-sirfsoc.c
|
|
@@ -290,14 +290,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
|
|
rtc_div = ((32768 / RTC_HZ) / 2) - 1;
|
|
sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
|
|
|
|
- rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
|
|
- &sirfsoc_rtc_ops, THIS_MODULE);
|
|
- if (IS_ERR(rtcdrv->rtc)) {
|
|
- err = PTR_ERR(rtcdrv->rtc);
|
|
- dev_err(&pdev->dev, "can't register RTC device\n");
|
|
- return err;
|
|
- }
|
|
-
|
|
/* 0x3 -> RTC_CLK */
|
|
sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
|
|
rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
|
|
@@ -312,6 +304,14 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
|
|
rtcdrv->overflow_rtc =
|
|
sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
|
|
|
|
+ rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
|
|
+ &sirfsoc_rtc_ops, THIS_MODULE);
|
|
+ if (IS_ERR(rtcdrv->rtc)) {
|
|
+ err = PTR_ERR(rtcdrv->rtc);
|
|
+ dev_err(&pdev->dev, "can't register RTC device\n");
|
|
+ return err;
|
|
+ }
|
|
+
|
|
rtcdrv->irq = platform_get_irq(pdev, 0);
|
|
err = devm_request_irq(
|
|
&pdev->dev,
|
|
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
|
|
index bb86494..19915c5 100644
|
|
--- a/drivers/s390/char/con3215.c
|
|
+++ b/drivers/s390/char/con3215.c
|
|
@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data)
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
|
|
- if (raw->flags & RAW3215_TIMER_RUNS) {
|
|
- del_timer(&raw->timer);
|
|
- raw->flags &= ~RAW3215_TIMER_RUNS;
|
|
- if (!(raw->port.flags & ASYNC_SUSPENDED)) {
|
|
- raw3215_mk_write_req(raw);
|
|
- raw3215_start_io(raw);
|
|
+ raw->flags &= ~RAW3215_TIMER_RUNS;
|
|
+ if (!(raw->port.flags & ASYNC_SUSPENDED)) {
|
|
+ raw3215_mk_write_req(raw);
|
|
+ raw3215_start_io(raw);
|
|
+ if ((raw->queued_read || raw->queued_write) &&
|
|
+ !(raw->flags & RAW3215_WORKING) &&
|
|
+ !(raw->flags & RAW3215_TIMER_RUNS)) {
|
|
+ raw->timer.expires = RAW3215_TIMEOUT + jiffies;
|
|
+ add_timer(&raw->timer);
|
|
+ raw->flags |= RAW3215_TIMER_RUNS;
|
|
}
|
|
}
|
|
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
|
|
@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
|
|
(raw->flags & RAW3215_FLUSHING)) {
|
|
/* execute write requests bigger than minimum size */
|
|
raw3215_start_io(raw);
|
|
- if (raw->flags & RAW3215_TIMER_RUNS) {
|
|
- del_timer(&raw->timer);
|
|
- raw->flags &= ~RAW3215_TIMER_RUNS;
|
|
- }
|
|
- } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
|
|
- /* delay small writes */
|
|
- raw->timer.expires = RAW3215_TIMEOUT + jiffies;
|
|
- add_timer(&raw->timer);
|
|
- raw->flags |= RAW3215_TIMER_RUNS;
|
|
}
|
|
}
|
|
+ if ((raw->queued_read || raw->queued_write) &&
|
|
+ !(raw->flags & RAW3215_WORKING) &&
|
|
+ !(raw->flags & RAW3215_TIMER_RUNS)) {
|
|
+ raw->timer.expires = RAW3215_TIMEOUT + jiffies;
|
|
+ add_timer(&raw->timer);
|
|
+ raw->flags |= RAW3215_TIMER_RUNS;
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
@@ -1027,12 +1029,26 @@ static int tty3215_write(struct tty_struct * tty,
|
|
const unsigned char *buf, int count)
|
|
{
|
|
struct raw3215_info *raw;
|
|
+ int i, written;
|
|
|
|
if (!tty)
|
|
return 0;
|
|
raw = (struct raw3215_info *) tty->driver_data;
|
|
- raw3215_write(raw, buf, count);
|
|
- return count;
|
|
+ written = count;
|
|
+ while (count > 0) {
|
|
+ for (i = 0; i < count; i++)
|
|
+ if (buf[i] == '\t' || buf[i] == '\n')
|
|
+ break;
|
|
+ raw3215_write(raw, buf, i);
|
|
+ count -= i;
|
|
+ buf += i;
|
|
+ if (count > 0) {
|
|
+ raw3215_putchar(raw, *buf);
|
|
+ count--;
|
|
+ buf++;
|
|
+ }
|
|
+ }
|
|
+ return written;
|
|
}
|
|
|
|
/*
|
|
@@ -1180,7 +1196,7 @@ static int __init tty3215_init(void)
|
|
driver->subtype = SYSTEM_TYPE_TTY;
|
|
driver->init_termios = tty_std_termios;
|
|
driver->init_termios.c_iflag = IGNBRK | IGNPAR;
|
|
- driver->init_termios.c_oflag = ONLCR | XTABS;
|
|
+ driver->init_termios.c_oflag = ONLCR;
|
|
driver->init_termios.c_lflag = ISIG;
|
|
driver->flags = TTY_DRIVER_REAL_RAW;
|
|
tty_set_operations(driver, &tty3215_ops);
|
|
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
|
|
index 82f2c38..b334f68 100644
|
|
--- a/drivers/s390/char/sclp_early.c
|
|
+++ b/drivers/s390/char/sclp_early.c
|
|
@@ -7,6 +7,7 @@
|
|
#define KMSG_COMPONENT "sclp_early"
|
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
|
|
|
+#include <linux/errno.h>
|
|
#include <asm/ctl_reg.h>
|
|
#include <asm/sclp.h>
|
|
#include <asm/ipl.h>
|
|
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
|
|
index ab3baa7..86ade85 100644
|
|
--- a/drivers/s390/crypto/ap_bus.c
|
|
+++ b/drivers/s390/crypto/ap_bus.c
|
|
@@ -44,6 +44,7 @@
|
|
#include <linux/hrtimer.h>
|
|
#include <linux/ktime.h>
|
|
#include <asm/facility.h>
|
|
+#include <linux/crypto.h>
|
|
|
|
#include "ap_bus.h"
|
|
|
|
@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
|
|
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
|
|
"Copyright IBM Corp. 2006, 2012");
|
|
MODULE_LICENSE("GPL");
|
|
-MODULE_ALIAS("z90crypt");
|
|
+MODULE_ALIAS_CRYPTO("z90crypt");
|
|
|
|
/*
|
|
* Module parameter
|
|
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
|
|
index 160e751..0787b97 100644
|
|
--- a/drivers/sbus/char/bbc_envctrl.c
|
|
+++ b/drivers/sbus/char/bbc_envctrl.c
|
|
@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
|
|
if (!tp)
|
|
return;
|
|
|
|
+ INIT_LIST_HEAD(&tp->bp_list);
|
|
+ INIT_LIST_HEAD(&tp->glob_list);
|
|
+
|
|
tp->client = bbc_i2c_attach(bp, op);
|
|
if (!tp->client) {
|
|
kfree(tp);
|
|
@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
|
|
if (!fp)
|
|
return;
|
|
|
|
+ INIT_LIST_HEAD(&fp->bp_list);
|
|
+ INIT_LIST_HEAD(&fp->glob_list);
|
|
+
|
|
fp->client = bbc_i2c_attach(bp, op);
|
|
if (!fp->client) {
|
|
kfree(fp);
|
|
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
|
|
index c7763e4..812b5f0 100644
|
|
--- a/drivers/sbus/char/bbc_i2c.c
|
|
+++ b/drivers/sbus/char/bbc_i2c.c
|
|
@@ -300,13 +300,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
|
|
if (!bp)
|
|
return NULL;
|
|
|
|
+ INIT_LIST_HEAD(&bp->temps);
|
|
+ INIT_LIST_HEAD(&bp->fans);
|
|
+
|
|
bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
|
|
if (!bp->i2c_control_regs)
|
|
goto fail;
|
|
|
|
- bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
|
|
- if (!bp->i2c_bussel_reg)
|
|
- goto fail;
|
|
+ if (op->num_resources == 2) {
|
|
+ bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
|
|
+ if (!bp->i2c_bussel_reg)
|
|
+ goto fail;
|
|
+ }
|
|
|
|
bp->waiting = 0;
|
|
init_waitqueue_head(&bp->wq);
|
|
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
|
|
index 0a73253..5f57e3d 100644
|
|
--- a/drivers/scsi/3w-9xxx.c
|
|
+++ b/drivers/scsi/3w-9xxx.c
|
|
@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
|
|
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
|
|
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
|
|
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
|
|
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
|
|
|
|
/* Functions */
|
|
|
|
@@ -1352,11 +1351,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
|
|
}
|
|
|
|
/* Now complete the io */
|
|
+ scsi_dma_unmap(cmd);
|
|
+ cmd->scsi_done(cmd);
|
|
tw_dev->state[request_id] = TW_S_COMPLETED;
|
|
twa_free_request_id(tw_dev, request_id);
|
|
tw_dev->posted_request_count--;
|
|
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
|
|
- twa_unmap_scsi_data(tw_dev, request_id);
|
|
}
|
|
|
|
/* Check for valid status after each drain */
|
|
@@ -1414,26 +1413,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
|
|
}
|
|
} /* End twa_load_sgl() */
|
|
|
|
-/* This function will perform a pci-dma mapping for a scatter gather list */
|
|
-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
|
|
-{
|
|
- int use_sg;
|
|
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
|
|
-
|
|
- use_sg = scsi_dma_map(cmd);
|
|
- if (!use_sg)
|
|
- return 0;
|
|
- else if (use_sg < 0) {
|
|
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
|
|
- return 0;
|
|
- }
|
|
-
|
|
- cmd->SCp.phase = TW_PHASE_SGLIST;
|
|
- cmd->SCp.have_data_in = use_sg;
|
|
-
|
|
- return use_sg;
|
|
-} /* End twa_map_scsi_sg_data() */
|
|
-
|
|
/* This function will poll for a response interrupt of a request */
|
|
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
|
|
{
|
|
@@ -1612,9 +1591,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
|
|
(tw_dev->state[i] != TW_S_INITIAL) &&
|
|
(tw_dev->state[i] != TW_S_COMPLETED)) {
|
|
if (tw_dev->srb[i]) {
|
|
- tw_dev->srb[i]->result = (DID_RESET << 16);
|
|
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
|
|
- twa_unmap_scsi_data(tw_dev, i);
|
|
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
|
|
+
|
|
+ cmd->result = (DID_RESET << 16);
|
|
+ scsi_dma_unmap(cmd);
|
|
+ cmd->scsi_done(cmd);
|
|
}
|
|
}
|
|
}
|
|
@@ -1793,21 +1774,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
|
|
/* Save the scsi command for use by the ISR */
|
|
tw_dev->srb[request_id] = SCpnt;
|
|
|
|
- /* Initialize phase to zero */
|
|
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
|
|
-
|
|
retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
|
|
switch (retval) {
|
|
case SCSI_MLQUEUE_HOST_BUSY:
|
|
+ scsi_dma_unmap(SCpnt);
|
|
twa_free_request_id(tw_dev, request_id);
|
|
- twa_unmap_scsi_data(tw_dev, request_id);
|
|
break;
|
|
case 1:
|
|
- tw_dev->state[request_id] = TW_S_COMPLETED;
|
|
- twa_free_request_id(tw_dev, request_id);
|
|
- twa_unmap_scsi_data(tw_dev, request_id);
|
|
SCpnt->result = (DID_ERROR << 16);
|
|
+ scsi_dma_unmap(SCpnt);
|
|
done(SCpnt);
|
|
+ tw_dev->state[request_id] = TW_S_COMPLETED;
|
|
+ twa_free_request_id(tw_dev, request_id);
|
|
retval = 0;
|
|
}
|
|
out:
|
|
@@ -1875,8 +1853,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
|
|
command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
|
|
command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
|
|
} else {
|
|
- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
|
|
- if (sg_count == 0)
|
|
+ sg_count = scsi_dma_map(srb);
|
|
+ if (sg_count < 0)
|
|
goto out;
|
|
|
|
scsi_for_each_sg(srb, sg, sg_count, i) {
|
|
@@ -1991,15 +1969,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
|
|
return(table[index].text);
|
|
} /* End twa_string_lookup() */
|
|
|
|
-/* This function will perform a pci-dma unmap */
|
|
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
|
|
-{
|
|
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
|
|
-
|
|
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
|
|
- scsi_dma_unmap(cmd);
|
|
-} /* End twa_unmap_scsi_data() */
|
|
-
|
|
/* This function gets called when a disk is coming on-line */
|
|
static int twa_slave_configure(struct scsi_device *sdev)
|
|
{
|
|
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
|
|
index 040f721..0fdc83c 100644
|
|
--- a/drivers/scsi/3w-9xxx.h
|
|
+++ b/drivers/scsi/3w-9xxx.h
|
|
@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
|
|
#define TW_CURRENT_DRIVER_BUILD 0
|
|
#define TW_CURRENT_DRIVER_BRANCH 0
|
|
|
|
-/* Phase defines */
|
|
-#define TW_PHASE_INITIAL 0
|
|
-#define TW_PHASE_SINGLE 1
|
|
-#define TW_PHASE_SGLIST 2
|
|
-
|
|
/* Misc defines */
|
|
#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
|
|
#define TW_SECTOR_SIZE 512
|
|
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
|
|
index 4de3460..61702ac 100644
|
|
--- a/drivers/scsi/3w-sas.c
|
|
+++ b/drivers/scsi/3w-sas.c
|
|
@@ -303,26 +303,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
|
|
return 0;
|
|
} /* End twl_post_command_packet() */
|
|
|
|
-/* This function will perform a pci-dma mapping for a scatter gather list */
|
|
-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
|
|
-{
|
|
- int use_sg;
|
|
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
|
|
-
|
|
- use_sg = scsi_dma_map(cmd);
|
|
- if (!use_sg)
|
|
- return 0;
|
|
- else if (use_sg < 0) {
|
|
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
|
|
- return 0;
|
|
- }
|
|
-
|
|
- cmd->SCp.phase = TW_PHASE_SGLIST;
|
|
- cmd->SCp.have_data_in = use_sg;
|
|
-
|
|
- return use_sg;
|
|
-} /* End twl_map_scsi_sg_data() */
|
|
-
|
|
/* This function hands scsi cdb's to the firmware */
|
|
static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
|
|
{
|
|
@@ -370,8 +350,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
|
|
if (!sglistarg) {
|
|
/* Map sglist from scsi layer to cmd packet */
|
|
if (scsi_sg_count(srb)) {
|
|
- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
|
|
- if (sg_count == 0)
|
|
+ sg_count = scsi_dma_map(srb);
|
|
+ if (sg_count <= 0)
|
|
goto out;
|
|
|
|
scsi_for_each_sg(srb, sg, sg_count, i) {
|
|
@@ -1116,15 +1096,6 @@ out:
|
|
return retval;
|
|
} /* End twl_initialize_device_extension() */
|
|
|
|
-/* This function will perform a pci-dma unmap */
|
|
-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
|
|
-{
|
|
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
|
|
-
|
|
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
|
|
- scsi_dma_unmap(cmd);
|
|
-} /* End twl_unmap_scsi_data() */
|
|
-
|
|
/* This function will handle attention interrupts */
|
|
static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
|
|
{
|
|
@@ -1265,11 +1236,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
|
|
}
|
|
|
|
/* Now complete the io */
|
|
+ scsi_dma_unmap(cmd);
|
|
+ cmd->scsi_done(cmd);
|
|
tw_dev->state[request_id] = TW_S_COMPLETED;
|
|
twl_free_request_id(tw_dev, request_id);
|
|
tw_dev->posted_request_count--;
|
|
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
|
|
- twl_unmap_scsi_data(tw_dev, request_id);
|
|
}
|
|
|
|
/* Check for another response interrupt */
|
|
@@ -1414,10 +1385,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
|
|
if ((tw_dev->state[i] != TW_S_FINISHED) &&
|
|
(tw_dev->state[i] != TW_S_INITIAL) &&
|
|
(tw_dev->state[i] != TW_S_COMPLETED)) {
|
|
- if (tw_dev->srb[i]) {
|
|
- tw_dev->srb[i]->result = (DID_RESET << 16);
|
|
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
|
|
- twl_unmap_scsi_data(tw_dev, i);
|
|
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
|
|
+
|
|
+ if (cmd) {
|
|
+ cmd->result = (DID_RESET << 16);
|
|
+ scsi_dma_unmap(cmd);
|
|
+ cmd->scsi_done(cmd);
|
|
}
|
|
}
|
|
}
|
|
@@ -1521,9 +1494,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
|
|
/* Save the scsi command for use by the ISR */
|
|
tw_dev->srb[request_id] = SCpnt;
|
|
|
|
- /* Initialize phase to zero */
|
|
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
|
|
-
|
|
retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
|
|
if (retval) {
|
|
tw_dev->state[request_id] = TW_S_COMPLETED;
|
|
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
|
|
index d474892..fec6449 100644
|
|
--- a/drivers/scsi/3w-sas.h
|
|
+++ b/drivers/scsi/3w-sas.h
|
|
@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
|
|
#define TW_CURRENT_DRIVER_BUILD 0
|
|
#define TW_CURRENT_DRIVER_BRANCH 0
|
|
|
|
-/* Phase defines */
|
|
-#define TW_PHASE_INITIAL 0
|
|
-#define TW_PHASE_SGLIST 2
|
|
-
|
|
/* Misc defines */
|
|
#define TW_SECTOR_SIZE 512
|
|
#define TW_MAX_UNITS 32
|
|
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
|
|
index 752624e..b327742 100644
|
|
--- a/drivers/scsi/3w-xxxx.c
|
|
+++ b/drivers/scsi/3w-xxxx.c
|
|
@@ -1284,32 +1284,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
|
|
return 0;
|
|
} /* End tw_initialize_device_extension() */
|
|
|
|
-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
|
|
-{
|
|
- int use_sg;
|
|
-
|
|
- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
|
|
-
|
|
- use_sg = scsi_dma_map(cmd);
|
|
- if (use_sg < 0) {
|
|
- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
- cmd->SCp.phase = TW_PHASE_SGLIST;
|
|
- cmd->SCp.have_data_in = use_sg;
|
|
-
|
|
- return use_sg;
|
|
-} /* End tw_map_scsi_sg_data() */
|
|
-
|
|
-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
|
|
-{
|
|
- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
|
|
-
|
|
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
|
|
- scsi_dma_unmap(cmd);
|
|
-} /* End tw_unmap_scsi_data() */
|
|
-
|
|
/* This function will reset a device extension */
|
|
static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
|
|
{
|
|
@@ -1332,8 +1306,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
|
|
srb = tw_dev->srb[i];
|
|
if (srb != NULL) {
|
|
srb->result = (DID_RESET << 16);
|
|
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
|
|
- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
|
|
+ scsi_dma_unmap(srb);
|
|
+ srb->scsi_done(srb);
|
|
}
|
|
}
|
|
}
|
|
@@ -1780,8 +1754,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
|
|
command_packet->byte8.io.lba = lba;
|
|
command_packet->byte6.block_count = num_sectors;
|
|
|
|
- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
|
|
- if (!use_sg)
|
|
+ use_sg = scsi_dma_map(srb);
|
|
+ if (use_sg <= 0)
|
|
return 1;
|
|
|
|
scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
|
|
@@ -1968,9 +1942,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
|
|
/* Save the scsi command for use by the ISR */
|
|
tw_dev->srb[request_id] = SCpnt;
|
|
|
|
- /* Initialize phase to zero */
|
|
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
|
|
-
|
|
switch (*command) {
|
|
case READ_10:
|
|
case READ_6:
|
|
@@ -2198,12 +2169,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
|
|
|
|
/* Now complete the io */
|
|
if ((error != TW_ISR_DONT_COMPLETE)) {
|
|
+ scsi_dma_unmap(tw_dev->srb[request_id]);
|
|
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
|
|
tw_dev->state[request_id] = TW_S_COMPLETED;
|
|
tw_state_request_finish(tw_dev, request_id);
|
|
tw_dev->posted_request_count--;
|
|
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
|
|
-
|
|
- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
|
|
index 49dcf03..1d31858 100644
|
|
--- a/drivers/scsi/3w-xxxx.h
|
|
+++ b/drivers/scsi/3w-xxxx.h
|
|
@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
|
|
#define TW_AEN_SMART_FAIL 0x000F
|
|
#define TW_AEN_SBUF_FAIL 0x0024
|
|
|
|
-/* Phase defines */
|
|
-#define TW_PHASE_INITIAL 0
|
|
-#define TW_PHASE_SINGLE 1
|
|
-#define TW_PHASE_SGLIST 2
|
|
-
|
|
/* Misc defines */
|
|
#define TW_ALIGNMENT_6000 64 /* 64 bytes */
|
|
#define TW_ALIGNMENT_7000 4 /* 4 bytes */
|
|
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
|
|
index 1e9d6ad..7563b3d 100644
|
|
--- a/drivers/scsi/NCR5380.c
|
|
+++ b/drivers/scsi/NCR5380.c
|
|
@@ -2655,14 +2655,14 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
|
|
*
|
|
* Purpose : abort a command
|
|
*
|
|
- * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
|
|
- * host byte of the result field to, if zero DID_ABORTED is
|
|
+ * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
|
|
+ * host byte of the result field to, if zero DID_ABORTED is
|
|
* used.
|
|
*
|
|
- * Returns : 0 - success, -1 on failure.
|
|
+ * Returns : SUCCESS - success, FAILED on failure.
|
|
*
|
|
- * XXX - there is no way to abort the command that is currently
|
|
- * connected, you have to wait for it to complete. If this is
|
|
+ * XXX - there is no way to abort the command that is currently
|
|
+ * connected, you have to wait for it to complete. If this is
|
|
* a problem, we could implement longjmp() / setjmp(), setjmp()
|
|
* called where the loop started in NCR5380_main().
|
|
*
|
|
@@ -2712,7 +2712,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
|
|
* aborted flag and get back into our main loop.
|
|
*/
|
|
|
|
- return 0;
|
|
+ return SUCCESS;
|
|
}
|
|
#endif
|
|
|
|
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
|
|
index 5f31017..31ace4b 100644
|
|
--- a/drivers/scsi/aha1740.c
|
|
+++ b/drivers/scsi/aha1740.c
|
|
@@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
|
|
* quiet as possible...
|
|
*/
|
|
|
|
- return 0;
|
|
+ return SUCCESS;
|
|
}
|
|
|
|
static struct scsi_host_template aha1740_template = {
|
|
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
|
|
index 0f3cdbc..30073d4 100644
|
|
--- a/drivers/scsi/atari_NCR5380.c
|
|
+++ b/drivers/scsi/atari_NCR5380.c
|
|
@@ -2613,7 +2613,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
|
|
* host byte of the result field to, if zero DID_ABORTED is
|
|
* used.
|
|
*
|
|
- * Returns : 0 - success, -1 on failure.
|
|
+ * Returns : SUCCESS - success, FAILED on failure.
|
|
*
|
|
* XXX - there is no way to abort the command that is currently
|
|
* connected, you have to wait for it to complete. If this is
|
|
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
|
|
index 5642a9b..19ddd43 100644
|
|
--- a/drivers/scsi/be2iscsi/be_main.c
|
|
+++ b/drivers/scsi/be2iscsi/be_main.c
|
|
@@ -581,7 +581,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
|
|
"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
|
|
return NULL;
|
|
}
|
|
- shost->dma_boundary = pcidev->dma_mask;
|
|
shost->max_id = BE2_MAX_SESSIONS;
|
|
shost->max_channel = 0;
|
|
shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
|
|
@@ -5685,9 +5684,9 @@ free_port:
|
|
hba_free:
|
|
if (phba->msix_enabled)
|
|
pci_disable_msix(phba->pcidev);
|
|
- iscsi_host_remove(phba->shost);
|
|
pci_dev_put(phba->pcidev);
|
|
iscsi_host_free(phba->shost);
|
|
+ pci_set_drvdata(pcidev, NULL);
|
|
disable_pci:
|
|
pci_disable_device(pcidev);
|
|
return ret;
|
|
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
|
|
index b2fcac7..5bb9406 100644
|
|
--- a/drivers/scsi/be2iscsi/be_mgmt.c
|
|
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
|
|
@@ -897,17 +897,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
|
|
|
|
if (ip_action == IP_ACTION_ADD) {
|
|
memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
|
|
- ip_param->len);
|
|
+ sizeof(req->ip_params.ip_record.ip_addr.addr));
|
|
|
|
if (subnet_param)
|
|
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
|
|
- subnet_param->value, subnet_param->len);
|
|
+ subnet_param->value,
|
|
+ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
|
|
} else {
|
|
memcpy(req->ip_params.ip_record.ip_addr.addr,
|
|
- if_info->ip_addr.addr, ip_param->len);
|
|
+ if_info->ip_addr.addr,
|
|
+ sizeof(req->ip_params.ip_record.ip_addr.addr));
|
|
|
|
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
|
|
- if_info->ip_addr.subnet_mask, ip_param->len);
|
|
+ if_info->ip_addr.subnet_mask,
|
|
+ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
|
|
}
|
|
|
|
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
|
|
@@ -935,7 +938,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
|
|
req->action = gtway_action;
|
|
req->ip_addr.ip_type = BE2_IPV4;
|
|
|
|
- memcpy(req->ip_addr.addr, gt_addr, param_len);
|
|
+ memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
|
|
|
|
return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
|
|
}
|
|
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
|
|
index 65180e1..50c75e1 100644
|
|
--- a/drivers/scsi/bfa/bfa_ioc.c
|
|
+++ b/drivers/scsi/bfa/bfa_ioc.c
|
|
@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
|
|
while (!bfa_raw_sem_get(bar)) {
|
|
if (--n <= 0)
|
|
return BFA_STATUS_BADFLASH;
|
|
- udelay(10000);
|
|
+ mdelay(10);
|
|
}
|
|
return BFA_STATUS_OK;
|
|
}
|
|
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
|
|
index 2e28392..a38aafa0 100644
|
|
--- a/drivers/scsi/bfa/bfa_ioc.h
|
|
+++ b/drivers/scsi/bfa/bfa_ioc.h
|
|
@@ -72,7 +72,7 @@ struct bfa_sge_s {
|
|
} while (0)
|
|
|
|
#define bfa_swap_words(_x) ( \
|
|
- ((_x) << 32) | ((_x) >> 32))
|
|
+ ((u64)(_x) << 32) | ((u64)(_x) >> 32))
|
|
|
|
#ifdef __BIG_ENDIAN
|
|
#define bfa_sge_to_be(_x)
|
|
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
|
|
index 9b94850..cc6b13b 100644
|
|
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
|
|
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
|
|
@@ -411,6 +411,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
struct fc_frame_header *fh;
|
|
struct fcoe_rcv_info *fr;
|
|
struct fcoe_percpu_s *bg;
|
|
+ struct sk_buff *tmp_skb;
|
|
unsigned short oxid;
|
|
|
|
interface = container_of(ptype, struct bnx2fc_interface,
|
|
@@ -423,6 +424,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
goto err;
|
|
}
|
|
|
|
+ tmp_skb = skb_share_check(skb, GFP_ATOMIC);
|
|
+ if (!tmp_skb)
|
|
+ goto err;
|
|
+
|
|
+ skb = tmp_skb;
|
|
+
|
|
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
|
|
printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
|
|
goto err;
|
|
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
|
|
index f37f3e3..28fe6fe 100644
|
|
--- a/drivers/scsi/esas2r/esas2r_main.c
|
|
+++ b/drivers/scsi/esas2r/esas2r_main.c
|
|
@@ -1057,7 +1057,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
|
|
|
|
cmd->scsi_done(cmd);
|
|
|
|
- return 0;
|
|
+ return SUCCESS;
|
|
}
|
|
|
|
spin_lock_irqsave(&a->queue_lock, flags);
|
|
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
|
|
index 868318a..85d370e 100644
|
|
--- a/drivers/scsi/hpsa.c
|
|
+++ b/drivers/scsi/hpsa.c
|
|
@@ -3131,7 +3131,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|
}
|
|
if (ioc->Request.Type.Direction == XFER_WRITE) {
|
|
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
|
|
- status = -ENOMEM;
|
|
+ status = -EFAULT;
|
|
goto cleanup1;
|
|
}
|
|
} else
|
|
@@ -3984,10 +3984,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
|
|
|
|
/* Save the PCI command register */
|
|
pci_read_config_word(pdev, 4, &command_register);
|
|
- /* Turn the board off. This is so that later pci_restore_state()
|
|
- * won't turn the board on before the rest of config space is ready.
|
|
- */
|
|
- pci_disable_device(pdev);
|
|
pci_save_state(pdev);
|
|
|
|
/* find the first memory BAR, so we can find the cfg table */
|
|
@@ -4035,11 +4031,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
|
|
goto unmap_cfgtable;
|
|
|
|
pci_restore_state(pdev);
|
|
- rc = pci_enable_device(pdev);
|
|
- if (rc) {
|
|
- dev_warn(&pdev->dev, "failed to enable device.\n");
|
|
- goto unmap_cfgtable;
|
|
- }
|
|
pci_write_config_word(pdev, 4, command_register);
|
|
|
|
/* Some devices (notably the HP Smart Array 5i Controller)
|
|
@@ -4367,9 +4358,9 @@ static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
|
|
{
|
|
u32 driver_support;
|
|
|
|
-#ifdef CONFIG_X86
|
|
- /* Need to enable prefetch in the SCSI core for 6400 in x86 */
|
|
driver_support = readl(&(h->cfgtable->driver_support));
|
|
+ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
|
|
+#ifdef CONFIG_X86
|
|
driver_support |= ENABLE_SCSI_PREFETCH;
|
|
#endif
|
|
driver_support |= ENABLE_UNIT_ATTN;
|
|
@@ -4525,6 +4516,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
|
|
if (!reset_devices)
|
|
return 0;
|
|
|
|
+ /* kdump kernel is loading, we don't know in which state is
|
|
+ * the pci interface. The dev->enable_cnt is equal zero
|
|
+ * so we call enable+disable, wait a while and switch it on.
|
|
+ */
|
|
+ rc = pci_enable_device(pdev);
|
|
+ if (rc) {
|
|
+ dev_warn(&pdev->dev, "Failed to enable PCI device\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ pci_disable_device(pdev);
|
|
+ msleep(260); /* a randomly chosen number */
|
|
+ rc = pci_enable_device(pdev);
|
|
+ if (rc) {
|
|
+ dev_warn(&pdev->dev, "failed to enable device.\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ pci_set_master(pdev);
|
|
/* Reset the controller with a PCI power-cycle or via doorbell */
|
|
rc = hpsa_kdump_hard_reset_controller(pdev);
|
|
|
|
@@ -4533,10 +4541,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
|
|
* "performant mode". Or, it might be 640x, which can't reset
|
|
* due to concerns about shared bbwc between 6402/6404 pair.
|
|
*/
|
|
- if (rc == -ENOTSUPP)
|
|
- return rc; /* just try to do the kdump anyhow. */
|
|
- if (rc)
|
|
- return -ENODEV;
|
|
+ if (rc) {
|
|
+ if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
|
|
+ rc = -ENODEV;
|
|
+ goto out_disable;
|
|
+ }
|
|
|
|
/* Now try to get the controller to respond to a no-op */
|
|
dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
|
|
@@ -4547,7 +4556,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
|
|
dev_warn(&pdev->dev, "no-op failed%s\n",
|
|
(i < 11 ? "; re-trying" : ""));
|
|
}
|
|
- return 0;
|
|
+
|
|
+out_disable:
|
|
+
|
|
+ pci_disable_device(pdev);
|
|
+ return rc;
|
|
}
|
|
|
|
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
|
|
@@ -4690,6 +4703,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
|
|
iounmap(h->transtable);
|
|
if (h->cfgtable)
|
|
iounmap(h->cfgtable);
|
|
+ pci_disable_device(h->pdev);
|
|
pci_release_regions(h->pdev);
|
|
kfree(h);
|
|
}
|
|
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
|
|
index 3f5b56a..128dc2f 100644
|
|
--- a/drivers/scsi/ipr.c
|
|
+++ b/drivers/scsi/ipr.c
|
|
@@ -592,9 +592,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
|
|
{
|
|
struct ipr_trace_entry *trace_entry;
|
|
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
|
|
+ unsigned int trace_index;
|
|
|
|
- trace_entry = &ioa_cfg->trace[atomic_add_return
|
|
- (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
|
|
+ trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
|
|
+ trace_entry = &ioa_cfg->trace[trace_index];
|
|
trace_entry->time = jiffies;
|
|
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
|
|
trace_entry->type = type;
|
|
@@ -683,6 +684,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
|
|
ipr_reinit_ipr_cmnd(ipr_cmd);
|
|
ipr_cmd->u.scratch = 0;
|
|
ipr_cmd->sibling = NULL;
|
|
+ ipr_cmd->eh_comp = NULL;
|
|
ipr_cmd->fast_done = fast_done;
|
|
init_timer(&ipr_cmd->timer);
|
|
}
|
|
@@ -848,6 +850,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
|
|
|
|
scsi_dma_unmap(ipr_cmd->scsi_cmd);
|
|
scsi_cmd->scsi_done(scsi_cmd);
|
|
+ if (ipr_cmd->eh_comp)
|
|
+ complete(ipr_cmd->eh_comp);
|
|
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
|
|
}
|
|
|
|
@@ -1041,10 +1045,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
|
|
|
|
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
|
|
{
|
|
+ unsigned int hrrq;
|
|
+
|
|
if (ioa_cfg->hrrq_num == 1)
|
|
- return 0;
|
|
- else
|
|
- return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
|
|
+ hrrq = 0;
|
|
+ else {
|
|
+ hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
|
|
+ hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
|
|
+ }
|
|
+ return hrrq;
|
|
}
|
|
|
|
/**
|
|
@@ -4805,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
|
|
return rc;
|
|
}
|
|
|
|
+/**
|
|
+ * ipr_match_lun - Match function for specified LUN
|
|
+ * @ipr_cmd: ipr command struct
|
|
+ * @device: device to match (sdev)
|
|
+ *
|
|
+ * Returns:
|
|
+ * 1 if command matches sdev / 0 if command does not match sdev
|
|
+ **/
|
|
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
|
|
+{
|
|
+ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
|
|
+ return 1;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ipr_wait_for_ops - Wait for matching commands to complete
|
|
+ * @ipr_cmd: ipr command struct
|
|
+ * @device: device to match (sdev)
|
|
+ * @match: match function to use
|
|
+ *
|
|
+ * Returns:
|
|
+ * SUCCESS / FAILED
|
|
+ **/
|
|
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
|
|
+ int (*match)(struct ipr_cmnd *, void *))
|
|
+{
|
|
+ struct ipr_cmnd *ipr_cmd;
|
|
+ int wait;
|
|
+ unsigned long flags;
|
|
+ struct ipr_hrr_queue *hrrq;
|
|
+ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
|
|
+ DECLARE_COMPLETION_ONSTACK(comp);
|
|
+
|
|
+ ENTER;
|
|
+ do {
|
|
+ wait = 0;
|
|
+
|
|
+ for_each_hrrq(hrrq, ioa_cfg) {
|
|
+ spin_lock_irqsave(hrrq->lock, flags);
|
|
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
|
|
+ if (match(ipr_cmd, device)) {
|
|
+ ipr_cmd->eh_comp = ∁
|
|
+ wait++;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock_irqrestore(hrrq->lock, flags);
|
|
+ }
|
|
+
|
|
+ if (wait) {
|
|
+ timeout = wait_for_completion_timeout(&comp, timeout);
|
|
+
|
|
+ if (!timeout) {
|
|
+ wait = 0;
|
|
+
|
|
+ for_each_hrrq(hrrq, ioa_cfg) {
|
|
+ spin_lock_irqsave(hrrq->lock, flags);
|
|
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
|
|
+ if (match(ipr_cmd, device)) {
|
|
+ ipr_cmd->eh_comp = NULL;
|
|
+ wait++;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock_irqrestore(hrrq->lock, flags);
|
|
+ }
|
|
+
|
|
+ if (wait)
|
|
+ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
|
|
+ LEAVE;
|
|
+ return wait ? FAILED : SUCCESS;
|
|
+ }
|
|
+ }
|
|
+ } while (wait);
|
|
+
|
|
+ LEAVE;
|
|
+ return SUCCESS;
|
|
+}
|
|
+
|
|
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
|
|
{
|
|
struct ipr_ioa_cfg *ioa_cfg;
|
|
@@ -5023,11 +5110,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
|
|
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
|
|
{
|
|
int rc;
|
|
+ struct ipr_ioa_cfg *ioa_cfg;
|
|
+
|
|
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
|
|
|
|
spin_lock_irq(cmd->device->host->host_lock);
|
|
rc = __ipr_eh_dev_reset(cmd);
|
|
spin_unlock_irq(cmd->device->host->host_lock);
|
|
|
|
+ if (rc == SUCCESS)
|
|
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
|
|
+
|
|
return rc;
|
|
}
|
|
|
|
@@ -5205,13 +5298,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
|
|
{
|
|
unsigned long flags;
|
|
int rc;
|
|
+ struct ipr_ioa_cfg *ioa_cfg;
|
|
|
|
ENTER;
|
|
|
|
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
|
|
+
|
|
spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
|
|
rc = ipr_cancel_op(scsi_cmd);
|
|
spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
|
|
|
|
+ if (rc == SUCCESS)
|
|
+ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
|
|
LEAVE;
|
|
return rc;
|
|
}
|
|
@@ -6087,21 +6185,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
|
|
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
|
|
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
|
|
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
|
|
- unsigned long hrrq_flags;
|
|
+ unsigned long lock_flags;
|
|
|
|
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
|
|
|
|
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
|
|
scsi_dma_unmap(scsi_cmd);
|
|
|
|
- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
|
|
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
|
|
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
|
|
scsi_cmd->scsi_done(scsi_cmd);
|
|
- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
|
|
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
|
|
} else {
|
|
- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
|
|
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
|
|
+ spin_lock(&ipr_cmd->hrrq->_lock);
|
|
ipr_erp_start(ioa_cfg, ipr_cmd);
|
|
- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
|
|
+ spin_unlock(&ipr_cmd->hrrq->_lock);
|
|
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
|
|
index 9ce38a2..694ec20 100644
|
|
--- a/drivers/scsi/ipr.h
|
|
+++ b/drivers/scsi/ipr.h
|
|
@@ -264,7 +264,7 @@
|
|
#define IPR_RUNTIME_RESET 0x40000000
|
|
|
|
#define IPR_IPL_INIT_MIN_STAGE_TIME 5
|
|
-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
|
|
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30
|
|
#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
|
|
#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
|
|
#define IPR_IPL_INIT_STAGE_MASK 0xff000000
|
|
@@ -1459,6 +1459,7 @@ struct ipr_ioa_cfg {
|
|
|
|
#define IPR_NUM_TRACE_INDEX_BITS 8
|
|
#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
|
|
+#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
|
|
#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
|
|
char trace_start[8];
|
|
#define IPR_TRACE_START_LABEL "trace"
|
|
@@ -1585,6 +1586,7 @@ struct ipr_cmnd {
|
|
struct scsi_device *sdev;
|
|
} u;
|
|
|
|
+ struct completion *eh_comp;
|
|
struct ipr_hrr_queue *hrrq;
|
|
struct ipr_ioa_cfg *ioa_cfg;
|
|
};
|
|
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
|
|
index 1b3a094..30f9ef0 100644
|
|
--- a/drivers/scsi/libfc/fc_exch.c
|
|
+++ b/drivers/scsi/libfc/fc_exch.c
|
|
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
|
|
if (resp) {
|
|
resp(sp, fp, arg);
|
|
res = true;
|
|
- } else if (!IS_ERR(fp)) {
|
|
- fc_frame_free(fp);
|
|
}
|
|
|
|
spin_lock_bh(&ep->ex_lock);
|
|
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
|
|
* If new exch resp handler is valid then call that
|
|
* first.
|
|
*/
|
|
- fc_invoke_resp(ep, sp, fp);
|
|
+ if (!fc_invoke_resp(ep, sp, fp))
|
|
+ fc_frame_free(fp);
|
|
|
|
fc_exch_release(ep);
|
|
return;
|
|
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
|
|
fc_exch_hold(ep);
|
|
if (!rc)
|
|
fc_exch_delete(ep);
|
|
- fc_invoke_resp(ep, sp, fp);
|
|
+ if (!fc_invoke_resp(ep, sp, fp))
|
|
+ fc_frame_free(fp);
|
|
if (has_rec)
|
|
fc_exch_timer_set(ep, ep->r_a_tov);
|
|
fc_exch_release(ep);
|
|
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
|
|
index 1d7e76e..ae6fc1a 100644
|
|
--- a/drivers/scsi/libfc/fc_fcp.c
|
|
+++ b/drivers/scsi/libfc/fc_fcp.c
|
|
@@ -1039,11 +1039,26 @@ restart:
|
|
fc_fcp_pkt_hold(fsp);
|
|
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
|
|
|
|
- if (!fc_fcp_lock_pkt(fsp)) {
|
|
+ spin_lock_bh(&fsp->scsi_pkt_lock);
|
|
+ if (!(fsp->state & FC_SRB_COMPL)) {
|
|
+ fsp->state |= FC_SRB_COMPL;
|
|
+ /*
|
|
+ * TODO: dropping scsi_pkt_lock and then reacquiring
|
|
+ * again around fc_fcp_cleanup_cmd() is required,
|
|
+ * since fc_fcp_cleanup_cmd() calls into
|
|
+ * fc_seq_set_resp() and that func preempts cpu using
|
|
+ * schedule. May be schedule and related code should be
|
|
+ * removed instead of unlocking here to avoid scheduling
|
|
+ * while atomic bug.
|
|
+ */
|
|
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
|
|
+
|
|
fc_fcp_cleanup_cmd(fsp, error);
|
|
+
|
|
+ spin_lock_bh(&fsp->scsi_pkt_lock);
|
|
fc_io_compl(fsp);
|
|
- fc_fcp_unlock_pkt(fsp);
|
|
}
|
|
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
|
|
|
|
fc_fcp_pkt_release(fsp);
|
|
spin_lock_irqsave(&si->scsi_queue_lock, flags);
|
|
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
|
|
index 4046241..4549986 100644
|
|
--- a/drivers/scsi/libiscsi.c
|
|
+++ b/drivers/scsi/libiscsi.c
|
|
@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
|
|
return NULL;
|
|
}
|
|
|
|
+ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
|
|
+ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
task = conn->login_task;
|
|
} else {
|
|
if (session->state != ISCSI_STATE_LOGGED_IN)
|
|
return NULL;
|
|
|
|
+ if (data_size != 0) {
|
|
+ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
|
|
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
|
|
|
|
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
|
|
index 62b58d3..60de662 100644
|
|
--- a/drivers/scsi/libsas/sas_discover.c
|
|
+++ b/drivers/scsi/libsas/sas_discover.c
|
|
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
|
|
struct sas_discovery_event *ev = to_sas_discovery_event(work);
|
|
struct asd_sas_port *port = ev->port;
|
|
struct sas_ha_struct *ha = port->ha;
|
|
+ struct domain_device *ddev = port->port_dev;
|
|
|
|
/* prevent revalidation from finding sata links in recovery */
|
|
mutex_lock(&ha->disco_mutex);
|
|
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
|
|
SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
|
|
task_pid_nr(current));
|
|
|
|
- if (port->port_dev)
|
|
- res = sas_ex_revalidate_domain(port->port_dev);
|
|
+ if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
|
|
+ ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
|
|
+ res = sas_ex_revalidate_domain(ddev);
|
|
|
|
SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
|
|
port->id, task_pid_nr(current), res);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
|
|
index 8f580fd..ce21132 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_sli.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_sli.c
|
|
@@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
|
|
return NULL;
|
|
|
|
q->hba_index = idx;
|
|
+
|
|
+ /*
|
|
+ * insert barrier for instruction interlock : data from the hardware
|
|
+ * must have the valid bit checked before it can be copied and acted
|
|
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
|
|
+ * instructions allowing action on content before valid bit checked,
|
|
+ * add barrier here as well. May not be needed as "content" is a
|
|
+ * single 32-bit entity here (vs multi word structure for cq's).
|
|
+ */
|
|
+ mb();
|
|
return eqe;
|
|
}
|
|
|
|
@@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
|
|
|
|
cqe = q->qe[q->hba_index].cqe;
|
|
q->hba_index = idx;
|
|
+
|
|
+ /*
|
|
+ * insert barrier for instruction interlock : data from the hardware
|
|
+ * must have the valid bit checked before it can be copied and acted
|
|
+ * upon. Speculative instructions were allowing a bcopy at the start
|
|
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
|
|
+ * after our return, to copy data before the valid bit check above
|
|
+ * was done. As such, some of the copied data was stale. The barrier
|
|
+ * ensures the check is before any data is copied.
|
|
+ */
|
|
+ mb();
|
|
return cqe;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
|
|
index 816db12..52587ce 100644
|
|
--- a/drivers/scsi/megaraid.c
|
|
+++ b/drivers/scsi/megaraid.c
|
|
@@ -1967,7 +1967,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
|
|
cmd->device->id, cmd->device->lun);
|
|
|
|
if(list_empty(&adapter->pending_list))
|
|
- return FALSE;
|
|
+ return FAILED;
|
|
|
|
list_for_each_safe(pos, next, &adapter->pending_list) {
|
|
|
|
@@ -1990,7 +1990,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
|
|
(aor==SCB_ABORT) ? "ABORTING":"RESET",
|
|
scb->idx);
|
|
|
|
- return FALSE;
|
|
+ return FAILED;
|
|
}
|
|
else {
|
|
|
|
@@ -2015,12 +2015,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
|
|
list_add_tail(SCSI_LIST(cmd),
|
|
&adapter->completed_list);
|
|
|
|
- return TRUE;
|
|
+ return SUCCESS;
|
|
}
|
|
}
|
|
}
|
|
|
|
- return FALSE;
|
|
+ return FAILED;
|
|
}
|
|
|
|
static inline int
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
|
|
index 3b7ad10..c80afde 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
|
|
@@ -953,7 +953,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
|
|
cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
|
|
|
|
cmd->sync_cmd = 1;
|
|
- cmd->cmd_status = 0xFF;
|
|
+ cmd->cmd_status = ENODATA;
|
|
|
|
instance->instancet->issue_dcmd(instance, cmd);
|
|
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
index f655592..a1f04e3 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
@@ -92,6 +92,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
|
|
{
|
|
struct megasas_register_set __iomem *regs;
|
|
regs = instance->reg_set;
|
|
+
|
|
+ instance->mask_interrupts = 0;
|
|
/* For Thunderbolt/Invader also clear intr on enable */
|
|
writel(~0, ®s->outbound_intr_status);
|
|
readl(®s->outbound_intr_status);
|
|
@@ -100,7 +102,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
|
|
|
|
/* Dummy readl to force pci flush */
|
|
readl(®s->outbound_intr_mask);
|
|
- instance->mask_interrupts = 0;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
|
|
index 410f4a3..72f9c55 100644
|
|
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
|
|
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
|
|
@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
|
|
&mpt2sas_phy->remote_identify);
|
|
_transport_add_phy_to_an_existing_port(ioc, sas_node,
|
|
mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
|
|
- } else {
|
|
+ } else
|
|
memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
|
|
sas_identify));
|
|
- _transport_del_phy_from_an_existing_port(ioc, sas_node,
|
|
- mpt2sas_phy);
|
|
- }
|
|
|
|
if (mpt2sas_phy->phy)
|
|
mpt2sas_phy->phy->negotiated_linkrate =
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
|
|
index 65170cb..55aa597 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
|
|
@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
|
|
&mpt3sas_phy->remote_identify);
|
|
_transport_add_phy_to_an_existing_port(ioc, sas_node,
|
|
mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
|
|
- } else {
|
|
+ } else
|
|
memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
|
|
sas_identify));
|
|
- _transport_del_phy_from_an_existing_port(ioc, sas_node,
|
|
- mpt3sas_phy);
|
|
- }
|
|
|
|
if (mpt3sas_phy->phy)
|
|
mpt3sas_phy->phy->negotiated_linkrate =
|
|
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
|
|
index 6c1f223..4c0b8b4 100644
|
|
--- a/drivers/scsi/mvsas/mv_sas.c
|
|
+++ b/drivers/scsi/mvsas/mv_sas.c
|
|
@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
|
|
static int mvs_task_prep_ata(struct mvs_info *mvi,
|
|
struct mvs_task_exec_info *tei)
|
|
{
|
|
- struct sas_ha_struct *sha = mvi->sas;
|
|
struct sas_task *task = tei->task;
|
|
struct domain_device *dev = task->dev;
|
|
struct mvs_device *mvi_dev = dev->lldd_dev;
|
|
struct mvs_cmd_hdr *hdr = tei->hdr;
|
|
struct asd_sas_port *sas_port = dev->port;
|
|
- struct sas_phy *sphy = dev->phy;
|
|
- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
|
|
struct mvs_slot_info *slot;
|
|
void *buf_prd;
|
|
u32 tag = tei->tag, hdr_tag;
|
|
@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
|
|
slot->tx = mvi->tx_prod;
|
|
del_q = TXQ_MODE_I | tag |
|
|
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
|
|
- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
|
|
+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
|
|
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
|
|
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
|
|
index 1f42662..b5f22a9 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_gbl.h
|
|
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
|
|
@@ -728,8 +728,6 @@ extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
|
|
extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
|
|
extern void qla8044_wr_direct(struct scsi_qla_host *vha,
|
|
const uint32_t crb_reg, const uint32_t value);
|
|
-extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
|
|
-extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
|
|
extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
|
|
extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
|
|
extern void qla8044_clear_drv_active(struct qla_hw_data *);
|
|
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
|
|
index 0a1dcb4..13f4bef 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_isr.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_isr.c
|
|
@@ -572,8 +572,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
|
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
|
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
|
|
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
|
|
- uint32_t rscn_entry, host_pid;
|
|
+ uint32_t rscn_entry, host_pid, tmp_pid;
|
|
unsigned long flags;
|
|
+ fc_port_t *fcport = NULL;
|
|
|
|
/* Setup to process RIO completion. */
|
|
handle_cnt = 0;
|
|
@@ -968,6 +969,20 @@ skip_rio:
|
|
if (qla2x00_is_a_vp_did(vha, rscn_entry))
|
|
break;
|
|
|
|
+ /*
|
|
+ * Search for the rport related to this RSCN entry and mark it
|
|
+ * as lost.
|
|
+ */
|
|
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
|
|
+ if (atomic_read(&fcport->state) != FCS_ONLINE)
|
|
+ continue;
|
|
+ tmp_pid = fcport->d_id.b24;
|
|
+ if (fcport->d_id.b24 == rscn_entry) {
|
|
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
atomic_set(&vha->loop_down_timer, 0);
|
|
vha->flags.management_server_logged_in = 0;
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
|
|
index f60989d..24f69ac 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_nx2.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
|
|
@@ -146,7 +146,7 @@ qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
|
|
return;
|
|
}
|
|
|
|
-inline void
|
|
+static inline void
|
|
qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
|
|
{
|
|
uint32_t qsnt_state;
|
|
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
|
|
index 83cb612..23c1b0c 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_os.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_os.c
|
|
@@ -3039,10 +3039,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
|
|
}
|
|
|
|
static void
|
|
-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
|
|
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
|
|
{
|
|
- struct qla_hw_data *ha = vha->hw;
|
|
-
|
|
if (IS_QLA8044(ha)) {
|
|
qla8044_idc_lock(ha);
|
|
qla8044_clear_drv_active(ha);
|
|
@@ -3111,7 +3109,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
|
|
|
|
scsi_host_put(base_vha->host);
|
|
|
|
- qla2x00_clear_drv_active(base_vha);
|
|
+ qla2x00_clear_drv_active(ha);
|
|
|
|
qla2x00_unmap_iobases(ha);
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
|
|
index 0cb7307..2f264ac 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_target.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_target.c
|
|
@@ -1382,12 +1382,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
|
|
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
|
|
uint32_t req_cnt)
|
|
{
|
|
- struct qla_hw_data *ha = vha->hw;
|
|
- device_reg_t __iomem *reg = ha->iobase;
|
|
uint32_t cnt;
|
|
|
|
if (vha->req->cnt < (req_cnt + 2)) {
|
|
- cnt = (uint16_t)RD_REG_DWORD(®->isp24.req_q_out);
|
|
+ cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
|
|
|
|
ql_dbg(ql_dbg_tgt, vha, 0xe00a,
|
|
"Request ring circled: cnt=%d, vha->->ring_index=%d, "
|
|
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
|
|
index 788c4fe..1817f3f 100644
|
|
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
|
|
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
|
|
@@ -707,7 +707,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
|
|
pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
|
|
|
|
node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
|
|
- WARN_ON(node && (node != se_nacl));
|
|
+ if (WARN_ON(node && (node != se_nacl))) {
|
|
+ /*
|
|
+ * The nacl no longer matches what we think it should be.
|
|
+ * Most likely a new dynamic acl has been added while
|
|
+ * someone dropped the hardware lock. It clearly is a
|
|
+ * bug elsewhere, but this bit can't make things worse.
|
|
+ */
|
|
+ btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
|
|
+ node, GFP_ATOMIC);
|
|
+ }
|
|
|
|
pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
|
|
se_nacl, nacl->nport_wwnn, nacl->nport_id);
|
|
@@ -1506,7 +1515,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
|
|
/*
|
|
* Finally register the new FC Nexus with TCM
|
|
*/
|
|
- __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
|
|
+ transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
|
|
index f969aca..262ab83 100644
|
|
--- a/drivers/scsi/scsi_devinfo.c
|
|
+++ b/drivers/scsi/scsi_devinfo.c
|
|
@@ -202,6 +202,7 @@ static struct {
|
|
{"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
|
|
{"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
|
|
{"INSITE", "I325VM", NULL, BLIST_KEY},
|
|
+ {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
|
|
{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
|
|
{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
|
|
{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
|
|
@@ -210,6 +211,7 @@ static struct {
|
|
{"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
|
|
{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
|
|
{"MICROP", "4110", NULL, BLIST_NOTQ},
|
|
+ {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
|
|
{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
|
|
{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
|
|
{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
|
|
@@ -222,6 +224,7 @@ static struct {
|
|
{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
|
|
{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
|
|
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
|
|
+ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
|
|
{"Promise", "", NULL, BLIST_SPARSELUN},
|
|
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
|
|
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
|
|
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
|
|
index edb4d46..96b6664 100644
|
|
--- a/drivers/scsi/scsi_error.c
|
|
+++ b/drivers/scsi/scsi_error.c
|
|
@@ -1984,8 +1984,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
|
|
* is no point trying to lock the door of an off-line device.
|
|
*/
|
|
shost_for_each_device(sdev, shost) {
|
|
- if (scsi_device_online(sdev) && sdev->locked)
|
|
+ if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
|
|
scsi_eh_lock_door(sdev);
|
|
+ sdev->was_reset = 0;
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
|
|
index 62ec84b..719bd82 100644
|
|
--- a/drivers/scsi/scsi_lib.c
|
|
+++ b/drivers/scsi/scsi_lib.c
|
|
@@ -831,6 +831,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
|
|
scsi_next_command(cmd);
|
|
return;
|
|
}
|
|
+ } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
|
|
+ /*
|
|
+ * Certain non BLOCK_PC requests are commands that don't
|
|
+ * actually transfer anything (FLUSH), so cannot use
|
|
+ * good_bytes != blk_rq_bytes(req) as the signal for an error.
|
|
+ * This sets the error explicitly for the problem case.
|
|
+ */
|
|
+ error = __scsi_error_from_host_byte(cmd, result);
|
|
}
|
|
|
|
/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
|
|
@@ -1250,9 +1258,11 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
|
|
"rejecting I/O to dead device\n");
|
|
ret = BLKPREP_KILL;
|
|
break;
|
|
- case SDEV_QUIESCE:
|
|
case SDEV_BLOCK:
|
|
case SDEV_CREATED_BLOCK:
|
|
+ ret = BLKPREP_DEFER;
|
|
+ break;
|
|
+ case SDEV_QUIESCE:
|
|
/*
|
|
* If the devices is blocked we defer normal commands.
|
|
*/
|
|
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
|
|
index 001e9ce..a59be67 100644
|
|
--- a/drivers/scsi/scsi_pm.c
|
|
+++ b/drivers/scsi/scsi_pm.c
|
|
@@ -149,15 +149,15 @@ static int sdev_runtime_suspend(struct device *dev)
|
|
{
|
|
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
|
struct scsi_device *sdev = to_scsi_device(dev);
|
|
- int err;
|
|
+ int err = 0;
|
|
|
|
- err = blk_pre_runtime_suspend(sdev->request_queue);
|
|
- if (err)
|
|
- return err;
|
|
- if (pm && pm->runtime_suspend)
|
|
+ if (pm && pm->runtime_suspend) {
|
|
+ err = blk_pre_runtime_suspend(sdev->request_queue);
|
|
+ if (err)
|
|
+ return err;
|
|
err = pm->runtime_suspend(dev);
|
|
- blk_post_runtime_suspend(sdev->request_queue, err);
|
|
-
|
|
+ blk_post_runtime_suspend(sdev->request_queue, err);
|
|
+ }
|
|
return err;
|
|
}
|
|
|
|
@@ -180,11 +180,11 @@ static int sdev_runtime_resume(struct device *dev)
|
|
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
|
int err = 0;
|
|
|
|
- blk_pre_runtime_resume(sdev->request_queue);
|
|
- if (pm && pm->runtime_resume)
|
|
+ if (pm && pm->runtime_resume) {
|
|
+ blk_pre_runtime_resume(sdev->request_queue);
|
|
err = pm->runtime_resume(dev);
|
|
- blk_post_runtime_resume(sdev->request_queue, err);
|
|
-
|
|
+ blk_post_runtime_resume(sdev->request_queue, err);
|
|
+ }
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
|
|
index 4109530..054ec2c 100644
|
|
--- a/drivers/scsi/scsi_scan.c
|
|
+++ b/drivers/scsi/scsi_scan.c
|
|
@@ -922,6 +922,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|
if (*bflags & BLIST_USE_10_BYTE_MS)
|
|
sdev->use_10_for_ms = 1;
|
|
|
|
+ /* some devices don't like REPORT SUPPORTED OPERATION CODES
|
|
+ * and will simply timeout causing sd_mod init to take a very
|
|
+ * very long time */
|
|
+ if (*bflags & BLIST_NO_RSOC)
|
|
+ sdev->no_report_opcodes = 1;
|
|
+
|
|
/* set the device running here so that slave configure
|
|
* may do I/O */
|
|
ret = scsi_device_set_state(sdev, SDEV_RUNNING);
|
|
@@ -950,7 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
|
|
|
|
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
|
|
|
|
- if (*bflags & BLIST_SKIP_VPD_PAGES)
|
|
+ if (*bflags & BLIST_TRY_VPD_PAGES)
|
|
+ sdev->try_vpd_pages = 1;
|
|
+ else if (*bflags & BLIST_SKIP_VPD_PAGES)
|
|
sdev->skip_vpd_pages = 1;
|
|
|
|
transport_configure_device(&sdev->sdev_gendev);
|
|
@@ -1236,6 +1244,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
|
|
max_dev_lun = min(8U, max_dev_lun);
|
|
|
|
/*
|
|
+ * Stop scanning at 255 unless BLIST_SCSI3LUN
|
|
+ */
|
|
+ if (!(bflags & BLIST_SCSI3LUN))
|
|
+ max_dev_lun = min(256U, max_dev_lun);
|
|
+
|
|
+ /*
|
|
* We have already scanned LUN 0, so start at LUN 1. Keep scanning
|
|
* until we reach the max, or no LUN is found and we are not
|
|
* sparse_lun.
|
|
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
|
|
index d47ffc8..b85eaa0 100644
|
|
--- a/drivers/scsi/scsi_transport_srp.c
|
|
+++ b/drivers/scsi/scsi_transport_srp.c
|
|
@@ -397,6 +397,36 @@ static void srp_reconnect_work(struct work_struct *work)
|
|
}
|
|
}
|
|
|
|
+/**
|
|
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
|
|
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
|
|
+ *
|
|
+ * To do: add support for scsi-mq in this function.
|
|
+ */
|
|
+static int scsi_request_fn_active(struct Scsi_Host *shost)
|
|
+{
|
|
+ struct scsi_device *sdev;
|
|
+ struct request_queue *q;
|
|
+ int request_fn_active = 0;
|
|
+
|
|
+ shost_for_each_device(sdev, shost) {
|
|
+ q = sdev->request_queue;
|
|
+
|
|
+ spin_lock_irq(q->queue_lock);
|
|
+ request_fn_active += q->request_fn_active;
|
|
+ spin_unlock_irq(q->queue_lock);
|
|
+ }
|
|
+
|
|
+ return request_fn_active;
|
|
+}
|
|
+
|
|
+/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
|
|
+static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
|
|
+{
|
|
+ while (scsi_request_fn_active(shost))
|
|
+ msleep(20);
|
|
+}
|
|
+
|
|
static void __rport_fail_io_fast(struct srp_rport *rport)
|
|
{
|
|
struct Scsi_Host *shost = rport_to_shost(rport);
|
|
@@ -410,8 +440,10 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
|
|
|
|
/* Involve the LLD if possible to terminate all I/O on the rport. */
|
|
i = to_srp_internal(shost->transportt);
|
|
- if (i->f->terminate_rport_io)
|
|
+ if (i->f->terminate_rport_io) {
|
|
+ srp_wait_for_queuecommand(shost);
|
|
i->f->terminate_rport_io(rport);
|
|
+ }
|
|
}
|
|
|
|
/**
|
|
@@ -473,7 +505,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
|
|
if (delay > 0)
|
|
queue_delayed_work(system_long_wq, &rport->reconnect_work,
|
|
1UL * delay * HZ);
|
|
- if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
|
|
+ if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
|
|
+ srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
|
|
pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
|
|
rport->state);
|
|
scsi_target_block(&shost->shost_gendev);
|
|
@@ -504,27 +537,6 @@ void srp_start_tl_fail_timers(struct srp_rport *rport)
|
|
EXPORT_SYMBOL(srp_start_tl_fail_timers);
|
|
|
|
/**
|
|
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
|
|
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
|
|
- */
|
|
-static int scsi_request_fn_active(struct Scsi_Host *shost)
|
|
-{
|
|
- struct scsi_device *sdev;
|
|
- struct request_queue *q;
|
|
- int request_fn_active = 0;
|
|
-
|
|
- shost_for_each_device(sdev, shost) {
|
|
- q = sdev->request_queue;
|
|
-
|
|
- spin_lock_irq(q->queue_lock);
|
|
- request_fn_active += q->request_fn_active;
|
|
- spin_unlock_irq(q->queue_lock);
|
|
- }
|
|
-
|
|
- return request_fn_active;
|
|
-}
|
|
-
|
|
-/**
|
|
* srp_reconnect_rport() - reconnect to an SRP target port
|
|
* @rport: SRP target port.
|
|
*
|
|
@@ -559,8 +571,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
|
|
if (res)
|
|
goto out;
|
|
scsi_target_block(&shost->shost_gendev);
|
|
- while (scsi_request_fn_active(shost))
|
|
- msleep(20);
|
|
+ srp_wait_for_queuecommand(shost);
|
|
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
|
|
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
|
|
dev_name(&shost->shost_gendev), rport->state, res);
|
|
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
|
|
index 36d1a23..a107064 100644
|
|
--- a/drivers/scsi/sd.c
|
|
+++ b/drivers/scsi/sd.c
|
|
@@ -1599,6 +1599,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
|
|
{
|
|
u64 start_lba = blk_rq_pos(scmd->request);
|
|
u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
|
|
+ u64 factor = scmd->device->sector_size / 512;
|
|
u64 bad_lba;
|
|
int info_valid;
|
|
/*
|
|
@@ -1620,16 +1621,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
|
|
if (scsi_bufflen(scmd) <= scmd->device->sector_size)
|
|
return 0;
|
|
|
|
- if (scmd->device->sector_size < 512) {
|
|
- /* only legitimate sector_size here is 256 */
|
|
- start_lba <<= 1;
|
|
- end_lba <<= 1;
|
|
- } else {
|
|
- /* be careful ... don't want any overflows */
|
|
- unsigned int factor = scmd->device->sector_size / 512;
|
|
- do_div(start_lba, factor);
|
|
- do_div(end_lba, factor);
|
|
- }
|
|
+ /* be careful ... don't want any overflows */
|
|
+ do_div(start_lba, factor);
|
|
+ do_div(end_lba, factor);
|
|
|
|
/* The bad lba was reported incorrectly, we have no idea where
|
|
* the error is.
|
|
@@ -2196,8 +2190,7 @@ got_data:
|
|
if (sector_size != 512 &&
|
|
sector_size != 1024 &&
|
|
sector_size != 2048 &&
|
|
- sector_size != 4096 &&
|
|
- sector_size != 256) {
|
|
+ sector_size != 4096) {
|
|
sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
|
|
sector_size);
|
|
/*
|
|
@@ -2248,8 +2241,6 @@ got_data:
|
|
sdkp->capacity <<= 2;
|
|
else if (sector_size == 1024)
|
|
sdkp->capacity <<= 1;
|
|
- else if (sector_size == 256)
|
|
- sdkp->capacity >>= 1;
|
|
|
|
blk_queue_physical_block_size(sdp->request_queue,
|
|
sdkp->physical_block_size);
|
|
@@ -2686,6 +2677,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
|
|
|
|
static int sd_try_extended_inquiry(struct scsi_device *sdp)
|
|
{
|
|
+ /* Attempt VPD inquiry if the device blacklist explicitly calls
|
|
+ * for it.
|
|
+ */
|
|
+ if (sdp->try_vpd_pages)
|
|
+ return 1;
|
|
/*
|
|
* Although VPD inquiries can go to SCSI-2 type devices,
|
|
* some USB ones crash on receiving them, and the pages
|
|
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
|
|
index df5e961..721d839 100644
|
|
--- a/drivers/scsi/sg.c
|
|
+++ b/drivers/scsi/sg.c
|
|
@@ -522,7 +522,7 @@ static ssize_t
|
|
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
|
|
{
|
|
sg_io_hdr_t *hp = &srp->header;
|
|
- int err = 0;
|
|
+ int err = 0, err2;
|
|
int len;
|
|
|
|
if (count < SZ_SG_IO_HDR) {
|
|
@@ -551,8 +551,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
|
|
goto err_out;
|
|
}
|
|
err_out:
|
|
- err = sg_finish_rem_req(srp);
|
|
- return (0 == err) ? count : err;
|
|
+ err2 = sg_finish_rem_req(srp);
|
|
+ return err ? : err2 ? : count;
|
|
}
|
|
|
|
static ssize_t
|
|
@@ -1694,6 +1694,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
|
|
md->from_user = 0;
|
|
}
|
|
|
|
+ if (unlikely(iov_count > UIO_MAXIOV))
|
|
+ return -EINVAL;
|
|
+
|
|
if (iov_count) {
|
|
int len, size = sizeof(struct sg_iovec) * iov_count;
|
|
struct iovec *iov;
|
|
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
|
|
index a1d6986..f310982 100644
|
|
--- a/drivers/scsi/st.c
|
|
+++ b/drivers/scsi/st.c
|
|
@@ -1262,9 +1262,9 @@ static int st_open(struct inode *inode, struct file *filp)
|
|
spin_lock(&st_use_lock);
|
|
STp->in_use = 0;
|
|
spin_unlock(&st_use_lock);
|
|
- scsi_tape_put(STp);
|
|
if (resumed)
|
|
scsi_autopm_put_device(STp->device);
|
|
+ scsi_tape_put(STp);
|
|
return retval;
|
|
|
|
}
|
|
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
|
|
index 9969fa1..3bb6646 100644
|
|
--- a/drivers/scsi/storvsc_drv.c
|
|
+++ b/drivers/scsi/storvsc_drv.c
|
|
@@ -33,6 +33,7 @@
|
|
#include <linux/device.h>
|
|
#include <linux/hyperv.h>
|
|
#include <linux/mempool.h>
|
|
+#include <linux/blkdev.h>
|
|
#include <scsi/scsi.h>
|
|
#include <scsi/scsi_cmnd.h>
|
|
#include <scsi/scsi_host.h>
|
|
@@ -330,17 +331,17 @@ static int storvsc_timeout = 180;
|
|
|
|
static void storvsc_on_channel_callback(void *context);
|
|
|
|
-/*
|
|
- * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
|
|
- * reality, the path/target is not used (ie always set to 0) so our
|
|
- * scsi host adapter essentially has 1 bus with 1 target that contains
|
|
- * up to 256 luns.
|
|
- */
|
|
-#define STORVSC_MAX_LUNS_PER_TARGET 64
|
|
-#define STORVSC_MAX_TARGETS 1
|
|
-#define STORVSC_MAX_CHANNELS 1
|
|
+#define STORVSC_MAX_LUNS_PER_TARGET 255
|
|
+#define STORVSC_MAX_TARGETS 2
|
|
+#define STORVSC_MAX_CHANNELS 8
|
|
|
|
+#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
|
|
+#define STORVSC_FC_MAX_TARGETS 128
|
|
+#define STORVSC_FC_MAX_CHANNELS 8
|
|
|
|
+#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
|
|
+#define STORVSC_IDE_MAX_TARGETS 1
|
|
+#define STORVSC_IDE_MAX_CHANNELS 1
|
|
|
|
struct storvsc_cmd_request {
|
|
struct list_head entry;
|
|
@@ -738,21 +739,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
|
|
if (bounce_sgl[j].length == PAGE_SIZE) {
|
|
/* full..move to next entry */
|
|
sg_kunmap_atomic(bounce_addr);
|
|
+ bounce_addr = 0;
|
|
j++;
|
|
+ }
|
|
|
|
- /* if we need to use another bounce buffer */
|
|
- if (srclen || i != orig_sgl_count - 1)
|
|
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
|
|
+ /* if we need to use another bounce buffer */
|
|
+ if (srclen && bounce_addr == 0)
|
|
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
|
|
|
|
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
|
|
- /* unmap the last bounce that is < PAGE_SIZE */
|
|
- sg_kunmap_atomic(bounce_addr);
|
|
- }
|
|
}
|
|
|
|
sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
|
|
}
|
|
|
|
+ if (bounce_addr)
|
|
+ sg_kunmap_atomic(bounce_addr);
|
|
+
|
|
local_irq_restore(flags);
|
|
|
|
return total_copied;
|
|
@@ -1017,6 +1019,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
|
|
case ATA_12:
|
|
set_host_byte(scmnd, DID_PASSTHROUGH);
|
|
break;
|
|
+ /*
|
|
+ * On Some Windows hosts TEST_UNIT_READY command can return
|
|
+ * SRB_STATUS_ERROR, let the upper level code deal with it
|
|
+ * based on the sense information.
|
|
+ */
|
|
+ case TEST_UNIT_READY:
|
|
+ break;
|
|
default:
|
|
set_host_byte(scmnd, DID_TARGET_FAILURE);
|
|
}
|
|
@@ -1518,6 +1527,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
|
|
return SUCCESS;
|
|
}
|
|
|
|
+/*
|
|
+ * The host guarantees to respond to each command, although I/O latencies might
|
|
+ * be unbounded on Azure. Reset the timer unconditionally to give the host a
|
|
+ * chance to perform EH.
|
|
+ */
|
|
+static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
|
|
+{
|
|
+ return BLK_EH_RESET_TIMER;
|
|
+}
|
|
+
|
|
static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
|
|
{
|
|
bool allowed = true;
|
|
@@ -1553,9 +1572,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
|
|
struct vmscsi_request *vm_srb;
|
|
struct stor_mem_pools *memp = scmnd->device->hostdata;
|
|
|
|
- if (!storvsc_scsi_cmd_ok(scmnd)) {
|
|
- scmnd->scsi_done(scmnd);
|
|
- return 0;
|
|
+ if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
|
|
+ /*
|
|
+ * On legacy hosts filter unimplemented commands.
|
|
+ * Future hosts are expected to correctly handle
|
|
+ * unsupported commands. Furthermore, it is
|
|
+ * possible that some of the currently
|
|
+ * unsupported commands maybe supported in
|
|
+ * future versions of the host.
|
|
+ */
|
|
+ if (!storvsc_scsi_cmd_ok(scmnd)) {
|
|
+ scmnd->scsi_done(scmnd);
|
|
+ return 0;
|
|
+ }
|
|
}
|
|
|
|
request_size = sizeof(struct storvsc_cmd_request);
|
|
@@ -1580,26 +1609,23 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
|
|
vm_srb = &cmd_request->vstor_packet.vm_srb;
|
|
vm_srb->win8_extension.time_out_value = 60;
|
|
|
|
+ vm_srb->win8_extension.srb_flags |=
|
|
+ (SRB_FLAGS_QUEUE_ACTION_ENABLE |
|
|
+ SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
|
|
|
|
/* Build the SRB */
|
|
switch (scmnd->sc_data_direction) {
|
|
case DMA_TO_DEVICE:
|
|
vm_srb->data_in = WRITE_TYPE;
|
|
vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
|
|
- vm_srb->win8_extension.srb_flags |=
|
|
- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
|
|
- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
|
|
break;
|
|
case DMA_FROM_DEVICE:
|
|
vm_srb->data_in = READ_TYPE;
|
|
vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
|
|
- vm_srb->win8_extension.srb_flags |=
|
|
- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
|
|
- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
|
|
break;
|
|
default:
|
|
vm_srb->data_in = UNKNOWN_TYPE;
|
|
- vm_srb->win8_extension.srb_flags = 0;
|
|
+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
|
|
break;
|
|
}
|
|
|
|
@@ -1664,13 +1690,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
|
|
if (ret == -EAGAIN) {
|
|
/* no more space */
|
|
|
|
- if (cmd_request->bounce_sgl_count) {
|
|
+ if (cmd_request->bounce_sgl_count)
|
|
destroy_bounce_buffer(cmd_request->bounce_sgl,
|
|
cmd_request->bounce_sgl_count);
|
|
|
|
- ret = SCSI_MLQUEUE_DEVICE_BUSY;
|
|
- goto queue_error;
|
|
- }
|
|
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
|
|
+ goto queue_error;
|
|
}
|
|
|
|
return 0;
|
|
@@ -1687,11 +1712,11 @@ static struct scsi_host_template scsi_driver = {
|
|
.bios_param = storvsc_get_chs,
|
|
.queuecommand = storvsc_queuecommand,
|
|
.eh_host_reset_handler = storvsc_host_reset_handler,
|
|
+ .eh_timed_out = storvsc_eh_timed_out,
|
|
.slave_alloc = storvsc_device_alloc,
|
|
.slave_destroy = storvsc_device_destroy,
|
|
.slave_configure = storvsc_device_configure,
|
|
- .cmd_per_lun = 1,
|
|
- /* 64 max_queue * 1 target */
|
|
+ .cmd_per_lun = 255,
|
|
.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
|
|
.this_id = -1,
|
|
/* no use setting to 0 since ll_blk_rw reset it to 1 */
|
|
@@ -1743,19 +1768,25 @@ static int storvsc_probe(struct hv_device *device,
|
|
* set state to properly communicate with the host.
|
|
*/
|
|
|
|
- if (vmbus_proto_version == VERSION_WIN8) {
|
|
- sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
|
|
- vmscsi_size_delta = 0;
|
|
- vmstor_current_major = VMSTOR_WIN8_MAJOR;
|
|
- vmstor_current_minor = VMSTOR_WIN8_MINOR;
|
|
- } else {
|
|
+ switch (vmbus_proto_version) {
|
|
+ case VERSION_WS2008:
|
|
+ case VERSION_WIN7:
|
|
sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
|
|
vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
|
|
vmstor_current_major = VMSTOR_WIN7_MAJOR;
|
|
vmstor_current_minor = VMSTOR_WIN7_MINOR;
|
|
+ break;
|
|
+ default:
|
|
+ sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
|
|
+ vmscsi_size_delta = 0;
|
|
+ vmstor_current_major = VMSTOR_WIN8_MAJOR;
|
|
+ vmstor_current_minor = VMSTOR_WIN8_MINOR;
|
|
+ break;
|
|
}
|
|
|
|
-
|
|
+ if (dev_id->driver_data == SFC_GUID)
|
|
+ scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
|
|
+ STORVSC_FC_MAX_TARGETS);
|
|
host = scsi_host_alloc(&scsi_driver,
|
|
sizeof(struct hv_host_device));
|
|
if (!host)
|
|
@@ -1789,12 +1820,25 @@ static int storvsc_probe(struct hv_device *device,
|
|
host_dev->path = stor_device->path_id;
|
|
host_dev->target = stor_device->target_id;
|
|
|
|
- /* max # of devices per target */
|
|
- host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
|
|
- /* max # of targets per channel */
|
|
- host->max_id = STORVSC_MAX_TARGETS;
|
|
- /* max # of channels */
|
|
- host->max_channel = STORVSC_MAX_CHANNELS - 1;
|
|
+ switch (dev_id->driver_data) {
|
|
+ case SFC_GUID:
|
|
+ host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
|
|
+ host->max_id = STORVSC_FC_MAX_TARGETS;
|
|
+ host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
|
|
+ break;
|
|
+
|
|
+ case SCSI_GUID:
|
|
+ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
|
|
+ host->max_id = STORVSC_MAX_TARGETS;
|
|
+ host->max_channel = STORVSC_MAX_CHANNELS - 1;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
|
|
+ host->max_id = STORVSC_IDE_MAX_TARGETS;
|
|
+ host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
|
|
+ break;
|
|
+ }
|
|
/* max cmd length */
|
|
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
|
|
|
|
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
|
|
index 636bbe0..fc57c8a 100644
|
|
--- a/drivers/scsi/sun3_NCR5380.c
|
|
+++ b/drivers/scsi/sun3_NCR5380.c
|
|
@@ -2597,15 +2597,15 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
|
|
* Purpose : abort a command
|
|
*
|
|
* Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the
|
|
- * host byte of the result field to, if zero DID_ABORTED is
|
|
+ * host byte of the result field to, if zero DID_ABORTED is
|
|
* used.
|
|
*
|
|
- * Returns : 0 - success, -1 on failure.
|
|
+ * Returns : SUCCESS - success, FAILED on failure.
|
|
*
|
|
- * XXX - there is no way to abort the command that is currently
|
|
- * connected, you have to wait for it to complete. If this is
|
|
+ * XXX - there is no way to abort the command that is currently
|
|
+ * connected, you have to wait for it to complete. If this is
|
|
* a problem, we could implement longjmp() / setjmp(), setjmp()
|
|
- * called where the loop started in NCR5380_main().
|
|
+ * called where the loop started in NCR5380_main().
|
|
*/
|
|
|
|
static int NCR5380_abort(struct scsi_cmnd *cmd)
|
|
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
|
|
index 5d7b07f..5f8c6d2 100644
|
|
--- a/drivers/spi/spi-atmel.c
|
|
+++ b/drivers/spi/spi-atmel.c
|
|
@@ -781,17 +781,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
|
|
(unsigned long long)xfer->rx_dma);
|
|
}
|
|
|
|
- /* REVISIT: We're waiting for ENDRX before we start the next
|
|
+ /* REVISIT: We're waiting for RXBUFF before we start the next
|
|
* transfer because we need to handle some difficult timing
|
|
- * issues otherwise. If we wait for ENDTX in one transfer and
|
|
- * then starts waiting for ENDRX in the next, it's difficult
|
|
- * to tell the difference between the ENDRX interrupt we're
|
|
- * actually waiting for and the ENDRX interrupt of the
|
|
+ * issues otherwise. If we wait for TXBUFE in one transfer and
|
|
+ * then starts waiting for RXBUFF in the next, it's difficult
|
|
+ * to tell the difference between the RXBUFF interrupt we're
|
|
+ * actually waiting for and the RXBUFF interrupt of the
|
|
* previous transfer.
|
|
*
|
|
* It should be doable, though. Just not now...
|
|
*/
|
|
- spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
|
|
+ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
|
|
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
|
|
}
|
|
|
|
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
|
|
index 6d207af..996e16d 100644
|
|
--- a/drivers/spi/spi-dw-mid.c
|
|
+++ b/drivers/spi/spi-dw-mid.c
|
|
@@ -89,7 +89,13 @@ err_exit:
|
|
|
|
static void mid_spi_dma_exit(struct dw_spi *dws)
|
|
{
|
|
+ if (!dws->dma_inited)
|
|
+ return;
|
|
+
|
|
+ dmaengine_terminate_all(dws->txchan);
|
|
dma_release_channel(dws->txchan);
|
|
+
|
|
+ dmaengine_terminate_all(dws->rxchan);
|
|
dma_release_channel(dws->rxchan);
|
|
}
|
|
|
|
@@ -136,7 +142,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
|
|
txconf.dst_addr = dws->dma_addr;
|
|
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
|
|
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
|
+ txconf.dst_addr_width = dws->dma_width;
|
|
txconf.device_fc = false;
|
|
|
|
txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
|
|
@@ -159,7 +165,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
|
|
rxconf.src_addr = dws->dma_addr;
|
|
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
|
|
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
|
- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
|
+ rxconf.src_addr_width = dws->dma_width;
|
|
rxconf.device_fc = false;
|
|
|
|
rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
|
|
@@ -216,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
|
|
iounmap(clk_reg);
|
|
|
|
dws->num_cs = 16;
|
|
- dws->fifo_len = 40; /* FIFO has 40 words buffer */
|
|
|
|
#ifdef CONFIG_SPI_DW_MID_DMA
|
|
dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
|
|
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
|
|
index 3f3dc12..e149604 100644
|
|
--- a/drivers/spi/spi-dw-pci.c
|
|
+++ b/drivers/spi/spi-dw-pci.c
|
|
@@ -62,6 +62,8 @@ static int spi_pci_probe(struct pci_dev *pdev,
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ dws->regs = pcim_iomap_table(pdev)[pci_bar];
|
|
+
|
|
dws->bus_num = 0;
|
|
dws->num_cs = 4;
|
|
dws->irq = pdev->irq;
|
|
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
|
|
index e63d270..e543b80 100644
|
|
--- a/drivers/spi/spi-dw.c
|
|
+++ b/drivers/spi/spi-dw.c
|
|
@@ -394,9 +394,6 @@ static void pump_transfers(unsigned long data)
|
|
chip = dws->cur_chip;
|
|
spi = message->spi;
|
|
|
|
- if (unlikely(!chip->clk_div))
|
|
- chip->clk_div = dws->max_freq / chip->speed_hz;
|
|
-
|
|
if (message->state == ERROR_STATE) {
|
|
message->status = -EIO;
|
|
goto early_exit;
|
|
@@ -437,7 +434,7 @@ static void pump_transfers(unsigned long data)
|
|
if (transfer->speed_hz) {
|
|
speed = chip->speed_hz;
|
|
|
|
- if (transfer->speed_hz != speed) {
|
|
+ if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
|
|
speed = transfer->speed_hz;
|
|
if (speed > dws->max_freq) {
|
|
printk(KERN_ERR "MRST SPI0: unsupported"
|
|
@@ -659,7 +656,6 @@ static int dw_spi_setup(struct spi_device *spi)
|
|
dev_err(&spi->dev, "No max speed HZ parameter\n");
|
|
return -EINVAL;
|
|
}
|
|
- chip->speed_hz = spi->max_speed_hz;
|
|
|
|
chip->tmode = 0; /* Tx & Rx */
|
|
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
|
|
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
|
|
index a253920..a5db6f9 100644
|
|
--- a/drivers/spi/spi-fsl-dspi.c
|
|
+++ b/drivers/spi/spi-fsl-dspi.c
|
|
@@ -45,7 +45,7 @@
|
|
|
|
#define SPI_TCR 0x08
|
|
|
|
-#define SPI_CTAR(x) (0x0c + (x * 4))
|
|
+#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
|
|
#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
|
|
#define SPI_CTAR_CPOL(x) ((x) << 26)
|
|
#define SPI_CTAR_CPHA(x) ((x) << 25)
|
|
@@ -69,7 +69,7 @@
|
|
|
|
#define SPI_PUSHR 0x34
|
|
#define SPI_PUSHR_CONT (1 << 31)
|
|
-#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28)
|
|
+#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
|
|
#define SPI_PUSHR_EOQ (1 << 27)
|
|
#define SPI_PUSHR_CTCNT (1 << 26)
|
|
#define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
|
|
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
|
|
index 119f7af..4dcb292 100644
|
|
--- a/drivers/spi/spi-fsl-spi.c
|
|
+++ b/drivers/spi/spi-fsl-spi.c
|
|
@@ -362,18 +362,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
|
|
static void fsl_spi_do_one_msg(struct spi_message *m)
|
|
{
|
|
struct spi_device *spi = m->spi;
|
|
- struct spi_transfer *t;
|
|
+ struct spi_transfer *t, *first;
|
|
unsigned int cs_change;
|
|
const int nsecs = 50;
|
|
int status;
|
|
|
|
- cs_change = 1;
|
|
- status = 0;
|
|
+ /* Don't allow changes if CS is active */
|
|
+ first = list_first_entry(&m->transfers, struct spi_transfer,
|
|
+ transfer_list);
|
|
list_for_each_entry(t, &m->transfers, transfer_list) {
|
|
- if (t->bits_per_word || t->speed_hz) {
|
|
- /* Don't allow changes if CS is active */
|
|
+ if ((first->bits_per_word != t->bits_per_word) ||
|
|
+ (first->speed_hz != t->speed_hz)) {
|
|
status = -EINVAL;
|
|
+ dev_err(&spi->dev,
|
|
+ "bits_per_word/speed_hz should be same for the same SPI transfer\n");
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
|
|
+ cs_change = 1;
|
|
+ status = -EINVAL;
|
|
+ list_for_each_entry(t, &m->transfers, transfer_list) {
|
|
+ if (t->bits_per_word || t->speed_hz) {
|
|
if (cs_change)
|
|
status = fsl_spi_setup_transfer(spi, t);
|
|
if (status < 0)
|
|
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
|
|
index a72127f..b0059e7 100644
|
|
--- a/drivers/spi/spi-omap2-mcspi.c
|
|
+++ b/drivers/spi/spi-omap2-mcspi.c
|
|
@@ -147,6 +147,7 @@ struct omap2_mcspi_cs {
|
|
void __iomem *base;
|
|
unsigned long phys;
|
|
int word_len;
|
|
+ u16 mode;
|
|
struct list_head node;
|
|
/* Context save and restore shadow register */
|
|
u32 chconf0;
|
|
@@ -320,7 +321,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
|
|
disable_fifo:
|
|
if (t->rx_buf != NULL)
|
|
chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
|
|
- else
|
|
+
|
|
+ if (t->tx_buf != NULL)
|
|
chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
|
|
|
|
mcspi_write_chconf0(spi, chconf);
|
|
@@ -899,6 +901,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
|
|
|
|
mcspi_write_chconf0(spi, l);
|
|
|
|
+ cs->mode = spi->mode;
|
|
+
|
|
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
|
|
OMAP2_MCSPI_MAX_FREQ >> div,
|
|
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
|
|
@@ -971,6 +975,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
|
|
return -ENOMEM;
|
|
cs->base = mcspi->base + spi->chip_select * 0x14;
|
|
cs->phys = mcspi->phys + spi->chip_select * 0x14;
|
|
+ cs->mode = 0;
|
|
cs->chconf0 = 0;
|
|
spi->controller_state = cs;
|
|
/* Link this to context save list */
|
|
@@ -1051,6 +1056,16 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
|
|
cs = spi->controller_state;
|
|
cd = spi->controller_data;
|
|
|
|
+ /*
|
|
+ * The slave driver could have changed spi->mode in which case
|
|
+ * it will be different from cs->mode (the current hardware setup).
|
|
+ * If so, set par_override (even though its not a parity issue) so
|
|
+ * omap2_mcspi_setup_transfer will be called to configure the hardware
|
|
+ * with the correct mode on the first iteration of the loop below.
|
|
+ */
|
|
+ if (spi->mode != cs->mode)
|
|
+ par_override = 1;
|
|
+
|
|
omap2_mcspi_set_enable(spi, 0);
|
|
list_for_each_entry(t, &m->transfers, transfer_list) {
|
|
if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
|
|
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
|
|
index 7f2121f..977b061 100644
|
|
--- a/drivers/spi/spi-orion.c
|
|
+++ b/drivers/spi/spi-orion.c
|
|
@@ -404,8 +404,6 @@ static int orion_spi_probe(struct platform_device *pdev)
|
|
struct resource *r;
|
|
unsigned long tclk_hz;
|
|
int status = 0;
|
|
- const u32 *iprop;
|
|
- int size;
|
|
|
|
master = spi_alloc_master(&pdev->dev, sizeof(*spi));
|
|
if (master == NULL) {
|
|
@@ -416,10 +414,10 @@ static int orion_spi_probe(struct platform_device *pdev)
|
|
if (pdev->id != -1)
|
|
master->bus_num = pdev->id;
|
|
if (pdev->dev.of_node) {
|
|
- iprop = of_get_property(pdev->dev.of_node, "cell-index",
|
|
- &size);
|
|
- if (iprop && size == sizeof(*iprop))
|
|
- master->bus_num = *iprop;
|
|
+ u32 cell_index;
|
|
+ if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
|
|
+ &cell_index))
|
|
+ master->bus_num = cell_index;
|
|
}
|
|
|
|
/* we support only mode 0, and no options */
|
|
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
|
|
index 2789b45..fe091a8 100644
|
|
--- a/drivers/spi/spi-pl022.c
|
|
+++ b/drivers/spi/spi-pl022.c
|
|
@@ -503,12 +503,12 @@ static void giveback(struct pl022 *pl022)
|
|
pl022->cur_msg = NULL;
|
|
pl022->cur_transfer = NULL;
|
|
pl022->cur_chip = NULL;
|
|
- spi_finalize_current_message(pl022->master);
|
|
|
|
/* disable the SPI/SSP operation */
|
|
writew((readw(SSP_CR1(pl022->virtbase)) &
|
|
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
|
|
|
|
+ spi_finalize_current_message(pl022->master);
|
|
}
|
|
|
|
/**
|
|
@@ -1075,7 +1075,7 @@ err_rxdesc:
|
|
pl022->sgt_tx.nents, DMA_TO_DEVICE);
|
|
err_tx_sgmap:
|
|
dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
|
|
- pl022->sgt_tx.nents, DMA_FROM_DEVICE);
|
|
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
|
|
err_rx_sgmap:
|
|
sg_free_table(&pl022->sgt_tx);
|
|
err_alloc_tx_sg:
|
|
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
|
|
index c702fc5..458a148 100644
|
|
--- a/drivers/spi/spi-pxa2xx.c
|
|
+++ b/drivers/spi/spi-pxa2xx.c
|
|
@@ -400,8 +400,8 @@ static void giveback(struct driver_data *drv_data)
|
|
cs_deassert(drv_data);
|
|
}
|
|
|
|
- spi_finalize_current_message(drv_data->master);
|
|
drv_data->cur_chip = NULL;
|
|
+ spi_finalize_current_message(drv_data->master);
|
|
}
|
|
|
|
static void reset_sccr1(struct driver_data *drv_data)
|
|
@@ -1078,6 +1078,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
|
|
{ "INT3430", 0 },
|
|
{ "INT3431", 0 },
|
|
{ "80860F0E", 0 },
|
|
+ { "8086228E", 0 },
|
|
{ },
|
|
};
|
|
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
|
|
@@ -1279,7 +1280,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
|
|
if (status != 0)
|
|
return status;
|
|
write_SSCR0(0, drv_data->ioaddr);
|
|
- clk_disable_unprepare(ssp->clk);
|
|
+
|
|
+ if (!pm_runtime_suspended(dev))
|
|
+ clk_disable_unprepare(ssp->clk);
|
|
|
|
return 0;
|
|
}
|
|
@@ -1293,7 +1296,8 @@ static int pxa2xx_spi_resume(struct device *dev)
|
|
pxa2xx_spi_dma_resume(drv_data);
|
|
|
|
/* Enable the SSP clock */
|
|
- clk_prepare_enable(ssp->clk);
|
|
+ if (!pm_runtime_suspended(dev))
|
|
+ clk_prepare_enable(ssp->clk);
|
|
|
|
/* Restore LPSS private register bits */
|
|
lpss_ssp_setup(drv_data);
|
|
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
|
|
index fbf3b22..f3e3ae8 100644
|
|
--- a/drivers/spi/spi.c
|
|
+++ b/drivers/spi/spi.c
|
|
@@ -834,9 +834,6 @@ void spi_finalize_current_message(struct spi_master *master)
|
|
|
|
spin_lock_irqsave(&master->queue_lock, flags);
|
|
mesg = master->cur_msg;
|
|
- master->cur_msg = NULL;
|
|
-
|
|
- queue_kthread_work(&master->kworker, &master->pump_messages);
|
|
spin_unlock_irqrestore(&master->queue_lock, flags);
|
|
|
|
if (master->cur_msg_prepared && master->unprepare_message) {
|
|
@@ -846,13 +843,18 @@ void spi_finalize_current_message(struct spi_master *master)
|
|
"failed to unprepare message: %d\n", ret);
|
|
}
|
|
}
|
|
+
|
|
+ spin_lock_irqsave(&master->queue_lock, flags);
|
|
+ master->cur_msg = NULL;
|
|
master->cur_msg_prepared = false;
|
|
+ queue_kthread_work(&master->kworker, &master->pump_messages);
|
|
+ spin_unlock_irqrestore(&master->queue_lock, flags);
|
|
+
|
|
+ trace_spi_message_done(mesg);
|
|
|
|
mesg->state = NULL;
|
|
if (mesg->complete)
|
|
mesg->complete(mesg->context);
|
|
-
|
|
- trace_spi_message_done(mesg);
|
|
}
|
|
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
|
|
|
|
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
|
|
index d7c6e36..2fe5b61 100644
|
|
--- a/drivers/spi/spidev.c
|
|
+++ b/drivers/spi/spidev.c
|
|
@@ -243,7 +243,10 @@ static int spidev_message(struct spidev_data *spidev,
|
|
k_tmp->len = u_tmp->len;
|
|
|
|
total += k_tmp->len;
|
|
- if (total > bufsiz) {
|
|
+ /* Check total length of transfers. Also check each
|
|
+ * transfer length to avoid arithmetic overflow.
|
|
+ */
|
|
+ if (total > bufsiz || k_tmp->len > bufsiz) {
|
|
status = -EMSGSIZE;
|
|
goto done;
|
|
}
|
|
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
|
|
index 1e9da40..5287810 100644
|
|
--- a/drivers/staging/comedi/comedi_compat32.c
|
|
+++ b/drivers/staging/comedi/comedi_compat32.c
|
|
@@ -262,7 +262,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
|
|
{
|
|
struct comedi_cmd __user *cmd;
|
|
struct comedi32_cmd_struct __user *cmd32;
|
|
- int rc;
|
|
+ int rc, err;
|
|
|
|
cmd32 = compat_ptr(arg);
|
|
cmd = compat_alloc_user_space(sizeof(*cmd));
|
|
@@ -271,7 +271,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
|
|
if (rc)
|
|
return rc;
|
|
|
|
- return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
|
|
+ rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
|
|
+ if (rc == -EAGAIN) {
|
|
+ /* Special case: copy cmd back to user. */
|
|
+ err = put_compat_cmd(cmd32, cmd);
|
|
+ if (err)
|
|
+ rc = err;
|
|
+ }
|
|
+
|
|
+ return rc;
|
|
}
|
|
|
|
/* Handle 32-bit COMEDI_CMDTEST ioctl. */
|
|
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
|
|
index 6f622b4..927edd1 100644
|
|
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
|
|
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
|
|
@@ -113,8 +113,20 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
|
|
{
|
|
unsigned long reg = (unsigned long)s->private;
|
|
|
|
- if (comedi_dio_update_state(s, data))
|
|
- outl(s->state, dev->iobase + reg);
|
|
+ if (comedi_dio_update_state(s, data)) {
|
|
+ unsigned int val = s->state;
|
|
+
|
|
+ if (s->n_chan == 16) {
|
|
+ /*
|
|
+ * It seems the PCI-7230 needs the 16-bit DO state
|
|
+ * to be shifted left by 16 bits before being written
|
|
+ * to the 32-bit register. Set the value in both
|
|
+ * halves of the register to be sure.
|
|
+ */
|
|
+ val |= val << 16;
|
|
+ }
|
|
+ outl(val, dev->iobase + reg);
|
|
+ }
|
|
|
|
data[1] = s->state;
|
|
|
|
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
|
|
index 4fff173..3d1cb5b 100644
|
|
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
|
|
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
|
|
@@ -441,6 +441,29 @@ static const struct comedi_lrange ai_ranges_64xx = {
|
|
}
|
|
};
|
|
|
|
+static const uint8_t ai_range_code_64xx[8] = {
|
|
+	0x0, 0x1, 0x2, 0x3,	/* bipolar 10, 5, 2.5, 1.25 */
|
|
+ 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */
|
|
+};
|
|
+
|
|
+/* analog input ranges for 64-Mx boards */
|
|
+static const struct comedi_lrange ai_ranges_64_mx = {
|
|
+ 7, {
|
|
+ BIP_RANGE(5),
|
|
+ BIP_RANGE(2.5),
|
|
+ BIP_RANGE(1.25),
|
|
+ BIP_RANGE(0.625),
|
|
+ UNI_RANGE(5),
|
|
+ UNI_RANGE(2.5),
|
|
+ UNI_RANGE(1.25)
|
|
+ }
|
|
+};
|
|
+
|
|
+static const uint8_t ai_range_code_64_mx[7] = {
|
|
+ 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */
|
|
+ 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */
|
|
+};
|
|
+
|
|
/* analog input ranges for 60xx boards */
|
|
static const struct comedi_lrange ai_ranges_60xx = {
|
|
4, {
|
|
@@ -451,6 +474,10 @@ static const struct comedi_lrange ai_ranges_60xx = {
|
|
}
|
|
};
|
|
|
|
+static const uint8_t ai_range_code_60xx[4] = {
|
|
+ 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */
|
|
+};
|
|
+
|
|
/* analog input ranges for 6030, etc boards */
|
|
static const struct comedi_lrange ai_ranges_6030 = {
|
|
14, {
|
|
@@ -471,6 +498,11 @@ static const struct comedi_lrange ai_ranges_6030 = {
|
|
}
|
|
};
|
|
|
|
+static const uint8_t ai_range_code_6030[14] = {
|
|
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
|
|
+ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
|
|
+};
|
|
+
|
|
/* analog input ranges for 6052, etc boards */
|
|
static const struct comedi_lrange ai_ranges_6052 = {
|
|
15, {
|
|
@@ -492,6 +524,11 @@ static const struct comedi_lrange ai_ranges_6052 = {
|
|
}
|
|
};
|
|
|
|
+static const uint8_t ai_range_code_6052[15] = {
|
|
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */
|
|
+ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 0.1 */
|
|
+};
|
|
+
|
|
/* analog input ranges for 4020 board */
|
|
static const struct comedi_lrange ai_ranges_4020 = {
|
|
2, {
|
|
@@ -595,6 +632,7 @@ struct pcidas64_board {
|
|
int ai_bits; /* analog input resolution */
|
|
int ai_speed; /* fastest conversion period in ns */
|
|
const struct comedi_lrange *ai_range_table;
|
|
+ const uint8_t *ai_range_code;
|
|
int ao_nchan; /* number of analog out channels */
|
|
int ao_bits; /* analog output resolution */
|
|
int ao_scan_speed; /* analog output scan speed */
|
|
@@ -653,6 +691,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
.ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_code = ai_range_code_64xx,
|
|
.ao_range_table = &ao_ranges_64xx,
|
|
.ao_range_code = ao_range_code_64xx,
|
|
.ai_fifo = &ai_fifo_64xx,
|
|
@@ -668,6 +707,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
.ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_code = ai_range_code_64xx,
|
|
.ao_range_table = &ao_ranges_64xx,
|
|
.ao_range_code = ao_range_code_64xx,
|
|
.ai_fifo = &ai_fifo_64xx,
|
|
@@ -682,7 +722,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_bits = 16,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ao_range_table = &ao_ranges_64xx,
|
|
.ao_range_code = ao_range_code_64xx,
|
|
.ai_fifo = &ai_fifo_64xx,
|
|
@@ -697,7 +738,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_bits = 16,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ao_range_table = &ao_ranges_64xx,
|
|
.ao_range_code = ao_range_code_64xx,
|
|
.ai_fifo = &ai_fifo_64xx,
|
|
@@ -712,7 +754,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_bits = 16,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ao_range_table = &ao_ranges_64xx,
|
|
.ao_range_code = ao_range_code_64xx,
|
|
.ai_fifo = &ai_fifo_64xx,
|
|
@@ -727,6 +770,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_bits = 16,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ao_range_table = &range_bipolar10,
|
|
.ao_range_code = ao_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -742,6 +786,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 100000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ao_range_table = &range_bipolar10,
|
|
.ao_range_code = ao_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -756,6 +801,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 100000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ao_range_table = &range_bipolar10,
|
|
.ao_range_code = ao_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -771,6 +817,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 100000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ao_range_table = &range_bipolar10,
|
|
.ao_range_code = ao_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -786,6 +833,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6030,
|
|
+ .ai_range_code = ai_range_code_6030,
|
|
.ao_range_table = &ao_ranges_6030,
|
|
.ao_range_code = ao_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -801,6 +849,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6030,
|
|
+ .ai_range_code = ai_range_code_6030,
|
|
.ao_range_table = &ao_ranges_6030,
|
|
.ao_range_code = ao_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -814,6 +863,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 0,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6030,
|
|
+ .ai_range_code = ai_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
.has_8255 = 0,
|
|
},
|
|
@@ -825,6 +875,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 0,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6030,
|
|
+ .ai_range_code = ai_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
.has_8255 = 0,
|
|
},
|
|
@@ -837,6 +888,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 0,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
.has_8255 = 0,
|
|
},
|
|
@@ -850,6 +902,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 100000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ao_range_table = &range_bipolar10,
|
|
.ao_range_code = ao_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -865,6 +918,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 100000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_60xx,
|
|
+ .ai_range_code = ai_range_code_60xx,
|
|
.ao_range_table = &range_bipolar10,
|
|
.ao_range_code = ao_range_code_60xx,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -880,6 +934,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 1000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6052,
|
|
+ .ai_range_code = ai_range_code_6052,
|
|
.ao_range_table = &ao_ranges_6030,
|
|
.ao_range_code = ao_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -895,6 +950,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 3333,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6052,
|
|
+ .ai_range_code = ai_range_code_6052,
|
|
.ao_range_table = &ao_ranges_6030,
|
|
.ao_range_code = ao_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -910,6 +966,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 1000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6052,
|
|
+ .ai_range_code = ai_range_code_6052,
|
|
.ao_range_table = &ao_ranges_6030,
|
|
.ao_range_code = ao_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -925,6 +982,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 1000,
|
|
.layout = LAYOUT_60XX,
|
|
.ai_range_table = &ai_ranges_6052,
|
|
+ .ai_range_code = ai_range_code_6052,
|
|
.ao_range_table = &ao_ranges_6030,
|
|
.ao_range_code = ao_range_code_6030,
|
|
.ai_fifo = &ai_fifo_60xx,
|
|
@@ -959,6 +1017,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
.ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_code = ai_range_code_64xx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -970,7 +1029,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 0,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -982,7 +1042,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 0,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -994,7 +1055,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 0,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -1006,7 +1068,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 2,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -1018,7 +1081,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 2,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -1030,7 +1094,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
|
|
.ao_nchan = 2,
|
|
.ao_scan_speed = 10000,
|
|
.layout = LAYOUT_64XX,
|
|
- .ai_range_table = &ai_ranges_64xx,
|
|
+ .ai_range_table = &ai_ranges_64_mx,
|
|
+ .ai_range_code = ai_range_code_64_mx,
|
|
.ai_fifo = ai_fifo_64xx,
|
|
.has_8255 = 1,
|
|
},
|
|
@@ -1127,45 +1192,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
|
|
unsigned int range_index)
|
|
{
|
|
const struct pcidas64_board *thisboard = comedi_board(dev);
|
|
- const struct comedi_krange *range =
|
|
- &thisboard->ai_range_table->range[range_index];
|
|
- unsigned int bits = 0;
|
|
|
|
- switch (range->max) {
|
|
- case 10000000:
|
|
- bits = 0x000;
|
|
- break;
|
|
- case 5000000:
|
|
- bits = 0x100;
|
|
- break;
|
|
- case 2000000:
|
|
- case 2500000:
|
|
- bits = 0x200;
|
|
- break;
|
|
- case 1000000:
|
|
- case 1250000:
|
|
- bits = 0x300;
|
|
- break;
|
|
- case 500000:
|
|
- bits = 0x400;
|
|
- break;
|
|
- case 200000:
|
|
- case 250000:
|
|
- bits = 0x500;
|
|
- break;
|
|
- case 100000:
|
|
- bits = 0x600;
|
|
- break;
|
|
- case 50000:
|
|
- bits = 0x700;
|
|
- break;
|
|
- default:
|
|
- comedi_error(dev, "bug! in ai_range_bits_6xxx");
|
|
- break;
|
|
- }
|
|
- if (range->min == 0)
|
|
- bits += 0x900;
|
|
- return bits;
|
|
+ return thisboard->ai_range_code[range_index] << 8;
|
|
}
|
|
|
|
static unsigned int hw_revision(const struct comedi_device *dev,
|
|
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
|
|
index e516bb6..907aa30 100644
|
|
--- a/drivers/staging/et131x/et131x.c
|
|
+++ b/drivers/staging/et131x/et131x.c
|
|
@@ -1422,22 +1422,16 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
|
|
* @reg: the register to read
|
|
* @value: 16-bit value to write
|
|
*/
|
|
-static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
|
|
+static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
|
|
+ u16 value)
|
|
{
|
|
struct mac_regs __iomem *mac = &adapter->regs->mac;
|
|
- struct phy_device *phydev = adapter->phydev;
|
|
int status = 0;
|
|
- u8 addr;
|
|
u32 delay = 0;
|
|
u32 mii_addr;
|
|
u32 mii_cmd;
|
|
u32 mii_indicator;
|
|
|
|
- if (!phydev)
|
|
- return -EIO;
|
|
-
|
|
- addr = phydev->addr;
|
|
-
|
|
/* Save a local copy of the registers we are dealing with so we can
|
|
* set them back
|
|
*/
|
|
@@ -1632,17 +1626,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
|
|
struct net_device *netdev = bus->priv;
|
|
struct et131x_adapter *adapter = netdev_priv(netdev);
|
|
|
|
- return et131x_mii_write(adapter, reg, value);
|
|
-}
|
|
-
|
|
-static int et131x_mdio_reset(struct mii_bus *bus)
|
|
-{
|
|
- struct net_device *netdev = bus->priv;
|
|
- struct et131x_adapter *adapter = netdev_priv(netdev);
|
|
-
|
|
- et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
|
|
-
|
|
- return 0;
|
|
+ return et131x_mii_write(adapter, phy_addr, reg, value);
|
|
}
|
|
|
|
/* et1310_phy_power_switch - PHY power control
|
|
@@ -1657,18 +1641,20 @@ static int et131x_mdio_reset(struct mii_bus *bus)
|
|
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
|
|
{
|
|
u16 data;
|
|
+ struct phy_device *phydev = adapter->phydev;
|
|
|
|
et131x_mii_read(adapter, MII_BMCR, &data);
|
|
data &= ~BMCR_PDOWN;
|
|
if (down)
|
|
data |= BMCR_PDOWN;
|
|
- et131x_mii_write(adapter, MII_BMCR, data);
|
|
+ et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
|
|
}
|
|
|
|
/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
|
|
static void et131x_xcvr_init(struct et131x_adapter *adapter)
|
|
{
|
|
u16 lcr2;
|
|
+ struct phy_device *phydev = adapter->phydev;
|
|
|
|
/* Set the LED behavior such that LED 1 indicates speed (off =
|
|
* 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
|
|
@@ -1689,7 +1675,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
|
|
else
|
|
lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
|
|
|
|
- et131x_mii_write(adapter, PHY_LED_2, lcr2);
|
|
+ et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
|
|
}
|
|
}
|
|
|
|
@@ -3638,14 +3624,14 @@ static void et131x_adjust_link(struct net_device *netdev)
|
|
|
|
et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
|
|
®ister18);
|
|
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
|
|
- register18 | 0x4);
|
|
- et131x_mii_write(adapter, PHY_INDEX_REG,
|
|
+ et131x_mii_write(adapter, phydev->addr,
|
|
+ PHY_MPHY_CONTROL_REG, register18 | 0x4);
|
|
+ et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
|
|
register18 | 0x8402);
|
|
- et131x_mii_write(adapter, PHY_DATA_REG,
|
|
+ et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
|
|
register18 | 511);
|
|
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
|
|
- register18);
|
|
+ et131x_mii_write(adapter, phydev->addr,
|
|
+ PHY_MPHY_CONTROL_REG, register18);
|
|
}
|
|
|
|
et1310_config_flow_control(adapter);
|
|
@@ -3657,7 +3643,8 @@ static void et131x_adjust_link(struct net_device *netdev)
|
|
et131x_mii_read(adapter, PHY_CONFIG, ®);
|
|
reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
|
|
reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
|
|
- et131x_mii_write(adapter, PHY_CONFIG, reg);
|
|
+ et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
|
|
+ reg);
|
|
}
|
|
|
|
et131x_set_rx_dma_timer(adapter);
|
|
@@ -3670,14 +3657,14 @@ static void et131x_adjust_link(struct net_device *netdev)
|
|
|
|
et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
|
|
®ister18);
|
|
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
|
|
- register18 | 0x4);
|
|
- et131x_mii_write(adapter, PHY_INDEX_REG,
|
|
- register18 | 0x8402);
|
|
- et131x_mii_write(adapter, PHY_DATA_REG,
|
|
- register18 | 511);
|
|
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
|
|
- register18);
|
|
+ et131x_mii_write(adapter, phydev->addr,
|
|
+ PHY_MPHY_CONTROL_REG, register18 | 0x4);
|
|
+ et131x_mii_write(adapter, phydev->addr,
|
|
+ PHY_INDEX_REG, register18 | 0x8402);
|
|
+ et131x_mii_write(adapter, phydev->addr,
|
|
+ PHY_DATA_REG, register18 | 511);
|
|
+ et131x_mii_write(adapter, phydev->addr,
|
|
+ PHY_MPHY_CONTROL_REG, register18);
|
|
}
|
|
|
|
/* Free the packets being actively sent & stopped */
|
|
@@ -4646,10 +4633,6 @@ static int et131x_pci_setup(struct pci_dev *pdev,
|
|
/* Copy address into the net_device struct */
|
|
memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
|
|
|
|
- /* Init variable for counting how long we do not have link status */
|
|
- adapter->boot_coma = 0;
|
|
- et1310_disable_phy_coma(adapter);
|
|
-
|
|
rc = -ENOMEM;
|
|
|
|
/* Setup the mii_bus struct */
|
|
@@ -4665,7 +4648,6 @@ static int et131x_pci_setup(struct pci_dev *pdev,
|
|
adapter->mii_bus->priv = netdev;
|
|
adapter->mii_bus->read = et131x_mdio_read;
|
|
adapter->mii_bus->write = et131x_mdio_write;
|
|
- adapter->mii_bus->reset = et131x_mdio_reset;
|
|
adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
|
|
GFP_KERNEL);
|
|
if (!adapter->mii_bus->irq)
|
|
@@ -4689,6 +4671,10 @@ static int et131x_pci_setup(struct pci_dev *pdev,
|
|
/* Setup et1310 as per the documentation */
|
|
et131x_adapter_setup(adapter);
|
|
|
|
+ /* Init variable for counting how long we do not have link status */
|
|
+ adapter->boot_coma = 0;
|
|
+ et1310_disable_phy_coma(adapter);
|
|
+
|
|
/* We can enable interrupts now
|
|
*
|
|
* NOTE - Because registration of interrupt handler is done in the
|
|
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
|
|
index 2fa3a5a..f2e6599 100644
|
|
--- a/drivers/staging/gdm724x/gdm_mux.c
|
|
+++ b/drivers/staging/gdm724x/gdm_mux.c
|
|
@@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r)
|
|
unsigned int start_flag;
|
|
unsigned int payload_size;
|
|
unsigned short packet_type;
|
|
- int dummy_cnt;
|
|
+ int total_len;
|
|
u32 packet_size_sum = r->offset;
|
|
int index;
|
|
int ret = TO_HOST_INVALID_PACKET;
|
|
@@ -175,10 +175,10 @@ static int up_to_host(struct mux_rx *r)
|
|
break;
|
|
}
|
|
|
|
- dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
|
|
+ total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
|
|
|
|
if (len - packet_size_sum <
|
|
- MUX_HEADER_SIZE + payload_size + dummy_cnt) {
|
|
+ total_len) {
|
|
pr_err("invalid payload : %d %d %04x\n",
|
|
payload_size, len, packet_type);
|
|
break;
|
|
@@ -201,7 +201,7 @@ static int up_to_host(struct mux_rx *r)
|
|
break;
|
|
}
|
|
|
|
- packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
|
|
+ packet_size_sum += total_len;
|
|
if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
|
|
ret = r->callback(NULL,
|
|
0,
|
|
@@ -359,7 +359,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
|
|
struct mux_pkt_header *mux_header;
|
|
struct mux_tx *t = NULL;
|
|
static u32 seq_num = 1;
|
|
- int dummy_cnt;
|
|
int total_len;
|
|
int ret;
|
|
unsigned long flags;
|
|
@@ -372,9 +371,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
|
|
|
|
spin_lock_irqsave(&mux_dev->write_lock, flags);
|
|
|
|
- dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
|
|
-
|
|
- total_len = len + MUX_HEADER_SIZE + dummy_cnt;
|
|
+ total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
|
|
|
|
t = alloc_mux_tx(total_len);
|
|
if (!t) {
|
|
@@ -390,7 +387,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
|
|
mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
|
|
|
|
memcpy(t->buf+MUX_HEADER_SIZE, data, len);
|
|
- memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
|
|
+ memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
|
|
+ len);
|
|
|
|
t->len = total_len;
|
|
t->callback = cb;
|
|
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
|
|
index 9ec1df9..27e1a6e 100644
|
|
--- a/drivers/staging/iio/adc/mxs-lradc.c
|
|
+++ b/drivers/staging/iio/adc/mxs-lradc.c
|
|
@@ -214,11 +214,17 @@ struct mxs_lradc {
|
|
unsigned long is_divided;
|
|
|
|
/*
|
|
- * Touchscreen LRADC channels receives a private slot in the CTRL4
|
|
- * register, the slot #7. Therefore only 7 slots instead of 8 in the
|
|
- * CTRL4 register can be mapped to LRADC channels when using the
|
|
- * touchscreen.
|
|
- *
|
|
+ * When the touchscreen is enabled, we give it two private virtual
|
|
+ * channels: #6 and #7. This means that only 6 virtual channels (instead
|
|
+ * of 8) will be available for buffered capture.
|
|
+ */
|
|
+#define TOUCHSCREEN_VCHANNEL1 7
|
|
+#define TOUCHSCREEN_VCHANNEL2 6
|
|
+#define BUFFER_VCHANS_LIMITED 0x3f
|
|
+#define BUFFER_VCHANS_ALL 0xff
|
|
+ u8 buffer_vchans;
|
|
+
|
|
+ /*
|
|
* Furthermore, certain LRADC channels are shared between touchscreen
|
|
* and/or touch-buttons and generic LRADC block. Therefore when using
|
|
* either of these, these channels are not available for the regular
|
|
@@ -342,6 +348,9 @@ struct mxs_lradc {
|
|
#define LRADC_CTRL4 0x140
|
|
#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
|
|
#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
|
|
+#define LRADC_CTRL4_LRADCSELECT(n, x) \
|
|
+ (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
|
|
+ LRADC_CTRL4_LRADCSELECT_MASK(n))
|
|
|
|
#define LRADC_RESOLUTION 12
|
|
#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
|
|
@@ -423,6 +432,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
|
|
LRADC_STATUS_TOUCH_DETECT_RAW);
|
|
}
|
|
|
|
+static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
|
|
+ unsigned ch)
|
|
+{
|
|
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
|
|
+ LRADC_CTRL4);
|
|
+ mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
|
|
+}
|
|
+
|
|
static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
|
|
{
|
|
/*
|
|
@@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
|
|
LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
|
|
LRADC_DELAY(3));
|
|
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
|
|
- LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
|
|
- LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
|
|
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
|
|
|
|
- /* wake us again, when the complete conversion is done */
|
|
- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
|
|
/*
|
|
* after changing the touchscreen plates setting
|
|
* the signals need some initial time to settle. Start the
|
|
@@ -508,12 +521,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
|
|
LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
|
|
LRADC_DELAY(3));
|
|
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
|
|
- LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
|
|
- LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
|
|
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
|
|
|
|
- /* wake us again, when the conversions are done */
|
|
- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
|
|
/*
|
|
* after changing the touchscreen plates setting
|
|
* the signals need some initial time to settle. Start the
|
|
@@ -578,36 +587,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
|
|
#define TS_CH_XM 4
|
|
#define TS_CH_YM 5
|
|
|
|
-static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
|
|
-{
|
|
- u32 reg;
|
|
- int val;
|
|
-
|
|
- reg = readl(lradc->base + LRADC_CTRL1);
|
|
-
|
|
- /* only channels 3 to 5 are of interest here */
|
|
- if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
|
|
- LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
|
|
- val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
|
|
- } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
|
|
- LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
|
|
- val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
|
|
- } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
|
|
- LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
|
|
- val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
|
|
- } else {
|
|
- return -EIO;
|
|
- }
|
|
-
|
|
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
|
|
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
|
|
-
|
|
- return val;
|
|
-}
|
|
-
|
|
/*
|
|
* YP(open)--+-------------+
|
|
* | |--+
|
|
@@ -651,7 +630,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
|
|
mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
|
|
|
|
lradc->cur_plate = LRADC_SAMPLE_X;
|
|
- mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
|
|
+ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
|
|
+ mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
|
|
}
|
|
|
|
/*
|
|
@@ -672,7 +652,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
|
|
mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
|
|
|
|
lradc->cur_plate = LRADC_SAMPLE_Y;
|
|
- mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
|
|
+ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
|
|
+ mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
|
|
}
|
|
|
|
/*
|
|
@@ -693,7 +674,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
|
|
mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
|
|
|
|
lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
|
|
- mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
|
|
+ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
|
|
+ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
|
|
+ mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
|
|
+ TOUCHSCREEN_VCHANNEL1);
|
|
}
|
|
|
|
static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
|
|
@@ -706,6 +690,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
|
|
mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
|
|
}
|
|
|
|
+static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
|
|
+{
|
|
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
|
|
+ LRADC_CTRL1);
|
|
+ mxs_lradc_reg_set(lradc,
|
|
+ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
|
|
+ /*
|
|
+ * start with the Y-pos, because it uses nearly the same plate
|
|
+ * settings like the touch detection
|
|
+ */
|
|
+ mxs_lradc_prepare_y_pos(lradc);
|
|
+}
|
|
+
|
|
static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
|
|
{
|
|
input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
|
|
@@ -723,10 +720,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
|
|
* start a dummy conversion to burn time to settle the signals
|
|
* note: we are not interested in the conversion's value
|
|
*/
|
|
- mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
|
|
- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
|
|
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
|
|
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
|
|
+ mxs_lradc_reg_clear(lradc,
|
|
+ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
|
|
+ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
|
|
+ mxs_lradc_reg_wrt(lradc,
|
|
+ LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
|
|
LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
|
|
LRADC_DELAY(2));
|
|
}
|
|
@@ -758,59 +757,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
|
|
|
|
/* if it is released, wait for the next touch via IRQ */
|
|
lradc->cur_plate = LRADC_TOUCH;
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
|
|
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
|
|
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
|
|
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
|
|
+ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
|
|
+ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
|
|
mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
|
|
}
|
|
|
|
/* touchscreen's state machine */
|
|
static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
|
|
{
|
|
- int val;
|
|
-
|
|
switch (lradc->cur_plate) {
|
|
case LRADC_TOUCH:
|
|
- /*
|
|
- * start with the Y-pos, because it uses nearly the same plate
|
|
- * settings like the touch detection
|
|
- */
|
|
- if (mxs_lradc_check_touch_event(lradc)) {
|
|
- mxs_lradc_reg_clear(lradc,
|
|
- LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
|
|
- LRADC_CTRL1);
|
|
- mxs_lradc_prepare_y_pos(lradc);
|
|
- }
|
|
+ if (mxs_lradc_check_touch_event(lradc))
|
|
+ mxs_lradc_start_touch_event(lradc);
|
|
mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
|
|
LRADC_CTRL1);
|
|
return;
|
|
|
|
case LRADC_SAMPLE_Y:
|
|
- val = mxs_lradc_read_ts_channel(lradc);
|
|
- if (val < 0) {
|
|
- mxs_lradc_enable_touch_detection(lradc); /* re-start */
|
|
- return;
|
|
- }
|
|
- lradc->ts_y_pos = val;
|
|
+ lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
|
|
+ TOUCHSCREEN_VCHANNEL1);
|
|
mxs_lradc_prepare_x_pos(lradc);
|
|
return;
|
|
|
|
case LRADC_SAMPLE_X:
|
|
- val = mxs_lradc_read_ts_channel(lradc);
|
|
- if (val < 0) {
|
|
- mxs_lradc_enable_touch_detection(lradc); /* re-start */
|
|
- return;
|
|
- }
|
|
- lradc->ts_x_pos = val;
|
|
+ lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
|
|
+ TOUCHSCREEN_VCHANNEL1);
|
|
mxs_lradc_prepare_pressure(lradc);
|
|
return;
|
|
|
|
case LRADC_SAMPLE_PRESSURE:
|
|
- lradc->ts_pressure =
|
|
- mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
|
|
+ lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
|
|
+ TOUCHSCREEN_VCHANNEL2,
|
|
+ TOUCHSCREEN_VCHANNEL1);
|
|
mxs_lradc_complete_touch_event(lradc);
|
|
return;
|
|
|
|
case LRADC_SAMPLE_VALID:
|
|
- val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
|
|
mxs_lradc_finish_touch_event(lradc, 1);
|
|
break;
|
|
}
|
|
@@ -842,9 +827,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
|
|
* used if doing raw sampling.
|
|
*/
|
|
if (lradc->soc == IMX28_LRADC)
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
|
|
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
|
|
LRADC_CTRL1);
|
|
- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
|
|
+ mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
|
|
|
|
/* Enable / disable the divider per requirement */
|
|
if (test_bit(chan, &lradc->is_divided))
|
|
@@ -1091,9 +1076,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
|
|
{
|
|
/* stop all interrupts from firing */
|
|
mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
|
|
- LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
|
|
- LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
|
|
- LRADC_CTRL1);
|
|
+ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
|
|
+ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
|
|
|
|
/* Power-down touchscreen touch-detect circuitry. */
|
|
mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
|
|
@@ -1159,25 +1143,30 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
|
|
struct iio_dev *iio = data;
|
|
struct mxs_lradc *lradc = iio_priv(iio);
|
|
unsigned long reg = readl(lradc->base + LRADC_CTRL1);
|
|
+ uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
|
|
const uint32_t ts_irq_mask =
|
|
LRADC_CTRL1_TOUCH_DETECT_IRQ |
|
|
- LRADC_CTRL1_LRADC_IRQ(2) |
|
|
- LRADC_CTRL1_LRADC_IRQ(3) |
|
|
- LRADC_CTRL1_LRADC_IRQ(4) |
|
|
- LRADC_CTRL1_LRADC_IRQ(5);
|
|
+ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
|
|
+ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
|
|
|
|
if (!(reg & mxs_lradc_irq_mask(lradc)))
|
|
return IRQ_NONE;
|
|
|
|
- if (lradc->use_touchscreen && (reg & ts_irq_mask))
|
|
+ if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
|
|
mxs_lradc_handle_touch(lradc);
|
|
+ /* Make sure we don't clear the next conversion's interrupt. */
|
|
+ clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
|
|
+ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
|
|
+ }
|
|
|
|
- if (iio_buffer_enabled(iio))
|
|
- iio_trigger_poll(iio->trig, iio_get_time_ns());
|
|
- else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
|
|
+ if (iio_buffer_enabled(iio)) {
|
|
+ if (reg & lradc->buffer_vchans)
|
|
+ iio_trigger_poll(iio->trig, iio_get_time_ns());
|
|
+ } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
|
|
complete(&lradc->completion);
|
|
+ }
|
|
|
|
- mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), LRADC_CTRL1);
|
|
+ mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
@@ -1288,9 +1277,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
|
|
}
|
|
|
|
if (lradc->soc == IMX28_LRADC)
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
|
|
- LRADC_CTRL1);
|
|
- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
|
|
+ mxs_lradc_reg_clear(lradc,
|
|
+ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
|
|
+ LRADC_CTRL1);
|
|
+ mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
|
|
|
|
for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
|
|
ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
|
|
@@ -1323,10 +1313,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
|
|
mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
|
|
LRADC_DELAY_KICK, LRADC_DELAY(0));
|
|
|
|
- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
|
|
+ mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
|
|
if (lradc->soc == IMX28_LRADC)
|
|
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
|
|
- LRADC_CTRL1);
|
|
+ mxs_lradc_reg_clear(lradc,
|
|
+ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
|
|
+ LRADC_CTRL1);
|
|
|
|
kfree(lradc->buffer);
|
|
mutex_unlock(&lradc->lock);
|
|
@@ -1352,7 +1343,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
|
|
if (lradc->use_touchbutton)
|
|
rsvd_chans++;
|
|
if (lradc->use_touchscreen)
|
|
- rsvd_chans++;
|
|
+ rsvd_chans += 2;
|
|
|
|
/* Test for attempts to map channels with special mode of operation. */
|
|
if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
|
|
@@ -1412,6 +1403,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
|
|
.channel = 8,
|
|
.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
|
|
},
|
|
+ /* Hidden channel to keep indexes */
|
|
+ {
|
|
+ .type = IIO_TEMP,
|
|
+ .indexed = 1,
|
|
+ .scan_index = -1,
|
|
+ .channel = 9,
|
|
+ },
|
|
MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
|
|
MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
|
|
MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
|
|
@@ -1563,6 +1561,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
|
|
|
|
touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
|
|
|
|
+ if (touch_ret == 0)
|
|
+ lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
|
|
+ else
|
|
+ lradc->buffer_vchans = BUFFER_VCHANS_ALL;
|
|
+
|
|
/* Grab all IRQ sources */
|
|
for (i = 0; i < of_cfg->irq_count; i++) {
|
|
lradc->irq[i] = platform_get_irq(pdev, i);
|
|
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
|
|
index 2b96665..97d4b3f 100644
|
|
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
|
|
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
|
|
@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
|
|
.channel = 0,
|
|
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
|
|
.address = AD5933_REG_TEMP_DATA,
|
|
+ .scan_index = -1,
|
|
.scan_type = {
|
|
.sign = 's',
|
|
.realbits = 14,
|
|
@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
|
|
.type = IIO_VOLTAGE,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "real_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
|
|
- BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "real",
|
|
.address = AD5933_REG_REAL_DATA,
|
|
.scan_index = 0,
|
|
.scan_type = {
|
|
@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
|
|
.type = IIO_VOLTAGE,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "imag_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
|
|
- BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "imag",
|
|
.address = AD5933_REG_IMAG_DATA,
|
|
.scan_index = 1,
|
|
.scan_type = {
|
|
@@ -748,14 +745,14 @@ static int ad5933_probe(struct i2c_client *client,
|
|
indio_dev->name = id->name;
|
|
indio_dev->modes = INDIO_DIRECT_MODE;
|
|
indio_dev->channels = ad5933_channels;
|
|
- indio_dev->num_channels = 1; /* only register temp0_input */
|
|
+ indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
|
|
|
|
ret = ad5933_register_ring_funcs_and_init(indio_dev);
|
|
if (ret)
|
|
goto error_disable_reg;
|
|
|
|
- /* skip temp0_input, register in0_(real|imag)_raw */
|
|
- ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
|
|
+ ret = iio_buffer_register(indio_dev, ad5933_channels,
|
|
+ ARRAY_SIZE(ad5933_channels));
|
|
if (ret)
|
|
goto error_unreg_ring;
|
|
|
|
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
|
|
index 0731820..e8c98cf 100644
|
|
--- a/drivers/staging/iio/meter/ade7758.h
|
|
+++ b/drivers/staging/iio/meter/ade7758.h
|
|
@@ -119,7 +119,6 @@ struct ade7758_state {
|
|
u8 *tx;
|
|
u8 *rx;
|
|
struct mutex buf_lock;
|
|
- const struct iio_chan_spec *ade7758_ring_channels;
|
|
struct spi_transfer ring_xfer[4];
|
|
struct spi_message ring_msg;
|
|
/*
|
|
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
|
|
index cba183e..94d9914 100644
|
|
--- a/drivers/staging/iio/meter/ade7758_core.c
|
|
+++ b/drivers/staging/iio/meter/ade7758_core.c
|
|
@@ -630,9 +630,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_VOLTAGE,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
.address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
|
|
.scan_index = 0,
|
|
.scan_type = {
|
|
@@ -644,9 +641,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_CURRENT,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
.address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
|
|
.scan_index = 1,
|
|
.scan_type = {
|
|
@@ -658,9 +652,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "apparent_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "apparent",
|
|
.address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
|
|
.scan_index = 2,
|
|
.scan_type = {
|
|
@@ -672,9 +664,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "active_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "active",
|
|
.address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
|
|
.scan_index = 3,
|
|
.scan_type = {
|
|
@@ -686,9 +676,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 0,
|
|
- .extend_name = "reactive_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "reactive",
|
|
.address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
|
|
.scan_index = 4,
|
|
.scan_type = {
|
|
@@ -700,9 +688,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_VOLTAGE,
|
|
.indexed = 1,
|
|
.channel = 1,
|
|
- .extend_name = "raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
.address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
|
|
.scan_index = 5,
|
|
.scan_type = {
|
|
@@ -714,9 +699,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_CURRENT,
|
|
.indexed = 1,
|
|
.channel = 1,
|
|
- .extend_name = "raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
.address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
|
|
.scan_index = 6,
|
|
.scan_type = {
|
|
@@ -728,9 +710,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 1,
|
|
- .extend_name = "apparent_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "apparent",
|
|
.address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
|
|
.scan_index = 7,
|
|
.scan_type = {
|
|
@@ -742,9 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 1,
|
|
- .extend_name = "active_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "active",
|
|
.address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
|
|
.scan_index = 8,
|
|
.scan_type = {
|
|
@@ -756,9 +734,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 1,
|
|
- .extend_name = "reactive_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "reactive",
|
|
.address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
|
|
.scan_index = 9,
|
|
.scan_type = {
|
|
@@ -770,9 +746,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_VOLTAGE,
|
|
.indexed = 1,
|
|
.channel = 2,
|
|
- .extend_name = "raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
.address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
|
|
.scan_index = 10,
|
|
.scan_type = {
|
|
@@ -784,9 +757,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_CURRENT,
|
|
.indexed = 1,
|
|
.channel = 2,
|
|
- .extend_name = "raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
.address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
|
|
.scan_index = 11,
|
|
.scan_type = {
|
|
@@ -798,9 +768,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 2,
|
|
- .extend_name = "apparent_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "apparent",
|
|
.address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
|
|
.scan_index = 12,
|
|
.scan_type = {
|
|
@@ -812,9 +780,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 2,
|
|
- .extend_name = "active_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "active",
|
|
.address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
|
|
.scan_index = 13,
|
|
.scan_type = {
|
|
@@ -826,9 +792,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
|
|
.type = IIO_POWER,
|
|
.indexed = 1,
|
|
.channel = 2,
|
|
- .extend_name = "reactive_raw",
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
|
|
+ .extend_name = "reactive",
|
|
.address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
|
|
.scan_index = 14,
|
|
.scan_type = {
|
|
@@ -869,13 +833,14 @@ static int ade7758_probe(struct spi_device *spi)
|
|
goto error_free_rx;
|
|
}
|
|
st->us = spi;
|
|
- st->ade7758_ring_channels = &ade7758_channels[0];
|
|
mutex_init(&st->buf_lock);
|
|
|
|
indio_dev->name = spi->dev.driver->name;
|
|
indio_dev->dev.parent = &spi->dev;
|
|
indio_dev->info = &ade7758_info;
|
|
indio_dev->modes = INDIO_DIRECT_MODE;
|
|
+ indio_dev->channels = ade7758_channels;
|
|
+ indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
|
|
|
|
ret = ade7758_configure_ring(indio_dev);
|
|
if (ret)
|
|
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
|
|
index c0accf8..6e90064 100644
|
|
--- a/drivers/staging/iio/meter/ade7758_ring.c
|
|
+++ b/drivers/staging/iio/meter/ade7758_ring.c
|
|
@@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
|
|
**/
|
|
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
|
|
{
|
|
- struct ade7758_state *st = iio_priv(indio_dev);
|
|
unsigned channel;
|
|
|
|
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
|
|
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
|
|
return -EINVAL;
|
|
|
|
channel = find_first_bit(indio_dev->active_scan_mask,
|
|
indio_dev->masklength);
|
|
|
|
ade7758_write_waveform_type(&indio_dev->dev,
|
|
- st->ade7758_ring_channels[channel].address);
|
|
+ indio_dev->channels[channel].address);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
|
|
index 7a94ddd..8c4f289 100644
|
|
--- a/drivers/staging/iio/meter/ade7758_trigger.c
|
|
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
|
|
@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
|
|
ret = iio_trigger_register(st->trig);
|
|
|
|
/* select default trigger */
|
|
- indio_dev->trig = st->trig;
|
|
+ indio_dev->trig = iio_trigger_get(st->trig);
|
|
if (ret)
|
|
goto error_free_irq;
|
|
|
|
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
|
|
index 34b642a..c70f173 100644
|
|
--- a/drivers/staging/imx-drm/ipuv3-plane.c
|
|
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
|
|
@@ -277,7 +277,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
|
|
|
|
ipu_idmac_put(ipu_plane->ipu_ch);
|
|
ipu_dmfc_put(ipu_plane->dmfc);
|
|
- ipu_dp_put(ipu_plane->dp);
|
|
+ if (ipu_plane->dp)
|
|
+ ipu_dp_put(ipu_plane->dp);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
|
|
index 209e4c7..4f65ba1 100644
|
|
--- a/drivers/staging/lustre/lustre/Kconfig
|
|
+++ b/drivers/staging/lustre/lustre/Kconfig
|
|
@@ -57,4 +57,5 @@ config LUSTRE_TRANSLATE_ERRNOS
|
|
config LUSTRE_LLITE_LLOOP
|
|
tristate "Lustre virtual block device"
|
|
depends on LUSTRE_FS && BLOCK
|
|
+ depends on !PPC_64K_PAGES && !ARM64_64K_PAGES
|
|
default m
|
|
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
|
|
index cbd663e..19405ed 100644
|
|
--- a/drivers/staging/lustre/lustre/llite/dcache.c
|
|
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
|
|
@@ -278,7 +278,7 @@ void ll_invalidate_aliases(struct inode *inode)
|
|
inode->i_ino, inode->i_generation, inode);
|
|
|
|
ll_lock_dcache(inode);
|
|
- ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
|
|
+ ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
|
|
CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
|
|
"inode %p flags %d\n", dentry->d_name.len,
|
|
dentry->d_name.name, dentry, dentry->d_parent,
|
|
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
|
|
index 6cfdb9e..5ae562e 100644
|
|
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
|
|
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
|
|
@@ -678,7 +678,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
|
|
return;
|
|
|
|
list_for_each(tmp, &dentry->d_subdirs) {
|
|
- struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
|
|
+ struct dentry *d = list_entry(tmp, struct dentry, d_child);
|
|
lustre_dump_dentry(d, recur - 1);
|
|
}
|
|
}
|
|
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
|
|
index fc8d264..8e9a9e9 100644
|
|
--- a/drivers/staging/lustre/lustre/llite/namei.c
|
|
+++ b/drivers/staging/lustre/lustre/llite/namei.c
|
|
@@ -175,14 +175,14 @@ static void ll_invalidate_negative_children(struct inode *dir)
|
|
struct ll_d_hlist_node *p;
|
|
|
|
ll_lock_dcache(dir);
|
|
- ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) {
|
|
+ ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
|
|
spin_lock(&dentry->d_lock);
|
|
if (!list_empty(&dentry->d_subdirs)) {
|
|
struct dentry *child;
|
|
|
|
list_for_each_entry_safe(child, tmp_subdir,
|
|
&dentry->d_subdirs,
|
|
- d_u.d_child) {
|
|
+ d_child) {
|
|
if (child->d_inode == NULL)
|
|
d_lustre_invalidate(child, 1);
|
|
}
|
|
@@ -364,7 +364,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
|
|
discon_alias = invalid_alias = NULL;
|
|
|
|
ll_lock_dcache(inode);
|
|
- ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
|
|
+ ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
|
|
LASSERT(alias != dentry);
|
|
|
|
spin_lock(&alias->d_lock);
|
|
@@ -953,7 +953,7 @@ static void ll_get_child_fid(struct inode * dir, struct qstr *name,
|
|
{
|
|
struct dentry *parent, *child;
|
|
|
|
- parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias);
|
|
+ parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_u.d_alias);
|
|
child = d_lookup(parent, name);
|
|
if (child) {
|
|
if (child->d_inode)
|
|
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
|
|
index 93cbfbb..6096771 100644
|
|
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
|
|
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
|
|
@@ -642,7 +642,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
|
|
return 0;
|
|
}
|
|
|
|
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
|
|
+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
|
|
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
|
|
return -EFAULT;
|
|
}
|
|
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
|
|
index b9fe753..15940f8 100644
|
|
--- a/drivers/staging/media/omap4iss/Kconfig
|
|
+++ b/drivers/staging/media/omap4iss/Kconfig
|
|
@@ -1,6 +1,6 @@
|
|
config VIDEO_OMAP4
|
|
bool "OMAP 4 Camera support"
|
|
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
|
|
+ depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
|
|
select VIDEOBUF2_DMA_CONTIG
|
|
---help---
|
|
Driver for an OMAP 4 ISS controller.
|
|
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
|
|
index 617f51c..b58e87e 100644
|
|
--- a/drivers/staging/ozwpan/ozusbsvc1.c
|
|
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
|
|
@@ -323,7 +323,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
|
|
struct oz_multiple_fixed *body =
|
|
(struct oz_multiple_fixed *)data_hdr;
|
|
u8 *data = body->data;
|
|
- int n = (len - sizeof(struct oz_multiple_fixed)+1)
|
|
+ unsigned int n;
|
|
+ if (!body->unit_size ||
|
|
+ len < sizeof(struct oz_multiple_fixed) - 1)
|
|
+ break;
|
|
+ n = (len - (sizeof(struct oz_multiple_fixed) - 1))
|
|
/ body->unit_size;
|
|
while (n--) {
|
|
oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
|
|
@@ -386,10 +390,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
|
|
case OZ_GET_DESC_RSP: {
|
|
struct oz_get_desc_rsp *body =
|
|
(struct oz_get_desc_rsp *)usb_hdr;
|
|
- int data_len = elt->length -
|
|
- sizeof(struct oz_get_desc_rsp) + 1;
|
|
- u16 offs = le16_to_cpu(get_unaligned(&body->offset));
|
|
- u16 total_size =
|
|
+ u16 offs, total_size;
|
|
+ u8 data_len;
|
|
+
|
|
+ if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
|
|
+ break;
|
|
+ data_len = elt->length -
|
|
+ (sizeof(struct oz_get_desc_rsp) - 1);
|
|
+ offs = le16_to_cpu(get_unaligned(&body->offset));
|
|
+ total_size =
|
|
le16_to_cpu(get_unaligned(&body->total_size));
|
|
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
|
|
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
|
|
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
|
|
index ec4b1fd..790e12e 100644
|
|
--- a/drivers/staging/panel/panel.c
|
|
+++ b/drivers/staging/panel/panel.c
|
|
@@ -275,11 +275,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
|
|
* LCD types
|
|
*/
|
|
#define LCD_TYPE_NONE 0
|
|
-#define LCD_TYPE_OLD 1
|
|
-#define LCD_TYPE_KS0074 2
|
|
-#define LCD_TYPE_HANTRONIX 3
|
|
-#define LCD_TYPE_NEXCOM 4
|
|
-#define LCD_TYPE_CUSTOM 5
|
|
+#define LCD_TYPE_CUSTOM 1
|
|
+#define LCD_TYPE_OLD 2
|
|
+#define LCD_TYPE_KS0074 3
|
|
+#define LCD_TYPE_HANTRONIX 4
|
|
+#define LCD_TYPE_NEXCOM 5
|
|
|
|
/*
|
|
* keypad types
|
|
@@ -457,8 +457,7 @@ MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
|
|
static int lcd_type = -1;
|
|
module_param(lcd_type, int, 0000);
|
|
MODULE_PARM_DESC(lcd_type,
|
|
- "LCD type: 0=none, 1=old //, 2=serial ks0074, "
|
|
- "3=hantronix //, 4=nexcom //, 5=compiled-in");
|
|
+ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
|
|
|
|
static int lcd_proto = -1;
|
|
module_param(lcd_proto, int, 0000);
|
|
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
|
|
index 09ffd9b..6ebdd3f 100644
|
|
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
|
|
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
|
|
@@ -1460,12 +1460,12 @@ extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
|
|
|
|
extern const long ieee80211_wlan_frequencies[];
|
|
|
|
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
|
|
+static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
|
|
{
|
|
ieee->scans++;
|
|
}
|
|
|
|
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
|
|
+static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
|
|
{
|
|
return ieee->scans;
|
|
}
|
|
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
|
|
index 2f40ff5..2185a71 100644
|
|
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
|
|
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
|
|
@@ -53,9 +53,12 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
|
|
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
|
|
/*=== Customer ID ===*/
|
|
/****** 8188EUS ********/
|
|
+ {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
|
|
{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
|
|
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
|
|
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
|
|
+ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
|
|
+ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
|
|
{} /* Terminating entry */
|
|
};
|
|
|
|
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
|
|
index 83f5f57..59dc078 100644
|
|
--- a/drivers/staging/rtl8192e/rtllib.h
|
|
+++ b/drivers/staging/rtl8192e/rtllib.h
|
|
@@ -2761,7 +2761,6 @@ extern void rtllib_stop_scan(struct rtllib_device *ieee);
|
|
extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
|
|
extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
|
|
extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
|
|
-extern inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee);
|
|
extern u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee);
|
|
extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
|
|
short pwr);
|
|
@@ -2943,12 +2942,12 @@ void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
|
|
|
|
extern const long rtllib_wlan_frequencies[];
|
|
|
|
-extern inline void rtllib_increment_scans(struct rtllib_device *ieee)
|
|
+static inline void rtllib_increment_scans(struct rtllib_device *ieee)
|
|
{
|
|
ieee->scans++;
|
|
}
|
|
|
|
-extern inline int rtllib_get_scans(struct rtllib_device *ieee)
|
|
+static inline int rtllib_get_scans(struct rtllib_device *ieee)
|
|
{
|
|
return ieee->scans;
|
|
}
|
|
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
|
|
index 4bf72bc..7b5093a 100644
|
|
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
|
|
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
|
|
@@ -341,7 +341,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
|
|
}
|
|
}
|
|
|
|
-inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
|
|
+static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
|
|
{
|
|
unsigned int len, rate_len;
|
|
u8 *tag;
|
|
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
|
|
index bc64f05..b1a0380 100644
|
|
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
|
|
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
|
|
@@ -2250,7 +2250,7 @@ static inline void *ieee80211_priv(struct net_device *dev)
|
|
return ((struct ieee80211_device *)netdev_priv(dev))->priv;
|
|
}
|
|
|
|
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
|
|
+static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
|
|
{
|
|
/* Single white space is for Linksys APs */
|
|
if (essid_len == 1 && essid[0] == ' ')
|
|
@@ -2266,7 +2266,7 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
|
|
return 1;
|
|
}
|
|
|
|
-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
|
|
+static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
|
|
{
|
|
/*
|
|
* It is possible for both access points and our device to support
|
|
@@ -2292,7 +2292,7 @@ extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mod
|
|
return 0;
|
|
}
|
|
|
|
-extern inline int ieee80211_get_hdrlen(u16 fc)
|
|
+static inline int ieee80211_get_hdrlen(u16 fc)
|
|
{
|
|
int hdrlen = IEEE80211_3ADDR_LEN;
|
|
|
|
@@ -2578,12 +2578,12 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
|
|
|
|
extern const long ieee80211_wlan_frequencies[];
|
|
|
|
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
|
|
+static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
|
|
{
|
|
ieee->scans++;
|
|
}
|
|
|
|
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
|
|
+static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
|
|
{
|
|
return ieee->scans;
|
|
}
|
|
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
|
|
index da4000e..8269be8 100644
|
|
--- a/drivers/staging/rtl8712/ieee80211.h
|
|
+++ b/drivers/staging/rtl8712/ieee80211.h
|
|
@@ -734,7 +734,7 @@ enum ieee80211_state {
|
|
#define IEEE_G (1<<2)
|
|
#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
|
|
|
|
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
|
|
+static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
|
|
{
|
|
/* Single white space is for Linksys APs */
|
|
if (essid_len == 1 && essid[0] == ' ')
|
|
@@ -748,7 +748,7 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
|
|
return 1;
|
|
}
|
|
|
|
-extern inline int ieee80211_get_hdrlen(u16 fc)
|
|
+static inline int ieee80211_get_hdrlen(u16 fc)
|
|
{
|
|
int hdrlen = 24;
|
|
|
|
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
|
|
index ea96537..d060b1f 100644
|
|
--- a/drivers/staging/rtl8712/rtl8712_recv.c
|
|
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
|
|
@@ -1075,7 +1075,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
|
|
/* for first fragment packet, driver need allocate 1536 +
|
|
* drvinfo_sz + RXDESC_SIZE to defrag packet. */
|
|
if ((mf == 1) && (frag == 0))
|
|
- alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
|
|
+ /*1658+6=1664, 1664 is 128 alignment.*/
|
|
+ alloc_sz = max_t(u16, tmp_len, 1658);
|
|
else
|
|
alloc_sz = tmp_len;
|
|
/* 2 is for IP header 4 bytes alignment in QoS packet case.
|
|
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
|
|
index d7efd017..7d75788 100644
|
|
--- a/drivers/staging/vt6655/bssdb.c
|
|
+++ b/drivers/staging/vt6655/bssdb.c
|
|
@@ -983,7 +983,7 @@ start:
|
|
pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
|
|
}
|
|
|
|
- {
|
|
+ if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
|
|
pDevice->byReAssocCount++;
|
|
/* 10 sec timeout */
|
|
if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
|
|
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
|
|
index a952df1..6f13f0e 100644
|
|
--- a/drivers/staging/vt6655/device_main.c
|
|
+++ b/drivers/staging/vt6655/device_main.c
|
|
@@ -2430,6 +2430,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
|
|
int handled = 0;
|
|
unsigned char byData = 0;
|
|
int ii = 0;
|
|
+ unsigned long flags;
|
|
// unsigned char byRSSI;
|
|
|
|
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
|
|
@@ -2455,7 +2456,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
|
|
|
|
handled = 1;
|
|
MACvIntDisable(pDevice->PortOffset);
|
|
- spin_lock_irq(&pDevice->lock);
|
|
+
|
|
+ spin_lock_irqsave(&pDevice->lock, flags);
|
|
|
|
//Make sure current page is 0
|
|
VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
|
|
@@ -2696,7 +2698,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
|
|
MACvSelectPage1(pDevice->PortOffset);
|
|
}
|
|
|
|
- spin_unlock_irq(&pDevice->lock);
|
|
+ spin_unlock_irqrestore(&pDevice->lock, flags);
|
|
+
|
|
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
|
|
|
|
return IRQ_RETVAL(handled);
|
|
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
|
|
index edb1b27..dbd9d44 100644
|
|
--- a/drivers/staging/vt6655/rf.c
|
|
+++ b/drivers/staging/vt6655/rf.c
|
|
@@ -936,6 +936,7 @@ bool RFbSetPower(
|
|
break;
|
|
case RATE_6M:
|
|
case RATE_9M:
|
|
+ case RATE_12M:
|
|
case RATE_18M:
|
|
byPwr = pDevice->abyOFDMPwrTbl[uCH];
|
|
if (pDevice->byRFType == RF_UW2452) {
|
|
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
|
|
index 1e8f64b..2dc48d4 100644
|
|
--- a/drivers/staging/vt6656/rf.c
|
|
+++ b/drivers/staging/vt6656/rf.c
|
|
@@ -752,6 +752,7 @@ int RFbSetPower(struct vnt_private *priv, u32 rate, u32 channel)
|
|
break;
|
|
case RATE_6M:
|
|
case RATE_9M:
|
|
+ case RATE_12M:
|
|
case RATE_18M:
|
|
case RATE_24M:
|
|
case RATE_36M:
|
|
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
|
|
index 78129e9..1ecb5cb 100644
|
|
--- a/drivers/staging/wlags49_h2/wl_internal.h
|
|
+++ b/drivers/staging/wlags49_h2/wl_internal.h
|
|
@@ -1013,7 +1013,7 @@ static inline void wl_unlock(struct wl_private *lp,
|
|
/* Interrupt enable disable functions */
|
|
/********************************************************************/
|
|
|
|
-extern inline void wl_act_int_on(struct wl_private *lp)
|
|
+static inline void wl_act_int_on(struct wl_private *lp)
|
|
{
|
|
/*
|
|
* Only do something when the driver is handling
|
|
@@ -1025,7 +1025,7 @@ extern inline void wl_act_int_on(struct wl_private *lp)
|
|
}
|
|
}
|
|
|
|
-extern inline void wl_act_int_off(struct wl_private *lp)
|
|
+static inline void wl_act_int_off(struct wl_private *lp)
|
|
{
|
|
/*
|
|
* Only do something when the driver is handling
|
|
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
|
|
index f329ad2..9dbf176 100644
|
|
--- a/drivers/target/iscsi/iscsi_target.c
|
|
+++ b/drivers/target/iscsi/iscsi_target.c
|
|
@@ -518,7 +518,7 @@ static struct iscsit_transport iscsi_target_transport = {
|
|
|
|
static int __init iscsi_target_init_module(void)
|
|
{
|
|
- int ret = 0;
|
|
+ int ret = 0, size;
|
|
|
|
pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
|
|
|
|
@@ -527,6 +527,7 @@ static int __init iscsi_target_init_module(void)
|
|
pr_err("Unable to allocate memory for iscsit_global\n");
|
|
return -1;
|
|
}
|
|
+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
|
|
mutex_init(&auth_id_lock);
|
|
spin_lock_init(&sess_idr_lock);
|
|
idr_init(&tiqn_idr);
|
|
@@ -536,15 +537,11 @@ static int __init iscsi_target_init_module(void)
|
|
if (ret < 0)
|
|
goto out;
|
|
|
|
- ret = iscsi_thread_set_init();
|
|
- if (ret < 0)
|
|
+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
|
|
+ iscsit_global->ts_bitmap = vzalloc(size);
|
|
+ if (!iscsit_global->ts_bitmap) {
|
|
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
|
|
goto configfs_out;
|
|
-
|
|
- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
|
|
- TARGET_THREAD_SET_COUNT) {
|
|
- pr_err("iscsi_allocate_thread_sets() returned"
|
|
- " unexpected value!\n");
|
|
- goto ts_out1;
|
|
}
|
|
|
|
lio_qr_cache = kmem_cache_create("lio_qr_cache",
|
|
@@ -553,7 +550,7 @@ static int __init iscsi_target_init_module(void)
|
|
if (!lio_qr_cache) {
|
|
pr_err("nable to kmem_cache_create() for"
|
|
" lio_qr_cache\n");
|
|
- goto ts_out2;
|
|
+ goto bitmap_out;
|
|
}
|
|
|
|
lio_dr_cache = kmem_cache_create("lio_dr_cache",
|
|
@@ -597,10 +594,8 @@ dr_out:
|
|
kmem_cache_destroy(lio_dr_cache);
|
|
qr_out:
|
|
kmem_cache_destroy(lio_qr_cache);
|
|
-ts_out2:
|
|
- iscsi_deallocate_thread_sets();
|
|
-ts_out1:
|
|
- iscsi_thread_set_free();
|
|
+bitmap_out:
|
|
+ vfree(iscsit_global->ts_bitmap);
|
|
configfs_out:
|
|
iscsi_target_deregister_configfs();
|
|
out:
|
|
@@ -610,8 +605,6 @@ out:
|
|
|
|
static void __exit iscsi_target_cleanup_module(void)
|
|
{
|
|
- iscsi_deallocate_thread_sets();
|
|
- iscsi_thread_set_free();
|
|
iscsit_release_discovery_tpg();
|
|
iscsit_unregister_transport(&iscsi_target_transport);
|
|
kmem_cache_destroy(lio_qr_cache);
|
|
@@ -621,6 +614,7 @@ static void __exit iscsi_target_cleanup_module(void)
|
|
|
|
iscsi_target_deregister_configfs();
|
|
|
|
+ vfree(iscsit_global->ts_bitmap);
|
|
kfree(iscsit_global);
|
|
}
|
|
|
|
@@ -1165,7 +1159,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|
* traditional iSCSI block I/O.
|
|
*/
|
|
if (iscsit_allocate_iovecs(cmd) < 0) {
|
|
- return iscsit_add_reject_cmd(cmd,
|
|
+ return iscsit_reject_cmd(cmd,
|
|
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
|
|
}
|
|
immed_data = cmd->immediate_data;
|
|
@@ -3653,17 +3647,16 @@ static int iscsit_send_reject(
|
|
|
|
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
|
|
{
|
|
- struct iscsi_thread_set *ts = conn->thread_set;
|
|
int ord, cpu;
|
|
/*
|
|
- * thread_id is assigned from iscsit_global->ts_bitmap from
|
|
- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
|
|
+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
|
|
+ * within iscsit_start_kthreads()
|
|
*
|
|
- * Here we use thread_id to determine which CPU that this
|
|
- * iSCSI connection's iscsi_thread_set will be scheduled to
|
|
+ * Here we use bitmap_id to determine which CPU that this
|
|
+ * iSCSI connection's RX/TX threads will be scheduled to
|
|
* execute upon.
|
|
*/
|
|
- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
|
|
+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
|
|
for_each_online_cpu(cpu) {
|
|
if (ord-- == 0) {
|
|
cpumask_set_cpu(cpu, conn->conn_cpumask);
|
|
@@ -3855,7 +3848,7 @@ check_rsp_state:
|
|
switch (state) {
|
|
case ISTATE_SEND_LOGOUTRSP:
|
|
if (!iscsit_logout_post_handler(cmd, conn))
|
|
- goto restart;
|
|
+ return -ECONNRESET;
|
|
/* fall through */
|
|
case ISTATE_SEND_STATUS:
|
|
case ISTATE_SEND_ASYNCMSG:
|
|
@@ -3883,8 +3876,6 @@ check_rsp_state:
|
|
|
|
err:
|
|
return -1;
|
|
-restart:
|
|
- return -EAGAIN;
|
|
}
|
|
|
|
static int iscsit_handle_response_queue(struct iscsi_conn *conn)
|
|
@@ -3911,21 +3902,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
|
|
int iscsi_target_tx_thread(void *arg)
|
|
{
|
|
int ret = 0;
|
|
- struct iscsi_conn *conn;
|
|
- struct iscsi_thread_set *ts = arg;
|
|
+ struct iscsi_conn *conn = arg;
|
|
/*
|
|
* Allow ourselves to be interrupted by SIGINT so that a
|
|
* connection recovery / failure event can be triggered externally.
|
|
*/
|
|
allow_signal(SIGINT);
|
|
|
|
-restart:
|
|
- conn = iscsi_tx_thread_pre_handler(ts);
|
|
- if (!conn)
|
|
- goto out;
|
|
-
|
|
- ret = 0;
|
|
-
|
|
while (!kthread_should_stop()) {
|
|
/*
|
|
* Ensure that both TX and RX per connection kthreads
|
|
@@ -3934,11 +3917,9 @@ restart:
|
|
iscsit_thread_check_cpumask(conn, current, 1);
|
|
|
|
wait_event_interruptible(conn->queues_wq,
|
|
- !iscsit_conn_all_queues_empty(conn) ||
|
|
- ts->status == ISCSI_THREAD_SET_RESET);
|
|
+ !iscsit_conn_all_queues_empty(conn));
|
|
|
|
- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
|
|
- signal_pending(current))
|
|
+ if (signal_pending(current))
|
|
goto transport_err;
|
|
|
|
get_immediate:
|
|
@@ -3949,15 +3930,20 @@ get_immediate:
|
|
ret = iscsit_handle_response_queue(conn);
|
|
if (ret == 1)
|
|
goto get_immediate;
|
|
- else if (ret == -EAGAIN)
|
|
- goto restart;
|
|
+ else if (ret == -ECONNRESET)
|
|
+ goto out;
|
|
else if (ret < 0)
|
|
goto transport_err;
|
|
}
|
|
|
|
transport_err:
|
|
- iscsit_take_action_for_connection_exit(conn);
|
|
- goto restart;
|
|
+ /*
|
|
+ * Avoid the normal connection failure code-path if this connection
|
|
+ * is still within LOGIN mode, and iscsi_np process context is
|
|
+ * responsible for cleaning up the early connection failure.
|
|
+ */
|
|
+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
|
|
+ iscsit_take_action_for_connection_exit(conn);
|
|
out:
|
|
return 0;
|
|
}
|
|
@@ -4043,33 +4029,33 @@ reject:
|
|
|
|
int iscsi_target_rx_thread(void *arg)
|
|
{
|
|
- int ret;
|
|
+ int ret, rc;
|
|
u8 buffer[ISCSI_HDR_LEN], opcode;
|
|
u32 checksum = 0, digest = 0;
|
|
- struct iscsi_conn *conn = NULL;
|
|
- struct iscsi_thread_set *ts = arg;
|
|
+ struct iscsi_conn *conn = arg;
|
|
struct kvec iov;
|
|
/*
|
|
* Allow ourselves to be interrupted by SIGINT so that a
|
|
* connection recovery / failure event can be triggered externally.
|
|
*/
|
|
allow_signal(SIGINT);
|
|
-
|
|
-restart:
|
|
- conn = iscsi_rx_thread_pre_handler(ts);
|
|
- if (!conn)
|
|
- goto out;
|
|
+ /*
|
|
+ * Wait for iscsi_post_login_handler() to complete before allowing
|
|
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
|
|
+ */
|
|
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
|
|
+ if (rc < 0)
|
|
+ return 0;
|
|
|
|
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
|
|
struct completion comp;
|
|
- int rc;
|
|
|
|
init_completion(&comp);
|
|
rc = wait_for_completion_interruptible(&comp);
|
|
if (rc < 0)
|
|
goto transport_err;
|
|
|
|
- goto out;
|
|
+ goto transport_err;
|
|
}
|
|
|
|
while (!kthread_should_stop()) {
|
|
@@ -4145,8 +4131,6 @@ transport_err:
|
|
if (!signal_pending(current))
|
|
atomic_set(&conn->transport_failed, 1);
|
|
iscsit_take_action_for_connection_exit(conn);
|
|
- goto restart;
|
|
-out:
|
|
return 0;
|
|
}
|
|
|
|
@@ -4196,13 +4180,36 @@ int iscsit_close_connection(
|
|
pr_debug("Closing iSCSI connection CID %hu on SID:"
|
|
" %u\n", conn->cid, sess->sid);
|
|
/*
|
|
- * Always up conn_logout_comp just in case the RX Thread is sleeping
|
|
- * and the logout response never got sent because the connection
|
|
- * failed.
|
|
+ * Always up conn_logout_comp for the traditional TCP case just in case
|
|
+ * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
|
|
+ * response never got sent because the connection failed.
|
|
+ *
|
|
+ * However for iser-target, isert_wait4logout() is using conn_logout_comp
|
|
+ * to signal logout response TX interrupt completion. Go ahead and skip
|
|
+ * this for iser since isert_rx_opcode() does not wait on logout failure,
|
|
+ * and to avoid iscsi_conn pointer dereference in iser-target code.
|
|
*/
|
|
- complete(&conn->conn_logout_comp);
|
|
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
|
|
+ complete(&conn->conn_logout_comp);
|
|
+
|
|
+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
|
|
+ if (conn->tx_thread &&
|
|
+ cmpxchg(&conn->tx_thread_active, true, false)) {
|
|
+ send_sig(SIGINT, conn->tx_thread, 1);
|
|
+ kthread_stop(conn->tx_thread);
|
|
+ }
|
|
+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
|
|
+ if (conn->rx_thread &&
|
|
+ cmpxchg(&conn->rx_thread_active, true, false)) {
|
|
+ send_sig(SIGINT, conn->rx_thread, 1);
|
|
+ kthread_stop(conn->rx_thread);
|
|
+ }
|
|
+ }
|
|
|
|
- iscsi_release_thread_set(conn);
|
|
+ spin_lock(&iscsit_global->ts_bitmap_lock);
|
|
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
|
|
+ get_order(1));
|
|
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
|
|
|
|
iscsit_stop_timers_for_cmds(conn);
|
|
iscsit_stop_nopin_response_timer(conn);
|
|
@@ -4481,15 +4488,24 @@ static void iscsit_logout_post_handler_closesession(
|
|
struct iscsi_conn *conn)
|
|
{
|
|
struct iscsi_session *sess = conn->sess;
|
|
-
|
|
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
|
|
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
|
|
+ int sleep = 1;
|
|
+ /*
|
|
+ * Traditional iscsi/tcp will invoke this logic from TX thread
|
|
+ * context during session logout, so clear tx_thread_active and
|
|
+ * sleep if iscsit_close_connection() has not already occured.
|
|
+ *
|
|
+ * Since iser-target invokes this logic from it's own workqueue,
|
|
+ * always sleep waiting for RX/TX thread shutdown to complete
|
|
+ * within iscsit_close_connection().
|
|
+ */
|
|
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
|
|
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
|
|
|
|
atomic_set(&conn->conn_logout_remove, 0);
|
|
complete(&conn->conn_logout_comp);
|
|
|
|
iscsit_dec_conn_usage_count(conn);
|
|
- iscsit_stop_session(sess, 1, 1);
|
|
+ iscsit_stop_session(sess, sleep, sleep);
|
|
iscsit_dec_session_usage_count(sess);
|
|
target_put_session(sess->se_sess);
|
|
}
|
|
@@ -4497,13 +4513,15 @@ static void iscsit_logout_post_handler_closesession(
|
|
static void iscsit_logout_post_handler_samecid(
|
|
struct iscsi_conn *conn)
|
|
{
|
|
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
|
|
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
|
|
+ int sleep = 1;
|
|
+
|
|
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
|
|
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
|
|
|
|
atomic_set(&conn->conn_logout_remove, 0);
|
|
complete(&conn->conn_logout_comp);
|
|
|
|
- iscsit_cause_connection_reinstatement(conn, 1);
|
|
+ iscsit_cause_connection_reinstatement(conn, sleep);
|
|
iscsit_dec_conn_usage_count(conn);
|
|
}
|
|
|
|
@@ -4513,6 +4531,7 @@ static void iscsit_logout_post_handler_diffcid(
|
|
{
|
|
struct iscsi_conn *l_conn;
|
|
struct iscsi_session *sess = conn->sess;
|
|
+ bool conn_found = false;
|
|
|
|
if (!sess)
|
|
return;
|
|
@@ -4521,12 +4540,13 @@ static void iscsit_logout_post_handler_diffcid(
|
|
list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
|
|
if (l_conn->cid == cid) {
|
|
iscsit_inc_conn_usage_count(l_conn);
|
|
+ conn_found = true;
|
|
break;
|
|
}
|
|
}
|
|
spin_unlock_bh(&sess->conn_lock);
|
|
|
|
- if (!l_conn)
|
|
+ if (!conn_found)
|
|
return;
|
|
|
|
if (l_conn->sock)
|
|
@@ -4715,6 +4735,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
|
|
struct iscsi_session *sess;
|
|
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
|
|
struct se_session *se_sess, *se_sess_tmp;
|
|
+ LIST_HEAD(free_list);
|
|
int session_count = 0;
|
|
|
|
spin_lock_bh(&se_tpg->session_lock);
|
|
@@ -4736,14 +4757,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
|
|
}
|
|
atomic_set(&sess->session_reinstatement, 1);
|
|
spin_unlock(&sess->conn_lock);
|
|
- spin_unlock_bh(&se_tpg->session_lock);
|
|
|
|
- iscsit_free_session(sess);
|
|
- spin_lock_bh(&se_tpg->session_lock);
|
|
+ list_move_tail(&se_sess->sess_list, &free_list);
|
|
+ }
|
|
+ spin_unlock_bh(&se_tpg->session_lock);
|
|
|
|
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
|
|
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
|
|
+
|
|
+ iscsit_free_session(sess);
|
|
session_count++;
|
|
}
|
|
- spin_unlock_bh(&se_tpg->session_lock);
|
|
|
|
pr_debug("Released %d iSCSI Session(s) from Target Portal"
|
|
" Group: %hu\n", session_count, tpg->tpgt);
|
|
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
|
|
index 1d4a8c8..92abbe2 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_core.h
|
|
+++ b/drivers/target/iscsi/iscsi_target_core.h
|
|
@@ -601,6 +601,12 @@ struct iscsi_conn {
|
|
struct iscsi_session *sess;
|
|
/* Pointer to thread_set in use for this conn's threads */
|
|
struct iscsi_thread_set *thread_set;
|
|
+ int bitmap_id;
|
|
+ int rx_thread_active;
|
|
+ struct task_struct *rx_thread;
|
|
+ struct completion rx_login_comp;
|
|
+ int tx_thread_active;
|
|
+ struct task_struct *tx_thread;
|
|
/* list_head for session connection list */
|
|
struct list_head conn_list;
|
|
} ____cacheline_aligned;
|
|
@@ -869,10 +875,12 @@ struct iscsit_global {
|
|
/* Unique identifier used for the authentication daemon */
|
|
u32 auth_id;
|
|
u32 inactive_ts;
|
|
+#define ISCSIT_BITMAP_BITS 262144
|
|
/* Thread Set bitmap count */
|
|
int ts_bitmap_count;
|
|
/* Thread Set bitmap pointer */
|
|
unsigned long *ts_bitmap;
|
|
+ spinlock_t ts_bitmap_lock;
|
|
/* Used for iSCSI discovery session authentication */
|
|
struct iscsi_node_acl discovery_acl;
|
|
struct iscsi_portal_group *discovery_tpg;
|
|
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
|
|
index 0d1e6ee..7396d90 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_erl0.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
|
|
@@ -864,7 +864,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
|
|
}
|
|
spin_unlock_bh(&conn->state_lock);
|
|
|
|
- iscsi_thread_set_force_reinstatement(conn);
|
|
+ if (conn->tx_thread && conn->tx_thread_active)
|
|
+ send_sig(SIGINT, conn->tx_thread, 1);
|
|
+ if (conn->rx_thread && conn->rx_thread_active)
|
|
+ send_sig(SIGINT, conn->rx_thread, 1);
|
|
|
|
sleep:
|
|
wait_for_completion(&conn->conn_wait_rcfr_comp);
|
|
@@ -889,10 +892,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
|
|
return;
|
|
}
|
|
|
|
- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
|
|
- spin_unlock_bh(&conn->state_lock);
|
|
- return;
|
|
- }
|
|
+ if (conn->tx_thread && conn->tx_thread_active)
|
|
+ send_sig(SIGINT, conn->tx_thread, 1);
|
|
+ if (conn->rx_thread && conn->rx_thread_active)
|
|
+ send_sig(SIGINT, conn->rx_thread, 1);
|
|
|
|
atomic_set(&conn->connection_reinstatement, 1);
|
|
if (!sleep) {
|
|
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
|
|
index d509aa7..01c27aa 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_login.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_login.c
|
|
@@ -83,6 +83,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
|
|
init_completion(&conn->conn_logout_comp);
|
|
init_completion(&conn->rx_half_close_comp);
|
|
init_completion(&conn->tx_half_close_comp);
|
|
+ init_completion(&conn->rx_login_comp);
|
|
spin_lock_init(&conn->cmd_lock);
|
|
spin_lock_init(&conn->conn_usage_lock);
|
|
spin_lock_init(&conn->immed_queue_lock);
|
|
@@ -681,7 +682,53 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
|
|
iscsit_start_nopin_timer(conn);
|
|
}
|
|
|
|
-int iscsi_post_login_handler(
|
|
+int iscsit_start_kthreads(struct iscsi_conn *conn)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock(&iscsit_global->ts_bitmap_lock);
|
|
+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
|
|
+ ISCSIT_BITMAP_BITS, get_order(1));
|
|
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
|
|
+
|
|
+ if (conn->bitmap_id < 0) {
|
|
+ pr_err("bitmap_find_free_region() failed for"
|
|
+ " iscsit_start_kthreads()\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
|
|
+ "%s", ISCSI_TX_THREAD_NAME);
|
|
+ if (IS_ERR(conn->tx_thread)) {
|
|
+ pr_err("Unable to start iscsi_target_tx_thread\n");
|
|
+ ret = PTR_ERR(conn->tx_thread);
|
|
+ goto out_bitmap;
|
|
+ }
|
|
+ conn->tx_thread_active = true;
|
|
+
|
|
+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
|
|
+ "%s", ISCSI_RX_THREAD_NAME);
|
|
+ if (IS_ERR(conn->rx_thread)) {
|
|
+ pr_err("Unable to start iscsi_target_rx_thread\n");
|
|
+ ret = PTR_ERR(conn->rx_thread);
|
|
+ goto out_tx;
|
|
+ }
|
|
+ conn->rx_thread_active = true;
|
|
+
|
|
+ return 0;
|
|
+out_tx:
|
|
+ send_sig(SIGINT, conn->tx_thread, 1);
|
|
+ kthread_stop(conn->tx_thread);
|
|
+ conn->tx_thread_active = false;
|
|
+out_bitmap:
|
|
+ spin_lock(&iscsit_global->ts_bitmap_lock);
|
|
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
|
|
+ get_order(1));
|
|
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void iscsi_post_login_handler(
|
|
struct iscsi_np *np,
|
|
struct iscsi_conn *conn,
|
|
u8 zero_tsih)
|
|
@@ -691,7 +738,6 @@ int iscsi_post_login_handler(
|
|
struct se_session *se_sess = sess->se_sess;
|
|
struct iscsi_portal_group *tpg = sess->tpg;
|
|
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
|
|
- struct iscsi_thread_set *ts;
|
|
|
|
iscsit_inc_conn_usage_count(conn);
|
|
|
|
@@ -706,7 +752,6 @@ int iscsi_post_login_handler(
|
|
/*
|
|
* SCSI Initiator -> SCSI Target Port Mapping
|
|
*/
|
|
- ts = iscsi_get_thread_set();
|
|
if (!zero_tsih) {
|
|
iscsi_set_session_parameters(sess->sess_ops,
|
|
conn->param_list, 0);
|
|
@@ -734,8 +779,6 @@ int iscsi_post_login_handler(
|
|
spin_unlock_bh(&sess->conn_lock);
|
|
|
|
iscsi_post_login_start_timers(conn);
|
|
-
|
|
- iscsi_activate_thread_set(conn, ts);
|
|
/*
|
|
* Determine CPU mask to ensure connection's RX and TX kthreads
|
|
* are scheduled on the same CPU.
|
|
@@ -743,15 +786,20 @@ int iscsi_post_login_handler(
|
|
iscsit_thread_get_cpumask(conn);
|
|
conn->conn_rx_reset_cpumask = 1;
|
|
conn->conn_tx_reset_cpumask = 1;
|
|
-
|
|
+ /*
|
|
+ * Wakeup the sleeping iscsi_target_rx_thread() now that
|
|
+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
|
|
+ */
|
|
+ complete(&conn->rx_login_comp);
|
|
iscsit_dec_conn_usage_count(conn);
|
|
+
|
|
if (stop_timer) {
|
|
spin_lock_bh(&se_tpg->session_lock);
|
|
iscsit_stop_time2retain_timer(sess);
|
|
spin_unlock_bh(&se_tpg->session_lock);
|
|
}
|
|
iscsit_dec_session_usage_count(sess);
|
|
- return 0;
|
|
+ return;
|
|
}
|
|
|
|
iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
|
|
@@ -793,7 +841,6 @@ int iscsi_post_login_handler(
|
|
spin_unlock_bh(&se_tpg->session_lock);
|
|
|
|
iscsi_post_login_start_timers(conn);
|
|
- iscsi_activate_thread_set(conn, ts);
|
|
/*
|
|
* Determine CPU mask to ensure connection's RX and TX kthreads
|
|
* are scheduled on the same CPU.
|
|
@@ -801,10 +848,12 @@ int iscsi_post_login_handler(
|
|
iscsit_thread_get_cpumask(conn);
|
|
conn->conn_rx_reset_cpumask = 1;
|
|
conn->conn_tx_reset_cpumask = 1;
|
|
-
|
|
+ /*
|
|
+ * Wakeup the sleeping iscsi_target_rx_thread() now that
|
|
+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
|
|
+ */
|
|
+ complete(&conn->rx_login_comp);
|
|
iscsit_dec_conn_usage_count(conn);
|
|
-
|
|
- return 0;
|
|
}
|
|
|
|
static void iscsi_handle_login_thread_timeout(unsigned long data)
|
|
@@ -1186,6 +1235,9 @@ old_sess_out:
|
|
conn->sock = NULL;
|
|
}
|
|
|
|
+ if (conn->conn_transport->iscsit_wait_conn)
|
|
+ conn->conn_transport->iscsit_wait_conn(conn);
|
|
+
|
|
if (conn->conn_transport->iscsit_free_conn)
|
|
conn->conn_transport->iscsit_free_conn(conn);
|
|
|
|
@@ -1366,23 +1418,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
|
|
if (ret < 0)
|
|
goto new_sess_out;
|
|
|
|
- if (!conn->sess) {
|
|
- pr_err("struct iscsi_conn session pointer is NULL!\n");
|
|
- goto new_sess_out;
|
|
- }
|
|
-
|
|
iscsi_stop_login_thread_timer(np);
|
|
|
|
- if (signal_pending(current))
|
|
- goto new_sess_out;
|
|
-
|
|
if (ret == 1) {
|
|
tpg_np = conn->tpg_np;
|
|
|
|
- ret = iscsi_post_login_handler(np, conn, zero_tsih);
|
|
- if (ret < 0)
|
|
- goto new_sess_out;
|
|
-
|
|
+ iscsi_post_login_handler(np, conn, zero_tsih);
|
|
iscsit_deaccess_np(np, tpg, tpg_np);
|
|
}
|
|
|
|
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
|
|
index 29d0983..55cbf45 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_login.h
|
|
+++ b/drivers/target/iscsi/iscsi_target_login.h
|
|
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
|
|
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
|
|
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
|
|
extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
|
|
-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
|
|
+extern int iscsit_start_kthreads(struct iscsi_conn *);
|
|
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
|
|
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
|
|
bool, bool);
|
|
extern int iscsi_target_login_thread(void *);
|
|
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
|
|
index 582ba84..25ad113 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_nego.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_nego.c
|
|
@@ -17,6 +17,7 @@
|
|
******************************************************************************/
|
|
|
|
#include <linux/ctype.h>
|
|
+#include <linux/kthread.h>
|
|
#include <scsi/iscsi_proto.h>
|
|
#include <target/target_core_base.h>
|
|
#include <target/target_core_fabric.h>
|
|
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
|
|
ntohl(login_rsp->statsn), login->rsp_length);
|
|
|
|
padding = ((-login->rsp_length) & 3);
|
|
+ /*
|
|
+ * Before sending the last login response containing the transition
|
|
+ * bit for full-feature-phase, go ahead and start up TX/RX threads
|
|
+ * now to avoid potential resource allocation failures after the
|
|
+ * final login response has been sent.
|
|
+ */
|
|
+ if (login->login_complete) {
|
|
+ int rc = iscsit_start_kthreads(conn);
|
|
+ if (rc) {
|
|
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
|
|
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
|
|
if (conn->conn_transport->iscsit_put_login_tx(conn, login,
|
|
login->rsp_length + padding) < 0)
|
|
- return -1;
|
|
+ goto err;
|
|
|
|
login->rsp_length = 0;
|
|
mutex_lock(&sess->cmdsn_mutex);
|
|
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
|
|
mutex_unlock(&sess->cmdsn_mutex);
|
|
|
|
return 0;
|
|
+
|
|
+err:
|
|
+ if (login->login_complete) {
|
|
+ if (conn->rx_thread && conn->rx_thread_active) {
|
|
+ send_sig(SIGINT, conn->rx_thread, 1);
|
|
+ kthread_stop(conn->rx_thread);
|
|
+ }
|
|
+ if (conn->tx_thread && conn->tx_thread_active) {
|
|
+ send_sig(SIGINT, conn->tx_thread, 1);
|
|
+ kthread_stop(conn->tx_thread);
|
|
+ }
|
|
+ spin_lock(&iscsit_global->ts_bitmap_lock);
|
|
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
|
|
+ get_order(1));
|
|
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
|
|
+ }
|
|
+ return -1;
|
|
}
|
|
|
|
static void iscsi_target_sk_data_ready(struct sock *sk, int count)
|
|
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
|
|
index 4d2e23f..43b7e6a 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_parameters.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
|
|
@@ -601,7 +601,7 @@ int iscsi_copy_param_list(
|
|
param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
|
|
if (!param_list) {
|
|
pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
|
|
- goto err_out;
|
|
+ return -1;
|
|
}
|
|
INIT_LIST_HEAD(¶m_list->param_list);
|
|
INIT_LIST_HEAD(¶m_list->extra_response_list);
|
|
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
|
|
index 601e9cc..bb2890e 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_tq.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_tq.c
|
|
@@ -24,36 +24,22 @@
|
|
#include "iscsi_target_tq.h"
|
|
#include "iscsi_target.h"
|
|
|
|
-static LIST_HEAD(active_ts_list);
|
|
static LIST_HEAD(inactive_ts_list);
|
|
-static DEFINE_SPINLOCK(active_ts_lock);
|
|
static DEFINE_SPINLOCK(inactive_ts_lock);
|
|
static DEFINE_SPINLOCK(ts_bitmap_lock);
|
|
|
|
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
|
|
-{
|
|
- spin_lock(&active_ts_lock);
|
|
- list_add_tail(&ts->ts_list, &active_ts_list);
|
|
- iscsit_global->active_ts++;
|
|
- spin_unlock(&active_ts_lock);
|
|
-}
|
|
-
|
|
static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
|
|
{
|
|
+ if (!list_empty(&ts->ts_list)) {
|
|
+ WARN_ON(1);
|
|
+ return;
|
|
+ }
|
|
spin_lock(&inactive_ts_lock);
|
|
list_add_tail(&ts->ts_list, &inactive_ts_list);
|
|
iscsit_global->inactive_ts++;
|
|
spin_unlock(&inactive_ts_lock);
|
|
}
|
|
|
|
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
|
|
-{
|
|
- spin_lock(&active_ts_lock);
|
|
- list_del(&ts->ts_list);
|
|
- iscsit_global->active_ts--;
|
|
- spin_unlock(&active_ts_lock);
|
|
-}
|
|
-
|
|
static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
|
|
{
|
|
struct iscsi_thread_set *ts;
|
|
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
|
|
|
|
ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
|
|
|
|
- list_del(&ts->ts_list);
|
|
+ list_del_init(&ts->ts_list);
|
|
iscsit_global->inactive_ts--;
|
|
spin_unlock(&inactive_ts_lock);
|
|
|
|
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
|
|
|
|
void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
|
|
{
|
|
- iscsi_add_ts_to_active_list(ts);
|
|
-
|
|
spin_lock_bh(&ts->ts_state_lock);
|
|
conn->thread_set = ts;
|
|
ts->conn = conn;
|
|
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
|
|
|
|
if (ts->delay_inactive && (--ts->thread_count == 0)) {
|
|
spin_unlock_bh(&ts->ts_state_lock);
|
|
- iscsi_del_ts_from_active_list(ts);
|
|
|
|
if (!iscsit_global->in_shutdown)
|
|
iscsi_deallocate_extra_thread_sets();
|
|
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
|
|
|
|
if (ts->delay_inactive && (--ts->thread_count == 0)) {
|
|
spin_unlock_bh(&ts->ts_state_lock);
|
|
- iscsi_del_ts_from_active_list(ts);
|
|
|
|
if (!iscsit_global->in_shutdown)
|
|
iscsi_deallocate_extra_thread_sets();
|
|
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
|
|
index ab77f80..1e406af 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_util.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_util.c
|
|
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
|
|
struct iscsi_conn *conn,
|
|
struct iscsi_data_count *count)
|
|
{
|
|
- int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
|
|
+ int ret, iov_len;
|
|
struct kvec *iov_p;
|
|
struct msghdr msg;
|
|
|
|
if (!conn || !conn->sock || !conn->conn_ops)
|
|
return -1;
|
|
|
|
- if (data <= 0) {
|
|
- pr_err("Data length is: %d\n", data);
|
|
+ if (count->data_length <= 0) {
|
|
+ pr_err("Data length is: %d\n", count->data_length);
|
|
return -1;
|
|
}
|
|
|
|
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
|
|
iov_p = count->iov;
|
|
iov_len = count->iov_count;
|
|
|
|
- while (total_tx < data) {
|
|
- tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
|
|
- (data - total_tx));
|
|
- if (tx_loop <= 0) {
|
|
- pr_debug("tx_loop: %d total_tx %d\n",
|
|
- tx_loop, total_tx);
|
|
- return tx_loop;
|
|
- }
|
|
- total_tx += tx_loop;
|
|
- pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
|
|
- tx_loop, total_tx, data);
|
|
+ ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
|
|
+ count->data_length);
|
|
+ if (ret != count->data_length) {
|
|
+ pr_err("Unexpected ret: %d send data %d\n",
|
|
+ ret, count->data_length);
|
|
+ return -EPIPE;
|
|
}
|
|
+ pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
|
|
|
|
- return total_tx;
|
|
+ return ret;
|
|
}
|
|
|
|
int rx_data(
|
|
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
|
|
index fadad7c..67c802c 100644
|
|
--- a/drivers/target/loopback/tcm_loop.c
|
|
+++ b/drivers/target/loopback/tcm_loop.c
|
|
@@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
|
|
/*
|
|
* Locate the SAM Task Attr from struct scsi_cmnd *
|
|
*/
|
|
-static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
|
|
-{
|
|
- if (sc->device->tagged_supported) {
|
|
- switch (sc->tag) {
|
|
- case HEAD_OF_QUEUE_TAG:
|
|
- return MSG_HEAD_TAG;
|
|
- case ORDERED_QUEUE_TAG:
|
|
- return MSG_ORDERED_TAG;
|
|
- default:
|
|
- break;
|
|
- }
|
|
- }
|
|
+static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag)
|
|
+{
|
|
+ if (sc->device->tagged_supported &&
|
|
+ sc->device->ordered_tags && tag >= 0)
|
|
+ return MSG_ORDERED_TAG;
|
|
|
|
return MSG_SIMPLE_TAG;
|
|
}
|
|
@@ -197,7 +190,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
|
|
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
|
|
goto out_done;
|
|
}
|
|
- tl_nexus = tl_hba->tl_nexus;
|
|
+ tl_nexus = tl_tpg->tl_nexus;
|
|
if (!tl_nexus) {
|
|
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
|
|
" does not exist\n");
|
|
@@ -214,7 +207,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
|
|
}
|
|
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
|
|
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
|
|
- scsi_bufflen(sc), tcm_loop_sam_attr(sc),
|
|
+ scsi_bufflen(sc), tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag),
|
|
sc->sc_data_direction, 0,
|
|
scsi_sglist(sc), scsi_sg_count(sc),
|
|
sgl_bidi, sgl_bidi_count,
|
|
@@ -252,7 +245,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
|
|
}
|
|
|
|
tl_cmd->sc = sc;
|
|
- tl_cmd->sc_cmd_tag = sc->tag;
|
|
+ tl_cmd->sc_cmd_tag = sc->request->tag;
|
|
INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
|
|
queue_work(tcm_loop_workqueue, &tl_cmd->work);
|
|
return 0;
|
|
@@ -263,16 +256,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
|
|
* to struct scsi_device
|
|
*/
|
|
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
|
|
- struct tcm_loop_nexus *tl_nexus,
|
|
int lun, int task, enum tcm_tmreq_table tmr)
|
|
{
|
|
struct se_cmd *se_cmd = NULL;
|
|
struct se_session *se_sess;
|
|
struct se_portal_group *se_tpg;
|
|
+ struct tcm_loop_nexus *tl_nexus;
|
|
struct tcm_loop_cmd *tl_cmd = NULL;
|
|
struct tcm_loop_tmr *tl_tmr = NULL;
|
|
int ret = TMR_FUNCTION_FAILED, rc;
|
|
|
|
+ /*
|
|
+ * Locate the tl_nexus and se_sess pointers
|
|
+ */
|
|
+ tl_nexus = tl_tpg->tl_nexus;
|
|
+ if (!tl_nexus) {
|
|
+ pr_err("Unable to perform device reset without"
|
|
+ " active I_T Nexus\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
|
|
if (!tl_cmd) {
|
|
pr_err("Unable to allocate memory for tl_cmd\n");
|
|
@@ -288,7 +291,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
|
|
|
|
se_cmd = &tl_cmd->tl_se_cmd;
|
|
se_tpg = &tl_tpg->tl_se_tpg;
|
|
- se_sess = tl_nexus->se_sess;
|
|
+ se_sess = tl_tpg->tl_nexus->se_sess;
|
|
/*
|
|
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
|
|
*/
|
|
@@ -333,7 +336,6 @@ release:
|
|
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
|
|
{
|
|
struct tcm_loop_hba *tl_hba;
|
|
- struct tcm_loop_nexus *tl_nexus;
|
|
struct tcm_loop_tpg *tl_tpg;
|
|
int ret = FAILED;
|
|
|
|
@@ -341,22 +343,9 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
|
|
* Locate the tcm_loop_hba_t pointer
|
|
*/
|
|
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
|
|
- /*
|
|
- * Locate the tl_nexus and se_sess pointers
|
|
- */
|
|
- tl_nexus = tl_hba->tl_nexus;
|
|
- if (!tl_nexus) {
|
|
- pr_err("Unable to perform device reset without"
|
|
- " active I_T Nexus\n");
|
|
- return FAILED;
|
|
- }
|
|
-
|
|
- /*
|
|
- * Locate the tl_tpg pointer from TargetID in sc->device->id
|
|
- */
|
|
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
|
|
- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
|
|
- sc->tag, TMR_ABORT_TASK);
|
|
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
|
|
+ sc->request->tag, TMR_ABORT_TASK);
|
|
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
|
|
}
|
|
|
|
@@ -367,7 +356,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
|
|
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
|
|
{
|
|
struct tcm_loop_hba *tl_hba;
|
|
- struct tcm_loop_nexus *tl_nexus;
|
|
struct tcm_loop_tpg *tl_tpg;
|
|
int ret = FAILED;
|
|
|
|
@@ -375,20 +363,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
|
|
* Locate the tcm_loop_hba_t pointer
|
|
*/
|
|
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
|
|
- /*
|
|
- * Locate the tl_nexus and se_sess pointers
|
|
- */
|
|
- tl_nexus = tl_hba->tl_nexus;
|
|
- if (!tl_nexus) {
|
|
- pr_err("Unable to perform device reset without"
|
|
- " active I_T Nexus\n");
|
|
- return FAILED;
|
|
- }
|
|
- /*
|
|
- * Locate the tl_tpg pointer from TargetID in sc->device->id
|
|
- */
|
|
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
|
|
- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
|
|
+
|
|
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
|
|
0, TMR_LUN_RESET);
|
|
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
|
|
}
|
|
@@ -995,8 +972,8 @@ static int tcm_loop_make_nexus(
|
|
struct tcm_loop_nexus *tl_nexus;
|
|
int ret = -ENOMEM;
|
|
|
|
- if (tl_tpg->tl_hba->tl_nexus) {
|
|
- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
|
|
+ if (tl_tpg->tl_nexus) {
|
|
+ pr_debug("tl_tpg->tl_nexus already exists\n");
|
|
return -EEXIST;
|
|
}
|
|
se_tpg = &tl_tpg->tl_se_tpg;
|
|
@@ -1031,7 +1008,7 @@ static int tcm_loop_make_nexus(
|
|
*/
|
|
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
|
|
tl_nexus->se_sess, tl_nexus);
|
|
- tl_tpg->tl_hba->tl_nexus = tl_nexus;
|
|
+ tl_tpg->tl_nexus = tl_nexus;
|
|
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
|
|
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
|
|
name);
|
|
@@ -1047,12 +1024,8 @@ static int tcm_loop_drop_nexus(
|
|
{
|
|
struct se_session *se_sess;
|
|
struct tcm_loop_nexus *tl_nexus;
|
|
- struct tcm_loop_hba *tl_hba = tpg->tl_hba;
|
|
|
|
- if (!tl_hba)
|
|
- return -ENODEV;
|
|
-
|
|
- tl_nexus = tl_hba->tl_nexus;
|
|
+ tl_nexus = tpg->tl_nexus;
|
|
if (!tl_nexus)
|
|
return -ENODEV;
|
|
|
|
@@ -1068,13 +1041,13 @@ static int tcm_loop_drop_nexus(
|
|
}
|
|
|
|
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
|
|
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
|
|
+ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
|
|
tl_nexus->se_sess->se_node_acl->initiatorname);
|
|
/*
|
|
* Release the SCSI I_T Nexus to the emulated SAS Target Port
|
|
*/
|
|
transport_deregister_session(tl_nexus->se_sess);
|
|
- tpg->tl_hba->tl_nexus = NULL;
|
|
+ tpg->tl_nexus = NULL;
|
|
kfree(tl_nexus);
|
|
return 0;
|
|
}
|
|
@@ -1090,7 +1063,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
|
|
struct tcm_loop_nexus *tl_nexus;
|
|
ssize_t ret;
|
|
|
|
- tl_nexus = tl_tpg->tl_hba->tl_nexus;
|
|
+ tl_nexus = tl_tpg->tl_nexus;
|
|
if (!tl_nexus)
|
|
return -ENODEV;
|
|
|
|
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
|
|
index 54c59d0..6ae49f2 100644
|
|
--- a/drivers/target/loopback/tcm_loop.h
|
|
+++ b/drivers/target/loopback/tcm_loop.h
|
|
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
|
|
};
|
|
|
|
struct tcm_loop_nexus {
|
|
- int it_nexus_active;
|
|
- /*
|
|
- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
|
|
- */
|
|
- struct scsi_host *sh;
|
|
/*
|
|
* Pointer to TCM session for I_T Nexus
|
|
*/
|
|
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
|
|
atomic_t tl_tpg_port_count;
|
|
struct se_portal_group tl_se_tpg;
|
|
struct tcm_loop_hba *tl_hba;
|
|
+ struct tcm_loop_nexus *tl_nexus;
|
|
};
|
|
|
|
struct tcm_loop_hba {
|
|
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
|
|
struct se_hba_s *se_hba;
|
|
struct se_lun *tl_hba_lun;
|
|
struct se_port *tl_hba_lun_sep;
|
|
- struct tcm_loop_nexus *tl_nexus;
|
|
struct device dev;
|
|
struct Scsi_Host *sh;
|
|
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
|
|
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
|
|
index 483d324..f303853 100644
|
|
--- a/drivers/target/target_core_configfs.c
|
|
+++ b/drivers/target/target_core_configfs.c
|
|
@@ -2359,7 +2359,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
|
|
pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
|
|
return -EINVAL; \
|
|
} \
|
|
- if (!tmp) \
|
|
+ if (tmp) \
|
|
t->_var |= _bit; \
|
|
else \
|
|
t->_var &= ~_bit; \
|
|
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
|
|
index 6ea95d2..093b8cb 100644
|
|
--- a/drivers/target/target_core_device.c
|
|
+++ b/drivers/target/target_core_device.c
|
|
@@ -1153,10 +1153,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
|
|
" changed for TCM/pSCSI\n", dev);
|
|
return -EINVAL;
|
|
}
|
|
- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
|
|
+ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
|
|
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
|
|
- " greater than fabric_max_sectors: %u\n", dev,
|
|
- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
|
|
+ " greater than hw_max_sectors: %u\n", dev,
|
|
+ optimal_sectors, dev->dev_attrib.hw_max_sectors);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -1409,7 +1409,8 @@ int core_dev_add_initiator_node_lun_acl(
|
|
* Check to see if there are any existing persistent reservation APTPL
|
|
* pre-registrations that need to be enabled for this LUN ACL..
|
|
*/
|
|
- core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
|
|
+ core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
|
|
+ lacl->mapped_lun);
|
|
return 0;
|
|
}
|
|
|
|
@@ -1564,7 +1565,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
|
|
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
|
|
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
|
|
dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
|
|
- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
|
|
|
|
xcopy_lun = &dev->xcopy_lun;
|
|
xcopy_lun->lun_se_dev = dev;
|
|
@@ -1591,8 +1591,6 @@ int target_configure_device(struct se_device *dev)
|
|
ret = dev->transport->configure_device(dev);
|
|
if (ret)
|
|
goto out;
|
|
- dev->dev_flags |= DF_CONFIGURED;
|
|
-
|
|
/*
|
|
* XXX: there is not much point to have two different values here..
|
|
*/
|
|
@@ -1605,6 +1603,7 @@ int target_configure_device(struct se_device *dev)
|
|
dev->dev_attrib.hw_max_sectors =
|
|
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
|
|
dev->dev_attrib.hw_block_size);
|
|
+ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
|
|
|
|
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
|
|
dev->creation_time = get_jiffies_64();
|
|
@@ -1653,6 +1652,8 @@ int target_configure_device(struct se_device *dev)
|
|
list_add_tail(&dev->g_dev_node, &g_device_list);
|
|
mutex_unlock(&g_device_mutex);
|
|
|
|
+ dev->dev_flags |= DF_CONFIGURED;
|
|
+
|
|
return 0;
|
|
|
|
out_free_alua:
|
|
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
|
|
index cf991a9..b199f1e 100644
|
|
--- a/drivers/target/target_core_file.c
|
|
+++ b/drivers/target/target_core_file.c
|
|
@@ -263,40 +263,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
|
|
struct se_device *se_dev = cmd->se_dev;
|
|
struct fd_dev *dev = FD_DEV(se_dev);
|
|
struct file *prot_fd = dev->fd_prot_file;
|
|
- struct scatterlist *sg;
|
|
loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
|
|
unsigned char *buf;
|
|
- u32 prot_size, len, size;
|
|
- int rc, ret = 1, i;
|
|
+ u32 prot_size;
|
|
+ int rc, ret = 1;
|
|
|
|
prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
|
|
se_dev->prot_length;
|
|
|
|
if (!is_write) {
|
|
- fd_prot->prot_buf = vzalloc(prot_size);
|
|
+ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
|
|
if (!fd_prot->prot_buf) {
|
|
pr_err("Unable to allocate fd_prot->prot_buf\n");
|
|
return -ENOMEM;
|
|
}
|
|
buf = fd_prot->prot_buf;
|
|
|
|
- fd_prot->prot_sg_nents = cmd->t_prot_nents;
|
|
- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
|
|
- fd_prot->prot_sg_nents, GFP_KERNEL);
|
|
+ fd_prot->prot_sg_nents = 1;
|
|
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
|
|
+ GFP_KERNEL);
|
|
if (!fd_prot->prot_sg) {
|
|
pr_err("Unable to allocate fd_prot->prot_sg\n");
|
|
- vfree(fd_prot->prot_buf);
|
|
+ kfree(fd_prot->prot_buf);
|
|
return -ENOMEM;
|
|
}
|
|
- size = prot_size;
|
|
-
|
|
- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
|
|
-
|
|
- len = min_t(u32, PAGE_SIZE, size);
|
|
- sg_set_buf(sg, buf, len);
|
|
- size -= len;
|
|
- buf += len;
|
|
- }
|
|
+ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
|
|
+ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
|
|
}
|
|
|
|
if (is_write) {
|
|
@@ -317,7 +309,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
|
|
|
|
if (is_write || ret < 0) {
|
|
kfree(fd_prot->prot_sg);
|
|
- vfree(fd_prot->prot_buf);
|
|
+ kfree(fd_prot->prot_buf);
|
|
}
|
|
|
|
return ret;
|
|
@@ -620,7 +612,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|
struct fd_prot fd_prot;
|
|
sense_reason_t rc;
|
|
int ret = 0;
|
|
-
|
|
+ /*
|
|
+ * We are currently limited by the number of iovecs (2048) per
|
|
+ * single vfs_[writev,readv] call.
|
|
+ */
|
|
+ if (cmd->data_length > FD_MAX_BYTES) {
|
|
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
|
|
+ "FD_MAX_BYTES: %u iovec count limitiation\n",
|
|
+ cmd->data_length, FD_MAX_BYTES);
|
|
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
|
+ }
|
|
/*
|
|
* Call vectorized fileio functions to map struct scatterlist
|
|
* physical memory addresses to struct iovec virtual memory.
|
|
@@ -643,11 +644,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|
0, fd_prot.prot_sg, 0);
|
|
if (rc) {
|
|
kfree(fd_prot.prot_sg);
|
|
- vfree(fd_prot.prot_buf);
|
|
+ kfree(fd_prot.prot_buf);
|
|
return rc;
|
|
}
|
|
kfree(fd_prot.prot_sg);
|
|
- vfree(fd_prot.prot_buf);
|
|
+ kfree(fd_prot.prot_buf);
|
|
}
|
|
} else {
|
|
memset(&fd_prot, 0, sizeof(struct fd_prot));
|
|
@@ -663,7 +664,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|
0, fd_prot.prot_sg, 0);
|
|
if (rc) {
|
|
kfree(fd_prot.prot_sg);
|
|
- vfree(fd_prot.prot_buf);
|
|
+ kfree(fd_prot.prot_buf);
|
|
return rc;
|
|
}
|
|
}
|
|
@@ -694,7 +695,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|
|
|
if (ret < 0) {
|
|
kfree(fd_prot.prot_sg);
|
|
- vfree(fd_prot.prot_buf);
|
|
+ kfree(fd_prot.prot_buf);
|
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
|
}
|
|
|
|
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
|
|
index 9e0232c..feefe24 100644
|
|
--- a/drivers/target/target_core_iblock.c
|
|
+++ b/drivers/target/target_core_iblock.c
|
|
@@ -123,7 +123,7 @@ static int iblock_configure_device(struct se_device *dev)
|
|
q = bdev_get_queue(bd);
|
|
|
|
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
|
|
- dev->dev_attrib.hw_max_sectors = UINT_MAX;
|
|
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
|
|
dev->dev_attrib.hw_queue_depth = q->nr_requests;
|
|
|
|
/*
|
|
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
|
|
index 3013287..70cb375 100644
|
|
--- a/drivers/target/target_core_pr.c
|
|
+++ b/drivers/target/target_core_pr.c
|
|
@@ -76,7 +76,7 @@ enum preempt_type {
|
|
};
|
|
|
|
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
|
|
- struct t10_pr_registration *, int);
|
|
+ struct t10_pr_registration *, int, int);
|
|
|
|
static sense_reason_t
|
|
target_scsi2_reservation_check(struct se_cmd *cmd)
|
|
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
|
|
|
|
return 0;
|
|
}
|
|
+ } else if (we && registered_nexus) {
|
|
+ /*
|
|
+ * Reads are allowed for Write Exclusive locks
|
|
+ * from all registrants.
|
|
+ */
|
|
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
|
|
+ pr_debug("Allowing READ CDB: 0x%02x for %s"
|
|
+ " reservation\n", cdb[0],
|
|
+ core_scsi3_pr_dump_type(pr_reg_type));
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
}
|
|
pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
|
|
" for %s reservation\n", transport_dump_cmd_direction(cmd),
|
|
@@ -944,10 +956,10 @@ int core_scsi3_check_aptpl_registration(
|
|
struct se_device *dev,
|
|
struct se_portal_group *tpg,
|
|
struct se_lun *lun,
|
|
- struct se_lun_acl *lun_acl)
|
|
+ struct se_node_acl *nacl,
|
|
+ u32 mapped_lun)
|
|
{
|
|
- struct se_node_acl *nacl = lun_acl->se_lun_nacl;
|
|
- struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
|
|
+ struct se_dev_entry *deve = nacl->device_list[mapped_lun];
|
|
|
|
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
|
|
return 0;
|
|
@@ -1186,7 +1198,7 @@ static int core_scsi3_check_implicit_release(
|
|
* service action with the SERVICE ACTION RESERVATION KEY
|
|
* field set to zero (see 5.7.11.3).
|
|
*/
|
|
- __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
|
|
+ __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
|
|
ret = 1;
|
|
/*
|
|
* For 'All Registrants' reservation types, all existing
|
|
@@ -1228,7 +1240,8 @@ static void __core_scsi3_free_registration(
|
|
|
|
pr_reg->pr_reg_deve->def_pr_registered = 0;
|
|
pr_reg->pr_reg_deve->pr_res_key = 0;
|
|
- list_del(&pr_reg->pr_reg_list);
|
|
+ if (!list_empty(&pr_reg->pr_reg_list))
|
|
+ list_del(&pr_reg->pr_reg_list);
|
|
/*
|
|
* Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
|
|
* so call core_scsi3_put_pr_reg() to decrement our reference.
|
|
@@ -1280,6 +1293,7 @@ void core_scsi3_free_pr_reg_from_nacl(
|
|
{
|
|
struct t10_reservation *pr_tmpl = &dev->t10_pr;
|
|
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
|
|
+ bool free_reg = false;
|
|
/*
|
|
* If the passed se_node_acl matches the reservation holder,
|
|
* release the reservation.
|
|
@@ -1287,13 +1301,18 @@ void core_scsi3_free_pr_reg_from_nacl(
|
|
spin_lock(&dev->dev_reservation_lock);
|
|
pr_res_holder = dev->dev_pr_res_holder;
|
|
if ((pr_res_holder != NULL) &&
|
|
- (pr_res_holder->pr_reg_nacl == nacl))
|
|
- __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
|
|
+ (pr_res_holder->pr_reg_nacl == nacl)) {
|
|
+ __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
|
|
+ free_reg = true;
|
|
+ }
|
|
spin_unlock(&dev->dev_reservation_lock);
|
|
/*
|
|
* Release any registration associated with the struct se_node_acl.
|
|
*/
|
|
spin_lock(&pr_tmpl->registration_lock);
|
|
+ if (pr_res_holder && free_reg)
|
|
+ __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
|
|
+
|
|
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
|
|
&pr_tmpl->registration_list, pr_reg_list) {
|
|
|
|
@@ -1316,7 +1335,7 @@ void core_scsi3_free_all_registrations(
|
|
if (pr_res_holder != NULL) {
|
|
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
|
|
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
|
|
- pr_res_holder, 0);
|
|
+ pr_res_holder, 0, 0);
|
|
}
|
|
spin_unlock(&dev->dev_reservation_lock);
|
|
|
|
@@ -1877,8 +1896,8 @@ static int core_scsi3_update_aptpl_buf(
|
|
}
|
|
|
|
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
|
|
- pr_err("Unable to update renaming"
|
|
- " APTPL metadata\n");
|
|
+ pr_err("Unable to update renaming APTPL metadata,"
|
|
+ " reallocating larger buffer\n");
|
|
ret = -EMSGSIZE;
|
|
goto out;
|
|
}
|
|
@@ -1895,8 +1914,8 @@ static int core_scsi3_update_aptpl_buf(
|
|
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
|
|
|
|
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
|
|
- pr_err("Unable to update renaming"
|
|
- " APTPL metadata\n");
|
|
+ pr_err("Unable to update renaming APTPL metadata,"
|
|
+ " reallocating larger buffer\n");
|
|
ret = -EMSGSIZE;
|
|
goto out;
|
|
}
|
|
@@ -1959,7 +1978,7 @@ static int __core_scsi3_write_aptpl_to_file(
|
|
static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
|
|
{
|
|
unsigned char *buf;
|
|
- int rc;
|
|
+ int rc, len = PR_APTPL_BUF_LEN;
|
|
|
|
if (!aptpl) {
|
|
char *null_buf = "No Registrations or Reservations\n";
|
|
@@ -1973,25 +1992,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
|
|
|
|
return 0;
|
|
}
|
|
-
|
|
- buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
|
|
+retry:
|
|
+ buf = vzalloc(len);
|
|
if (!buf)
|
|
return TCM_OUT_OF_RESOURCES;
|
|
|
|
- rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
|
|
+ rc = core_scsi3_update_aptpl_buf(dev, buf, len);
|
|
if (rc < 0) {
|
|
- kfree(buf);
|
|
- return TCM_OUT_OF_RESOURCES;
|
|
+ vfree(buf);
|
|
+ len *= 2;
|
|
+ goto retry;
|
|
}
|
|
|
|
rc = __core_scsi3_write_aptpl_to_file(dev, buf);
|
|
if (rc != 0) {
|
|
pr_err("SPC-3 PR: Could not update APTPL\n");
|
|
- kfree(buf);
|
|
+ vfree(buf);
|
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
|
}
|
|
dev->t10_pr.pr_aptpl_active = 1;
|
|
- kfree(buf);
|
|
+ vfree(buf);
|
|
pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
|
|
return 0;
|
|
}
|
|
@@ -2125,13 +2145,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
|
|
/*
|
|
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
|
|
*/
|
|
- pr_holder = core_scsi3_check_implicit_release(
|
|
- cmd->se_dev, pr_reg);
|
|
+ type = pr_reg->pr_res_type;
|
|
+ pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
|
|
+ pr_reg);
|
|
if (pr_holder < 0) {
|
|
ret = TCM_RESERVATION_CONFLICT;
|
|
goto out;
|
|
}
|
|
- type = pr_reg->pr_res_type;
|
|
|
|
spin_lock(&pr_tmpl->registration_lock);
|
|
/*
|
|
@@ -2289,6 +2309,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
|
|
spin_lock(&dev->dev_reservation_lock);
|
|
pr_res_holder = dev->dev_pr_res_holder;
|
|
if (pr_res_holder) {
|
|
+ int pr_res_type = pr_res_holder->pr_res_type;
|
|
/*
|
|
* From spc4r17 Section 5.7.9: Reserving:
|
|
*
|
|
@@ -2299,7 +2320,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
|
|
* the logical unit, then the command shall be completed with
|
|
* RESERVATION CONFLICT status.
|
|
*/
|
|
- if (pr_res_holder != pr_reg) {
|
|
+ if ((pr_res_holder != pr_reg) &&
|
|
+ (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
|
|
+ (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
|
|
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
|
|
pr_err("SPC-3 PR: Attempted RESERVE from"
|
|
" [%s]: %s while reservation already held by"
|
|
@@ -2405,23 +2428,59 @@ static void __core_scsi3_complete_pro_release(
|
|
struct se_device *dev,
|
|
struct se_node_acl *se_nacl,
|
|
struct t10_pr_registration *pr_reg,
|
|
- int explicit)
|
|
+ int explicit,
|
|
+ int unreg)
|
|
{
|
|
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
|
|
char i_buf[PR_REG_ISID_ID_LEN];
|
|
+ int pr_res_type = 0, pr_res_scope = 0;
|
|
|
|
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
|
|
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
|
|
/*
|
|
* Go ahead and release the current PR reservation holder.
|
|
+ * If an All Registrants reservation is currently active and
|
|
+ * a unregister operation is requested, replace the current
|
|
+ * dev_pr_res_holder with another active registration.
|
|
*/
|
|
- dev->dev_pr_res_holder = NULL;
|
|
+ if (dev->dev_pr_res_holder) {
|
|
+ pr_res_type = dev->dev_pr_res_holder->pr_res_type;
|
|
+ pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
|
|
+ dev->dev_pr_res_holder->pr_res_type = 0;
|
|
+ dev->dev_pr_res_holder->pr_res_scope = 0;
|
|
+ dev->dev_pr_res_holder->pr_res_holder = 0;
|
|
+ dev->dev_pr_res_holder = NULL;
|
|
+ }
|
|
+ if (!unreg)
|
|
+ goto out;
|
|
|
|
- pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
|
|
- " reservation holder TYPE: %s ALL_TG_PT: %d\n",
|
|
- tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
|
|
- core_scsi3_pr_dump_type(pr_reg->pr_res_type),
|
|
- (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
|
|
+ spin_lock(&dev->t10_pr.registration_lock);
|
|
+ list_del_init(&pr_reg->pr_reg_list);
|
|
+ /*
|
|
+ * If the I_T nexus is a reservation holder, the persistent reservation
|
|
+ * is of an all registrants type, and the I_T nexus is the last remaining
|
|
+ * registered I_T nexus, then the device server shall also release the
|
|
+ * persistent reservation.
|
|
+ */
|
|
+ if (!list_empty(&dev->t10_pr.registration_list) &&
|
|
+ ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
|
|
+ (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
|
|
+ dev->dev_pr_res_holder =
|
|
+ list_entry(dev->t10_pr.registration_list.next,
|
|
+ struct t10_pr_registration, pr_reg_list);
|
|
+ dev->dev_pr_res_holder->pr_res_type = pr_res_type;
|
|
+ dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
|
|
+ dev->dev_pr_res_holder->pr_res_holder = 1;
|
|
+ }
|
|
+ spin_unlock(&dev->t10_pr.registration_lock);
|
|
+out:
|
|
+ if (!dev->dev_pr_res_holder) {
|
|
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
|
|
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
|
|
+ tfo->get_fabric_name(), (explicit) ? "explicit" :
|
|
+ "implicit", core_scsi3_pr_dump_type(pr_res_type),
|
|
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
|
|
+ }
|
|
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
|
|
tfo->get_fabric_name(), se_nacl->initiatorname,
|
|
i_buf);
|
|
@@ -2552,7 +2611,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
|
|
* server shall not establish a unit attention condition.
|
|
*/
|
|
__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
|
|
- pr_reg, 1);
|
|
+ pr_reg, 1, 0);
|
|
|
|
spin_unlock(&dev->dev_reservation_lock);
|
|
|
|
@@ -2640,7 +2699,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
|
|
if (pr_res_holder) {
|
|
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
|
|
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
|
|
- pr_res_holder, 0);
|
|
+ pr_res_holder, 0, 0);
|
|
}
|
|
spin_unlock(&dev->dev_reservation_lock);
|
|
/*
|
|
@@ -2699,7 +2758,7 @@ static void __core_scsi3_complete_pro_preempt(
|
|
*/
|
|
if (dev->dev_pr_res_holder)
|
|
__core_scsi3_complete_pro_release(dev, nacl,
|
|
- dev->dev_pr_res_holder, 0);
|
|
+ dev->dev_pr_res_holder, 0, 0);
|
|
|
|
dev->dev_pr_res_holder = pr_reg;
|
|
pr_reg->pr_res_holder = 1;
|
|
@@ -2943,8 +3002,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
|
|
*/
|
|
if (pr_reg_n != pr_res_holder)
|
|
__core_scsi3_complete_pro_release(dev,
|
|
- pr_res_holder->pr_reg_nacl,
|
|
- dev->dev_pr_res_holder, 0);
|
|
+ pr_res_holder->pr_reg_nacl,
|
|
+ dev->dev_pr_res_holder, 0, 0);
|
|
/*
|
|
* b) Remove the registrations for all I_T nexuses identified
|
|
* by the SERVICE ACTION RESERVATION KEY field, except the
|
|
@@ -3414,7 +3473,7 @@ after_iport_check:
|
|
* holder (i.e., the I_T nexus on which the
|
|
*/
|
|
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
|
|
- dev->dev_pr_res_holder, 0);
|
|
+ dev->dev_pr_res_holder, 0, 0);
|
|
/*
|
|
* g) Move the persistent reservation to the specified I_T nexus using
|
|
* the same scope and type as the persistent reservation released in
|
|
@@ -3854,7 +3913,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
|
unsigned char *buf;
|
|
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
|
|
u32 off = 8; /* off into first Full Status descriptor */
|
|
- int format_code = 0;
|
|
+ int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
|
|
+ bool all_reg = false;
|
|
|
|
if (cmd->data_length < 8) {
|
|
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
|
|
@@ -3871,6 +3931,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
|
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
|
|
buf[3] = (dev->t10_pr.pr_generation & 0xff);
|
|
|
|
+ spin_lock(&dev->dev_reservation_lock);
|
|
+ if (dev->dev_pr_res_holder) {
|
|
+ struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
|
|
+
|
|
+ if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
|
|
+ pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
|
|
+ all_reg = true;
|
|
+ pr_res_type = pr_holder->pr_res_type;
|
|
+ pr_res_scope = pr_holder->pr_res_scope;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&dev->dev_reservation_lock);
|
|
+
|
|
spin_lock(&pr_tmpl->registration_lock);
|
|
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
|
|
&pr_tmpl->registration_list, pr_reg_list) {
|
|
@@ -3920,14 +3993,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
|
* reservation holder for PR_HOLDER bit.
|
|
*
|
|
* Also, if this registration is the reservation
|
|
- * holder, fill in SCOPE and TYPE in the next byte.
|
|
+ * holder or there is an All Registrants reservation
|
|
+ * active, fill in SCOPE and TYPE in the next byte.
|
|
*/
|
|
if (pr_reg->pr_res_holder) {
|
|
buf[off++] |= 0x01;
|
|
buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
|
|
(pr_reg->pr_res_type & 0x0f);
|
|
- } else
|
|
+ } else if (all_reg) {
|
|
+ buf[off++] |= 0x01;
|
|
+ buf[off++] = (pr_res_scope & 0xf0) |
|
|
+ (pr_res_type & 0x0f);
|
|
+ } else {
|
|
off += 2;
|
|
+ }
|
|
|
|
off += 4; /* Skip over reserved area */
|
|
/*
|
|
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
|
|
index 2ee2936..749fd7b 100644
|
|
--- a/drivers/target/target_core_pr.h
|
|
+++ b/drivers/target/target_core_pr.h
|
|
@@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
|
|
unsigned char *, u16, u32, int, int, u8);
|
|
extern int core_scsi3_check_aptpl_registration(struct se_device *,
|
|
struct se_portal_group *, struct se_lun *,
|
|
- struct se_lun_acl *);
|
|
+ struct se_node_acl *, u32);
|
|
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
|
|
struct se_node_acl *);
|
|
extern void core_scsi3_free_all_registrations(struct se_device *);
|
|
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
|
|
index 0f199f6..9b90cfa 100644
|
|
--- a/drivers/target/target_core_pscsi.c
|
|
+++ b/drivers/target/target_core_pscsi.c
|
|
@@ -520,6 +520,7 @@ static int pscsi_configure_device(struct se_device *dev)
|
|
" pdv_host_id: %d\n", pdv->pdv_host_id);
|
|
return -EINVAL;
|
|
}
|
|
+ pdv->pdv_lld_host = sh;
|
|
}
|
|
} else {
|
|
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
|
|
@@ -602,6 +603,8 @@ static void pscsi_free_device(struct se_device *dev)
|
|
if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
|
|
(phv->phv_lld_host != NULL))
|
|
scsi_host_put(phv->phv_lld_host);
|
|
+ else if (pdv->pdv_lld_host)
|
|
+ scsi_host_put(pdv->pdv_lld_host);
|
|
|
|
if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
|
|
scsi_device_put(sd);
|
|
@@ -1111,7 +1114,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
|
|
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
|
|
struct scsi_device *sd = pdv->pdv_sd;
|
|
|
|
- return sd->type;
|
|
+ return (sd) ? sd->type : TYPE_NO_LUN;
|
|
}
|
|
|
|
static sector_t pscsi_get_blocks(struct se_device *dev)
|
|
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
|
|
index 1bd757d..820d305 100644
|
|
--- a/drivers/target/target_core_pscsi.h
|
|
+++ b/drivers/target/target_core_pscsi.h
|
|
@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
|
|
int pdv_lun_id;
|
|
struct block_device *pdv_bd;
|
|
struct scsi_device *pdv_sd;
|
|
+ struct Scsi_Host *pdv_lld_host;
|
|
} ____cacheline_aligned;
|
|
|
|
typedef enum phv_modes {
|
|
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
|
|
index 379033f..f89b24a 100644
|
|
--- a/drivers/target/target_core_sbc.c
|
|
+++ b/drivers/target/target_core_sbc.c
|
|
@@ -266,6 +266,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
|
|
static sense_reason_t
|
|
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
|
|
{
|
|
+ struct se_device *dev = cmd->se_dev;
|
|
+ sector_t end_lba = dev->transport->get_blocks(dev) + 1;
|
|
unsigned int sectors = sbc_get_write_same_sectors(cmd);
|
|
|
|
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
|
|
@@ -279,6 +281,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
|
|
sectors, cmd->se_dev->dev_attrib.max_write_same_len);
|
|
return TCM_INVALID_CDB_FIELD;
|
|
}
|
|
+ /*
|
|
+ * Sanity check for LBA wrap and request past end of device.
|
|
+ */
|
|
+ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
|
|
+ ((cmd->t_task_lba + sectors) > end_lba)) {
|
|
+ pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
|
|
+ (unsigned long long)end_lba, cmd->t_task_lba, sectors);
|
|
+ return TCM_ADDRESS_OUT_OF_RANGE;
|
|
+ }
|
|
+
|
|
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
|
|
if (flags[0] & 0x10) {
|
|
pr_warn("WRITE SAME with ANCHOR not supported\n");
|
|
@@ -302,7 +314,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
|
|
return 0;
|
|
}
|
|
|
|
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
|
|
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
|
|
{
|
|
unsigned char *buf, *addr;
|
|
struct scatterlist *sg;
|
|
@@ -366,7 +378,7 @@ sbc_execute_rw(struct se_cmd *cmd)
|
|
cmd->data_direction);
|
|
}
|
|
|
|
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
|
|
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
|
|
{
|
|
struct se_device *dev = cmd->se_dev;
|
|
|
|
@@ -389,7 +401,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
|
|
return TCM_NO_SENSE;
|
|
}
|
|
|
|
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
|
|
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
|
|
{
|
|
struct se_device *dev = cmd->se_dev;
|
|
struct scatterlist *write_sg = NULL, *sg;
|
|
@@ -404,11 +416,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
|
|
|
|
/*
|
|
* Handle early failure in transport_generic_request_failure(),
|
|
- * which will not have taken ->caw_mutex yet..
|
|
+ * which will not have taken ->caw_sem yet..
|
|
*/
|
|
- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
|
|
+ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
|
|
return TCM_NO_SENSE;
|
|
/*
|
|
+ * Handle special case for zero-length COMPARE_AND_WRITE
|
|
+ */
|
|
+ if (!cmd->data_length)
|
|
+ goto out;
|
|
+ /*
|
|
* Immediately exit + release dev->caw_sem if command has already
|
|
* been failed with a non-zero SCSI status.
|
|
*/
|
|
@@ -910,23 +927,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
|
|
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
|
|
unsigned long long end_lba;
|
|
|
|
- if (sectors > dev->dev_attrib.fabric_max_sectors) {
|
|
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
|
|
- " big sectors %u exceeds fabric_max_sectors:"
|
|
- " %u\n", cdb[0], sectors,
|
|
- dev->dev_attrib.fabric_max_sectors);
|
|
- return TCM_INVALID_CDB_FIELD;
|
|
- }
|
|
- if (sectors > dev->dev_attrib.hw_max_sectors) {
|
|
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
|
|
- " big sectors %u exceeds backend hw_max_sectors:"
|
|
- " %u\n", cdb[0], sectors,
|
|
- dev->dev_attrib.hw_max_sectors);
|
|
- return TCM_INVALID_CDB_FIELD;
|
|
- }
|
|
-
|
|
end_lba = dev->transport->get_blocks(dev) + 1;
|
|
- if (cmd->t_task_lba + sectors > end_lba) {
|
|
+ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
|
|
+ ((cmd->t_task_lba + sectors) > end_lba)) {
|
|
pr_err("cmd exceeds last lba %llu "
|
|
"(lba %llu, sectors %u)\n",
|
|
end_lba, cmd->t_task_lba, sectors);
|
|
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
|
|
index fcdf98f..12a74f6 100644
|
|
--- a/drivers/target/target_core_spc.c
|
|
+++ b/drivers/target/target_core_spc.c
|
|
@@ -503,7 +503,6 @@ static sense_reason_t
|
|
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
|
|
{
|
|
struct se_device *dev = cmd->se_dev;
|
|
- u32 max_sectors;
|
|
int have_tp = 0;
|
|
int opt, min;
|
|
|
|
@@ -537,9 +536,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
|
|
/*
|
|
* Set MAXIMUM TRANSFER LENGTH
|
|
*/
|
|
- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
|
|
- dev->dev_attrib.hw_max_sectors);
|
|
- put_unaligned_be32(max_sectors, &buf[8]);
|
|
+ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
|
|
|
|
/*
|
|
* Set OPTIMAL TRANSFER LENGTH
|
|
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
|
|
index c036595..fb8a1a1 100644
|
|
--- a/drivers/target/target_core_tpg.c
|
|
+++ b/drivers/target/target_core_tpg.c
|
|
@@ -40,6 +40,7 @@
|
|
#include <target/target_core_fabric.h>
|
|
|
|
#include "target_core_internal.h"
|
|
+#include "target_core_pr.h"
|
|
|
|
extern struct se_device *g_lun0_dev;
|
|
|
|
@@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
|
|
|
|
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
|
|
lun_access, acl, tpg);
|
|
+ /*
|
|
+ * Check to see if there are any existing persistent reservation
|
|
+ * APTPL pre-registrations that need to be enabled for this dynamic
|
|
+ * LUN ACL now..
|
|
+ */
|
|
+ core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
|
|
+ lun->unpacked_lun);
|
|
spin_lock(&tpg->tpg_lun_lock);
|
|
}
|
|
spin_unlock(&tpg->tpg_lun_lock);
|
|
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
|
|
index 24f5279..6fc3890 100644
|
|
--- a/drivers/target/target_core_transport.c
|
|
+++ b/drivers/target/target_core_transport.c
|
|
@@ -1600,11 +1600,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
|
|
transport_complete_task_attr(cmd);
|
|
/*
|
|
* Handle special case for COMPARE_AND_WRITE failure, where the
|
|
- * callback is expected to drop the per device ->caw_mutex.
|
|
+ * callback is expected to drop the per device ->caw_sem.
|
|
*/
|
|
if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
|
|
cmd->transport_complete_callback)
|
|
- cmd->transport_complete_callback(cmd);
|
|
+ cmd->transport_complete_callback(cmd, false);
|
|
|
|
switch (sense_reason) {
|
|
case TCM_NON_EXISTENT_LUN:
|
|
@@ -1855,8 +1855,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
|
|
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
|
|
trace_target_cmd_complete(cmd);
|
|
ret = cmd->se_tfo->queue_status(cmd);
|
|
- if (ret)
|
|
- goto out;
|
|
+ goto out;
|
|
}
|
|
|
|
switch (cmd->data_direction) {
|
|
@@ -1942,8 +1941,12 @@ static void target_complete_ok_work(struct work_struct *work)
|
|
if (cmd->transport_complete_callback) {
|
|
sense_reason_t rc;
|
|
|
|
- rc = cmd->transport_complete_callback(cmd);
|
|
+ rc = cmd->transport_complete_callback(cmd, true);
|
|
if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
|
|
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
|
|
+ !cmd->data_length)
|
|
+ goto queue_rsp;
|
|
+
|
|
return;
|
|
} else if (rc) {
|
|
ret = transport_send_check_condition_and_sense(cmd,
|
|
@@ -1957,6 +1960,7 @@ static void target_complete_ok_work(struct work_struct *work)
|
|
}
|
|
}
|
|
|
|
+queue_rsp:
|
|
switch (cmd->data_direction) {
|
|
case DMA_FROM_DEVICE:
|
|
spin_lock(&cmd->se_lun->lun_sep_lock);
|
|
@@ -2045,6 +2049,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
|
|
static inline void transport_free_pages(struct se_cmd *cmd)
|
|
{
|
|
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
|
|
+ /*
|
|
+ * Release special case READ buffer payload required for
|
|
+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
|
|
+ */
|
|
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
|
|
+ transport_free_sgl(cmd->t_bidi_data_sg,
|
|
+ cmd->t_bidi_data_nents);
|
|
+ cmd->t_bidi_data_sg = NULL;
|
|
+ cmd->t_bidi_data_nents = 0;
|
|
+ }
|
|
transport_reset_sgl_orig(cmd);
|
|
return;
|
|
}
|
|
@@ -2193,6 +2207,7 @@ sense_reason_t
|
|
transport_generic_new_cmd(struct se_cmd *cmd)
|
|
{
|
|
int ret = 0;
|
|
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
|
|
|
|
/*
|
|
* Determine is the TCM fabric module has already allocated physical
|
|
@@ -2201,7 +2216,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
|
|
*/
|
|
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
|
|
cmd->data_length) {
|
|
- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
|
|
|
|
if ((cmd->se_cmd_flags & SCF_BIDI) ||
|
|
(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
|
|
@@ -2224,6 +2238,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
|
|
cmd->data_length, zero_flag);
|
|
if (ret < 0)
|
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
|
+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
|
|
+ cmd->data_length) {
|
|
+ /*
|
|
+ * Special case for COMPARE_AND_WRITE with fabrics
|
|
+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
|
|
+ */
|
|
+ u32 caw_length = cmd->t_task_nolb *
|
|
+ cmd->se_dev->dev_attrib.block_size;
|
|
+
|
|
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
|
|
+ &cmd->t_bidi_data_nents,
|
|
+ caw_length, zero_flag);
|
|
+ if (ret < 0)
|
|
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
|
}
|
|
/*
|
|
* If this command is not a write we can execute it right here,
|
|
@@ -2231,7 +2259,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
|
|
* and let it call back once the write buffers are ready.
|
|
*/
|
|
target_add_to_state_list(cmd);
|
|
- if (cmd->data_direction != DMA_TO_DEVICE) {
|
|
+ if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
|
|
target_execute_cmd(cmd);
|
|
return 0;
|
|
}
|
|
@@ -2328,6 +2356,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
|
|
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
|
|
out:
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
+
|
|
+ if (ret && ack_kref)
|
|
+ target_put_sess_cmd(se_sess, se_cmd);
|
|
+
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(target_get_sess_cmd);
|
|
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
|
|
index e415af3..c67d379 100644
|
|
--- a/drivers/target/tcm_fc/tfc_io.c
|
|
+++ b/drivers/target/tcm_fc/tfc_io.c
|
|
@@ -346,7 +346,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
|
|
ep = fc_seq_exch(seq);
|
|
if (ep) {
|
|
lport = ep->lp;
|
|
- if (lport && (ep->xid <= lport->lro_xid))
|
|
+ if (lport && (ep->xid <= lport->lro_xid)) {
|
|
/*
|
|
* "ddp_done" trigger invalidation of HW
|
|
* specific DDP context
|
|
@@ -361,6 +361,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
|
|
* identified using ep->xid)
|
|
*/
|
|
cmd->was_ddp_setup = 0;
|
|
+ }
|
|
}
|
|
}
|
|
}
|
|
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
|
|
index a084325..6e75177 100644
|
|
--- a/drivers/thermal/intel_powerclamp.c
|
|
+++ b/drivers/thermal/intel_powerclamp.c
|
|
@@ -435,7 +435,6 @@ static int clamp_thread(void *arg)
|
|
* allowed. thus jiffies are updated properly.
|
|
*/
|
|
preempt_disable();
|
|
- tick_nohz_idle_enter();
|
|
/* mwait until target jiffies is reached */
|
|
while (time_before(jiffies, target_jiffies)) {
|
|
unsigned long ecx = 1;
|
|
@@ -451,7 +450,6 @@ static int clamp_thread(void *arg)
|
|
start_critical_timings();
|
|
atomic_inc(&idle_wakeup_counter);
|
|
}
|
|
- tick_nohz_idle_exit();
|
|
preempt_enable();
|
|
}
|
|
del_timer_sync(&wakeup_timer);
|
|
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
|
|
index 4db78ea..8a6f06b 100644
|
|
--- a/drivers/thermal/step_wise.c
|
|
+++ b/drivers/thermal/step_wise.c
|
|
@@ -79,7 +79,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
|
|
dev_dbg(&cdev->device, "THERMAL_TREND_RAISE_FULL: next_target=%ld\n", next_target);
|
|
break;
|
|
case THERMAL_TREND_DROPPING:
|
|
- if (cur_state == instance->lower) {
|
|
+ if (cur_state <= instance->lower) {
|
|
if (!throttle)
|
|
next_target = THERMAL_NO_TARGET;
|
|
} else {
|
|
@@ -153,9 +153,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
|
|
dev_dbg(&instance->cdev->device, "old_target=%ld, target=%ld\n",
|
|
old_target, instance->target);
|
|
|
|
- if (old_target == instance->target)
|
|
- continue;
|
|
-
|
|
/* Activate a passive thermal instance */
|
|
if (old_target == THERMAL_NO_TARGET &&
|
|
instance->target != THERMAL_NO_TARGET)
|
|
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
|
|
index 691c756..0821074 100644
|
|
--- a/drivers/thermal/thermal_core.c
|
|
+++ b/drivers/thermal/thermal_core.c
|
|
@@ -1830,10 +1830,10 @@ static int __init thermal_init(void)
|
|
|
|
exit_netlink:
|
|
genetlink_exit();
|
|
-unregister_governors:
|
|
- thermal_unregister_governors();
|
|
unregister_class:
|
|
class_unregister(&thermal_class);
|
|
+unregister_governors:
|
|
+ thermal_unregister_governors();
|
|
error:
|
|
idr_destroy(&thermal_tz_idr);
|
|
idr_destroy(&thermal_cdev_idr);
|
|
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
|
|
index 2dc2831..59a7da7 100644
|
|
--- a/drivers/tty/hvc/hvc_xen.c
|
|
+++ b/drivers/tty/hvc/hvc_xen.c
|
|
@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
|
|
+ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
|
|
info->vtermno = HVC_COOKIE;
|
|
|
|
spin_lock(&xencons_lock);
|
|
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
|
|
return 0;
|
|
}
|
|
|
|
+static void xen_console_update_evtchn(struct xencons_info *info)
|
|
+{
|
|
+ if (xen_hvm_domain()) {
|
|
+ uint64_t v;
|
|
+ int err;
|
|
+
|
|
+ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
|
|
+ if (!err && v)
|
|
+ info->evtchn = v;
|
|
+ } else
|
|
+ info->evtchn = xen_start_info->console.domU.evtchn;
|
|
+}
|
|
+
|
|
void xen_console_resume(void)
|
|
{
|
|
struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
|
|
- if (info != NULL && info->irq)
|
|
+ if (info != NULL && info->irq) {
|
|
+ if (!xen_initial_domain())
|
|
+ xen_console_update_evtchn(info);
|
|
rebind_evtchn_irq(info->evtchn, info->irq);
|
|
+ }
|
|
}
|
|
|
|
static void xencons_disconnect_backend(struct xencons_info *info)
|
|
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
|
|
index 2ebe47b..5bfd807 100644
|
|
--- a/drivers/tty/n_gsm.c
|
|
+++ b/drivers/tty/n_gsm.c
|
|
@@ -3166,7 +3166,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
|
|
return gsmtty_modem_update(dlci, encode);
|
|
}
|
|
|
|
-static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
|
|
+static void gsmtty_cleanup(struct tty_struct *tty)
|
|
{
|
|
struct gsm_dlci *dlci = tty->driver_data;
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
@@ -3174,7 +3174,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
|
|
dlci_put(dlci);
|
|
dlci_put(gsm->dlci[0]);
|
|
mux_put(gsm);
|
|
- driver->ttys[tty->index] = NULL;
|
|
}
|
|
|
|
/* Virtual ttys for the demux */
|
|
@@ -3195,7 +3194,7 @@ static const struct tty_operations gsmtty_ops = {
|
|
.tiocmget = gsmtty_tiocmget,
|
|
.tiocmset = gsmtty_tiocmset,
|
|
.break_ctl = gsmtty_break_ctl,
|
|
- .remove = gsmtty_remove,
|
|
+ .cleanup = gsmtty_cleanup,
|
|
};
|
|
|
|
|
|
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
|
|
index 28ac3f3..8195190 100644
|
|
--- a/drivers/tty/n_tty.c
|
|
+++ b/drivers/tty/n_tty.c
|
|
@@ -186,6 +186,17 @@ static int receive_room(struct tty_struct *tty)
|
|
return left;
|
|
}
|
|
|
|
+static inline int tty_copy_to_user(struct tty_struct *tty,
|
|
+ void __user *to,
|
|
+ const void *from,
|
|
+ unsigned long n)
|
|
+{
|
|
+ struct n_tty_data *ldata = tty->disc_data;
|
|
+
|
|
+ tty_audit_add_data(tty, to, n, ldata->icanon);
|
|
+ return copy_to_user(to, from, n);
|
|
+}
|
|
+
|
|
/**
|
|
* n_tty_set_room - receive space
|
|
* @tty: terminal
|
|
@@ -247,8 +258,6 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
|
|
|
|
static void n_tty_check_throttle(struct tty_struct *tty)
|
|
{
|
|
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
|
|
- return;
|
|
/*
|
|
* Check the remaining room for the input canonicalization
|
|
* mode. We don't want to throttle the driver if we're in
|
|
@@ -321,7 +330,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
|
|
|
|
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
|
|
{
|
|
- *read_buf_addr(ldata, ldata->read_head++) = c;
|
|
+ *read_buf_addr(ldata, ldata->read_head) = c;
|
|
+ ldata->read_head++;
|
|
}
|
|
|
|
/**
|
|
@@ -1511,23 +1521,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
|
|
n_tty_receive_char_flagged(tty, c, flag);
|
|
}
|
|
|
|
-/**
|
|
- * n_tty_receive_buf - data receive
|
|
- * @tty: terminal device
|
|
- * @cp: buffer
|
|
- * @fp: flag buffer
|
|
- * @count: characters
|
|
- *
|
|
- * Called by the terminal driver when a block of characters has
|
|
- * been received. This function must be called from soft contexts
|
|
- * not from interrupt context. The driver is responsible for making
|
|
- * calls one at a time and in order (or using flush_to_ldisc)
|
|
- *
|
|
- * n_tty_receive_buf()/producer path:
|
|
- * claims non-exclusive termios_rwsem
|
|
- * publishes read_head and canon_head
|
|
- */
|
|
-
|
|
static void
|
|
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
|
|
char *fp, int count)
|
|
@@ -1683,24 +1676,85 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
|
|
}
|
|
}
|
|
|
|
+/**
|
|
+ * n_tty_receive_buf_common - process input
|
|
+ * @tty: device to receive input
|
|
+ * @cp: input chars
|
|
+ * @fp: flags for each char (if NULL, all chars are TTY_NORMAL)
|
|
+ * @count: number of input chars in @cp
|
|
+ *
|
|
+ * Called by the terminal driver when a block of characters has
|
|
+ * been received. This function must be called from soft contexts
|
|
+ * not from interrupt context. The driver is responsible for making
|
|
+ * calls one at a time and in order (or using flush_to_ldisc)
|
|
+ *
|
|
+ * Returns the # of input chars from @cp which were processed.
|
|
+ *
|
|
+ * In canonical mode, the maximum line length is 4096 chars (including
|
|
+ * the line termination char); lines longer than 4096 chars are
|
|
+ * truncated. After 4095 chars, input data is still processed but
|
|
+ * not stored. Overflow processing ensures the tty can always
|
|
+ * receive more input until at least one line can be read.
|
|
+ *
|
|
+ * In non-canonical mode, the read buffer will only accept 4095 chars;
|
|
+ * this provides the necessary space for a newline char if the input
|
|
+ * mode is switched to canonical.
|
|
+ *
|
|
+ * Note it is possible for the read buffer to _contain_ 4096 chars
|
|
+ * in non-canonical mode: the read buffer could already contain the
|
|
+ * maximum canon line of 4096 chars when the mode is switched to
|
|
+ * non-canonical.
|
|
+ *
|
|
+ * n_tty_receive_buf()/producer path:
|
|
+ * claims non-exclusive termios_rwsem
|
|
+ * publishes commit_head or canon_head
|
|
+ */
|
|
static int
|
|
n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
|
|
char *fp, int count, int flow)
|
|
{
|
|
struct n_tty_data *ldata = tty->disc_data;
|
|
- int room, n, rcvd = 0;
|
|
+ int room, n, rcvd = 0, overflow;
|
|
|
|
down_read(&tty->termios_rwsem);
|
|
|
|
while (1) {
|
|
- room = receive_room(tty);
|
|
+ /*
|
|
+ * When PARMRK is set, each input char may take up to 3 chars
|
|
+ * in the read buf; reduce the buffer space avail by 3x
|
|
+ *
|
|
+ * If we are doing input canonicalization, and there are no
|
|
+ * pending newlines, let characters through without limit, so
|
|
+ * that erase characters will be handled. Other excess
|
|
+ * characters will be beeped.
|
|
+ *
|
|
+ * paired with store in *_copy_from_read_buf() -- guarantees
|
|
+ * the consumer has loaded the data in read_buf up to the new
|
|
+ * read_tail (so this producer will not overwrite unread data)
|
|
+ */
|
|
+ size_t tail = ldata->read_tail;
|
|
+
|
|
+ room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
|
|
+ if (I_PARMRK(tty))
|
|
+ room = (room + 2) / 3;
|
|
+ room--;
|
|
+ if (room <= 0) {
|
|
+ overflow = ldata->icanon && ldata->canon_head == tail;
|
|
+ if (overflow && room < 0)
|
|
+ ldata->read_head--;
|
|
+ room = overflow;
|
|
+ ldata->no_room = flow && !room;
|
|
+ } else
|
|
+ overflow = 0;
|
|
+
|
|
n = min(count, room);
|
|
- if (!n) {
|
|
- if (flow && !room)
|
|
- ldata->no_room = 1;
|
|
+ if (!n)
|
|
break;
|
|
- }
|
|
- __receive_buf(tty, cp, fp, n);
|
|
+
|
|
+ /* ignore parity errors if handling overflow */
|
|
+ if (!overflow || !fp || *fp != TTY_PARITY)
|
|
+ __receive_buf(tty, cp, fp, n);
|
|
+
|
|
cp += n;
|
|
if (fp)
|
|
fp += n;
|
|
@@ -1709,7 +1763,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
|
|
}
|
|
|
|
tty->receive_room = room;
|
|
- n_tty_check_throttle(tty);
|
|
+
|
|
+ /* Unthrottle if handling overflow on pty */
|
|
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
|
|
+ if (overflow) {
|
|
+ tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
|
|
+ tty_unthrottle_safe(tty);
|
|
+ __tty_set_flow_change(tty, 0);
|
|
+ }
|
|
+ } else
|
|
+ n_tty_check_throttle(tty);
|
|
+
|
|
up_read(&tty->termios_rwsem);
|
|
|
|
return rcvd;
|
|
@@ -2031,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
|
|
__func__, eol, found, n, c, size, more);
|
|
|
|
if (n > size) {
|
|
- ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
|
|
+ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
|
|
if (ret)
|
|
return -EFAULT;
|
|
- ret = copy_to_user(*b + size, ldata->read_buf, n - size);
|
|
+ ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
|
|
} else
|
|
- ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
|
|
+ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
|
|
|
|
if (ret)
|
|
return -EFAULT;
|
|
@@ -2416,12 +2480,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
|
|
|
|
poll_wait(file, &tty->read_wait, wait);
|
|
poll_wait(file, &tty->write_wait, wait);
|
|
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
|
|
+ mask |= POLLHUP;
|
|
if (input_available_p(tty, 1))
|
|
mask |= POLLIN | POLLRDNORM;
|
|
+ else if (mask & POLLHUP) {
|
|
+ tty_flush_to_ldisc(tty);
|
|
+ if (input_available_p(tty, 1))
|
|
+ mask |= POLLIN | POLLRDNORM;
|
|
+ }
|
|
if (tty->packet && tty->link->ctrl_status)
|
|
mask |= POLLPRI | POLLIN | POLLRDNORM;
|
|
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
|
|
- mask |= POLLHUP;
|
|
if (tty_hung_up_p(file))
|
|
mask |= POLLHUP;
|
|
if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
|
|
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
|
|
index 25c9bc7..e49616e 100644
|
|
--- a/drivers/tty/pty.c
|
|
+++ b/drivers/tty/pty.c
|
|
@@ -209,6 +209,9 @@ static int pty_signal(struct tty_struct *tty, int sig)
|
|
unsigned long flags;
|
|
struct pid *pgrp;
|
|
|
|
+ if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
|
|
+ return -EINVAL;
|
|
+
|
|
if (tty->link) {
|
|
spin_lock_irqsave(&tty->link->ctrl_lock, flags);
|
|
pgrp = get_pid(tty->link->pgrp);
|
|
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
|
|
index ab9096d..148ffe4 100644
|
|
--- a/drivers/tty/serial/8250/8250_dma.c
|
|
+++ b/drivers/tty/serial/8250/8250_dma.c
|
|
@@ -192,21 +192,28 @@ int serial8250_request_dma(struct uart_8250_port *p)
|
|
|
|
dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
|
|
&dma->rx_addr, GFP_KERNEL);
|
|
- if (!dma->rx_buf) {
|
|
- dma_release_channel(dma->rxchan);
|
|
- dma_release_channel(dma->txchan);
|
|
- return -ENOMEM;
|
|
- }
|
|
+ if (!dma->rx_buf)
|
|
+ goto err;
|
|
|
|
/* TX buffer */
|
|
dma->tx_addr = dma_map_single(dma->txchan->device->dev,
|
|
p->port.state->xmit.buf,
|
|
UART_XMIT_SIZE,
|
|
DMA_TO_DEVICE);
|
|
+ if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
|
|
+ dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
|
|
+ dma->rx_buf, dma->rx_addr);
|
|
+ goto err;
|
|
+ }
|
|
|
|
dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
|
|
|
|
return 0;
|
|
+err:
|
|
+ dma_release_channel(dma->rxchan);
|
|
+ dma_release_channel(dma->txchan);
|
|
+
|
|
+ return -ENOMEM;
|
|
}
|
|
EXPORT_SYMBOL_GPL(serial8250_request_dma);
|
|
|
|
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
|
|
index 0ff3e36..5892eab 100644
|
|
--- a/drivers/tty/serial/8250/8250_pci.c
|
|
+++ b/drivers/tty/serial/8250/8250_pci.c
|
|
@@ -66,7 +66,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
|
|
"Please send the output of lspci -vv, this\n"
|
|
"message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
|
|
"manufacturer and name of serial board or\n"
|
|
- "modem board to rmk+serial@arm.linux.org.uk.\n",
|
|
+ "modem board to <linux-serial@vger.kernel.org>.\n",
|
|
pci_name(dev), str, dev->vendor, dev->device,
|
|
dev->subsystem_vendor, dev->subsystem_device);
|
|
}
|
|
@@ -1766,6 +1766,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
|
|
#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
|
|
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
|
|
#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
|
|
+#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936
|
|
|
|
#define PCI_VENDOR_ID_SUNIX 0x1fd4
|
|
#define PCI_DEVICE_ID_SUNIX_1999 0x1999
|
|
@@ -1876,6 +1877,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
|
|
.subdevice = PCI_ANY_ID,
|
|
.setup = byt_serial_setup,
|
|
},
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_INTEL,
|
|
+ .device = PCI_DEVICE_ID_INTEL_QRK_UART,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_default_setup,
|
|
+ },
|
|
/*
|
|
* ITE
|
|
*/
|
|
@@ -2710,6 +2718,7 @@ enum pci_board_num_t {
|
|
pbn_ADDIDATA_PCIe_8_3906250,
|
|
pbn_ce4100_1_115200,
|
|
pbn_byt,
|
|
+ pbn_qrk,
|
|
pbn_omegapci,
|
|
pbn_NETMOS9900_2s_115200,
|
|
pbn_brcm_trumanage,
|
|
@@ -3456,6 +3465,12 @@ static struct pciserial_board pci_boards[] = {
|
|
.uart_offset = 0x80,
|
|
.reg_shift = 2,
|
|
},
|
|
+ [pbn_qrk] = {
|
|
+ .flags = FL_BASE0,
|
|
+ .num_ports = 1,
|
|
+ .base_baud = 2764800,
|
|
+ .reg_shift = 2,
|
|
+ },
|
|
[pbn_omegapci] = {
|
|
.flags = FL_BASE0,
|
|
.num_ports = 8,
|
|
@@ -5150,6 +5165,12 @@ static struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_byt },
|
|
|
|
/*
|
|
+ * Intel Quark x1000
|
|
+ */
|
|
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
|
|
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
+ pbn_qrk },
|
|
+ /*
|
|
* Cronyx Omega PCI
|
|
*/
|
|
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
|
|
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
|
|
index 682a2fb..2b22cc1 100644
|
|
--- a/drivers/tty/serial/8250/8250_pnp.c
|
|
+++ b/drivers/tty/serial/8250/8250_pnp.c
|
|
@@ -364,6 +364,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
|
|
/* Winbond CIR port, should not be probed. We should keep track
|
|
of it to prevent the legacy serial driver from probing it */
|
|
{ "WEC1022", CIR_PORT },
|
|
+ /*
|
|
+ * SMSC IrCC SIR/FIR port, should not be probed by serial driver
|
|
+ * as well so its own driver can bind to it.
|
|
+ */
|
|
+ { "SMCF010", CIR_PORT },
|
|
{ "", 0 }
|
|
};
|
|
|
|
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
|
|
index ce352b8..9d162ef7 100644
|
|
--- a/drivers/tty/serial/atmel_serial.c
|
|
+++ b/drivers/tty/serial/atmel_serial.c
|
|
@@ -757,6 +757,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
|
|
config.direction = DMA_MEM_TO_DEV;
|
|
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
|
|
config.dst_addr = port->mapbase + ATMEL_US_THR;
|
|
+ config.dst_maxburst = 1;
|
|
|
|
ret = dmaengine_device_control(atmel_port->chan_tx,
|
|
DMA_SLAVE_CONFIG,
|
|
@@ -921,6 +922,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
|
|
config.direction = DMA_DEV_TO_MEM;
|
|
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
|
|
config.src_addr = port->mapbase + ATMEL_US_RHR;
|
|
+ config.src_maxburst = 1;
|
|
|
|
ret = dmaengine_device_control(atmel_port->chan_rx,
|
|
DMA_SLAVE_CONFIG,
|
|
@@ -2392,7 +2394,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
|
|
|
|
ret = atmel_init_port(port, pdev);
|
|
if (ret)
|
|
- goto err;
|
|
+ goto err_clear_bit;
|
|
|
|
if (!atmel_use_pdc_rx(&port->uart)) {
|
|
ret = -ENOMEM;
|
|
@@ -2441,6 +2443,8 @@ err_alloc_ring:
|
|
clk_put(port->clk);
|
|
port->clk = NULL;
|
|
}
|
|
+err_clear_bit:
|
|
+ clear_bit(port->uart.line, atmel_ports_in_use);
|
|
err:
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
|
|
index 175f123..501c465 100644
|
|
--- a/drivers/tty/serial/fsl_lpuart.c
|
|
+++ b/drivers/tty/serial/fsl_lpuart.c
|
|
@@ -362,6 +362,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
|
|
writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
|
|
sport->port.membase + UARTPFIFO);
|
|
|
|
+ /* explicitly clear RDRF */
|
|
+ readb(sport->port.membase + UARTSR1);
|
|
+
|
|
/* flush Tx and Rx FIFO */
|
|
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
|
|
sport->port.membase + UARTCFIFO);
|
|
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
|
|
index d799140..1fd9bc6 100644
|
|
--- a/drivers/tty/serial/imx.c
|
|
+++ b/drivers/tty/serial/imx.c
|
|
@@ -907,6 +907,14 @@ static void dma_rx_callback(void *data)
|
|
|
|
status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
|
|
count = RX_BUF_SIZE - state.residue;
|
|
+
|
|
+ if (readl(sport->port.membase + USR2) & USR2_IDLE) {
|
|
+ /* In condition [3] the SDMA counted up too early */
|
|
+ count--;
|
|
+
|
|
+ writel(USR2_IDLE, sport->port.membase + USR2);
|
|
+ }
|
|
+
|
|
dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
|
|
|
|
if (count) {
|
|
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
|
|
index 9924660..9dd8a71 100644
|
|
--- a/drivers/tty/serial/of_serial.c
|
|
+++ b/drivers/tty/serial/of_serial.c
|
|
@@ -261,7 +261,6 @@ static struct of_device_id of_platform_serial_table[] = {
|
|
{ .compatible = "ibm,qpace-nwp-serial",
|
|
.data = (void *)PORT_NWPSERIAL, },
|
|
#endif
|
|
- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
|
|
{ /* end of list */ },
|
|
};
|
|
|
|
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
|
|
index db8434d..f4e68b3 100644
|
|
--- a/drivers/tty/serial/omap-serial.c
|
|
+++ b/drivers/tty/serial/omap-serial.c
|
|
@@ -260,8 +260,16 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
|
|
{
|
|
unsigned int n13 = port->uartclk / (13 * baud);
|
|
unsigned int n16 = port->uartclk / (16 * baud);
|
|
- int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
|
|
- int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
|
|
+ int baudAbsDiff13;
|
|
+ int baudAbsDiff16;
|
|
+
|
|
+ if (n13 == 0)
|
|
+ n13 = 1;
|
|
+ if (n16 == 0)
|
|
+ n16 = 1;
|
|
+
|
|
+ baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
|
|
+ baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
|
|
if (baudAbsDiff13 < 0)
|
|
baudAbsDiff13 = -baudAbsDiff13;
|
|
if (baudAbsDiff16 < 0)
|
|
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
|
|
index 9cd706d..7d3a3f5 100644
|
|
--- a/drivers/tty/serial/samsung.c
|
|
+++ b/drivers/tty/serial/samsung.c
|
|
@@ -544,11 +544,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
|
|
unsigned int old)
|
|
{
|
|
struct s3c24xx_uart_port *ourport = to_ourport(port);
|
|
+ int timeout = 10000;
|
|
|
|
ourport->pm_level = level;
|
|
|
|
switch (level) {
|
|
case 3:
|
|
+ while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
|
|
+ udelay(100);
|
|
+
|
|
if (!IS_ERR(ourport->baudclk))
|
|
clk_disable_unprepare(ourport->baudclk);
|
|
|
|
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
|
|
index ece2049..27b5554 100644
|
|
--- a/drivers/tty/serial/serial_core.c
|
|
+++ b/drivers/tty/serial/serial_core.c
|
|
@@ -235,6 +235,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
|
|
/*
|
|
* Turn off DTR and RTS early.
|
|
*/
|
|
+ if (uart_console(uport) && tty)
|
|
+ uport->cons->cflag = tty->termios.c_cflag;
|
|
+
|
|
if (!tty || (tty->termios.c_cflag & HUPCL))
|
|
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
|
|
|
|
@@ -350,7 +353,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
|
|
* The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
|
|
* Die! Die! Die!
|
|
*/
|
|
- if (baud == 38400)
|
|
+ if (try == 0 && baud == 38400)
|
|
baud = altbaud;
|
|
|
|
/*
|
|
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
|
|
index 80a58ec..e8f7760 100644
|
|
--- a/drivers/tty/serial/sunsab.c
|
|
+++ b/drivers/tty/serial/sunsab.c
|
|
@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
|
|
(up->port.line == up->port.cons->index))
|
|
saw_console_brk = 1;
|
|
|
|
+ if (count == 0) {
|
|
+ if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
|
|
+ stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
|
|
+ SAB82532_ISR0_FERR);
|
|
+ up->port.icount.brk++;
|
|
+ uart_handle_break(&up->port);
|
|
+ }
|
|
+ }
|
|
+
|
|
for (i = 0; i < count; i++) {
|
|
unsigned char ch = buf[i], flag;
|
|
|
|
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
|
|
index d3448a9..39988fa 100644
|
|
--- a/drivers/tty/tty_io.c
|
|
+++ b/drivers/tty/tty_io.c
|
|
@@ -996,8 +996,8 @@ EXPORT_SYMBOL(start_tty);
|
|
/* We limit tty time update visibility to every 8 seconds or so. */
|
|
static void tty_update_time(struct timespec *time)
|
|
{
|
|
- unsigned long sec = get_seconds() & ~7;
|
|
- if ((long)(sec - time->tv_sec) > 0)
|
|
+ unsigned long sec = get_seconds();
|
|
+ if (abs(sec - time->tv_sec) & ~7)
|
|
time->tv_sec = sec;
|
|
}
|
|
|
|
@@ -1701,6 +1701,7 @@ int tty_release(struct inode *inode, struct file *filp)
|
|
int pty_master, tty_closing, o_tty_closing, do_sleep;
|
|
int idx;
|
|
char buf[64];
|
|
+ long timeout = 0;
|
|
|
|
if (tty_paranoia_check(tty, inode, __func__))
|
|
return 0;
|
|
@@ -1785,7 +1786,11 @@ int tty_release(struct inode *inode, struct file *filp)
|
|
__func__, tty_name(tty, buf));
|
|
tty_unlock_pair(tty, o_tty);
|
|
mutex_unlock(&tty_mutex);
|
|
- schedule();
|
|
+ schedule_timeout_killable(timeout);
|
|
+ if (timeout < 120 * HZ)
|
|
+ timeout = 2 * timeout + 1;
|
|
+ else
|
|
+ timeout = MAX_SCHEDULE_TIMEOUT;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
|
|
index 6fd60fe..22da05d 100644
|
|
--- a/drivers/tty/tty_ioctl.c
|
|
+++ b/drivers/tty/tty_ioctl.c
|
|
@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
|
|
#endif
|
|
if (!timeout)
|
|
timeout = MAX_SCHEDULE_TIMEOUT;
|
|
+
|
|
if (wait_event_interruptible_timeout(tty->write_wait,
|
|
- !tty_chars_in_buffer(tty), timeout) >= 0) {
|
|
- if (tty->ops->wait_until_sent)
|
|
- tty->ops->wait_until_sent(tty, timeout);
|
|
+ !tty_chars_in_buffer(tty), timeout) < 0) {
|
|
+ return;
|
|
}
|
|
+
|
|
+ if (timeout == MAX_SCHEDULE_TIMEOUT)
|
|
+ timeout = 0;
|
|
+
|
|
+ if (tty->ops->wait_until_sent)
|
|
+ tty->ops->wait_until_sent(tty, timeout);
|
|
}
|
|
EXPORT_SYMBOL(tty_wait_until_sent);
|
|
|
|
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
|
|
index 2978ca5..0e75d2a 100644
|
|
--- a/drivers/tty/vt/consolemap.c
|
|
+++ b/drivers/tty/vt/consolemap.c
|
|
@@ -540,6 +540,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
|
|
|
|
/* Save original vc_unipagdir_loc in case we allocate a new one */
|
|
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
|
|
+
|
|
+ if (!p) {
|
|
+ err = -EINVAL;
|
|
+
|
|
+ goto out_unlock;
|
|
+ }
|
|
if (p->readonly) {
|
|
console_unlock();
|
|
return -EIO;
|
|
@@ -633,6 +639,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
|
|
set_inverse_transl(vc, p, i); /* Update inverse translations */
|
|
set_inverse_trans_unicode(vc, p);
|
|
|
|
+out_unlock:
|
|
console_unlock();
|
|
return err;
|
|
}
|
|
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
|
|
index 23b5d32..693091a 100644
|
|
--- a/drivers/tty/vt/vt.c
|
|
+++ b/drivers/tty/vt/vt.c
|
|
@@ -498,6 +498,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
|
|
#endif
|
|
if (DO_UPDATE(vc))
|
|
do_update_region(vc, (unsigned long) p, count);
|
|
+ notify_update(vc);
|
|
}
|
|
|
|
/* used by selection: complement pointer position */
|
|
@@ -514,6 +515,7 @@ void complement_pos(struct vc_data *vc, int offset)
|
|
scr_writew(old, screenpos(vc, old_offset, 1));
|
|
if (DO_UPDATE(vc))
|
|
vc->vc_sw->con_putc(vc, old, oldy, oldx);
|
|
+ notify_update(vc);
|
|
}
|
|
|
|
old_offset = offset;
|
|
@@ -531,8 +533,8 @@ void complement_pos(struct vc_data *vc, int offset)
|
|
oldy = (offset >> 1) / vc->vc_cols;
|
|
vc->vc_sw->con_putc(vc, new, oldy, oldx);
|
|
}
|
|
+ notify_update(vc);
|
|
}
|
|
-
|
|
}
|
|
|
|
static void insert_char(struct vc_data *vc, unsigned int nr)
|
|
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
|
|
index 0b24e6e..7a9c5a4 100644
|
|
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
|
|
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
|
|
@@ -20,13 +20,13 @@
|
|
static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
|
|
{
|
|
struct device *dev = ci->gadget.dev.parent;
|
|
- int val;
|
|
|
|
switch (event) {
|
|
case CI_HDRC_CONTROLLER_RESET_EVENT:
|
|
dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
|
|
writel(0, USB_AHBBURST);
|
|
writel(0, USB_AHBMODE);
|
|
+ usb_phy_init(ci->transceiver);
|
|
break;
|
|
case CI_HDRC_CONTROLLER_STOPPED_EVENT:
|
|
dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
|
|
@@ -34,10 +34,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
|
|
* Put the transceiver in non-driving mode. Otherwise host
|
|
* may not detect soft-disconnection.
|
|
*/
|
|
- val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
|
|
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
|
|
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
|
|
- usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
|
|
+ usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
|
|
break;
|
|
default:
|
|
dev_dbg(dev, "unknown ci_hdrc event\n");
|
|
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
|
|
index eabccd4..d7049c3 100644
|
|
--- a/drivers/usb/class/cdc-acm.c
|
|
+++ b/drivers/usb/class/cdc-acm.c
|
|
@@ -965,11 +965,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
|
|
/* FIXME: Needs to clear unsupported bits in the termios */
|
|
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
|
|
|
|
- if (!newline.dwDTERate) {
|
|
+ if (C_BAUD(tty) == B0) {
|
|
newline.dwDTERate = acm->line.dwDTERate;
|
|
newctrl &= ~ACM_CTRL_DTR;
|
|
- } else
|
|
+ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
|
|
newctrl |= ACM_CTRL_DTR;
|
|
+ }
|
|
|
|
if (newctrl != acm->ctrlout)
|
|
acm_set_control(acm, acm->ctrlout = newctrl);
|
|
@@ -1168,10 +1169,11 @@ next_desc:
|
|
} else {
|
|
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
|
|
data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
|
|
- if (!control_interface || !data_interface) {
|
|
- dev_dbg(&intf->dev, "no interfaces\n");
|
|
- return -ENODEV;
|
|
- }
|
|
+ }
|
|
+
|
|
+ if (!control_interface || !data_interface) {
|
|
+ dev_dbg(&intf->dev, "no interfaces\n");
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
if (data_interface_num != call_interface_num)
|
|
@@ -1447,6 +1449,7 @@ alloc_fail8:
|
|
&dev_attr_wCountryCodes);
|
|
device_remove_file(&acm->control->dev,
|
|
&dev_attr_iCountryCodeRelDate);
|
|
+ kfree(acm->country_codes);
|
|
}
|
|
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
|
|
alloc_fail7:
|
|
@@ -1672,6 +1675,7 @@ static const struct usb_device_id acm_ids[] = {
|
|
{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
|
|
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
|
|
},
|
|
+ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
|
|
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
|
|
},
|
|
/* Motorola H24 HSPA module: */
|
|
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
|
|
index a051a7a..a81f9dd 100644
|
|
--- a/drivers/usb/class/cdc-wdm.c
|
|
+++ b/drivers/usb/class/cdc-wdm.c
|
|
@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
|
|
case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
|
|
dev_dbg(&desc->intf->dev,
|
|
"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
|
|
- dr->wIndex, dr->wLength);
|
|
+ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
|
|
break;
|
|
|
|
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
|
|
@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
|
|
clear_bit(WDM_POLL_RUNNING, &desc->flags);
|
|
dev_err(&desc->intf->dev,
|
|
"unknown notification %d received: index %d len %d\n",
|
|
- dr->bNotificationType, dr->wIndex, dr->wLength);
|
|
+ dr->bNotificationType,
|
|
+ le16_to_cpu(dr->wIndex),
|
|
+ le16_to_cpu(dr->wLength));
|
|
goto exit;
|
|
}
|
|
|
|
@@ -408,7 +410,7 @@ static ssize_t wdm_write
|
|
USB_RECIP_INTERFACE);
|
|
req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
|
|
req->wValue = 0;
|
|
- req->wIndex = desc->inum;
|
|
+ req->wIndex = desc->inum; /* already converted */
|
|
req->wLength = cpu_to_le16(count);
|
|
set_bit(WDM_IN_USE, &desc->flags);
|
|
desc->outbuf = buf;
|
|
@@ -422,7 +424,7 @@ static ssize_t wdm_write
|
|
rv = usb_translate_errors(rv);
|
|
} else {
|
|
dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
|
|
- req->wIndex);
|
|
+ le16_to_cpu(req->wIndex));
|
|
}
|
|
out:
|
|
usb_autopm_put_interface(desc->intf);
|
|
@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
|
|
desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
|
|
desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
|
|
desc->irq->wValue = 0;
|
|
- desc->irq->wIndex = desc->inum;
|
|
+ desc->irq->wIndex = desc->inum; /* already converted */
|
|
desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
|
|
|
|
usb_fill_control_urb(
|
|
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
|
|
index 684ef70..506b969 100644
|
|
--- a/drivers/usb/core/buffer.c
|
|
+++ b/drivers/usb/core/buffer.c
|
|
@@ -22,17 +22,25 @@
|
|
*/
|
|
|
|
/* FIXME tune these based on pool statistics ... */
|
|
-static const size_t pool_max[HCD_BUFFER_POOLS] = {
|
|
- /* platforms without dma-friendly caches might need to
|
|
- * prevent cacheline sharing...
|
|
- */
|
|
- 32,
|
|
- 128,
|
|
- 512,
|
|
- PAGE_SIZE / 2
|
|
- /* bigger --> allocate pages */
|
|
+static size_t pool_max[HCD_BUFFER_POOLS] = {
|
|
+ 32, 128, 512, 2048,
|
|
};
|
|
|
|
+void __init usb_init_pool_max(void)
|
|
+{
|
|
+ /*
|
|
+ * The pool_max values must never be smaller than
|
|
+ * ARCH_KMALLOC_MINALIGN.
|
|
+ */
|
|
+ if (ARCH_KMALLOC_MINALIGN <= 32)
|
|
+ ; /* Original value is okay */
|
|
+ else if (ARCH_KMALLOC_MINALIGN <= 64)
|
|
+ pool_max[0] = 64;
|
|
+ else if (ARCH_KMALLOC_MINALIGN <= 128)
|
|
+ pool_max[0] = 0; /* Don't use this pool */
|
|
+ else
|
|
+ BUILD_BUG(); /* We don't allow this */
|
|
+}
|
|
|
|
/* SETUP primitives */
|
|
|
|
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
|
|
index 90e18f6..8016aaa 100644
|
|
--- a/drivers/usb/core/devio.c
|
|
+++ b/drivers/usb/core/devio.c
|
|
@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
|
|
as->status = urb->status;
|
|
signr = as->signr;
|
|
if (signr) {
|
|
+ memset(&sinfo, 0, sizeof(sinfo));
|
|
sinfo.si_signo = as->signr;
|
|
sinfo.si_errno = as->status;
|
|
sinfo.si_code = SI_ASYNCIO;
|
|
@@ -512,7 +513,7 @@ static void async_completed(struct urb *urb)
|
|
snoop(&urb->dev->dev, "urb complete\n");
|
|
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
|
|
as->status, COMPLETE, NULL, 0);
|
|
- if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
|
|
+ if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
|
|
snoop_urb_data(urb, urb->actual_length);
|
|
|
|
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
|
|
@@ -1411,7 +1412,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
|
|
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
|
|
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
|
|
u |= URB_ISO_ASAP;
|
|
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
|
|
+ if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
|
|
u |= URB_SHORT_NOT_OK;
|
|
if (uurb->flags & USBDEVFS_URB_NO_FSBR)
|
|
u |= URB_NO_FSBR;
|
|
@@ -1590,7 +1591,7 @@ static struct async *reap_as(struct dev_state *ps)
|
|
for (;;) {
|
|
__set_current_state(TASK_INTERRUPTIBLE);
|
|
as = async_getcompleted(ps);
|
|
- if (as)
|
|
+ if (as || !connected(ps))
|
|
break;
|
|
if (signal_pending(current))
|
|
break;
|
|
@@ -1613,7 +1614,7 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
|
|
}
|
|
if (signal_pending(current))
|
|
return -EINTR;
|
|
- return -EIO;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
|
|
@@ -1622,10 +1623,11 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
|
|
struct async *as;
|
|
|
|
as = async_getcompleted(ps);
|
|
- retval = -EAGAIN;
|
|
if (as) {
|
|
retval = processcompl(as, (void __user * __user *)arg);
|
|
free_async(as);
|
|
+ } else {
|
|
+ retval = (connected(ps) ? -EAGAIN : -ENODEV);
|
|
}
|
|
return retval;
|
|
}
|
|
@@ -1755,7 +1757,7 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
|
|
}
|
|
if (signal_pending(current))
|
|
return -EINTR;
|
|
- return -EIO;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
|
|
@@ -1763,11 +1765,12 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
|
|
int retval;
|
|
struct async *as;
|
|
|
|
- retval = -EAGAIN;
|
|
as = async_getcompleted(ps);
|
|
if (as) {
|
|
retval = processcompl_compat(as, (void __user * __user *)arg);
|
|
free_async(as);
|
|
+ } else {
|
|
+ retval = (connected(ps) ? -EAGAIN : -ENODEV);
|
|
}
|
|
return retval;
|
|
}
|
|
@@ -1939,7 +1942,8 @@ static int proc_get_capabilities(struct dev_state *ps, void __user *arg)
|
|
{
|
|
__u32 caps;
|
|
|
|
- caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM;
|
|
+ caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
|
|
+ USBDEVFS_CAP_REAP_AFTER_DISCONNECT;
|
|
if (!ps->dev->bus->no_stop_on_short)
|
|
caps |= USBDEVFS_CAP_BULK_CONTINUATION;
|
|
if (ps->dev->bus->sg_tablesize)
|
|
@@ -2000,6 +2004,32 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
|
|
return -EPERM;
|
|
|
|
usb_lock_device(dev);
|
|
+
|
|
+ /* Reap operations are allowed even after disconnection */
|
|
+ switch (cmd) {
|
|
+ case USBDEVFS_REAPURB:
|
|
+ snoop(&dev->dev, "%s: REAPURB\n", __func__);
|
|
+ ret = proc_reapurb(ps, p);
|
|
+ goto done;
|
|
+
|
|
+ case USBDEVFS_REAPURBNDELAY:
|
|
+ snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
|
|
+ ret = proc_reapurbnonblock(ps, p);
|
|
+ goto done;
|
|
+
|
|
+#ifdef CONFIG_COMPAT
|
|
+ case USBDEVFS_REAPURB32:
|
|
+ snoop(&dev->dev, "%s: REAPURB32\n", __func__);
|
|
+ ret = proc_reapurb_compat(ps, p);
|
|
+ goto done;
|
|
+
|
|
+ case USBDEVFS_REAPURBNDELAY32:
|
|
+ snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
|
|
+ ret = proc_reapurbnonblock_compat(ps, p);
|
|
+ goto done;
|
|
+#endif
|
|
+ }
|
|
+
|
|
if (!connected(ps)) {
|
|
usb_unlock_device(dev);
|
|
return -ENODEV;
|
|
@@ -2093,16 +2123,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
|
|
inode->i_mtime = CURRENT_TIME;
|
|
break;
|
|
|
|
- case USBDEVFS_REAPURB32:
|
|
- snoop(&dev->dev, "%s: REAPURB32\n", __func__);
|
|
- ret = proc_reapurb_compat(ps, p);
|
|
- break;
|
|
-
|
|
- case USBDEVFS_REAPURBNDELAY32:
|
|
- snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
|
|
- ret = proc_reapurbnonblock_compat(ps, p);
|
|
- break;
|
|
-
|
|
case USBDEVFS_IOCTL32:
|
|
snoop(&dev->dev, "%s: IOCTL32\n", __func__);
|
|
ret = proc_ioctl_compat(ps, ptr_to_compat(p));
|
|
@@ -2114,16 +2134,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
|
|
ret = proc_unlinkurb(ps, p);
|
|
break;
|
|
|
|
- case USBDEVFS_REAPURB:
|
|
- snoop(&dev->dev, "%s: REAPURB\n", __func__);
|
|
- ret = proc_reapurb(ps, p);
|
|
- break;
|
|
-
|
|
- case USBDEVFS_REAPURBNDELAY:
|
|
- snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
|
|
- ret = proc_reapurbnonblock(ps, p);
|
|
- break;
|
|
-
|
|
case USBDEVFS_DISCSIGNAL:
|
|
snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
|
|
ret = proc_disconnectsignal(ps, p);
|
|
@@ -2160,6 +2170,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
|
|
ret = proc_disconnect_claim(ps, p);
|
|
break;
|
|
}
|
|
+
|
|
+ done:
|
|
usb_unlock_device(dev);
|
|
if (ret >= 0)
|
|
inode->i_atime = CURRENT_TIME;
|
|
@@ -2227,6 +2239,7 @@ static void usbdev_remove(struct usb_device *udev)
|
|
wake_up_all(&ps->wait);
|
|
list_del_init(&ps->list);
|
|
if (ps->discsignr) {
|
|
+ memset(&sinfo, 0, sizeof(sinfo));
|
|
sinfo.si_signo = ps->discsignr;
|
|
sinfo.si_errno = EPIPE;
|
|
sinfo.si_code = SI_ASYNCIO;
|
|
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
|
|
index 2518c32..ee6c556 100644
|
|
--- a/drivers/usb/core/hcd.c
|
|
+++ b/drivers/usb/core/hcd.c
|
|
@@ -1617,6 +1617,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
|
|
int usb_hcd_unlink_urb (struct urb *urb, int status)
|
|
{
|
|
struct usb_hcd *hcd;
|
|
+ struct usb_device *udev = urb->dev;
|
|
int retval = -EIDRM;
|
|
unsigned long flags;
|
|
|
|
@@ -1628,20 +1629,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
|
|
spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
|
|
if (atomic_read(&urb->use_count) > 0) {
|
|
retval = 0;
|
|
- usb_get_dev(urb->dev);
|
|
+ usb_get_dev(udev);
|
|
}
|
|
spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
|
|
if (retval == 0) {
|
|
hcd = bus_to_hcd(urb->dev->bus);
|
|
retval = unlink1(hcd, urb, status);
|
|
- usb_put_dev(urb->dev);
|
|
+ if (retval == 0)
|
|
+ retval = -EINPROGRESS;
|
|
+ else if (retval != -EIDRM && retval != -EBUSY)
|
|
+ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
|
|
+ urb, retval);
|
|
+ usb_put_dev(udev);
|
|
}
|
|
-
|
|
- if (retval == 0)
|
|
- retval = -EINPROGRESS;
|
|
- else if (retval != -EIDRM && retval != -EBUSY)
|
|
- dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
|
|
- urb, retval);
|
|
return retval;
|
|
}
|
|
|
|
@@ -2057,6 +2057,8 @@ int usb_alloc_streams(struct usb_interface *interface,
|
|
return -EINVAL;
|
|
if (dev->speed != USB_SPEED_SUPER)
|
|
return -EINVAL;
|
|
+ if (dev->state < USB_STATE_CONFIGURED)
|
|
+ return -ENODEV;
|
|
|
|
/* Streams only apply to bulk endpoints. */
|
|
for (i = 0; i < num_eps; i++)
|
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
|
index 5af4a66..e4e06ed 100644
|
|
--- a/drivers/usb/core/hub.c
|
|
+++ b/drivers/usb/core/hub.c
|
|
@@ -1695,8 +1695,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
|
* - Change autosuspend delay of hub can avoid unnecessary auto
|
|
* suspend timer for hub, also may decrease power consumption
|
|
* of USB bus.
|
|
+ *
|
|
+ * - If user has indicated to prevent autosuspend by passing
|
|
+ * usbcore.autosuspend = -1 then keep autosuspend disabled.
|
|
*/
|
|
- pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
|
|
+#ifdef CONFIG_PM_RUNTIME
|
|
+ if (hdev->dev.power.autosuspend_delay >= 0)
|
|
+ pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
|
|
+#endif
|
|
|
|
/*
|
|
* Hubs have proper suspend/resume support, except for root hubs
|
|
@@ -1942,8 +1948,10 @@ void usb_set_device_state(struct usb_device *udev,
|
|
|| new_state == USB_STATE_SUSPENDED)
|
|
; /* No change to wakeup settings */
|
|
else if (new_state == USB_STATE_CONFIGURED)
|
|
- wakeup = udev->actconfig->desc.bmAttributes
|
|
- & USB_CONFIG_ATT_WAKEUP;
|
|
+ wakeup = (udev->quirks &
|
|
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
|
|
+ udev->actconfig->desc.bmAttributes &
|
|
+ USB_CONFIG_ATT_WAKEUP;
|
|
else
|
|
wakeup = 0;
|
|
}
|
|
@@ -3174,6 +3182,43 @@ static int finish_port_resume(struct usb_device *udev)
|
|
}
|
|
|
|
/*
|
|
+ * There are some SS USB devices which take longer time for link training.
|
|
+ * XHCI specs 4.19.4 says that when Link training is successful, port
|
|
+ * sets CSC bit to 1. So if SW reads port status before successful link
|
|
+ * training, then it will not find device to be present.
|
|
+ * USB Analyzer log with such buggy devices show that in some cases
|
|
+ * device switch on the RX termination after long delay of host enabling
|
|
+ * the VBUS. In few other cases it has been seen that device fails to
|
|
+ * negotiate link training in first attempt. It has been
|
|
+ * reported till now that few devices take as long as 2000 ms to train
|
|
+ * the link after host enabling its VBUS and termination. Following
|
|
+ * routine implements a 2000 ms timeout for link training. If in a case
|
|
+ * link trains before timeout, loop will exit earlier.
|
|
+ *
|
|
+ * FIXME: If a device was connected before suspend, but was removed
|
|
+ * while system was asleep, then the loop in the following routine will
|
|
+ * only exit at timeout.
|
|
+ *
|
|
+ * This routine should only be called when persist is enabled for a SS
|
|
+ * device.
|
|
+ */
|
|
+static int wait_for_ss_port_enable(struct usb_device *udev,
|
|
+ struct usb_hub *hub, int *port1,
|
|
+ u16 *portchange, u16 *portstatus)
|
|
+{
|
|
+ int status = 0, delay_ms = 0;
|
|
+
|
|
+ while (delay_ms < 2000) {
|
|
+ if (status || *portstatus & USB_PORT_STAT_CONNECTION)
|
|
+ break;
|
|
+ msleep(20);
|
|
+ delay_ms += 20;
|
|
+ status = hub_port_status(hub, *port1, portstatus, portchange);
|
|
+ }
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/*
|
|
* usb_port_resume - re-activate a suspended usb device's upstream port
|
|
* @udev: device to re-activate, not a root hub
|
|
* Context: must be able to sleep; device not locked; pm locks held
|
|
@@ -3244,10 +3289,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
|
|
dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
|
|
port1, status);
|
|
} else {
|
|
- /* drive resume for at least 20 msec */
|
|
+ /* drive resume for USB_RESUME_TIMEOUT msec */
|
|
dev_dbg(&udev->dev, "usb %sresume\n",
|
|
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
|
|
- msleep(25);
|
|
+ msleep(USB_RESUME_TIMEOUT);
|
|
|
|
/* Virtual root hubs can trigger on GET_PORT_STATUS to
|
|
* stop resume signaling. Then finish the resume
|
|
@@ -3275,6 +3320,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
|
|
|
|
clear_bit(port1, hub->busy_bits);
|
|
|
|
+ if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
|
|
+ status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
|
|
+ &portstatus);
|
|
+
|
|
status = check_port_resume_type(udev,
|
|
hub, port1, status, portchange, portstatus);
|
|
if (status == 0)
|
|
@@ -4335,6 +4384,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
|
|
struct usb_qualifier_descriptor *qual;
|
|
int status;
|
|
|
|
+ if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
|
|
+ return;
|
|
+
|
|
qual = kmalloc (sizeof *qual, GFP_KERNEL);
|
|
if (qual == NULL)
|
|
return;
|
|
@@ -4722,9 +4774,10 @@ static void hub_events(void)
|
|
|
|
hub = list_entry(tmp, struct usb_hub, event_list);
|
|
kref_get(&hub->kref);
|
|
+ hdev = hub->hdev;
|
|
+ usb_get_dev(hdev);
|
|
spin_unlock_irq(&hub_event_lock);
|
|
|
|
- hdev = hub->hdev;
|
|
hub_dev = hub->intfdev;
|
|
intf = to_usb_interface(hub_dev);
|
|
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
|
|
@@ -4937,6 +4990,7 @@ static void hub_events(void)
|
|
usb_autopm_put_interface(intf);
|
|
loop_disconnected:
|
|
usb_unlock_device(hdev);
|
|
+ usb_put_dev(hdev);
|
|
kref_put(&hub->kref, hub_release);
|
|
|
|
} /* end while (1) */
|
|
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
|
|
index 739ee8e..b195fdb 100644
|
|
--- a/drivers/usb/core/quirks.c
|
|
+++ b/drivers/usb/core/quirks.c
|
|
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
|
|
/* Creative SB Audigy 2 NX */
|
|
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
|
|
|
|
+ /* Microsoft Wireless Laser Mouse 6000 Receiver */
|
|
+ { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
|
|
+
|
|
/* Microsoft LifeCam-VX700 v2.0 */
|
|
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
|
|
|
|
@@ -93,6 +96,16 @@ static const struct usb_device_id usb_quirk_list[] = {
|
|
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
|
|
USB_QUIRK_CONFIG_INTF_STRINGS },
|
|
|
|
+ /* Elan Touchscreen */
|
|
+ { USB_DEVICE(0x04f3, 0x0089), .driver_info =
|
|
+ USB_QUIRK_DEVICE_QUALIFIER },
|
|
+
|
|
+ { USB_DEVICE(0x04f3, 0x009b), .driver_info =
|
|
+ USB_QUIRK_DEVICE_QUALIFIER },
|
|
+
|
|
+ { USB_DEVICE(0x04f3, 0x016f), .driver_info =
|
|
+ USB_QUIRK_DEVICE_QUALIFIER },
|
|
+
|
|
/* Roland SC-8820 */
|
|
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
|
|
|
|
@@ -160,6 +173,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
|
|
.driver_info = USB_QUIRK_RESET_RESUME },
|
|
|
|
+ /* ASUS Base Station(T100) */
|
|
+ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
|
|
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
|
|
+
|
|
{ } /* terminating entry must be last */
|
|
};
|
|
|
|
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
|
|
index 4d11449..a922730 100644
|
|
--- a/drivers/usb/core/usb.c
|
|
+++ b/drivers/usb/core/usb.c
|
|
@@ -1050,6 +1050,7 @@ static int __init usb_init(void)
|
|
pr_info("%s: USB support disabled\n", usbcore_name);
|
|
return 0;
|
|
}
|
|
+ usb_init_pool_max();
|
|
|
|
retval = usb_debugfs_init();
|
|
if (retval)
|
|
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
|
|
index 4d918ed..0f99800 100644
|
|
--- a/drivers/usb/dwc2/hcd.c
|
|
+++ b/drivers/usb/dwc2/hcd.c
|
|
@@ -1501,7 +1501,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
|
|
dev_dbg(hsotg->dev,
|
|
"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
|
|
writel(0, hsotg->regs + PCGCTL);
|
|
- usleep_range(20000, 40000);
|
|
+ msleep(USB_RESUME_TIMEOUT);
|
|
|
|
hprt0 = dwc2_read_hprt0(hsotg);
|
|
hprt0 |= HPRT0_RES;
|
|
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
|
|
index a49217a..f074755 100644
|
|
--- a/drivers/usb/dwc3/core.c
|
|
+++ b/drivers/usb/dwc3/core.c
|
|
@@ -583,12 +583,6 @@ static int dwc3_remove(struct platform_device *pdev)
|
|
{
|
|
struct dwc3 *dwc = platform_get_drvdata(pdev);
|
|
|
|
- usb_phy_set_suspend(dwc->usb2_phy, 1);
|
|
- usb_phy_set_suspend(dwc->usb3_phy, 1);
|
|
-
|
|
- pm_runtime_put_sync(&pdev->dev);
|
|
- pm_runtime_disable(&pdev->dev);
|
|
-
|
|
dwc3_debugfs_exit(dwc);
|
|
|
|
switch (dwc->dr_mode) {
|
|
@@ -609,8 +603,15 @@ static int dwc3_remove(struct platform_device *pdev)
|
|
|
|
dwc3_event_buffers_cleanup(dwc);
|
|
dwc3_free_event_buffers(dwc);
|
|
+
|
|
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
|
|
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
|
|
+
|
|
dwc3_core_exit(dwc);
|
|
|
|
+ pm_runtime_put_sync(&pdev->dev);
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
|
|
index b269dbd..cfca302 100644
|
|
--- a/drivers/usb/dwc3/dwc3-omap.c
|
|
+++ b/drivers/usb/dwc3/dwc3-omap.c
|
|
@@ -211,6 +211,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
|
|
omap->irq0_offset, value);
|
|
}
|
|
|
|
+static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
|
|
+{
|
|
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
|
|
+ omap->irqmisc_offset, value);
|
|
+}
|
|
+
|
|
+static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
|
|
+{
|
|
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
|
|
+ omap->irq0_offset, value);
|
|
+}
|
|
+
|
|
static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
|
|
enum omap_dwc3_vbus_id_status status)
|
|
{
|
|
@@ -351,9 +363,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
|
|
|
|
static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
|
|
{
|
|
+ u32 reg;
|
|
+
|
|
/* disable all IRQs */
|
|
- dwc3_omap_write_irqmisc_set(omap, 0x00);
|
|
- dwc3_omap_write_irq0_set(omap, 0x00);
|
|
+ reg = USBOTGSS_IRQO_COREIRQ_ST;
|
|
+ dwc3_omap_write_irq0_clr(omap, reg);
|
|
+
|
|
+ reg = (USBOTGSS_IRQMISC_OEVT |
|
|
+ USBOTGSS_IRQMISC_DRVVBUS_RISE |
|
|
+ USBOTGSS_IRQMISC_CHRGVBUS_RISE |
|
|
+ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
|
|
+ USBOTGSS_IRQMISC_IDPULLUP_RISE |
|
|
+ USBOTGSS_IRQMISC_DRVVBUS_FALL |
|
|
+ USBOTGSS_IRQMISC_CHRGVBUS_FALL |
|
|
+ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
|
|
+ USBOTGSS_IRQMISC_IDPULLUP_FALL);
|
|
+
|
|
+ dwc3_omap_write_irqmisc_clr(omap, reg);
|
|
}
|
|
|
|
static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
|
|
@@ -582,9 +608,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
|
|
if (omap->extcon_id_dev.edev)
|
|
extcon_unregister_interest(&omap->extcon_id_dev);
|
|
dwc3_omap_disable_irqs(omap);
|
|
+ device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
|
|
pm_runtime_put_sync(&pdev->dev);
|
|
pm_runtime_disable(&pdev->dev);
|
|
- device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
|
|
index 21a3520..965c9ac 100644
|
|
--- a/drivers/usb/dwc3/ep0.c
|
|
+++ b/drivers/usb/dwc3/ep0.c
|
|
@@ -251,7 +251,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
|
|
|
|
/* stall is always issued on EP0 */
|
|
dep = dwc->eps[0];
|
|
- __dwc3_gadget_ep_set_halt(dep, 1);
|
|
+ __dwc3_gadget_ep_set_halt(dep, 1, false);
|
|
dep->flags = DWC3_EP_ENABLED;
|
|
dwc->delayed_status = false;
|
|
|
|
@@ -461,7 +461,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
|
|
return -EINVAL;
|
|
if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
|
|
break;
|
|
- ret = __dwc3_gadget_ep_set_halt(dep, set);
|
|
+ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
|
|
if (ret)
|
|
return -EINVAL;
|
|
break;
|
|
@@ -707,6 +707,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
|
|
dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
|
|
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
|
|
break;
|
|
+ case USB_REQ_SET_INTERFACE:
|
|
+ dev_vdbg(dwc->dev, "USB_REQ_SET_INTERFACE\n");
|
|
+ dwc->start_config_issued = false;
|
|
+ /* Fall through */
|
|
default:
|
|
dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
|
|
ret = dwc3_ep0_delegate_req(dwc, ctrl);
|
|
@@ -789,6 +793,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
|
|
unsigned maxp = ep0->endpoint.maxpacket;
|
|
|
|
transfer_size += (maxp - (transfer_size % maxp));
|
|
+
|
|
+ /* Maximum of DWC3_EP0_BOUNCE_SIZE can only be received */
|
|
+ if (transfer_size > DWC3_EP0_BOUNCE_SIZE)
|
|
+ transfer_size = DWC3_EP0_BOUNCE_SIZE;
|
|
+
|
|
transferred = min_t(u32, ur->length,
|
|
transfer_size - length);
|
|
memcpy(ur->buf, dwc->ep0_bounce, transferred);
|
|
@@ -901,11 +910,14 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
|
|
return;
|
|
}
|
|
|
|
- WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
|
|
-
|
|
maxpacket = dep->endpoint.maxpacket;
|
|
transfer_size = roundup(req->request.length, maxpacket);
|
|
|
|
+ if (transfer_size > DWC3_EP0_BOUNCE_SIZE) {
|
|
+ dev_WARN(dwc->dev, "bounce buf can't handle req len\n");
|
|
+ transfer_size = DWC3_EP0_BOUNCE_SIZE;
|
|
+ }
|
|
+
|
|
dwc->ep0_bounced = true;
|
|
|
|
/*
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index 09e9619..a57ad1f 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -299,6 +299,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
|
|
if (!(reg & DWC3_DGCMD_CMDACT)) {
|
|
dev_vdbg(dwc->dev, "Command Complete --> %d\n",
|
|
DWC3_DGCMD_STATUS(reg));
|
|
+ if (DWC3_DGCMD_STATUS(reg))
|
|
+ return -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
@@ -335,6 +337,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
|
|
if (!(reg & DWC3_DEPCMD_CMDACT)) {
|
|
dev_vdbg(dwc->dev, "Command Complete --> %d\n",
|
|
DWC3_DEPCMD_STATUS(reg));
|
|
+ if (DWC3_DEPCMD_STATUS(reg))
|
|
+ return -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
@@ -532,12 +536,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
|
|
if (!usb_endpoint_xfer_isoc(desc))
|
|
return 0;
|
|
|
|
- memset(&trb_link, 0, sizeof(trb_link));
|
|
-
|
|
/* Link TRB for ISOC. The HWO bit is never reset */
|
|
trb_st_hw = &dep->trb_pool[0];
|
|
|
|
trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
|
|
+ memset(trb_link, 0, sizeof(*trb_link));
|
|
|
|
trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
|
|
trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
|
|
@@ -588,7 +591,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
|
|
|
|
/* make sure HW endpoint isn't stalled */
|
|
if (dep->flags & DWC3_EP_STALL)
|
|
- __dwc3_gadget_ep_set_halt(dep, 0);
|
|
+ __dwc3_gadget_ep_set_halt(dep, 0, false);
|
|
|
|
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
|
|
reg &= ~DWC3_DALEPENA_EP(dep->number);
|
|
@@ -888,8 +891,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
|
|
|
|
if (i == (request->num_mapped_sgs - 1) ||
|
|
sg_is_last(s)) {
|
|
- if (list_is_last(&req->list,
|
|
- &dep->request_list))
|
|
+ if (list_empty(&dep->request_list))
|
|
last_one = true;
|
|
chain = false;
|
|
}
|
|
@@ -907,6 +909,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
|
|
if (last_one)
|
|
break;
|
|
}
|
|
+
|
|
+ if (last_one)
|
|
+ break;
|
|
} else {
|
|
dma = req->request.dma;
|
|
length = req->request.length;
|
|
@@ -1186,7 +1191,7 @@ out0:
|
|
return ret;
|
|
}
|
|
|
|
-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
|
|
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
|
|
{
|
|
struct dwc3_gadget_ep_cmd_params params;
|
|
struct dwc3 *dwc = dep->dwc;
|
|
@@ -1195,6 +1200,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
|
|
memset(¶ms, 0x00, sizeof(params));
|
|
|
|
if (value) {
|
|
+ if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
|
|
+ (!list_empty(&dep->req_queued) ||
|
|
+ !list_empty(&dep->request_list)))) {
|
|
+ dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
|
|
+ dep->name);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
|
|
DWC3_DEPCMD_SETSTALL, ¶ms);
|
|
if (ret)
|
|
@@ -1234,7 +1247,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
|
|
goto out;
|
|
}
|
|
|
|
- ret = __dwc3_gadget_ep_set_halt(dep, value);
|
|
+ ret = __dwc3_gadget_ep_set_halt(dep, value, false);
|
|
out:
|
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
|
|
|
@@ -1254,7 +1267,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
|
|
if (dep->number == 0 || dep->number == 1)
|
|
return dwc3_gadget_ep0_set_halt(ep, 1);
|
|
else
|
|
- return dwc3_gadget_ep_set_halt(ep, 1);
|
|
+ return __dwc3_gadget_ep_set_halt(dep, 1, false);
|
|
}
|
|
|
|
/* -------------------------------------------------------------------------- */
|
|
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
|
|
index a0ee75b..ac625582 100644
|
|
--- a/drivers/usb/dwc3/gadget.h
|
|
+++ b/drivers/usb/dwc3/gadget.h
|
|
@@ -85,7 +85,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc);
|
|
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
|
|
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
|
|
gfp_t gfp_flags);
|
|
-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
|
|
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
|
|
|
|
/**
|
|
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
|
|
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
|
|
index ebaae9a..a6101e0 100644
|
|
--- a/drivers/usb/gadget/Kconfig
|
|
+++ b/drivers/usb/gadget/Kconfig
|
|
@@ -445,7 +445,7 @@ config USB_GOKU
|
|
gadget drivers to also be dynamically linked.
|
|
|
|
config USB_EG20T
|
|
- tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
|
|
+ tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
|
|
depends on PCI
|
|
help
|
|
This is a USB device driver for EG20T PCH.
|
|
@@ -466,6 +466,7 @@ config USB_EG20T
|
|
ML7213/ML7831 is companion chip for Intel Atom E6xx series.
|
|
ML7213/ML7831 is completely compatible for Intel EG20T PCH.
|
|
|
|
+ This driver can be used with Intel's Quark X1000 SOC platform
|
|
#
|
|
# LAST -- dummy/emulated controller
|
|
#
|
|
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
|
|
index d742bed..82df926 100644
|
|
--- a/drivers/usb/gadget/composite.c
|
|
+++ b/drivers/usb/gadget/composite.c
|
|
@@ -528,7 +528,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
|
|
usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
|
|
usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
|
|
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
|
|
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
|
|
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
|
|
|
|
/*
|
|
* The Superspeed USB Capability descriptor shall be implemented by all
|
|
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
|
|
index 7d1cc01..3740a3f 100644
|
|
--- a/drivers/usb/gadget/configfs.c
|
|
+++ b/drivers/usb/gadget/configfs.c
|
|
@@ -765,6 +765,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
|
|
}
|
|
}
|
|
c->next_interface_id = 0;
|
|
+ memset(c->interface, 0, sizeof(c->interface));
|
|
c->superspeed = 0;
|
|
c->highspeed = 0;
|
|
c->fullspeed = 0;
|
|
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
|
|
index ab1065a..3384486 100644
|
|
--- a/drivers/usb/gadget/f_acm.c
|
|
+++ b/drivers/usb/gadget/f_acm.c
|
|
@@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
|
|
if (acm->notify->driver_data) {
|
|
VDBG(cdev, "reset acm control interface %d\n", intf);
|
|
usb_ep_disable(acm->notify);
|
|
- } else {
|
|
- VDBG(cdev, "init acm ctrl interface %d\n", intf);
|
|
+ }
|
|
+
|
|
+ if (!acm->notify->desc)
|
|
if (config_ep_by_speed(cdev->gadget, f, acm->notify))
|
|
return -EINVAL;
|
|
- }
|
|
+
|
|
usb_ep_enable(acm->notify);
|
|
acm->notify->driver_data = acm;
|
|
|
|
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
|
|
index 5bcf7d0..afd0a15 100644
|
|
--- a/drivers/usb/gadget/f_fs.c
|
|
+++ b/drivers/usb/gadget/f_fs.c
|
|
@@ -1995,8 +1995,6 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
|
|
func->conf = c;
|
|
func->gadget = c->cdev->gadget;
|
|
|
|
- ffs_data_get(func->ffs);
|
|
-
|
|
/*
|
|
* in drivers/usb/gadget/configfs.c:configfs_composite_bind()
|
|
* configurations are bound in sequence with list_for_each_entry,
|
|
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
|
|
index eb8c3be..460d953 100644
|
|
--- a/drivers/usb/gadget/pch_udc.c
|
|
+++ b/drivers/usb/gadget/pch_udc.c
|
|
@@ -343,6 +343,7 @@ struct pch_vbus_gpio_data {
|
|
* @setup_data: Received setup data
|
|
* @phys_addr: of device memory
|
|
* @base_addr: for mapped device memory
|
|
+ * @bar: Indicates which PCI BAR for USB regs
|
|
* @irq: IRQ line for the device
|
|
* @cfg_data: current cfg, intf, and alt in use
|
|
* @vbus_gpio: GPIO informaton for detecting VBUS
|
|
@@ -370,14 +371,17 @@ struct pch_udc_dev {
|
|
struct usb_ctrlrequest setup_data;
|
|
unsigned long phys_addr;
|
|
void __iomem *base_addr;
|
|
+ unsigned bar;
|
|
unsigned irq;
|
|
struct pch_udc_cfg_data cfg_data;
|
|
struct pch_vbus_gpio_data vbus_gpio;
|
|
};
|
|
#define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
|
|
|
|
+#define PCH_UDC_PCI_BAR_QUARK_X1000 0
|
|
#define PCH_UDC_PCI_BAR 1
|
|
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
|
|
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
|
|
#define PCI_VENDOR_ID_ROHM 0x10DB
|
|
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
|
|
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
|
|
@@ -3076,7 +3080,7 @@ static void pch_udc_remove(struct pci_dev *pdev)
|
|
iounmap(dev->base_addr);
|
|
if (dev->mem_region)
|
|
release_mem_region(dev->phys_addr,
|
|
- pci_resource_len(pdev, PCH_UDC_PCI_BAR));
|
|
+ pci_resource_len(pdev, dev->bar));
|
|
if (dev->active)
|
|
pci_disable_device(pdev);
|
|
kfree(dev);
|
|
@@ -3144,9 +3148,15 @@ static int pch_udc_probe(struct pci_dev *pdev,
|
|
dev->active = 1;
|
|
pci_set_drvdata(pdev, dev);
|
|
|
|
+ /* Determine BAR based on PCI ID */
|
|
+ if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
|
|
+ dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
|
|
+ else
|
|
+ dev->bar = PCH_UDC_PCI_BAR;
|
|
+
|
|
/* PCI resource allocation */
|
|
- resource = pci_resource_start(pdev, 1);
|
|
- len = pci_resource_len(pdev, 1);
|
|
+ resource = pci_resource_start(pdev, dev->bar);
|
|
+ len = pci_resource_len(pdev, dev->bar);
|
|
|
|
if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
|
|
dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
|
|
@@ -3212,6 +3222,12 @@ finished:
|
|
|
|
static const struct pci_device_id pch_udc_pcidev_id[] = {
|
|
{
|
|
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
|
|
+ PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
|
|
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
|
|
+ .class_mask = 0xffffffff,
|
|
+ },
|
|
+ {
|
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
|
|
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
|
|
.class_mask = 0xffffffff,
|
|
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
|
|
index 69b76ef..44e698c 100644
|
|
--- a/drivers/usb/gadget/printer.c
|
|
+++ b/drivers/usb/gadget/printer.c
|
|
@@ -975,6 +975,15 @@ unknown:
|
|
break;
|
|
}
|
|
/* host either stalls (value < 0) or reports success */
|
|
+ if (value >= 0) {
|
|
+ req->length = value;
|
|
+ req->zero = value < wLength;
|
|
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
|
|
+ if (value < 0) {
|
|
+ ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
|
|
+ req->status = 0;
|
|
+ }
|
|
+ }
|
|
return value;
|
|
}
|
|
|
|
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
|
|
index 27768a7..9ce0b13 100644
|
|
--- a/drivers/usb/gadget/udc-core.c
|
|
+++ b/drivers/usb/gadget/udc-core.c
|
|
@@ -456,6 +456,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
|
|
{
|
|
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
|
|
|
|
+ if (!udc->driver) {
|
|
+ dev_err(dev, "soft-connect without a gadget driver\n");
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
if (sysfs_streq(buf, "connect")) {
|
|
usb_gadget_udc_start(udc->gadget, udc->driver);
|
|
usb_gadget_connect(udc->gadget);
|
|
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
|
|
index 142ebd8..1e0b9b9 100644
|
|
--- a/drivers/usb/host/ehci-hcd.c
|
|
+++ b/drivers/usb/host/ehci-hcd.c
|
|
@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
|
|
ehci->reset_done[i] == 0))
|
|
continue;
|
|
|
|
- /* start 20 msec resume signaling from this port,
|
|
- * and make khubd collect PORT_STAT_C_SUSPEND to
|
|
- * stop that signaling. Use 5 ms extra for safety,
|
|
- * like usb_port_resume() does.
|
|
+ /* start USB_RESUME_TIMEOUT msec resume signaling from
|
|
+ * this port, and make hub_wq collect
|
|
+ * PORT_STAT_C_SUSPEND to stop that signaling.
|
|
*/
|
|
- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
|
|
+ ehci->reset_done[i] = jiffies +
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
set_bit(i, &ehci->resuming_ports);
|
|
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
|
|
usb_hcd_start_port_resume(&hcd->self, i);
|
|
@@ -970,8 +970,6 @@ rescan:
|
|
}
|
|
|
|
qh->exception = 1;
|
|
- if (ehci->rh_state < EHCI_RH_RUNNING)
|
|
- qh->qh_state = QH_STATE_IDLE;
|
|
switch (qh->qh_state) {
|
|
case QH_STATE_LINKED:
|
|
WARN_ON(!list_empty(&qh->qtd_list));
|
|
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
|
|
index de63775..857953c 100644
|
|
--- a/drivers/usb/host/ehci-hub.c
|
|
+++ b/drivers/usb/host/ehci-hub.c
|
|
@@ -491,10 +491,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
|
|
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
|
|
}
|
|
|
|
- /* msleep for 20ms only if code is trying to resume port */
|
|
+ /*
|
|
+ * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
|
|
+ * port
|
|
+ */
|
|
if (resume_needed) {
|
|
spin_unlock_irq(&ehci->lock);
|
|
- msleep(20);
|
|
+ msleep(USB_RESUME_TIMEOUT);
|
|
spin_lock_irq(&ehci->lock);
|
|
if (ehci->shutdown)
|
|
goto shutdown;
|
|
@@ -962,7 +965,7 @@ static int ehci_hub_control (
|
|
temp &= ~PORT_WAKE_BITS;
|
|
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
|
|
ehci->reset_done[wIndex] = jiffies
|
|
- + msecs_to_jiffies(20);
|
|
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
set_bit(wIndex, &ehci->resuming_ports);
|
|
usb_hcd_start_port_resume(&hcd->self, wIndex);
|
|
break;
|
|
@@ -1248,7 +1251,7 @@ static int ehci_hub_control (
|
|
if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
|
|
spin_unlock_irqrestore(&ehci->lock, flags);
|
|
retval = ehset_single_step_set_feature(hcd,
|
|
- wIndex);
|
|
+ wIndex + 1);
|
|
spin_lock_irqsave(&ehci->lock, flags);
|
|
break;
|
|
}
|
|
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
|
|
index 3e86bf4..ca7b964 100644
|
|
--- a/drivers/usb/host/ehci-pci.c
|
|
+++ b/drivers/usb/host/ehci-pci.c
|
|
@@ -35,6 +35,21 @@ static const char hcd_name[] = "ehci-pci";
|
|
#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
|
|
+static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
|
|
+{
|
|
+ return pdev->vendor == PCI_VENDOR_ID_INTEL &&
|
|
+ pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * 0x84 is the offset of in/out threshold register,
|
|
+ * and it is the same offset as the register of 'hostpc'.
|
|
+ */
|
|
+#define intel_quark_x1000_insnreg01 hostpc
|
|
+
|
|
+/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
|
|
+#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f
|
|
|
|
/* called after powerup, by probe or system-pm "wakeup" */
|
|
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
|
|
@@ -50,6 +65,16 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
|
|
if (!retval)
|
|
ehci_dbg(ehci, "MWI active\n");
|
|
|
|
+ /* Reset the threshold limit */
|
|
+ if (is_intel_quark_x1000(pdev)) {
|
|
+ /*
|
|
+ * For the Intel QUARK X1000, raise the I/O threshold to the
|
|
+ * maximum usable value in order to improve performance.
|
|
+ */
|
|
+ ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
|
|
+ ehci->regs->intel_quark_x1000_insnreg01);
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
|
|
index e113fd7..c399606 100644
|
|
--- a/drivers/usb/host/ehci-sched.c
|
|
+++ b/drivers/usb/host/ehci-sched.c
|
|
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
|
|
else
|
|
next = (now + 2 + 7) & ~0x07; /* full frame cache */
|
|
|
|
+ /* If needed, initialize last_iso_frame so that this URB will be seen */
|
|
+ if (ehci->isoc_count == 0)
|
|
+ ehci->last_iso_frame = now >> 3;
|
|
+
|
|
/*
|
|
* Use ehci->last_iso_frame as the base. There can't be any
|
|
* TDs scheduled for earlier than that.
|
|
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
|
|
urb->start_frame = start & (mod - 1);
|
|
if (!stream->highspeed)
|
|
urb->start_frame >>= 3;
|
|
-
|
|
- /* Make sure scan_isoc() sees these */
|
|
- if (ehci->isoc_count == 0)
|
|
- ehci->last_iso_frame = now >> 3;
|
|
return status;
|
|
|
|
fail:
|
|
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
|
|
index f6459df..94054da 100644
|
|
--- a/drivers/usb/host/ehci-sysfs.c
|
|
+++ b/drivers/usb/host/ehci-sysfs.c
|
|
@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
|
|
int count = PAGE_SIZE;
|
|
char *ptr = buf;
|
|
|
|
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
|
|
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
|
|
nports = HCS_N_PORTS(ehci->hcs_params);
|
|
|
|
for (index = 0; index < nports; ++index) {
|
|
@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
|
|
struct ehci_hcd *ehci;
|
|
int portnum, new_owner;
|
|
|
|
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
|
|
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
|
|
new_owner = PORT_OWNER; /* Owned by companion */
|
|
if (sscanf(buf, "%d", &portnum) != 1)
|
|
return -EINVAL;
|
|
@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
|
|
struct ehci_hcd *ehci;
|
|
int n;
|
|
|
|
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
|
|
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
|
|
n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
|
|
return n;
|
|
}
|
|
@@ -101,7 +101,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
|
|
unsigned long flags;
|
|
ssize_t ret;
|
|
|
|
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
|
|
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
|
|
if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
|
|
return -EINVAL;
|
|
|
|
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
|
|
index 98a89d1..8aa4ba0 100644
|
|
--- a/drivers/usb/host/fotg210-hcd.c
|
|
+++ b/drivers/usb/host/fotg210-hcd.c
|
|
@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
|
|
/* resume signaling for 20 msec */
|
|
fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
|
|
fotg210->reset_done[wIndex] = jiffies
|
|
- + msecs_to_jiffies(20);
|
|
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
break;
|
|
case USB_PORT_FEAT_C_SUSPEND:
|
|
clear_bit(wIndex, &fotg210->port_c_suspend);
|
|
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
|
|
index ba94990..3e3926a 100644
|
|
--- a/drivers/usb/host/fusbh200-hcd.c
|
|
+++ b/drivers/usb/host/fusbh200-hcd.c
|
|
@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
|
|
if ((temp & PORT_PE) == 0)
|
|
goto error;
|
|
|
|
- /* resume signaling for 20 msec */
|
|
fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
|
|
fusbh200->reset_done[wIndex] = jiffies
|
|
- + msecs_to_jiffies(20);
|
|
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
break;
|
|
case USB_PORT_FEAT_C_SUSPEND:
|
|
clear_bit(wIndex, &fusbh200->port_c_suspend);
|
|
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
|
|
index 240e792..b62298f 100644
|
|
--- a/drivers/usb/host/isp116x-hcd.c
|
|
+++ b/drivers/usb/host/isp116x-hcd.c
|
|
@@ -1487,7 +1487,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
|
|
spin_unlock_irq(&isp116x->lock);
|
|
|
|
hcd->state = HC_STATE_RESUMING;
|
|
- msleep(20);
|
|
+ msleep(USB_RESUME_TIMEOUT);
|
|
|
|
/* Go operational */
|
|
spin_lock_irq(&isp116x->lock);
|
|
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
|
|
index 45032e9..04f2186 100644
|
|
--- a/drivers/usb/host/ohci-dbg.c
|
|
+++ b/drivers/usb/host/ohci-dbg.c
|
|
@@ -236,7 +236,7 @@ ohci_dump_roothub (
|
|
}
|
|
}
|
|
|
|
-static void ohci_dump (struct ohci_hcd *controller, int verbose)
|
|
+static void ohci_dump(struct ohci_hcd *controller)
|
|
{
|
|
ohci_dbg (controller, "OHCI controller state\n");
|
|
|
|
@@ -464,15 +464,16 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
|
|
static ssize_t fill_async_buffer(struct debug_buffer *buf)
|
|
{
|
|
struct ohci_hcd *ohci;
|
|
- size_t temp;
|
|
+ size_t temp, size;
|
|
unsigned long flags;
|
|
|
|
ohci = buf->ohci;
|
|
+ size = PAGE_SIZE;
|
|
|
|
/* display control and bulk lists together, for simplicity */
|
|
spin_lock_irqsave (&ohci->lock, flags);
|
|
- temp = show_list(ohci, buf->page, buf->count, ohci->ed_controltail);
|
|
- temp += show_list(ohci, buf->page + temp, buf->count - temp,
|
|
+ temp = show_list(ohci, buf->page, size, ohci->ed_controltail);
|
|
+ temp += show_list(ohci, buf->page + temp, size - temp,
|
|
ohci->ed_bulktail);
|
|
spin_unlock_irqrestore (&ohci->lock, flags);
|
|
|
|
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
|
|
index 3586460..34fc86c 100644
|
|
--- a/drivers/usb/host/ohci-hcd.c
|
|
+++ b/drivers/usb/host/ohci-hcd.c
|
|
@@ -76,8 +76,8 @@ static const char hcd_name [] = "ohci_hcd";
|
|
#include "ohci.h"
|
|
#include "pci-quirks.h"
|
|
|
|
-static void ohci_dump (struct ohci_hcd *ohci, int verbose);
|
|
-static void ohci_stop (struct usb_hcd *hcd);
|
|
+static void ohci_dump(struct ohci_hcd *ohci);
|
|
+static void ohci_stop(struct usb_hcd *hcd);
|
|
|
|
#include "ohci-hub.c"
|
|
#include "ohci-dbg.c"
|
|
@@ -744,7 +744,7 @@ retry:
|
|
ohci->ed_to_check = NULL;
|
|
}
|
|
|
|
- ohci_dump (ohci, 1);
|
|
+ ohci_dump(ohci);
|
|
|
|
return 0;
|
|
}
|
|
@@ -825,7 +825,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
|
|
usb_hc_died(hcd);
|
|
}
|
|
|
|
- ohci_dump (ohci, 1);
|
|
+ ohci_dump(ohci);
|
|
ohci_usb_reset (ohci);
|
|
}
|
|
|
|
@@ -925,7 +925,7 @@ static void ohci_stop (struct usb_hcd *hcd)
|
|
{
|
|
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
|
|
|
|
- ohci_dump (ohci, 1);
|
|
+ ohci_dump(ohci);
|
|
|
|
if (quirk_nec(ohci))
|
|
flush_work(&ohci->nec_work);
|
|
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
|
|
index d4253e3..a8bde5b 100644
|
|
--- a/drivers/usb/host/ohci-q.c
|
|
+++ b/drivers/usb/host/ohci-q.c
|
|
@@ -311,8 +311,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
|
|
* - ED_OPER: when there's any request queued, the ED gets rescheduled
|
|
* immediately. HC should be working on them.
|
|
*
|
|
- * - ED_IDLE: when there's no TD queue. there's no reason for the HC
|
|
- * to care about this ED; safe to disable the endpoint.
|
|
+ * - ED_IDLE: when there's no TD queue or the HC isn't running.
|
|
*
|
|
* When finish_unlinks() runs later, after SOF interrupt, it will often
|
|
* complete one or more URB unlinks before making that state change.
|
|
@@ -926,6 +925,10 @@ rescan_all:
|
|
int completed, modified;
|
|
__hc32 *prev;
|
|
|
|
+ /* Is this ED already invisible to the hardware? */
|
|
+ if (ed->state == ED_IDLE)
|
|
+ goto ed_idle;
|
|
+
|
|
/* only take off EDs that the HC isn't using, accounting for
|
|
* frame counter wraps and EDs with partially retired TDs
|
|
*/
|
|
@@ -955,12 +958,20 @@ skip_ed:
|
|
}
|
|
}
|
|
|
|
+ /* ED's now officially unlinked, hc doesn't see */
|
|
+ ed->state = ED_IDLE;
|
|
+ if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
|
|
+ ohci->eds_scheduled--;
|
|
+ ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
|
|
+ ed->hwNextED = 0;
|
|
+ wmb();
|
|
+ ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
|
|
+ed_idle:
|
|
+
|
|
/* reentrancy: if we drop the schedule lock, someone might
|
|
* have modified this list. normally it's just prepending
|
|
* entries (which we'd ignore), but paranoia won't hurt.
|
|
*/
|
|
- *last = ed->ed_next;
|
|
- ed->ed_next = NULL;
|
|
modified = 0;
|
|
|
|
/* unlink urbs as requested, but rescan the list after
|
|
@@ -1018,19 +1029,20 @@ rescan_this:
|
|
if (completed && !list_empty (&ed->td_list))
|
|
goto rescan_this;
|
|
|
|
- /* ED's now officially unlinked, hc doesn't see */
|
|
- ed->state = ED_IDLE;
|
|
- if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
|
|
- ohci->eds_scheduled--;
|
|
- ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
|
|
- ed->hwNextED = 0;
|
|
- wmb ();
|
|
- ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
|
|
-
|
|
- /* but if there's work queued, reschedule */
|
|
- if (!list_empty (&ed->td_list)) {
|
|
- if (ohci->rh_state == OHCI_RH_RUNNING)
|
|
- ed_schedule (ohci, ed);
|
|
+ /*
|
|
+ * If no TDs are queued, take ED off the ed_rm_list.
|
|
+ * Otherwise, if the HC is running, reschedule.
|
|
+ * If not, leave it on the list for further dequeues.
|
|
+ */
|
|
+ if (list_empty(&ed->td_list)) {
|
|
+ *last = ed->ed_next;
|
|
+ ed->ed_next = NULL;
|
|
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
|
|
+ *last = ed->ed_next;
|
|
+ ed->ed_next = NULL;
|
|
+ ed_schedule(ohci, ed);
|
|
+ } else {
|
|
+ last = &ed->ed_next;
|
|
}
|
|
|
|
if (modified)
|
|
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
|
|
index e07248b..1b1e6e0 100644
|
|
--- a/drivers/usb/host/oxu210hp-hcd.c
|
|
+++ b/drivers/usb/host/oxu210hp-hcd.c
|
|
@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
|
|
|| oxu->reset_done[i] != 0)
|
|
continue;
|
|
|
|
- /* start 20 msec resume signaling from this port,
|
|
- * and make khubd collect PORT_STAT_C_SUSPEND to
|
|
+ /* start USB_RESUME_TIMEOUT resume signaling from this
|
|
+ * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
|
|
* stop that signaling.
|
|
*/
|
|
- oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
|
|
+ oxu->reset_done[i] = jiffies +
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
|
|
mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
|
|
}
|
|
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
|
|
index 2f3aceb..f4e6b94 100644
|
|
--- a/drivers/usb/host/pci-quirks.c
|
|
+++ b/drivers/usb/host/pci-quirks.c
|
|
@@ -571,7 +571,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
|
|
{
|
|
void __iomem *base;
|
|
u32 control;
|
|
- u32 fminterval;
|
|
+ u32 fminterval = 0;
|
|
+ bool no_fminterval = false;
|
|
int cnt;
|
|
|
|
if (!mmio_resource_enabled(pdev, 0))
|
|
@@ -581,6 +582,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
|
|
if (base == NULL)
|
|
return;
|
|
|
|
+ /*
|
|
+ * ULi M5237 OHCI controller locks the whole system when accessing
|
|
+ * the OHCI_FMINTERVAL offset.
|
|
+ */
|
|
+ if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
|
|
+ no_fminterval = true;
|
|
+
|
|
control = readl(base + OHCI_CONTROL);
|
|
|
|
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
|
|
@@ -619,7 +627,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
|
|
}
|
|
|
|
/* software reset of the controller, preserving HcFmInterval */
|
|
- fminterval = readl(base + OHCI_FMINTERVAL);
|
|
+ if (!no_fminterval)
|
|
+ fminterval = readl(base + OHCI_FMINTERVAL);
|
|
+
|
|
writel(OHCI_HCR, base + OHCI_CMDSTATUS);
|
|
|
|
/* reset requires max 10 us delay */
|
|
@@ -628,7 +638,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
|
|
break;
|
|
udelay(1);
|
|
}
|
|
- writel(fminterval, base + OHCI_FMINTERVAL);
|
|
+
|
|
+ if (!no_fminterval)
|
|
+ writel(fminterval, base + OHCI_FMINTERVAL);
|
|
|
|
/* Now the controller is safely in SUSPEND and nothing can wake it up */
|
|
iounmap(base);
|
|
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
|
|
index 110b4b9..f130bb2 100644
|
|
--- a/drivers/usb/host/r8a66597-hcd.c
|
|
+++ b/drivers/usb/host/r8a66597-hcd.c
|
|
@@ -2300,7 +2300,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
|
|
rh->port &= ~USB_PORT_STAT_SUSPEND;
|
|
rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
|
|
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
|
|
- msleep(50);
|
|
+ msleep(USB_RESUME_TIMEOUT);
|
|
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
|
|
}
|
|
|
|
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
|
|
index a517151..0f53cc8 100644
|
|
--- a/drivers/usb/host/sl811-hcd.c
|
|
+++ b/drivers/usb/host/sl811-hcd.c
|
|
@@ -1259,7 +1259,7 @@ sl811h_hub_control(
|
|
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
|
|
|
|
mod_timer(&sl811->timer, jiffies
|
|
- + msecs_to_jiffies(20));
|
|
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
|
|
break;
|
|
case USB_PORT_FEAT_POWER:
|
|
port_power(sl811, 0);
|
|
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
|
|
index 93e17b1..98c66d8 100644
|
|
--- a/drivers/usb/host/uhci-hub.c
|
|
+++ b/drivers/usb/host/uhci-hub.c
|
|
@@ -165,7 +165,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
|
|
/* Port received a wakeup request */
|
|
set_bit(port, &uhci->resuming_ports);
|
|
uhci->ports_timeout = jiffies +
|
|
- msecs_to_jiffies(25);
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
usb_hcd_start_port_resume(
|
|
&uhci_to_hcd(uhci)->self, port);
|
|
|
|
@@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
|
|
uhci_finish_suspend(uhci, port, port_addr);
|
|
|
|
/* USB v2.0 7.1.7.5 */
|
|
- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
|
|
+ uhci->ports_timeout = jiffies +
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
break;
|
|
case USB_PORT_FEAT_POWER:
|
|
/* UHCI has no power switching */
|
|
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
|
|
index 9992fbf..5c95765 100644
|
|
--- a/drivers/usb/host/xhci-hub.c
|
|
+++ b/drivers/usb/host/xhci-hub.c
|
|
@@ -383,6 +383,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
|
|
status = PORT_PLC;
|
|
port_change_bit = "link state";
|
|
break;
|
|
+ case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
|
|
+ status = PORT_CEC;
|
|
+ port_change_bit = "config error";
|
|
+ break;
|
|
default:
|
|
/* Should never happen */
|
|
return;
|
|
@@ -470,15 +474,19 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
|
|
}
|
|
|
|
/* Updates Link Status for super Speed port */
|
|
-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
|
|
+static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
|
|
+ u32 *status, u32 status_reg)
|
|
{
|
|
u32 pls = status_reg & PORT_PLS_MASK;
|
|
|
|
/* resume state is a xHCI internal state.
|
|
- * Do not report it to usb core.
|
|
+ * Do not report it to usb core, instead, pretend to be U3,
|
|
+ * thus usb core knows it's not ready for transfer
|
|
*/
|
|
- if (pls == XDEV_RESUME)
|
|
+ if (pls == XDEV_RESUME) {
|
|
+ *status |= USB_SS_PORT_LS_U3;
|
|
return;
|
|
+ }
|
|
|
|
/* When the CAS bit is set then warm reset
|
|
* should be performed on port
|
|
@@ -509,7 +517,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
|
|
* in which sometimes the port enters compliance mode
|
|
* caused by a delay on the host-device negotiation.
|
|
*/
|
|
- if (pls == USB_SS_PORT_LS_COMP_MOD)
|
|
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
|
|
+ (pls == USB_SS_PORT_LS_COMP_MOD))
|
|
pls |= USB_PORT_STAT_CONNECTION;
|
|
}
|
|
|
|
@@ -578,10 +587,19 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
|
|
status |= USB_PORT_STAT_C_RESET << 16;
|
|
/* USB3.0 only */
|
|
if (hcd->speed == HCD_USB3) {
|
|
- if ((raw_port_status & PORT_PLC))
|
|
+ /* Port link change with port in resume state should not be
|
|
+ * reported to usbcore, as this is an internal state to be
|
|
+ * handled by xhci driver. Reporting PLC to usbcore may
|
|
+ * cause usbcore clearing PLC first and port change event
|
|
+ * irq won't be generated.
|
|
+ */
|
|
+ if ((raw_port_status & PORT_PLC) &&
|
|
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
|
|
status |= USB_PORT_STAT_C_LINK_STATE << 16;
|
|
if ((raw_port_status & PORT_WRC))
|
|
status |= USB_PORT_STAT_C_BH_RESET << 16;
|
|
+ if ((raw_port_status & PORT_CEC))
|
|
+ status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
|
|
}
|
|
|
|
if (hcd->speed != HCD_USB3) {
|
|
@@ -668,7 +686,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
|
|
}
|
|
/* Update Port Link State */
|
|
if (hcd->speed == HCD_USB3) {
|
|
- xhci_hub_report_usb3_link_state(&status, raw_port_status);
|
|
+ xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
|
|
/*
|
|
* Verify if all USB3 Ports Have entered U0 already.
|
|
* Delete Compliance Mode Timer if so.
|
|
@@ -997,6 +1015,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
|
|
case USB_PORT_FEAT_C_OVER_CURRENT:
|
|
case USB_PORT_FEAT_C_ENABLE:
|
|
case USB_PORT_FEAT_C_PORT_LINK_STATE:
|
|
+ case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
|
|
xhci_clear_port_change_bit(xhci, wValue, wIndex,
|
|
port_array[wIndex], temp);
|
|
break;
|
|
@@ -1061,7 +1080,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
|
|
*/
|
|
status = bus_state->resuming_ports;
|
|
|
|
- mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
|
|
+ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
|
|
|
|
spin_lock_irqsave(&xhci->lock, flags);
|
|
/* For each port, did anything change? If so, set that bit in buf. */
|
|
@@ -1105,10 +1124,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
|
|
spin_lock_irqsave(&xhci->lock, flags);
|
|
|
|
if (hcd->self.root_hub->do_remote_wakeup) {
|
|
- if (bus_state->resuming_ports) {
|
|
+ if (bus_state->resuming_ports || /* USB2 */
|
|
+ bus_state->port_remote_wakeup) { /* USB3 */
|
|
spin_unlock_irqrestore(&xhci->lock, flags);
|
|
- xhci_dbg(xhci, "suspend failed because "
|
|
- "a port is resuming\n");
|
|
+ xhci_dbg(xhci, "suspend failed because a port is resuming\n");
|
|
return -EBUSY;
|
|
}
|
|
}
|
|
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
|
|
index 4133a00..f8893b3 100644
|
|
--- a/drivers/usb/host/xhci-mem.c
|
|
+++ b/drivers/usb/host/xhci-mem.c
|
|
@@ -1331,10 +1331,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
|
|
/* Attempt to use the ring cache */
|
|
if (virt_dev->num_rings_cached == 0)
|
|
return -ENOMEM;
|
|
+ virt_dev->num_rings_cached--;
|
|
virt_dev->eps[ep_index].new_ring =
|
|
virt_dev->ring_cache[virt_dev->num_rings_cached];
|
|
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
|
|
- virt_dev->num_rings_cached--;
|
|
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
|
|
1, type);
|
|
}
|
|
@@ -1723,7 +1723,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
|
|
}
|
|
|
|
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
|
|
- for (i = 0; i < num_ports; i++) {
|
|
+ for (i = 0; i < num_ports && xhci->rh_bw; i++) {
|
|
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
|
|
for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
|
|
struct list_head *ep = &bwt->interval_bw[j].endpoints;
|
|
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
|
|
index 08a5f92..eb3399f 100644
|
|
--- a/drivers/usb/host/xhci-pci.c
|
|
+++ b/drivers/usb/host/xhci-pci.c
|
|
@@ -101,9 +101,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
|
|
/* AMD PLL quirk */
|
|
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
|
|
xhci->quirks |= XHCI_AMD_PLL_FIX;
|
|
+
|
|
+ if (pdev->vendor == PCI_VENDOR_ID_AMD)
|
|
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
|
|
+
|
|
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
|
|
xhci->quirks |= XHCI_LPM_SUPPORT;
|
|
xhci->quirks |= XHCI_INTEL_HOST;
|
|
+ xhci->quirks |= XHCI_AVOID_BEI;
|
|
}
|
|
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
|
|
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
|
|
@@ -119,7 +124,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
|
|
* PPT chipsets.
|
|
*/
|
|
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
|
|
- xhci->quirks |= XHCI_AVOID_BEI;
|
|
}
|
|
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
|
|
(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
|
|
@@ -277,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
|
|
if (xhci_compliance_mode_recovery_timer_quirk_check())
|
|
pdev->no_d3cold = true;
|
|
|
|
- return xhci_suspend(xhci);
|
|
+ return xhci_suspend(xhci, do_wakeup);
|
|
}
|
|
|
|
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
|
|
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
|
|
index 8abda5c..1d5ba3c 100644
|
|
--- a/drivers/usb/host/xhci-plat.c
|
|
+++ b/drivers/usb/host/xhci-plat.c
|
|
@@ -205,7 +205,15 @@ static int xhci_plat_suspend(struct device *dev)
|
|
struct usb_hcd *hcd = dev_get_drvdata(dev);
|
|
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
|
|
|
|
- return xhci_suspend(xhci);
|
|
+ /*
|
|
+ * xhci_suspend() needs `do_wakeup` to know whether host is allowed
|
|
+ * to do wakeup during suspend. Since xhci_plat_suspend is currently
|
|
+ * only designed for system suspend, device_may_wakeup() is enough
|
|
+	 * to determine whether host is allowed to do wakeup. Need to
|
|
+ * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
|
|
+ * also applies to runtime suspend.
|
|
+ */
|
|
+ return xhci_suspend(xhci, device_may_wakeup(dev));
|
|
}
|
|
|
|
static int xhci_plat_resume(struct device *dev)
|
|
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
|
|
index 65091d9..a365e97 100644
|
|
--- a/drivers/usb/host/xhci-ring.c
|
|
+++ b/drivers/usb/host/xhci-ring.c
|
|
@@ -86,7 +86,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
|
|
return 0;
|
|
/* offset in TRBs */
|
|
segment_offset = trb - seg->trbs;
|
|
- if (segment_offset > TRBS_PER_SEGMENT)
|
|
+ if (segment_offset >= TRBS_PER_SEGMENT)
|
|
return 0;
|
|
return seg->dma + (segment_offset * sizeof(*trb));
|
|
}
|
|
@@ -1180,9 +1180,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
|
|
false);
|
|
xhci_ring_cmd_db(xhci);
|
|
} else {
|
|
- /* Clear our internal halted state and restart the ring(s) */
|
|
+ /* Clear our internal halted state */
|
|
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
|
|
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
|
|
}
|
|
}
|
|
|
|
@@ -1741,6 +1740,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
|
|
usb_hcd_resume_root_hub(hcd);
|
|
}
|
|
|
|
+ if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
|
|
+ bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
|
|
+
|
|
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
|
|
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
|
|
|
|
@@ -1769,7 +1771,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
|
|
} else {
|
|
xhci_dbg(xhci, "resume HS port %d\n", port_id);
|
|
bus_state->resume_done[faked_port_index] = jiffies +
|
|
- msecs_to_jiffies(20);
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
set_bit(faked_port_index, &bus_state->resuming_ports);
|
|
mod_timer(&hcd->rh_timer,
|
|
bus_state->resume_done[faked_port_index]);
|
|
@@ -1999,22 +2001,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
ep->stopped_td = td;
|
|
return 0;
|
|
} else {
|
|
- if (trb_comp_code == COMP_STALL) {
|
|
- /* The transfer is completed from the driver's
|
|
- * perspective, but we need to issue a set dequeue
|
|
- * command for this stalled endpoint to move the dequeue
|
|
- * pointer past the TD. We can't do that here because
|
|
- * the halt condition must be cleared first. Let the
|
|
- * USB class driver clear the stall later.
|
|
- */
|
|
- ep->stopped_td = td;
|
|
- ep->stopped_stream = ep_ring->stream_id;
|
|
- } else if (xhci_requires_manual_halt_cleanup(xhci,
|
|
- ep_ctx, trb_comp_code)) {
|
|
- /* Other types of errors halt the endpoint, but the
|
|
- * class driver doesn't call usb_reset_endpoint() unless
|
|
- * the error is -EPIPE. Clear the halted status in the
|
|
- * xHCI hardware manually.
|
|
+ if (trb_comp_code == COMP_STALL ||
|
|
+ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
|
|
+ trb_comp_code)) {
|
|
+ /* Issue a reset endpoint command to clear the host side
|
|
+ * halt, followed by a set dequeue command to move the
|
|
+ * dequeue pointer past the TD.
|
|
+ * The class driver clears the device side halt later.
|
|
*/
|
|
xhci_cleanup_halted_endpoint(xhci,
|
|
slot_id, ep_index, ep_ring->stream_id,
|
|
@@ -2134,9 +2127,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
else
|
|
td->urb->actual_length = 0;
|
|
|
|
- xhci_cleanup_halted_endpoint(xhci,
|
|
- slot_id, ep_index, 0, td, event_trb);
|
|
- return finish_td(xhci, td, event_trb, event, ep, status, true);
|
|
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
|
|
}
|
|
/*
|
|
* Did we transfer any data, despite the errors that might have
|
|
@@ -2145,7 +2136,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
if (event_trb != ep_ring->dequeue) {
|
|
/* The event was for the status stage */
|
|
if (event_trb == td->last_trb) {
|
|
- if (td->urb->actual_length != 0) {
|
|
+ if (td->urb_length_set) {
|
|
/* Don't overwrite a previously set error code
|
|
*/
|
|
if ((*status == -EINPROGRESS || *status == 0) &&
|
|
@@ -2159,7 +2150,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
td->urb->transfer_buffer_length;
|
|
}
|
|
} else {
|
|
- /* Maybe the event was for the data stage? */
|
|
+ /*
|
|
+ * Maybe the event was for the data stage? If so, update
|
|
+ * already the actual_length of the URB and flag it as
|
|
+ * set, so that it is not overwritten in the event for
|
|
+ * the last TRB.
|
|
+ */
|
|
+ td->urb_length_set = true;
|
|
td->urb->actual_length =
|
|
td->urb->transfer_buffer_length -
|
|
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
|
|
@@ -2219,8 +2216,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
break;
|
|
case COMP_DEV_ERR:
|
|
case COMP_STALL:
|
|
+ frame->status = -EPROTO;
|
|
+ skip_td = true;
|
|
+ break;
|
|
case COMP_TX_ERR:
|
|
frame->status = -EPROTO;
|
|
+ if (event_trb != td->last_trb)
|
|
+ return 0;
|
|
skip_td = true;
|
|
break;
|
|
case COMP_STOP:
|
|
@@ -2612,7 +2614,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
|
|
* last TRB of the previous TD. The command completion handle
|
|
* will take care the rest.
|
|
*/
|
|
- if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
|
|
+ if (!event_seg && (trb_comp_code == COMP_STOP ||
|
|
+ trb_comp_code == COMP_STOP_INVAL)) {
|
|
ret = 0;
|
|
goto cleanup;
|
|
}
|
|
@@ -2689,17 +2692,8 @@ cleanup:
|
|
if (ret) {
|
|
urb = td->urb;
|
|
urb_priv = urb->hcpriv;
|
|
- /* Leave the TD around for the reset endpoint function
|
|
- * to use(but only if it's not a control endpoint,
|
|
- * since we already queued the Set TR dequeue pointer
|
|
- * command for stalled control endpoints).
|
|
- */
|
|
- if (usb_endpoint_xfer_control(&urb->ep->desc) ||
|
|
- (trb_comp_code != COMP_STALL &&
|
|
- trb_comp_code != COMP_BABBLE))
|
|
- xhci_urb_free_priv(xhci, urb_priv);
|
|
- else
|
|
- kfree(urb_priv);
|
|
+
|
|
+ xhci_urb_free_priv(xhci, urb_priv);
|
|
|
|
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
|
|
if ((urb->actual_length != urb->transfer_buffer_length &&
|
|
@@ -2836,7 +2830,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
|
|
xhci_halt(xhci);
|
|
hw_died:
|
|
spin_unlock(&xhci->lock);
|
|
- return -ESHUTDOWN;
|
|
+ return IRQ_HANDLED;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
|
|
index ab83104..fc61e663b 100644
|
|
--- a/drivers/usb/host/xhci.c
|
|
+++ b/drivers/usb/host/xhci.c
|
|
@@ -35,6 +35,8 @@
|
|
#define DRIVER_AUTHOR "Sarah Sharp"
|
|
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
|
|
|
|
+#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
|
|
+
|
|
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
|
|
static int link_quirk;
|
|
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
|
|
@@ -842,13 +844,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
|
|
xhci_set_cmd_ring_deq(xhci);
|
|
}
|
|
|
|
+static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
|
|
+{
|
|
+ int port_index;
|
|
+ __le32 __iomem **port_array;
|
|
+ unsigned long flags;
|
|
+ u32 t1, t2;
|
|
+
|
|
+ spin_lock_irqsave(&xhci->lock, flags);
|
|
+
|
|
+	/* disable usb3 ports Wake bits */
|
|
+ port_index = xhci->num_usb3_ports;
|
|
+ port_array = xhci->usb3_ports;
|
|
+ while (port_index--) {
|
|
+ t1 = readl(port_array[port_index]);
|
|
+ t1 = xhci_port_state_to_neutral(t1);
|
|
+ t2 = t1 & ~PORT_WAKE_BITS;
|
|
+ if (t1 != t2)
|
|
+ writel(t2, port_array[port_index]);
|
|
+ }
|
|
+
|
|
+	/* disable usb2 ports Wake bits */
|
|
+ port_index = xhci->num_usb2_ports;
|
|
+ port_array = xhci->usb2_ports;
|
|
+ while (port_index--) {
|
|
+ t1 = readl(port_array[port_index]);
|
|
+ t1 = xhci_port_state_to_neutral(t1);
|
|
+ t2 = t1 & ~PORT_WAKE_BITS;
|
|
+ if (t1 != t2)
|
|
+ writel(t2, port_array[port_index]);
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&xhci->lock, flags);
|
|
+}
|
|
+
|
|
/*
|
|
* Stop HC (not bus-specific)
|
|
*
|
|
* This is called when the machine transition into S3/S4 mode.
|
|
*
|
|
*/
|
|
-int xhci_suspend(struct xhci_hcd *xhci)
|
|
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
|
|
{
|
|
int rc = 0;
|
|
unsigned int delay = XHCI_MAX_HALT_USEC;
|
|
@@ -859,6 +895,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
|
|
xhci->shared_hcd->state != HC_STATE_SUSPENDED)
|
|
return -EINVAL;
|
|
|
|
+ /* Clear root port wake on bits if wakeup not allowed. */
|
|
+ if (!do_wakeup)
|
|
+ xhci_disable_port_wake_on_bits(xhci);
|
|
+
|
|
/* Don't poll the roothubs on bus suspend. */
|
|
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
|
|
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
|
|
@@ -2885,63 +2925,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
|
|
}
|
|
}
|
|
|
|
-/* Deal with stalled endpoints. The core should have sent the control message
|
|
- * to clear the halt condition. However, we need to make the xHCI hardware
|
|
- * reset its sequence number, since a device will expect a sequence number of
|
|
- * zero after the halt condition is cleared.
|
|
+/* Called when clearing halted device. The core should have sent the control
|
|
+ * message to clear the device halt condition. The host side of the halt should
|
|
+ * already be cleared with a reset endpoint command issued when the STALL tx
|
|
+ * event was received.
|
|
+ *
|
|
* Context: in_interrupt
|
|
*/
|
|
+
|
|
void xhci_endpoint_reset(struct usb_hcd *hcd,
|
|
struct usb_host_endpoint *ep)
|
|
{
|
|
struct xhci_hcd *xhci;
|
|
- struct usb_device *udev;
|
|
- unsigned int ep_index;
|
|
- unsigned long flags;
|
|
- int ret;
|
|
- struct xhci_virt_ep *virt_ep;
|
|
|
|
xhci = hcd_to_xhci(hcd);
|
|
- udev = (struct usb_device *) ep->hcpriv;
|
|
- /* Called with a root hub endpoint (or an endpoint that wasn't added
|
|
- * with xhci_add_endpoint()
|
|
- */
|
|
- if (!ep->hcpriv)
|
|
- return;
|
|
- ep_index = xhci_get_endpoint_index(&ep->desc);
|
|
- virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
|
|
- if (!virt_ep->stopped_td) {
|
|
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
|
|
- "Endpoint 0x%x not halted, refusing to reset.",
|
|
- ep->desc.bEndpointAddress);
|
|
- return;
|
|
- }
|
|
- if (usb_endpoint_xfer_control(&ep->desc)) {
|
|
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
|
|
- "Control endpoint stall already handled.");
|
|
- return;
|
|
- }
|
|
|
|
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
|
|
- "Queueing reset endpoint command");
|
|
- spin_lock_irqsave(&xhci->lock, flags);
|
|
- ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
|
|
/*
|
|
- * Can't change the ring dequeue pointer until it's transitioned to the
|
|
- * stopped state, which is only upon a successful reset endpoint
|
|
- * command. Better hope that last command worked!
|
|
+ * We might need to implement the config ep cmd in xhci 4.8.1 note:
|
|
+ * The Reset Endpoint Command may only be issued to endpoints in the
|
|
+	 * Halted state. If software wishes to reset the Data Toggle or Sequence
|
|
+ * Number of an endpoint that isn't in the Halted state, then software
|
|
+ * may issue a Configure Endpoint Command with the Drop and Add bits set
|
|
+	 * for the target endpoint that is in the Stopped state.
|
|
*/
|
|
- if (!ret) {
|
|
- xhci_cleanup_stalled_ring(xhci, udev, ep_index);
|
|
- kfree(virt_ep->stopped_td);
|
|
- xhci_ring_cmd_db(xhci);
|
|
- }
|
|
- virt_ep->stopped_td = NULL;
|
|
- virt_ep->stopped_stream = 0;
|
|
- spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
|
- if (ret)
|
|
- xhci_warn(xhci, "FIXME allocate a new ring segment\n");
|
|
+ /* For now just print debug to follow the situation */
|
|
+ xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
|
|
+ ep->desc.bEndpointAddress);
|
|
}
|
|
|
|
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
|
|
@@ -3414,6 +3424,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (virt_dev->tt_info)
|
|
+ old_active_eps = virt_dev->tt_info->active_eps;
|
|
+
|
|
if (virt_dev->udev != udev) {
|
|
/* If the virt_dev and the udev does not match, this virt_dev
|
|
* may belong to another udev.
|
|
@@ -3928,13 +3941,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
|
|
int ret;
|
|
|
|
spin_lock_irqsave(&xhci->lock, flags);
|
|
- if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
|
|
+
|
|
+ virt_dev = xhci->devs[udev->slot_id];
|
|
+
|
|
+ /*
|
|
+	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
|
|
+ * xHC was re-initialized. Exit latency will be set later after
|
|
+ * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
|
|
+ */
|
|
+
|
|
+ if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
|
|
spin_unlock_irqrestore(&xhci->lock, flags);
|
|
return 0;
|
|
}
|
|
|
|
/* Attempt to issue an Evaluate Context command to change the MEL. */
|
|
- virt_dev = xhci->devs[udev->slot_id];
|
|
command = xhci->lpm_command;
|
|
ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
|
|
if (!ctrl_ctx) {
|
|
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
|
|
index 8faef64..c167485 100644
|
|
--- a/drivers/usb/host/xhci.h
|
|
+++ b/drivers/usb/host/xhci.h
|
|
@@ -1,3 +1,4 @@
|
|
+
|
|
/*
|
|
* xHCI host controller driver
|
|
*
|
|
@@ -88,9 +89,10 @@ struct xhci_cap_regs {
|
|
#define HCS_IST(p) (((p) >> 0) & 0xf)
|
|
/* bits 4:7, max number of Event Ring segments */
|
|
#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
|
|
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
|
|
/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
|
|
-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
|
|
-#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
|
|
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
|
|
+#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
|
|
|
|
/* HCSPARAMS3 - hcs_params3 - bitmasks */
|
|
/* bits 0:7, Max U1 to U0 latency for the roothub ports */
|
|
@@ -283,6 +285,7 @@ struct xhci_op_regs {
|
|
#define XDEV_U0 (0x0 << 5)
|
|
#define XDEV_U2 (0x2 << 5)
|
|
#define XDEV_U3 (0x3 << 5)
|
|
+#define XDEV_INACTIVE (0x6 << 5)
|
|
#define XDEV_RESUME (0xf << 5)
|
|
/* true: port has power (see HCC_PPC) */
|
|
#define PORT_POWER (1 << 9)
|
|
@@ -1266,7 +1269,7 @@ union xhci_trb {
|
|
* since the command ring is 64-byte aligned.
|
|
* It must also be greater than 16.
|
|
*/
|
|
-#define TRBS_PER_SEGMENT 64
|
|
+#define TRBS_PER_SEGMENT 256
|
|
/* Allow two commands + a link TRB, along with any reserved command TRBs */
|
|
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
|
|
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
|
|
@@ -1289,6 +1292,8 @@ struct xhci_td {
|
|
struct xhci_segment *start_seg;
|
|
union xhci_trb *first_trb;
|
|
union xhci_trb *last_trb;
|
|
+ /* actual_length of the URB has already been set */
|
|
+ bool urb_length_set;
|
|
};
|
|
|
|
/* xHCI command default timeout value */
|
|
@@ -1760,7 +1765,7 @@ void xhci_shutdown(struct usb_hcd *hcd);
|
|
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
|
|
|
|
#ifdef CONFIG_PM
|
|
-int xhci_suspend(struct xhci_hcd *xhci);
|
|
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
|
|
int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
|
|
#else
|
|
#define xhci_suspend NULL
|
|
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
|
|
index de98906..0aef801 100644
|
|
--- a/drivers/usb/misc/sisusbvga/sisusb.c
|
|
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
|
|
@@ -3248,6 +3248,7 @@ static const struct usb_device_id sisusb_table[] = {
|
|
{ USB_DEVICE(0x0711, 0x0918) },
|
|
{ USB_DEVICE(0x0711, 0x0920) },
|
|
{ USB_DEVICE(0x0711, 0x0950) },
|
|
+ { USB_DEVICE(0x0711, 0x5200) },
|
|
{ USB_DEVICE(0x182d, 0x021c) },
|
|
{ USB_DEVICE(0x182d, 0x0269) },
|
|
{ }
|
|
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
|
|
index 0757690..ec69b90 100644
|
|
--- a/drivers/usb/musb/musb_core.c
|
|
+++ b/drivers/usb/musb/musb_core.c
|
|
@@ -99,6 +99,7 @@
|
|
#include <linux/platform_device.h>
|
|
#include <linux/io.h>
|
|
#include <linux/dma-mapping.h>
|
|
+#include <linux/usb.h>
|
|
|
|
#include "musb_core.h"
|
|
|
|
@@ -477,10 +478,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
|
|
(USB_PORT_STAT_C_SUSPEND << 16)
|
|
| MUSB_PORT_STAT_RESUME;
|
|
musb->rh_timer = jiffies
|
|
- + msecs_to_jiffies(20);
|
|
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
|
|
+
|
|
schedule_delayed_work(
|
|
&musb->finish_resume_work,
|
|
- msecs_to_jiffies(20));
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
|
|
|
|
musb->xceiv->state = OTG_STATE_A_HOST;
|
|
musb->is_active = 1;
|
|
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
|
|
index c2d5afc..1d29bbf 100644
|
|
--- a/drivers/usb/musb/musb_cppi41.c
|
|
+++ b/drivers/usb/musb/musb_cppi41.c
|
|
@@ -190,7 +190,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
|
|
}
|
|
}
|
|
|
|
- if (!list_empty(&controller->early_tx_list)) {
|
|
+ if (!list_empty(&controller->early_tx_list) &&
|
|
+ !hrtimer_is_queued(&controller->early_tx)) {
|
|
ret = HRTIMER_RESTART;
|
|
hrtimer_forward_now(&controller->early_tx,
|
|
ktime_set(0, 150 * NSEC_PER_USEC));
|
|
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
|
|
index 7a109ea..865243e 100644
|
|
--- a/drivers/usb/musb/musb_dsps.c
|
|
+++ b/drivers/usb/musb/musb_dsps.c
|
|
@@ -707,6 +707,7 @@ static int dsps_suspend(struct device *dev)
|
|
struct musb *musb = platform_get_drvdata(glue->musb);
|
|
void __iomem *mbase = musb->ctrl_base;
|
|
|
|
+ del_timer_sync(&glue->timer);
|
|
glue->context.control = dsps_readl(mbase, wrp->control);
|
|
glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
|
|
glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
|
|
@@ -732,6 +733,9 @@ static int dsps_resume(struct device *dev)
|
|
dsps_writel(mbase, wrp->mode, glue->context.mode);
|
|
dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
|
|
dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
|
|
+ if (musb->xceiv->state == OTG_STATE_B_IDLE &&
|
|
+ musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
|
|
+ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
|
|
index abb38c3..6b0fb6a 100644
|
|
--- a/drivers/usb/musb/musb_host.c
|
|
+++ b/drivers/usb/musb/musb_host.c
|
|
@@ -2640,7 +2640,6 @@ void musb_host_cleanup(struct musb *musb)
|
|
if (musb->port_mode == MUSB_PORT_MODE_GADGET)
|
|
return;
|
|
usb_remove_hcd(musb->hcd);
|
|
- musb->hcd = NULL;
|
|
}
|
|
|
|
void musb_host_free(struct musb *musb)
|
|
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
|
|
index e2d2d8c..1e9bde4 100644
|
|
--- a/drivers/usb/musb/musb_virthub.c
|
|
+++ b/drivers/usb/musb/musb_virthub.c
|
|
@@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
|
|
/* later, GetPortStatus will stop RESUME signaling */
|
|
musb->port1_status |= MUSB_PORT_STAT_RESUME;
|
|
schedule_delayed_work(&musb->finish_resume_work,
|
|
- msecs_to_jiffies(20));
|
|
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
|
|
}
|
|
}
|
|
|
|
@@ -273,9 +273,7 @@ static int musb_has_gadget(struct musb *musb)
|
|
#ifdef CONFIG_USB_MUSB_HOST
|
|
return 1;
|
|
#else
|
|
- if (musb->port_mode == MUSB_PORT_MODE_HOST)
|
|
- return 1;
|
|
- return musb->g.dev.driver != NULL;
|
|
+ return musb->port_mode == MUSB_PORT_MODE_HOST;
|
|
#endif
|
|
}
|
|
|
|
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
|
|
index bbe4f8e..8834b70 100644
|
|
--- a/drivers/usb/phy/phy-tegra-usb.c
|
|
+++ b/drivers/usb/phy/phy-tegra-usb.c
|
|
@@ -881,8 +881,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- tegra_phy->config = devm_kzalloc(&pdev->dev,
|
|
- sizeof(*tegra_phy->config), GFP_KERNEL);
|
|
+ tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
|
|
+ GFP_KERNEL);
|
|
if (!tegra_phy->config) {
|
|
dev_err(&pdev->dev,
|
|
"unable to allocate memory for USB UTMIP config\n");
|
|
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
|
|
index 8afa813..964ebaf 100644
|
|
--- a/drivers/usb/phy/phy.c
|
|
+++ b/drivers/usb/phy/phy.c
|
|
@@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
|
|
|
|
static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
|
|
{
|
|
- return res == match_data;
|
|
+ struct usb_phy **phy = res;
|
|
+
|
|
+ return *phy == match_data;
|
|
}
|
|
|
|
/**
|
|
@@ -229,6 +231,9 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
|
|
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
|
|
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
|
|
dev_dbg(dev, "unable to find transceiver\n");
|
|
+ if (!IS_ERR(phy))
|
|
+ phy = ERR_PTR(-ENODEV);
|
|
+
|
|
goto err0;
|
|
}
|
|
|
|
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
|
|
index 9374bd2..6f91eb9 100644
|
|
--- a/drivers/usb/serial/bus.c
|
|
+++ b/drivers/usb/serial/bus.c
|
|
@@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev)
|
|
{
|
|
struct usb_serial_driver *driver;
|
|
struct usb_serial_port *port;
|
|
+ struct device *tty_dev;
|
|
int retval = 0;
|
|
int minor;
|
|
|
|
@@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev)
|
|
retval = device_create_file(dev, &dev_attr_port_number);
|
|
if (retval) {
|
|
if (driver->port_remove)
|
|
- retval = driver->port_remove(port);
|
|
+ driver->port_remove(port);
|
|
goto exit_with_autopm;
|
|
}
|
|
|
|
minor = port->minor;
|
|
- tty_register_device(usb_serial_tty_driver, minor, dev);
|
|
+ tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
|
|
+ if (IS_ERR(tty_dev)) {
|
|
+ retval = PTR_ERR(tty_dev);
|
|
+ device_remove_file(dev, &dev_attr_port_number);
|
|
+ if (driver->port_remove)
|
|
+ driver->port_remove(port);
|
|
+ goto exit_with_autopm;
|
|
+ }
|
|
+
|
|
dev_info(&port->serial->dev->dev,
|
|
"%s converter now attached to ttyUSB%d\n",
|
|
driver->description, minor);
|
|
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
|
|
index 8d7fc48..29fa1c3 100644
|
|
--- a/drivers/usb/serial/console.c
|
|
+++ b/drivers/usb/serial/console.c
|
|
@@ -46,6 +46,8 @@ static struct console usbcons;
|
|
* ------------------------------------------------------------
|
|
*/
|
|
|
|
+static const struct tty_operations usb_console_fake_tty_ops = {
|
|
+};
|
|
|
|
/*
|
|
* The parsing of the command line works exactly like the
|
|
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
|
|
goto reset_open_count;
|
|
}
|
|
kref_init(&tty->kref);
|
|
- tty_port_tty_set(&port->port, tty);
|
|
tty->driver = usb_serial_tty_driver;
|
|
tty->index = co->index;
|
|
+ init_ldsem(&tty->ldisc_sem);
|
|
+ INIT_LIST_HEAD(&tty->tty_files);
|
|
+ kref_get(&tty->driver->kref);
|
|
+ tty->ops = &usb_console_fake_tty_ops;
|
|
if (tty_init_termios(tty)) {
|
|
retval = -ENOMEM;
|
|
- goto free_tty;
|
|
+ goto put_tty;
|
|
}
|
|
+ tty_port_tty_set(&port->port, tty);
|
|
}
|
|
|
|
/* only call the device specific open if this
|
|
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
|
|
serial->type->set_termios(tty, port, &dummy);
|
|
|
|
tty_port_tty_set(&port->port, NULL);
|
|
- kfree(tty);
|
|
+ tty_kref_put(tty);
|
|
}
|
|
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
|
|
}
|
|
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
|
|
|
|
fail:
|
|
tty_port_tty_set(&port->port, NULL);
|
|
- free_tty:
|
|
- kfree(tty);
|
|
+ put_tty:
|
|
+ tty_kref_put(tty);
|
|
reset_open_count:
|
|
port->port.count = 0;
|
|
usb_autopm_put_interface(serial->interface);
|
|
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
|
|
index 330df5c..d11335d 100644
|
|
--- a/drivers/usb/serial/cp210x.c
|
|
+++ b/drivers/usb/serial/cp210x.c
|
|
@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
|
|
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
|
|
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
|
|
+ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
|
|
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
|
|
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
|
|
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
|
|
@@ -120,8 +121,14 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
|
|
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
|
|
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
|
|
+ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
|
|
+ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
|
|
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
|
|
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
|
|
+ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
|
|
+ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
|
|
+ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
|
|
+ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
|
|
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
|
|
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
|
|
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
|
|
@@ -142,6 +149,8 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
|
|
{ USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
|
|
{ USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
|
|
+ { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
|
|
+ { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
|
|
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
|
|
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
|
|
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
|
|
@@ -154,7 +163,9 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
|
|
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
|
|
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
|
|
+ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
|
|
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
|
|
+ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
|
|
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
|
|
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
|
|
{ USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
|
|
@@ -176,6 +187,7 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
|
|
{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
|
|
{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
|
|
+ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
|
|
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
|
|
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
|
|
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
|
|
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
|
|
index 8a3813b..7fb81db 100644
|
|
--- a/drivers/usb/serial/ftdi_sio.c
|
|
+++ b/drivers/usb/serial/ftdi_sio.c
|
|
@@ -145,12 +145,14 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
|
|
* /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
|
|
*/
|
|
static const struct usb_device_id id_table_combined[] = {
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
|
|
@@ -481,6 +483,39 @@ static const struct usb_device_id id_table_combined[] = {
|
|
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
|
|
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
|
|
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
|
|
+ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
|
|
@@ -582,6 +617,11 @@ static const struct usb_device_id id_table_combined[] = {
|
|
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
|
|
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
|
|
/*
|
|
* ELV devices:
|
|
*/
|
|
@@ -673,6 +713,11 @@ static const struct usb_device_id id_table_combined[] = {
|
|
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
|
|
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
|
|
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
|
|
+ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
|
|
+ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
|
|
+ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
|
|
+ { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
|
|
+ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
|
|
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
|
|
@@ -738,6 +783,7 @@ static const struct usb_device_id id_table_combined[] = {
|
|
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
|
|
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
|
|
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
|
|
+ { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
|
|
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
|
|
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
|
|
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
|
|
@@ -772,6 +818,8 @@ static const struct usb_device_id id_table_combined[] = {
|
|
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
|
|
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
|
|
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
|
|
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
|
|
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
|
|
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
|
|
@@ -945,8 +993,29 @@ static const struct usb_device_id id_table_combined[] = {
|
|
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
|
|
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
|
|
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
|
|
+ /* ekey Devices */
|
|
+ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
|
|
/* Infineon Devices */
|
|
{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
|
|
+ /* GE Healthcare devices */
|
|
+ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
|
|
+ /* Active Research (Actisense) devices */
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
|
|
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
|
|
{ } /* Terminating entry */
|
|
};
|
|
|
|
@@ -1838,8 +1907,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
|
|
{
|
|
struct usb_device *udev = serial->dev;
|
|
|
|
- if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
|
|
- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
|
|
+ if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
|
|
+ return ftdi_jtag_probe(serial);
|
|
+
|
|
+ if (udev->product &&
|
|
+ (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
|
|
+ !strcmp(udev->product, "SNAP Connect E10")))
|
|
return ftdi_jtag_probe(serial);
|
|
|
|
return 0;
|
|
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
|
|
index c4777bc..2943b97 100644
|
|
--- a/drivers/usb/serial/ftdi_sio_ids.h
|
|
+++ b/drivers/usb/serial/ftdi_sio_ids.h
|
|
@@ -30,8 +30,17 @@
|
|
|
|
/*** third-party PIDs (using FTDI_VID) ***/
|
|
|
|
+/*
|
|
+ * Certain versions of the official Windows FTDI driver reprogrammed
|
|
+ * counterfeit FTDI devices to PID 0. Support these devices anyway.
|
|
+ */
|
|
+#define FTDI_BRICK_PID 0x0000
|
|
+
|
|
#define FTDI_LUMEL_PD12_PID 0x6002
|
|
|
|
+/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
|
|
+#define CYBER_CORTEX_AV_PID 0x8698
|
|
+
|
|
/*
|
|
* Marvell OpenRD Base, Client
|
|
* http://www.open-rd.org
|
|
@@ -42,6 +51,8 @@
|
|
/* www.candapter.com Ewert Energy Systems CANdapter device */
|
|
#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
|
|
|
|
+#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */
|
|
+
|
|
/*
|
|
* Texas Instruments XDS100v2 JTAG / BeagleBone A3
|
|
* http://processors.wiki.ti.com/index.php/XDS100
|
|
@@ -140,12 +151,20 @@
|
|
/*
|
|
* Xsens Technologies BV products (http://www.xsens.com).
|
|
*/
|
|
-#define XSENS_CONVERTER_0_PID 0xD388
|
|
-#define XSENS_CONVERTER_1_PID 0xD389
|
|
+#define XSENS_VID 0x2639
|
|
+#define XSENS_AWINDA_STATION_PID 0x0101
|
|
+#define XSENS_AWINDA_DONGLE_PID 0x0102
|
|
+#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
|
|
+#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
|
|
+#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
|
|
+
|
|
+/* Xsens devices using FTDI VID */
|
|
+#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
|
|
+#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
|
|
#define XSENS_CONVERTER_2_PID 0xD38A
|
|
-#define XSENS_CONVERTER_3_PID 0xD38B
|
|
-#define XSENS_CONVERTER_4_PID 0xD38C
|
|
-#define XSENS_CONVERTER_5_PID 0xD38D
|
|
+#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */
|
|
+#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */
|
|
+#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */
|
|
#define XSENS_CONVERTER_6_PID 0xD38E
|
|
#define XSENS_CONVERTER_7_PID 0xD38F
|
|
|
|
@@ -543,6 +562,20 @@
|
|
*/
|
|
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
|
|
|
|
+/*
|
|
+ * Synapse Wireless product ids (FTDI_VID)
|
|
+ * http://www.synapse-wireless.com
|
|
+ */
|
|
+#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
|
|
+
|
|
+/*
|
|
+ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
|
|
+ */
|
|
+#define FTDI_CUSTOMWARE_MINIPLEX_PID 0xfd48 /* MiniPlex first generation NMEA Multiplexer */
|
|
+#define FTDI_CUSTOMWARE_MINIPLEX2_PID 0xfd49 /* MiniPlex-USB and MiniPlex-2 series */
|
|
+#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID 0xfd4a /* MiniPlex-2Wi */
|
|
+#define FTDI_CUSTOMWARE_MINIPLEX3_PID 0xfd4b /* MiniPlex-3 series */
|
|
+
|
|
|
|
/********************************/
|
|
/** third-party VID/PID combos **/
|
|
@@ -832,6 +865,12 @@
|
|
#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
|
|
|
|
/*
|
|
+ * NOVITUS printers
|
|
+ */
|
|
+#define NOVITUS_VID 0x1a28
|
|
+#define NOVITUS_BONO_E_PID 0x6010
|
|
+
|
|
+/*
|
|
* RT Systems programming cables for various ham radios
|
|
*/
|
|
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
|
|
@@ -905,8 +944,8 @@
|
|
#define BAYER_CONTOUR_CABLE_PID 0x6001
|
|
|
|
/*
|
|
- * The following are the values for the Matrix Orbital FTDI Range
|
|
- * Anything in this range will use an FT232RL.
|
|
+ * Matrix Orbital Intelligent USB displays.
|
|
+ * http://www.matrixorbital.com
|
|
*/
|
|
#define MTXORB_VID 0x1B3D
|
|
#define MTXORB_FTDI_RANGE_0100_PID 0x0100
|
|
@@ -1165,8 +1204,39 @@
|
|
#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
|
|
#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
|
|
#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
|
|
-
|
|
-
|
|
+#define MTXORB_FTDI_RANGE_4701_PID 0x4701
|
|
+#define MTXORB_FTDI_RANGE_9300_PID 0x9300
|
|
+#define MTXORB_FTDI_RANGE_9301_PID 0x9301
|
|
+#define MTXORB_FTDI_RANGE_9302_PID 0x9302
|
|
+#define MTXORB_FTDI_RANGE_9303_PID 0x9303
|
|
+#define MTXORB_FTDI_RANGE_9304_PID 0x9304
|
|
+#define MTXORB_FTDI_RANGE_9305_PID 0x9305
|
|
+#define MTXORB_FTDI_RANGE_9306_PID 0x9306
|
|
+#define MTXORB_FTDI_RANGE_9307_PID 0x9307
|
|
+#define MTXORB_FTDI_RANGE_9308_PID 0x9308
|
|
+#define MTXORB_FTDI_RANGE_9309_PID 0x9309
|
|
+#define MTXORB_FTDI_RANGE_930A_PID 0x930A
|
|
+#define MTXORB_FTDI_RANGE_930B_PID 0x930B
|
|
+#define MTXORB_FTDI_RANGE_930C_PID 0x930C
|
|
+#define MTXORB_FTDI_RANGE_930D_PID 0x930D
|
|
+#define MTXORB_FTDI_RANGE_930E_PID 0x930E
|
|
+#define MTXORB_FTDI_RANGE_930F_PID 0x930F
|
|
+#define MTXORB_FTDI_RANGE_9310_PID 0x9310
|
|
+#define MTXORB_FTDI_RANGE_9311_PID 0x9311
|
|
+#define MTXORB_FTDI_RANGE_9312_PID 0x9312
|
|
+#define MTXORB_FTDI_RANGE_9313_PID 0x9313
|
|
+#define MTXORB_FTDI_RANGE_9314_PID 0x9314
|
|
+#define MTXORB_FTDI_RANGE_9315_PID 0x9315
|
|
+#define MTXORB_FTDI_RANGE_9316_PID 0x9316
|
|
+#define MTXORB_FTDI_RANGE_9317_PID 0x9317
|
|
+#define MTXORB_FTDI_RANGE_9318_PID 0x9318
|
|
+#define MTXORB_FTDI_RANGE_9319_PID 0x9319
|
|
+#define MTXORB_FTDI_RANGE_931A_PID 0x931A
|
|
+#define MTXORB_FTDI_RANGE_931B_PID 0x931B
|
|
+#define MTXORB_FTDI_RANGE_931C_PID 0x931C
|
|
+#define MTXORB_FTDI_RANGE_931D_PID 0x931D
|
|
+#define MTXORB_FTDI_RANGE_931E_PID 0x931E
|
|
+#define MTXORB_FTDI_RANGE_931F_PID 0x931F
|
|
|
|
/*
|
|
* The Mobility Lab (TML)
|
|
@@ -1375,3 +1445,34 @@
|
|
#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
|
|
#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
|
|
#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
|
|
+
|
|
+/*
|
|
+ * ekey biometric systems GmbH (http://ekey.net/)
|
|
+ */
|
|
+#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
|
|
+
|
|
+/*
|
|
+ * GE Healthcare devices
|
|
+ */
|
|
+#define GE_HEALTHCARE_VID 0x1901
|
|
+#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
|
|
+
|
|
+/*
|
|
+ * Active Research (Actisense) devices
|
|
+ */
|
|
+#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */
|
|
+#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
|
|
+#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
|
|
+#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
|
|
+#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
|
|
+#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
|
|
+#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
|
|
+#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
|
|
+#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
|
|
+#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
|
|
+#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */
|
|
+#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */
|
|
+#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */
|
|
+#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
|
|
+#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
|
|
+#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
|
|
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
|
|
index b63ce02..d6a1979 100644
|
|
--- a/drivers/usb/serial/generic.c
|
|
+++ b/drivers/usb/serial/generic.c
|
|
@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
|
|
* character or at least one jiffy.
|
|
*/
|
|
period = max_t(unsigned long, (10 * HZ / bps), 1);
|
|
- period = min_t(unsigned long, period, timeout);
|
|
+ if (timeout)
|
|
+ period = min_t(unsigned long, period, timeout);
|
|
|
|
dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
|
|
__func__, jiffies_to_msecs(timeout),
|
|
@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
|
|
schedule_timeout_interruptible(period);
|
|
if (signal_pending(current))
|
|
break;
|
|
- if (time_after(jiffies, expire))
|
|
+ if (timeout && time_after(jiffies, expire))
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
|
|
index 265c677..35297a8 100644
|
|
--- a/drivers/usb/serial/keyspan.c
|
|
+++ b/drivers/usb/serial/keyspan.c
|
|
@@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb)
|
|
if ((data[0] & 0x80) == 0) {
|
|
/* no errors on individual bytes, only
|
|
possible overrun err */
|
|
- if (data[0] & RXERROR_OVERRUN)
|
|
- err = TTY_OVERRUN;
|
|
- else
|
|
- err = 0;
|
|
+ if (data[0] & RXERROR_OVERRUN) {
|
|
+ tty_insert_flip_char(&port->port, 0,
|
|
+ TTY_OVERRUN);
|
|
+ }
|
|
for (i = 1; i < urb->actual_length ; ++i)
|
|
- tty_insert_flip_char(&port->port, data[i], err);
|
|
+ tty_insert_flip_char(&port->port, data[i],
|
|
+ TTY_NORMAL);
|
|
} else {
|
|
/* some bytes had errors, every byte has status */
|
|
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
|
|
for (i = 0; i + 1 < urb->actual_length; i += 2) {
|
|
- int stat = data[i], flag = 0;
|
|
- if (stat & RXERROR_OVERRUN)
|
|
- flag |= TTY_OVERRUN;
|
|
- if (stat & RXERROR_FRAMING)
|
|
- flag |= TTY_FRAME;
|
|
- if (stat & RXERROR_PARITY)
|
|
- flag |= TTY_PARITY;
|
|
+ int stat = data[i];
|
|
+ int flag = TTY_NORMAL;
|
|
+
|
|
+ if (stat & RXERROR_OVERRUN) {
|
|
+ tty_insert_flip_char(&port->port, 0,
|
|
+ TTY_OVERRUN);
|
|
+ }
|
|
/* XXX should handle break (0x10) */
|
|
+ if (stat & RXERROR_PARITY)
|
|
+ flag = TTY_PARITY;
|
|
+ else if (stat & RXERROR_FRAMING)
|
|
+ flag = TTY_FRAME;
|
|
+
|
|
tty_insert_flip_char(&port->port, data[i+1],
|
|
flag);
|
|
}
|
|
@@ -415,6 +421,8 @@ static void usa26_instat_callback(struct urb *urb)
|
|
}
|
|
port = serial->port[msg->port];
|
|
p_priv = usb_get_serial_port_data(port);
|
|
+ if (!p_priv)
|
|
+ goto resubmit;
|
|
|
|
/* Update handshaking pin state information */
|
|
old_dcd_state = p_priv->dcd_state;
|
|
@@ -425,7 +433,7 @@ static void usa26_instat_callback(struct urb *urb)
|
|
|
|
if (old_dcd_state != p_priv->dcd_state)
|
|
tty_port_tty_hangup(&port->port, true);
|
|
-
|
|
+resubmit:
|
|
/* Resubmit urb so we continue receiving */
|
|
err = usb_submit_urb(urb, GFP_ATOMIC);
|
|
if (err != 0)
|
|
@@ -535,6 +543,8 @@ static void usa28_instat_callback(struct urb *urb)
|
|
}
|
|
port = serial->port[msg->port];
|
|
p_priv = usb_get_serial_port_data(port);
|
|
+ if (!p_priv)
|
|
+ goto resubmit;
|
|
|
|
/* Update handshaking pin state information */
|
|
old_dcd_state = p_priv->dcd_state;
|
|
@@ -545,7 +555,7 @@ static void usa28_instat_callback(struct urb *urb)
|
|
|
|
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
|
|
tty_port_tty_hangup(&port->port, true);
|
|
-
|
|
+resubmit:
|
|
/* Resubmit urb so we continue receiving */
|
|
err = usb_submit_urb(urb, GFP_ATOMIC);
|
|
if (err != 0)
|
|
@@ -618,6 +628,8 @@ static void usa49_instat_callback(struct urb *urb)
|
|
}
|
|
port = serial->port[msg->portNumber];
|
|
p_priv = usb_get_serial_port_data(port);
|
|
+ if (!p_priv)
|
|
+ goto resubmit;
|
|
|
|
/* Update handshaking pin state information */
|
|
old_dcd_state = p_priv->dcd_state;
|
|
@@ -628,7 +640,7 @@ static void usa49_instat_callback(struct urb *urb)
|
|
|
|
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
|
|
tty_port_tty_hangup(&port->port, true);
|
|
-
|
|
+resubmit:
|
|
/* Resubmit urb so we continue receiving */
|
|
err = usb_submit_urb(urb, GFP_ATOMIC);
|
|
if (err != 0)
|
|
@@ -666,14 +678,19 @@ static void usa49_indat_callback(struct urb *urb)
|
|
} else {
|
|
/* some bytes had errors, every byte has status */
|
|
for (i = 0; i + 1 < urb->actual_length; i += 2) {
|
|
- int stat = data[i], flag = 0;
|
|
- if (stat & RXERROR_OVERRUN)
|
|
- flag |= TTY_OVERRUN;
|
|
- if (stat & RXERROR_FRAMING)
|
|
- flag |= TTY_FRAME;
|
|
- if (stat & RXERROR_PARITY)
|
|
- flag |= TTY_PARITY;
|
|
+ int stat = data[i];
|
|
+ int flag = TTY_NORMAL;
|
|
+
|
|
+ if (stat & RXERROR_OVERRUN) {
|
|
+ tty_insert_flip_char(&port->port, 0,
|
|
+ TTY_OVERRUN);
|
|
+ }
|
|
/* XXX should handle break (0x10) */
|
|
+ if (stat & RXERROR_PARITY)
|
|
+ flag = TTY_PARITY;
|
|
+ else if (stat & RXERROR_FRAMING)
|
|
+ flag = TTY_FRAME;
|
|
+
|
|
tty_insert_flip_char(&port->port, data[i+1],
|
|
flag);
|
|
}
|
|
@@ -730,15 +747,19 @@ static void usa49wg_indat_callback(struct urb *urb)
|
|
*/
|
|
for (x = 0; x + 1 < len &&
|
|
i + 1 < urb->actual_length; x += 2) {
|
|
- int stat = data[i], flag = 0;
|
|
+ int stat = data[i];
|
|
+ int flag = TTY_NORMAL;
|
|
|
|
- if (stat & RXERROR_OVERRUN)
|
|
- flag |= TTY_OVERRUN;
|
|
- if (stat & RXERROR_FRAMING)
|
|
- flag |= TTY_FRAME;
|
|
- if (stat & RXERROR_PARITY)
|
|
- flag |= TTY_PARITY;
|
|
+ if (stat & RXERROR_OVERRUN) {
|
|
+ tty_insert_flip_char(&port->port, 0,
|
|
+ TTY_OVERRUN);
|
|
+ }
|
|
/* XXX should handle break (0x10) */
|
|
+ if (stat & RXERROR_PARITY)
|
|
+ flag = TTY_PARITY;
|
|
+ else if (stat & RXERROR_FRAMING)
|
|
+ flag = TTY_FRAME;
|
|
+
|
|
tty_insert_flip_char(&port->port, data[i+1],
|
|
flag);
|
|
i += 2;
|
|
@@ -790,25 +811,31 @@ static void usa90_indat_callback(struct urb *urb)
|
|
if ((data[0] & 0x80) == 0) {
|
|
/* no errors on individual bytes, only
|
|
possible overrun err*/
|
|
- if (data[0] & RXERROR_OVERRUN)
|
|
- err = TTY_OVERRUN;
|
|
- else
|
|
- err = 0;
|
|
+ if (data[0] & RXERROR_OVERRUN) {
|
|
+ tty_insert_flip_char(&port->port, 0,
|
|
+ TTY_OVERRUN);
|
|
+ }
|
|
for (i = 1; i < urb->actual_length ; ++i)
|
|
tty_insert_flip_char(&port->port,
|
|
- data[i], err);
|
|
+ data[i], TTY_NORMAL);
|
|
} else {
|
|
/* some bytes had errors, every byte has status */
|
|
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
|
|
for (i = 0; i + 1 < urb->actual_length; i += 2) {
|
|
- int stat = data[i], flag = 0;
|
|
- if (stat & RXERROR_OVERRUN)
|
|
- flag |= TTY_OVERRUN;
|
|
- if (stat & RXERROR_FRAMING)
|
|
- flag |= TTY_FRAME;
|
|
- if (stat & RXERROR_PARITY)
|
|
- flag |= TTY_PARITY;
|
|
+ int stat = data[i];
|
|
+ int flag = TTY_NORMAL;
|
|
+
|
|
+ if (stat & RXERROR_OVERRUN) {
|
|
+ tty_insert_flip_char(
|
|
+ &port->port, 0,
|
|
+ TTY_OVERRUN);
|
|
+ }
|
|
/* XXX should handle break (0x10) */
|
|
+ if (stat & RXERROR_PARITY)
|
|
+ flag = TTY_PARITY;
|
|
+ else if (stat & RXERROR_FRAMING)
|
|
+ flag = TTY_FRAME;
|
|
+
|
|
tty_insert_flip_char(&port->port,
|
|
data[i+1], flag);
|
|
}
|
|
@@ -851,6 +878,8 @@ static void usa90_instat_callback(struct urb *urb)
|
|
|
|
port = serial->port[0];
|
|
p_priv = usb_get_serial_port_data(port);
|
|
+ if (!p_priv)
|
|
+ goto resubmit;
|
|
|
|
/* Update handshaking pin state information */
|
|
old_dcd_state = p_priv->dcd_state;
|
|
@@ -861,7 +890,7 @@ static void usa90_instat_callback(struct urb *urb)
|
|
|
|
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
|
|
tty_port_tty_hangup(&port->port, true);
|
|
-
|
|
+resubmit:
|
|
/* Resubmit urb so we continue receiving */
|
|
err = usb_submit_urb(urb, GFP_ATOMIC);
|
|
if (err != 0)
|
|
@@ -922,6 +951,8 @@ static void usa67_instat_callback(struct urb *urb)
|
|
|
|
port = serial->port[msg->port];
|
|
p_priv = usb_get_serial_port_data(port);
|
|
+ if (!p_priv)
|
|
+ goto resubmit;
|
|
|
|
/* Update handshaking pin state information */
|
|
old_dcd_state = p_priv->dcd_state;
|
|
@@ -930,7 +961,7 @@ static void usa67_instat_callback(struct urb *urb)
|
|
|
|
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
|
|
tty_port_tty_hangup(&port->port, true);
|
|
-
|
|
+resubmit:
|
|
/* Resubmit urb so we continue receiving */
|
|
err = usb_submit_urb(urb, GFP_ATOMIC);
|
|
if (err != 0)
|
|
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
|
|
index 618c1c1..5cdb32b 100644
|
|
--- a/drivers/usb/serial/kobil_sct.c
|
|
+++ b/drivers/usb/serial/kobil_sct.c
|
|
@@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
|
|
port->interrupt_out_urb->transfer_buffer_length = length;
|
|
|
|
priv->cur_pos = priv->cur_pos + length;
|
|
- result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
|
|
+ result = usb_submit_urb(port->interrupt_out_urb,
|
|
+ GFP_ATOMIC);
|
|
dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
|
|
todo = priv->filled - priv->cur_pos;
|
|
|
|
@@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
|
|
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
|
|
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
|
|
result = usb_submit_urb(port->interrupt_in_urb,
|
|
- GFP_NOIO);
|
|
+ GFP_ATOMIC);
|
|
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
|
|
}
|
|
}
|
|
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
|
|
index ab1d690..460a406 100644
|
|
--- a/drivers/usb/serial/mxuport.c
|
|
+++ b/drivers/usb/serial/mxuport.c
|
|
@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
|
|
}
|
|
|
|
/* Initial port termios */
|
|
- mxuport_set_termios(tty, port, NULL);
|
|
+ if (tty)
|
|
+ mxuport_set_termios(tty, port, NULL);
|
|
|
|
/*
|
|
* TODO: use RQ_VENDOR_GET_MSR, once we know what it
|
|
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
|
|
index 4856fb7..4b7bfb3 100644
|
|
--- a/drivers/usb/serial/opticon.c
|
|
+++ b/drivers/usb/serial/opticon.c
|
|
@@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
|
|
|
|
/* The connected devices do not have a bulk write endpoint,
|
|
* to transmit data to de barcode device the control endpoint is used */
|
|
- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
|
|
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
|
|
if (!dr) {
|
|
count = -ENOMEM;
|
|
goto error_no_dr;
|
|
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
|
|
index 240c073..096438e 100644
|
|
--- a/drivers/usb/serial/option.c
|
|
+++ b/drivers/usb/serial/option.c
|
|
@@ -269,14 +269,19 @@ static void option_instat_callback(struct urb *urb);
|
|
#define TELIT_PRODUCT_DE910_DUAL 0x1010
|
|
#define TELIT_PRODUCT_UE910_V2 0x1012
|
|
#define TELIT_PRODUCT_LE920 0x1200
|
|
+#define TELIT_PRODUCT_LE910 0x1201
|
|
|
|
/* ZTE PRODUCTS */
|
|
#define ZTE_VENDOR_ID 0x19d2
|
|
#define ZTE_PRODUCT_MF622 0x0001
|
|
#define ZTE_PRODUCT_MF628 0x0015
|
|
#define ZTE_PRODUCT_MF626 0x0031
|
|
-#define ZTE_PRODUCT_MC2718 0xffe8
|
|
#define ZTE_PRODUCT_AC2726 0xfff1
|
|
+#define ZTE_PRODUCT_CDMA_TECH 0xfffe
|
|
+#define ZTE_PRODUCT_AC8710T 0xffff
|
|
+#define ZTE_PRODUCT_MC2718 0xffe8
|
|
+#define ZTE_PRODUCT_AD3812 0xffeb
|
|
+#define ZTE_PRODUCT_MC2716 0xffed
|
|
|
|
#define BENQ_VENDOR_ID 0x04a5
|
|
#define BENQ_PRODUCT_H10 0x4068
|
|
@@ -357,6 +362,7 @@ static void option_instat_callback(struct urb *urb);
|
|
|
|
/* Haier products */
|
|
#define HAIER_VENDOR_ID 0x201e
|
|
+#define HAIER_PRODUCT_CE81B 0x10f8
|
|
#define HAIER_PRODUCT_CE100 0x2009
|
|
|
|
/* Cinterion (formerly Siemens) products */
|
|
@@ -494,6 +500,10 @@ static void option_instat_callback(struct urb *urb);
|
|
#define INOVIA_VENDOR_ID 0x20a6
|
|
#define INOVIA_SEW858 0x1105
|
|
|
|
+/* VIA Telecom */
|
|
+#define VIATELECOM_VENDOR_ID 0x15eb
|
|
+#define VIATELECOM_PRODUCT_CDS7 0x0001
|
|
+
|
|
/* some devices interfaces need special handling due to a number of reasons */
|
|
enum option_blacklist_reason {
|
|
OPTION_BLACKLIST_NONE = 0,
|
|
@@ -527,10 +537,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
|
|
.reserved = BIT(4),
|
|
};
|
|
|
|
+static const struct option_blacklist_info zte_ad3812_z_blacklist = {
|
|
+ .sendsetup = BIT(0) | BIT(1) | BIT(2),
|
|
+};
|
|
+
|
|
static const struct option_blacklist_info zte_mc2718_z_blacklist = {
|
|
.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
|
|
};
|
|
|
|
+static const struct option_blacklist_info zte_mc2716_z_blacklist = {
|
|
+ .sendsetup = BIT(1) | BIT(2) | BIT(3),
|
|
+};
|
|
+
|
|
static const struct option_blacklist_info huawei_cdc12_blacklist = {
|
|
.reserved = BIT(1) | BIT(2),
|
|
};
|
|
@@ -572,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
|
|
.reserved = BIT(3) | BIT(4),
|
|
};
|
|
|
|
+static const struct option_blacklist_info telit_le910_blacklist = {
|
|
+ .sendsetup = BIT(0),
|
|
+ .reserved = BIT(1) | BIT(2),
|
|
+};
|
|
+
|
|
static const struct option_blacklist_info telit_le920_blacklist = {
|
|
.sendsetup = BIT(0),
|
|
.reserved = BIT(1) | BIT(5),
|
|
@@ -1070,6 +1093,7 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
|
|
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
|
|
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
|
|
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
|
|
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
|
|
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
|
|
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
|
|
@@ -1120,6 +1144,8 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
|
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
|
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
|
|
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
|
|
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
|
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
|
|
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
|
|
@@ -1544,13 +1570,18 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
|
|
|
|
- /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
|
|
.driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
|
|
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
|
|
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
|
|
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
|
|
|
|
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
|
|
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
|
|
@@ -1590,6 +1621,7 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
|
|
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
|
|
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
|
|
/* Pirelli */
|
|
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
|
|
@@ -1723,7 +1755,9 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
|
|
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
|
|
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
|
|
+ { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
|
|
{ } /* Terminating entry */
|
|
};
|
|
MODULE_DEVICE_TABLE(usb, option_ids);
|
|
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
|
|
index b3d5a35..5219593 100644
|
|
--- a/drivers/usb/serial/pl2303.c
|
|
+++ b/drivers/usb/serial/pl2303.c
|
|
@@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
|
|
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
|
|
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
|
|
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
|
|
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
|
|
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
|
|
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
|
|
@@ -60,7 +61,6 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
|
|
{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
|
|
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
|
|
- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
|
|
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
|
|
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
|
|
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
|
|
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
|
|
index 42bc082..e3b7af8 100644
|
|
--- a/drivers/usb/serial/pl2303.h
|
|
+++ b/drivers/usb/serial/pl2303.h
|
|
@@ -22,6 +22,7 @@
|
|
#define PL2303_PRODUCT_ID_GPRS 0x0609
|
|
#define PL2303_PRODUCT_ID_HCR331 0x331a
|
|
#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
|
|
+#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
|
|
|
|
#define ATEN_VENDOR_ID 0x0557
|
|
#define ATEN_VENDOR_ID2 0x0547
|
|
@@ -61,10 +62,6 @@
|
|
#define ALCATEL_VENDOR_ID 0x11f7
|
|
#define ALCATEL_PRODUCT_ID 0x02df
|
|
|
|
-/* Samsung I330 phone cradle */
|
|
-#define SAMSUNG_VENDOR_ID 0x04e8
|
|
-#define SAMSUNG_PRODUCT_ID 0x8001
|
|
-
|
|
#define SIEMENS_VENDOR_ID 0x11f5
|
|
#define SIEMENS_PRODUCT_ID_SX1 0x0001
|
|
#define SIEMENS_PRODUCT_ID_X65 0x0003
|
|
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
|
|
index 3748034..89c55d4 100644
|
|
--- a/drivers/usb/serial/sierra.c
|
|
+++ b/drivers/usb/serial/sierra.c
|
|
@@ -282,14 +282,20 @@ static const struct usb_device_id id_table[] = {
|
|
/* Sierra Wireless HSPA Non-Composite Device */
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
|
|
{ USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
|
|
- { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
|
|
+ /* Sierra Wireless Direct IP modems */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
|
|
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
|
|
},
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
|
|
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
|
|
+ },
|
|
+ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
|
|
/* AT&T Direct IP LTE modems */
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
|
|
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
|
|
},
|
|
- { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
|
|
+ /* Airprime/Sierra Wireless Direct IP modems */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
|
|
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
|
|
},
|
|
|
|
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
|
|
index a7fe664..70a098d 100644
|
|
--- a/drivers/usb/serial/ssu100.c
|
|
+++ b/drivers/usb/serial/ssu100.c
|
|
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
|
|
if (*tty_flag == TTY_NORMAL)
|
|
*tty_flag = TTY_FRAME;
|
|
}
|
|
- if (lsr & UART_LSR_OE){
|
|
+ if (lsr & UART_LSR_OE) {
|
|
port->icount.overrun++;
|
|
- if (*tty_flag == TTY_NORMAL)
|
|
- *tty_flag = TTY_OVERRUN;
|
|
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
|
|
}
|
|
}
|
|
|
|
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
|
|
if ((len >= 4) &&
|
|
(packet[0] == 0x1b) && (packet[1] == 0x1b) &&
|
|
((packet[2] == 0x00) || (packet[2] == 0x01))) {
|
|
- if (packet[2] == 0x00) {
|
|
+ if (packet[2] == 0x00)
|
|
ssu100_update_lsr(port, packet[3], &flag);
|
|
- if (flag == TTY_OVERRUN)
|
|
- tty_insert_flip_char(&port->port, 0,
|
|
- TTY_OVERRUN);
|
|
- }
|
|
if (packet[2] == 0x01)
|
|
ssu100_update_msr(port, packet[3]);
|
|
|
|
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
|
|
index 9fa7dd4..5e5e882 100644
|
|
--- a/drivers/usb/serial/symbolserial.c
|
|
+++ b/drivers/usb/serial/symbolserial.c
|
|
@@ -96,7 +96,7 @@ exit:
|
|
|
|
static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
|
|
{
|
|
- struct symbol_private *priv = usb_get_serial_data(port->serial);
|
|
+ struct symbol_private *priv = usb_get_serial_port_data(port);
|
|
unsigned long flags;
|
|
int result = 0;
|
|
|
|
@@ -122,7 +122,7 @@ static void symbol_close(struct usb_serial_port *port)
|
|
static void symbol_throttle(struct tty_struct *tty)
|
|
{
|
|
struct usb_serial_port *port = tty->driver_data;
|
|
- struct symbol_private *priv = usb_get_serial_data(port->serial);
|
|
+ struct symbol_private *priv = usb_get_serial_port_data(port);
|
|
|
|
spin_lock_irq(&priv->lock);
|
|
priv->throttled = true;
|
|
@@ -132,7 +132,7 @@ static void symbol_throttle(struct tty_struct *tty)
|
|
static void symbol_unthrottle(struct tty_struct *tty)
|
|
{
|
|
struct usb_serial_port *port = tty->driver_data;
|
|
- struct symbol_private *priv = usb_get_serial_data(port->serial);
|
|
+ struct symbol_private *priv = usb_get_serial_port_data(port);
|
|
int result;
|
|
bool was_throttled;
|
|
|
|
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
|
|
index b169b0f..3d66e9c 100644
|
|
--- a/drivers/usb/serial/usb-serial.c
|
|
+++ b/drivers/usb/serial/usb-serial.c
|
|
@@ -764,29 +764,39 @@ static int usb_serial_probe(struct usb_interface *interface,
|
|
if (usb_endpoint_is_bulk_in(endpoint)) {
|
|
/* we found a bulk in endpoint */
|
|
dev_dbg(ddev, "found bulk in on endpoint %d\n", i);
|
|
- bulk_in_endpoint[num_bulk_in] = endpoint;
|
|
- ++num_bulk_in;
|
|
+ if (num_bulk_in < MAX_NUM_PORTS) {
|
|
+ bulk_in_endpoint[num_bulk_in] = endpoint;
|
|
+ ++num_bulk_in;
|
|
+ }
|
|
}
|
|
|
|
if (usb_endpoint_is_bulk_out(endpoint)) {
|
|
/* we found a bulk out endpoint */
|
|
dev_dbg(ddev, "found bulk out on endpoint %d\n", i);
|
|
- bulk_out_endpoint[num_bulk_out] = endpoint;
|
|
- ++num_bulk_out;
|
|
+ if (num_bulk_out < MAX_NUM_PORTS) {
|
|
+ bulk_out_endpoint[num_bulk_out] = endpoint;
|
|
+ ++num_bulk_out;
|
|
+ }
|
|
}
|
|
|
|
if (usb_endpoint_is_int_in(endpoint)) {
|
|
/* we found a interrupt in endpoint */
|
|
dev_dbg(ddev, "found interrupt in on endpoint %d\n", i);
|
|
- interrupt_in_endpoint[num_interrupt_in] = endpoint;
|
|
- ++num_interrupt_in;
|
|
+ if (num_interrupt_in < MAX_NUM_PORTS) {
|
|
+ interrupt_in_endpoint[num_interrupt_in] =
|
|
+ endpoint;
|
|
+ ++num_interrupt_in;
|
|
+ }
|
|
}
|
|
|
|
if (usb_endpoint_is_int_out(endpoint)) {
|
|
/* we found an interrupt out endpoint */
|
|
dev_dbg(ddev, "found interrupt out on endpoint %d\n", i);
|
|
- interrupt_out_endpoint[num_interrupt_out] = endpoint;
|
|
- ++num_interrupt_out;
|
|
+ if (num_interrupt_out < MAX_NUM_PORTS) {
|
|
+ interrupt_out_endpoint[num_interrupt_out] =
|
|
+ endpoint;
|
|
+ ++num_interrupt_out;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -809,8 +819,10 @@ static int usb_serial_probe(struct usb_interface *interface,
|
|
if (usb_endpoint_is_int_in(endpoint)) {
|
|
/* we found a interrupt in endpoint */
|
|
dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n");
|
|
- interrupt_in_endpoint[num_interrupt_in] = endpoint;
|
|
- ++num_interrupt_in;
|
|
+ if (num_interrupt_in < MAX_NUM_PORTS) {
|
|
+ interrupt_in_endpoint[num_interrupt_in] = endpoint;
|
|
+ ++num_interrupt_in;
|
|
+ }
|
|
}
|
|
}
|
|
}
|
|
@@ -850,6 +862,11 @@ static int usb_serial_probe(struct usb_interface *interface,
|
|
num_ports = type->num_ports;
|
|
}
|
|
|
|
+ if (num_ports > MAX_NUM_PORTS) {
|
|
+ dev_warn(ddev, "too many ports requested: %d\n", num_ports);
|
|
+ num_ports = MAX_NUM_PORTS;
|
|
+ }
|
|
+
|
|
serial->num_ports = num_ports;
|
|
serial->num_bulk_in = num_bulk_in;
|
|
serial->num_bulk_out = num_bulk_out;
|
|
@@ -1283,6 +1300,7 @@ static void __exit usb_serial_exit(void)
|
|
tty_unregister_driver(usb_serial_tty_driver);
|
|
put_tty_driver(usb_serial_tty_driver);
|
|
bus_unregister(&usb_serial_bus_type);
|
|
+ idr_destroy(&serial_minors);
|
|
}
|
|
|
|
|
|
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
|
|
index bf2bd40..60afb39 100644
|
|
--- a/drivers/usb/serial/visor.c
|
|
+++ b/drivers/usb/serial/visor.c
|
|
@@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = {
|
|
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
|
|
{ USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
|
|
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
|
|
- { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
|
|
+ { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
|
|
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
|
|
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
|
|
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
|
|
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
|
|
index e62f2df..6c3734d 100644
|
|
--- a/drivers/usb/serial/whiteheat.c
|
|
+++ b/drivers/usb/serial/whiteheat.c
|
|
@@ -514,6 +514,10 @@ static void command_port_read_callback(struct urb *urb)
|
|
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
|
|
return;
|
|
}
|
|
+ if (!urb->actual_length) {
|
|
+ dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
|
|
+ return;
|
|
+ }
|
|
if (status) {
|
|
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
|
|
if (status != -ENOENT)
|
|
@@ -534,7 +538,8 @@ static void command_port_read_callback(struct urb *urb)
|
|
/* These are unsolicited reports from the firmware, hence no
|
|
waiting command to wakeup */
|
|
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
|
|
- } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
|
|
+ } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
|
|
+ (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
|
|
memcpy(command_info->result_buffer, &data[1],
|
|
urb->actual_length - 1);
|
|
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
|
|
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
|
|
index e40ab73..c9bb107 100644
|
|
--- a/drivers/usb/serial/zte_ev.c
|
|
+++ b/drivers/usb/serial/zte_ev.c
|
|
@@ -272,28 +272,16 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
|
|
}
|
|
|
|
static const struct usb_device_id id_table[] = {
|
|
- /* AC8710, AC8710T */
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
|
|
- /* AC8700 */
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
|
|
- /* MG880 */
|
|
- { USB_DEVICE(0x19d2, 0xfffd) },
|
|
- { USB_DEVICE(0x19d2, 0xfffc) },
|
|
- { USB_DEVICE(0x19d2, 0xfffb) },
|
|
- /* AC8710_V3 */
|
|
+ { USB_DEVICE(0x19d2, 0xffec) },
|
|
+ { USB_DEVICE(0x19d2, 0xffee) },
|
|
{ USB_DEVICE(0x19d2, 0xfff6) },
|
|
{ USB_DEVICE(0x19d2, 0xfff7) },
|
|
{ USB_DEVICE(0x19d2, 0xfff8) },
|
|
{ USB_DEVICE(0x19d2, 0xfff9) },
|
|
- { USB_DEVICE(0x19d2, 0xffee) },
|
|
- /* AC2716, MC2716 */
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
|
|
- /* AD3812 */
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
|
|
- { USB_DEVICE(0x19d2, 0xffec) },
|
|
- { USB_DEVICE(0x05C6, 0x3197) },
|
|
- { USB_DEVICE(0x05C6, 0x6000) },
|
|
- { USB_DEVICE(0x05C6, 0x9008) },
|
|
+ { USB_DEVICE(0x19d2, 0xfffb) },
|
|
+ { USB_DEVICE(0x19d2, 0xfffc) },
|
|
+ /* MG880 */
|
|
+ { USB_DEVICE(0x19d2, 0xfffd) },
|
|
{ },
|
|
};
|
|
MODULE_DEVICE_TABLE(usb, id_table);
|
|
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
|
|
index 22c7d43..b1d815e 100644
|
|
--- a/drivers/usb/storage/transport.c
|
|
+++ b/drivers/usb/storage/transport.c
|
|
@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
|
|
*/
|
|
if (result == USB_STOR_XFER_LONG)
|
|
fake_sense = 1;
|
|
+
|
|
+ /*
|
|
+ * Sometimes a device will mistakenly skip the data phase
|
|
+ * and go directly to the status phase without sending a
|
|
+ * zero-length packet. If we get a 13-byte response here,
|
|
+ * check whether it really is a CSW.
|
|
+ */
|
|
+ if (result == USB_STOR_XFER_SHORT &&
|
|
+ srb->sc_data_direction == DMA_FROM_DEVICE &&
|
|
+ transfer_length - scsi_get_resid(srb) ==
|
|
+ US_BULK_CS_WRAP_LEN) {
|
|
+ struct scatterlist *sg = NULL;
|
|
+ unsigned int offset = 0;
|
|
+
|
|
+ if (usb_stor_access_xfer_buf((unsigned char *) bcs,
|
|
+ US_BULK_CS_WRAP_LEN, srb, &sg,
|
|
+ &offset, FROM_XFER_BUF) ==
|
|
+ US_BULK_CS_WRAP_LEN &&
|
|
+ bcs->Signature ==
|
|
+ cpu_to_le32(US_BULK_CS_SIGN)) {
|
|
+ usb_stor_dbg(us, "Device skipped data phase\n");
|
|
+ scsi_set_resid(srb, transfer_length);
|
|
+ goto skipped_data_phase;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
/* See flow chart on pg 15 of the Bulk Only Transport spec for
|
|
@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
|
|
if (result != USB_STOR_XFER_GOOD)
|
|
return USB_STOR_TRANSPORT_ERROR;
|
|
|
|
+ skipped_data_phase:
|
|
/* check bulk status */
|
|
residue = le32_to_cpu(bcs->Residue);
|
|
usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
|
|
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
|
|
index 042c83b0..da380a9 100644
|
|
--- a/drivers/usb/storage/unusual_devs.h
|
|
+++ b/drivers/usb/storage/unusual_devs.h
|
|
@@ -101,6 +101,12 @@ UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001,
|
|
"PhotoSmart R707",
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
|
|
|
|
+UNUSUAL_DEV( 0x03f3, 0x0001, 0x0000, 0x9999,
|
|
+ "Adaptec",
|
|
+ "USBConnect 2000",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
|
|
+ US_FL_SCM_MULT_TARG ),
|
|
+
|
|
/* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
|
|
* and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
|
|
* for USB floppies that need the SINGLE_LUN enforcement.
|
|
@@ -741,6 +747,12 @@ UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_SINGLE_LUN ),
|
|
|
|
+UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100,
|
|
+ "Iomega",
|
|
+ "Jaz USB Adapter",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
+ US_FL_SINGLE_LUN ),
|
|
+
|
|
/* Reported by <Hendryk.Pfeiffer@gmx.de> */
|
|
UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
|
|
"LaCie",
|
|
@@ -748,6 +760,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_GO_SLOW ),
|
|
|
|
+/* Reported by Christian Schaller <cschalle@redhat.com> */
|
|
+UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000,
|
|
+ "LaCie",
|
|
+ "External HDD",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
+ US_FL_NO_WP_DETECT ),
|
|
+
|
|
/* Submitted by Joel Bourquard <numlock@freesurf.ch>
|
|
* Some versions of this device need the SubClass and Protocol overrides
|
|
* while others don't.
|
|
@@ -1113,6 +1132,18 @@ UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_NOT_LOCKABLE),
|
|
|
|
+UNUSUAL_DEV( 0x085a, 0x0026, 0x0100, 0x0133,
|
|
+ "Xircom",
|
|
+ "PortGear USB-SCSI (Mac USB Dock)",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
|
|
+ US_FL_SCM_MULT_TARG ),
|
|
+
|
|
+UNUSUAL_DEV( 0x085a, 0x0028, 0x0100, 0x0133,
|
|
+ "Xircom",
|
|
+ "PortGear USB to SCSI Converter",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
|
|
+ US_FL_SCM_MULT_TARG ),
|
|
+
|
|
/* Submitted by Jan De Luyck <lkml@kcore.org> */
|
|
UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
|
|
"CITIZEN",
|
|
@@ -1945,6 +1976,14 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
|
|
|
|
+/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
|
|
+ * and Mac USB Dock USB-SCSI */
|
|
+UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
|
|
+ "Entrega Technologies",
|
|
+ "USB to SCSI Converter",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
|
|
+ US_FL_SCM_MULT_TARG ),
|
|
+
|
|
/* Reported by Robert Schedel <r.schedel@yahoo.de>
|
|
* Note: this is a 'super top' device like the above 14cd/6600 device */
|
|
UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
|
|
@@ -1967,6 +2006,12 @@ UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
|
|
|
|
+UNUSUAL_DEV( 0x1822, 0x0001, 0x0000, 0x9999,
|
|
+ "Ariston Technologies",
|
|
+ "iConnect USB to SCSI adapter",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
|
|
+ US_FL_SCM_MULT_TARG ),
|
|
+
|
|
/* Reported by Hans de Goede <hdegoede@redhat.com>
|
|
* These Appotech controllers are found in Picture Frames, they provide a
|
|
* (buggy) emulation of a cdrom drive which contains the windows software
|
|
@@ -1987,6 +2032,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_NO_READ_DISC_INFO ),
|
|
|
|
+/* Reported by Oliver Neukum <oneukum@suse.com>
|
|
+ * This device morphes spontaneously into another device if the access
|
|
+ * pattern of Windows isn't followed. Thus writable media would be dirty
|
|
+ * if the initial instance is used. So the device is limited to its
|
|
+ * virtual CD.
|
|
+ * And yes, the concept that BCD goes up to 9 is not heeded */
|
|
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
|
|
+ "ZTE,Incorporated",
|
|
+ "ZTE WCDMA Technologies MSM",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
+ US_FL_SINGLE_LUN ),
|
|
+
|
|
/* Reported by Sven Geggus <sven-usbst@geggus.net>
|
|
* This encrypted pen drive returns bogus data for the initial READ(10).
|
|
*/
|
|
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
|
|
index 80079b8..d0303f0 100644
|
|
--- a/drivers/uwb/lc-dev.c
|
|
+++ b/drivers/uwb/lc-dev.c
|
|
@@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
|
|
uwb_dev->mac_addr = *bce->mac_addr;
|
|
uwb_dev->dev_addr = bce->dev_addr;
|
|
dev_set_name(&uwb_dev->dev, "%s", macbuf);
|
|
+
|
|
+ /* plug the beacon cache */
|
|
+ bce->uwb_dev = uwb_dev;
|
|
+ uwb_dev->bce = bce;
|
|
+ uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
|
|
+
|
|
result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
|
|
if (result < 0) {
|
|
dev_err(dev, "new device %s: cannot instantiate device\n",
|
|
macbuf);
|
|
goto error_dev_add;
|
|
}
|
|
- /* plug the beacon cache */
|
|
- bce->uwb_dev = uwb_dev;
|
|
- uwb_dev->bce = bce;
|
|
- uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
|
|
+
|
|
dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
|
|
macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
|
|
dev_name(rc->uwb_dev.dev.parent));
|
|
@@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
|
|
return;
|
|
|
|
error_dev_add:
|
|
+ bce->uwb_dev = NULL;
|
|
+ uwb_bce_put(bce);
|
|
kfree(uwb_dev);
|
|
return;
|
|
}
|
|
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
|
|
index 7ba0424..75e1d03 100644
|
|
--- a/drivers/vfio/pci/vfio_pci.c
|
|
+++ b/drivers/vfio/pci/vfio_pci.c
|
|
@@ -810,13 +810,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
|
|
|
|
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
{
|
|
- u8 type;
|
|
struct vfio_pci_device *vdev;
|
|
struct iommu_group *group;
|
|
int ret;
|
|
|
|
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
|
|
- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
|
|
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
|
|
return -EINVAL;
|
|
|
|
group = iommu_group_get(&pdev->dev);
|
|
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
|
|
index e48d4a6..486d710 100644
|
|
--- a/drivers/vhost/scsi.c
|
|
+++ b/drivers/vhost/scsi.c
|
|
@@ -861,6 +861,23 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
|
|
return 0;
|
|
}
|
|
|
|
+static int vhost_scsi_to_tcm_attr(int attr)
|
|
+{
|
|
+ switch (attr) {
|
|
+ case VIRTIO_SCSI_S_SIMPLE:
|
|
+ return MSG_SIMPLE_TAG;
|
|
+ case VIRTIO_SCSI_S_ORDERED:
|
|
+ return MSG_ORDERED_TAG;
|
|
+ case VIRTIO_SCSI_S_HEAD:
|
|
+ return MSG_HEAD_TAG;
|
|
+ case VIRTIO_SCSI_S_ACA:
|
|
+ return MSG_ACA_TAG;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ return MSG_SIMPLE_TAG;
|
|
+}
|
|
+
|
|
static void tcm_vhost_submission_work(struct work_struct *work)
|
|
{
|
|
struct tcm_vhost_cmd *cmd =
|
|
@@ -887,9 +904,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
|
|
rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
|
|
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
|
|
cmd->tvc_lun, cmd->tvc_exp_data_len,
|
|
- cmd->tvc_task_attr, cmd->tvc_data_direction,
|
|
- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
|
|
- sg_bidi_ptr, sg_no_bidi, NULL, 0);
|
|
+ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
|
|
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
|
|
+ sg_ptr, cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi,
|
|
+ NULL, 0);
|
|
if (rc < 0) {
|
|
transport_send_check_condition_and_sense(se_cmd,
|
|
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
|
|
@@ -1200,6 +1218,7 @@ static int
|
|
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
|
|
struct vhost_scsi_target *t)
|
|
{
|
|
+ struct se_portal_group *se_tpg;
|
|
struct tcm_vhost_tport *tv_tport;
|
|
struct tcm_vhost_tpg *tpg;
|
|
struct tcm_vhost_tpg **vs_tpg;
|
|
@@ -1247,6 +1266,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
|
|
ret = -EEXIST;
|
|
goto out;
|
|
}
|
|
+ /*
|
|
+ * In order to ensure individual vhost-scsi configfs
|
|
+ * groups cannot be removed while in use by vhost ioctl,
|
|
+ * go ahead and take an explicit se_tpg->tpg_group.cg_item
|
|
+ * dependency now.
|
|
+ */
|
|
+ se_tpg = &tpg->se_tpg;
|
|
+ ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
|
|
+ &se_tpg->tpg_group.cg_item);
|
|
+ if (ret) {
|
|
+ pr_warn("configfs_depend_item() failed: %d\n", ret);
|
|
+ kfree(vs_tpg);
|
|
+ mutex_unlock(&tpg->tv_tpg_mutex);
|
|
+ goto out;
|
|
+ }
|
|
tpg->tv_tpg_vhost_count++;
|
|
tpg->vhost_scsi = vs;
|
|
vs_tpg[tpg->tport_tpgt] = tpg;
|
|
@@ -1289,6 +1323,7 @@ static int
|
|
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
|
|
struct vhost_scsi_target *t)
|
|
{
|
|
+ struct se_portal_group *se_tpg;
|
|
struct tcm_vhost_tport *tv_tport;
|
|
struct tcm_vhost_tpg *tpg;
|
|
struct vhost_virtqueue *vq;
|
|
@@ -1337,6 +1372,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
|
|
vs->vs_tpg[target] = NULL;
|
|
match = true;
|
|
mutex_unlock(&tpg->tv_tpg_mutex);
|
|
+ /*
|
|
+ * Release se_tpg->tpg_group.cg_item configfs dependency now
|
|
+ * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
|
|
+ */
|
|
+ se_tpg = &tpg->se_tpg;
|
|
+ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
|
|
+ &se_tpg->tpg_group.cg_item);
|
|
}
|
|
if (match) {
|
|
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
|
|
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
|
|
index 78987e4..85095d7 100644
|
|
--- a/drivers/vhost/vhost.c
|
|
+++ b/drivers/vhost/vhost.c
|
|
@@ -876,6 +876,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
|
|
}
|
|
if (eventfp != d->log_file) {
|
|
filep = d->log_file;
|
|
+ d->log_file = eventfp;
|
|
ctx = d->log_ctx;
|
|
d->log_ctx = eventfp ?
|
|
eventfd_ctx_fileget(eventfp) : NULL;
|
|
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
|
|
index 61b182b..dbfe4ee 100644
|
|
--- a/drivers/video/console/bitblit.c
|
|
+++ b/drivers/video/console/bitblit.c
|
|
@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
|
|
static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
|
|
int bottom_only)
|
|
{
|
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
|
unsigned int cw = vc->vc_font.width;
|
|
unsigned int ch = vc->vc_font.height;
|
|
unsigned int rw = info->var.xres - (vc->vc_cols*cw);
|
|
@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
|
|
unsigned int bs = info->var.yres - bh;
|
|
struct fb_fillrect region;
|
|
|
|
- region.color = attr_bgcol_ec(bgshift, vc, info);
|
|
+ region.color = 0;
|
|
region.rop = ROP_COPY;
|
|
|
|
if (rw && !bottom_only) {
|
|
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
|
|
index 41b32ae..5a3cbf6 100644
|
|
--- a/drivers/video/console/fbcon_ccw.c
|
|
+++ b/drivers/video/console/fbcon_ccw.c
|
|
@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
|
|
unsigned int bh = info->var.xres - (vc->vc_rows*ch);
|
|
unsigned int bs = vc->vc_rows*ch;
|
|
struct fb_fillrect region;
|
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
|
|
|
- region.color = attr_bgcol_ec(bgshift,vc,info);
|
|
+ region.color = 0;
|
|
region.rop = ROP_COPY;
|
|
|
|
if (rw && !bottom_only) {
|
|
diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
|
|
index a93670e..e7ee44d 100644
|
|
--- a/drivers/video/console/fbcon_cw.c
|
|
+++ b/drivers/video/console/fbcon_cw.c
|
|
@@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
|
|
unsigned int bh = info->var.xres - (vc->vc_rows*ch);
|
|
unsigned int rs = info->var.yres - rw;
|
|
struct fb_fillrect region;
|
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
|
|
|
- region.color = attr_bgcol_ec(bgshift,vc,info);
|
|
+ region.color = 0;
|
|
region.rop = ROP_COPY;
|
|
|
|
if (rw && !bottom_only) {
|
|
diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
|
|
index ff0872c..19e3714 100644
|
|
--- a/drivers/video/console/fbcon_ud.c
|
|
+++ b/drivers/video/console/fbcon_ud.c
|
|
@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
|
|
unsigned int rw = info->var.xres - (vc->vc_cols*cw);
|
|
unsigned int bh = info->var.yres - (vc->vc_rows*ch);
|
|
struct fb_fillrect region;
|
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
|
|
|
- region.color = attr_bgcol_ec(bgshift,vc,info);
|
|
+ region.color = 0;
|
|
region.rop = ROP_COPY;
|
|
|
|
if (rw && !bottom_only) {
|
|
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
|
|
index b670cbd..ffe024b 100644
|
|
--- a/drivers/video/logo/logo.c
|
|
+++ b/drivers/video/logo/logo.c
|
|
@@ -21,6 +21,21 @@ static bool nologo;
|
|
module_param(nologo, bool, 0);
|
|
MODULE_PARM_DESC(nologo, "Disables startup logo");
|
|
|
|
+/*
|
|
+ * Logos are located in the initdata, and will be freed in kernel_init.
|
|
+ * Use late_init to mark the logos as freed to prevent any further use.
|
|
+ */
|
|
+
|
|
+static bool logos_freed;
|
|
+
|
|
+static int __init fb_logo_late_init(void)
|
|
+{
|
|
+ logos_freed = true;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+late_initcall(fb_logo_late_init);
|
|
+
|
|
/* logo's are marked __initdata. Use __init_refok to tell
|
|
* modpost that it is intended that this function uses data
|
|
* marked __initdata.
|
|
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
|
|
{
|
|
const struct linux_logo *logo = NULL;
|
|
|
|
- if (nologo)
|
|
+ if (nologo || logos_freed)
|
|
return NULL;
|
|
|
|
if (depth >= 1) {
|
|
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
|
|
index a416f9b..827b5f8 100644
|
|
--- a/drivers/virtio/virtio_pci.c
|
|
+++ b/drivers/virtio/virtio_pci.c
|
|
@@ -791,6 +791,7 @@ static int virtio_pci_restore(struct device *dev)
|
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
|
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
|
|
struct virtio_driver *drv;
|
|
+ unsigned status = 0;
|
|
int ret;
|
|
|
|
drv = container_of(vp_dev->vdev.dev.driver,
|
|
@@ -801,14 +802,40 @@ static int virtio_pci_restore(struct device *dev)
|
|
return ret;
|
|
|
|
pci_set_master(pci_dev);
|
|
+ /* We always start by resetting the device, in case a previous
|
|
+ * driver messed it up. */
|
|
+ vp_reset(&vp_dev->vdev);
|
|
+
|
|
+ /* Acknowledge that we've seen the device. */
|
|
+ status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
|
|
+ vp_set_status(&vp_dev->vdev, status);
|
|
+
|
|
+ /* Maybe driver failed before freeze.
|
|
+ * Restore the failed status, for debugging. */
|
|
+ status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
|
|
+ vp_set_status(&vp_dev->vdev, status);
|
|
+
|
|
+ if (!drv)
|
|
+ return 0;
|
|
+
|
|
+ /* We have a driver! */
|
|
+ status |= VIRTIO_CONFIG_S_DRIVER;
|
|
+ vp_set_status(&vp_dev->vdev, status);
|
|
+
|
|
vp_finalize_features(&vp_dev->vdev);
|
|
|
|
- if (drv && drv->restore)
|
|
+ if (drv->restore) {
|
|
ret = drv->restore(&vp_dev->vdev);
|
|
+ if (ret) {
|
|
+ status |= VIRTIO_CONFIG_S_FAILED;
|
|
+ vp_set_status(&vp_dev->vdev, status);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
|
|
/* Finally, tell the device we're all set */
|
|
- if (!ret)
|
|
- vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
|
|
+ status |= VIRTIO_CONFIG_S_DRIVER_OK;
|
|
+ vp_set_status(&vp_dev->vdev, status);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
|
|
index 09cf013..90a6406 100644
|
|
--- a/drivers/watchdog/omap_wdt.c
|
|
+++ b/drivers/watchdog/omap_wdt.c
|
|
@@ -134,6 +134,13 @@ static int omap_wdt_start(struct watchdog_device *wdog)
|
|
|
|
pm_runtime_get_sync(wdev->dev);
|
|
|
|
+ /*
|
|
+ * Make sure the watchdog is disabled. This is unfortunately required
|
|
+ * because writing to various registers with the watchdog running has no
|
|
+ * effect.
|
|
+ */
|
|
+ omap_wdt_disable(wdev);
|
|
+
|
|
/* initialize prescaler */
|
|
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
|
|
cpu_relax();
|
|
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
|
|
index d7ff917..843e5d8 100644
|
|
--- a/drivers/xen/events/events_2l.c
|
|
+++ b/drivers/xen/events/events_2l.c
|
|
@@ -352,6 +352,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
+static void evtchn_2l_resume(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for_each_online_cpu(i)
|
|
+ memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
|
|
+ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
|
|
+}
|
|
+
|
|
static const struct evtchn_ops evtchn_ops_2l = {
|
|
.max_channels = evtchn_2l_max_channels,
|
|
.nr_channels = evtchn_2l_max_channels,
|
|
@@ -363,6 +372,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
|
|
.mask = evtchn_2l_mask,
|
|
.unmask = evtchn_2l_unmask,
|
|
.handle_events = evtchn_2l_handle_events,
|
|
+ .resume = evtchn_2l_resume,
|
|
};
|
|
|
|
void __init xen_evtchn_2l_init(void)
|
|
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
|
|
index f4a9e33..5af64e9 100644
|
|
--- a/drivers/xen/events/events_base.c
|
|
+++ b/drivers/xen/events/events_base.c
|
|
@@ -547,20 +547,26 @@ static unsigned int __startup_pirq(unsigned int irq)
|
|
pirq_query_unmask(irq);
|
|
|
|
rc = set_evtchn_to_irq(evtchn, irq);
|
|
- if (rc != 0) {
|
|
- pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
|
|
- irq, rc);
|
|
- xen_evtchn_close(evtchn);
|
|
- return 0;
|
|
- }
|
|
- bind_evtchn_to_cpu(evtchn, 0);
|
|
+ if (rc)
|
|
+ goto err;
|
|
+
|
|
info->evtchn = evtchn;
|
|
+ bind_evtchn_to_cpu(evtchn, 0);
|
|
+
|
|
+ rc = xen_evtchn_port_setup(info);
|
|
+ if (rc)
|
|
+ goto err;
|
|
|
|
out:
|
|
unmask_evtchn(evtchn);
|
|
eoi_pirq(irq_get_irq_data(irq));
|
|
|
|
return 0;
|
|
+
|
|
+err:
|
|
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
|
|
+ xen_evtchn_close(evtchn);
|
|
+ return 0;
|
|
}
|
|
|
|
static unsigned int startup_pirq(struct irq_data *data)
|
|
@@ -967,7 +973,7 @@ unsigned xen_evtchn_nr_channels(void)
|
|
}
|
|
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
|
|
|
|
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
|
|
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
|
|
{
|
|
struct evtchn_bind_virq bind_virq;
|
|
int evtchn, irq, ret;
|
|
@@ -981,8 +987,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
|
|
if (irq < 0)
|
|
goto out;
|
|
|
|
- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
|
|
- handle_percpu_irq, "virq");
|
|
+ if (percpu)
|
|
+ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
|
|
+ handle_percpu_irq, "virq");
|
|
+ else
|
|
+ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
|
|
+ handle_edge_irq, "virq");
|
|
|
|
bind_virq.virq = virq;
|
|
bind_virq.vcpu = cpu;
|
|
@@ -1072,7 +1082,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
|
|
{
|
|
int irq, retval;
|
|
|
|
- irq = bind_virq_to_irq(virq, cpu);
|
|
+ irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
|
|
if (irq < 0)
|
|
return irq;
|
|
retval = request_irq(irq, handler, irqflags, devname, dev_id);
|
|
@@ -1288,8 +1298,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
|
|
|
|
mutex_unlock(&irq_mapping_update_lock);
|
|
|
|
- /* new event channels are always bound to cpu 0 */
|
|
- irq_set_affinity(irq, cpumask_of(0));
|
|
+ bind_evtchn_to_cpu(evtchn, info->cpu);
|
|
+ /* This will be deferred until interrupt is processed */
|
|
+ irq_set_affinity(irq, cpumask_of(info->cpu));
|
|
|
|
/* Unmask the event channel. */
|
|
enable_irq(irq);
|
|
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
|
|
index 640b3cf..ef7d446 100644
|
|
--- a/drivers/xen/events/events_fifo.c
|
|
+++ b/drivers/xen/events/events_fifo.c
|
|
@@ -67,10 +67,9 @@ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
|
|
static unsigned event_array_pages __read_mostly;
|
|
|
|
/*
|
|
- * sync_set_bit() and friends must be unsigned long aligned on non-x86
|
|
- * platforms.
|
|
+ * sync_set_bit() and friends must be unsigned long aligned.
|
|
*/
|
|
-#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
|
|
+#if BITS_PER_LONG > 32
|
|
|
|
#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
|
|
#define EVTCHN_FIFO_BIT(b, w) \
|
|
@@ -100,6 +99,25 @@ static unsigned evtchn_fifo_nr_channels(void)
|
|
return event_array_pages * EVENT_WORDS_PER_PAGE;
|
|
}
|
|
|
|
+static int init_control_block(int cpu,
|
|
+ struct evtchn_fifo_control_block *control_block)
|
|
+{
|
|
+ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
|
|
+ struct evtchn_init_control init_control;
|
|
+ unsigned int i;
|
|
+
|
|
+ /* Reset the control block and the local HEADs. */
|
|
+ clear_page(control_block);
|
|
+ for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
|
|
+ q->head[i] = 0;
|
|
+
|
|
+ init_control.control_gfn = virt_to_mfn(control_block);
|
|
+ init_control.offset = 0;
|
|
+ init_control.vcpu = cpu;
|
|
+
|
|
+ return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
|
|
+}
|
|
+
|
|
static void free_unused_array_pages(void)
|
|
{
|
|
unsigned i;
|
|
@@ -328,7 +346,6 @@ static void evtchn_fifo_resume(void)
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
void *control_block = per_cpu(cpu_control_block, cpu);
|
|
- struct evtchn_init_control init_control;
|
|
int ret;
|
|
|
|
if (!control_block)
|
|
@@ -345,12 +362,7 @@ static void evtchn_fifo_resume(void)
|
|
continue;
|
|
}
|
|
|
|
- init_control.control_gfn = virt_to_mfn(control_block);
|
|
- init_control.offset = 0;
|
|
- init_control.vcpu = cpu;
|
|
-
|
|
- ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
|
|
- &init_control);
|
|
+ ret = init_control_block(cpu, control_block);
|
|
if (ret < 0)
|
|
BUG();
|
|
}
|
|
@@ -378,30 +390,25 @@ static const struct evtchn_ops evtchn_ops_fifo = {
|
|
.resume = evtchn_fifo_resume,
|
|
};
|
|
|
|
-static int evtchn_fifo_init_control_block(unsigned cpu)
|
|
+static int evtchn_fifo_alloc_control_block(unsigned cpu)
|
|
{
|
|
- struct page *control_block = NULL;
|
|
- struct evtchn_init_control init_control;
|
|
+ void *control_block = NULL;
|
|
int ret = -ENOMEM;
|
|
|
|
- control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
|
|
+ control_block = (void *)__get_free_page(GFP_KERNEL);
|
|
if (control_block == NULL)
|
|
goto error;
|
|
|
|
- init_control.control_gfn = virt_to_mfn(page_address(control_block));
|
|
- init_control.offset = 0;
|
|
- init_control.vcpu = cpu;
|
|
-
|
|
- ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
|
|
+ ret = init_control_block(cpu, control_block);
|
|
if (ret < 0)
|
|
goto error;
|
|
|
|
- per_cpu(cpu_control_block, cpu) = page_address(control_block);
|
|
+ per_cpu(cpu_control_block, cpu) = control_block;
|
|
|
|
return 0;
|
|
|
|
error:
|
|
- __free_page(control_block);
|
|
+ free_page((unsigned long)control_block);
|
|
return ret;
|
|
}
|
|
|
|
@@ -415,7 +422,7 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
|
|
switch (action) {
|
|
case CPU_UP_PREPARE:
|
|
if (!per_cpu(cpu_control_block, cpu))
|
|
- ret = evtchn_fifo_init_control_block(cpu);
|
|
+ ret = evtchn_fifo_alloc_control_block(cpu);
|
|
break;
|
|
default:
|
|
break;
|
|
@@ -432,7 +439,7 @@ int __init xen_evtchn_fifo_init(void)
|
|
int cpu = get_cpu();
|
|
int ret;
|
|
|
|
- ret = evtchn_fifo_init_control_block(cpu);
|
|
+ ret = evtchn_fifo_alloc_control_block(cpu);
|
|
if (ret < 0)
|
|
goto out;
|
|
|
|
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
|
|
index 073b4a1..ff3c98f 100644
|
|
--- a/drivers/xen/gntdev.c
|
|
+++ b/drivers/xen/gntdev.c
|
|
@@ -529,12 +529,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
|
|
|
|
pr_debug("priv %p\n", priv);
|
|
|
|
+ mutex_lock(&priv->lock);
|
|
while (!list_empty(&priv->maps)) {
|
|
map = list_entry(priv->maps.next, struct grant_map, next);
|
|
list_del(&map->next);
|
|
gntdev_put_map(NULL /* already removed */, map);
|
|
}
|
|
WARN_ON(!list_empty(&priv->freeable_maps));
|
|
+ mutex_unlock(&priv->lock);
|
|
|
|
if (use_ptemod)
|
|
mmu_notifier_unregister(&priv->mn, priv->mm);
|
|
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
|
|
index 624e8dc..edfd797 100644
|
|
--- a/drivers/xen/manage.c
|
|
+++ b/drivers/xen/manage.c
|
|
@@ -111,16 +111,17 @@ static void do_suspend(void)
|
|
|
|
shutting_down = SHUTDOWN_SUSPEND;
|
|
|
|
-#ifdef CONFIG_PREEMPT
|
|
- /* If the kernel is preemptible, we need to freeze all the processes
|
|
- to prevent them from being in the middle of a pagetable update
|
|
- during suspend. */
|
|
err = freeze_processes();
|
|
if (err) {
|
|
- pr_err("%s: freeze failed %d\n", __func__, err);
|
|
+ pr_err("%s: freeze processes failed %d\n", __func__, err);
|
|
goto out;
|
|
}
|
|
-#endif
|
|
+
|
|
+ err = freeze_kernel_threads();
|
|
+ if (err) {
|
|
+ pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
|
|
+ goto out_thaw;
|
|
+ }
|
|
|
|
err = dpm_suspend_start(PMSG_FREEZE);
|
|
if (err) {
|
|
@@ -169,10 +170,8 @@ out_resume:
|
|
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
|
|
|
|
out_thaw:
|
|
-#ifdef CONFIG_PREEMPT
|
|
thaw_processes();
|
|
out:
|
|
-#endif
|
|
shutting_down = SHUTDOWN_INVALID;
|
|
}
|
|
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
|
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
|
|
index ebd8f21..f3a9d83 100644
|
|
--- a/drivers/xen/swiotlb-xen.c
|
|
+++ b/drivers/xen/swiotlb-xen.c
|
|
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
|
|
dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
|
|
phys_addr_t paddr = dma;
|
|
|
|
- BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
|
|
-
|
|
paddr |= baddr & ~PAGE_MASK;
|
|
|
|
return paddr;
|
|
@@ -447,7 +445,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
|
|
|
|
BUG_ON(dir == DMA_NONE);
|
|
|
|
- xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
|
|
+ xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
|
|
|
|
/* NOTE: We use dev_addr here, not paddr! */
|
|
if (is_xen_swiotlb_buffer(dev_addr)) {
|
|
@@ -495,14 +493,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
|
|
BUG_ON(dir == DMA_NONE);
|
|
|
|
if (target == SYNC_FOR_CPU)
|
|
- xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
|
|
+ xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
|
|
|
|
/* NOTE: We use dev_addr here, not paddr! */
|
|
if (is_xen_swiotlb_buffer(dev_addr))
|
|
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
|
|
|
|
if (target == SYNC_FOR_DEVICE)
|
|
- xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
|
|
+ xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
|
|
|
|
if (dir != DMA_FROM_DEVICE)
|
|
return;
|
|
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
|
|
index 46ae0f9..75fe3d4 100644
|
|
--- a/drivers/xen/xen-pciback/conf_space.c
|
|
+++ b/drivers/xen/xen-pciback/conf_space.c
|
|
@@ -16,7 +16,7 @@
|
|
#include "conf_space.h"
|
|
#include "conf_space_quirks.h"
|
|
|
|
-static bool permissive;
|
|
+bool permissive;
|
|
module_param(permissive, bool, 0644);
|
|
|
|
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
|
|
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
|
|
index e56c934..2e1d73d 100644
|
|
--- a/drivers/xen/xen-pciback/conf_space.h
|
|
+++ b/drivers/xen/xen-pciback/conf_space.h
|
|
@@ -64,6 +64,8 @@ struct config_field_entry {
|
|
void *data;
|
|
};
|
|
|
|
+extern bool permissive;
|
|
+
|
|
#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
|
|
|
|
/* Add fields to a device - the add_fields macro expects to get a pointer to
|
|
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
|
|
index c5ee825..2d73693 100644
|
|
--- a/drivers/xen/xen-pciback/conf_space_header.c
|
|
+++ b/drivers/xen/xen-pciback/conf_space_header.c
|
|
@@ -11,6 +11,10 @@
|
|
#include "pciback.h"
|
|
#include "conf_space.h"
|
|
|
|
+struct pci_cmd_info {
|
|
+ u16 val;
|
|
+};
|
|
+
|
|
struct pci_bar_info {
|
|
u32 val;
|
|
u32 len_val;
|
|
@@ -20,22 +24,36 @@ struct pci_bar_info {
|
|
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
|
|
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
|
|
|
|
-static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
|
|
+/* Bits guests are allowed to control in permissive mode. */
|
|
+#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
|
|
+ PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
|
|
+ PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
|
|
+
|
|
+static void *command_init(struct pci_dev *dev, int offset)
|
|
{
|
|
- int i;
|
|
- int ret;
|
|
-
|
|
- ret = xen_pcibk_read_config_word(dev, offset, value, data);
|
|
- if (!pci_is_enabled(dev))
|
|
- return ret;
|
|
-
|
|
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
|
|
- if (dev->resource[i].flags & IORESOURCE_IO)
|
|
- *value |= PCI_COMMAND_IO;
|
|
- if (dev->resource[i].flags & IORESOURCE_MEM)
|
|
- *value |= PCI_COMMAND_MEMORY;
|
|
+ struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
|
|
+ int err;
|
|
+
|
|
+ if (!cmd)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
|
|
+ if (err) {
|
|
+ kfree(cmd);
|
|
+ return ERR_PTR(err);
|
|
}
|
|
|
|
+ return cmd;
|
|
+}
|
|
+
|
|
+static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
|
|
+{
|
|
+ int ret = pci_read_config_word(dev, offset, value);
|
|
+ const struct pci_cmd_info *cmd = data;
|
|
+
|
|
+ *value &= PCI_COMMAND_GUEST;
|
|
+ *value |= cmd->val & ~PCI_COMMAND_GUEST;
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
|
|
{
|
|
struct xen_pcibk_dev_data *dev_data;
|
|
int err;
|
|
+ u16 val;
|
|
+ struct pci_cmd_info *cmd = data;
|
|
|
|
dev_data = pci_get_drvdata(dev);
|
|
if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
|
|
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
|
|
}
|
|
}
|
|
|
|
+ cmd->val = value;
|
|
+
|
|
+ if (!permissive && (!dev_data || !dev_data->permissive))
|
|
+ return 0;
|
|
+
|
|
+ /* Only allow the guest to control certain bits. */
|
|
+ err = pci_read_config_word(dev, offset, &val);
|
|
+ if (err || val == value)
|
|
+ return err;
|
|
+
|
|
+ value &= PCI_COMMAND_GUEST;
|
|
+ value |= val & ~PCI_COMMAND_GUEST;
|
|
+
|
|
return pci_write_config_word(dev, offset, value);
|
|
}
|
|
|
|
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
|
|
{
|
|
.offset = PCI_COMMAND,
|
|
.size = 2,
|
|
+ .init = command_init,
|
|
+ .release = bar_release,
|
|
.u.w.read = command_read,
|
|
.u.w.write = command_write,
|
|
},
|
|
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
|
|
index bb7991c..bfdeadb 100644
|
|
--- a/fs/9p/vfs_inode.c
|
|
+++ b/fs/9p/vfs_inode.c
|
|
@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
|
|
unlock_new_inode(inode);
|
|
return inode;
|
|
error:
|
|
- unlock_new_inode(inode);
|
|
- iput(inode);
|
|
+ iget_failed(inode);
|
|
return ERR_PTR(retval);
|
|
|
|
}
|
|
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
|
|
index 59dc8e8..de8606c 100644
|
|
--- a/fs/9p/vfs_inode_dotl.c
|
|
+++ b/fs/9p/vfs_inode_dotl.c
|
|
@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
|
|
unlock_new_inode(inode);
|
|
return inode;
|
|
error:
|
|
- unlock_new_inode(inode);
|
|
- iput(inode);
|
|
+ iget_failed(inode);
|
|
return ERR_PTR(retval);
|
|
|
|
}
|
|
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
|
|
index d9a4367..9cca0ea 100644
|
|
--- a/fs/affs/amigaffs.c
|
|
+++ b/fs/affs/amigaffs.c
|
|
@@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
|
|
{
|
|
struct dentry *dentry;
|
|
spin_lock(&inode->i_lock);
|
|
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
|
|
if (entry_ino == (u32)(long)dentry->d_fsdata) {
|
|
dentry->d_fsdata = (void *)inode->i_ino;
|
|
break;
|
|
diff --git a/fs/aio.c b/fs/aio.c
|
|
index 6d68e01..3241659 100644
|
|
--- a/fs/aio.c
|
|
+++ b/fs/aio.c
|
|
@@ -141,6 +141,7 @@ struct kioctx {
|
|
|
|
struct {
|
|
unsigned tail;
|
|
+ unsigned completed_events;
|
|
spinlock_t completion_lock;
|
|
} ____cacheline_aligned_in_smp;
|
|
|
|
@@ -164,6 +165,15 @@ static struct vfsmount *aio_mnt;
|
|
static const struct file_operations aio_ring_fops;
|
|
static const struct address_space_operations aio_ctx_aops;
|
|
|
|
+/* Backing dev info for aio fs.
|
|
+ * -no dirty page accounting or writeback happens
|
|
+ */
|
|
+static struct backing_dev_info aio_fs_backing_dev_info = {
|
|
+ .name = "aiofs",
|
|
+ .state = 0,
|
|
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
|
|
+};
|
|
+
|
|
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
|
|
{
|
|
struct qstr this = QSTR_INIT("[aio]", 5);
|
|
@@ -175,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
|
|
|
|
inode->i_mapping->a_ops = &aio_ctx_aops;
|
|
inode->i_mapping->private_data = ctx;
|
|
+ inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
|
|
inode->i_size = PAGE_SIZE * nr_pages;
|
|
|
|
path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
|
|
@@ -220,6 +231,9 @@ static int __init aio_setup(void)
|
|
if (IS_ERR(aio_mnt))
|
|
panic("Failed to create aio fs mount.");
|
|
|
|
+ if (bdi_init(&aio_fs_backing_dev_info))
|
|
+ panic("Failed to init aio fs backing dev info.");
|
|
+
|
|
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
|
|
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
|
|
|
|
@@ -281,11 +295,6 @@ static const struct file_operations aio_ring_fops = {
|
|
.mmap = aio_ring_mmap,
|
|
};
|
|
|
|
-static int aio_set_page_dirty(struct page *page)
|
|
-{
|
|
- return 0;
|
|
-}
|
|
-
|
|
#if IS_ENABLED(CONFIG_MIGRATION)
|
|
static int aio_migratepage(struct address_space *mapping, struct page *new,
|
|
struct page *old, enum migrate_mode mode)
|
|
@@ -357,7 +366,7 @@ out:
|
|
#endif
|
|
|
|
static const struct address_space_operations aio_ctx_aops = {
|
|
- .set_page_dirty = aio_set_page_dirty,
|
|
+ .set_page_dirty = __set_page_dirty_no_writeback,
|
|
#if IS_ENABLED(CONFIG_MIGRATION)
|
|
.migratepage = aio_migratepage,
|
|
#endif
|
|
@@ -412,7 +421,6 @@ static int aio_setup_ring(struct kioctx *ctx)
|
|
pr_debug("pid(%d) page[%d]->count=%d\n",
|
|
current->pid, i, page_count(page));
|
|
SetPageUptodate(page);
|
|
- SetPageDirty(page);
|
|
unlock_page(page);
|
|
|
|
ctx->ring_pages[i] = page;
|
|
@@ -711,6 +719,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
|
|
err_cleanup:
|
|
aio_nr_sub(ctx->max_reqs);
|
|
err_ctx:
|
|
+ atomic_set(&ctx->dead, 1);
|
|
+ if (ctx->mmap_size)
|
|
+ vm_munmap(ctx->mmap_base, ctx->mmap_size);
|
|
aio_free_ring(ctx);
|
|
err:
|
|
mutex_unlock(&ctx->ring_lock);
|
|
@@ -796,6 +807,9 @@ void exit_aio(struct mm_struct *mm)
|
|
unsigned i = 0;
|
|
|
|
while (1) {
|
|
+ struct completion requests_done =
|
|
+ COMPLETION_INITIALIZER_ONSTACK(requests_done);
|
|
+
|
|
rcu_read_lock();
|
|
table = rcu_dereference(mm->ioctx_table);
|
|
|
|
@@ -823,7 +837,10 @@ void exit_aio(struct mm_struct *mm)
|
|
*/
|
|
ctx->mmap_size = 0;
|
|
|
|
- kill_ioctx(mm, ctx, NULL);
|
|
+ kill_ioctx(mm, ctx, &requests_done);
|
|
+
|
|
+ /* Wait until all IO for the context are done. */
|
|
+ wait_for_completion(&requests_done);
|
|
}
|
|
}
|
|
|
|
@@ -880,6 +897,68 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
+/* refill_reqs_available
|
|
+ * Updates the reqs_available reference counts used for tracking the
|
|
+ * number of free slots in the completion ring. This can be called
|
|
+ * from aio_complete() (to optimistically update reqs_available) or
|
|
+ * from aio_get_req() (the we're out of events case). It must be
|
|
+ * called holding ctx->completion_lock.
|
|
+ */
|
|
+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
|
|
+ unsigned tail)
|
|
+{
|
|
+ unsigned events_in_ring, completed;
|
|
+
|
|
+ /* Clamp head since userland can write to it. */
|
|
+ head %= ctx->nr_events;
|
|
+ if (head <= tail)
|
|
+ events_in_ring = tail - head;
|
|
+ else
|
|
+ events_in_ring = ctx->nr_events - (head - tail);
|
|
+
|
|
+ completed = ctx->completed_events;
|
|
+ if (events_in_ring < completed)
|
|
+ completed -= events_in_ring;
|
|
+ else
|
|
+ completed = 0;
|
|
+
|
|
+ if (!completed)
|
|
+ return;
|
|
+
|
|
+ ctx->completed_events -= completed;
|
|
+ put_reqs_available(ctx, completed);
|
|
+}
|
|
+
|
|
+/* user_refill_reqs_available
|
|
+ * Called to refill reqs_available when aio_get_req() encounters an
|
|
+ * out of space in the completion ring.
|
|
+ */
|
|
+static void user_refill_reqs_available(struct kioctx *ctx)
|
|
+{
|
|
+ spin_lock_irq(&ctx->completion_lock);
|
|
+ if (ctx->completed_events) {
|
|
+ struct aio_ring *ring;
|
|
+ unsigned head;
|
|
+
|
|
+ /* Access of ring->head may race with aio_read_events_ring()
|
|
+ * here, but that's okay since whether we read the old version
|
|
+ * or the new version, and either will be valid. The important
|
|
+ * part is that head cannot pass tail since we prevent
|
|
+ * aio_complete() from updating tail by holding
|
|
+ * ctx->completion_lock. Even if head is invalid, the check
|
|
+ * against ctx->completed_events below will make sure we do the
|
|
+ * safe/right thing.
|
|
+ */
|
|
+ ring = kmap_atomic(ctx->ring_pages[0]);
|
|
+ head = ring->head;
|
|
+ kunmap_atomic(ring);
|
|
+
|
|
+ refill_reqs_available(ctx, head, ctx->tail);
|
|
+ }
|
|
+
|
|
+ spin_unlock_irq(&ctx->completion_lock);
|
|
+}
|
|
+
|
|
/* aio_get_req
|
|
* Allocate a slot for an aio request.
|
|
* Returns NULL if no requests are free.
|
|
@@ -888,8 +967,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
|
|
{
|
|
struct kiocb *req;
|
|
|
|
- if (!get_reqs_available(ctx))
|
|
- return NULL;
|
|
+ if (!get_reqs_available(ctx)) {
|
|
+ user_refill_reqs_available(ctx);
|
|
+ if (!get_reqs_available(ctx))
|
|
+ return NULL;
|
|
+ }
|
|
|
|
req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
|
|
if (unlikely(!req))
|
|
@@ -948,8 +1030,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
|
|
struct kioctx *ctx = iocb->ki_ctx;
|
|
struct aio_ring *ring;
|
|
struct io_event *ev_page, *event;
|
|
+ unsigned tail, pos, head;
|
|
unsigned long flags;
|
|
- unsigned tail, pos;
|
|
|
|
/*
|
|
* Special case handling for sync iocbs:
|
|
@@ -1010,10 +1092,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
|
|
ctx->tail = tail;
|
|
|
|
ring = kmap_atomic(ctx->ring_pages[0]);
|
|
+ head = ring->head;
|
|
ring->tail = tail;
|
|
kunmap_atomic(ring);
|
|
flush_dcache_page(ctx->ring_pages[0]);
|
|
|
|
+ ctx->completed_events++;
|
|
+ if (ctx->completed_events > 1)
|
|
+ refill_reqs_available(ctx, head, tail);
|
|
spin_unlock_irqrestore(&ctx->completion_lock, flags);
|
|
|
|
pr_debug("added to ring %p at [%u]\n", iocb, tail);
|
|
@@ -1028,7 +1114,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
|
|
|
|
/* everything turned out well, dispose of the aiocb. */
|
|
kiocb_free(iocb);
|
|
- put_reqs_available(ctx, 1);
|
|
|
|
/*
|
|
* We have to order our ring_info tail store above and test
|
|
@@ -1065,6 +1150,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
|
|
tail = ring->tail;
|
|
kunmap_atomic(ring);
|
|
|
|
+ /*
|
|
+ * Ensure that once we've read the current tail pointer, that
|
|
+ * we also see the events that were stored up to the tail.
|
|
+ */
|
|
+ smp_rmb();
|
|
+
|
|
pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
|
|
|
|
if (head == tail)
|
|
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
|
|
index 3182c0e..e3399dc 100644
|
|
--- a/fs/autofs4/dev-ioctl.c
|
|
+++ b/fs/autofs4/dev-ioctl.c
|
|
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
|
|
*/
|
|
static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
|
|
{
|
|
- struct autofs_dev_ioctl tmp;
|
|
+ struct autofs_dev_ioctl tmp, *res;
|
|
|
|
if (copy_from_user(&tmp, in, sizeof(tmp)))
|
|
return ERR_PTR(-EFAULT);
|
|
@@ -103,7 +103,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
|
|
if (tmp.size < sizeof(tmp))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- return memdup_user(in, tmp.size);
|
|
+ res = memdup_user(in, tmp.size);
|
|
+ if (!IS_ERR(res))
|
|
+ res->size = tmp.size;
|
|
+
|
|
+ return res;
|
|
}
|
|
|
|
static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
|
|
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
|
|
index 394e90b..edb46e6 100644
|
|
--- a/fs/autofs4/expire.c
|
|
+++ b/fs/autofs4/expire.c
|
|
@@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
|
|
spin_lock(&root->d_lock);
|
|
|
|
if (prev)
|
|
- next = prev->d_u.d_child.next;
|
|
+ next = prev->d_child.next;
|
|
else {
|
|
prev = dget_dlock(root);
|
|
next = prev->d_subdirs.next;
|
|
@@ -105,13 +105,13 @@ cont:
|
|
return NULL;
|
|
}
|
|
|
|
- q = list_entry(next, struct dentry, d_u.d_child);
|
|
+ q = list_entry(next, struct dentry, d_child);
|
|
|
|
spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
|
|
/* Already gone or negative dentry (under construction) - try next */
|
|
if (!d_count(q) || !simple_positive(q)) {
|
|
spin_unlock(&q->d_lock);
|
|
- next = q->d_u.d_child.next;
|
|
+ next = q->d_child.next;
|
|
goto cont;
|
|
}
|
|
dget_dlock(q);
|
|
@@ -161,13 +161,13 @@ again:
|
|
goto relock;
|
|
}
|
|
spin_unlock(&p->d_lock);
|
|
- next = p->d_u.d_child.next;
|
|
+ next = p->d_child.next;
|
|
p = parent;
|
|
if (next != &parent->d_subdirs)
|
|
break;
|
|
}
|
|
}
|
|
- ret = list_entry(next, struct dentry, d_u.d_child);
|
|
+ ret = list_entry(next, struct dentry, d_child);
|
|
|
|
spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
|
|
/* Negative dentry - try next */
|
|
@@ -461,7 +461,7 @@ found:
|
|
spin_lock(&sbi->lookup_lock);
|
|
spin_lock(&expired->d_parent->d_lock);
|
|
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
|
|
- list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
|
|
+ list_move(&expired->d_parent->d_subdirs, &expired->d_child);
|
|
spin_unlock(&expired->d_lock);
|
|
spin_unlock(&expired->d_parent->d_lock);
|
|
spin_unlock(&sbi->lookup_lock);
|
|
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
|
|
index cc87c1a..9e016e6 100644
|
|
--- a/fs/autofs4/root.c
|
|
+++ b/fs/autofs4/root.c
|
|
@@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
|
|
/* only consider parents below dentrys in the root */
|
|
if (IS_ROOT(parent->d_parent))
|
|
return;
|
|
- d_child = &dentry->d_u.d_child;
|
|
+ d_child = &dentry->d_child;
|
|
/* Set parent managed if it's becoming empty */
|
|
if (d_child->next == &parent->d_subdirs &&
|
|
d_child->prev == &parent->d_subdirs)
|
|
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
|
|
index 67be295..35240a7 100644
|
|
--- a/fs/binfmt_elf.c
|
|
+++ b/fs/binfmt_elf.c
|
|
@@ -549,11 +549,12 @@ out:
|
|
|
|
static unsigned long randomize_stack_top(unsigned long stack_top)
|
|
{
|
|
- unsigned int random_variable = 0;
|
|
+ unsigned long random_variable = 0;
|
|
|
|
if ((current->flags & PF_RANDOMIZE) &&
|
|
!(current->personality & ADDR_NO_RANDOMIZE)) {
|
|
- random_variable = get_random_int() & STACK_RND_MASK;
|
|
+ random_variable = (unsigned long) get_random_int();
|
|
+ random_variable &= STACK_RND_MASK;
|
|
random_variable <<= PAGE_SHIFT;
|
|
}
|
|
#ifdef CONFIG_STACK_GROWSUP
|
|
@@ -750,6 +751,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
|
|
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
|
|
int elf_prot = 0, elf_flags;
|
|
unsigned long k, vaddr;
|
|
+ unsigned long total_size = 0;
|
|
|
|
if (elf_ppnt->p_type != PT_LOAD)
|
|
continue;
|
|
@@ -814,10 +816,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
|
|
#else
|
|
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
|
|
#endif
|
|
+ total_size = total_mapping_size(elf_phdata,
|
|
+ loc->elf_ex.e_phnum);
|
|
+ if (!total_size) {
|
|
+ retval = -EINVAL;
|
|
+ goto out_free_dentry;
|
|
+ }
|
|
}
|
|
|
|
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
|
|
- elf_prot, elf_flags, 0);
|
|
+ elf_prot, elf_flags, total_size);
|
|
if (BAD_ADDR(error)) {
|
|
send_sig(SIGKILL, current, 0);
|
|
retval = IS_ERR((void *)error) ?
|
|
diff --git a/fs/bio.c b/fs/bio.c
|
|
index 8754e7b..b2b1451 100644
|
|
--- a/fs/bio.c
|
|
+++ b/fs/bio.c
|
|
@@ -1806,8 +1806,9 @@ EXPORT_SYMBOL(bio_endio_nodec);
|
|
* Allocates and returns a new bio which represents @sectors from the start of
|
|
* @bio, and updates @bio to represent the remaining sectors.
|
|
*
|
|
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
|
|
- * responsibility to ensure that @bio is not freed before the split.
|
|
+ * Unless this is a discard request the newly allocated bio will point
|
|
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
|
|
+ * @bio is not freed before the split.
|
|
*/
|
|
struct bio *bio_split(struct bio *bio, int sectors,
|
|
gfp_t gfp, struct bio_set *bs)
|
|
@@ -1817,7 +1818,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
|
|
BUG_ON(sectors <= 0);
|
|
BUG_ON(sectors >= bio_sectors(bio));
|
|
|
|
- split = bio_clone_fast(bio, gfp, bs);
|
|
+ /*
|
|
+ * Discards need a mutable bio_vec to accommodate the payload
|
|
+ * required by the DSM TRIM and UNMAP commands.
|
|
+ */
|
|
+ if (bio->bi_rw & REQ_DISCARD)
|
|
+ split = bio_clone_bioset(bio, gfp, bs);
|
|
+ else
|
|
+ split = bio_clone_fast(bio, gfp, bs);
|
|
+
|
|
if (!split)
|
|
return NULL;
|
|
|
|
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
|
|
index 14d29d0..6244f9c 100644
|
|
--- a/fs/btrfs/backref.c
|
|
+++ b/fs/btrfs/backref.c
|
|
@@ -275,9 +275,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
|
|
}
|
|
if (ret > 0)
|
|
goto next;
|
|
- ret = ulist_add_merge(parents, eb->start,
|
|
- (uintptr_t)eie,
|
|
- (u64 *)&old, GFP_NOFS);
|
|
+ ret = ulist_add_merge_ptr(parents, eb->start,
|
|
+ eie, (void **)&old, GFP_NOFS);
|
|
if (ret < 0)
|
|
break;
|
|
if (!ret && extent_item_pos) {
|
|
@@ -985,16 +984,19 @@ again:
|
|
ret = -EIO;
|
|
goto out;
|
|
}
|
|
+ btrfs_tree_read_lock(eb);
|
|
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
|
|
ret = find_extent_in_eb(eb, bytenr,
|
|
*extent_item_pos, &eie);
|
|
+ btrfs_tree_read_unlock_blocking(eb);
|
|
free_extent_buffer(eb);
|
|
if (ret < 0)
|
|
goto out;
|
|
ref->inode_list = eie;
|
|
}
|
|
- ret = ulist_add_merge(refs, ref->parent,
|
|
- (uintptr_t)ref->inode_list,
|
|
- (u64 *)&eie, GFP_NOFS);
|
|
+ ret = ulist_add_merge_ptr(refs, ref->parent,
|
|
+ ref->inode_list,
|
|
+ (void **)&eie, GFP_NOFS);
|
|
if (ret < 0)
|
|
goto out;
|
|
if (!ret && extent_item_pos) {
|
|
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
|
|
index b01fb6c..d43c544 100644
|
|
--- a/fs/btrfs/compression.c
|
|
+++ b/fs/btrfs/compression.c
|
|
@@ -472,7 +472,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
|
|
rcu_read_lock();
|
|
page = radix_tree_lookup(&mapping->page_tree, pg_index);
|
|
rcu_read_unlock();
|
|
- if (page) {
|
|
+ if (page && !radix_tree_exceptional_entry(page)) {
|
|
misses++;
|
|
if (misses > 4)
|
|
break;
|
|
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
|
|
index cbd3a7d..f8ffee4 100644
|
|
--- a/fs/btrfs/ctree.c
|
|
+++ b/fs/btrfs/ctree.c
|
|
@@ -2655,32 +2655,23 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key,
|
|
return 0;
|
|
}
|
|
|
|
-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
|
|
+int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
|
|
u64 iobjectid, u64 ioff, u8 key_type,
|
|
struct btrfs_key *found_key)
|
|
{
|
|
int ret;
|
|
struct btrfs_key key;
|
|
struct extent_buffer *eb;
|
|
- struct btrfs_path *path;
|
|
+
|
|
+ ASSERT(path);
|
|
|
|
key.type = key_type;
|
|
key.objectid = iobjectid;
|
|
key.offset = ioff;
|
|
|
|
- if (found_path == NULL) {
|
|
- path = btrfs_alloc_path();
|
|
- if (!path)
|
|
- return -ENOMEM;
|
|
- } else
|
|
- path = found_path;
|
|
-
|
|
ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
|
|
- if ((ret < 0) || (found_key == NULL)) {
|
|
- if (path != found_path)
|
|
- btrfs_free_path(path);
|
|
+ if ((ret < 0) || (found_key == NULL))
|
|
return ret;
|
|
- }
|
|
|
|
eb = path->nodes[0];
|
|
if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
|
|
@@ -2972,7 +2963,7 @@ done:
|
|
*/
|
|
if (!p->leave_spinning)
|
|
btrfs_set_path_blocking(p);
|
|
- if (ret < 0)
|
|
+ if (ret < 0 && !p->skip_release_on_error)
|
|
btrfs_release_path(p);
|
|
return ret;
|
|
}
|
|
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
|
|
index d3511cc..3b39eb4 100644
|
|
--- a/fs/btrfs/ctree.h
|
|
+++ b/fs/btrfs/ctree.h
|
|
@@ -608,6 +608,7 @@ struct btrfs_path {
|
|
unsigned int skip_locking:1;
|
|
unsigned int leave_spinning:1;
|
|
unsigned int search_commit_root:1;
|
|
+ unsigned int skip_release_on_error:1;
|
|
};
|
|
|
|
/*
|
|
@@ -3609,6 +3610,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
|
|
int verify_dir_item(struct btrfs_root *root,
|
|
struct extent_buffer *leaf,
|
|
struct btrfs_dir_item *dir_item);
|
|
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
|
|
+ struct btrfs_path *path,
|
|
+ const char *name,
|
|
+ int name_len);
|
|
|
|
/* orphan.c */
|
|
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
|
|
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
|
|
index 451b00c..12e3556 100644
|
|
--- a/fs/btrfs/delayed-inode.c
|
|
+++ b/fs/btrfs/delayed-inode.c
|
|
@@ -1854,6 +1854,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
|
|
{
|
|
struct btrfs_delayed_node *delayed_node;
|
|
|
|
+ /*
|
|
+ * we don't do delayed inode updates during log recovery because it
|
|
+ * leads to enospc problems. This means we also can't do
|
|
+ * delayed inode refs
|
|
+ */
|
|
+ if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
|
|
+ return -EAGAIN;
|
|
+
|
|
delayed_node = btrfs_get_or_create_delayed_node(inode);
|
|
if (IS_ERR(delayed_node))
|
|
return PTR_ERR(delayed_node);
|
|
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
|
|
index a0691df..9521a93 100644
|
|
--- a/fs/btrfs/dir-item.c
|
|
+++ b/fs/btrfs/dir-item.c
|
|
@@ -21,10 +21,6 @@
|
|
#include "hash.h"
|
|
#include "transaction.h"
|
|
|
|
-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
|
|
- struct btrfs_path *path,
|
|
- const char *name, int name_len);
|
|
-
|
|
/*
|
|
* insert a name into a directory, doing overflow properly if there is a hash
|
|
* collision. data_size indicates how big the item inserted should be. On
|
|
@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
|
|
* this walks through all the entries in a dir item and finds one
|
|
* for a specific name.
|
|
*/
|
|
-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
|
|
- struct btrfs_path *path,
|
|
- const char *name, int name_len)
|
|
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
|
|
+ struct btrfs_path *path,
|
|
+ const char *name, int name_len)
|
|
{
|
|
struct btrfs_dir_item *dir_item;
|
|
unsigned long name_ptr;
|
|
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
|
|
index 370ef74..f48d5fc 100644
|
|
--- a/fs/btrfs/disk-io.c
|
|
+++ b/fs/btrfs/disk-io.c
|
|
@@ -1560,6 +1560,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
|
|
bool check_ref)
|
|
{
|
|
struct btrfs_root *root;
|
|
+ struct btrfs_path *path;
|
|
int ret;
|
|
|
|
if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
|
|
@@ -1599,8 +1600,14 @@ again:
|
|
if (ret)
|
|
goto fail;
|
|
|
|
- ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
|
|
+ path = btrfs_alloc_path();
|
|
+ if (!path) {
|
|
+ ret = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+ ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID,
|
|
location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
|
|
+ btrfs_free_path(path);
|
|
if (ret < 0)
|
|
goto fail;
|
|
if (ret == 0)
|
|
@@ -2411,7 +2418,7 @@ int open_ctree(struct super_block *sb,
|
|
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
|
|
|
|
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
|
|
- printk(KERN_ERR "BTRFS: has skinny extents\n");
|
|
+ printk(KERN_INFO "BTRFS: has skinny extents\n");
|
|
|
|
/*
|
|
* flag our filesystem as having big metadata blocks if
|
|
@@ -3978,12 +3985,6 @@ again:
|
|
if (ret)
|
|
break;
|
|
|
|
- /* opt_discard */
|
|
- if (btrfs_test_opt(root, DISCARD))
|
|
- ret = btrfs_error_discard_extent(root, start,
|
|
- end + 1 - start,
|
|
- NULL);
|
|
-
|
|
clear_extent_dirty(unpin, start, end, GFP_NOFS);
|
|
btrfs_error_unpin_extent_range(root, start, end);
|
|
cond_resched();
|
|
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
|
|
index 3ff98e2..794d7c6 100644
|
|
--- a/fs/btrfs/extent-tree.c
|
|
+++ b/fs/btrfs/extent-tree.c
|
|
@@ -5503,7 +5503,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
|
|
update_global_block_rsv(fs_info);
|
|
}
|
|
|
|
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
|
|
+static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
|
|
+ const bool return_free_space)
|
|
{
|
|
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
struct btrfs_block_group_cache *cache = NULL;
|
|
@@ -5527,7 +5528,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
|
|
|
|
if (start < cache->last_byte_to_unpin) {
|
|
len = min(len, cache->last_byte_to_unpin - start);
|
|
- btrfs_add_free_space(cache, start, len);
|
|
+ if (return_free_space)
|
|
+ btrfs_add_free_space(cache, start, len);
|
|
}
|
|
|
|
start += len;
|
|
@@ -5590,7 +5592,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
|
|
end + 1 - start, NULL);
|
|
|
|
clear_extent_dirty(unpin, start, end, GFP_NOFS);
|
|
- unpin_extent_range(root, start, end);
|
|
+ unpin_extent_range(root, start, end, true);
|
|
cond_resched();
|
|
}
|
|
|
|
@@ -6643,12 +6645,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
|
|
return -ENOSPC;
|
|
}
|
|
|
|
- if (btrfs_test_opt(root, DISCARD))
|
|
- ret = btrfs_discard_extent(root, start, len, NULL);
|
|
-
|
|
if (pin)
|
|
pin_down_extent(root, cache, start, len, 1);
|
|
else {
|
|
+ if (btrfs_test_opt(root, DISCARD))
|
|
+ ret = btrfs_discard_extent(root, start, len, NULL);
|
|
btrfs_add_free_space(cache, start, len);
|
|
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
|
|
}
|
|
@@ -8886,7 +8887,7 @@ out:
|
|
|
|
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
|
|
{
|
|
- return unpin_extent_range(root, start, end);
|
|
+ return unpin_extent_range(root, start, end, false);
|
|
}
|
|
|
|
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
|
|
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
|
|
index 2eea43f..8adfc65 100644
|
|
--- a/fs/btrfs/extent_io.c
|
|
+++ b/fs/btrfs/extent_io.c
|
|
@@ -2525,6 +2525,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
|
|
test_bit(BIO_UPTODATE, &bio->bi_flags);
|
|
if (err)
|
|
uptodate = 0;
|
|
+ offset += len;
|
|
continue;
|
|
}
|
|
}
|
|
@@ -4288,8 +4289,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
|
}
|
|
ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
|
|
em_len, flags);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ if (ret == 1)
|
|
+ ret = 0;
|
|
goto out_free;
|
|
+ }
|
|
}
|
|
out_free:
|
|
free_extent_map(em);
|
|
@@ -4506,7 +4510,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
|
|
spin_unlock(&eb->refs_lock);
|
|
}
|
|
|
|
-static void mark_extent_buffer_accessed(struct extent_buffer *eb)
|
|
+static void mark_extent_buffer_accessed(struct extent_buffer *eb,
|
|
+ struct page *accessed)
|
|
{
|
|
unsigned long num_pages, i;
|
|
|
|
@@ -4515,7 +4520,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
|
|
num_pages = num_extent_pages(eb->start, eb->len);
|
|
for (i = 0; i < num_pages; i++) {
|
|
struct page *p = extent_buffer_page(eb, i);
|
|
- mark_page_accessed(p);
|
|
+ if (p != accessed)
|
|
+ mark_page_accessed(p);
|
|
}
|
|
}
|
|
|
|
@@ -4529,7 +4535,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
|
|
start >> PAGE_CACHE_SHIFT);
|
|
if (eb && atomic_inc_not_zero(&eb->refs)) {
|
|
rcu_read_unlock();
|
|
- mark_extent_buffer_accessed(eb);
|
|
+ mark_extent_buffer_accessed(eb, NULL);
|
|
return eb;
|
|
}
|
|
rcu_read_unlock();
|
|
@@ -4577,7 +4583,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
|
|
spin_unlock(&mapping->private_lock);
|
|
unlock_page(p);
|
|
page_cache_release(p);
|
|
- mark_extent_buffer_accessed(exists);
|
|
+ mark_extent_buffer_accessed(exists, p);
|
|
goto free_eb;
|
|
}
|
|
|
|
@@ -4592,7 +4598,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
|
|
attach_extent_buffer_page(eb, p);
|
|
spin_unlock(&mapping->private_lock);
|
|
WARN_ON(PageDirty(p));
|
|
- mark_page_accessed(p);
|
|
eb->pages[i] = p;
|
|
if (!PageUptodate(p))
|
|
uptodate = 0;
|
|
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
|
|
index 996ad56b..82845a6 100644
|
|
--- a/fs/btrfs/extent_map.c
|
|
+++ b/fs/btrfs/extent_map.c
|
|
@@ -290,8 +290,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
|
|
if (!em)
|
|
goto out;
|
|
|
|
- if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
|
|
- list_move(&em->list, &tree->modified_extents);
|
|
em->generation = gen;
|
|
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
|
|
em->mod_start = em->start;
|
|
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
|
|
index 127555b..196b089 100644
|
|
--- a/fs/btrfs/file-item.c
|
|
+++ b/fs/btrfs/file-item.c
|
|
@@ -423,7 +423,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
|
|
ret = 0;
|
|
fail:
|
|
while (ret < 0 && !list_empty(&tmplist)) {
|
|
- sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
|
|
+ sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
|
|
list_del(&sums->list);
|
|
kfree(sums);
|
|
}
|
|
@@ -756,7 +756,7 @@ again:
|
|
found_next = 1;
|
|
if (ret != 0)
|
|
goto insert;
|
|
- slot = 0;
|
|
+ slot = path->slots[0];
|
|
}
|
|
btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
|
|
if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
|
|
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
|
|
index 0165b86..0a841dd 100644
|
|
--- a/fs/btrfs/file.c
|
|
+++ b/fs/btrfs/file.c
|
|
@@ -425,13 +425,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
|
|
struct page *page = prepared_pages[pg];
|
|
/*
|
|
* Copy data from userspace to the current page
|
|
- *
|
|
- * Disable pagefault to avoid recursive lock since
|
|
- * the pages are already locked
|
|
*/
|
|
- pagefault_disable();
|
|
copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
|
|
- pagefault_enable();
|
|
|
|
/* Flush processor's dcache for this page */
|
|
flush_dcache_page(page);
|
|
@@ -475,11 +470,12 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
|
|
for (i = 0; i < num_pages; i++) {
|
|
/* page checked is some magic around finding pages that
|
|
* have been modified without going through btrfs_set_page_dirty
|
|
- * clear it here
|
|
+ * clear it here. There should be no need to mark the pages
|
|
+ * accessed as prepare_pages should have marked them accessed
|
|
+ * in prepare_pages via find_or_create_page()
|
|
*/
|
|
ClearPageChecked(pages[i]);
|
|
unlock_page(pages[i]);
|
|
- mark_page_accessed(pages[i]);
|
|
page_cache_release(pages[i]);
|
|
}
|
|
}
|
|
@@ -1778,22 +1774,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
|
|
mutex_unlock(&inode->i_mutex);
|
|
|
|
/*
|
|
- * we want to make sure fsync finds this change
|
|
- * but we haven't joined a transaction running right now.
|
|
- *
|
|
- * Later on, someone is sure to update the inode and get the
|
|
- * real transid recorded.
|
|
- *
|
|
- * We set last_trans now to the fs_info generation + 1,
|
|
- * this will either be one more than the running transaction
|
|
- * or the generation used for the next transaction if there isn't
|
|
- * one running right now.
|
|
- *
|
|
* We also have to set last_sub_trans to the current log transid,
|
|
* otherwise subsequent syncs to a file that's been synced in this
|
|
* transaction will appear to have already occured.
|
|
*/
|
|
- BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
|
|
BTRFS_I(inode)->last_sub_trans = root->log_transid;
|
|
if (num_written > 0) {
|
|
err = generic_write_sync(file, pos, num_written);
|
|
@@ -1896,25 +1880,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|
atomic_inc(&root->log_batch);
|
|
|
|
/*
|
|
- * check the transaction that last modified this inode
|
|
- * and see if its already been committed
|
|
- */
|
|
- if (!BTRFS_I(inode)->last_trans) {
|
|
- mutex_unlock(&inode->i_mutex);
|
|
- goto out;
|
|
- }
|
|
-
|
|
- /*
|
|
- * if the last transaction that changed this file was before
|
|
- * the current transaction, we can bail out now without any
|
|
- * syncing
|
|
+ * If the last transaction that changed this file was before the current
|
|
+ * transaction and we have the full sync flag set in our inode, we can
|
|
+ * bail out now without any syncing.
|
|
+ *
|
|
+ * Note that we can't bail out if the full sync flag isn't set. This is
|
|
+ * because when the full sync flag is set we start all ordered extents
|
|
+ * and wait for them to fully complete - when they complete they update
|
|
+ * the inode's last_trans field through:
|
|
+ *
|
|
+ * btrfs_finish_ordered_io() ->
|
|
+ * btrfs_update_inode_fallback() ->
|
|
+ * btrfs_update_inode() ->
|
|
+ * btrfs_set_inode_last_trans()
|
|
+ *
|
|
+ * So we are sure that last_trans is up to date and can do this check to
|
|
+ * bail out safely. For the fast path, when the full sync flag is not
|
|
+ * set in our inode, we can not do it because we start only our ordered
|
|
+ * extents and don't wait for them to complete (that is when
|
|
+ * btrfs_finish_ordered_io runs), so here at this point their last_trans
|
|
+ * value might be less than or equals to fs_info->last_trans_committed,
|
|
+ * and setting a speculative last_trans for an inode when a buffered
|
|
+ * write is made (such as fs_info->generation + 1 for example) would not
|
|
+ * be reliable since after setting the value and before fsync is called
|
|
+ * any number of transactions can start and commit (transaction kthread
|
|
+ * commits the current transaction periodically), and a transaction
|
|
+ * commit does not start nor waits for ordered extents to complete.
|
|
*/
|
|
smp_mb();
|
|
if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
|
|
- BTRFS_I(inode)->last_trans <=
|
|
- root->fs_info->last_trans_committed) {
|
|
- BTRFS_I(inode)->last_trans = 0;
|
|
-
|
|
+ (full_sync && BTRFS_I(inode)->last_trans <=
|
|
+ root->fs_info->last_trans_committed)) {
|
|
/*
|
|
* We'v had everything committed since the last time we were
|
|
* modified so clear this flag in case it was set for whatever
|
|
@@ -2510,23 +2506,28 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
|
struct extent_map *em = NULL;
|
|
struct extent_state *cached_state = NULL;
|
|
- u64 lockstart = *offset;
|
|
- u64 lockend = i_size_read(inode);
|
|
- u64 start = *offset;
|
|
- u64 len = i_size_read(inode);
|
|
+ u64 lockstart;
|
|
+ u64 lockend;
|
|
+ u64 start;
|
|
+ u64 len;
|
|
int ret = 0;
|
|
|
|
- lockend = max_t(u64, root->sectorsize, lockend);
|
|
+ if (inode->i_size == 0)
|
|
+ return -ENXIO;
|
|
+
|
|
+ /*
|
|
+ * *offset can be negative, in this case we start finding DATA/HOLE from
|
|
+ * the very start of the file.
|
|
+ */
|
|
+ start = max_t(loff_t, 0, *offset);
|
|
+
|
|
+ lockstart = round_down(start, root->sectorsize);
|
|
+ lockend = round_up(i_size_read(inode), root->sectorsize);
|
|
if (lockend <= lockstart)
|
|
lockend = lockstart + root->sectorsize;
|
|
-
|
|
lockend--;
|
|
len = lockend - lockstart + 1;
|
|
|
|
- len = max_t(u64, len, root->sectorsize);
|
|
- if (inode->i_size == 0)
|
|
- return -ENXIO;
|
|
-
|
|
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
|
|
&cached_state);
|
|
|
|
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
|
|
index ab485e5..644942a 100644
|
|
--- a/fs/btrfs/inode-map.c
|
|
+++ b/fs/btrfs/inode-map.c
|
|
@@ -281,7 +281,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
|
|
__btrfs_add_free_space(ctl, info->offset, count);
|
|
free:
|
|
rb_erase(&info->offset_index, rbroot);
|
|
- kfree(info);
|
|
+ kmem_cache_free(btrfs_free_space_cachep, info);
|
|
}
|
|
}
|
|
|
|
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
|
|
index d3d4448..653cdd8 100644
|
|
--- a/fs/btrfs/inode.c
|
|
+++ b/fs/btrfs/inode.c
|
|
@@ -701,6 +701,18 @@ retry:
|
|
unlock_extent(io_tree, async_extent->start,
|
|
async_extent->start +
|
|
async_extent->ram_size - 1);
|
|
+
|
|
+ /*
|
|
+ * we need to redirty the pages if we decide to
|
|
+ * fallback to uncompressed IO, otherwise we
|
|
+ * will not submit these pages down to lower
|
|
+ * layers.
|
|
+ */
|
|
+ extent_range_redirty_for_io(inode,
|
|
+ async_extent->start,
|
|
+ async_extent->start +
|
|
+ async_extent->ram_size - 1);
|
|
+
|
|
goto retry;
|
|
}
|
|
goto out_free;
|
|
@@ -3584,7 +3596,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
|
|
* without delay
|
|
*/
|
|
if (!btrfs_is_free_space_inode(inode)
|
|
- && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
|
|
+ && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
|
|
+ && !root->fs_info->log_root_recovering) {
|
|
btrfs_update_root_times(trans, root);
|
|
|
|
ret = btrfs_delayed_update_inode(trans, root, inode);
|
|
@@ -6857,7 +6870,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
|
|
((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
|
|
em->block_start != EXTENT_MAP_HOLE)) {
|
|
int type;
|
|
- int ret;
|
|
u64 block_start, orig_start, orig_block_len, ram_bytes;
|
|
|
|
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
|
|
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
|
|
index a6d8efa..d40ae42 100644
|
|
--- a/fs/btrfs/ioctl.c
|
|
+++ b/fs/btrfs/ioctl.c
|
|
@@ -302,6 +302,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
|
|
goto out_drop;
|
|
|
|
} else {
|
|
+ ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
|
|
+ if (ret && ret != -ENODATA)
|
|
+ goto out_drop;
|
|
ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
|
|
}
|
|
|
|
@@ -2705,6 +2708,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
|
|
if (src == dst)
|
|
return -EINVAL;
|
|
|
|
+ if (len == 0)
|
|
+ return 0;
|
|
+
|
|
btrfs_double_lock(src, loff, dst, dst_loff, len);
|
|
|
|
ret = extent_same_check_offsets(src, loff, len);
|
|
@@ -2737,7 +2743,7 @@ out_unlock:
|
|
static long btrfs_ioctl_file_extent_same(struct file *file,
|
|
struct btrfs_ioctl_same_args __user *argp)
|
|
{
|
|
- struct btrfs_ioctl_same_args *same;
|
|
+ struct btrfs_ioctl_same_args *same = NULL;
|
|
struct btrfs_ioctl_same_extent_info *info;
|
|
struct inode *src = file_inode(file);
|
|
u64 off;
|
|
@@ -2767,6 +2773,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
|
|
|
|
if (IS_ERR(same)) {
|
|
ret = PTR_ERR(same);
|
|
+ same = NULL;
|
|
goto out;
|
|
}
|
|
|
|
@@ -2837,6 +2844,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
|
|
|
|
out:
|
|
mnt_drop_write_file(file);
|
|
+ kfree(same);
|
|
return ret;
|
|
}
|
|
|
|
@@ -3223,6 +3231,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
|
|
if (off + len == src->i_size)
|
|
len = ALIGN(src->i_size, bs) - off;
|
|
|
|
+ if (len == 0) {
|
|
+ ret = 0;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
/* verify the end result is block aligned */
|
|
if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
|
|
!IS_ALIGNED(destoff, bs))
|
|
@@ -4750,6 +4763,12 @@ long btrfs_ioctl(struct file *file, unsigned int
|
|
if (ret)
|
|
return ret;
|
|
ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
|
|
+ /*
|
|
+ * The transaction thread may want to do more work,
|
|
+		 * namely it pokes the cleaner kthread that will start
|
|
+ * processing uncleaned subvols.
|
|
+ */
|
|
+ wake_up_process(root->fs_info->transaction_kthread);
|
|
return ret;
|
|
}
|
|
case BTRFS_IOC_START_SYNC:
|
|
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
|
|
index 07b3b36..01f977e 100644
|
|
--- a/fs/btrfs/relocation.c
|
|
+++ b/fs/btrfs/relocation.c
|
|
@@ -736,7 +736,8 @@ again:
|
|
err = ret;
|
|
goto out;
|
|
}
|
|
- BUG_ON(!ret || !path1->slots[0]);
|
|
+ ASSERT(ret);
|
|
+ ASSERT(path1->slots[0]);
|
|
|
|
path1->slots[0]--;
|
|
|
|
@@ -746,10 +747,10 @@ again:
|
|
* the backref was added previously when processing
|
|
* backref of type BTRFS_TREE_BLOCK_REF_KEY
|
|
*/
|
|
- BUG_ON(!list_is_singular(&cur->upper));
|
|
+ ASSERT(list_is_singular(&cur->upper));
|
|
edge = list_entry(cur->upper.next, struct backref_edge,
|
|
list[LOWER]);
|
|
- BUG_ON(!list_empty(&edge->list[UPPER]));
|
|
+ ASSERT(list_empty(&edge->list[UPPER]));
|
|
exist = edge->node[UPPER];
|
|
/*
|
|
* add the upper level block to pending list if we need
|
|
@@ -831,7 +832,7 @@ again:
|
|
cur->cowonly = 1;
|
|
}
|
|
#else
|
|
- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
|
|
+ ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
|
|
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
|
|
#endif
|
|
if (key.objectid == key.offset) {
|
|
@@ -840,7 +841,7 @@ again:
|
|
* backref of this type.
|
|
*/
|
|
root = find_reloc_root(rc, cur->bytenr);
|
|
- BUG_ON(!root);
|
|
+ ASSERT(root);
|
|
cur->root = root;
|
|
break;
|
|
}
|
|
@@ -868,7 +869,7 @@ again:
|
|
} else {
|
|
upper = rb_entry(rb_node, struct backref_node,
|
|
rb_node);
|
|
- BUG_ON(!upper->checked);
|
|
+ ASSERT(upper->checked);
|
|
INIT_LIST_HEAD(&edge->list[UPPER]);
|
|
}
|
|
list_add_tail(&edge->list[LOWER], &cur->upper);
|
|
@@ -892,7 +893,7 @@ again:
|
|
|
|
if (btrfs_root_level(&root->root_item) == cur->level) {
|
|
/* tree root */
|
|
- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
|
|
+ ASSERT(btrfs_root_bytenr(&root->root_item) ==
|
|
cur->bytenr);
|
|
if (should_ignore_root(root))
|
|
list_add(&cur->list, &useless);
|
|
@@ -927,7 +928,7 @@ again:
|
|
need_check = true;
|
|
for (; level < BTRFS_MAX_LEVEL; level++) {
|
|
if (!path2->nodes[level]) {
|
|
- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
|
|
+ ASSERT(btrfs_root_bytenr(&root->root_item) ==
|
|
lower->bytenr);
|
|
if (should_ignore_root(root))
|
|
list_add(&lower->list, &useless);
|
|
@@ -976,12 +977,15 @@ again:
|
|
need_check = false;
|
|
list_add_tail(&edge->list[UPPER],
|
|
&list);
|
|
- } else
|
|
+ } else {
|
|
+ if (upper->checked)
|
|
+ need_check = true;
|
|
INIT_LIST_HEAD(&edge->list[UPPER]);
|
|
+ }
|
|
} else {
|
|
upper = rb_entry(rb_node, struct backref_node,
|
|
rb_node);
|
|
- BUG_ON(!upper->checked);
|
|
+ ASSERT(upper->checked);
|
|
INIT_LIST_HEAD(&edge->list[UPPER]);
|
|
if (!upper->owner)
|
|
upper->owner = btrfs_header_owner(eb);
|
|
@@ -1025,7 +1029,7 @@ next:
|
|
* everything goes well, connect backref nodes and insert backref nodes
|
|
* into the cache.
|
|
*/
|
|
- BUG_ON(!node->checked);
|
|
+ ASSERT(node->checked);
|
|
cowonly = node->cowonly;
|
|
if (!cowonly) {
|
|
rb_node = tree_insert(&cache->rb_root, node->bytenr,
|
|
@@ -1061,8 +1065,21 @@ next:
|
|
continue;
|
|
}
|
|
|
|
- BUG_ON(!upper->checked);
|
|
- BUG_ON(cowonly != upper->cowonly);
|
|
+ if (!upper->checked) {
|
|
+ /*
|
|
+ * Still want to blow up for developers since this is a
|
|
+ * logic bug.
|
|
+ */
|
|
+ ASSERT(0);
|
|
+ err = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+ if (cowonly != upper->cowonly) {
|
|
+ ASSERT(0);
|
|
+ err = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (!cowonly) {
|
|
rb_node = tree_insert(&cache->rb_root, upper->bytenr,
|
|
&upper->rb_node);
|
|
@@ -1085,7 +1102,7 @@ next:
|
|
while (!list_empty(&useless)) {
|
|
upper = list_entry(useless.next, struct backref_node, list);
|
|
list_del_init(&upper->list);
|
|
- BUG_ON(!list_empty(&upper->upper));
|
|
+ ASSERT(list_empty(&upper->upper));
|
|
if (upper == node)
|
|
node = NULL;
|
|
if (upper->lowest) {
|
|
@@ -1118,29 +1135,45 @@ out:
|
|
if (err) {
|
|
while (!list_empty(&useless)) {
|
|
lower = list_entry(useless.next,
|
|
- struct backref_node, upper);
|
|
- list_del_init(&lower->upper);
|
|
+ struct backref_node, list);
|
|
+ list_del_init(&lower->list);
|
|
}
|
|
- upper = node;
|
|
- INIT_LIST_HEAD(&list);
|
|
- while (upper) {
|
|
- if (RB_EMPTY_NODE(&upper->rb_node)) {
|
|
- list_splice_tail(&upper->upper, &list);
|
|
- free_backref_node(cache, upper);
|
|
- }
|
|
-
|
|
- if (list_empty(&list))
|
|
- break;
|
|
-
|
|
- edge = list_entry(list.next, struct backref_edge,
|
|
- list[LOWER]);
|
|
+ while (!list_empty(&list)) {
|
|
+ edge = list_first_entry(&list, struct backref_edge,
|
|
+ list[UPPER]);
|
|
+ list_del(&edge->list[UPPER]);
|
|
list_del(&edge->list[LOWER]);
|
|
+ lower = edge->node[LOWER];
|
|
upper = edge->node[UPPER];
|
|
free_backref_edge(cache, edge);
|
|
+
|
|
+ /*
|
|
+ * Lower is no longer linked to any upper backref nodes
|
|
+ * and isn't in the cache, we can free it ourselves.
|
|
+ */
|
|
+ if (list_empty(&lower->upper) &&
|
|
+ RB_EMPTY_NODE(&lower->rb_node))
|
|
+ list_add(&lower->list, &useless);
|
|
+
|
|
+ if (!RB_EMPTY_NODE(&upper->rb_node))
|
|
+ continue;
|
|
+
|
|
+			/* Add this guy's upper edges to the list to process */
|
|
+ list_for_each_entry(edge, &upper->upper, list[LOWER])
|
|
+ list_add_tail(&edge->list[UPPER], &list);
|
|
+ if (list_empty(&upper->upper))
|
|
+ list_add(&upper->list, &useless);
|
|
+ }
|
|
+
|
|
+ while (!list_empty(&useless)) {
|
|
+ lower = list_entry(useless.next,
|
|
+ struct backref_node, list);
|
|
+ list_del_init(&lower->list);
|
|
+ free_backref_node(cache, lower);
|
|
}
|
|
return ERR_PTR(err);
|
|
}
|
|
- BUG_ON(node && node->detached);
|
|
+ ASSERT(!node || !node->detached);
|
|
return node;
|
|
}
|
|
|
|
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
|
|
index a65ed4c..20d7935 100644
|
|
--- a/fs/btrfs/send.c
|
|
+++ b/fs/btrfs/send.c
|
|
@@ -4728,7 +4728,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
|
|
|
|
if (S_ISREG(sctx->cur_inode_mode)) {
|
|
if (need_send_hole(sctx)) {
|
|
- if (sctx->cur_inode_last_extent == (u64)-1) {
|
|
+ if (sctx->cur_inode_last_extent == (u64)-1 ||
|
|
+ sctx->cur_inode_last_extent <
|
|
+ sctx->cur_inode_size) {
|
|
ret = get_last_extent(sctx, (u64)-1);
|
|
if (ret)
|
|
goto out;
|
|
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
|
|
index d04db81..92cbfbf 100644
|
|
--- a/fs/btrfs/super.c
|
|
+++ b/fs/btrfs/super.c
|
|
@@ -906,6 +906,15 @@ find_root:
|
|
if (IS_ERR(new_root))
|
|
return ERR_CAST(new_root);
|
|
|
|
+ if (!(sb->s_flags & MS_RDONLY)) {
|
|
+ int ret;
|
|
+ down_read(&fs_info->cleanup_work_sem);
|
|
+ ret = btrfs_orphan_cleanup(new_root);
|
|
+ up_read(&fs_info->cleanup_work_sem);
|
|
+ if (ret)
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
dir_id = btrfs_root_dirid(&new_root->root_item);
|
|
setup_root:
|
|
location.objectid = dir_id;
|
|
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
|
|
index b05bf58..a0b65a0 100644
|
|
--- a/fs/btrfs/transaction.c
|
|
+++ b/fs/btrfs/transaction.c
|
|
@@ -592,7 +592,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
|
|
if (transid <= root->fs_info->last_trans_committed)
|
|
goto out;
|
|
|
|
- ret = -EINVAL;
|
|
/* find specified transaction */
|
|
spin_lock(&root->fs_info->trans_lock);
|
|
list_for_each_entry(t, &root->fs_info->trans_list, list) {
|
|
@@ -608,9 +607,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
|
|
}
|
|
}
|
|
spin_unlock(&root->fs_info->trans_lock);
|
|
- /* The specified transaction doesn't exist */
|
|
- if (!cur_trans)
|
|
+
|
|
+ /*
|
|
+ * The specified transaction doesn't exist, or we
|
|
+ * raced with btrfs_commit_transaction
|
|
+ */
|
|
+ if (!cur_trans) {
|
|
+ if (transid > root->fs_info->last_trans_committed)
|
|
+ ret = -EINVAL;
|
|
goto out;
|
|
+ }
|
|
} else {
|
|
/* find newest transaction that is committing | committed */
|
|
spin_lock(&root->fs_info->trans_lock);
|
|
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
|
|
index 39d83da..ec8b654 100644
|
|
--- a/fs/btrfs/tree-log.c
|
|
+++ b/fs/btrfs/tree-log.c
|
|
@@ -979,7 +979,7 @@ again:
|
|
base = btrfs_item_ptr_offset(leaf, path->slots[0]);
|
|
|
|
while (cur_offset < item_size) {
|
|
- extref = (struct btrfs_inode_extref *)base + cur_offset;
|
|
+ extref = (struct btrfs_inode_extref *)(base + cur_offset);
|
|
|
|
victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
|
|
|
|
@@ -1235,13 +1235,14 @@ out:
|
|
}
|
|
|
|
static int insert_orphan_item(struct btrfs_trans_handle *trans,
|
|
- struct btrfs_root *root, u64 offset)
|
|
+ struct btrfs_root *root, u64 ino)
|
|
{
|
|
int ret;
|
|
- ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
|
|
- offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
|
|
- if (ret > 0)
|
|
- ret = btrfs_insert_orphan_item(trans, root, offset);
|
|
+
|
|
+ ret = btrfs_insert_orphan_item(trans, root, ino);
|
|
+ if (ret == -EEXIST)
|
|
+ ret = 0;
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
|
|
index 7f78cbf..4c29db6 100644
|
|
--- a/fs/btrfs/ulist.h
|
|
+++ b/fs/btrfs/ulist.h
|
|
@@ -57,6 +57,21 @@ void ulist_free(struct ulist *ulist);
|
|
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
|
|
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
|
|
u64 *old_aux, gfp_t gfp_mask);
|
|
+
|
|
+/* just like ulist_add_merge() but take a pointer for the aux data */
|
|
+static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
|
|
+ void **old_aux, gfp_t gfp_mask)
|
|
+{
|
|
+#if BITS_PER_LONG == 32
|
|
+ u64 old64 = (uintptr_t)*old_aux;
|
|
+ int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
|
|
+ *old_aux = (void *)((uintptr_t)old64);
|
|
+ return ret;
|
|
+#else
|
|
+ return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
|
|
+#endif
|
|
+}
|
|
+
|
|
struct ulist_node *ulist_next(struct ulist *ulist,
|
|
struct ulist_iterator *uiter);
|
|
|
|
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
|
|
index ad8328d..618e86c 100644
|
|
--- a/fs/btrfs/xattr.c
|
|
+++ b/fs/btrfs/xattr.c
|
|
@@ -29,6 +29,7 @@
|
|
#include "xattr.h"
|
|
#include "disk-io.h"
|
|
#include "props.h"
|
|
+#include "locking.h"
|
|
|
|
|
|
ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
|
|
@@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
|
|
struct inode *inode, const char *name,
|
|
const void *value, size_t size, int flags)
|
|
{
|
|
- struct btrfs_dir_item *di;
|
|
+ struct btrfs_dir_item *di = NULL;
|
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
|
struct btrfs_path *path;
|
|
size_t name_len = strlen(name);
|
|
@@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
|
|
path = btrfs_alloc_path();
|
|
if (!path)
|
|
return -ENOMEM;
|
|
+ path->skip_release_on_error = 1;
|
|
+
|
|
+ if (!value) {
|
|
+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
|
|
+ name, name_len, -1);
|
|
+ if (!di && (flags & XATTR_REPLACE))
|
|
+ ret = -ENODATA;
|
|
+ else if (di)
|
|
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
|
|
+ goto out;
|
|
+ }
|
|
|
|
+ /*
|
|
+ * For a replace we can't just do the insert blindly.
|
|
+ * Do a lookup first (read-only btrfs_search_slot), and return if xattr
|
|
+ * doesn't exist. If it exists, fall down below to the insert/replace
|
|
+ * path - we can't race with a concurrent xattr delete, because the VFS
|
|
+ * locks the inode's i_mutex before calling setxattr or removexattr.
|
|
+ */
|
|
if (flags & XATTR_REPLACE) {
|
|
- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
|
|
- name_len, -1);
|
|
- if (IS_ERR(di)) {
|
|
- ret = PTR_ERR(di);
|
|
- goto out;
|
|
- } else if (!di) {
|
|
+ ASSERT(mutex_is_locked(&inode->i_mutex));
|
|
+ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
|
|
+ name, name_len, 0);
|
|
+ if (!di) {
|
|
ret = -ENODATA;
|
|
goto out;
|
|
}
|
|
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
|
|
- if (ret)
|
|
- goto out;
|
|
btrfs_release_path(path);
|
|
+ di = NULL;
|
|
+ }
|
|
|
|
+ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
|
|
+ name, name_len, value, size);
|
|
+ if (ret == -EOVERFLOW) {
|
|
/*
|
|
- * remove the attribute
|
|
+ * We have an existing item in a leaf, split_leaf couldn't
|
|
+ * expand it. That item might have or not a dir_item that
|
|
+ * matches our target xattr, so lets check.
|
|
*/
|
|
- if (!value)
|
|
- goto out;
|
|
- } else {
|
|
- di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
|
|
- name, name_len, 0);
|
|
- if (IS_ERR(di)) {
|
|
- ret = PTR_ERR(di);
|
|
+ ret = 0;
|
|
+ btrfs_assert_tree_locked(path->nodes[0]);
|
|
+ di = btrfs_match_dir_item_name(root, path, name, name_len);
|
|
+ if (!di && !(flags & XATTR_REPLACE)) {
|
|
+ ret = -ENOSPC;
|
|
goto out;
|
|
}
|
|
- if (!di && !value)
|
|
- goto out;
|
|
- btrfs_release_path(path);
|
|
+ } else if (ret == -EEXIST) {
|
|
+ ret = 0;
|
|
+ di = btrfs_match_dir_item_name(root, path, name, name_len);
|
|
+ ASSERT(di); /* logic error */
|
|
+ } else if (ret) {
|
|
+ goto out;
|
|
}
|
|
|
|
-again:
|
|
- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
|
|
- name, name_len, value, size);
|
|
- /*
|
|
- * If we're setting an xattr to a new value but the new value is say
|
|
- * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
|
|
- * back from split_leaf. This is because it thinks we'll be extending
|
|
- * the existing item size, but we're asking for enough space to add the
|
|
- * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
|
|
- * the rest of the function figure it out.
|
|
- */
|
|
- if (ret == -EOVERFLOW)
|
|
+ if (di && (flags & XATTR_CREATE)) {
|
|
ret = -EEXIST;
|
|
+ goto out;
|
|
+ }
|
|
|
|
- if (ret == -EEXIST) {
|
|
- if (flags & XATTR_CREATE)
|
|
- goto out;
|
|
+ if (di) {
|
|
/*
|
|
- * We can't use the path we already have since we won't have the
|
|
- * proper locking for a delete, so release the path and
|
|
- * re-lookup to delete the thing.
|
|
+ * We're doing a replace, and it must be atomic, that is, at
|
|
+ * any point in time we have either the old or the new xattr
|
|
+ * value in the tree. We don't want readers (getxattr and
|
|
+ * listxattrs) to miss a value, this is specially important
|
|
+ * for ACLs.
|
|
*/
|
|
- btrfs_release_path(path);
|
|
- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
|
|
- name, name_len, -1);
|
|
- if (IS_ERR(di)) {
|
|
- ret = PTR_ERR(di);
|
|
- goto out;
|
|
- } else if (!di) {
|
|
- /* Shouldn't happen but just in case... */
|
|
- btrfs_release_path(path);
|
|
- goto again;
|
|
+ const int slot = path->slots[0];
|
|
+ struct extent_buffer *leaf = path->nodes[0];
|
|
+ const u16 old_data_len = btrfs_dir_data_len(leaf, di);
|
|
+ const u32 item_size = btrfs_item_size_nr(leaf, slot);
|
|
+ const u32 data_size = sizeof(*di) + name_len + size;
|
|
+ struct btrfs_item *item;
|
|
+ unsigned long data_ptr;
|
|
+ char *ptr;
|
|
+
|
|
+ if (size > old_data_len) {
|
|
+ if (btrfs_leaf_free_space(root, leaf) <
|
|
+ (size - old_data_len)) {
|
|
+ ret = -ENOSPC;
|
|
+ goto out;
|
|
+ }
|
|
}
|
|
|
|
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
|
|
- if (ret)
|
|
- goto out;
|
|
+ if (old_data_len + name_len + sizeof(*di) == item_size) {
|
|
+ /* No other xattrs packed in the same leaf item. */
|
|
+ if (size > old_data_len)
|
|
+ btrfs_extend_item(root, path,
|
|
+ size - old_data_len);
|
|
+ else if (size < old_data_len)
|
|
+ btrfs_truncate_item(root, path, data_size, 1);
|
|
+ } else {
|
|
+ /* There are other xattrs packed in the same item. */
|
|
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+ btrfs_extend_item(root, path, data_size);
|
|
+ }
|
|
|
|
+ item = btrfs_item_nr(slot);
|
|
+ ptr = btrfs_item_ptr(leaf, slot, char);
|
|
+ ptr += btrfs_item_size(leaf, item) - data_size;
|
|
+ di = (struct btrfs_dir_item *)ptr;
|
|
+ btrfs_set_dir_data_len(leaf, di, size);
|
|
+ data_ptr = ((unsigned long)(di + 1)) + name_len;
|
|
+ write_extent_buffer(leaf, value, data_ptr, size);
|
|
+ btrfs_mark_buffer_dirty(leaf);
|
|
+ } else {
|
|
/*
|
|
- * We have a value to set, so go back and try to insert it now.
|
|
+ * Insert, and we had space for the xattr, so path->slots[0] is
|
|
+ * where our xattr dir_item is and btrfs_insert_xattr_item()
|
|
+ * filled it.
|
|
*/
|
|
- if (value) {
|
|
- btrfs_release_path(path);
|
|
- goto again;
|
|
- }
|
|
}
|
|
out:
|
|
btrfs_free_path(path);
|
|
@@ -324,22 +360,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
|
|
/*
|
|
* Check if the attribute is in a supported namespace.
|
|
*
|
|
- * This applied after the check for the synthetic attributes in the system
|
|
+ * This is applied after the check for the synthetic attributes in the system
|
|
* namespace.
|
|
*/
|
|
-static bool btrfs_is_valid_xattr(const char *name)
|
|
+static int btrfs_is_valid_xattr(const char *name)
|
|
{
|
|
- return !strncmp(name, XATTR_SECURITY_PREFIX,
|
|
- XATTR_SECURITY_PREFIX_LEN) ||
|
|
- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
|
|
- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
|
|
- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
|
|
- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
|
|
+ int len = strlen(name);
|
|
+ int prefixlen = 0;
|
|
+
|
|
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
|
|
+ XATTR_SECURITY_PREFIX_LEN))
|
|
+ prefixlen = XATTR_SECURITY_PREFIX_LEN;
|
|
+ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
|
|
+ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
|
|
+ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
|
|
+ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
|
|
+ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
|
|
+ prefixlen = XATTR_USER_PREFIX_LEN;
|
|
+ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
|
|
+ prefixlen = XATTR_BTRFS_PREFIX_LEN;
|
|
+ else
|
|
+ return -EOPNOTSUPP;
|
|
+
|
|
+ /*
|
|
+ * The name cannot consist of just prefix
|
|
+ */
|
|
+ if (len <= prefixlen)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
|
|
void *buffer, size_t size)
|
|
{
|
|
+ int ret;
|
|
+
|
|
/*
|
|
* If this is a request for a synthetic attribute in the system.*
|
|
* namespace use the generic infrastructure to resolve a handler
|
|
@@ -348,8 +404,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
|
|
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
|
|
return generic_getxattr(dentry, name, buffer, size);
|
|
|
|
- if (!btrfs_is_valid_xattr(name))
|
|
- return -EOPNOTSUPP;
|
|
+ ret = btrfs_is_valid_xattr(name);
|
|
+ if (ret)
|
|
+ return ret;
|
|
return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
|
|
}
|
|
|
|
@@ -357,6 +414,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
|
size_t size, int flags)
|
|
{
|
|
struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
|
|
+ int ret;
|
|
|
|
/*
|
|
* The permission on security.* and system.* is not checked
|
|
@@ -373,8 +431,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
|
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
|
|
return generic_setxattr(dentry, name, value, size, flags);
|
|
|
|
- if (!btrfs_is_valid_xattr(name))
|
|
- return -EOPNOTSUPP;
|
|
+ ret = btrfs_is_valid_xattr(name);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
|
|
return btrfs_set_prop(dentry->d_inode, name,
|
|
@@ -390,6 +449,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
|
int btrfs_removexattr(struct dentry *dentry, const char *name)
|
|
{
|
|
struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
|
|
+ int ret;
|
|
|
|
/*
|
|
* The permission on security.* and system.* is not checked
|
|
@@ -406,8 +466,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
|
|
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
|
|
return generic_removexattr(dentry, name);
|
|
|
|
- if (!btrfs_is_valid_xattr(name))
|
|
- return -EOPNOTSUPP;
|
|
+ ret = btrfs_is_valid_xattr(name);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
|
|
return btrfs_set_prop(dentry->d_inode, name,
|
|
diff --git a/fs/buffer.c b/fs/buffer.c
|
|
index 8c53a2b..f48650c 100644
|
|
--- a/fs/buffer.c
|
|
+++ b/fs/buffer.c
|
|
@@ -227,7 +227,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
|
|
int all_mapped = 1;
|
|
|
|
index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
|
|
- page = find_get_page(bd_mapping, index);
|
|
+ page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
|
|
if (!page)
|
|
goto out;
|
|
|
|
@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
|
|
bh = page_buffers(page);
|
|
if (bh->b_size == size) {
|
|
end_block = init_page_buffers(page, bdev,
|
|
- index << sizebits, size);
|
|
+ (sector_t)index << sizebits,
|
|
+ size);
|
|
goto done;
|
|
}
|
|
if (!try_to_free_buffers(page))
|
|
@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
|
|
*/
|
|
spin_lock(&inode->i_mapping->private_lock);
|
|
link_dev_buffers(page, bh);
|
|
- end_block = init_page_buffers(page, bdev, index << sizebits, size);
|
|
+ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
|
|
+ size);
|
|
spin_unlock(&inode->i_mapping->private_lock);
|
|
done:
|
|
ret = (block < end_block) ? 1 : -ENXIO;
|
|
@@ -1366,12 +1368,13 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
|
|
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
|
|
|
|
if (bh == NULL) {
|
|
+ /* __find_get_block_slow will mark the page accessed */
|
|
bh = __find_get_block_slow(bdev, block);
|
|
if (bh)
|
|
bh_lru_install(bh);
|
|
- }
|
|
- if (bh)
|
|
+ } else
|
|
touch_buffer(bh);
|
|
+
|
|
return bh;
|
|
}
|
|
EXPORT_SYMBOL(__find_get_block);
|
|
@@ -1483,16 +1486,27 @@ EXPORT_SYMBOL(set_bh_page);
|
|
/*
|
|
* Called when truncating a buffer on a page completely.
|
|
*/
|
|
+
|
|
+/* Bits that are cleared during an invalidate */
|
|
+#define BUFFER_FLAGS_DISCARD \
|
|
+ (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
|
|
+ 1 << BH_Delay | 1 << BH_Unwritten)
|
|
+
|
|
static void discard_buffer(struct buffer_head * bh)
|
|
{
|
|
+ unsigned long b_state, b_state_old;
|
|
+
|
|
lock_buffer(bh);
|
|
clear_buffer_dirty(bh);
|
|
bh->b_bdev = NULL;
|
|
- clear_buffer_mapped(bh);
|
|
- clear_buffer_req(bh);
|
|
- clear_buffer_new(bh);
|
|
- clear_buffer_delay(bh);
|
|
- clear_buffer_unwritten(bh);
|
|
+ b_state = bh->b_state;
|
|
+ for (;;) {
|
|
+ b_state_old = cmpxchg(&bh->b_state, b_state,
|
|
+ (b_state & ~BUFFER_FLAGS_DISCARD));
|
|
+ if (b_state_old == b_state)
|
|
+ break;
|
|
+ b_state = b_state_old;
|
|
+ }
|
|
unlock_buffer(bh);
|
|
}
|
|
|
|
@@ -2075,6 +2089,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
|
|
struct page *page, void *fsdata)
|
|
{
|
|
struct inode *inode = mapping->host;
|
|
+ loff_t old_size = inode->i_size;
|
|
int i_size_changed = 0;
|
|
|
|
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
|
|
@@ -2094,6 +2109,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
|
|
unlock_page(page);
|
|
page_cache_release(page);
|
|
|
|
+ if (old_size < pos)
|
|
+ pagecache_isize_extended(inode, old_size, pos);
|
|
/*
|
|
* Don't mark the inode dirty under page lock. First, it unnecessarily
|
|
* makes the holding time of page lock longer. Second, it forces lock
|
|
@@ -2311,6 +2328,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
|
|
err = 0;
|
|
|
|
balance_dirty_pages_ratelimited(mapping);
|
|
+
|
|
+ if (unlikely(fatal_signal_pending(current))) {
|
|
+ err = -EINTR;
|
|
+ goto out;
|
|
+ }
|
|
}
|
|
|
|
/* page covers the boundary, find the boundary offset */
|
|
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
|
|
index b53278c..94a85ee 100644
|
|
--- a/fs/ceph/addr.c
|
|
+++ b/fs/ceph/addr.c
|
|
@@ -676,7 +676,7 @@ static int ceph_writepages_start(struct address_space *mapping,
|
|
int rc = 0;
|
|
unsigned wsize = 1 << inode->i_blkbits;
|
|
struct ceph_osd_request *req = NULL;
|
|
- int do_sync;
|
|
+ int do_sync = 0;
|
|
u64 truncate_size, snap_size;
|
|
u32 truncate_seq;
|
|
|
|
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
|
|
index 5e0982a..18e14cf 100644
|
|
--- a/fs/ceph/dir.c
|
|
+++ b/fs/ceph/dir.c
|
|
@@ -111,7 +111,7 @@ static int fpos_cmp(loff_t l, loff_t r)
|
|
/*
|
|
* When possible, we try to satisfy a readdir by peeking at the
|
|
* dcache. We make this work by carefully ordering dentries on
|
|
- * d_u.d_child when we initially get results back from the MDS, and
|
|
+ * d_child when we initially get results back from the MDS, and
|
|
* falling back to a "normal" sync readdir if any dentries in the dir
|
|
* are dropped.
|
|
*
|
|
@@ -146,11 +146,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
|
|
p = parent->d_subdirs.prev;
|
|
dout(" initial p %p/%p\n", p->prev, p->next);
|
|
} else {
|
|
- p = last->d_u.d_child.prev;
|
|
+ p = last->d_child.prev;
|
|
}
|
|
|
|
more:
|
|
- dentry = list_entry(p, struct dentry, d_u.d_child);
|
|
+ dentry = list_entry(p, struct dentry, d_child);
|
|
di = ceph_dentry(dentry);
|
|
while (1) {
|
|
dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
|
|
@@ -172,7 +172,7 @@ more:
|
|
!dentry->d_inode ? " null" : "");
|
|
spin_unlock(&dentry->d_lock);
|
|
p = p->prev;
|
|
- dentry = list_entry(p, struct dentry, d_u.d_child);
|
|
+ dentry = list_entry(p, struct dentry, d_child);
|
|
di = ceph_dentry(dentry);
|
|
}
|
|
|
|
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
|
|
index 6471f9c..ee24490 100644
|
|
--- a/fs/ceph/inode.c
|
|
+++ b/fs/ceph/inode.c
|
|
@@ -1289,7 +1289,7 @@ retry_lookup:
|
|
/* reorder parent's d_subdirs */
|
|
spin_lock(&parent->d_lock);
|
|
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
|
|
- list_move(&dn->d_u.d_child, &parent->d_subdirs);
|
|
+ list_move(&dn->d_child, &parent->d_subdirs);
|
|
spin_unlock(&dn->d_lock);
|
|
spin_unlock(&parent->d_lock);
|
|
}
|
|
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
|
|
index f4f050a..339c412 100644
|
|
--- a/fs/ceph/mds_client.c
|
|
+++ b/fs/ceph/mds_client.c
|
|
@@ -1461,15 +1461,18 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
|
|
|
|
dout("discard_cap_releases mds%d\n", session->s_mds);
|
|
|
|
- /* zero out the in-progress message */
|
|
- msg = list_first_entry(&session->s_cap_releases,
|
|
- struct ceph_msg, list_head);
|
|
- head = msg->front.iov_base;
|
|
- num = le32_to_cpu(head->num);
|
|
- dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
|
|
- head->num = cpu_to_le32(0);
|
|
- msg->front.iov_len = sizeof(*head);
|
|
- session->s_num_cap_releases += num;
|
|
+ if (!list_empty(&session->s_cap_releases)) {
|
|
+ /* zero out the in-progress message */
|
|
+ msg = list_first_entry(&session->s_cap_releases,
|
|
+ struct ceph_msg, list_head);
|
|
+ head = msg->front.iov_base;
|
|
+ num = le32_to_cpu(head->num);
|
|
+ dout("discard_cap_releases mds%d %p %u\n",
|
|
+ session->s_mds, msg, num);
|
|
+ head->num = cpu_to_le32(0);
|
|
+ msg->front.iov_len = sizeof(*head);
|
|
+ session->s_num_cap_releases += num;
|
|
+ }
|
|
|
|
/* requeue completed messages */
|
|
while (!list_empty(&session->s_cap_releases_done)) {
|
|
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
|
|
index 30f6e92..5d12d69 100644
|
|
--- a/fs/cifs/cifsglob.h
|
|
+++ b/fs/cifs/cifsglob.h
|
|
@@ -70,11 +70,6 @@
|
|
#define SERVER_NAME_LENGTH 40
|
|
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
|
|
|
|
-/* used to define string lengths for reversing unicode strings */
|
|
-/* (256+1)*2 = 514 */
|
|
-/* (max path length + 1 for null) * 2 for unicode */
|
|
-#define MAX_NAME 514
|
|
-
|
|
/* SMB echo "timeout" -- FIXME: tunable? */
|
|
#define SMB_ECHO_INTERVAL (60 * HZ)
|
|
|
|
@@ -404,6 +399,8 @@ struct smb_version_operations {
|
|
const struct cifs_fid *, u32 *);
|
|
int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
|
|
int);
|
|
+ /* check if we need to issue closedir */
|
|
+ bool (*dir_needs_close)(struct cifsFileInfo *);
|
|
};
|
|
|
|
struct smb_version_values {
|
|
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
|
|
index 87c4dd0..40ddb6e 100644
|
|
--- a/fs/cifs/file.c
|
|
+++ b/fs/cifs/file.c
|
|
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
|
struct cifsLockInfo *li, *tmp;
|
|
struct cifs_fid fid;
|
|
struct cifs_pending_open open;
|
|
+ bool oplock_break_cancelled;
|
|
|
|
spin_lock(&cifs_file_list_lock);
|
|
if (--cifs_file->count > 0) {
|
|
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
|
}
|
|
spin_unlock(&cifs_file_list_lock);
|
|
|
|
- cancel_work_sync(&cifs_file->oplock_break);
|
|
+ oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
|
|
|
|
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
|
|
struct TCP_Server_Info *server = tcon->ses->server;
|
|
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
|
_free_xid(xid);
|
|
}
|
|
|
|
+ if (oplock_break_cancelled)
|
|
+ cifs_done_oplock_break(cifsi);
|
|
+
|
|
cifs_del_pending_open(&open);
|
|
|
|
/*
|
|
@@ -762,7 +766,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
|
|
|
|
cifs_dbg(FYI, "Freeing private data in close dir\n");
|
|
spin_lock(&cifs_file_list_lock);
|
|
- if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
|
|
+ if (server->ops->dir_needs_close(cfile)) {
|
|
cfile->invalidHandle = true;
|
|
spin_unlock(&cifs_file_list_lock);
|
|
if (server->ops->close_dir)
|
|
@@ -1817,6 +1821,7 @@ refind_writable:
|
|
cifsFileInfo_put(inv_file);
|
|
spin_lock(&cifs_file_list_lock);
|
|
++refind;
|
|
+ inv_file = NULL;
|
|
goto refind_writable;
|
|
}
|
|
}
|
|
@@ -2844,7 +2849,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
|
|
total_read += result;
|
|
}
|
|
|
|
- return total_read > 0 ? total_read : result;
|
|
+ return total_read > 0 && result != -EAGAIN ? total_read : result;
|
|
}
|
|
|
|
static ssize_t
|
|
@@ -3267,7 +3272,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
|
|
total_read += result;
|
|
}
|
|
|
|
- return total_read > 0 ? total_read : result;
|
|
+ return total_read > 0 && result != -EAGAIN ? total_read : result;
|
|
}
|
|
|
|
static int cifs_readpages(struct file *file, struct address_space *mapping,
|
|
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
|
|
index aadc2b6..7ee427e 100644
|
|
--- a/fs/cifs/inode.c
|
|
+++ b/fs/cifs/inode.c
|
|
@@ -883,7 +883,7 @@ inode_has_hashed_dentries(struct inode *inode)
|
|
struct dentry *dentry;
|
|
|
|
spin_lock(&inode->i_lock);
|
|
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
|
|
if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
|
|
spin_unlock(&inode->i_lock);
|
|
return true;
|
|
@@ -1706,13 +1706,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
|
|
unlink_target:
|
|
/* Try unlinking the target dentry if it's not negative */
|
|
if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
|
|
- tmprc = cifs_unlink(target_dir, target_dentry);
|
|
+ if (d_is_dir(target_dentry))
|
|
+ tmprc = cifs_rmdir(target_dir, target_dentry);
|
|
+ else
|
|
+ tmprc = cifs_unlink(target_dir, target_dentry);
|
|
if (tmprc)
|
|
goto cifs_rename_exit;
|
|
rc = cifs_do_rename(xid, source_dentry, from_name,
|
|
target_dentry, to_name);
|
|
}
|
|
|
|
+ /* force revalidate to go get info when needed */
|
|
+ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
|
|
+
|
|
+ source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
|
|
+ target_dir->i_mtime = current_fs_time(source_dir->i_sb);
|
|
+
|
|
cifs_rename_exit:
|
|
kfree(info_buf_source);
|
|
kfree(from_name);
|
|
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
|
|
index 7749230..dfc9564 100644
|
|
--- a/fs/cifs/ioctl.c
|
|
+++ b/fs/cifs/ioctl.c
|
|
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
|
|
}
|
|
|
|
src_inode = src_file.file->f_dentry->d_inode;
|
|
+ rc = -EINVAL;
|
|
+ if (S_ISDIR(src_inode->i_mode))
|
|
+ goto out_fput;
|
|
|
|
/*
|
|
* Note: cifs case is easier than btrfs since server responsible for
|
|
* checks for proper open modes and file type and if it wants
|
|
* server could even support copy of range where source = target
|
|
*/
|
|
-
|
|
- /* so we do not deadlock racing two ioctls on same files */
|
|
- if (target_inode < src_inode) {
|
|
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
|
|
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
|
|
- } else {
|
|
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
|
|
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
|
|
- }
|
|
+ lock_two_nondirectories(target_inode, src_inode);
|
|
|
|
/* determine range to clone */
|
|
rc = -EINVAL;
|
|
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
|
|
out_unlock:
|
|
/* although unlocking in the reverse order from locking is not
|
|
strictly necessary here it is a little cleaner to be consistent */
|
|
- if (target_inode < src_inode) {
|
|
- mutex_unlock(&src_inode->i_mutex);
|
|
- mutex_unlock(&target_inode->i_mutex);
|
|
- } else {
|
|
- mutex_unlock(&target_inode->i_mutex);
|
|
- mutex_unlock(&src_inode->i_mutex);
|
|
- }
|
|
+ unlock_two_nondirectories(src_inode, target_inode);
|
|
out_fput:
|
|
fdput(src_file);
|
|
out_drop_write:
|
|
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
|
|
index 68559fd..a5c2812 100644
|
|
--- a/fs/cifs/link.c
|
|
+++ b/fs/cifs/link.c
|
|
@@ -213,8 +213,12 @@ create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
|
|
if (rc)
|
|
goto out;
|
|
|
|
- rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon, cifs_sb,
|
|
- fromName, buf, &bytes_written);
|
|
+ if (tcon->ses->server->ops->create_mf_symlink)
|
|
+ rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
|
|
+ cifs_sb, fromName, buf, &bytes_written);
|
|
+ else
|
|
+ rc = -EOPNOTSUPP;
|
|
+
|
|
if (rc)
|
|
goto out;
|
|
|
|
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
|
|
index b15862e..b334a89 100644
|
|
--- a/fs/cifs/readdir.c
|
|
+++ b/fs/cifs/readdir.c
|
|
@@ -593,11 +593,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
|
|
/* close and restart search */
|
|
cifs_dbg(FYI, "search backing up - close and restart search\n");
|
|
spin_lock(&cifs_file_list_lock);
|
|
- if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
|
|
+ if (server->ops->dir_needs_close(cfile)) {
|
|
cfile->invalidHandle = true;
|
|
spin_unlock(&cifs_file_list_lock);
|
|
- if (server->ops->close)
|
|
- server->ops->close(xid, tcon, &cfile->fid);
|
|
+ if (server->ops->close_dir)
|
|
+ server->ops->close_dir(xid, tcon, &cfile->fid);
|
|
} else
|
|
spin_unlock(&cifs_file_list_lock);
|
|
if (cfile->srch_inf.ntwrk_buf_start) {
|
|
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
|
|
index d1fdfa8..e9ad8d3 100644
|
|
--- a/fs/cifs/smb1ops.c
|
|
+++ b/fs/cifs/smb1ops.c
|
|
@@ -586,7 +586,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
|
|
tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
|
|
if (tmprc == -EOPNOTSUPP)
|
|
*symlink = true;
|
|
- else
|
|
+ else if (tmprc == 0)
|
|
CIFSSMBClose(xid, tcon, fid.netfid);
|
|
}
|
|
|
|
@@ -1009,6 +1009,12 @@ cifs_is_read_op(__u32 oplock)
|
|
return oplock == OPLOCK_READ;
|
|
}
|
|
|
|
+static bool
|
|
+cifs_dir_needs_close(struct cifsFileInfo *cfile)
|
|
+{
|
|
+ return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
|
|
+}
|
|
+
|
|
struct smb_version_operations smb1_operations = {
|
|
.send_cancel = send_nt_cancel,
|
|
.compare_fids = cifs_compare_fids,
|
|
@@ -1078,6 +1084,7 @@ struct smb_version_operations smb1_operations = {
|
|
.query_mf_symlink = cifs_query_mf_symlink,
|
|
.create_mf_symlink = cifs_create_mf_symlink,
|
|
.is_read_op = cifs_is_read_op,
|
|
+ .dir_needs_close = cifs_dir_needs_close,
|
|
#ifdef CONFIG_CIFS_XATTR
|
|
.query_all_EAs = CIFSSMBQAllEAs,
|
|
.set_EA = CIFSSMBSetEA,
|
|
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
|
|
index 3f17b45..4599294 100644
|
|
--- a/fs/cifs/smb2file.c
|
|
+++ b/fs/cifs/smb2file.c
|
|
@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
|
|
goto out;
|
|
}
|
|
|
|
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
|
|
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
|
|
GFP_KERNEL);
|
|
if (smb2_data == NULL) {
|
|
rc = -ENOMEM;
|
|
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
|
|
index 84c012a..215f8d3 100644
|
|
--- a/fs/cifs/smb2inode.c
|
|
+++ b/fs/cifs/smb2inode.c
|
|
@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
|
|
*adjust_tz = false;
|
|
*symlink = false;
|
|
|
|
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
|
|
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
|
|
GFP_KERNEL);
|
|
if (smb2_data == NULL)
|
|
return -ENOMEM;
|
|
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
|
|
index 94bd4fb..a491814 100644
|
|
--- a/fs/cifs/smb2maperror.c
|
|
+++ b/fs/cifs/smb2maperror.c
|
|
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
|
|
{STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
|
|
{STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
|
|
{STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
|
|
- {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
|
|
+ {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
|
|
{STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
|
|
{STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
|
|
{STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
|
|
@@ -256,6 +256,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
|
|
{STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
|
|
"STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
|
|
{STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
|
|
+ {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
|
|
+ "STATUS_REPARSE_NOT_HANDLED"},
|
|
{STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
|
|
"STATUS_DEVICE_REQUIRES_CLEANING"},
|
|
{STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
|
|
@@ -605,7 +607,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
|
|
{STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
|
|
{STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
|
|
{STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
|
|
- {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
|
|
+ {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
|
|
{STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
|
|
{STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
|
|
{STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
|
|
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
|
|
index 35ddc3e..30f3eb5 100644
|
|
--- a/fs/cifs/smb2ops.c
|
|
+++ b/fs/cifs/smb2ops.c
|
|
@@ -339,7 +339,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
|
|
int rc;
|
|
struct smb2_file_all_info *smb2_data;
|
|
|
|
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
|
|
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
|
|
GFP_KERNEL);
|
|
if (smb2_data == NULL)
|
|
return -ENOMEM;
|
|
@@ -630,7 +630,8 @@ smb2_clone_range(const unsigned int xid,
|
|
|
|
/* No need to change MaxChunks since already set to 1 */
|
|
chunk_sizes_updated = true;
|
|
- }
|
|
+ } else
|
|
+ goto cchunk_out;
|
|
}
|
|
|
|
cchunk_out:
|
|
@@ -1102,6 +1103,12 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch)
|
|
return le32_to_cpu(lc->lcontext.LeaseState);
|
|
}
|
|
|
|
+static bool
|
|
+smb2_dir_needs_close(struct cifsFileInfo *cfile)
|
|
+{
|
|
+ return !cfile->invalidHandle;
|
|
+}
|
|
+
|
|
struct smb_version_operations smb20_operations = {
|
|
.compare_fids = smb2_compare_fids,
|
|
.setup_request = smb2_setup_request,
|
|
@@ -1175,6 +1182,7 @@ struct smb_version_operations smb20_operations = {
|
|
.create_lease_buf = smb2_create_lease_buf,
|
|
.parse_lease_buf = smb2_parse_lease_buf,
|
|
.clone_range = smb2_clone_range,
|
|
+ .dir_needs_close = smb2_dir_needs_close,
|
|
};
|
|
|
|
struct smb_version_operations smb21_operations = {
|
|
@@ -1250,6 +1258,7 @@ struct smb_version_operations smb21_operations = {
|
|
.create_lease_buf = smb2_create_lease_buf,
|
|
.parse_lease_buf = smb2_parse_lease_buf,
|
|
.clone_range = smb2_clone_range,
|
|
+ .dir_needs_close = smb2_dir_needs_close,
|
|
};
|
|
|
|
struct smb_version_operations smb30_operations = {
|
|
@@ -1328,6 +1337,7 @@ struct smb_version_operations smb30_operations = {
|
|
.parse_lease_buf = smb3_parse_lease_buf,
|
|
.clone_range = smb2_clone_range,
|
|
.validate_negotiate = smb3_validate_negotiate,
|
|
+ .dir_needs_close = smb2_dir_needs_close,
|
|
};
|
|
|
|
struct smb_version_values smb20_values = {
|
|
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
|
|
index 049a3f2..3487929 100644
|
|
--- a/fs/cifs/smb2pdu.c
|
|
+++ b/fs/cifs/smb2pdu.c
|
|
@@ -916,7 +916,8 @@ tcon_exit:
|
|
tcon_error_exit:
|
|
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
|
|
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
|
|
- tcon->bad_network_name = true;
|
|
+ if (tcon)
|
|
+ tcon->bad_network_name = true;
|
|
}
|
|
goto tcon_exit;
|
|
}
|
|
@@ -1539,7 +1540,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
|
|
{
|
|
return query_info(xid, tcon, persistent_fid, volatile_fid,
|
|
FILE_ALL_INFORMATION,
|
|
- sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
|
|
+ sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
|
|
sizeof(struct smb2_file_all_info), data);
|
|
}
|
|
|
|
@@ -2135,6 +2136,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
|
|
rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
|
|
|
|
if (rc) {
|
|
+ if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
|
|
+ srch_inf->endOfSearch = true;
|
|
+ rc = 0;
|
|
+ }
|
|
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
|
|
goto qdir_exit;
|
|
}
|
|
@@ -2172,11 +2177,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
|
|
else
|
|
cifs_dbg(VFS, "illegal search buffer type\n");
|
|
|
|
- if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
|
|
- srch_inf->endOfSearch = 1;
|
|
- else
|
|
- srch_inf->endOfSearch = 0;
|
|
-
|
|
return rc;
|
|
|
|
qdir_exit:
|
|
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
|
|
index 1da168c..9bc1147 100644
|
|
--- a/fs/coda/cache.c
|
|
+++ b/fs/coda/cache.c
|
|
@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
|
|
struct dentry *de;
|
|
|
|
spin_lock(&parent->d_lock);
|
|
- list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
|
|
+ list_for_each_entry(de, &parent->d_subdirs, d_child) {
|
|
/* don't know what to do with negative dentries */
|
|
if (de->d_inode )
|
|
coda_flag_inode(de->d_inode, flag);
|
|
diff --git a/fs/coredump.c b/fs/coredump.c
|
|
index 0b2528f..a93f7e6 100644
|
|
--- a/fs/coredump.c
|
|
+++ b/fs/coredump.c
|
|
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|
if (unlikely(nr < 0))
|
|
return nr;
|
|
|
|
- tsk->flags = PF_DUMPCORE;
|
|
+ tsk->flags |= PF_DUMPCORE;
|
|
if (atomic_read(&mm->mm_users) == nr + 1)
|
|
goto done;
|
|
/*
|
|
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
|
|
index 06610cf..a1f801c 100644
|
|
--- a/fs/cramfs/inode.c
|
|
+++ b/fs/cramfs/inode.c
|
|
@@ -195,8 +195,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
|
|
struct page *page = NULL;
|
|
|
|
if (blocknr + i < devsize) {
|
|
- page = read_mapping_page_async(mapping, blocknr + i,
|
|
- NULL);
|
|
+ page = read_mapping_page(mapping, blocknr + i, NULL);
|
|
/* synchronous error? */
|
|
if (IS_ERR(page))
|
|
page = NULL;
|
|
diff --git a/fs/dcache.c b/fs/dcache.c
|
|
index 7f3b400..df323f8 100644
|
|
--- a/fs/dcache.c
|
|
+++ b/fs/dcache.c
|
|
@@ -44,7 +44,7 @@
|
|
/*
|
|
* Usage:
|
|
* dcache->d_inode->i_lock protects:
|
|
- * - i_dentry, d_alias, d_inode of aliases
|
|
+ * - i_dentry, d_u.d_alias, d_inode of aliases
|
|
* dcache_hash_bucket lock protects:
|
|
* - the dcache hash table
|
|
* s_anon bl list spinlock protects:
|
|
@@ -59,7 +59,7 @@
|
|
* - d_unhashed()
|
|
* - d_parent and d_subdirs
|
|
* - childrens' d_child and d_parent
|
|
- * - d_alias, d_inode
|
|
+ * - d_u.d_alias, d_inode
|
|
*
|
|
* Ordering:
|
|
* dentry->d_inode->i_lock
|
|
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
|
|
unsigned int hash)
|
|
{
|
|
hash += (unsigned long) parent / L1_CACHE_BYTES;
|
|
- hash = hash + (hash >> d_hash_shift);
|
|
- return dentry_hashtable + (hash & d_hash_mask);
|
|
+ return dentry_hashtable + hash_32(hash, d_hash_shift);
|
|
}
|
|
|
|
/* Statistics gathering. */
|
|
@@ -240,22 +239,13 @@ static void __d_free(struct rcu_head *head)
|
|
{
|
|
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
|
|
|
|
- WARN_ON(!hlist_unhashed(&dentry->d_alias));
|
|
if (dname_external(dentry))
|
|
kfree(dentry->d_name.name);
|
|
kmem_cache_free(dentry_cache, dentry);
|
|
}
|
|
|
|
-/*
|
|
- * no locks, please.
|
|
- */
|
|
-static void d_free(struct dentry *dentry)
|
|
+static void dentry_free(struct dentry *dentry)
|
|
{
|
|
- BUG_ON((int)dentry->d_lockref.count > 0);
|
|
- this_cpu_dec(nr_dentry);
|
|
- if (dentry->d_op && dentry->d_op->d_release)
|
|
- dentry->d_op->d_release(dentry);
|
|
-
|
|
/* if dentry was never visible to RCU, immediate free is OK */
|
|
if (!(dentry->d_flags & DCACHE_RCUACCESS))
|
|
__d_free(&dentry->d_u.d_rcu);
|
|
@@ -289,7 +279,7 @@ static void dentry_iput(struct dentry * dentry)
|
|
struct inode *inode = dentry->d_inode;
|
|
if (inode) {
|
|
dentry->d_inode = NULL;
|
|
- hlist_del_init(&dentry->d_alias);
|
|
+ hlist_del_init(&dentry->d_u.d_alias);
|
|
spin_unlock(&dentry->d_lock);
|
|
spin_unlock(&inode->i_lock);
|
|
if (!inode->i_nlink)
|
|
@@ -314,7 +304,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
|
|
struct inode *inode = dentry->d_inode;
|
|
__d_clear_type(dentry);
|
|
dentry->d_inode = NULL;
|
|
- hlist_del_init(&dentry->d_alias);
|
|
+ hlist_del_init(&dentry->d_u.d_alias);
|
|
dentry_rcuwalk_barrier(dentry);
|
|
spin_unlock(&dentry->d_lock);
|
|
spin_unlock(&inode->i_lock);
|
|
@@ -403,56 +393,6 @@ static void dentry_lru_add(struct dentry *dentry)
|
|
d_lru_add(dentry);
|
|
}
|
|
|
|
-/*
|
|
- * Remove a dentry with references from the LRU.
|
|
- *
|
|
- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
|
|
- * lose our last reference through the parent walk. In this case, we need to
|
|
- * remove ourselves from the shrink list, not the LRU.
|
|
- */
|
|
-static void dentry_lru_del(struct dentry *dentry)
|
|
-{
|
|
- if (dentry->d_flags & DCACHE_LRU_LIST) {
|
|
- if (dentry->d_flags & DCACHE_SHRINK_LIST)
|
|
- return d_shrink_del(dentry);
|
|
- d_lru_del(dentry);
|
|
- }
|
|
-}
|
|
-
|
|
-/**
|
|
- * d_kill - kill dentry and return parent
|
|
- * @dentry: dentry to kill
|
|
- * @parent: parent dentry
|
|
- *
|
|
- * The dentry must already be unhashed and removed from the LRU.
|
|
- *
|
|
- * If this is the root of the dentry tree, return NULL.
|
|
- *
|
|
- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
|
|
- * d_kill.
|
|
- */
|
|
-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
|
|
- __releases(dentry->d_lock)
|
|
- __releases(parent->d_lock)
|
|
- __releases(dentry->d_inode->i_lock)
|
|
-{
|
|
- list_del(&dentry->d_u.d_child);
|
|
- /*
|
|
- * Inform d_walk() that we are no longer attached to the
|
|
- * dentry tree
|
|
- */
|
|
- dentry->d_flags |= DCACHE_DENTRY_KILLED;
|
|
- if (parent)
|
|
- spin_unlock(&parent->d_lock);
|
|
- dentry_iput(dentry);
|
|
- /*
|
|
- * dentry_iput drops the locks, at which point nobody (except
|
|
- * transient RCU lookups) can reach this dentry.
|
|
- */
|
|
- d_free(dentry);
|
|
- return parent;
|
|
-}
|
|
-
|
|
/**
|
|
* d_drop - drop a dentry
|
|
* @dentry: dentry to drop
|
|
@@ -510,7 +450,14 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
|
|
__releases(dentry->d_lock)
|
|
{
|
|
struct inode *inode;
|
|
- struct dentry *parent;
|
|
+ struct dentry *parent = NULL;
|
|
+ bool can_free = true;
|
|
+
|
|
+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
|
|
+ can_free = dentry->d_flags & DCACHE_MAY_FREE;
|
|
+ spin_unlock(&dentry->d_lock);
|
|
+ goto out;
|
|
+ }
|
|
|
|
inode = dentry->d_inode;
|
|
if (inode && !spin_trylock(&inode->i_lock)) {
|
|
@@ -521,9 +468,7 @@ relock:
|
|
}
|
|
return dentry; /* try again with same dentry */
|
|
}
|
|
- if (IS_ROOT(dentry))
|
|
- parent = NULL;
|
|
- else
|
|
+ if (!IS_ROOT(dentry))
|
|
parent = dentry->d_parent;
|
|
if (parent && !spin_trylock(&parent->d_lock)) {
|
|
if (inode)
|
|
@@ -543,10 +488,40 @@ relock:
|
|
if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
|
|
dentry->d_op->d_prune(dentry);
|
|
|
|
- dentry_lru_del(dentry);
|
|
+ if (dentry->d_flags & DCACHE_LRU_LIST) {
|
|
+ if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
|
|
+ d_lru_del(dentry);
|
|
+ }
|
|
/* if it was on the hash then remove it */
|
|
__d_drop(dentry);
|
|
- return d_kill(dentry, parent);
|
|
+ __list_del_entry(&dentry->d_child);
|
|
+ /*
|
|
+ * Inform d_walk() that we are no longer attached to the
|
|
+ * dentry tree
|
|
+ */
|
|
+ dentry->d_flags |= DCACHE_DENTRY_KILLED;
|
|
+ if (parent)
|
|
+ spin_unlock(&parent->d_lock);
|
|
+ dentry_iput(dentry);
|
|
+ /*
|
|
+ * dentry_iput drops the locks, at which point nobody (except
|
|
+ * transient RCU lookups) can reach this dentry.
|
|
+ */
|
|
+ BUG_ON((int)dentry->d_lockref.count > 0);
|
|
+ this_cpu_dec(nr_dentry);
|
|
+ if (dentry->d_op && dentry->d_op->d_release)
|
|
+ dentry->d_op->d_release(dentry);
|
|
+
|
|
+ spin_lock(&dentry->d_lock);
|
|
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
|
|
+ dentry->d_flags |= DCACHE_MAY_FREE;
|
|
+ can_free = false;
|
|
+ }
|
|
+ spin_unlock(&dentry->d_lock);
|
|
+out:
|
|
+ if (likely(can_free))
|
|
+ dentry_free(dentry);
|
|
+ return parent;
|
|
}
|
|
|
|
/*
|
|
@@ -588,6 +563,9 @@ repeat:
|
|
if (unlikely(d_unhashed(dentry)))
|
|
goto kill_it;
|
|
|
|
+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
|
|
+ goto kill_it;
|
|
+
|
|
if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
|
|
if (dentry->d_op->d_delete(dentry))
|
|
goto kill_it;
|
|
@@ -738,7 +716,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
|
|
|
|
again:
|
|
discon_alias = NULL;
|
|
- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
|
|
spin_lock(&alias->d_lock);
|
|
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
|
|
if (IS_ROOT(alias) &&
|
|
@@ -791,7 +769,7 @@ void d_prune_aliases(struct inode *inode)
|
|
struct dentry *dentry;
|
|
restart:
|
|
spin_lock(&inode->i_lock);
|
|
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
|
|
spin_lock(&dentry->d_lock);
|
|
if (!dentry->d_lockref.count) {
|
|
/*
|
|
@@ -815,65 +793,13 @@ restart:
|
|
}
|
|
EXPORT_SYMBOL(d_prune_aliases);
|
|
|
|
-/*
|
|
- * Try to throw away a dentry - free the inode, dput the parent.
|
|
- * Requires dentry->d_lock is held, and dentry->d_count == 0.
|
|
- * Releases dentry->d_lock.
|
|
- *
|
|
- * This may fail if locks cannot be acquired no problem, just try again.
|
|
- */
|
|
-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
|
|
- __releases(dentry->d_lock)
|
|
-{
|
|
- struct dentry *parent;
|
|
-
|
|
- parent = dentry_kill(dentry, 0);
|
|
- /*
|
|
- * If dentry_kill returns NULL, we have nothing more to do.
|
|
- * if it returns the same dentry, trylocks failed. In either
|
|
- * case, just loop again.
|
|
- *
|
|
- * Otherwise, we need to prune ancestors too. This is necessary
|
|
- * to prevent quadratic behavior of shrink_dcache_parent(), but
|
|
- * is also expected to be beneficial in reducing dentry cache
|
|
- * fragmentation.
|
|
- */
|
|
- if (!parent)
|
|
- return NULL;
|
|
- if (parent == dentry)
|
|
- return dentry;
|
|
-
|
|
- /* Prune ancestors. */
|
|
- dentry = parent;
|
|
- while (dentry) {
|
|
- if (lockref_put_or_lock(&dentry->d_lockref))
|
|
- return NULL;
|
|
- dentry = dentry_kill(dentry, 1);
|
|
- }
|
|
- return NULL;
|
|
-}
|
|
-
|
|
static void shrink_dentry_list(struct list_head *list)
|
|
{
|
|
- struct dentry *dentry;
|
|
+ struct dentry *dentry, *parent;
|
|
|
|
- rcu_read_lock();
|
|
- for (;;) {
|
|
- dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
|
|
- if (&dentry->d_lru == list)
|
|
- break; /* empty */
|
|
-
|
|
- /*
|
|
- * Get the dentry lock, and re-verify that the dentry is
|
|
- * this on the shrinking list. If it is, we know that
|
|
- * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
|
|
- */
|
|
+ while (!list_empty(list)) {
|
|
+ dentry = list_entry(list->prev, struct dentry, d_lru);
|
|
spin_lock(&dentry->d_lock);
|
|
- if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
|
|
- spin_unlock(&dentry->d_lock);
|
|
- continue;
|
|
- }
|
|
-
|
|
/*
|
|
* The dispose list is isolated and dentries are not accounted
|
|
* to the LRU here, so we can simply remove it from the list
|
|
@@ -885,30 +811,38 @@ static void shrink_dentry_list(struct list_head *list)
|
|
* We found an inuse dentry which was not removed from
|
|
* the LRU because of laziness during lookup. Do not free it.
|
|
*/
|
|
- if (dentry->d_lockref.count) {
|
|
+ if ((int)dentry->d_lockref.count > 0) {
|
|
spin_unlock(&dentry->d_lock);
|
|
continue;
|
|
}
|
|
- rcu_read_unlock();
|
|
|
|
+ parent = dentry_kill(dentry, 0);
|
|
/*
|
|
- * If 'try_to_prune()' returns a dentry, it will
|
|
- * be the same one we passed in, and d_lock will
|
|
- * have been held the whole time, so it will not
|
|
- * have been added to any other lists. We failed
|
|
- * to get the inode lock.
|
|
- *
|
|
- * We just add it back to the shrink list.
|
|
+ * If dentry_kill returns NULL, we have nothing more to do.
|
|
*/
|
|
- dentry = try_prune_one_dentry(dentry);
|
|
+ if (!parent)
|
|
+ continue;
|
|
|
|
- rcu_read_lock();
|
|
- if (dentry) {
|
|
+ if (unlikely(parent == dentry)) {
|
|
+ /*
|
|
+ * trylocks have failed and d_lock has been held the
|
|
+ * whole time, so it could not have been added to any
|
|
+ * other lists. Just add it back to the shrink list.
|
|
+ */
|
|
d_shrink_add(dentry, list);
|
|
spin_unlock(&dentry->d_lock);
|
|
+ continue;
|
|
}
|
|
+ /*
|
|
+ * We need to prune ancestors too. This is necessary to prevent
|
|
+ * quadratic behavior of shrink_dcache_parent(), but is also
|
|
+ * expected to be beneficial in reducing dentry cache
|
|
+ * fragmentation.
|
|
+ */
|
|
+ dentry = parent;
|
|
+ while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
|
|
+ dentry = dentry_kill(dentry, 1);
|
|
}
|
|
- rcu_read_unlock();
|
|
}
|
|
|
|
static enum lru_status
|
|
@@ -1092,7 +1026,7 @@ repeat:
|
|
resume:
|
|
while (next != &this_parent->d_subdirs) {
|
|
struct list_head *tmp = next;
|
|
- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
|
|
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
|
|
next = tmp->next;
|
|
|
|
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
|
|
@@ -1124,33 +1058,31 @@ resume:
|
|
/*
|
|
* All done at this level ... ascend and resume the search.
|
|
*/
|
|
+ rcu_read_lock();
|
|
+ascend:
|
|
if (this_parent != parent) {
|
|
struct dentry *child = this_parent;
|
|
this_parent = child->d_parent;
|
|
|
|
- rcu_read_lock();
|
|
spin_unlock(&child->d_lock);
|
|
spin_lock(&this_parent->d_lock);
|
|
|
|
- /*
|
|
- * might go back up the wrong parent if we have had a rename
|
|
- * or deletion
|
|
- */
|
|
- if (this_parent != child->d_parent ||
|
|
- (child->d_flags & DCACHE_DENTRY_KILLED) ||
|
|
- need_seqretry(&rename_lock, seq)) {
|
|
- spin_unlock(&this_parent->d_lock);
|
|
- rcu_read_unlock();
|
|
+ /* might go back up the wrong parent if we have had a rename. */
|
|
+ if (need_seqretry(&rename_lock, seq))
|
|
goto rename_retry;
|
|
- }
|
|
+ /* go into the first sibling still alive */
|
|
+ do {
|
|
+ next = child->d_child.next;
|
|
+ if (next == &this_parent->d_subdirs)
|
|
+ goto ascend;
|
|
+ child = list_entry(next, struct dentry, d_child);
|
|
+ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
|
|
rcu_read_unlock();
|
|
- next = child->d_u.d_child.next;
|
|
goto resume;
|
|
}
|
|
- if (need_seqretry(&rename_lock, seq)) {
|
|
- spin_unlock(&this_parent->d_lock);
|
|
+ if (need_seqretry(&rename_lock, seq))
|
|
goto rename_retry;
|
|
- }
|
|
+ rcu_read_unlock();
|
|
if (finish)
|
|
finish(data);
|
|
|
|
@@ -1160,6 +1092,9 @@ out_unlock:
|
|
return;
|
|
|
|
rename_retry:
|
|
+ spin_unlock(&this_parent->d_lock);
|
|
+ rcu_read_unlock();
|
|
+ BUG_ON(seq & 1);
|
|
if (!retry)
|
|
return;
|
|
seq = 1;
|
|
@@ -1261,34 +1196,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
|
|
if (data->start == dentry)
|
|
goto out;
|
|
|
|
- /*
|
|
- * move only zero ref count dentries to the dispose list.
|
|
- *
|
|
- * Those which are presently on the shrink list, being processed
|
|
- * by shrink_dentry_list(), shouldn't be moved. Otherwise the
|
|
- * loop in shrink_dcache_parent() might not make any progress
|
|
- * and loop forever.
|
|
- */
|
|
- if (dentry->d_lockref.count) {
|
|
- dentry_lru_del(dentry);
|
|
- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
|
|
- /*
|
|
- * We can't use d_lru_shrink_move() because we
|
|
- * need to get the global LRU lock and do the
|
|
- * LRU accounting.
|
|
- */
|
|
- d_lru_del(dentry);
|
|
- d_shrink_add(dentry, &data->dispose);
|
|
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
|
|
data->found++;
|
|
- ret = D_WALK_NORETRY;
|
|
+ } else {
|
|
+ if (dentry->d_flags & DCACHE_LRU_LIST)
|
|
+ d_lru_del(dentry);
|
|
+ if (!dentry->d_lockref.count) {
|
|
+ d_shrink_add(dentry, &data->dispose);
|
|
+ data->found++;
|
|
+ }
|
|
}
|
|
/*
|
|
* We can return to the caller if we have found some (this
|
|
* ensures forward progress). We'll be coming back to find
|
|
* the rest.
|
|
*/
|
|
- if (data->found && need_resched())
|
|
- ret = D_WALK_QUIT;
|
|
+ if (!list_empty(&data->dispose))
|
|
+ ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
|
|
out:
|
|
return ret;
|
|
}
|
|
@@ -1318,45 +1242,35 @@ void shrink_dcache_parent(struct dentry *parent)
|
|
}
|
|
EXPORT_SYMBOL(shrink_dcache_parent);
|
|
|
|
-static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
|
|
+static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
|
|
{
|
|
- struct select_data *data = _data;
|
|
- enum d_walk_ret ret = D_WALK_CONTINUE;
|
|
+ /* it has busy descendents; complain about those instead */
|
|
+ if (!list_empty(&dentry->d_subdirs))
|
|
+ return D_WALK_CONTINUE;
|
|
|
|
- if (dentry->d_lockref.count) {
|
|
- dentry_lru_del(dentry);
|
|
- if (likely(!list_empty(&dentry->d_subdirs)))
|
|
- goto out;
|
|
- if (dentry == data->start && dentry->d_lockref.count == 1)
|
|
- goto out;
|
|
- printk(KERN_ERR
|
|
- "BUG: Dentry %p{i=%lx,n=%s}"
|
|
- " still in use (%d)"
|
|
- " [unmount of %s %s]\n",
|
|
+ /* root with refcount 1 is fine */
|
|
+ if (dentry == _data && dentry->d_lockref.count == 1)
|
|
+ return D_WALK_CONTINUE;
|
|
+
|
|
+ printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
|
|
+ " still in use (%d) [unmount of %s %s]\n",
|
|
dentry,
|
|
dentry->d_inode ?
|
|
dentry->d_inode->i_ino : 0UL,
|
|
- dentry->d_name.name,
|
|
+ dentry,
|
|
dentry->d_lockref.count,
|
|
dentry->d_sb->s_type->name,
|
|
dentry->d_sb->s_id);
|
|
- BUG();
|
|
- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
|
|
- /*
|
|
- * We can't use d_lru_shrink_move() because we
|
|
- * need to get the global LRU lock and do the
|
|
- * LRU accounting.
|
|
- */
|
|
- if (dentry->d_flags & DCACHE_LRU_LIST)
|
|
- d_lru_del(dentry);
|
|
- d_shrink_add(dentry, &data->dispose);
|
|
- data->found++;
|
|
- ret = D_WALK_NORETRY;
|
|
- }
|
|
-out:
|
|
- if (data->found && need_resched())
|
|
- ret = D_WALK_QUIT;
|
|
- return ret;
|
|
+ WARN_ON(1);
|
|
+ return D_WALK_CONTINUE;
|
|
+}
|
|
+
|
|
+static void do_one_tree(struct dentry *dentry)
|
|
+{
|
|
+ shrink_dcache_parent(dentry);
|
|
+ d_walk(dentry, dentry, umount_check, NULL);
|
|
+ d_drop(dentry);
|
|
+ dput(dentry);
|
|
}
|
|
|
|
/*
|
|
@@ -1366,40 +1280,15 @@ void shrink_dcache_for_umount(struct super_block *sb)
|
|
{
|
|
struct dentry *dentry;
|
|
|
|
- if (down_read_trylock(&sb->s_umount))
|
|
- BUG();
|
|
+ WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
|
|
|
|
dentry = sb->s_root;
|
|
sb->s_root = NULL;
|
|
- for (;;) {
|
|
- struct select_data data;
|
|
-
|
|
- INIT_LIST_HEAD(&data.dispose);
|
|
- data.start = dentry;
|
|
- data.found = 0;
|
|
-
|
|
- d_walk(dentry, &data, umount_collect, NULL);
|
|
- if (!data.found)
|
|
- break;
|
|
-
|
|
- shrink_dentry_list(&data.dispose);
|
|
- cond_resched();
|
|
- }
|
|
- d_drop(dentry);
|
|
- dput(dentry);
|
|
+ do_one_tree(dentry);
|
|
|
|
while (!hlist_bl_empty(&sb->s_anon)) {
|
|
- struct select_data data;
|
|
- dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
|
|
-
|
|
- INIT_LIST_HEAD(&data.dispose);
|
|
- data.start = NULL;
|
|
- data.found = 0;
|
|
-
|
|
- d_walk(dentry, &data, umount_collect, NULL);
|
|
- if (data.found)
|
|
- shrink_dentry_list(&data.dispose);
|
|
- cond_resched();
|
|
+ dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
|
|
+ do_one_tree(dentry);
|
|
}
|
|
}
|
|
|
|
@@ -1525,8 +1414,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
|
|
INIT_HLIST_BL_NODE(&dentry->d_hash);
|
|
INIT_LIST_HEAD(&dentry->d_lru);
|
|
INIT_LIST_HEAD(&dentry->d_subdirs);
|
|
- INIT_HLIST_NODE(&dentry->d_alias);
|
|
- INIT_LIST_HEAD(&dentry->d_u.d_child);
|
|
+ INIT_HLIST_NODE(&dentry->d_u.d_alias);
|
|
+ INIT_LIST_HEAD(&dentry->d_child);
|
|
d_set_d_op(dentry, dentry->d_sb->s_d_op);
|
|
|
|
this_cpu_inc(nr_dentry);
|
|
@@ -1556,7 +1445,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
|
|
*/
|
|
__dget_dlock(parent);
|
|
dentry->d_parent = parent;
|
|
- list_add(&dentry->d_u.d_child, &parent->d_subdirs);
|
|
+ list_add(&dentry->d_child, &parent->d_subdirs);
|
|
spin_unlock(&parent->d_lock);
|
|
|
|
return dentry;
|
|
@@ -1649,7 +1538,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
|
|
spin_lock(&dentry->d_lock);
|
|
__d_set_type(dentry, add_flags);
|
|
if (inode)
|
|
- hlist_add_head(&dentry->d_alias, &inode->i_dentry);
|
|
+ hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
|
|
dentry->d_inode = inode;
|
|
dentry_rcuwalk_barrier(dentry);
|
|
spin_unlock(&dentry->d_lock);
|
|
@@ -1673,7 +1562,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
|
|
|
|
void d_instantiate(struct dentry *entry, struct inode * inode)
|
|
{
|
|
- BUG_ON(!hlist_unhashed(&entry->d_alias));
|
|
+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
|
|
if (inode)
|
|
spin_lock(&inode->i_lock);
|
|
__d_instantiate(entry, inode);
|
|
@@ -1712,7 +1601,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
|
|
return NULL;
|
|
}
|
|
|
|
- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
|
|
/*
|
|
* Don't need alias->d_lock here, because aliases with
|
|
* d_parent == entry->d_parent are not subject to name or
|
|
@@ -1738,7 +1627,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
|
|
{
|
|
struct dentry *result;
|
|
|
|
- BUG_ON(!hlist_unhashed(&entry->d_alias));
|
|
+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
|
|
|
|
if (inode)
|
|
spin_lock(&inode->i_lock);
|
|
@@ -1769,7 +1658,7 @@ EXPORT_SYMBOL(d_instantiate_unique);
|
|
*/
|
|
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
|
|
{
|
|
- BUG_ON(!hlist_unhashed(&entry->d_alias));
|
|
+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
|
|
|
|
spin_lock(&inode->i_lock);
|
|
if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
|
|
@@ -1808,7 +1697,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
|
|
|
|
if (hlist_empty(&inode->i_dentry))
|
|
return NULL;
|
|
- alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
|
|
+ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
|
|
__dget(alias);
|
|
return alias;
|
|
}
|
|
@@ -1885,7 +1774,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
|
|
spin_lock(&tmp->d_lock);
|
|
tmp->d_inode = inode;
|
|
tmp->d_flags |= add_flags;
|
|
- hlist_add_head(&tmp->d_alias, &inode->i_dentry);
|
|
+ hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
|
|
hlist_bl_lock(&tmp->d_sb->s_anon);
|
|
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
|
|
hlist_bl_unlock(&tmp->d_sb->s_anon);
|
|
@@ -2328,7 +2217,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
|
|
struct dentry *child;
|
|
|
|
spin_lock(&dparent->d_lock);
|
|
- list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
|
|
+ list_for_each_entry(child, &dparent->d_subdirs, d_child) {
|
|
if (dentry == child) {
|
|
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
|
|
__dget_dlock(dentry);
|
|
@@ -2575,8 +2464,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
|
|
/* Unhash the target: dput() will then get rid of it */
|
|
__d_drop(target);
|
|
|
|
- list_del(&dentry->d_u.d_child);
|
|
- list_del(&target->d_u.d_child);
|
|
+ list_del(&dentry->d_child);
|
|
+ list_del(&target->d_child);
|
|
|
|
/* Switch the names.. */
|
|
switch_names(dentry, target);
|
|
@@ -2586,15 +2475,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
|
|
if (IS_ROOT(dentry)) {
|
|
dentry->d_parent = target->d_parent;
|
|
target->d_parent = target;
|
|
- INIT_LIST_HEAD(&target->d_u.d_child);
|
|
+ INIT_LIST_HEAD(&target->d_child);
|
|
} else {
|
|
swap(dentry->d_parent, target->d_parent);
|
|
|
|
/* And add them back to the (new) parent lists */
|
|
- list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
|
|
+ list_add(&target->d_child, &target->d_parent->d_subdirs);
|
|
}
|
|
|
|
- list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
|
|
+ list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
|
|
|
|
write_seqcount_end(&target->d_seq);
|
|
write_seqcount_end(&dentry->d_seq);
|
|
@@ -2701,9 +2590,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
|
|
swap(dentry->d_name.hash, anon->d_name.hash);
|
|
|
|
dentry->d_parent = dentry;
|
|
- list_del_init(&dentry->d_u.d_child);
|
|
+ list_del_init(&dentry->d_child);
|
|
anon->d_parent = dparent;
|
|
- list_move(&anon->d_u.d_child, &dparent->d_subdirs);
|
|
+ list_move(&anon->d_child, &dparent->d_subdirs);
|
|
|
|
write_seqcount_end(&dentry->d_seq);
|
|
write_seqcount_end(&anon->d_seq);
|
|
@@ -2825,6 +2714,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
|
|
* the beginning of the name. The sequence number check at the caller will
|
|
* retry it again when a d_move() does happen. So any garbage in the buffer
|
|
* due to mismatched pointer and length will be discarded.
|
|
+ *
|
|
+ * Data dependency barrier is needed to make sure that we see that terminating
|
|
+ * NUL. Alpha strikes again, film at 11...
|
|
*/
|
|
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
|
|
{
|
|
@@ -2832,6 +2724,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
|
|
u32 dlen = ACCESS_ONCE(name->len);
|
|
char *p;
|
|
|
|
+ smp_read_barrier_depends();
|
|
+
|
|
*buflen -= dlen + 1;
|
|
if (*buflen < 0)
|
|
return -ENAMETOOLONG;
|
|
@@ -2900,17 +2794,6 @@ restart:
|
|
vfsmnt = &mnt->mnt;
|
|
continue;
|
|
}
|
|
- /*
|
|
- * Filesystems needing to implement special "root names"
|
|
- * should do so with ->d_dname()
|
|
- */
|
|
- if (IS_ROOT(dentry) &&
|
|
- (dentry->d_name.len != 1 ||
|
|
- dentry->d_name.name[0] != '/')) {
|
|
- WARN(1, "Root dentry has weird name <%.*s>\n",
|
|
- (int) dentry->d_name.len,
|
|
- dentry->d_name.name);
|
|
- }
|
|
if (!error)
|
|
error = is_mounted(vfsmnt) ? 1 : 2;
|
|
break;
|
|
@@ -3329,7 +3212,7 @@ void d_tmpfile(struct dentry *dentry, struct inode *inode)
|
|
{
|
|
inode_dec_link_count(inode);
|
|
BUG_ON(dentry->d_name.name != dentry->d_iname ||
|
|
- !hlist_unhashed(&dentry->d_alias) ||
|
|
+ !hlist_unhashed(&dentry->d_u.d_alias) ||
|
|
!d_unlinked(dentry));
|
|
spin_lock(&dentry->d_parent->d_lock);
|
|
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
|
|
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
|
|
index ca4a08f..ece3842 100644
|
|
--- a/fs/debugfs/inode.c
|
|
+++ b/fs/debugfs/inode.c
|
|
@@ -245,10 +245,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
|
|
return 0;
|
|
}
|
|
|
|
+static void debugfs_evict_inode(struct inode *inode)
|
|
+{
|
|
+ truncate_inode_pages(&inode->i_data, 0);
|
|
+ clear_inode(inode);
|
|
+ if (S_ISLNK(inode->i_mode))
|
|
+ kfree(inode->i_private);
|
|
+}
|
|
+
|
|
static const struct super_operations debugfs_super_operations = {
|
|
.statfs = simple_statfs,
|
|
.remount_fs = debugfs_remount,
|
|
.show_options = debugfs_show_options,
|
|
+ .evict_inode = debugfs_evict_inode,
|
|
};
|
|
|
|
static int debug_fill_super(struct super_block *sb, void *data, int silent)
|
|
@@ -465,23 +474,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
|
|
int ret = 0;
|
|
|
|
if (debugfs_positive(dentry)) {
|
|
- if (dentry->d_inode) {
|
|
- dget(dentry);
|
|
- switch (dentry->d_inode->i_mode & S_IFMT) {
|
|
- case S_IFDIR:
|
|
- ret = simple_rmdir(parent->d_inode, dentry);
|
|
- break;
|
|
- case S_IFLNK:
|
|
- kfree(dentry->d_inode->i_private);
|
|
- /* fall through */
|
|
- default:
|
|
- simple_unlink(parent->d_inode, dentry);
|
|
- break;
|
|
- }
|
|
- if (!ret)
|
|
- d_delete(dentry);
|
|
- dput(dentry);
|
|
- }
|
|
+ dget(dentry);
|
|
+ if (S_ISDIR(dentry->d_inode->i_mode))
|
|
+ ret = simple_rmdir(parent->d_inode, dentry);
|
|
+ else
|
|
+ simple_unlink(parent->d_inode, dentry);
|
|
+ if (!ret)
|
|
+ d_delete(dentry);
|
|
+ dput(dentry);
|
|
}
|
|
return ret;
|
|
}
|
|
@@ -533,7 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
|
|
*/
|
|
void debugfs_remove_recursive(struct dentry *dentry)
|
|
{
|
|
- struct dentry *child, *next, *parent;
|
|
+ struct dentry *child, *parent;
|
|
|
|
if (IS_ERR_OR_NULL(dentry))
|
|
return;
|
|
@@ -545,30 +545,49 @@ void debugfs_remove_recursive(struct dentry *dentry)
|
|
parent = dentry;
|
|
down:
|
|
mutex_lock(&parent->d_inode->i_mutex);
|
|
- list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
|
|
+ loop:
|
|
+ /*
|
|
+ * The parent->d_subdirs is protected by the d_lock. Outside that
|
|
+ * lock, the child can be unlinked and set to be freed which can
|
|
+ * use the d_u.d_child as the rcu head and corrupt this list.
|
|
+ */
|
|
+ spin_lock(&parent->d_lock);
|
|
+ list_for_each_entry(child, &parent->d_subdirs, d_child) {
|
|
if (!debugfs_positive(child))
|
|
continue;
|
|
|
|
/* perhaps simple_empty(child) makes more sense */
|
|
if (!list_empty(&child->d_subdirs)) {
|
|
+ spin_unlock(&parent->d_lock);
|
|
mutex_unlock(&parent->d_inode->i_mutex);
|
|
parent = child;
|
|
goto down;
|
|
}
|
|
- up:
|
|
+
|
|
+ spin_unlock(&parent->d_lock);
|
|
+
|
|
if (!__debugfs_remove(child, parent))
|
|
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
|
|
+
|
|
+ /*
|
|
+ * The parent->d_lock protects agaist child from unlinking
|
|
+ * from d_subdirs. When releasing the parent->d_lock we can
|
|
+ * no longer trust that the next pointer is valid.
|
|
+ * Restart the loop. We'll skip this one with the
|
|
+ * debugfs_positive() check.
|
|
+ */
|
|
+ goto loop;
|
|
}
|
|
+ spin_unlock(&parent->d_lock);
|
|
|
|
mutex_unlock(&parent->d_inode->i_mutex);
|
|
child = parent;
|
|
parent = parent->d_parent;
|
|
mutex_lock(&parent->d_inode->i_mutex);
|
|
|
|
- if (child != dentry) {
|
|
- next = list_next_entry(child, d_u.d_child);
|
|
- goto up;
|
|
- }
|
|
+ if (child != dentry)
|
|
+ /* go up */
|
|
+ goto loop;
|
|
|
|
if (!__debugfs_remove(child, parent))
|
|
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
|
|
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
|
|
index 2f6735d..31b148f 100644
|
|
--- a/fs/ecryptfs/crypto.c
|
|
+++ b/fs/ecryptfs/crypto.c
|
|
@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
|
|
break;
|
|
case 2:
|
|
dst[dst_byte_offset++] |= (src_byte);
|
|
- dst[dst_byte_offset] = 0;
|
|
current_bit_offset = 0;
|
|
break;
|
|
}
|
|
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
|
|
index b1eaa7a..03df502 100644
|
|
--- a/fs/ecryptfs/file.c
|
|
+++ b/fs/ecryptfs/file.c
|
|
@@ -191,23 +191,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
|
|
{
|
|
int rc = 0;
|
|
struct ecryptfs_crypt_stat *crypt_stat = NULL;
|
|
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
|
|
struct dentry *ecryptfs_dentry = file->f_path.dentry;
|
|
/* Private value of ecryptfs_dentry allocated in
|
|
* ecryptfs_lookup() */
|
|
struct ecryptfs_file_info *file_info;
|
|
|
|
- mount_crypt_stat = &ecryptfs_superblock_to_private(
|
|
- ecryptfs_dentry->d_sb)->mount_crypt_stat;
|
|
- if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
|
|
- && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
|
|
- || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
|
|
- || (file->f_flags & O_APPEND))) {
|
|
- printk(KERN_WARNING "Mount has encrypted view enabled; "
|
|
- "files may only be read\n");
|
|
- rc = -EPERM;
|
|
- goto out;
|
|
- }
|
|
/* Released in ecryptfs_release or end of function if failure */
|
|
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
|
|
ecryptfs_set_file_private(file, file_info);
|
|
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
|
|
index b167ca4..a85ceb7 100644
|
|
--- a/fs/ecryptfs/inode.c
|
|
+++ b/fs/ecryptfs/inode.c
|
|
@@ -1039,7 +1039,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
|
}
|
|
|
|
rc = vfs_setxattr(lower_dentry, name, value, size, flags);
|
|
- if (!rc)
|
|
+ if (!rc && dentry->d_inode)
|
|
fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
|
|
out:
|
|
return rc;
|
|
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
|
|
index 1b119d3..34eb843 100644
|
|
--- a/fs/ecryptfs/main.c
|
|
+++ b/fs/ecryptfs/main.c
|
|
@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
|
|
{
|
|
struct super_block *s;
|
|
struct ecryptfs_sb_info *sbi;
|
|
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
|
|
struct ecryptfs_dentry_info *root_info;
|
|
const char *err = "Getting sb failed";
|
|
struct inode *inode;
|
|
@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
|
|
err = "Error parsing options";
|
|
goto out;
|
|
}
|
|
+ mount_crypt_stat = &sbi->mount_crypt_stat;
|
|
|
|
s = sget(fs_type, NULL, set_anon_super, flags, NULL);
|
|
if (IS_ERR(s)) {
|
|
@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
|
|
|
|
/**
|
|
* Set the POSIX ACL flag based on whether they're enabled in the lower
|
|
- * mount. Force a read-only eCryptfs mount if the lower mount is ro.
|
|
- * Allow a ro eCryptfs mount even when the lower mount is rw.
|
|
+ * mount.
|
|
*/
|
|
s->s_flags = flags & ~MS_POSIXACL;
|
|
- s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
|
|
+ s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
|
|
+
|
|
+ /**
|
|
+ * Force a read-only eCryptfs mount when:
|
|
+ * 1) The lower mount is ro
|
|
+ * 2) The ecryptfs_encrypted_view mount option is specified
|
|
+ */
|
|
+ if (path.dentry->d_sb->s_flags & MS_RDONLY ||
|
|
+ mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
|
|
+ s->s_flags |= MS_RDONLY;
|
|
|
|
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
|
|
s->s_blocksize = path.dentry->d_sb->s_blocksize;
|
|
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
|
|
index ead0046..f50d79e 100644
|
|
--- a/fs/eventpoll.c
|
|
+++ b/fs/eventpoll.c
|
|
@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
|
|
goto error_tgt_fput;
|
|
|
|
/* Check if EPOLLWAKEUP is allowed */
|
|
- ep_take_care_of_epollwakeup(&epds);
|
|
+ if (ep_op_has_event(op))
|
|
+ ep_take_care_of_epollwakeup(&epds);
|
|
|
|
/*
|
|
* We have to check that the file structure underneath the file descriptor
|
|
diff --git a/fs/exec.c b/fs/exec.c
|
|
index 31e46b1..05f1942 100644
|
|
--- a/fs/exec.c
|
|
+++ b/fs/exec.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/file.h>
|
|
#include <linux/fdtable.h>
|
|
#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/stat.h>
|
|
#include <linux/fcntl.h>
|
|
#include <linux/swap.h>
|
|
@@ -820,7 +821,7 @@ EXPORT_SYMBOL(read_code);
|
|
static int exec_mmap(struct mm_struct *mm)
|
|
{
|
|
struct task_struct *tsk;
|
|
- struct mm_struct * old_mm, *active_mm;
|
|
+ struct mm_struct *old_mm, *active_mm;
|
|
|
|
/* Notify parent that we're no longer interested in the old VM */
|
|
tsk = current;
|
|
@@ -846,6 +847,8 @@ static int exec_mmap(struct mm_struct *mm)
|
|
tsk->mm = mm;
|
|
tsk->active_mm = mm;
|
|
activate_mm(active_mm, mm);
|
|
+ tsk->mm->vmacache_seqnum = 0;
|
|
+ vmacache_flush(tsk);
|
|
task_unlock(tsk);
|
|
if (old_mm) {
|
|
up_read(&old_mm->mmap_sem);
|
|
@@ -1265,6 +1268,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
|
|
spin_unlock(&p->fs->lock);
|
|
}
|
|
|
|
+static void bprm_fill_uid(struct linux_binprm *bprm)
|
|
+{
|
|
+ struct inode *inode;
|
|
+ unsigned int mode;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+
|
|
+ /* clear any previous set[ug]id data from a previous binary */
|
|
+ bprm->cred->euid = current_euid();
|
|
+ bprm->cred->egid = current_egid();
|
|
+
|
|
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
|
|
+ return;
|
|
+
|
|
+ if (current->no_new_privs)
|
|
+ return;
|
|
+
|
|
+ inode = file_inode(bprm->file);
|
|
+ mode = ACCESS_ONCE(inode->i_mode);
|
|
+ if (!(mode & (S_ISUID|S_ISGID)))
|
|
+ return;
|
|
+
|
|
+ /* Be careful if suid/sgid is set */
|
|
+ mutex_lock(&inode->i_mutex);
|
|
+
|
|
+ /* reload atomically mode/uid/gid now that lock held */
|
|
+ mode = inode->i_mode;
|
|
+ uid = inode->i_uid;
|
|
+ gid = inode->i_gid;
|
|
+ mutex_unlock(&inode->i_mutex);
|
|
+
|
|
+ /* We ignore suid/sgid if there are no mappings for them in the ns */
|
|
+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
|
|
+ !kgid_has_mapping(bprm->cred->user_ns, gid))
|
|
+ return;
|
|
+
|
|
+ if (mode & S_ISUID) {
|
|
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
|
|
+ bprm->cred->euid = uid;
|
|
+ }
|
|
+
|
|
+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
|
|
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
|
|
+ bprm->cred->egid = gid;
|
|
+ }
|
|
+}
|
|
+
|
|
/*
|
|
* Fill the binprm structure from the inode.
|
|
* Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
|
|
@@ -1273,36 +1323,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
|
|
*/
|
|
int prepare_binprm(struct linux_binprm *bprm)
|
|
{
|
|
- struct inode *inode = file_inode(bprm->file);
|
|
- umode_t mode = inode->i_mode;
|
|
int retval;
|
|
|
|
-
|
|
- /* clear any previous set[ug]id data from a previous binary */
|
|
- bprm->cred->euid = current_euid();
|
|
- bprm->cred->egid = current_egid();
|
|
-
|
|
- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
|
|
- !current->no_new_privs &&
|
|
- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
|
|
- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
|
|
- /* Set-uid? */
|
|
- if (mode & S_ISUID) {
|
|
- bprm->per_clear |= PER_CLEAR_ON_SETID;
|
|
- bprm->cred->euid = inode->i_uid;
|
|
- }
|
|
-
|
|
- /* Set-gid? */
|
|
- /*
|
|
- * If setgid is set but no group execute bit then this
|
|
- * is a candidate for mandatory locking, not a setgid
|
|
- * executable.
|
|
- */
|
|
- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
|
|
- bprm->per_clear |= PER_CLEAR_ON_SETID;
|
|
- bprm->cred->egid = inode->i_gid;
|
|
- }
|
|
- }
|
|
+ bprm_fill_uid(bprm);
|
|
|
|
/* fill in binprm security blob */
|
|
retval = security_bprm_set_creds(bprm);
|
|
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
|
|
index 48a359d..831d4f0 100644
|
|
--- a/fs/exportfs/expfs.c
|
|
+++ b/fs/exportfs/expfs.c
|
|
@@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result,
|
|
|
|
inode = result->d_inode;
|
|
spin_lock(&inode->i_lock);
|
|
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
|
|
dget(dentry);
|
|
spin_unlock(&inode->i_lock);
|
|
if (toput)
|
|
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
|
|
index 37fd31e..0498390 100644
|
|
--- a/fs/ext3/super.c
|
|
+++ b/fs/ext3/super.c
|
|
@@ -1354,13 +1354,6 @@ set_qf_format:
|
|
"not specified.");
|
|
return 0;
|
|
}
|
|
- } else {
|
|
- if (sbi->s_jquota_fmt) {
|
|
- ext3_msg(sb, KERN_ERR, "error: journaled quota format "
|
|
- "specified with no journaling "
|
|
- "enabled.");
|
|
- return 0;
|
|
- }
|
|
}
|
|
#endif
|
|
return 1;
|
|
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
|
|
index 3285aa5..b610779 100644
|
|
--- a/fs/ext4/bitmap.c
|
|
+++ b/fs/ext4/bitmap.c
|
|
@@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
|
|
__u32 provided, calculated;
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return 1;
|
|
|
|
provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
|
|
@@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
|
|
__u32 csum;
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return;
|
|
|
|
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
|
|
@@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return 1;
|
|
|
|
provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
|
|
@@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
|
|
__u32 csum;
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return;
|
|
|
|
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
|
index 62f024c..2a6830a 100644
|
|
--- a/fs/ext4/ext4.h
|
|
+++ b/fs/ext4/ext4.h
|
|
@@ -2110,6 +2110,7 @@ int do_journal_get_write_access(handle_t *handle,
|
|
#define CONVERT_INLINE_DATA 2
|
|
|
|
extern struct inode *ext4_iget(struct super_block *, unsigned long);
|
|
+extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
|
|
extern int ext4_write_inode(struct inode *, struct writeback_control *);
|
|
extern int ext4_setattr(struct dentry *, struct iattr *);
|
|
extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
|
|
@@ -2340,10 +2341,18 @@ extern int ext4_register_li_request(struct super_block *sb,
|
|
static inline int ext4_has_group_desc_csum(struct super_block *sb)
|
|
{
|
|
return EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
|
|
+ EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
|
|
+ (EXT4_SB(sb)->s_chksum_driver != NULL);
|
|
}
|
|
|
|
+static inline int ext4_has_metadata_csum(struct super_block *sb)
|
|
+{
|
|
+ WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
|
|
+ !EXT4_SB(sb)->s_chksum_driver);
|
|
+
|
|
+ return (EXT4_SB(sb)->s_chksum_driver != NULL);
|
|
+}
|
|
static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
|
|
{
|
|
return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
|
|
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
|
|
index 3fe29de..ff42208 100644
|
|
--- a/fs/ext4/ext4_jbd2.c
|
|
+++ b/fs/ext4/ext4_jbd2.c
|
|
@@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
|
|
ext4_put_nojournal(handle);
|
|
return 0;
|
|
}
|
|
+
|
|
+ if (!handle->h_transaction) {
|
|
+ err = jbd2_journal_stop(handle);
|
|
+ return handle->h_err ? handle->h_err : err;
|
|
+ }
|
|
+
|
|
sb = handle->h_transaction->t_journal->j_private;
|
|
err = handle->h_err;
|
|
rc = jbd2_journal_stop(handle);
|
|
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
|
|
index 4718891..4e237a6 100644
|
|
--- a/fs/ext4/extents.c
|
|
+++ b/fs/ext4/extents.c
|
|
@@ -74,8 +74,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
|
|
{
|
|
struct ext4_extent_tail *et;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return 1;
|
|
|
|
et = find_ext4_extent_tail(eh);
|
|
@@ -89,8 +88,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
|
|
{
|
|
struct ext4_extent_tail *et;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return;
|
|
|
|
et = find_ext4_extent_tail(eh);
|
|
@@ -363,7 +361,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
|
|
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
|
|
ext4_lblk_t last = lblock + len - 1;
|
|
|
|
- if (lblock > last)
|
|
+ if (len == 0 || lblock > last)
|
|
return 0;
|
|
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
|
|
}
|
|
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
|
|
index 171b9fa..4e8b79d 100644
|
|
--- a/fs/ext4/extents_status.c
|
|
+++ b/fs/ext4/extents_status.c
|
|
@@ -656,6 +656,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
|
|
|
|
BUG_ON(end < lblk);
|
|
|
|
+ if ((status & EXTENT_STATUS_DELAYED) &&
|
|
+ (status & EXTENT_STATUS_WRITTEN)) {
|
|
+ ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
|
|
+ " delayed and written which can potentially "
|
|
+ " cause data loss.\n", lblk, len);
|
|
+ WARN_ON(1);
|
|
+ }
|
|
+
|
|
newes.es_lblk = lblk;
|
|
newes.es_len = len;
|
|
ext4_es_store_pblock(&newes, pblk);
|
|
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
|
|
index 2a8b2e1..589117e 100644
|
|
--- a/fs/ext4/file.c
|
|
+++ b/fs/ext4/file.c
|
|
@@ -100,7 +100,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
|
|
struct blk_plug plug;
|
|
int unaligned_aio = 0;
|
|
ssize_t ret;
|
|
- int overwrite = 0;
|
|
+ int *overwrite = iocb->private;
|
|
size_t length = iov_length(iov, nr_segs);
|
|
|
|
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
|
|
@@ -118,8 +118,6 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
|
|
mutex_lock(&inode->i_mutex);
|
|
blk_start_plug(&plug);
|
|
|
|
- iocb->private = &overwrite;
|
|
-
|
|
/* check whether we do a DIO overwrite or not */
|
|
if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
|
|
!file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
|
|
@@ -143,7 +141,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
|
|
* So we should check these two conditions.
|
|
*/
|
|
if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
|
|
- overwrite = 1;
|
|
+ *overwrite = 1;
|
|
}
|
|
|
|
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
|
|
@@ -170,6 +168,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
|
|
{
|
|
struct inode *inode = file_inode(iocb->ki_filp);
|
|
ssize_t ret;
|
|
+ int overwrite = 0;
|
|
|
|
/*
|
|
* If we have encountered a bitmap-format file, the size limit
|
|
@@ -190,6 +189,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
|
|
}
|
|
}
|
|
|
|
+ iocb->private = &overwrite;
|
|
if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
|
|
ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
|
|
else
|
|
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
|
|
index 64bb32f1..a8d1a64 100644
|
|
--- a/fs/ext4/ialloc.c
|
|
+++ b/fs/ext4/ialloc.c
|
|
@@ -864,6 +864,10 @@ got:
|
|
struct buffer_head *block_bitmap_bh;
|
|
|
|
block_bitmap_bh = ext4_read_block_bitmap(sb, group);
|
|
+ if (!block_bitmap_bh) {
|
|
+ err = -EIO;
|
|
+ goto out;
|
|
+ }
|
|
BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
|
|
err = ext4_journal_get_write_access(handle, block_bitmap_bh);
|
|
if (err) {
|
|
@@ -988,8 +992,7 @@ got:
|
|
spin_unlock(&sbi->s_next_gen_lock);
|
|
|
|
/* Precompute checksum seed for inode metadata */
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
|
|
+ if (ext4_has_metadata_csum(sb)) {
|
|
__u32 csum;
|
|
__le32 inum = cpu_to_le32(inode->i_ino);
|
|
__le32 gen = cpu_to_le32(inode->i_generation);
|
|
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
|
|
index e6574d7..a7c5277 100644
|
|
--- a/fs/ext4/indirect.c
|
|
+++ b/fs/ext4/indirect.c
|
|
@@ -576,7 +576,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
|
|
EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
|
|
EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
|
|
"non-extent mapped inodes with bigalloc");
|
|
- return -ENOSPC;
|
|
+ return -EUCLEAN;
|
|
}
|
|
|
|
goal = ext4_find_goal(inode, map->m_lblk, partial);
|
|
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
|
|
index 82edf5b..8c03b74 100644
|
|
--- a/fs/ext4/inline.c
|
|
+++ b/fs/ext4/inline.c
|
|
@@ -1128,8 +1128,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
|
|
memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
|
|
inline_size - EXT4_INLINE_DOTDOT_SIZE);
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(inode->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
inode->i_size = inode->i_sb->s_blocksize;
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index a7029f4..f9c63ae 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
|
|
|
|
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
|
|
cpu_to_le32(EXT4_OS_LINUX) ||
|
|
- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ !ext4_has_metadata_csum(inode->i_sb))
|
|
return 1;
|
|
|
|
provided = le16_to_cpu(raw->i_checksum_lo);
|
|
@@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
|
|
|
|
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
|
|
cpu_to_le32(EXT4_OS_LINUX) ||
|
|
- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ !ext4_has_metadata_csum(inode->i_sb))
|
|
return;
|
|
|
|
csum = ext4_inode_csum(inode, raw, ei);
|
|
@@ -571,6 +569,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
|
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
|
|
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
|
|
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
|
|
+ !(status & EXTENT_STATUS_WRITTEN) &&
|
|
ext4_find_delalloc_range(inode, map->m_lblk,
|
|
map->m_lblk + map->m_len - 1))
|
|
status |= EXTENT_STATUS_DELAYED;
|
|
@@ -680,6 +679,7 @@ found:
|
|
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
|
|
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
|
|
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
|
|
+ !(status & EXTENT_STATUS_WRITTEN) &&
|
|
ext4_find_delalloc_range(inode, map->m_lblk,
|
|
map->m_lblk + map->m_len - 1))
|
|
status |= EXTENT_STATUS_DELAYED;
|
|
@@ -1357,7 +1357,7 @@ static void ext4_da_page_release_reservation(struct page *page,
|
|
unsigned int offset,
|
|
unsigned int length)
|
|
{
|
|
- int to_release = 0;
|
|
+ int to_release = 0, contiguous_blks = 0;
|
|
struct buffer_head *head, *bh;
|
|
unsigned int curr_off = 0;
|
|
struct inode *inode = page->mapping->host;
|
|
@@ -1378,14 +1378,23 @@ static void ext4_da_page_release_reservation(struct page *page,
|
|
|
|
if ((offset <= curr_off) && (buffer_delay(bh))) {
|
|
to_release++;
|
|
+ contiguous_blks++;
|
|
clear_buffer_delay(bh);
|
|
+ } else if (contiguous_blks) {
|
|
+ lblk = page->index <<
|
|
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
|
|
+ lblk += (curr_off >> inode->i_blkbits) -
|
|
+ contiguous_blks;
|
|
+ ext4_es_remove_extent(inode, lblk, contiguous_blks);
|
|
+ contiguous_blks = 0;
|
|
}
|
|
curr_off = next_off;
|
|
} while ((bh = bh->b_this_page) != head);
|
|
|
|
- if (to_release) {
|
|
+ if (contiguous_blks) {
|
|
lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
|
|
- ext4_es_remove_extent(inode, lblk, to_release);
|
|
+ lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
|
|
+ ext4_es_remove_extent(inode, lblk, contiguous_blks);
|
|
}
|
|
|
|
/* If we have released all the blocks belonging to a cluster, then we
|
|
@@ -1744,19 +1753,32 @@ static int __ext4_journalled_writepage(struct page *page,
|
|
ext4_walk_page_buffers(handle, page_bufs, 0, len,
|
|
NULL, bget_one);
|
|
}
|
|
- /* As soon as we unlock the page, it can go away, but we have
|
|
- * references to buffers so we are safe */
|
|
+ /*
|
|
+ * We need to release the page lock before we start the
|
|
+ * journal, so grab a reference so the page won't disappear
|
|
+ * out from under us.
|
|
+ */
|
|
+ get_page(page);
|
|
unlock_page(page);
|
|
|
|
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
|
|
ext4_writepage_trans_blocks(inode));
|
|
if (IS_ERR(handle)) {
|
|
ret = PTR_ERR(handle);
|
|
- goto out;
|
|
+ put_page(page);
|
|
+ goto out_no_pagelock;
|
|
}
|
|
-
|
|
BUG_ON(!ext4_handle_valid(handle));
|
|
|
|
+ lock_page(page);
|
|
+ put_page(page);
|
|
+ if (page->mapping != mapping) {
|
|
+ /* The page got truncated from under us */
|
|
+ ext4_journal_stop(handle);
|
|
+ ret = 0;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (inline_data) {
|
|
ret = ext4_journal_get_write_access(handle, inode_bh);
|
|
|
|
@@ -1781,6 +1803,8 @@ static int __ext4_journalled_writepage(struct page *page,
|
|
NULL, bput_one);
|
|
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
|
|
out:
|
|
+ unlock_page(page);
|
|
+out_no_pagelock:
|
|
brelse(inode_bh);
|
|
return ret;
|
|
}
|
|
@@ -2195,6 +2219,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
|
|
struct ext4_map_blocks *map = &mpd->map;
|
|
int err;
|
|
loff_t disksize;
|
|
+ int progress = 0;
|
|
|
|
mpd->io_submit.io_end->offset =
|
|
((loff_t)map->m_lblk) << inode->i_blkbits;
|
|
@@ -2211,8 +2236,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
|
|
* is non-zero, a commit should free up blocks.
|
|
*/
|
|
if ((err == -ENOMEM) ||
|
|
- (err == -ENOSPC && ext4_count_free_clusters(sb)))
|
|
+ (err == -ENOSPC && ext4_count_free_clusters(sb))) {
|
|
+ if (progress)
|
|
+ goto update_disksize;
|
|
return err;
|
|
+ }
|
|
ext4_msg(sb, KERN_CRIT,
|
|
"Delayed block allocation failed for "
|
|
"inode %lu at logical offset %llu with"
|
|
@@ -2229,15 +2257,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
|
|
*give_up_on_write = true;
|
|
return err;
|
|
}
|
|
+ progress = 1;
|
|
/*
|
|
* Update buffer state, submit mapped pages, and get us new
|
|
* extent to map
|
|
*/
|
|
err = mpage_map_and_submit_buffers(mpd);
|
|
if (err < 0)
|
|
- return err;
|
|
+ goto update_disksize;
|
|
} while (map->m_len);
|
|
|
|
+update_disksize:
|
|
/*
|
|
* Update on-disk size after IO is submitted. Races with
|
|
* truncate are avoided by checking i_size under i_data_sem.
|
|
@@ -2627,6 +2657,20 @@ static int ext4_nonda_switch(struct super_block *sb)
|
|
return 0;
|
|
}
|
|
|
|
+/* We always reserve for an inode update; the superblock could be there too */
|
|
+static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
|
|
+{
|
|
+ if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
+ EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
|
|
+ return 1;
|
|
+
|
|
+ if (pos + len <= 0x7fffffffULL)
|
|
+ return 1;
|
|
+
|
|
+ /* We might need to update the superblock to set LARGE_FILE */
|
|
+ return 2;
|
|
+}
|
|
+
|
|
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
|
|
loff_t pos, unsigned len, unsigned flags,
|
|
struct page **pagep, void **fsdata)
|
|
@@ -2677,7 +2721,8 @@ retry_grab:
|
|
* of file which has an already mapped buffer.
|
|
*/
|
|
retry_journal:
|
|
- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
|
|
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
|
|
+ ext4_da_write_credits(inode, pos, len));
|
|
if (IS_ERR(handle)) {
|
|
page_cache_release(page);
|
|
return PTR_ERR(handle);
|
|
@@ -4055,8 +4100,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
ei->i_extra_isize = 0;
|
|
|
|
/* Precompute checksum seed for inode metadata */
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
|
|
+ if (ext4_has_metadata_csum(sb)) {
|
|
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
|
|
__u32 csum;
|
|
__le32 inum = cpu_to_le32(inode->i_ino);
|
|
@@ -4244,6 +4288,13 @@ bad_inode:
|
|
return ERR_PTR(ret);
|
|
}
|
|
|
|
+struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
|
|
+{
|
|
+ if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
|
|
+ return ERR_PTR(-EIO);
|
|
+ return ext4_iget(sb, ino);
|
|
+}
|
|
+
|
|
static int ext4_inode_blocks_set(handle_t *handle,
|
|
struct ext4_inode *raw_inode,
|
|
struct ext4_inode_info *ei)
|
|
@@ -4639,8 +4690,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
|
|
ext4_orphan_del(NULL, inode);
|
|
goto err_out;
|
|
}
|
|
- } else
|
|
+ } else {
|
|
+ loff_t oldsize = inode->i_size;
|
|
+
|
|
i_size_write(inode, attr->ia_size);
|
|
+ pagecache_isize_extended(inode, oldsize, inode->i_size);
|
|
+ }
|
|
|
|
/*
|
|
* Blocks are going to be removed from the inode. Wait
|
|
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
|
|
index a2a837f..dfe982d 100644
|
|
--- a/fs/ext4/ioctl.c
|
|
+++ b/fs/ext4/ioctl.c
|
|
@@ -343,8 +343,7 @@ flags_out:
|
|
if (!inode_owner_or_capable(inode))
|
|
return -EPERM;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
|
|
+ if (ext4_has_metadata_csum(inode->i_sb)) {
|
|
ext4_warning(sb, "Setting inode version is not "
|
|
"supported with metadata_csum enabled.");
|
|
return -ENOTTY;
|
|
@@ -544,9 +543,17 @@ group_add_out:
|
|
}
|
|
|
|
case EXT4_IOC_SWAP_BOOT:
|
|
+ {
|
|
+ int err;
|
|
if (!(filp->f_mode & FMODE_WRITE))
|
|
return -EBADF;
|
|
- return swap_inode_boot_loader(sb, inode);
|
|
+ err = mnt_want_write_file(filp);
|
|
+ if (err)
|
|
+ return err;
|
|
+ err = swap_inode_boot_loader(sb, inode);
|
|
+ mnt_drop_write_file(filp);
|
|
+ return err;
|
|
+ }
|
|
|
|
case EXT4_IOC_RESIZE_FS: {
|
|
ext4_fsblk_t n_blocks_count;
|
|
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
|
|
index 502f0fd..c4a5e4d 100644
|
|
--- a/fs/ext4/mballoc.c
|
|
+++ b/fs/ext4/mballoc.c
|
|
@@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
|
|
* allocating. If we are looking at the buddy cache we would
|
|
* have taken a reference using ext4_mb_load_buddy and that
|
|
* would have pinned buddy page to page cache.
|
|
+ * The call to ext4_mb_get_buddy_page_lock will mark the
|
|
+ * page accessed.
|
|
*/
|
|
ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
|
|
if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
|
|
@@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
|
|
ret = -EIO;
|
|
goto err;
|
|
}
|
|
- mark_page_accessed(page);
|
|
|
|
if (e4b.bd_buddy_page == NULL) {
|
|
/*
|
|
@@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
|
|
ret = -EIO;
|
|
goto err;
|
|
}
|
|
- mark_page_accessed(page);
|
|
err:
|
|
ext4_mb_put_buddy_page_lock(&e4b);
|
|
return ret;
|
|
@@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
|
|
|
|
/* we could use find_or_create_page(), but it locks page
|
|
* what we'd like to avoid in fast path ... */
|
|
- page = find_get_page(inode->i_mapping, pnum);
|
|
+ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
|
|
if (page == NULL || !PageUptodate(page)) {
|
|
if (page)
|
|
/*
|
|
@@ -1172,15 +1172,16 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
|
|
ret = -EIO;
|
|
goto err;
|
|
}
|
|
+
|
|
+ /* Pages marked accessed already */
|
|
e4b->bd_bitmap_page = page;
|
|
e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
|
|
- mark_page_accessed(page);
|
|
|
|
block++;
|
|
pnum = block / blocks_per_page;
|
|
poff = block % blocks_per_page;
|
|
|
|
- page = find_get_page(inode->i_mapping, pnum);
|
|
+ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
|
|
if (page == NULL || !PageUptodate(page)) {
|
|
if (page)
|
|
page_cache_release(page);
|
|
@@ -1201,9 +1202,10 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
|
|
ret = -EIO;
|
|
goto err;
|
|
}
|
|
+
|
|
+ /* Pages marked accessed already */
|
|
e4b->bd_buddy_page = page;
|
|
e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
|
|
- mark_page_accessed(page);
|
|
|
|
BUG_ON(e4b->bd_bitmap_page == NULL);
|
|
BUG_ON(e4b->bd_buddy_page == NULL);
|
|
@@ -1398,6 +1400,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
|
|
int last = first + count - 1;
|
|
struct super_block *sb = e4b->bd_sb;
|
|
|
|
+ if (WARN_ON(count == 0))
|
|
+ return;
|
|
BUG_ON(last >= (sb->s_blocksize << 3));
|
|
assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
|
|
/* Don't bother if the block group is corrupt. */
|
|
@@ -3196,8 +3200,30 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
|
|
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
|
|
{
|
|
struct ext4_prealloc_space *pa = ac->ac_pa;
|
|
+ struct ext4_buddy e4b;
|
|
+ int err;
|
|
|
|
- if (pa && pa->pa_type == MB_INODE_PA)
|
|
+ if (pa == NULL) {
|
|
+ if (ac->ac_f_ex.fe_len == 0)
|
|
+ return;
|
|
+ err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
|
|
+ if (err) {
|
|
+ /*
|
|
+ * This should never happen since we pin the
|
|
+ * pages in the ext4_allocation_context so
|
|
+ * ext4_mb_load_buddy() should never fail.
|
|
+ */
|
|
+ WARN(1, "mb_load_buddy failed (%d)", err);
|
|
+ return;
|
|
+ }
|
|
+ ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
|
|
+ mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
|
|
+ ac->ac_f_ex.fe_len);
|
|
+ ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
|
|
+ ext4_mb_unload_buddy(&e4b);
|
|
+ return;
|
|
+ }
|
|
+ if (pa->pa_type == MB_INODE_PA)
|
|
pa->pa_free += ac->ac_b_ex.fe_len;
|
|
}
|
|
|
|
@@ -4767,18 +4793,12 @@ do_more:
|
|
/*
|
|
* blocks being freed are metadata. these blocks shouldn't
|
|
* be used until this transaction is committed
|
|
+ *
|
|
+ * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
|
|
+ * to fail.
|
|
*/
|
|
- retry:
|
|
- new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
|
|
- if (!new_entry) {
|
|
- /*
|
|
- * We use a retry loop because
|
|
- * ext4_free_blocks() is not allowed to fail.
|
|
- */
|
|
- cond_resched();
|
|
- congestion_wait(BLK_RW_ASYNC, HZ/50);
|
|
- goto retry;
|
|
- }
|
|
+ new_entry = kmem_cache_alloc(ext4_free_data_cachep,
|
|
+ GFP_NOFS|__GFP_NOFAIL);
|
|
new_entry->efd_start_cluster = bit;
|
|
new_entry->efd_group = block_group;
|
|
new_entry->efd_count = count_clusters;
|
|
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
|
|
index 2ae73a8..be92ed2 100644
|
|
--- a/fs/ext4/migrate.c
|
|
+++ b/fs/ext4/migrate.c
|
|
@@ -616,6 +616,7 @@ int ext4_ind_migrate(struct inode *inode)
|
|
struct ext4_inode_info *ei = EXT4_I(inode);
|
|
struct ext4_extent *ex;
|
|
unsigned int i, len;
|
|
+ ext4_lblk_t start, end;
|
|
ext4_fsblk_t blk;
|
|
handle_t *handle;
|
|
int ret;
|
|
@@ -629,6 +630,14 @@ int ext4_ind_migrate(struct inode *inode)
|
|
EXT4_FEATURE_RO_COMPAT_BIGALLOC))
|
|
return -EOPNOTSUPP;
|
|
|
|
+ /*
|
|
+ * In order to get correct extent info, force all delayed allocation
|
|
+ * blocks to be allocated, otherwise delayed allocation blocks may not
|
|
+ * be reflected and bypass the checks on extent header.
|
|
+ */
|
|
+ if (test_opt(inode->i_sb, DELALLOC))
|
|
+ ext4_alloc_da_blocks(inode);
|
|
+
|
|
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
|
|
if (IS_ERR(handle))
|
|
return PTR_ERR(handle);
|
|
@@ -646,11 +655,13 @@ int ext4_ind_migrate(struct inode *inode)
|
|
goto errout;
|
|
}
|
|
if (eh->eh_entries == 0)
|
|
- blk = len = 0;
|
|
+ blk = len = start = end = 0;
|
|
else {
|
|
len = le16_to_cpu(ex->ee_len);
|
|
blk = ext4_ext_pblock(ex);
|
|
- if (len > EXT4_NDIR_BLOCKS) {
|
|
+ start = le32_to_cpu(ex->ee_block);
|
|
+ end = start + len - 1;
|
|
+ if (end >= EXT4_NDIR_BLOCKS) {
|
|
ret = -EOPNOTSUPP;
|
|
goto errout;
|
|
}
|
|
@@ -658,7 +669,7 @@ int ext4_ind_migrate(struct inode *inode)
|
|
|
|
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
|
|
memset(ei->i_data, 0, sizeof(ei->i_data));
|
|
- for (i=0; i < len; i++)
|
|
+ for (i = start; i <= end; i++)
|
|
ei->i_data[i] = cpu_to_le32(blk++);
|
|
ext4_mark_inode_dirty(handle, inode);
|
|
errout:
|
|
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
|
|
index 04434ad..1268a1b 100644
|
|
--- a/fs/ext4/mmp.c
|
|
+++ b/fs/ext4/mmp.c
|
|
@@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
|
|
|
|
int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
|
|
{
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return 1;
|
|
|
|
return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
|
|
@@ -29,8 +28,7 @@ int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
|
|
|
|
void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
|
|
{
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return;
|
|
|
|
mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
|
|
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
|
|
index d050e04..bc7e37b 100644
|
|
--- a/fs/ext4/namei.c
|
|
+++ b/fs/ext4/namei.c
|
|
@@ -123,8 +123,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
|
|
"directory leaf block found instead of index block");
|
|
return ERR_PTR(-EIO);
|
|
}
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb) ||
|
|
buffer_verified(bh))
|
|
return bh;
|
|
|
|
@@ -339,8 +338,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
|
|
{
|
|
struct ext4_dir_entry_tail *t;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return 1;
|
|
|
|
t = get_dirent_tail(inode, dirent);
|
|
@@ -361,8 +359,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
|
|
{
|
|
struct ext4_dir_entry_tail *t;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return;
|
|
|
|
t = get_dirent_tail(inode, dirent);
|
|
@@ -437,8 +434,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
|
|
struct dx_tail *t;
|
|
int count_offset, limit, count;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return 1;
|
|
|
|
c = get_dx_countlimit(inode, dirent, &count_offset);
|
|
@@ -467,8 +463,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
|
|
struct dx_tail *t;
|
|
int count_offset, limit, count;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return;
|
|
|
|
c = get_dx_countlimit(inode, dirent, &count_offset);
|
|
@@ -556,8 +551,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
|
|
unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
|
|
EXT4_DIR_REC_LEN(2) - infosize;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(dir->i_sb))
|
|
entry_space -= sizeof(struct dx_tail);
|
|
return entry_space / sizeof(struct dx_entry);
|
|
}
|
|
@@ -566,8 +560,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
|
|
{
|
|
unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(dir->i_sb))
|
|
entry_space -= sizeof(struct dx_tail);
|
|
return entry_space / sizeof(struct dx_entry);
|
|
}
|
|
@@ -1429,7 +1422,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
|
|
dentry);
|
|
return ERR_PTR(-EIO);
|
|
}
|
|
- inode = ext4_iget(dir->i_sb, ino);
|
|
+ inode = ext4_iget_normal(dir->i_sb, ino);
|
|
if (inode == ERR_PTR(-ESTALE)) {
|
|
EXT4_ERROR_INODE(dir,
|
|
"deleted inode referenced: %u",
|
|
@@ -1460,7 +1453,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
|
|
return ERR_PTR(-EIO);
|
|
}
|
|
|
|
- return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
|
|
+ return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
|
|
}
|
|
|
|
/*
|
|
@@ -1534,8 +1527,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
|
|
int csum_size = 0;
|
|
int err = 0, i;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(dir->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
bh2 = ext4_append(handle, dir, &newblock);
|
|
@@ -1704,8 +1696,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
|
|
int csum_size = 0;
|
|
int err;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(inode->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
if (!de) {
|
|
@@ -1772,8 +1763,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|
struct fake_dirent *fde;
|
|
int csum_size = 0;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(inode->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
blocksize = dir->i_sb->s_blocksize;
|
|
@@ -1879,7 +1869,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
|
|
struct inode *inode)
|
|
{
|
|
struct inode *dir = dentry->d_parent->d_inode;
|
|
- struct buffer_head *bh;
|
|
+ struct buffer_head *bh = NULL;
|
|
struct ext4_dir_entry_2 *de;
|
|
struct ext4_dir_entry_tail *t;
|
|
struct super_block *sb;
|
|
@@ -1889,8 +1879,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
|
|
ext4_lblk_t block, blocks;
|
|
int csum_size = 0;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(inode->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
sb = dir->i_sb;
|
|
@@ -1904,14 +1893,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
|
|
return retval;
|
|
if (retval == 1) {
|
|
retval = 0;
|
|
- return retval;
|
|
+ goto out;
|
|
}
|
|
}
|
|
|
|
if (is_dx(dir)) {
|
|
retval = ext4_dx_add_entry(handle, dentry, inode);
|
|
if (!retval || (retval != ERR_BAD_DX_DIR))
|
|
- return retval;
|
|
+ goto out;
|
|
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
|
|
dx_fallback++;
|
|
ext4_mark_inode_dirty(handle, dir);
|
|
@@ -1923,14 +1912,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
|
|
return PTR_ERR(bh);
|
|
|
|
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
|
|
- if (retval != -ENOSPC) {
|
|
- brelse(bh);
|
|
- return retval;
|
|
- }
|
|
+ if (retval != -ENOSPC)
|
|
+ goto out;
|
|
|
|
if (blocks == 1 && !dx_fallback &&
|
|
- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
|
|
- return make_indexed_dir(handle, dentry, inode, bh);
|
|
+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
|
|
+ retval = make_indexed_dir(handle, dentry, inode, bh);
|
|
+ bh = NULL; /* make_indexed_dir releases bh */
|
|
+ goto out;
|
|
+ }
|
|
brelse(bh);
|
|
}
|
|
bh = ext4_append(handle, dir, &block);
|
|
@@ -1946,6 +1936,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
|
|
}
|
|
|
|
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
|
|
+out:
|
|
brelse(bh);
|
|
if (retval == 0)
|
|
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
|
|
@@ -2152,8 +2143,7 @@ static int ext4_delete_entry(handle_t *handle,
|
|
return err;
|
|
}
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(dir->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
BUFFER_TRACE(bh, "get_write_access");
|
|
@@ -2372,8 +2362,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
|
|
int csum_size = 0;
|
|
int err;
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(dir->i_sb))
|
|
csum_size = sizeof(struct ext4_dir_entry_tail);
|
|
|
|
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
|
|
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
|
|
index f3b84cd..2400ad1 100644
|
|
--- a/fs/ext4/resize.c
|
|
+++ b/fs/ext4/resize.c
|
|
@@ -1071,7 +1071,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
|
|
break;
|
|
|
|
if (meta_bg == 0)
|
|
- backup_block = group * bpg + blk_off;
|
|
+ backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
|
|
else
|
|
backup_block = (ext4_group_first_block_no(sb, group) +
|
|
ext4_bg_has_super(sb, group));
|
|
@@ -1200,8 +1200,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
|
|
{
|
|
struct buffer_head *bh;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return 0;
|
|
|
|
bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
|
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
|
index 25b327e..a07af5b 100644
|
|
--- a/fs/ext4/super.c
|
|
+++ b/fs/ext4/super.c
|
|
@@ -140,8 +140,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
|
|
int ext4_superblock_csum_verify(struct super_block *sb,
|
|
struct ext4_super_block *es)
|
|
{
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return 1;
|
|
|
|
return es->s_checksum == ext4_superblock_csum(sb, es);
|
|
@@ -151,8 +150,7 @@ void ext4_superblock_csum_set(struct super_block *sb)
|
|
{
|
|
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
|
|
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(sb))
|
|
return;
|
|
|
|
es->s_checksum = ext4_superblock_csum(sb, es);
|
|
@@ -834,6 +832,7 @@ static void ext4_put_super(struct super_block *sb)
|
|
dump_orphan_list(sb, sbi);
|
|
J_ASSERT(list_empty(&sbi->s_orphan));
|
|
|
|
+ sync_blockdev(sb->s_bdev);
|
|
invalidate_bdev(sb->s_bdev);
|
|
if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
|
|
/*
|
|
@@ -996,7 +995,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
|
|
* Currently we don't know the generation for parent directory, so
|
|
* a generation of 0 means "accept any"
|
|
*/
|
|
- inode = ext4_iget(sb, ino);
|
|
+ inode = ext4_iget_normal(sb, ino);
|
|
if (IS_ERR(inode))
|
|
return ERR_CAST(inode);
|
|
if (generation && inode->i_generation != generation) {
|
|
@@ -1706,13 +1705,6 @@ static int parse_options(char *options, struct super_block *sb,
|
|
"not specified");
|
|
return 0;
|
|
}
|
|
- } else {
|
|
- if (sbi->s_jquota_fmt) {
|
|
- ext4_msg(sb, KERN_ERR, "journaled quota format "
|
|
- "specified with no journaling "
|
|
- "enabled");
|
|
- return 0;
|
|
- }
|
|
}
|
|
#endif
|
|
if (test_opt(sb, DIOREAD_NOLOCK)) {
|
|
@@ -2010,8 +2002,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
|
|
__u16 crc = 0;
|
|
__le32 le_group = cpu_to_le32(block_group);
|
|
|
|
- if ((sbi->s_es->s_feature_ro_compat &
|
|
- cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
|
|
+ if (ext4_has_metadata_csum(sbi->s_sb)) {
|
|
/* Use new metadata_csum algorithm */
|
|
__le16 save_csum;
|
|
__u32 csum32;
|
|
@@ -2029,6 +2020,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
|
|
}
|
|
|
|
/* old crc16 code */
|
|
+ if (!(sbi->s_es->s_feature_ro_compat &
|
|
+ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
|
|
+ return 0;
|
|
+
|
|
offset = offsetof(struct ext4_group_desc, bg_checksum);
|
|
|
|
crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
|
|
@@ -3167,11 +3162,10 @@ static int set_journal_csum_feature_set(struct super_block *sb)
|
|
int compat, incompat;
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
|
|
- /* journal checksum v2 */
|
|
+ if (ext4_has_metadata_csum(sb)) {
|
|
+ /* journal checksum v3 */
|
|
compat = 0;
|
|
- incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
|
|
+ incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
|
|
} else {
|
|
/* journal checksum v1 */
|
|
compat = JBD2_FEATURE_COMPAT_CHECKSUM;
|
|
@@ -3193,6 +3187,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
|
|
jbd2_journal_clear_features(sbi->s_journal,
|
|
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
|
|
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
|
|
+ JBD2_FEATURE_INCOMPAT_CSUM_V3 |
|
|
JBD2_FEATURE_INCOMPAT_CSUM_V2);
|
|
}
|
|
|
|
@@ -3474,8 +3469,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
|
}
|
|
|
|
/* Precompute checksum seed for all metadata */
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (ext4_has_metadata_csum(sb))
|
|
sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
|
|
sizeof(es->s_uuid));
|
|
|
|
@@ -3493,6 +3487,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
|
#ifdef CONFIG_EXT4_FS_POSIX_ACL
|
|
set_opt(sb, POSIX_ACL);
|
|
#endif
|
|
+ /* don't forget to enable journal_csum when metadata_csum is enabled. */
|
|
+ if (ext4_has_metadata_csum(sb))
|
|
+ set_opt(sb, JOURNAL_CHECKSUM);
|
|
+
|
|
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
|
|
set_opt(sb, JOURNAL_DATA);
|
|
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
|
|
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
|
|
index 55e611c..8825154 100644
|
|
--- a/fs/ext4/xattr.c
|
|
+++ b/fs/ext4/xattr.c
|
|
@@ -141,8 +141,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
|
|
sector_t block_nr,
|
|
struct ext4_xattr_header *hdr)
|
|
{
|
|
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
|
|
+ if (ext4_has_metadata_csum(inode->i_sb) &&
|
|
(hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
|
|
return 0;
|
|
return 1;
|
|
@@ -152,8 +151,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode,
|
|
sector_t block_nr,
|
|
struct ext4_xattr_header *hdr)
|
|
{
|
|
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
|
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
|
|
+ if (!ext4_has_metadata_csum(inode->i_sb))
|
|
return;
|
|
|
|
hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
|
|
@@ -189,14 +187,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
|
}
|
|
|
|
static int
|
|
-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
|
|
+ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
|
|
+ void *value_start)
|
|
{
|
|
- while (!IS_LAST_ENTRY(entry)) {
|
|
- struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
|
|
+ struct ext4_xattr_entry *e = entry;
|
|
+
|
|
+ while (!IS_LAST_ENTRY(e)) {
|
|
+ struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
|
|
if ((void *)next >= end)
|
|
return -EIO;
|
|
- entry = next;
|
|
+ e = next;
|
|
}
|
|
+
|
|
+ while (!IS_LAST_ENTRY(entry)) {
|
|
+ if (entry->e_value_size != 0 &&
|
|
+ (value_start + le16_to_cpu(entry->e_value_offs) <
|
|
+ (void *)e + sizeof(__u32) ||
|
|
+ value_start + le16_to_cpu(entry->e_value_offs) +
|
|
+ le32_to_cpu(entry->e_value_size) > end))
|
|
+ return -EIO;
|
|
+ entry = EXT4_XATTR_NEXT(entry);
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -213,7 +225,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
|
|
return -EIO;
|
|
if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
|
|
return -EIO;
|
|
- error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
|
|
+ error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
|
|
+ bh->b_data);
|
|
if (!error)
|
|
set_buffer_verified(bh);
|
|
return error;
|
|
@@ -329,7 +342,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
|
|
header = IHDR(inode, raw_inode);
|
|
entry = IFIRST(header);
|
|
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
|
- error = ext4_xattr_check_names(entry, end);
|
|
+ error = ext4_xattr_check_names(entry, end, entry);
|
|
if (error)
|
|
goto cleanup;
|
|
error = ext4_xattr_find_entry(&entry, name_index, name,
|
|
@@ -457,7 +470,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
|
|
raw_inode = ext4_raw_inode(&iloc);
|
|
header = IHDR(inode, raw_inode);
|
|
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
|
- error = ext4_xattr_check_names(IFIRST(header), end);
|
|
+ error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
|
|
if (error)
|
|
goto cleanup;
|
|
error = ext4_xattr_list_entries(dentry, IFIRST(header),
|
|
@@ -972,7 +985,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
|
|
is->s.here = is->s.first;
|
|
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
|
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
|
|
- error = ext4_xattr_check_names(IFIRST(header), is->s.end);
|
|
+ error = ext4_xattr_check_names(IFIRST(header), is->s.end,
|
|
+ IFIRST(header));
|
|
if (error)
|
|
return error;
|
|
/* Find the named attribute. */
|
|
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
|
|
index 293d048..5c6fe27 100644
|
|
--- a/fs/f2fs/checkpoint.c
|
|
+++ b/fs/f2fs/checkpoint.c
|
|
@@ -71,7 +71,6 @@ repeat:
|
|
goto repeat;
|
|
}
|
|
out:
|
|
- mark_page_accessed(page);
|
|
return page;
|
|
}
|
|
|
|
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
|
|
index b0649b7..bb6478a 100644
|
|
--- a/fs/f2fs/node.c
|
|
+++ b/fs/f2fs/node.c
|
|
@@ -969,7 +969,6 @@ repeat:
|
|
}
|
|
got_it:
|
|
f2fs_bug_on(nid != nid_of_node(page));
|
|
- mark_page_accessed(page);
|
|
return page;
|
|
}
|
|
|
|
@@ -1024,7 +1023,6 @@ page_hit:
|
|
f2fs_put_page(page, 1);
|
|
return ERR_PTR(-EIO);
|
|
}
|
|
- mark_page_accessed(page);
|
|
return page;
|
|
}
|
|
|
|
diff --git a/fs/fhandle.c b/fs/fhandle.c
|
|
index 999ff5c..d59712d 100644
|
|
--- a/fs/fhandle.c
|
|
+++ b/fs/fhandle.c
|
|
@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
|
|
goto out_err;
|
|
}
|
|
/* copy the full handle */
|
|
- if (copy_from_user(handle, ufh,
|
|
- sizeof(struct file_handle) +
|
|
+ *handle = f_handle;
|
|
+ if (copy_from_user(&handle->f_handle,
|
|
+ &ufh->f_handle,
|
|
f_handle.handle_bytes)) {
|
|
retval = -EFAULT;
|
|
goto out_handle;
|
|
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
|
|
index a163159..23a51f0 100644
|
|
--- a/fs/fs-writeback.c
|
|
+++ b/fs/fs-writeback.c
|
|
@@ -476,12 +476,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
|
|
* write_inode()
|
|
*/
|
|
spin_lock(&inode->i_lock);
|
|
- /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
|
|
- if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
|
|
- inode->i_state &= ~I_DIRTY_PAGES;
|
|
+
|
|
dirty = inode->i_state & I_DIRTY;
|
|
- inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
|
|
+ inode->i_state &= ~I_DIRTY;
|
|
+
|
|
+ /*
|
|
+ * Paired with smp_mb() in __mark_inode_dirty(). This allows
|
|
+ * __mark_inode_dirty() to test i_state without grabbing i_lock -
|
|
+ * either they see the I_DIRTY bits cleared or we see the dirtied
|
|
+ * inode.
|
|
+ *
|
|
+ * I_DIRTY_PAGES is always cleared together above even if @mapping
|
|
+ * still has dirty pages. The flag is reinstated after smp_mb() if
|
|
+ * necessary. This guarantees that either __mark_inode_dirty()
|
|
+ * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
|
|
+ */
|
|
+ smp_mb();
|
|
+
|
|
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
|
|
+ inode->i_state |= I_DIRTY_PAGES;
|
|
+
|
|
spin_unlock(&inode->i_lock);
|
|
+
|
|
/* Don't write the inode if only I_DIRTY_PAGES was set */
|
|
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
|
|
int err = write_inode(inode, wbc);
|
|
@@ -1145,12 +1161,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
|
|
}
|
|
|
|
/*
|
|
- * make sure that changes are seen by all cpus before we test i_state
|
|
- * -- mikulas
|
|
+ * Paired with smp_mb() in __writeback_single_inode() for the
|
|
+ * following lockless i_state test. See there for details.
|
|
*/
|
|
smp_mb();
|
|
|
|
- /* avoid the locking if we can */
|
|
if ((inode->i_state & flags) == flags)
|
|
return;
|
|
|
|
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
|
|
index 0a648bb..499155c 100644
|
|
--- a/fs/fuse/dev.c
|
|
+++ b/fs/fuse/dev.c
|
|
@@ -819,8 +819,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
|
|
|
|
newpage = buf->page;
|
|
|
|
- if (WARN_ON(!PageUptodate(newpage)))
|
|
- return -EIO;
|
|
+ if (!PageUptodate(newpage))
|
|
+ SetPageUptodate(newpage);
|
|
|
|
ClearPageMappedToDisk(newpage);
|
|
|
|
@@ -1614,7 +1614,7 @@ out_finish:
|
|
|
|
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
|
|
{
|
|
- release_pages(req->pages, req->num_pages, 0);
|
|
+ release_pages(req->pages, req->num_pages, false);
|
|
}
|
|
|
|
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
|
|
@@ -1726,6 +1726,9 @@ copy_finish:
|
|
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
|
|
unsigned int size, struct fuse_copy_state *cs)
|
|
{
|
|
+ /* Don't try to move pages (yet) */
|
|
+ cs->move_pages = 0;
|
|
+
|
|
switch (code) {
|
|
case FUSE_NOTIFY_POLL:
|
|
return fuse_notify_poll(fc, size, cs);
|
|
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
|
|
index 77bcc30..d8a6027 100644
|
|
--- a/fs/fuse/file.c
|
|
+++ b/fs/fuse/file.c
|
|
@@ -1003,13 +1003,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
|
|
if (mapping_writably_mapped(mapping))
|
|
flush_dcache_page(page);
|
|
|
|
- pagefault_disable();
|
|
tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
|
|
- pagefault_enable();
|
|
flush_dcache_page(page);
|
|
|
|
- mark_page_accessed(page);
|
|
-
|
|
if (!tmp) {
|
|
unlock_page(page);
|
|
page_cache_release(page);
|
|
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
|
|
index 73f6bcb..faf00af 100644
|
|
--- a/fs/fuse/inode.c
|
|
+++ b/fs/fuse/inode.c
|
|
@@ -1026,6 +1026,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
|
|
goto err_fput;
|
|
|
|
fuse_conn_init(fc);
|
|
+ fc->release = fuse_free_conn;
|
|
|
|
fc->dev = sb->s_dev;
|
|
fc->sb = sb;
|
|
@@ -1040,7 +1041,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
|
|
fc->dont_mask = 1;
|
|
sb->s_flags |= MS_POSIXACL;
|
|
|
|
- fc->release = fuse_free_conn;
|
|
fc->flags = d.flags;
|
|
fc->user_id = d.user_id;
|
|
fc->group_id = d.group_id;
|
|
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
|
|
index 49436fa..4ccb60d 100644
|
|
--- a/fs/gfs2/aops.c
|
|
+++ b/fs/gfs2/aops.c
|
|
@@ -517,7 +517,6 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
|
|
p = kmap_atomic(page);
|
|
memcpy(buf + copied, p + offset, amt);
|
|
kunmap_atomic(p);
|
|
- mark_page_accessed(page);
|
|
page_cache_release(page);
|
|
copied += amt;
|
|
index++;
|
|
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
|
|
index 5c52418..bc643b9 100644
|
|
--- a/fs/gfs2/inode.c
|
|
+++ b/fs/gfs2/inode.c
|
|
@@ -606,8 +606,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
|
|
if (!IS_ERR(inode)) {
|
|
d = d_splice_alias(inode, dentry);
|
|
error = PTR_ERR(d);
|
|
- if (IS_ERR(d))
|
|
+ if (IS_ERR(d)) {
|
|
+ inode = ERR_CAST(d);
|
|
goto fail_gunlock;
|
|
+ }
|
|
error = 0;
|
|
if (file) {
|
|
if (S_ISREG(inode->i_mode)) {
|
|
@@ -823,7 +825,6 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
|
|
|
|
d = d_splice_alias(inode, dentry);
|
|
if (IS_ERR(d)) {
|
|
- iput(inode);
|
|
gfs2_glock_dq_uninit(&gh);
|
|
return d;
|
|
}
|
|
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
|
|
index c7f2469..e7b1496 100644
|
|
--- a/fs/gfs2/meta_io.c
|
|
+++ b/fs/gfs2/meta_io.c
|
|
@@ -97,6 +97,11 @@ const struct address_space_operations gfs2_meta_aops = {
|
|
.releasepage = gfs2_releasepage,
|
|
};
|
|
|
|
+const struct address_space_operations gfs2_rgrp_aops = {
|
|
+ .writepage = gfs2_aspace_writepage,
|
|
+ .releasepage = gfs2_releasepage,
|
|
+};
|
|
+
|
|
/**
|
|
* gfs2_getbuf - Get a buffer with a given address space
|
|
* @gl: the glock
|
|
@@ -131,7 +136,8 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
|
|
yield();
|
|
}
|
|
} else {
|
|
- page = find_lock_page(mapping, index);
|
|
+ page = find_get_page_flags(mapping, index,
|
|
+ FGP_LOCK|FGP_ACCESSED);
|
|
if (!page)
|
|
return NULL;
|
|
}
|
|
@@ -148,7 +154,6 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
|
|
map_bh(bh, sdp->sd_vfs, blkno);
|
|
|
|
unlock_page(page);
|
|
- mark_page_accessed(page);
|
|
page_cache_release(page);
|
|
|
|
return bh;
|
|
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
|
|
index 4823b93..ac5d802 100644
|
|
--- a/fs/gfs2/meta_io.h
|
|
+++ b/fs/gfs2/meta_io.h
|
|
@@ -38,12 +38,15 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
|
|
}
|
|
|
|
extern const struct address_space_operations gfs2_meta_aops;
|
|
+extern const struct address_space_operations gfs2_rgrp_aops;
|
|
|
|
static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
|
|
{
|
|
struct inode *inode = mapping->host;
|
|
if (mapping->a_ops == &gfs2_meta_aops)
|
|
return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
|
|
+ else if (mapping->a_ops == &gfs2_rgrp_aops)
|
|
+ return container_of(mapping, struct gfs2_sbd, sd_aspace);
|
|
else
|
|
return inode->i_sb->s_fs_info;
|
|
}
|
|
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
|
|
index c6872d0..f6c9d83 100644
|
|
--- a/fs/gfs2/ops_fstype.c
|
|
+++ b/fs/gfs2/ops_fstype.c
|
|
@@ -104,7 +104,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
|
|
mapping = &sdp->sd_aspace;
|
|
|
|
address_space_init_once(mapping);
|
|
- mapping->a_ops = &gfs2_meta_aops;
|
|
+ mapping->a_ops = &gfs2_rgrp_aops;
|
|
mapping->host = sb->s_bdev->bd_inode;
|
|
mapping->flags = 0;
|
|
mapping_set_gfp_mask(mapping, GFP_NOFS);
|
|
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
|
|
index 6e560d5..754fdf8 100644
|
|
--- a/fs/hfsplus/brec.c
|
|
+++ b/fs/hfsplus/brec.c
|
|
@@ -131,13 +131,16 @@ skip:
|
|
hfs_bnode_write(node, entry, data_off + key_len, entry_len);
|
|
hfs_bnode_dump(node);
|
|
|
|
- if (new_node) {
|
|
- /* update parent key if we inserted a key
|
|
- * at the start of the first node
|
|
- */
|
|
- if (!rec && new_node != node)
|
|
- hfs_brec_update_parent(fd);
|
|
+ /*
|
|
+ * update parent key if we inserted a key
|
|
+ * at the start of the node and it is not the new node
|
|
+ */
|
|
+ if (!rec && new_node != node) {
|
|
+ hfs_bnode_read_key(node, fd->search_key, data_off + size);
|
|
+ hfs_brec_update_parent(fd);
|
|
+ }
|
|
|
|
+ if (new_node) {
|
|
hfs_bnode_put(fd->bnode);
|
|
if (!new_node->parent) {
|
|
hfs_btree_inc_height(tree);
|
|
@@ -168,9 +171,6 @@ skip:
|
|
goto again;
|
|
}
|
|
|
|
- if (!rec)
|
|
- hfs_brec_update_parent(fd);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -370,6 +370,8 @@ again:
|
|
if (IS_ERR(parent))
|
|
return PTR_ERR(parent);
|
|
__hfs_brec_find(parent, fd, hfs_find_rec_by_key);
|
|
+ if (fd->record < 0)
|
|
+ return -ENOENT;
|
|
hfs_bnode_dump(parent);
|
|
rec = fd->record;
|
|
|
|
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
|
|
index 1b39afd..5f60bb2 100644
|
|
--- a/fs/hpfs/namei.c
|
|
+++ b/fs/hpfs/namei.c
|
|
@@ -8,6 +8,17 @@
|
|
#include <linux/sched.h>
|
|
#include "hpfs_fn.h"
|
|
|
|
+static void hpfs_update_directory_times(struct inode *dir)
|
|
+{
|
|
+ time_t t = get_seconds();
|
|
+ if (t == dir->i_mtime.tv_sec &&
|
|
+ t == dir->i_ctime.tv_sec)
|
|
+ return;
|
|
+ dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
|
|
+ dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
|
|
+ hpfs_write_inode_nolock(dir);
|
|
+}
|
|
+
|
|
static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
|
|
{
|
|
const unsigned char *name = dentry->d_name.name;
|
|
@@ -99,6 +110,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
|
|
result->i_mode = mode | S_IFDIR;
|
|
hpfs_write_inode_nolock(result);
|
|
}
|
|
+ hpfs_update_directory_times(dir);
|
|
d_instantiate(dentry, result);
|
|
hpfs_unlock(dir->i_sb);
|
|
return 0;
|
|
@@ -187,6 +199,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b
|
|
result->i_mode = mode | S_IFREG;
|
|
hpfs_write_inode_nolock(result);
|
|
}
|
|
+ hpfs_update_directory_times(dir);
|
|
d_instantiate(dentry, result);
|
|
hpfs_unlock(dir->i_sb);
|
|
return 0;
|
|
@@ -262,6 +275,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de
|
|
insert_inode_hash(result);
|
|
|
|
hpfs_write_inode_nolock(result);
|
|
+ hpfs_update_directory_times(dir);
|
|
d_instantiate(dentry, result);
|
|
brelse(bh);
|
|
hpfs_unlock(dir->i_sb);
|
|
@@ -340,6 +354,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
|
|
insert_inode_hash(result);
|
|
|
|
hpfs_write_inode_nolock(result);
|
|
+ hpfs_update_directory_times(dir);
|
|
d_instantiate(dentry, result);
|
|
hpfs_unlock(dir->i_sb);
|
|
return 0;
|
|
@@ -423,6 +438,8 @@ again:
|
|
out1:
|
|
hpfs_brelse4(&qbh);
|
|
out:
|
|
+ if (!err)
|
|
+ hpfs_update_directory_times(dir);
|
|
hpfs_unlock(dir->i_sb);
|
|
return err;
|
|
}
|
|
@@ -477,6 +494,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
|
|
out1:
|
|
hpfs_brelse4(&qbh);
|
|
out:
|
|
+ if (!err)
|
|
+ hpfs_update_directory_times(dir);
|
|
hpfs_unlock(dir->i_sb);
|
|
return err;
|
|
}
|
|
@@ -595,7 +614,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
goto end1;
|
|
}
|
|
|
|
- end:
|
|
+end:
|
|
hpfs_i(i)->i_parent_dir = new_dir->i_ino;
|
|
if (S_ISDIR(i->i_mode)) {
|
|
inc_nlink(new_dir);
|
|
@@ -610,6 +629,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
brelse(bh);
|
|
}
|
|
end1:
|
|
+ if (!err) {
|
|
+ hpfs_update_directory_times(old_dir);
|
|
+ hpfs_update_directory_times(new_dir);
|
|
+ }
|
|
hpfs_unlock(i->i_sb);
|
|
return err;
|
|
}
|
|
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
|
|
index 4534ff6..77b583d 100644
|
|
--- a/fs/hpfs/super.c
|
|
+++ b/fs/hpfs/super.c
|
|
@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
|
|
}
|
|
|
|
/* Filesystem error... */
|
|
-static char err_buf[1024];
|
|
-
|
|
void hpfs_error(struct super_block *s, const char *fmt, ...)
|
|
{
|
|
+ struct va_format vaf;
|
|
va_list args;
|
|
|
|
va_start(args, fmt);
|
|
- vsnprintf(err_buf, sizeof(err_buf), fmt, args);
|
|
+
|
|
+ vaf.fmt = fmt;
|
|
+ vaf.va = &args;
|
|
+
|
|
+ pr_err("filesystem error: %pV", &vaf);
|
|
+
|
|
va_end(args);
|
|
|
|
- printk("HPFS: filesystem error: %s", err_buf);
|
|
if (!hpfs_sb(s)->sb_was_error) {
|
|
if (hpfs_sb(s)->sb_err == 2) {
|
|
printk("; crashing the system because you wanted it\n");
|
|
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
|
|
index d19b30a..a4a8ed5 100644
|
|
--- a/fs/hugetlbfs/inode.c
|
|
+++ b/fs/hugetlbfs/inode.c
|
|
@@ -1017,6 +1017,11 @@ static int __init init_hugetlbfs_fs(void)
|
|
int error;
|
|
int i;
|
|
|
|
+ if (!hugepages_supported()) {
|
|
+ pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n");
|
|
+ return -ENOTSUPP;
|
|
+ }
|
|
+
|
|
error = bdi_init(&hugetlbfs_backing_dev_info);
|
|
if (error)
|
|
return error;
|
|
diff --git a/fs/inode.c b/fs/inode.c
|
|
index e846a32..644875b 100644
|
|
--- a/fs/inode.c
|
|
+++ b/fs/inode.c
|
|
@@ -1631,8 +1631,8 @@ int file_remove_suid(struct file *file)
|
|
error = security_inode_killpriv(dentry);
|
|
if (!error && killsuid)
|
|
error = __remove_suid(dentry, killsuid);
|
|
- if (!error && (inode->i_sb->s_flags & MS_NOSEC))
|
|
- inode->i_flags |= S_NOSEC;
|
|
+ if (!error)
|
|
+ inode_has_no_xattr(inode);
|
|
|
|
return error;
|
|
}
|
|
diff --git a/fs/ioprio.c b/fs/ioprio.c
|
|
index e50170c..31666c9 100644
|
|
--- a/fs/ioprio.c
|
|
+++ b/fs/ioprio.c
|
|
@@ -157,14 +157,16 @@ out:
|
|
|
|
int ioprio_best(unsigned short aprio, unsigned short bprio)
|
|
{
|
|
- unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
|
|
- unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
|
|
+ unsigned short aclass;
|
|
+ unsigned short bclass;
|
|
|
|
- if (aclass == IOPRIO_CLASS_NONE)
|
|
- aclass = IOPRIO_CLASS_BE;
|
|
- if (bclass == IOPRIO_CLASS_NONE)
|
|
- bclass = IOPRIO_CLASS_BE;
|
|
+ if (!ioprio_valid(aprio))
|
|
+ aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
|
|
+ if (!ioprio_valid(bprio))
|
|
+ bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
|
|
|
|
+ aclass = IOPRIO_PRIO_CLASS(aprio);
|
|
+ bclass = IOPRIO_PRIO_CLASS(bprio);
|
|
if (aclass == bclass)
|
|
return min(aprio, bprio);
|
|
if (aclass > bclass)
|
|
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
|
|
index 4a9e10e..a9daccb 100644
|
|
--- a/fs/isofs/inode.c
|
|
+++ b/fs/isofs/inode.c
|
|
@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
|
|
return;
|
|
}
|
|
|
|
-static int isofs_read_inode(struct inode *);
|
|
+static int isofs_read_inode(struct inode *, int relocated);
|
|
static int isofs_statfs (struct dentry *, struct kstatfs *);
|
|
|
|
static struct kmem_cache *isofs_inode_cachep;
|
|
@@ -1258,7 +1258,7 @@ out_toomany:
|
|
goto out;
|
|
}
|
|
|
|
-static int isofs_read_inode(struct inode *inode)
|
|
+static int isofs_read_inode(struct inode *inode, int relocated)
|
|
{
|
|
struct super_block *sb = inode->i_sb;
|
|
struct isofs_sb_info *sbi = ISOFS_SB(sb);
|
|
@@ -1403,7 +1403,7 @@ static int isofs_read_inode(struct inode *inode)
|
|
*/
|
|
|
|
if (!high_sierra) {
|
|
- parse_rock_ridge_inode(de, inode);
|
|
+ parse_rock_ridge_inode(de, inode, relocated);
|
|
/* if we want uid/gid set, override the rock ridge setting */
|
|
if (sbi->s_uid_set)
|
|
inode->i_uid = sbi->s_uid;
|
|
@@ -1482,9 +1482,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
|
|
* offset that point to the underlying meta-data for the inode. The
|
|
* code below is otherwise similar to the iget() code in
|
|
* include/linux/fs.h */
|
|
-struct inode *isofs_iget(struct super_block *sb,
|
|
- unsigned long block,
|
|
- unsigned long offset)
|
|
+struct inode *__isofs_iget(struct super_block *sb,
|
|
+ unsigned long block,
|
|
+ unsigned long offset,
|
|
+ int relocated)
|
|
{
|
|
unsigned long hashval;
|
|
struct inode *inode;
|
|
@@ -1506,7 +1507,7 @@ struct inode *isofs_iget(struct super_block *sb,
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
if (inode->i_state & I_NEW) {
|
|
- ret = isofs_read_inode(inode);
|
|
+ ret = isofs_read_inode(inode, relocated);
|
|
if (ret < 0) {
|
|
iget_failed(inode);
|
|
inode = ERR_PTR(ret);
|
|
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
|
|
index 9916723..0ac4c1f 100644
|
|
--- a/fs/isofs/isofs.h
|
|
+++ b/fs/isofs/isofs.h
|
|
@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
|
|
|
|
struct inode; /* To make gcc happy */
|
|
|
|
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
|
|
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
|
|
extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
|
|
extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
|
|
|
|
@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
|
|
extern struct buffer_head *isofs_bread(struct inode *, sector_t);
|
|
extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
|
|
|
|
-extern struct inode *isofs_iget(struct super_block *sb,
|
|
- unsigned long block,
|
|
- unsigned long offset);
|
|
+struct inode *__isofs_iget(struct super_block *sb,
|
|
+ unsigned long block,
|
|
+ unsigned long offset,
|
|
+ int relocated);
|
|
+
|
|
+static inline struct inode *isofs_iget(struct super_block *sb,
|
|
+ unsigned long block,
|
|
+ unsigned long offset)
|
|
+{
|
|
+ return __isofs_iget(sb, block, offset, 0);
|
|
+}
|
|
+
|
|
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
|
|
+ unsigned long block,
|
|
+ unsigned long offset)
|
|
+{
|
|
+ return __isofs_iget(sb, block, offset, 1);
|
|
+}
|
|
|
|
/* Because the inode number is no longer relevant to finding the
|
|
* underlying meta-data for an inode, we are free to choose a more
|
|
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
|
|
index c0bf424..735d752 100644
|
|
--- a/fs/isofs/rock.c
|
|
+++ b/fs/isofs/rock.c
|
|
@@ -30,6 +30,7 @@ struct rock_state {
|
|
int cont_size;
|
|
int cont_extent;
|
|
int cont_offset;
|
|
+ int cont_loops;
|
|
struct inode *inode;
|
|
};
|
|
|
|
@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
|
|
rs->inode = inode;
|
|
}
|
|
|
|
+/* Maximum number of Rock Ridge continuation entries */
|
|
+#define RR_MAX_CE_ENTRIES 32
|
|
+
|
|
/*
|
|
* Returns 0 if the caller should continue scanning, 1 if the scan must end
|
|
* and -ve on error.
|
|
@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
|
|
goto out;
|
|
}
|
|
ret = -EIO;
|
|
+ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
|
|
+ goto out;
|
|
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
|
|
if (bh) {
|
|
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
|
|
@@ -288,12 +294,16 @@ eio:
|
|
goto out;
|
|
}
|
|
|
|
+#define RR_REGARD_XA 1
|
|
+#define RR_RELOC_DE 2
|
|
+
|
|
static int
|
|
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
|
|
- struct inode *inode, int regard_xa)
|
|
+ struct inode *inode, int flags)
|
|
{
|
|
int symlink_len = 0;
|
|
int cnt, sig;
|
|
+ unsigned int reloc_block;
|
|
struct inode *reloc;
|
|
struct rock_ridge *rr;
|
|
int rootflag;
|
|
@@ -305,7 +315,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
|
|
|
|
init_rock_state(&rs, inode);
|
|
setup_rock_ridge(de, inode, &rs);
|
|
- if (regard_xa) {
|
|
+ if (flags & RR_REGARD_XA) {
|
|
rs.chr += 14;
|
|
rs.len -= 14;
|
|
if (rs.len < 0)
|
|
@@ -352,6 +362,9 @@ repeat:
|
|
rs.cont_size = isonum_733(rr->u.CE.size);
|
|
break;
|
|
case SIG('E', 'R'):
|
|
+ /* Invalid length of ER tag id? */
|
|
+ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
|
|
+ goto out;
|
|
ISOFS_SB(inode->i_sb)->s_rock = 1;
|
|
printk(KERN_DEBUG "ISO 9660 Extensions: ");
|
|
{
|
|
@@ -485,12 +498,22 @@ repeat:
|
|
"relocated directory\n");
|
|
goto out;
|
|
case SIG('C', 'L'):
|
|
- ISOFS_I(inode)->i_first_extent =
|
|
- isonum_733(rr->u.CL.location);
|
|
- reloc =
|
|
- isofs_iget(inode->i_sb,
|
|
- ISOFS_I(inode)->i_first_extent,
|
|
- 0);
|
|
+ if (flags & RR_RELOC_DE) {
|
|
+ printk(KERN_ERR
|
|
+ "ISOFS: Recursive directory relocation "
|
|
+ "is not supported\n");
|
|
+ goto eio;
|
|
+ }
|
|
+ reloc_block = isonum_733(rr->u.CL.location);
|
|
+ if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
|
|
+ ISOFS_I(inode)->i_iget5_offset == 0) {
|
|
+ printk(KERN_ERR
|
|
+ "ISOFS: Directory relocation points to "
|
|
+ "itself\n");
|
|
+ goto eio;
|
|
+ }
|
|
+ ISOFS_I(inode)->i_first_extent = reloc_block;
|
|
+ reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
|
|
if (IS_ERR(reloc)) {
|
|
ret = PTR_ERR(reloc);
|
|
goto out;
|
|
@@ -637,9 +660,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
|
|
return rpnt;
|
|
}
|
|
|
|
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
|
|
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
|
|
+ int relocated)
|
|
{
|
|
- int result = parse_rock_ridge_inode_internal(de, inode, 0);
|
|
+ int flags = relocated ? RR_RELOC_DE : 0;
|
|
+ int result = parse_rock_ridge_inode_internal(de, inode, flags);
|
|
|
|
/*
|
|
* if rockridge flag was reset and we didn't look for attributes
|
|
@@ -647,7 +672,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
|
|
*/
|
|
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
|
|
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
|
|
- result = parse_rock_ridge_inode_internal(de, inode, 14);
|
|
+ result = parse_rock_ridge_inode_internal(de, inode,
|
|
+ flags | RR_REGARD_XA);
|
|
}
|
|
return result;
|
|
}
|
|
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
|
|
index 7f34f47..b892355 100644
|
|
--- a/fs/jbd2/checkpoint.c
|
|
+++ b/fs/jbd2/checkpoint.c
|
|
@@ -448,7 +448,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
|
|
unsigned long blocknr;
|
|
|
|
if (is_journal_aborted(journal))
|
|
- return 1;
|
|
+ return -EIO;
|
|
|
|
if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
|
|
return 1;
|
|
@@ -463,10 +463,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
|
|
* jbd2_cleanup_journal_tail() doesn't get called all that often.
|
|
*/
|
|
if (journal->j_flags & JBD2_BARRIER)
|
|
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
|
|
+ blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
|
|
|
|
- __jbd2_update_log_tail(journal, first_tid, blocknr);
|
|
- return 0;
|
|
+ return __jbd2_update_log_tail(journal, first_tid, blocknr);
|
|
}
|
|
|
|
|
|
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
|
|
index cf2fc05..9181c2b 100644
|
|
--- a/fs/jbd2/commit.c
|
|
+++ b/fs/jbd2/commit.c
|
|
@@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
|
|
struct commit_header *h;
|
|
__u32 csum;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return;
|
|
|
|
h = (struct commit_header *)(bh->b_data);
|
|
@@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
|
|
return checksum;
|
|
}
|
|
|
|
-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
|
|
+static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
|
|
unsigned long long block)
|
|
{
|
|
tag->t_blocknr = cpu_to_be32(block & (u32)~0);
|
|
- if (tag_bytes > JBD2_TAG_SIZE32)
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
|
|
tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
|
|
}
|
|
|
|
@@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j,
|
|
struct jbd2_journal_block_tail *tail;
|
|
__u32 csum;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return;
|
|
|
|
tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
|
|
@@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j,
|
|
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
|
|
struct buffer_head *bh, __u32 sequence)
|
|
{
|
|
+ journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
|
|
struct page *page = bh->b_page;
|
|
__u8 *addr;
|
|
__u32 csum32;
|
|
__be32 seq;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return;
|
|
|
|
seq = cpu_to_be32(sequence);
|
|
@@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
|
|
bh->b_size);
|
|
kunmap_atomic(addr);
|
|
|
|
- /* We only have space to store the lower 16 bits of the crc32c. */
|
|
- tag->t_checksum = cpu_to_be16(csum32);
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
|
|
+ tag3->t_checksum = cpu_to_be32(csum32);
|
|
+ else
|
|
+ tag->t_checksum = cpu_to_be16(csum32);
|
|
}
|
|
/*
|
|
* jbd2_journal_commit_transaction
|
|
@@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
|
LIST_HEAD(io_bufs);
|
|
LIST_HEAD(log_bufs);
|
|
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
csum_size = sizeof(struct jbd2_journal_block_tail);
|
|
|
|
/*
|
|
@@ -692,7 +695,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
|
tag_flag |= JBD2_FLAG_SAME_UUID;
|
|
|
|
tag = (journal_block_tag_t *) tagp;
|
|
- write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
|
|
+ write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
|
|
tag->t_flags = cpu_to_be16(tag_flag);
|
|
jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
|
|
commit_transaction->t_tid);
|
|
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
|
|
index 5fa344a..e8d62d7 100644
|
|
--- a/fs/jbd2/journal.c
|
|
+++ b/fs/jbd2/journal.c
|
|
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug);
|
|
/* Checksumming functions */
|
|
int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
|
|
{
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return 1;
|
|
|
|
return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
|
|
@@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
|
|
|
|
int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
|
|
{
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return 1;
|
|
|
|
return sb->s_checksum == jbd2_superblock_csum(j, sb);
|
|
@@ -153,7 +153,7 @@ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
|
|
|
|
void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
|
|
{
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return;
|
|
|
|
sb->s_checksum = jbd2_superblock_csum(j, sb);
|
|
@@ -885,9 +885,10 @@ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
|
|
*
|
|
* Requires j_checkpoint_mutex
|
|
*/
|
|
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
|
|
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
|
|
{
|
|
unsigned long freed;
|
|
+ int ret;
|
|
|
|
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
|
|
|
|
@@ -897,7 +898,10 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
|
|
* space and if we lose sb update during power failure we'd replay
|
|
* old transaction with possibly newly overwritten data.
|
|
*/
|
|
- jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
|
|
+ ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
write_lock(&journal->j_state_lock);
|
|
freed = block - journal->j_tail;
|
|
if (block < journal->j_tail)
|
|
@@ -913,6 +917,9 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
|
|
journal->j_tail_sequence = tid;
|
|
journal->j_tail = block;
|
|
write_unlock(&journal->j_state_lock);
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
}
|
|
|
|
/*
|
|
@@ -1331,7 +1338,7 @@ static int journal_reset(journal_t *journal)
|
|
return jbd2_journal_start_thread(journal);
|
|
}
|
|
|
|
-static void jbd2_write_superblock(journal_t *journal, int write_op)
|
|
+static int jbd2_write_superblock(journal_t *journal, int write_op)
|
|
{
|
|
struct buffer_head *bh = journal->j_sb_buffer;
|
|
journal_superblock_t *sb = journal->j_superblock;
|
|
@@ -1370,7 +1377,10 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
|
|
printk(KERN_ERR "JBD2: Error %d detected when updating "
|
|
"journal superblock for %s.\n", ret,
|
|
journal->j_devname);
|
|
+ jbd2_journal_abort(journal, ret);
|
|
}
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
@@ -1383,10 +1393,11 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
|
|
* Update a journal's superblock information about log tail and write it to
|
|
* disk, waiting for the IO to complete.
|
|
*/
|
|
-void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
|
|
+int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
|
|
unsigned long tail_block, int write_op)
|
|
{
|
|
journal_superblock_t *sb = journal->j_superblock;
|
|
+ int ret;
|
|
|
|
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
|
|
jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
|
|
@@ -1395,13 +1406,18 @@ void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
|
|
sb->s_sequence = cpu_to_be32(tail_tid);
|
|
sb->s_start = cpu_to_be32(tail_block);
|
|
|
|
- jbd2_write_superblock(journal, write_op);
|
|
+ ret = jbd2_write_superblock(journal, write_op);
|
|
+ if (ret)
|
|
+ goto out;
|
|
|
|
/* Log is no longer empty */
|
|
write_lock(&journal->j_state_lock);
|
|
WARN_ON(!sb->s_sequence);
|
|
journal->j_flags &= ~JBD2_FLUSHED;
|
|
write_unlock(&journal->j_state_lock);
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
@@ -1522,21 +1538,29 @@ static int journal_get_superblock(journal_t *journal)
|
|
goto out;
|
|
}
|
|
|
|
- if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
|
|
- JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
|
|
+ if (jbd2_journal_has_csum_v2or3(journal) &&
|
|
+ JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
|
|
/* Can't have checksum v1 and v2 on at the same time! */
|
|
printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
|
|
"at the same time!\n");
|
|
goto out;
|
|
}
|
|
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
|
|
+ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
|
|
+ /* Can't have checksum v2 and v3 at the same time! */
|
|
+ printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
|
|
+ "at the same time!\n");
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (!jbd2_verify_csum_type(journal, sb)) {
|
|
printk(KERN_ERR "JBD2: Unknown checksum type\n");
|
|
goto out;
|
|
}
|
|
|
|
/* Load the checksum driver */
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
|
|
+ if (jbd2_journal_has_csum_v2or3(journal)) {
|
|
journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
|
|
if (IS_ERR(journal->j_chksum_driver)) {
|
|
printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
|
|
@@ -1553,7 +1577,7 @@ static int journal_get_superblock(journal_t *journal)
|
|
}
|
|
|
|
/* Precompute checksum seed for all metadata */
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
|
|
sizeof(sb->s_uuid));
|
|
|
|
@@ -1813,8 +1837,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
|
|
if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
|
|
return 0;
|
|
|
|
- /* Asking for checksumming v2 and v1? Only give them v2. */
|
|
- if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
|
|
+ /* If enabling v2 checksums, turn on v3 instead */
|
|
+ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
|
|
+ incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
|
|
+ incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
|
|
+ }
|
|
+
|
|
+ /* Asking for checksumming v3 and v1? Only give them v3. */
|
|
+ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
|
|
compat & JBD2_FEATURE_COMPAT_CHECKSUM)
|
|
compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
|
|
|
|
@@ -1823,8 +1853,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
|
|
|
|
sb = journal->j_superblock;
|
|
|
|
- /* If enabling v2 checksums, update superblock */
|
|
- if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
|
|
+ /* If enabling v3 checksums, update superblock */
|
|
+ if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
|
|
sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
|
|
sb->s_feature_compat &=
|
|
~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
|
|
@@ -1842,8 +1872,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
|
|
}
|
|
|
|
/* Precompute checksum seed for all metadata */
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal,
|
|
- JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
journal->j_csum_seed = jbd2_chksum(journal, ~0,
|
|
sb->s_uuid,
|
|
sizeof(sb->s_uuid));
|
|
@@ -1852,7 +1881,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
|
|
/* If enabling v1 checksums, downgrade superblock */
|
|
if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
|
|
sb->s_feature_incompat &=
|
|
- ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
|
|
+ ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
|
|
+ JBD2_FEATURE_INCOMPAT_CSUM_V3);
|
|
|
|
sb->s_feature_compat |= cpu_to_be32(compat);
|
|
sb->s_feature_ro_compat |= cpu_to_be32(ro);
|
|
@@ -1938,7 +1968,14 @@ int jbd2_journal_flush(journal_t *journal)
|
|
return -EIO;
|
|
|
|
mutex_lock(&journal->j_checkpoint_mutex);
|
|
- jbd2_cleanup_journal_tail(journal);
|
|
+ if (!err) {
|
|
+ err = jbd2_cleanup_journal_tail(journal);
|
|
+ if (err < 0) {
|
|
+ mutex_unlock(&journal->j_checkpoint_mutex);
|
|
+ goto out;
|
|
+ }
|
|
+ err = 0;
|
|
+ }
|
|
|
|
/* Finally, mark the journal as really needing no recovery.
|
|
* This sets s_start==0 in the underlying superblock, which is
|
|
@@ -1954,7 +1991,8 @@ int jbd2_journal_flush(journal_t *journal)
|
|
J_ASSERT(journal->j_head == journal->j_tail);
|
|
J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
|
|
write_unlock(&journal->j_state_lock);
|
|
- return 0;
|
|
+out:
|
|
+ return err;
|
|
}
|
|
|
|
/**
|
|
@@ -2165,16 +2203,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
|
|
*/
|
|
size_t journal_tag_bytes(journal_t *journal)
|
|
{
|
|
- journal_block_tag_t tag;
|
|
- size_t x = 0;
|
|
+ size_t sz;
|
|
+
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
|
|
+ return sizeof(journal_block_tag3_t);
|
|
+
|
|
+ sz = sizeof(journal_block_tag_t);
|
|
|
|
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
- x += sizeof(tag.t_checksum);
|
|
+ sz += sizeof(__u16);
|
|
|
|
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
|
|
- return x + JBD2_TAG_SIZE64;
|
|
+ return sz;
|
|
else
|
|
- return x + JBD2_TAG_SIZE32;
|
|
+ return sz - sizeof(__u32);
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
|
|
index 3b6bb19..a5f72a3 100644
|
|
--- a/fs/jbd2/recovery.c
|
|
+++ b/fs/jbd2/recovery.c
|
|
@@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
|
|
__be32 provided;
|
|
__u32 calculated;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return 1;
|
|
|
|
tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
|
|
@@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
|
|
int nr = 0, size = journal->j_blocksize;
|
|
int tag_bytes = journal_tag_bytes(journal);
|
|
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
size -= sizeof(struct jbd2_journal_block_tail);
|
|
|
|
tagp = &bh->b_data[sizeof(journal_header_t)];
|
|
@@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal)
|
|
return err;
|
|
}
|
|
|
|
-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
|
|
+static inline unsigned long long read_tag_block(journal_t *journal,
|
|
+ journal_block_tag_t *tag)
|
|
{
|
|
unsigned long long block = be32_to_cpu(tag->t_blocknr);
|
|
- if (tag_bytes > JBD2_TAG_SIZE32)
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
|
|
block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
|
|
return block;
|
|
}
|
|
@@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
|
|
__be32 provided;
|
|
__u32 calculated;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return 1;
|
|
|
|
h = buf;
|
|
@@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
|
|
static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
|
|
void *buf, __u32 sequence)
|
|
{
|
|
+ journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
|
|
__u32 csum32;
|
|
__be32 seq;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return 1;
|
|
|
|
seq = cpu_to_be32(sequence);
|
|
csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
|
|
csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
|
|
|
|
- return tag->t_checksum == cpu_to_be16(csum32);
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
|
|
+ return tag3->t_checksum == cpu_to_be32(csum32);
|
|
+ else
|
|
+ return tag->t_checksum == cpu_to_be16(csum32);
|
|
}
|
|
|
|
static int do_one_pass(journal_t *journal,
|
|
@@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal,
|
|
int tag_bytes = journal_tag_bytes(journal);
|
|
__u32 crc32_sum = ~0; /* Transactional Checksums */
|
|
int descr_csum_size = 0;
|
|
+ int block_error = 0;
|
|
|
|
/*
|
|
* First thing is to establish what we expect to find in the log
|
|
@@ -512,14 +518,14 @@ static int do_one_pass(journal_t *journal,
|
|
switch(blocktype) {
|
|
case JBD2_DESCRIPTOR_BLOCK:
|
|
/* Verify checksum first */
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal,
|
|
- JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
descr_csum_size =
|
|
sizeof(struct jbd2_journal_block_tail);
|
|
if (descr_csum_size > 0 &&
|
|
!jbd2_descr_block_csum_verify(journal,
|
|
bh->b_data)) {
|
|
err = -EIO;
|
|
+ brelse(bh);
|
|
goto failed;
|
|
}
|
|
|
|
@@ -574,7 +580,7 @@ static int do_one_pass(journal_t *journal,
|
|
unsigned long long blocknr;
|
|
|
|
J_ASSERT(obh != NULL);
|
|
- blocknr = read_tag_block(tag_bytes,
|
|
+ blocknr = read_tag_block(journal,
|
|
tag);
|
|
|
|
/* If the block has been
|
|
@@ -598,7 +604,8 @@ static int do_one_pass(journal_t *journal,
|
|
"checksum recovering "
|
|
"block %llu in log\n",
|
|
blocknr);
|
|
- continue;
|
|
+ block_error = 1;
|
|
+ goto skip_write;
|
|
}
|
|
|
|
/* Find a buffer for the new
|
|
@@ -797,7 +804,8 @@ static int do_one_pass(journal_t *journal,
|
|
success = -EIO;
|
|
}
|
|
}
|
|
-
|
|
+ if (block_error && success == 0)
|
|
+ success = -EIO;
|
|
return success;
|
|
|
|
failed:
|
|
@@ -811,7 +819,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
|
|
__be32 provided;
|
|
__u32 calculated;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return 1;
|
|
|
|
tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
|
|
@@ -831,15 +839,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
|
|
{
|
|
jbd2_journal_revoke_header_t *header;
|
|
int offset, max;
|
|
+ int csum_size = 0;
|
|
+ __u32 rcount;
|
|
int record_len = 4;
|
|
|
|
header = (jbd2_journal_revoke_header_t *) bh->b_data;
|
|
offset = sizeof(jbd2_journal_revoke_header_t);
|
|
- max = be32_to_cpu(header->r_count);
|
|
+ rcount = be32_to_cpu(header->r_count);
|
|
|
|
if (!jbd2_revoke_block_csum_verify(journal, header))
|
|
return -EINVAL;
|
|
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
+ csum_size = sizeof(struct jbd2_journal_revoke_tail);
|
|
+ if (rcount > journal->j_blocksize - csum_size)
|
|
+ return -EINVAL;
|
|
+ max = rcount;
|
|
+
|
|
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
|
|
record_len = 8;
|
|
|
|
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
|
|
index 198c9c1..8ecf9b9 100644
|
|
--- a/fs/jbd2/revoke.c
|
|
+++ b/fs/jbd2/revoke.c
|
|
@@ -91,8 +91,8 @@
|
|
#include <linux/list.h>
|
|
#include <linux/init.h>
|
|
#include <linux/bio.h>
|
|
-#endif
|
|
#include <linux/log2.h>
|
|
+#endif
|
|
|
|
static struct kmem_cache *jbd2_revoke_record_cache;
|
|
static struct kmem_cache *jbd2_revoke_table_cache;
|
|
@@ -583,7 +583,7 @@ static void write_one_revoke_record(journal_t *journal,
|
|
{
|
|
int csum_size = 0;
|
|
struct buffer_head *descriptor;
|
|
- int offset;
|
|
+ int sz, offset;
|
|
journal_header_t *header;
|
|
|
|
/* If we are already aborting, this all becomes a noop. We
|
|
@@ -597,12 +597,17 @@ static void write_one_revoke_record(journal_t *journal,
|
|
offset = *offsetp;
|
|
|
|
/* Do we need to leave space at the end for a checksum? */
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (jbd2_journal_has_csum_v2or3(journal))
|
|
csum_size = sizeof(struct jbd2_journal_revoke_tail);
|
|
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
|
|
+ sz = 8;
|
|
+ else
|
|
+ sz = 4;
|
|
+
|
|
/* Make sure we have a descriptor with space left for the record */
|
|
if (descriptor) {
|
|
- if (offset >= journal->j_blocksize - csum_size) {
|
|
+ if (offset + sz > journal->j_blocksize - csum_size) {
|
|
flush_descriptor(journal, descriptor, offset, write_op);
|
|
descriptor = NULL;
|
|
}
|
|
@@ -625,16 +630,13 @@ static void write_one_revoke_record(journal_t *journal,
|
|
*descriptorp = descriptor;
|
|
}
|
|
|
|
- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
|
|
* ((__be64 *)(&descriptor->b_data[offset])) =
|
|
cpu_to_be64(record->blocknr);
|
|
- offset += 8;
|
|
-
|
|
- } else {
|
|
+ else
|
|
* ((__be32 *)(&descriptor->b_data[offset])) =
|
|
cpu_to_be32(record->blocknr);
|
|
- offset += 4;
|
|
- }
|
|
+ offset += sz;
|
|
|
|
*offsetp = offset;
|
|
}
|
|
@@ -644,7 +646,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
|
|
struct jbd2_journal_revoke_tail *tail;
|
|
__u32 csum;
|
|
|
|
- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
|
|
+ if (!jbd2_journal_has_csum_v2or3(j))
|
|
return;
|
|
|
|
tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
|
|
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
|
|
index f8a5d6a..ecc5707 100644
|
|
--- a/fs/jbd2/transaction.c
|
|
+++ b/fs/jbd2/transaction.c
|
|
@@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
|
|
int result;
|
|
int wanted;
|
|
|
|
- WARN_ON(!transaction);
|
|
if (is_handle_aborted(handle))
|
|
return -EROFS;
|
|
journal = transaction->t_journal;
|
|
@@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
|
|
tid_t tid;
|
|
int need_to_start, ret;
|
|
|
|
- WARN_ON(!transaction);
|
|
/* If we've had an abort of any type, don't even think about
|
|
* actually doing the restart! */
|
|
if (is_handle_aborted(handle))
|
|
@@ -791,7 +789,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
|
|
int need_copy = 0;
|
|
unsigned long start_lock, time_lock;
|
|
|
|
- WARN_ON(!transaction);
|
|
if (is_handle_aborted(handle))
|
|
return -EROFS;
|
|
journal = transaction->t_journal;
|
|
@@ -1057,7 +1054,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
|
|
int err;
|
|
|
|
jbd_debug(5, "journal_head %p\n", jh);
|
|
- WARN_ON(!transaction);
|
|
err = -EROFS;
|
|
if (is_handle_aborted(handle))
|
|
goto out;
|
|
@@ -1271,7 +1267,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
|
|
struct journal_head *jh;
|
|
int ret = 0;
|
|
|
|
- WARN_ON(!transaction);
|
|
if (is_handle_aborted(handle))
|
|
return -EROFS;
|
|
journal = transaction->t_journal;
|
|
@@ -1407,7 +1402,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
|
|
int err = 0;
|
|
int was_modified = 0;
|
|
|
|
- WARN_ON(!transaction);
|
|
if (is_handle_aborted(handle))
|
|
return -EROFS;
|
|
journal = transaction->t_journal;
|
|
@@ -1538,8 +1532,22 @@ int jbd2_journal_stop(handle_t *handle)
|
|
tid_t tid;
|
|
pid_t pid;
|
|
|
|
- if (!transaction)
|
|
- goto free_and_exit;
|
|
+ if (!transaction) {
|
|
+ /*
|
|
+ * Handle is already detached from the transaction so
|
|
+ * there is nothing to do other than decrease a refcount,
|
|
+ * or free the handle if refcount drops to zero
|
|
+ */
|
|
+ if (--handle->h_ref > 0) {
|
|
+ jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
|
|
+ handle->h_ref);
|
|
+ return err;
|
|
+ } else {
|
|
+ if (handle->h_rsv_handle)
|
|
+ jbd2_free_handle(handle->h_rsv_handle);
|
|
+ goto free_and_exit;
|
|
+ }
|
|
+ }
|
|
journal = transaction->t_journal;
|
|
|
|
J_ASSERT(journal_current_handle() == handle);
|
|
@@ -2381,7 +2389,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
|
|
transaction_t *transaction = handle->h_transaction;
|
|
journal_t *journal;
|
|
|
|
- WARN_ON(!transaction);
|
|
if (is_handle_aborted(handle))
|
|
return -EROFS;
|
|
journal = transaction->t_journal;
|
|
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
|
|
index a69e426..5b234db 100644
|
|
--- a/fs/jffs2/fs.c
|
|
+++ b/fs/jffs2/fs.c
|
|
@@ -687,7 +687,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
|
|
struct inode *inode = OFNI_EDONI_2SFFJ(f);
|
|
struct page *pg;
|
|
|
|
- pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
|
|
+ pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
|
|
(void *)jffs2_do_readpage_unlock, inode);
|
|
if (IS_ERR(pg))
|
|
return (void *)pg;
|
|
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
|
|
index 413ef89..046fee8 100644
|
|
--- a/fs/jffs2/jffs2_fs_sb.h
|
|
+++ b/fs/jffs2/jffs2_fs_sb.h
|
|
@@ -134,8 +134,6 @@ struct jffs2_sb_info {
|
|
struct rw_semaphore wbuf_sem; /* Protects the write buffer */
|
|
|
|
struct delayed_work wbuf_dwork; /* write-buffer write-out work */
|
|
- int wbuf_queued; /* non-zero delayed work is queued */
|
|
- spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */
|
|
|
|
unsigned char *oobbuf;
|
|
int oobavail; /* How many bytes are available for JFFS2 in OOB */
|
|
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
|
|
index 7654e87..9ad5ba4 100644
|
|
--- a/fs/jffs2/scan.c
|
|
+++ b/fs/jffs2/scan.c
|
|
@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
sumlen = c->sector_size - je32_to_cpu(sm->offset);
|
|
sumptr = buf + buf_size - sumlen;
|
|
|
|
+ /* sm->offset maybe wrong but MAGIC maybe right */
|
|
+ if (sumlen > c->sector_size)
|
|
+ goto full_scan;
|
|
+
|
|
/* Now, make sure the summary itself is available */
|
|
if (sumlen > buf_size) {
|
|
/* Need to kmalloc for this. */
|
|
@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
}
|
|
}
|
|
|
|
+full_scan:
|
|
buf_ofs = jeb->offset;
|
|
|
|
if (!buf_size) {
|
|
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
|
|
index a6597d6..09ed551 100644
|
|
--- a/fs/jffs2/wbuf.c
|
|
+++ b/fs/jffs2/wbuf.c
|
|
@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
|
|
struct jffs2_sb_info *c = work_to_sb(work);
|
|
struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
|
|
- spin_lock(&c->wbuf_dwork_lock);
|
|
- c->wbuf_queued = 0;
|
|
- spin_unlock(&c->wbuf_dwork_lock);
|
|
-
|
|
if (!(sb->s_flags & MS_RDONLY)) {
|
|
jffs2_dbg(1, "%s()\n", __func__);
|
|
jffs2_flush_wbuf_gc(c, 0);
|
|
@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
|
|
if (sb->s_flags & MS_RDONLY)
|
|
return;
|
|
|
|
- spin_lock(&c->wbuf_dwork_lock);
|
|
- if (!c->wbuf_queued) {
|
|
+ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
|
|
+ if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
|
|
jffs2_dbg(1, "%s()\n", __func__);
|
|
- delay = msecs_to_jiffies(dirty_writeback_interval * 10);
|
|
- queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
|
|
- c->wbuf_queued = 1;
|
|
- }
|
|
- spin_unlock(&c->wbuf_dwork_lock);
|
|
}
|
|
|
|
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
|
|
@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
|
|
|
|
/* Initialise write buffer */
|
|
init_rwsem(&c->wbuf_sem);
|
|
- spin_lock_init(&c->wbuf_dwork_lock);
|
|
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
c->wbuf_pagesize = c->mtd->writesize;
|
|
c->wbuf_ofs = 0xFFFFFFFF;
|
|
@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
|
|
|
|
/* Initialize write buffer */
|
|
init_rwsem(&c->wbuf_sem);
|
|
- spin_lock_init(&c->wbuf_dwork_lock);
|
|
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
c->wbuf_pagesize = c->mtd->erasesize;
|
|
|
|
@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
|
|
|
|
/* Initialize write buffer */
|
|
init_rwsem(&c->wbuf_sem);
|
|
- spin_lock_init(&c->wbuf_dwork_lock);
|
|
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
|
|
c->wbuf_pagesize = c->mtd->writesize;
|
|
@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
|
|
return 0;
|
|
|
|
init_rwsem(&c->wbuf_sem);
|
|
- spin_lock_init(&c->wbuf_dwork_lock);
|
|
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
|
|
c->wbuf_pagesize = c->mtd->writesize;
|
|
diff --git a/fs/libfs.c b/fs/libfs.c
|
|
index a184424..868c0b7 100644
|
|
--- a/fs/libfs.c
|
|
+++ b/fs/libfs.c
|
|
@@ -113,18 +113,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
|
|
|
|
spin_lock(&dentry->d_lock);
|
|
/* d_lock not required for cursor */
|
|
- list_del(&cursor->d_u.d_child);
|
|
+ list_del(&cursor->d_child);
|
|
p = dentry->d_subdirs.next;
|
|
while (n && p != &dentry->d_subdirs) {
|
|
struct dentry *next;
|
|
- next = list_entry(p, struct dentry, d_u.d_child);
|
|
+ next = list_entry(p, struct dentry, d_child);
|
|
spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
|
|
if (simple_positive(next))
|
|
n--;
|
|
spin_unlock(&next->d_lock);
|
|
p = p->next;
|
|
}
|
|
- list_add_tail(&cursor->d_u.d_child, p);
|
|
+ list_add_tail(&cursor->d_child, p);
|
|
spin_unlock(&dentry->d_lock);
|
|
}
|
|
}
|
|
@@ -149,7 +149,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
|
|
{
|
|
struct dentry *dentry = file->f_path.dentry;
|
|
struct dentry *cursor = file->private_data;
|
|
- struct list_head *p, *q = &cursor->d_u.d_child;
|
|
+ struct list_head *p, *q = &cursor->d_child;
|
|
|
|
if (!dir_emit_dots(file, ctx))
|
|
return 0;
|
|
@@ -158,7 +158,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
|
|
list_move(q, &dentry->d_subdirs);
|
|
|
|
for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
|
|
- struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
|
|
+ struct dentry *next = list_entry(p, struct dentry, d_child);
|
|
spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
|
|
if (!simple_positive(next)) {
|
|
spin_unlock(&next->d_lock);
|
|
@@ -286,7 +286,7 @@ int simple_empty(struct dentry *dentry)
|
|
int ret = 0;
|
|
|
|
spin_lock(&dentry->d_lock);
|
|
- list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
|
|
+ list_for_each_entry(child, &dentry->d_subdirs, d_child) {
|
|
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
|
|
if (simple_positive(child)) {
|
|
spin_unlock(&child->d_lock);
|
|
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
|
|
index 1812f02..6ae664b 100644
|
|
--- a/fs/lockd/mon.c
|
|
+++ b/fs/lockd/mon.c
|
|
@@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
|
|
|
|
msg.rpc_proc = &clnt->cl_procinfo[proc];
|
|
status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
|
|
+ if (status == -ECONNREFUSED) {
|
|
+ dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
|
|
+ status);
|
|
+ rpc_force_rebind(clnt);
|
|
+ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
|
|
+ }
|
|
if (status < 0)
|
|
dprintk("lockd: NSM upcall RPC failed, status=%d\n",
|
|
status);
|
|
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
|
|
index 6bf06a0..59a53f6 100644
|
|
--- a/fs/lockd/svc.c
|
|
+++ b/fs/lockd/svc.c
|
|
@@ -137,10 +137,6 @@ lockd(void *vrqstp)
|
|
|
|
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
|
|
|
|
- if (!nlm_timeout)
|
|
- nlm_timeout = LOCKD_DFLT_TIMEO;
|
|
- nlmsvc_timeout = nlm_timeout * HZ;
|
|
-
|
|
/*
|
|
* The main request loop. We don't terminate until the last
|
|
* NFS mount or NFS daemon has gone away.
|
|
@@ -253,13 +249,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
|
|
|
|
error = make_socks(serv, net);
|
|
if (error < 0)
|
|
- goto err_socks;
|
|
+ goto err_bind;
|
|
set_grace_period(net);
|
|
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
|
|
return 0;
|
|
|
|
-err_socks:
|
|
- svc_rpcb_cleanup(serv, net);
|
|
err_bind:
|
|
ln->nlmsvc_users--;
|
|
return error;
|
|
@@ -348,6 +342,10 @@ static struct svc_serv *lockd_create_svc(void)
|
|
printk(KERN_WARNING
|
|
"lockd_up: no pid, %d users??\n", nlmsvc_users);
|
|
|
|
+ if (!nlm_timeout)
|
|
+ nlm_timeout = LOCKD_DFLT_TIMEO;
|
|
+ nlmsvc_timeout = nlm_timeout * HZ;
|
|
+
|
|
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
|
|
if (!serv) {
|
|
printk(KERN_WARNING "lockd_up: create service failed\n");
|
|
diff --git a/fs/locks.c b/fs/locks.c
|
|
index 4dd39b9..2c61c4e 100644
|
|
--- a/fs/locks.c
|
|
+++ b/fs/locks.c
|
|
@@ -2235,16 +2235,28 @@ void locks_remove_flock(struct file *filp)
|
|
|
|
while ((fl = *before) != NULL) {
|
|
if (fl->fl_file == filp) {
|
|
- if (IS_FLOCK(fl)) {
|
|
- locks_delete_lock(before);
|
|
- continue;
|
|
- }
|
|
if (IS_LEASE(fl)) {
|
|
lease_modify(before, F_UNLCK);
|
|
continue;
|
|
}
|
|
- /* What? */
|
|
- BUG();
|
|
+
|
|
+ /*
|
|
+ * There's a leftover lock on the list of a type that
|
|
+ * we didn't expect to see. Most likely a classic
|
|
+ * POSIX lock that ended up not getting released
|
|
+ * properly, or that raced onto the list somehow. Log
|
|
+ * some info about it and then just remove it from
|
|
+ * the list.
|
|
+ */
|
|
+ WARN(!IS_FLOCK(fl),
|
|
+ "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
|
|
+ MAJOR(inode->i_sb->s_dev),
|
|
+ MINOR(inode->i_sb->s_dev), inode->i_ino,
|
|
+ fl->fl_type, fl->fl_flags,
|
|
+ fl->fl_start, fl->fl_end);
|
|
+
|
|
+ locks_delete_lock(before);
|
|
+ continue;
|
|
}
|
|
before = &fl->fl_next;
|
|
}
|
|
diff --git a/fs/namei.c b/fs/namei.c
|
|
index 8274c8d..c6fa079 100644
|
|
--- a/fs/namei.c
|
|
+++ b/fs/namei.c
|
|
@@ -34,6 +34,7 @@
|
|
#include <linux/device_cgroup.h>
|
|
#include <linux/fs_struct.h>
|
|
#include <linux/posix_acl.h>
|
|
+#include <linux/hash.h>
|
|
#include <asm/uaccess.h>
|
|
|
|
#include "internal.h"
|
|
@@ -641,24 +642,22 @@ static int complete_walk(struct nameidata *nd)
|
|
|
|
static __always_inline void set_root(struct nameidata *nd)
|
|
{
|
|
- if (!nd->root.mnt)
|
|
- get_fs_root(current->fs, &nd->root);
|
|
+ get_fs_root(current->fs, &nd->root);
|
|
}
|
|
|
|
static int link_path_walk(const char *, struct nameidata *);
|
|
|
|
-static __always_inline void set_root_rcu(struct nameidata *nd)
|
|
+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
|
|
{
|
|
- if (!nd->root.mnt) {
|
|
- struct fs_struct *fs = current->fs;
|
|
- unsigned seq;
|
|
+ struct fs_struct *fs = current->fs;
|
|
+ unsigned seq, res;
|
|
|
|
- do {
|
|
- seq = read_seqcount_begin(&fs->seq);
|
|
- nd->root = fs->root;
|
|
- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
|
|
- } while (read_seqcount_retry(&fs->seq, seq));
|
|
- }
|
|
+ do {
|
|
+ seq = read_seqcount_begin(&fs->seq);
|
|
+ nd->root = fs->root;
|
|
+ res = __read_seqcount_begin(&nd->root.dentry->d_seq);
|
|
+ } while (read_seqcount_retry(&fs->seq, seq));
|
|
+ return res;
|
|
}
|
|
|
|
static void path_put_conditional(struct path *path, struct nameidata *nd)
|
|
@@ -858,7 +857,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
|
|
return PTR_ERR(s);
|
|
}
|
|
if (*s == '/') {
|
|
- set_root(nd);
|
|
+ if (!nd->root.mnt)
|
|
+ set_root(nd);
|
|
path_put(&nd->path);
|
|
nd->path = nd->root;
|
|
path_get(&nd->root);
|
|
@@ -1131,7 +1131,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
|
|
|
|
static int follow_dotdot_rcu(struct nameidata *nd)
|
|
{
|
|
- set_root_rcu(nd);
|
|
+ if (!nd->root.mnt)
|
|
+ set_root_rcu(nd);
|
|
|
|
while (1) {
|
|
if (nd->path.dentry == nd->root.dentry &&
|
|
@@ -1243,7 +1244,8 @@ static void follow_mount(struct path *path)
|
|
|
|
static void follow_dotdot(struct nameidata *nd)
|
|
{
|
|
- set_root(nd);
|
|
+ if (!nd->root.mnt)
|
|
+ set_root(nd);
|
|
|
|
while(1) {
|
|
struct dentry *old = nd->path.dentry;
|
|
@@ -1543,7 +1545,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
|
|
|
|
if (should_follow_link(path->dentry, follow)) {
|
|
if (nd->flags & LOOKUP_RCU) {
|
|
- if (unlikely(unlazy_walk(nd, path->dentry))) {
|
|
+ if (unlikely(nd->path.mnt != path->mnt ||
|
|
+ unlazy_walk(nd, path->dentry))) {
|
|
err = -ECHILD;
|
|
goto out_err;
|
|
}
|
|
@@ -1624,8 +1627,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
|
|
|
|
static inline unsigned int fold_hash(unsigned long hash)
|
|
{
|
|
- hash += hash >> (8*sizeof(int));
|
|
- return hash;
|
|
+ return hash_64(hash, 32);
|
|
}
|
|
|
|
#else /* 32-bit case */
|
|
@@ -1797,7 +1799,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
|
|
if (err)
|
|
return err;
|
|
}
|
|
- if (!d_is_directory(nd->path.dentry)) {
|
|
+ if (!d_can_lookup(nd->path.dentry)) {
|
|
err = -ENOTDIR;
|
|
break;
|
|
}
|
|
@@ -1818,7 +1820,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
|
|
struct dentry *root = nd->root.dentry;
|
|
struct inode *inode = root->d_inode;
|
|
if (*name) {
|
|
- if (!d_is_directory(root))
|
|
+ if (!d_can_lookup(root))
|
|
return -ENOTDIR;
|
|
retval = inode_permission(inode, MAY_EXEC);
|
|
if (retval)
|
|
@@ -1842,7 +1844,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
|
|
if (*name=='/') {
|
|
if (flags & LOOKUP_RCU) {
|
|
rcu_read_lock();
|
|
- set_root_rcu(nd);
|
|
+ nd->seq = set_root_rcu(nd);
|
|
} else {
|
|
set_root(nd);
|
|
path_get(&nd->root);
|
|
@@ -1874,7 +1876,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
|
|
dentry = f.file->f_path.dentry;
|
|
|
|
if (*name) {
|
|
- if (!d_is_directory(dentry)) {
|
|
+ if (!d_can_lookup(dentry)) {
|
|
fdput(f);
|
|
return -ENOTDIR;
|
|
}
|
|
@@ -1956,7 +1958,7 @@ static int path_lookupat(int dfd, const char *name,
|
|
err = complete_walk(nd);
|
|
|
|
if (!err && nd->flags & LOOKUP_DIRECTORY) {
|
|
- if (!d_is_directory(nd->path.dentry)) {
|
|
+ if (!d_can_lookup(nd->path.dentry)) {
|
|
path_put(&nd->path);
|
|
err = -ENOTDIR;
|
|
}
|
|
@@ -2247,9 +2249,10 @@ done:
|
|
goto out;
|
|
}
|
|
path->dentry = dentry;
|
|
- path->mnt = mntget(nd->path.mnt);
|
|
+ path->mnt = nd->path.mnt;
|
|
if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
|
|
return 1;
|
|
+ mntget(path->mnt);
|
|
follow_mount(path);
|
|
error = 0;
|
|
out:
|
|
@@ -2415,11 +2418,11 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
|
|
IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
|
|
return -EPERM;
|
|
if (isdir) {
|
|
- if (!d_is_directory(victim) && !d_is_autodir(victim))
|
|
+ if (!d_is_dir(victim))
|
|
return -ENOTDIR;
|
|
if (IS_ROOT(victim))
|
|
return -EBUSY;
|
|
- } else if (d_is_directory(victim) || d_is_autodir(victim))
|
|
+ } else if (d_is_dir(victim))
|
|
return -EISDIR;
|
|
if (IS_DEADDIR(dir))
|
|
return -ENOENT;
|
|
@@ -2990,7 +2993,8 @@ finish_lookup:
|
|
|
|
if (should_follow_link(path->dentry, !symlink_ok)) {
|
|
if (nd->flags & LOOKUP_RCU) {
|
|
- if (unlikely(unlazy_walk(nd, path->dentry))) {
|
|
+ if (unlikely(nd->path.mnt != path->mnt ||
|
|
+ unlazy_walk(nd, path->dentry))) {
|
|
error = -ECHILD;
|
|
goto out;
|
|
}
|
|
@@ -3017,11 +3021,10 @@ finish_open:
|
|
}
|
|
audit_inode(name, nd->path.dentry, 0);
|
|
error = -EISDIR;
|
|
- if ((open_flag & O_CREAT) &&
|
|
- (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
|
|
+ if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
|
|
goto out;
|
|
error = -ENOTDIR;
|
|
- if ((nd->flags & LOOKUP_DIRECTORY) && !d_is_directory(nd->path.dentry))
|
|
+ if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
|
|
goto out;
|
|
if (!S_ISREG(nd->inode->i_mode))
|
|
will_truncate = false;
|
|
@@ -3127,7 +3130,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
|
|
if (error)
|
|
goto out2;
|
|
audit_inode(pathname, nd->path.dentry, 0);
|
|
- error = may_open(&nd->path, op->acc_mode, op->open_flag);
|
|
+ /* Don't check for other permissions, the inode was just created */
|
|
+ error = may_open(&nd->path, MAY_OPEN, op->open_flag);
|
|
if (error)
|
|
goto out2;
|
|
file->f_path.mnt = nd->path.mnt;
|
|
@@ -3167,7 +3171,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
|
|
|
|
if (unlikely(file->f_flags & __O_TMPFILE)) {
|
|
error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
|
|
- goto out;
|
|
+ goto out2;
|
|
}
|
|
|
|
error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
|
|
@@ -3205,6 +3209,7 @@ out:
|
|
path_put(&nd->root);
|
|
if (base)
|
|
fput(base);
|
|
+out2:
|
|
if (!(opened & FILE_OPENED)) {
|
|
BUG_ON(!error);
|
|
put_filp(file);
|
|
@@ -3745,7 +3750,7 @@ exit1:
|
|
slashes:
|
|
if (d_is_negative(dentry))
|
|
error = -ENOENT;
|
|
- else if (d_is_directory(dentry) || d_is_autodir(dentry))
|
|
+ else if (d_is_dir(dentry))
|
|
error = -EISDIR;
|
|
else
|
|
error = -ENOTDIR;
|
|
@@ -4124,7 +4129,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
struct inode **delegated_inode)
|
|
{
|
|
int error;
|
|
- int is_dir = d_is_directory(old_dentry) || d_is_autodir(old_dentry);
|
|
+ int is_dir = d_is_dir(old_dentry);
|
|
const unsigned char *old_name;
|
|
|
|
if (old_dentry->d_inode == new_dentry->d_inode)
|
|
@@ -4217,7 +4222,7 @@ retry_deleg:
|
|
if (d_is_negative(old_dentry))
|
|
goto exit4;
|
|
/* unless the source is a directory trailing slashes give -ENOTDIR */
|
|
- if (!d_is_directory(old_dentry) && !d_is_autodir(old_dentry)) {
|
|
+ if (!d_is_dir(old_dentry)) {
|
|
error = -ENOTDIR;
|
|
if (oldnd.last.name[oldnd.last.len])
|
|
goto exit4;
|
|
diff --git a/fs/namespace.c b/fs/namespace.c
|
|
index 65233a5..fc99d18 100644
|
|
--- a/fs/namespace.c
|
|
+++ b/fs/namespace.c
|
|
@@ -777,6 +777,20 @@ static void attach_mnt(struct mount *mnt,
|
|
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
|
|
}
|
|
|
|
+static void attach_shadowed(struct mount *mnt,
|
|
+ struct mount *parent,
|
|
+ struct mount *shadows)
|
|
+{
|
|
+ if (shadows) {
|
|
+ hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
|
|
+ list_add(&mnt->mnt_child, &shadows->mnt_child);
|
|
+ } else {
|
|
+ hlist_add_head_rcu(&mnt->mnt_hash,
|
|
+ m_hash(&parent->mnt, mnt->mnt_mountpoint));
|
|
+ list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
|
|
+ }
|
|
+}
|
|
+
|
|
/*
|
|
* vfsmount lock must be held for write
|
|
*/
|
|
@@ -795,12 +809,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
|
|
|
|
list_splice(&head, n->list.prev);
|
|
|
|
- if (shadows)
|
|
- hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
|
|
- else
|
|
- hlist_add_head_rcu(&mnt->mnt_hash,
|
|
- m_hash(&parent->mnt, mnt->mnt_mountpoint));
|
|
- list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
|
|
+ attach_shadowed(mnt, parent, shadows);
|
|
touch_mnt_namespace(n);
|
|
}
|
|
|
|
@@ -887,8 +896,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
|
|
|
|
mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
|
|
/* Don't allow unprivileged users to change mount flags */
|
|
- if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
|
|
- mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
|
|
+ if (flag & CL_UNPRIVILEGED) {
|
|
+ mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
|
|
+
|
|
+ if (mnt->mnt.mnt_flags & MNT_READONLY)
|
|
+ mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
|
|
+
|
|
+ if (mnt->mnt.mnt_flags & MNT_NODEV)
|
|
+ mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
|
|
+
|
|
+ if (mnt->mnt.mnt_flags & MNT_NOSUID)
|
|
+ mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
|
|
+
|
|
+ if (mnt->mnt.mnt_flags & MNT_NOEXEC)
|
|
+ mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
|
|
+ }
|
|
|
|
/* Don't allow unprivileged users to reveal what is under a mount */
|
|
if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
|
|
@@ -1204,6 +1226,11 @@ static void namespace_unlock(void)
|
|
head.first->pprev = &head.first;
|
|
INIT_HLIST_HEAD(&unmounted);
|
|
|
|
+ /* undo decrements we'd done in umount_tree() */
|
|
+ hlist_for_each_entry(mnt, &head, mnt_hash)
|
|
+ if (mnt->mnt_ex_mountpoint.mnt)
|
|
+ mntget(mnt->mnt_ex_mountpoint.mnt);
|
|
+
|
|
up_write(&namespace_sem);
|
|
|
|
synchronize_rcu();
|
|
@@ -1240,6 +1267,9 @@ void umount_tree(struct mount *mnt, int how)
|
|
hlist_add_head(&p->mnt_hash, &tmp_list);
|
|
}
|
|
|
|
+ hlist_for_each_entry(p, &tmp_list, mnt_hash)
|
|
+ list_del_init(&p->mnt_child);
|
|
+
|
|
if (how)
|
|
propagate_umount(&tmp_list);
|
|
|
|
@@ -1250,9 +1280,9 @@ void umount_tree(struct mount *mnt, int how)
|
|
p->mnt_ns = NULL;
|
|
if (how < 2)
|
|
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
|
|
- list_del_init(&p->mnt_child);
|
|
if (mnt_has_parent(p)) {
|
|
put_mountpoint(p->mnt_mp);
|
|
+ mnt_add_count(p->mnt_parent, -1);
|
|
/* move the reference to mountpoint into ->mnt_ex_mountpoint */
|
|
p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
|
|
p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
|
|
@@ -1265,6 +1295,8 @@ void umount_tree(struct mount *mnt, int how)
|
|
}
|
|
if (last) {
|
|
last->mnt_hash.next = unmounted.first;
|
|
+ if (unmounted.first)
|
|
+ unmounted.first->pprev = &last->mnt_hash.next;
|
|
unmounted.first = tmp_list.first;
|
|
unmounted.first->pprev = &unmounted.first;
|
|
}
|
|
@@ -1335,6 +1367,8 @@ static int do_umount(struct mount *mnt, int flags)
|
|
* Special case for "unmounting" root ...
|
|
* we just try to remount it readonly.
|
|
*/
|
|
+ if (!capable(CAP_SYS_ADMIN))
|
|
+ return -EPERM;
|
|
down_write(&sb->s_umount);
|
|
if (!(sb->s_flags & MS_RDONLY))
|
|
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
|
|
@@ -1407,6 +1441,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
|
|
goto dput_and_out;
|
|
if (mnt->mnt.mnt_flags & MNT_LOCKED)
|
|
goto dput_and_out;
|
|
+ retval = -EPERM;
|
|
+ if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
|
|
+ goto dput_and_out;
|
|
|
|
retval = do_umount(mnt, flags);
|
|
dput_and_out:
|
|
@@ -1483,6 +1520,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
|
|
continue;
|
|
|
|
for (s = r; s; s = next_mnt(s, r)) {
|
|
+ struct mount *t = NULL;
|
|
if (!(flag & CL_COPY_UNBINDABLE) &&
|
|
IS_MNT_UNBINDABLE(s)) {
|
|
s = skip_mnt_tree(s);
|
|
@@ -1504,7 +1542,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
|
|
goto out;
|
|
lock_mount_hash();
|
|
list_add_tail(&q->mnt_list, &res->mnt_list);
|
|
- attach_mnt(q, parent, p->mnt_mp);
|
|
+ mnt_set_mountpoint(parent, p->mnt_mp, q);
|
|
+ if (!list_empty(&parent->mnt_mounts)) {
|
|
+ t = list_last_entry(&parent->mnt_mounts,
|
|
+ struct mount, mnt_child);
|
|
+ if (t->mnt_mp != p->mnt_mp)
|
|
+ t = NULL;
|
|
+ }
|
|
+ attach_shadowed(q, parent, t);
|
|
unlock_mount_hash();
|
|
}
|
|
}
|
|
@@ -1887,9 +1932,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
|
|
if (readonly_request == __mnt_is_readonly(mnt))
|
|
return 0;
|
|
|
|
- if (mnt->mnt_flags & MNT_LOCK_READONLY)
|
|
- return -EPERM;
|
|
-
|
|
if (readonly_request)
|
|
error = mnt_make_readonly(real_mount(mnt));
|
|
else
|
|
@@ -1915,6 +1957,39 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
|
|
if (path->dentry != path->mnt->mnt_root)
|
|
return -EINVAL;
|
|
|
|
+ /* Don't allow changing of locked mnt flags.
|
|
+ *
|
|
+ * No locks need to be held here while testing the various
|
|
+ * MNT_LOCK flags because those flags can never be cleared
|
|
+ * once they are set.
|
|
+ */
|
|
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
|
|
+ !(mnt_flags & MNT_READONLY)) {
|
|
+ return -EPERM;
|
|
+ }
|
|
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
|
|
+ !(mnt_flags & MNT_NODEV)) {
|
|
+ /* Was the nodev implicitly added in mount? */
|
|
+ if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
|
|
+ !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
|
|
+ mnt_flags |= MNT_NODEV;
|
|
+ } else {
|
|
+ return -EPERM;
|
|
+ }
|
|
+ }
|
|
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
|
|
+ !(mnt_flags & MNT_NOSUID)) {
|
|
+ return -EPERM;
|
|
+ }
|
|
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
|
|
+ !(mnt_flags & MNT_NOEXEC)) {
|
|
+ return -EPERM;
|
|
+ }
|
|
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
|
|
+ ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
err = security_sb_remount(sb, data);
|
|
if (err)
|
|
return err;
|
|
@@ -1928,7 +2003,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
|
|
err = do_remount_sb(sb, flags, data, 0);
|
|
if (!err) {
|
|
lock_mount_hash();
|
|
- mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
|
|
+ mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
|
|
mnt->mnt.mnt_flags = mnt_flags;
|
|
touch_mnt_namespace(mnt->mnt_ns);
|
|
unlock_mount_hash();
|
|
@@ -2113,7 +2188,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
|
|
*/
|
|
if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
|
|
flags |= MS_NODEV;
|
|
- mnt_flags |= MNT_NODEV;
|
|
+ mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
|
|
}
|
|
}
|
|
|
|
@@ -2427,6 +2502,14 @@ long do_mount(const char *dev_name, const char *dir_name,
|
|
if (flags & MS_RDONLY)
|
|
mnt_flags |= MNT_READONLY;
|
|
|
|
+ /* The default atime for remount is preservation */
|
|
+ if ((flags & MS_REMOUNT) &&
|
|
+ ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
|
|
+ MS_STRICTATIME)) == 0)) {
|
|
+ mnt_flags &= ~MNT_ATIME_MASK;
|
|
+ mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
|
|
+ }
|
|
+
|
|
flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
|
|
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
|
|
MS_STRICTATIME);
|
|
@@ -2759,6 +2842,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
|
|
/* make sure we can reach put_old from new_root */
|
|
if (!is_path_reachable(old_mnt, old.dentry, &new))
|
|
goto out4;
|
|
+ /* make certain new is below the root */
|
|
+ if (!is_path_reachable(new_mnt, new.dentry, &root))
|
|
+ goto out4;
|
|
root_mp->m_count++; /* pin it so it won't go away */
|
|
lock_mount_hash();
|
|
detach_mnt(new_mnt, &parent_path);
|
|
@@ -2939,11 +3025,21 @@ bool fs_fully_visible(struct file_system_type *type)
|
|
if (mnt->mnt.mnt_sb->s_type != type)
|
|
continue;
|
|
|
|
- /* This mount is not fully visible if there are any child mounts
|
|
- * that cover anything except for empty directories.
|
|
+ /* This mount is not fully visible if it's root directory
|
|
+ * is not the root directory of the filesystem.
|
|
+ */
|
|
+ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
|
|
+ continue;
|
|
+
|
|
+ /* This mount is not fully visible if there are any
|
|
+ * locked child mounts that cover anything except for
|
|
+ * empty directories.
|
|
*/
|
|
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
|
|
struct inode *inode = child->mnt_mountpoint->d_inode;
|
|
+ /* Only worry about locked mounts */
|
|
+ if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
|
|
+ continue;
|
|
if (!S_ISDIR(inode->i_mode))
|
|
goto next;
|
|
if (inode->i_nlink > 2)
|
|
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
|
|
index c320ac5..dc9747d 100644
|
|
--- a/fs/ncpfs/dir.c
|
|
+++ b/fs/ncpfs/dir.c
|
|
@@ -406,7 +406,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
|
|
spin_lock(&parent->d_lock);
|
|
next = parent->d_subdirs.next;
|
|
while (next != &parent->d_subdirs) {
|
|
- dent = list_entry(next, struct dentry, d_u.d_child);
|
|
+ dent = list_entry(next, struct dentry, d_child);
|
|
if ((unsigned long)dent->d_fsdata == fpos) {
|
|
if (dent->d_inode)
|
|
dget(dent);
|
|
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
|
|
index 60426cc..2f970de 100644
|
|
--- a/fs/ncpfs/ioctl.c
|
|
+++ b/fs/ncpfs/ioctl.c
|
|
@@ -448,7 +448,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
|
|
result = -EIO;
|
|
}
|
|
}
|
|
- result = 0;
|
|
}
|
|
mutex_unlock(&server->root_setup_lock);
|
|
|
|
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
|
|
index 32c0658..6d5e7c5 100644
|
|
--- a/fs/ncpfs/ncplib_kernel.h
|
|
+++ b/fs/ncpfs/ncplib_kernel.h
|
|
@@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent)
|
|
spin_lock(&parent->d_lock);
|
|
next = parent->d_subdirs.next;
|
|
while (next != &parent->d_subdirs) {
|
|
- dentry = list_entry(next, struct dentry, d_u.d_child);
|
|
+ dentry = list_entry(next, struct dentry, d_child);
|
|
|
|
if (dentry->d_fsdata == NULL)
|
|
ncp_age_dentry(server, dentry);
|
|
@@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
|
|
spin_lock(&parent->d_lock);
|
|
next = parent->d_subdirs.next;
|
|
while (next != &parent->d_subdirs) {
|
|
- dentry = list_entry(next, struct dentry, d_u.d_child);
|
|
+ dentry = list_entry(next, struct dentry, d_child);
|
|
dentry->d_fsdata = NULL;
|
|
ncp_age_dentry(server, dentry);
|
|
next = next->next;
|
|
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
|
|
index 56ff823..65d849b 100644
|
|
--- a/fs/nfs/blocklayout/blocklayout.c
|
|
+++ b/fs/nfs/blocklayout/blocklayout.c
|
|
@@ -1213,7 +1213,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
|
|
end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
|
|
if (end != NFS_I(inode)->npages) {
|
|
rcu_read_lock();
|
|
- end = radix_tree_next_hole(&mapping->page_tree, idx + 1, ULONG_MAX);
|
|
+ end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
|
|
index 073b4cf..0a2016b 100644
|
|
--- a/fs/nfs/callback.c
|
|
+++ b/fs/nfs/callback.c
|
|
@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp)
|
|
if (try_to_freeze())
|
|
continue;
|
|
|
|
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
|
|
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
|
|
spin_lock_bh(&serv->sv_cb_lock);
|
|
if (!list_empty(&serv->sv_cb_list)) {
|
|
req = list_first_entry(&serv->sv_cb_list,
|
|
struct rpc_rqst, rq_bc_list);
|
|
list_del(&req->rq_bc_list);
|
|
spin_unlock_bh(&serv->sv_cb_lock);
|
|
+ finish_wait(&serv->sv_cb_waitq, &wq);
|
|
dprintk("Invoking bc_svc_process()\n");
|
|
error = bc_svc_process(serv, req, rqstp);
|
|
dprintk("bc_svc_process() returned w/ error code= %d\n",
|
|
error);
|
|
} else {
|
|
spin_unlock_bh(&serv->sv_cb_lock);
|
|
- schedule();
|
|
+ /* schedule_timeout to game the hung task watchdog */
|
|
+ schedule_timeout(60 * HZ);
|
|
+ finish_wait(&serv->sv_cb_waitq, &wq);
|
|
}
|
|
- finish_wait(&serv->sv_cb_waitq, &wq);
|
|
}
|
|
return 0;
|
|
}
|
|
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
|
|
index f4ccfe6..02f8d09 100644
|
|
--- a/fs/nfs/callback_xdr.c
|
|
+++ b/fs/nfs/callback_xdr.c
|
|
@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
|
|
|
|
for (i = 0; i < args->csa_nrclists; i++) {
|
|
status = decode_rc_list(xdr, &args->csa_rclists[i]);
|
|
- if (status)
|
|
+ if (status) {
|
|
+ args->csa_nrclists = i;
|
|
goto out_free;
|
|
+ }
|
|
}
|
|
}
|
|
status = 0;
|
|
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
|
|
index 5d8ccec..2ea3537 100644
|
|
--- a/fs/nfs/delegation.c
|
|
+++ b/fs/nfs/delegation.c
|
|
@@ -109,6 +109,8 @@ again:
|
|
continue;
|
|
if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
|
|
continue;
|
|
+ if (!nfs4_valid_open_stateid(state))
|
|
+ continue;
|
|
if (!nfs4_stateid_match(&state->stateid, stateid))
|
|
continue;
|
|
get_nfs_open_context(ctx);
|
|
@@ -159,8 +161,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
|
|
&delegation->flags);
|
|
NFS_I(inode)->delegation_state = delegation->type;
|
|
spin_unlock(&delegation->lock);
|
|
- put_rpccred(oldcred);
|
|
rcu_read_unlock();
|
|
+ put_rpccred(oldcred);
|
|
trace_nfs4_reclaim_delegation(inode, res->delegation_type);
|
|
} else {
|
|
/* We appear to have raced with a delegation return. */
|
|
@@ -177,7 +179,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
|
|
{
|
|
int res = 0;
|
|
|
|
- res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
|
|
+ if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
|
|
+ res = nfs4_proc_delegreturn(inode,
|
|
+ delegation->cred,
|
|
+ &delegation->stateid,
|
|
+ issync);
|
|
nfs_free_delegation(delegation);
|
|
return res;
|
|
}
|
|
@@ -364,11 +370,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
|
|
{
|
|
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
- int err;
|
|
+ int err = 0;
|
|
|
|
if (delegation == NULL)
|
|
return 0;
|
|
do {
|
|
+ if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
|
|
+ break;
|
|
err = nfs_delegation_claim_opens(inode, &delegation->stateid);
|
|
if (!issync || err != -EAGAIN)
|
|
break;
|
|
@@ -589,10 +597,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
+static void nfs_revoke_delegation(struct inode *inode)
|
|
+{
|
|
+ struct nfs_delegation *delegation;
|
|
+ rcu_read_lock();
|
|
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
|
|
+ if (delegation != NULL) {
|
|
+ set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
|
|
+ nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+}
|
|
+
|
|
void nfs_remove_bad_delegation(struct inode *inode)
|
|
{
|
|
struct nfs_delegation *delegation;
|
|
|
|
+ nfs_revoke_delegation(inode);
|
|
delegation = nfs_inode_detach_delegation(inode);
|
|
if (delegation) {
|
|
nfs_inode_find_state_and_recover(inode, &delegation->stateid);
|
|
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
|
|
index 9a79c7a..e02b090 100644
|
|
--- a/fs/nfs/delegation.h
|
|
+++ b/fs/nfs/delegation.h
|
|
@@ -31,6 +31,7 @@ enum {
|
|
NFS_DELEGATION_RETURN_IF_CLOSED,
|
|
NFS_DELEGATION_REFERENCED,
|
|
NFS_DELEGATION_RETURNING,
|
|
+ NFS_DELEGATION_REVOKED,
|
|
};
|
|
|
|
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
|
|
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
|
|
index b8797ae..7ececa1 100644
|
|
--- a/fs/nfs/direct.c
|
|
+++ b/fs/nfs/direct.c
|
|
@@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
|
|
*/
|
|
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
|
|
{
|
|
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
|
|
+
|
|
+ /* we only support swap file calling nfs_direct_IO */
|
|
+ if (!IS_SWAPFILE(inode))
|
|
+ return 0;
|
|
+
|
|
#ifndef CONFIG_NFS_SWAP
|
|
dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
|
|
iocb->ki_filp, (long long) pos, nr_segs);
|
|
@@ -178,6 +184,7 @@ static void nfs_direct_req_free(struct kref *kref)
|
|
{
|
|
struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
|
|
|
|
+ nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
|
|
if (dreq->l_ctx != NULL)
|
|
nfs_put_lock_context(dreq->l_ctx);
|
|
if (dreq->ctx != NULL)
|
|
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
|
|
index 66984a9..5b8ab0e 100644
|
|
--- a/fs/nfs/getroot.c
|
|
+++ b/fs/nfs/getroot.c
|
|
@@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
|
|
*/
|
|
spin_lock(&sb->s_root->d_inode->i_lock);
|
|
spin_lock(&sb->s_root->d_lock);
|
|
- hlist_del_init(&sb->s_root->d_alias);
|
|
+ hlist_del_init(&sb->s_root->d_u.d_alias);
|
|
spin_unlock(&sb->s_root->d_lock);
|
|
spin_unlock(&sb->s_root->d_inode->i_lock);
|
|
}
|
|
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
|
|
index 15f9d98..6659ce5 100644
|
|
--- a/fs/nfs/inode.c
|
|
+++ b/fs/nfs/inode.c
|
|
@@ -592,7 +592,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
|
|
{
|
|
struct inode *inode = dentry->d_inode;
|
|
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
|
|
- int err;
|
|
+ int err = 0;
|
|
|
|
trace_nfs_getattr_enter(inode);
|
|
/* Flush out writes to the server in order to update c/mtime. */
|
|
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
|
|
index 871d6ed..24c6898 100644
|
|
--- a/fs/nfs/nfs3acl.c
|
|
+++ b/fs/nfs/nfs3acl.c
|
|
@@ -129,7 +129,10 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
|
|
.rpc_argp = &args,
|
|
.rpc_resp = &fattr,
|
|
};
|
|
- int status;
|
|
+ int status = 0;
|
|
+
|
|
+ if (acl == NULL && (!S_ISDIR(inode->i_mode) || dfacl == NULL))
|
|
+ goto out;
|
|
|
|
status = -EOPNOTSUPP;
|
|
if (!nfs_server_capable(inode, NFS_CAP_ACLS))
|
|
@@ -247,3 +250,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
|
|
&posix_acl_default_xattr_handler,
|
|
NULL,
|
|
};
|
|
+
|
|
+static int
|
|
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
|
|
+ size_t size, ssize_t *result)
|
|
+{
|
|
+ struct posix_acl *acl;
|
|
+ char *p = data + *result;
|
|
+
|
|
+ acl = get_acl(inode, type);
|
|
+ if (IS_ERR_OR_NULL(acl))
|
|
+ return 0;
|
|
+
|
|
+ posix_acl_release(acl);
|
|
+
|
|
+ *result += strlen(name);
|
|
+ *result += 1;
|
|
+ if (!size)
|
|
+ return 0;
|
|
+ if (*result > size)
|
|
+ return -ERANGE;
|
|
+
|
|
+ strcpy(p, name);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+ssize_t
|
|
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
|
|
+{
|
|
+ struct inode *inode = dentry->d_inode;
|
|
+ ssize_t result = 0;
|
|
+ int error;
|
|
+
|
|
+ error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
|
|
+ POSIX_ACL_XATTR_ACCESS, data, size, &result);
|
|
+ if (error)
|
|
+ return error;
|
|
+
|
|
+ error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
|
|
+ POSIX_ACL_XATTR_DEFAULT, data, size, &result);
|
|
+ if (error)
|
|
+ return error;
|
|
+ return result;
|
|
+}
|
|
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
|
|
index a462ef0..8a18b4a 100644
|
|
--- a/fs/nfs/nfs3proc.c
|
|
+++ b/fs/nfs/nfs3proc.c
|
|
@@ -926,7 +926,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
|
|
.getattr = nfs_getattr,
|
|
.setattr = nfs_setattr,
|
|
#ifdef CONFIG_NFS_V3_ACL
|
|
- .listxattr = generic_listxattr,
|
|
+ .listxattr = nfs3_listxattr,
|
|
.getxattr = generic_getxattr,
|
|
.setxattr = generic_setxattr,
|
|
.removexattr = generic_removexattr,
|
|
@@ -940,7 +940,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
|
|
.getattr = nfs_getattr,
|
|
.setattr = nfs_setattr,
|
|
#ifdef CONFIG_NFS_V3_ACL
|
|
- .listxattr = generic_listxattr,
|
|
+ .listxattr = nfs3_listxattr,
|
|
.getxattr = generic_getxattr,
|
|
.setxattr = generic_setxattr,
|
|
.removexattr = generic_removexattr,
|
|
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
|
|
index fa6d721..4495cad 100644
|
|
--- a/fs/nfs/nfs3xdr.c
|
|
+++ b/fs/nfs/nfs3xdr.c
|
|
@@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
|
|
if (args->npages != 0)
|
|
xdr_write_pages(xdr, args->pages, 0, args->len);
|
|
else
|
|
- xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
|
|
+ xdr_reserve_space(xdr, args->len);
|
|
|
|
error = nfsacl_encode(xdr->buf, base, args->inode,
|
|
(args->mask & NFS_ACL) ?
|
|
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
|
|
index 0e46d3d..d3f6062 100644
|
|
--- a/fs/nfs/nfs4client.c
|
|
+++ b/fs/nfs/nfs4client.c
|
|
@@ -482,6 +482,16 @@ int nfs40_walk_client_list(struct nfs_client *new,
|
|
|
|
spin_lock(&nn->nfs_client_lock);
|
|
list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
|
|
+
|
|
+ if (pos->rpc_ops != new->rpc_ops)
|
|
+ continue;
|
|
+
|
|
+ if (pos->cl_proto != new->cl_proto)
|
|
+ continue;
|
|
+
|
|
+ if (pos->cl_minorversion != new->cl_minorversion)
|
|
+ continue;
|
|
+
|
|
/* If "pos" isn't marked ready, we can't trust the
|
|
* remaining fields in "pos" */
|
|
if (pos->cl_cons_state > NFS_CS_READY) {
|
|
@@ -501,15 +511,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
|
|
if (pos->cl_cons_state != NFS_CS_READY)
|
|
continue;
|
|
|
|
- if (pos->rpc_ops != new->rpc_ops)
|
|
- continue;
|
|
-
|
|
- if (pos->cl_proto != new->cl_proto)
|
|
- continue;
|
|
-
|
|
- if (pos->cl_minorversion != new->cl_minorversion)
|
|
- continue;
|
|
-
|
|
if (pos->cl_clientid != new->cl_clientid)
|
|
continue;
|
|
|
|
@@ -564,20 +565,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
|
|
}
|
|
|
|
/*
|
|
- * Returns true if the server owners match
|
|
+ * Returns true if the server major ids match
|
|
*/
|
|
static bool
|
|
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
|
|
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
|
|
{
|
|
struct nfs41_server_owner *o1 = a->cl_serverowner;
|
|
struct nfs41_server_owner *o2 = b->cl_serverowner;
|
|
|
|
- if (o1->minor_id != o2->minor_id) {
|
|
- dprintk("NFS: --> %s server owner minor IDs do not match\n",
|
|
- __func__);
|
|
- return false;
|
|
- }
|
|
-
|
|
if (o1->major_id_sz != o2->major_id_sz)
|
|
goto out_major_mismatch;
|
|
if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
|
|
@@ -615,6 +610,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
|
|
|
|
spin_lock(&nn->nfs_client_lock);
|
|
list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
|
|
+
|
|
+ if (pos->rpc_ops != new->rpc_ops)
|
|
+ continue;
|
|
+
|
|
+ if (pos->cl_proto != new->cl_proto)
|
|
+ continue;
|
|
+
|
|
+ if (pos->cl_minorversion != new->cl_minorversion)
|
|
+ continue;
|
|
+
|
|
/* If "pos" isn't marked ready, we can't trust the
|
|
* remaining fields in "pos", especially the client
|
|
* ID and serverowner fields. Wait for CREATE_SESSION
|
|
@@ -628,7 +633,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
|
|
prev = pos;
|
|
|
|
status = nfs_wait_client_init_complete(pos);
|
|
- if (status == 0) {
|
|
+ if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
|
|
nfs4_schedule_lease_recovery(pos);
|
|
status = nfs4_wait_clnt_recover(pos);
|
|
}
|
|
@@ -640,19 +645,15 @@ int nfs41_walk_client_list(struct nfs_client *new,
|
|
if (pos->cl_cons_state != NFS_CS_READY)
|
|
continue;
|
|
|
|
- if (pos->rpc_ops != new->rpc_ops)
|
|
- continue;
|
|
-
|
|
- if (pos->cl_proto != new->cl_proto)
|
|
- continue;
|
|
-
|
|
- if (pos->cl_minorversion != new->cl_minorversion)
|
|
- continue;
|
|
-
|
|
if (!nfs4_match_clientids(pos, new))
|
|
continue;
|
|
|
|
- if (!nfs4_match_serverowners(pos, new))
|
|
+ /*
|
|
+ * Note that session trunking is just a special subcase of
|
|
+ * client id trunking. In either case, we want to fall back
|
|
+ * to using the existing nfs_client.
|
|
+ */
|
|
+ if (!nfs4_check_clientid_trunking(pos, new))
|
|
continue;
|
|
|
|
atomic_inc(&pos->cl_count);
|
|
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
|
index d5d06e8..58258ad 100644
|
|
--- a/fs/nfs/nfs4proc.c
|
|
+++ b/fs/nfs/nfs4proc.c
|
|
@@ -1587,7 +1587,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
|
|
nfs_inode_find_state_and_recover(state->inode,
|
|
stateid);
|
|
nfs4_schedule_stateid_recovery(server, state);
|
|
- return 0;
|
|
+ return -EAGAIN;
|
|
case -NFS4ERR_DELAY:
|
|
case -NFS4ERR_GRACE:
|
|
set_bit(NFS_DELEGATED_STATE, &state->flags);
|
|
@@ -2034,46 +2034,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
|
|
return ret;
|
|
}
|
|
|
|
+static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
|
|
+{
|
|
+ nfs_remove_bad_delegation(state->inode);
|
|
+ write_seqlock(&state->seqlock);
|
|
+ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
|
|
+ write_sequnlock(&state->seqlock);
|
|
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
|
|
+}
|
|
+
|
|
+static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
|
|
+{
|
|
+ if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
|
|
+ nfs_finish_clear_delegation_stateid(state);
|
|
+}
|
|
+
|
|
+static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
|
+{
|
|
+ /* NFSv4.0 doesn't allow for delegation recovery on open expire */
|
|
+ nfs40_clear_delegation_stateid(state);
|
|
+ return nfs4_open_expired(sp, state);
|
|
+}
|
|
+
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
-static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
|
|
+static void nfs41_check_delegation_stateid(struct nfs4_state *state)
|
|
{
|
|
struct nfs_server *server = NFS_SERVER(state->inode);
|
|
- nfs4_stateid *stateid = &state->stateid;
|
|
+ nfs4_stateid stateid;
|
|
struct nfs_delegation *delegation;
|
|
- struct rpc_cred *cred = NULL;
|
|
- int status = -NFS4ERR_BAD_STATEID;
|
|
-
|
|
- /* If a state reset has been done, test_stateid is unneeded */
|
|
- if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
|
|
- return;
|
|
+ struct rpc_cred *cred;
|
|
+ int status;
|
|
|
|
/* Get the delegation credential for use by test/free_stateid */
|
|
rcu_read_lock();
|
|
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
|
|
- if (delegation != NULL &&
|
|
- nfs4_stateid_match(&delegation->stateid, stateid)) {
|
|
- cred = get_rpccred(delegation->cred);
|
|
- rcu_read_unlock();
|
|
- status = nfs41_test_stateid(server, stateid, cred);
|
|
- trace_nfs4_test_delegation_stateid(state, NULL, status);
|
|
- } else
|
|
+ if (delegation == NULL) {
|
|
rcu_read_unlock();
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
|
|
+ cred = get_rpccred(delegation->cred);
|
|
+ rcu_read_unlock();
|
|
+ status = nfs41_test_stateid(server, &stateid, cred);
|
|
+ trace_nfs4_test_delegation_stateid(state, NULL, status);
|
|
|
|
if (status != NFS_OK) {
|
|
/* Free the stateid unless the server explicitly
|
|
* informs us the stateid is unrecognized. */
|
|
if (status != -NFS4ERR_BAD_STATEID)
|
|
- nfs41_free_stateid(server, stateid, cred);
|
|
- nfs_remove_bad_delegation(state->inode);
|
|
-
|
|
- write_seqlock(&state->seqlock);
|
|
- nfs4_stateid_copy(&state->stateid, &state->open_stateid);
|
|
- write_sequnlock(&state->seqlock);
|
|
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
|
|
+ nfs41_free_stateid(server, &stateid, cred);
|
|
+ nfs_finish_clear_delegation_stateid(state);
|
|
}
|
|
|
|
- if (cred != NULL)
|
|
- put_rpccred(cred);
|
|
+ put_rpccred(cred);
|
|
}
|
|
|
|
/**
|
|
@@ -2117,7 +2131,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
|
|
{
|
|
int status;
|
|
|
|
- nfs41_clear_delegation_stateid(state);
|
|
+ nfs41_check_delegation_stateid(state);
|
|
status = nfs41_check_open_stateid(state);
|
|
if (status != NFS_OK)
|
|
status = nfs4_open_expired(sp, state);
|
|
@@ -2546,6 +2560,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
|
|
struct nfs4_closedata *calldata = data;
|
|
struct nfs4_state *state = calldata->state;
|
|
struct inode *inode = calldata->inode;
|
|
+ bool is_rdonly, is_wronly, is_rdwr;
|
|
int call_close = 0;
|
|
|
|
dprintk("%s: begin!\n", __func__);
|
|
@@ -2553,21 +2568,27 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
|
|
goto out_wait;
|
|
|
|
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
|
- calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
|
|
spin_lock(&state->owner->so_lock);
|
|
+ is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
|
|
+ is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
|
|
+ is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
|
|
/* Calculate the change in open mode */
|
|
+ calldata->arg.fmode = 0;
|
|
if (state->n_rdwr == 0) {
|
|
- if (state->n_rdonly == 0) {
|
|
- call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
|
|
- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
|
|
- calldata->arg.fmode &= ~FMODE_READ;
|
|
- }
|
|
- if (state->n_wronly == 0) {
|
|
- call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
|
|
- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
|
|
- calldata->arg.fmode &= ~FMODE_WRITE;
|
|
- }
|
|
- }
|
|
+ if (state->n_rdonly == 0)
|
|
+ call_close |= is_rdonly;
|
|
+ else if (is_rdonly)
|
|
+ calldata->arg.fmode |= FMODE_READ;
|
|
+ if (state->n_wronly == 0)
|
|
+ call_close |= is_wronly;
|
|
+ else if (is_wronly)
|
|
+ calldata->arg.fmode |= FMODE_WRITE;
|
|
+ } else if (is_rdwr)
|
|
+ calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
|
|
+
|
|
+ if (calldata->arg.fmode == 0)
|
|
+ call_close |= is_rdwr;
|
|
+
|
|
if (!nfs4_valid_open_stateid(state))
|
|
call_close = 0;
|
|
spin_unlock(&state->owner->so_lock);
|
|
@@ -7235,7 +7256,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
|
|
int ret = 0;
|
|
|
|
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
|
|
- return 0;
|
|
+ return -EAGAIN;
|
|
task = _nfs41_proc_sequence(clp, cred, false);
|
|
if (IS_ERR(task))
|
|
ret = PTR_ERR(task);
|
|
@@ -7568,6 +7589,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
|
|
|
|
dprintk("--> %s\n", __func__);
|
|
|
|
+ /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
|
|
+ pnfs_get_layout_hdr(NFS_I(inode)->layout);
|
|
+
|
|
lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
|
|
if (!lgp->args.layout.pages) {
|
|
nfs4_layoutget_release(lgp);
|
|
@@ -7580,9 +7604,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
|
|
lgp->res.seq_res.sr_slot = NULL;
|
|
nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
|
|
|
|
- /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
|
|
- pnfs_get_layout_hdr(NFS_I(inode)->layout);
|
|
-
|
|
task = rpc_run_task(&task_setup_data);
|
|
if (IS_ERR(task))
|
|
return ERR_CAST(task);
|
|
@@ -8248,7 +8269,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
|
|
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
|
|
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
|
|
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
|
|
- .recover_open = nfs4_open_expired,
|
|
+ .recover_open = nfs40_open_expired,
|
|
.recover_lock = nfs4_lock_expired,
|
|
.establish_clid = nfs4_init_clientid,
|
|
};
|
|
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
|
|
index 1720d32..e1ba58c 100644
|
|
--- a/fs/nfs/nfs4renewd.c
|
|
+++ b/fs/nfs/nfs4renewd.c
|
|
@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
|
|
}
|
|
nfs_expire_all_delegations(clp);
|
|
} else {
|
|
+ int ret;
|
|
+
|
|
/* Queue an asynchronous RENEW. */
|
|
- ops->sched_state_renewal(clp, cred, renew_flags);
|
|
+ ret = ops->sched_state_renewal(clp, cred, renew_flags);
|
|
put_rpccred(cred);
|
|
- goto out_exp;
|
|
+ switch (ret) {
|
|
+ default:
|
|
+ goto out_exp;
|
|
+ case -EAGAIN:
|
|
+ case -ENOMEM:
|
|
+ break;
|
|
+ }
|
|
}
|
|
} else {
|
|
dprintk("%s: failed to call renewd. Reason: lease not expired \n",
|
|
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
|
|
index 27f5f85..c402b67 100644
|
|
--- a/fs/nfs/nfs4state.c
|
|
+++ b/fs/nfs/nfs4state.c
|
|
@@ -1482,6 +1482,8 @@ restart:
|
|
spin_unlock(&state->state_lock);
|
|
}
|
|
nfs4_put_open_state(state);
|
|
+ clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
|
|
+ &state->flags);
|
|
spin_lock(&sp->so_lock);
|
|
goto restart;
|
|
}
|
|
@@ -1732,7 +1734,8 @@ restart:
|
|
if (status < 0) {
|
|
set_bit(ops->owner_flag_bit, &sp->so_flags);
|
|
nfs4_put_state_owner(sp);
|
|
- return nfs4_recovery_handle_error(clp, status);
|
|
+ status = nfs4_recovery_handle_error(clp, status);
|
|
+ return (status != 0) ? status : -EAGAIN;
|
|
}
|
|
|
|
nfs4_put_state_owner(sp);
|
|
@@ -1741,7 +1744,7 @@ restart:
|
|
spin_unlock(&clp->cl_lock);
|
|
}
|
|
rcu_read_unlock();
|
|
- return status;
|
|
+ return 0;
|
|
}
|
|
|
|
static int nfs4_check_lease(struct nfs_client *clp)
|
|
@@ -1788,7 +1791,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
|
|
break;
|
|
case -NFS4ERR_STALE_CLIENTID:
|
|
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
|
|
- nfs4_state_clear_reclaim_reboot(clp);
|
|
nfs4_state_start_reclaim_reboot(clp);
|
|
break;
|
|
case -NFS4ERR_CLID_INUSE:
|
|
@@ -2370,6 +2372,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
|
|
status = nfs4_check_lease(clp);
|
|
if (status < 0)
|
|
goto out_error;
|
|
+ continue;
|
|
}
|
|
|
|
if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
|
|
@@ -2391,14 +2394,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
|
|
section = "reclaim reboot";
|
|
status = nfs4_do_reclaim(clp,
|
|
clp->cl_mvops->reboot_recovery_ops);
|
|
- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
|
|
- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
|
|
- continue;
|
|
- nfs4_state_end_reclaim_reboot(clp);
|
|
- if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
|
|
+ if (status == -EAGAIN)
|
|
continue;
|
|
if (status < 0)
|
|
goto out_error;
|
|
+ nfs4_state_end_reclaim_reboot(clp);
|
|
}
|
|
|
|
/* Now recover expired state... */
|
|
@@ -2406,9 +2406,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
|
|
section = "reclaim nograce";
|
|
status = nfs4_do_reclaim(clp,
|
|
clp->cl_mvops->nograce_recovery_ops);
|
|
- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
|
|
- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
|
|
- test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
|
|
+ if (status == -EAGAIN)
|
|
continue;
|
|
if (status < 0)
|
|
goto out_error;
|
|
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
|
|
index 2ffebf2..27d7f27 100644
|
|
--- a/fs/nfs/pagelist.c
|
|
+++ b/fs/nfs/pagelist.c
|
|
@@ -113,7 +113,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
|
|
if (atomic_read(&c->io_count) == 0)
|
|
break;
|
|
ret = nfs_wait_bit_killable(&c->flags);
|
|
- } while (atomic_read(&c->io_count) != 0);
|
|
+ } while (atomic_read(&c->io_count) != 0 && !ret);
|
|
finish_wait(wq, &q.wait);
|
|
return ret;
|
|
}
|
|
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
|
|
index 3eaa6e3..f42bbe5 100644
|
|
--- a/fs/nfsd/nfs4callback.c
|
|
+++ b/fs/nfsd/nfs4callback.c
|
|
@@ -672,7 +672,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
|
|
clp->cl_cb_session = ses;
|
|
args.bc_xprt = conn->cb_xprt;
|
|
args.prognumber = clp->cl_cb_session->se_cb_prog;
|
|
- args.protocol = XPRT_TRANSPORT_BC_TCP;
|
|
+ args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
|
|
+ XPRT_TRANSPORT_BC;
|
|
args.authflavor = ses->se_cb_sec.flavor;
|
|
}
|
|
/* Create RPC client */
|
|
@@ -783,8 +784,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
|
|
{
|
|
if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
|
|
rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
|
|
- dprintk("%s slot is busy\n", __func__);
|
|
- return false;
|
|
+ /* Race breaker */
|
|
+ if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
|
|
+ dprintk("%s slot is busy\n", __func__);
|
|
+ return false;
|
|
+ }
|
|
+ rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
|
|
}
|
|
return true;
|
|
}
|
|
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
|
|
index f23a6ca..86f5d3e 100644
|
|
--- a/fs/nfsd/nfs4proc.c
|
|
+++ b/fs/nfsd/nfs4proc.c
|
|
@@ -1243,7 +1243,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
|
|
*/
|
|
if (argp->opcnt == resp->opcnt)
|
|
return false;
|
|
-
|
|
+ if (next->opnum == OP_ILLEGAL)
|
|
+ return false;
|
|
nextd = OPDESC(next);
|
|
/*
|
|
* Rest of 2.6.3.1.1: certain operations will return WRONGSEC
|
|
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
|
|
index 34d2a1f..daa53da 100644
|
|
--- a/fs/nfsd/nfs4state.c
|
|
+++ b/fs/nfsd/nfs4state.c
|
|
@@ -1209,15 +1209,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
|
|
return 0;
|
|
}
|
|
|
|
-static long long
|
|
+static int
|
|
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
|
|
{
|
|
- long long res;
|
|
-
|
|
- res = o1->len - o2->len;
|
|
- if (res)
|
|
- return res;
|
|
- return (long long)memcmp(o1->data, o2->data, o1->len);
|
|
+ if (o1->len < o2->len)
|
|
+ return -1;
|
|
+ if (o1->len > o2->len)
|
|
+ return 1;
|
|
+ return memcmp(o1->data, o2->data, o1->len);
|
|
}
|
|
|
|
static int same_name(const char *n1, const char *n2)
|
|
@@ -1401,7 +1400,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
|
|
static struct nfs4_client *
|
|
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
|
|
{
|
|
- long long cmp;
|
|
+ int cmp;
|
|
struct rb_node *node = root->rb_node;
|
|
struct nfs4_client *clp;
|
|
|
|
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
|
|
index 8657335..dd1afa3 100644
|
|
--- a/fs/nfsd/nfs4xdr.c
|
|
+++ b/fs/nfsd/nfs4xdr.c
|
|
@@ -1809,6 +1809,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char *components,
|
|
}
|
|
else
|
|
end++;
|
|
+ if (found_esc)
|
|
+ end = next;
|
|
+
|
|
str = end;
|
|
}
|
|
*pp = p;
|
|
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
|
|
index f8f060f..6040da8 100644
|
|
--- a/fs/nfsd/nfscache.c
|
|
+++ b/fs/nfsd/nfscache.c
|
|
@@ -224,13 +224,6 @@ hash_refile(struct svc_cacherep *rp)
|
|
hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
|
|
}
|
|
|
|
-static inline bool
|
|
-nfsd_cache_entry_expired(struct svc_cacherep *rp)
|
|
-{
|
|
- return rp->c_state != RC_INPROG &&
|
|
- time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
|
|
-}
|
|
-
|
|
/*
|
|
* Walk the LRU list and prune off entries that are older than RC_EXPIRE.
|
|
* Also prune the oldest ones when the total exceeds the max number of entries.
|
|
@@ -242,8 +235,14 @@ prune_cache_entries(void)
|
|
long freed = 0;
|
|
|
|
list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
|
|
- if (!nfsd_cache_entry_expired(rp) &&
|
|
- num_drc_entries <= max_drc_entries)
|
|
+ /*
|
|
+ * Don't free entries attached to calls that are still
|
|
+ * in-progress, but do keep scanning the list.
|
|
+ */
|
|
+ if (rp->c_state == RC_INPROG)
|
|
+ continue;
|
|
+ if (num_drc_entries <= max_drc_entries &&
|
|
+ time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
|
|
break;
|
|
nfsd_reply_cache_free_locked(rp);
|
|
freed++;
|
|
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
|
|
index 479eb68..f417fef 100644
|
|
--- a/fs/nfsd/nfsd.h
|
|
+++ b/fs/nfsd/nfsd.h
|
|
@@ -328,12 +328,15 @@ void nfsd_lockd_shutdown(void);
|
|
(NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
|
|
|
|
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
|
|
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
|
|
- (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
|
|
+#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL
|
|
#else
|
|
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0
|
|
+#define NFSD4_2_SECURITY_ATTRS 0
|
|
#endif
|
|
|
|
+#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
|
|
+ (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
|
|
+ NFSD4_2_SECURITY_ATTRS)
|
|
+
|
|
static inline u32 nfsd_suppattrs0(u32 minorversion)
|
|
{
|
|
return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
|
|
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
|
|
index 9a4a5f9..c34e45d 100644
|
|
--- a/fs/nfsd/nfssvc.c
|
|
+++ b/fs/nfsd/nfssvc.c
|
|
@@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs)
|
|
*/
|
|
ret = nfsd_racache_init(2*nrservs);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto dec_users;
|
|
+
|
|
ret = nfs4_state_start();
|
|
if (ret)
|
|
goto out_racache;
|
|
@@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs)
|
|
|
|
out_racache:
|
|
nfsd_racache_shutdown();
|
|
+dec_users:
|
|
+ nfsd_users--;
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
|
|
index b2e3ff3..090d8ce 100644
|
|
--- a/fs/nilfs2/btree.c
|
|
+++ b/fs/nilfs2/btree.c
|
|
@@ -31,6 +31,8 @@
|
|
#include "alloc.h"
|
|
#include "dat.h"
|
|
|
|
+static void __nilfs_btree_init(struct nilfs_bmap *bmap);
|
|
+
|
|
static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
|
|
{
|
|
struct nilfs_btree_path *path;
|
|
@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
|
|
return ret;
|
|
}
|
|
|
|
+/**
|
|
+ * nilfs_btree_root_broken - verify consistency of btree root node
|
|
+ * @node: btree root node to be examined
|
|
+ * @ino: inode number
|
|
+ *
|
|
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
|
|
+ */
|
|
+static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
|
|
+ unsigned long ino)
|
|
+{
|
|
+ int level, flags, nchildren;
|
|
+ int ret = 0;
|
|
+
|
|
+ level = nilfs_btree_node_get_level(node);
|
|
+ flags = nilfs_btree_node_get_flags(node);
|
|
+ nchildren = nilfs_btree_node_get_nchildren(node);
|
|
+
|
|
+ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
|
|
+ level >= NILFS_BTREE_LEVEL_MAX ||
|
|
+ nchildren < 0 ||
|
|
+ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
|
|
+ pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
|
|
+ ino, level, flags, nchildren);
|
|
+ ret = 1;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
int nilfs_btree_broken_node_block(struct buffer_head *bh)
|
|
{
|
|
int ret;
|
|
@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
|
|
|
|
/* convert and insert */
|
|
dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
|
|
- nilfs_btree_init(btree);
|
|
+ __nilfs_btree_init(btree);
|
|
if (nreq != NULL) {
|
|
nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
|
|
nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
|
|
@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
|
|
.bop_gather_data = NULL,
|
|
};
|
|
|
|
-int nilfs_btree_init(struct nilfs_bmap *bmap)
|
|
+static void __nilfs_btree_init(struct nilfs_bmap *bmap)
|
|
{
|
|
bmap->b_ops = &nilfs_btree_ops;
|
|
bmap->b_nchildren_per_block =
|
|
NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
|
|
- return 0;
|
|
+}
|
|
+
|
|
+int nilfs_btree_init(struct nilfs_bmap *bmap)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ __nilfs_btree_init(bmap);
|
|
+
|
|
+ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
|
|
+ bmap->b_inode->i_ino))
|
|
+ ret = -EIO;
|
|
+ return ret;
|
|
}
|
|
|
|
void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
|
|
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
|
|
index 7e350c5..09480c53 100644
|
|
--- a/fs/nilfs2/inode.c
|
|
+++ b/fs/nilfs2/inode.c
|
|
@@ -24,6 +24,7 @@
|
|
#include <linux/buffer_head.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/mpage.h>
|
|
+#include <linux/pagemap.h>
|
|
#include <linux/writeback.h>
|
|
#include <linux/aio.h>
|
|
#include "nilfs.h"
|
|
@@ -48,6 +49,8 @@ struct nilfs_iget_args {
|
|
int for_gc;
|
|
};
|
|
|
|
+static int nilfs_iget_test(struct inode *inode, void *opaque);
|
|
+
|
|
void nilfs_inode_add_blocks(struct inode *inode, int n)
|
|
{
|
|
struct nilfs_root *root = NILFS_I(inode)->i_root;
|
|
@@ -219,10 +222,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
|
|
|
|
static int nilfs_set_page_dirty(struct page *page)
|
|
{
|
|
+ struct inode *inode = page->mapping->host;
|
|
int ret = __set_page_dirty_nobuffers(page);
|
|
|
|
if (page_has_buffers(page)) {
|
|
- struct inode *inode = page->mapping->host;
|
|
unsigned nr_dirty = 0;
|
|
struct buffer_head *bh, *head;
|
|
|
|
@@ -245,6 +248,10 @@ static int nilfs_set_page_dirty(struct page *page)
|
|
|
|
if (nr_dirty)
|
|
nilfs_set_file_dirty(inode, nr_dirty);
|
|
+ } else if (ret) {
|
|
+ unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
|
|
+
|
|
+ nilfs_set_file_dirty(inode, nr_dirty);
|
|
}
|
|
return ret;
|
|
}
|
|
@@ -342,6 +349,17 @@ const struct address_space_operations nilfs_aops = {
|
|
.is_partially_uptodate = block_is_partially_uptodate,
|
|
};
|
|
|
|
+static int nilfs_insert_inode_locked(struct inode *inode,
|
|
+ struct nilfs_root *root,
|
|
+ unsigned long ino)
|
|
+{
|
|
+ struct nilfs_iget_args args = {
|
|
+ .ino = ino, .root = root, .cno = 0, .for_gc = 0
|
|
+ };
|
|
+
|
|
+ return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
|
|
+}
|
|
+
|
|
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
|
|
{
|
|
struct super_block *sb = dir->i_sb;
|
|
@@ -377,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
|
|
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
|
|
err = nilfs_bmap_read(ii->i_bmap, NULL);
|
|
if (err < 0)
|
|
- goto failed_bmap;
|
|
+ goto failed_after_creation;
|
|
|
|
set_bit(NILFS_I_BMAP, &ii->i_state);
|
|
/* No lock is needed; iget() ensures it. */
|
|
@@ -393,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
|
|
spin_lock(&nilfs->ns_next_gen_lock);
|
|
inode->i_generation = nilfs->ns_next_generation++;
|
|
spin_unlock(&nilfs->ns_next_gen_lock);
|
|
- insert_inode_hash(inode);
|
|
+ if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
|
|
+ err = -EIO;
|
|
+ goto failed_after_creation;
|
|
+ }
|
|
|
|
err = nilfs_init_acl(inode, dir);
|
|
if (unlikely(err))
|
|
- goto failed_acl; /* never occur. When supporting
|
|
+ goto failed_after_creation; /* never occur. When supporting
|
|
nilfs_init_acl(), proper cancellation of
|
|
above jobs should be considered */
|
|
|
|
return inode;
|
|
|
|
- failed_acl:
|
|
- failed_bmap:
|
|
+ failed_after_creation:
|
|
clear_nlink(inode);
|
|
+ unlock_new_inode(inode);
|
|
iput(inode); /* raw_inode will be deleted through
|
|
- generic_delete_inode() */
|
|
+ nilfs_evict_inode() */
|
|
goto failed;
|
|
|
|
failed_ifile_create_inode:
|
|
@@ -455,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode,
|
|
inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
|
|
inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
|
|
inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
|
|
- if (inode->i_nlink == 0 && inode->i_mode == 0)
|
|
- return -EINVAL; /* this inode is deleted */
|
|
+ if (inode->i_nlink == 0)
|
|
+ return -ESTALE; /* this inode is deleted */
|
|
|
|
inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
|
|
ii->i_flags = le32_to_cpu(raw_inode->i_flags);
|
|
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
|
|
index 9de78f0..0f84b25 100644
|
|
--- a/fs/nilfs2/namei.c
|
|
+++ b/fs/nilfs2/namei.c
|
|
@@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
|
|
int err = nilfs_add_link(dentry, inode);
|
|
if (!err) {
|
|
d_instantiate(dentry, inode);
|
|
+ unlock_new_inode(inode);
|
|
return 0;
|
|
}
|
|
inode_dec_link_count(inode);
|
|
+ unlock_new_inode(inode);
|
|
iput(inode);
|
|
return err;
|
|
}
|
|
@@ -182,6 +184,7 @@ out:
|
|
out_fail:
|
|
drop_nlink(inode);
|
|
nilfs_mark_inode_dirty(inode);
|
|
+ unlock_new_inode(inode);
|
|
iput(inode);
|
|
goto out;
|
|
}
|
|
@@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
|
|
inode_inc_link_count(inode);
|
|
ihold(inode);
|
|
|
|
- err = nilfs_add_nondir(dentry, inode);
|
|
- if (!err)
|
|
+ err = nilfs_add_link(dentry, inode);
|
|
+ if (!err) {
|
|
+ d_instantiate(dentry, inode);
|
|
err = nilfs_transaction_commit(dir->i_sb);
|
|
- else
|
|
+ } else {
|
|
+ inode_dec_link_count(inode);
|
|
+ iput(inode);
|
|
nilfs_transaction_abort(dir->i_sb);
|
|
+ }
|
|
|
|
return err;
|
|
}
|
|
@@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
|
|
|
|
nilfs_mark_inode_dirty(inode);
|
|
d_instantiate(dentry, inode);
|
|
+ unlock_new_inode(inode);
|
|
out:
|
|
if (!err)
|
|
err = nilfs_transaction_commit(dir->i_sb);
|
|
@@ -255,6 +263,7 @@ out_fail:
|
|
drop_nlink(inode);
|
|
drop_nlink(inode);
|
|
nilfs_mark_inode_dirty(inode);
|
|
+ unlock_new_inode(inode);
|
|
iput(inode);
|
|
out_dir:
|
|
drop_nlink(dir);
|
|
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
|
|
index 9bc72de..b02c202 100644
|
|
--- a/fs/nilfs2/nilfs.h
|
|
+++ b/fs/nilfs2/nilfs.h
|
|
@@ -141,7 +141,6 @@ enum {
|
|
* @ti_save: Backup of journal_info field of task_struct
|
|
* @ti_flags: Flags
|
|
* @ti_count: Nest level
|
|
- * @ti_garbage: List of inode to be put when releasing semaphore
|
|
*/
|
|
struct nilfs_transaction_info {
|
|
u32 ti_magic;
|
|
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
|
|
one of other filesystems has a bug. */
|
|
unsigned short ti_flags;
|
|
unsigned short ti_count;
|
|
- struct list_head ti_garbage;
|
|
};
|
|
|
|
/* ti_magic */
|
|
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
|
|
index a1a1916..14538a8 100644
|
|
--- a/fs/nilfs2/segment.c
|
|
+++ b/fs/nilfs2/segment.c
|
|
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
|
|
ti->ti_count = 0;
|
|
ti->ti_save = cur_ti;
|
|
ti->ti_magic = NILFS_TI_MAGIC;
|
|
- INIT_LIST_HEAD(&ti->ti_garbage);
|
|
current->journal_info = ti;
|
|
|
|
for (;;) {
|
|
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
|
|
|
|
up_write(&nilfs->ns_segctor_sem);
|
|
current->journal_info = ti->ti_save;
|
|
- if (!list_empty(&ti->ti_garbage))
|
|
- nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
|
|
}
|
|
|
|
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
|
|
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
|
|
}
|
|
}
|
|
|
|
+static void nilfs_iput_work_func(struct work_struct *work)
|
|
+{
|
|
+ struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
|
|
+ sc_iput_work);
|
|
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
|
|
+
|
|
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
|
|
+}
|
|
+
|
|
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
|
|
struct nilfs_root *root)
|
|
{
|
|
@@ -1899,8 +1905,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
|
|
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
|
|
struct the_nilfs *nilfs)
|
|
{
|
|
- struct nilfs_transaction_info *ti = current->journal_info;
|
|
struct nilfs_inode_info *ii, *n;
|
|
+ int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
|
|
+ int defer_iput = false;
|
|
|
|
spin_lock(&nilfs->ns_inode_lock);
|
|
list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
|
|
@@ -1911,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
|
|
clear_bit(NILFS_I_BUSY, &ii->i_state);
|
|
brelse(ii->i_bh);
|
|
ii->i_bh = NULL;
|
|
- list_move_tail(&ii->i_dirty, &ti->ti_garbage);
|
|
+ list_del_init(&ii->i_dirty);
|
|
+ if (!ii->vfs_inode.i_nlink || during_mount) {
|
|
+ /*
|
|
+ * Defer calling iput() to avoid deadlocks if
|
|
+ * i_nlink == 0 or mount is not yet finished.
|
|
+ */
|
|
+ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
|
|
+ defer_iput = true;
|
|
+ } else {
|
|
+ spin_unlock(&nilfs->ns_inode_lock);
|
|
+ iput(&ii->vfs_inode);
|
|
+ spin_lock(&nilfs->ns_inode_lock);
|
|
+ }
|
|
}
|
|
spin_unlock(&nilfs->ns_inode_lock);
|
|
+
|
|
+ if (defer_iput)
|
|
+ schedule_work(&sci->sc_iput_work);
|
|
}
|
|
|
|
/*
|
|
@@ -2580,6 +2602,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
|
|
INIT_LIST_HEAD(&sci->sc_segbufs);
|
|
INIT_LIST_HEAD(&sci->sc_write_logs);
|
|
INIT_LIST_HEAD(&sci->sc_gc_inodes);
|
|
+ INIT_LIST_HEAD(&sci->sc_iput_queue);
|
|
+ INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
|
|
init_timer(&sci->sc_timer);
|
|
|
|
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
|
|
@@ -2606,6 +2630,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
|
|
ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
|
|
nilfs_transaction_unlock(sci->sc_super);
|
|
|
|
+ flush_work(&sci->sc_iput_work);
|
|
+
|
|
} while (ret && retrycount-- > 0);
|
|
}
|
|
|
|
@@ -2630,6 +2656,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|
|
|| sci->sc_seq_request != sci->sc_seq_done);
|
|
spin_unlock(&sci->sc_state_lock);
|
|
|
|
+ if (flush_work(&sci->sc_iput_work))
|
|
+ flag = true;
|
|
+
|
|
if (flag || !nilfs_segctor_confirm(sci))
|
|
nilfs_segctor_write_out(sci);
|
|
|
|
@@ -2639,6 +2668,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|
|
nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
|
|
}
|
|
|
|
+ if (!list_empty(&sci->sc_iput_queue)) {
|
|
+ nilfs_warning(sci->sc_super, __func__,
|
|
+ "iput queue is not empty\n");
|
|
+ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
|
|
+ }
|
|
+
|
|
WARN_ON(!list_empty(&sci->sc_segbufs));
|
|
WARN_ON(!list_empty(&sci->sc_write_logs));
|
|
|
|
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
|
|
index 38a1d00..a48d6de 100644
|
|
--- a/fs/nilfs2/segment.h
|
|
+++ b/fs/nilfs2/segment.h
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/types.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/buffer_head.h>
|
|
+#include <linux/workqueue.h>
|
|
#include <linux/nilfs2_fs.h>
|
|
#include "nilfs.h"
|
|
|
|
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
|
|
* @sc_nblk_inc: Block count of current generation
|
|
* @sc_dirty_files: List of files to be written
|
|
* @sc_gc_inodes: List of GC inodes having blocks to be written
|
|
+ * @sc_iput_queue: list of inodes for which iput should be done
|
|
+ * @sc_iput_work: work struct to defer iput call
|
|
* @sc_freesegs: array of segment numbers to be freed
|
|
* @sc_nfreesegs: number of segments on @sc_freesegs
|
|
* @sc_dsync_inode: inode whose data pages are written for a sync operation
|
|
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
|
|
|
|
struct list_head sc_dirty_files;
|
|
struct list_head sc_gc_inodes;
|
|
+ struct list_head sc_iput_queue;
|
|
+ struct work_struct sc_iput_work;
|
|
|
|
__u64 *sc_freesegs;
|
|
size_t sc_nfreesegs;
|
|
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
|
|
index 287a22c..de6323e 100644
|
|
--- a/fs/notify/fanotify/fanotify_user.c
|
|
+++ b/fs/notify/fanotify/fanotify_user.c
|
|
@@ -71,7 +71,7 @@ static int create_fd(struct fsnotify_group *group,
|
|
|
|
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
|
|
|
|
- client_fd = get_unused_fd();
|
|
+ client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
|
|
if (client_fd < 0)
|
|
return client_fd;
|
|
|
|
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
|
|
index 238a593..9d7e2b9 100644
|
|
--- a/fs/notify/fdinfo.c
|
|
+++ b/fs/notify/fdinfo.c
|
|
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
|
|
{
|
|
struct {
|
|
struct file_handle handle;
|
|
- u8 pad[64];
|
|
+ u8 pad[MAX_HANDLE_SZ];
|
|
} f;
|
|
int size, ret, i;
|
|
|
|
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
|
|
size = f.handle.handle_bytes >> 2;
|
|
|
|
ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
|
|
- if ((ret == 255) || (ret == -ENOSPC)) {
|
|
+ if ((ret == FILEID_INVALID) || (ret < 0)) {
|
|
WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
|
|
return 0;
|
|
}
|
|
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
|
|
index 9d3e9c5..7001299 100644
|
|
--- a/fs/notify/fsnotify.c
|
|
+++ b/fs/notify/fsnotify.c
|
|
@@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
|
|
spin_lock(&inode->i_lock);
|
|
/* run all of the dentries associated with this inode. Since this is a
|
|
* directory, there damn well better only be one item on this list */
|
|
- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
|
|
struct dentry *child;
|
|
|
|
/* run all of the children of the original inode and fix their
|
|
* d_flags to indicate parental interest (their parent is the
|
|
* original inode) */
|
|
spin_lock(&alias->d_lock);
|
|
- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
|
|
+ list_for_each_entry(child, &alias->d_subdirs, d_child) {
|
|
if (!child->d_inode)
|
|
continue;
|
|
|
|
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
|
|
index 74825be..fbb9dfb 100644
|
|
--- a/fs/notify/inode_mark.c
|
|
+++ b/fs/notify/inode_mark.c
|
|
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
/* In case the dropping of a reference would nuke next_i. */
|
|
- if ((&next_i->i_sb_list != list) &&
|
|
- atomic_read(&next_i->i_count)) {
|
|
+ while (&next_i->i_sb_list != list) {
|
|
spin_lock(&next_i->i_lock);
|
|
- if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
|
|
+ if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
|
|
+ atomic_read(&next_i->i_count)) {
|
|
__iget(next_i);
|
|
need_iput = next_i;
|
|
+ spin_unlock(&next_i->i_lock);
|
|
+ break;
|
|
}
|
|
spin_unlock(&next_i->i_lock);
|
|
+ next_i = list_entry(next_i->i_sb_list.next,
|
|
+ struct inode, i_sb_list);
|
|
}
|
|
|
|
/*
|
|
- * We can safely drop inode_sb_list_lock here because we hold
|
|
- * references on both inode and next_i. Also no new inodes
|
|
- * will be added since the umount has begun.
|
|
+ * We can safely drop inode_sb_list_lock here because either
|
|
+ * we actually hold references on both inode and next_i or
|
|
+ * end of list. Also no new inodes will be added since the
|
|
+ * umount has begun.
|
|
*/
|
|
spin_unlock(&inode_sb_list_lock);
|
|
|
|
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
|
|
index 923fe4a..6bffc33 100644
|
|
--- a/fs/notify/mark.c
|
|
+++ b/fs/notify/mark.c
|
|
@@ -293,16 +293,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
|
|
unsigned int flags)
|
|
{
|
|
struct fsnotify_mark *lmark, *mark;
|
|
+ LIST_HEAD(to_free);
|
|
|
|
+ /*
|
|
+ * We have to be really careful here. Anytime we drop mark_mutex, e.g.
|
|
+ * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
|
|
+ * to_free list so we have to use mark_mutex even when accessing that
|
|
+ * list. And freeing mark requires us to drop mark_mutex. So we can
|
|
+ * reliably free only the first mark in the list. That's why we first
|
|
+ * move marks to free to to_free list in one go and then free marks in
|
|
+ * to_free list one by one.
|
|
+ */
|
|
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
|
|
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
|
|
- if (mark->flags & flags) {
|
|
- fsnotify_get_mark(mark);
|
|
- fsnotify_destroy_mark_locked(mark, group);
|
|
- fsnotify_put_mark(mark);
|
|
- }
|
|
+ if (mark->flags & flags)
|
|
+ list_move(&mark->g_list, &to_free);
|
|
}
|
|
mutex_unlock(&group->mark_mutex);
|
|
+
|
|
+ while (1) {
|
|
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
|
|
+ if (list_empty(&to_free)) {
|
|
+ mutex_unlock(&group->mark_mutex);
|
|
+ break;
|
|
+ }
|
|
+ mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
|
|
+ fsnotify_get_mark(mark);
|
|
+ fsnotify_destroy_mark_locked(mark, group);
|
|
+ mutex_unlock(&group->mark_mutex);
|
|
+ fsnotify_put_mark(mark);
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
|
|
index a27e3fe..250ed5b 100644
|
|
--- a/fs/ntfs/attrib.c
|
|
+++ b/fs/ntfs/attrib.c
|
|
@@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
|
|
if (page) {
|
|
set_page_dirty(page);
|
|
unlock_page(page);
|
|
- mark_page_accessed(page);
|
|
page_cache_release(page);
|
|
}
|
|
ntfs_debug("Done.");
|
|
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
|
|
index db9bd8a..86ddab9 100644
|
|
--- a/fs/ntfs/file.c
|
|
+++ b/fs/ntfs/file.c
|
|
@@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
|
|
}
|
|
do {
|
|
unlock_page(pages[--do_pages]);
|
|
- mark_page_accessed(pages[do_pages]);
|
|
page_cache_release(pages[do_pages]);
|
|
} while (do_pages);
|
|
if (unlikely(status))
|
|
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
|
|
index aeb44e8..bb6ee06 100644
|
|
--- a/fs/ocfs2/aops.c
|
|
+++ b/fs/ocfs2/aops.c
|
|
@@ -899,7 +899,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
|
|
}
|
|
}
|
|
|
|
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
|
|
+static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
|
|
{
|
|
int i;
|
|
|
|
@@ -920,7 +920,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
|
|
page_cache_release(wc->w_target_page);
|
|
}
|
|
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
|
|
+}
|
|
|
|
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
|
|
+{
|
|
+ ocfs2_unlock_pages(wc);
|
|
brelse(wc->w_di_bh);
|
|
kfree(wc);
|
|
}
|
|
@@ -2045,11 +2049,19 @@ out_write_size:
|
|
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
|
|
ocfs2_journal_dirty(handle, wc->w_di_bh);
|
|
|
|
+ /* unlock pages before dealloc since it needs acquiring j_trans_barrier
|
|
+ * lock, or it will cause a deadlock since journal commit threads holds
|
|
+ * this lock and will ask for the page lock when flushing the data.
|
|
+ * put it here to preserve the unlock order.
|
|
+ */
|
|
+ ocfs2_unlock_pages(wc);
|
|
+
|
|
ocfs2_commit_trans(osb, handle);
|
|
|
|
ocfs2_run_deallocs(osb, &wc->w_dealloc);
|
|
|
|
- ocfs2_free_write_ctxt(wc);
|
|
+ brelse(wc->w_di_bh);
|
|
+ kfree(wc);
|
|
|
|
return copied;
|
|
}
|
|
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
|
|
index 0d3a97d..1167485 100644
|
|
--- a/fs/ocfs2/dcache.c
|
|
+++ b/fs/ocfs2/dcache.c
|
|
@@ -173,7 +173,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
|
|
struct dentry *dentry;
|
|
|
|
spin_lock(&inode->i_lock);
|
|
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
|
|
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
|
|
spin_lock(&dentry->d_lock);
|
|
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
|
|
trace_ocfs2_find_local_alias(dentry->d_name.len,
|
|
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
|
|
index af3f7aa..1dd0bcc 100644
|
|
--- a/fs/ocfs2/dlm/dlmmaster.c
|
|
+++ b/fs/ocfs2/dlm/dlmmaster.c
|
|
@@ -650,12 +650,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
|
|
clear_bit(bit, res->refmap);
|
|
}
|
|
|
|
-
|
|
-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
|
|
+static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
|
|
struct dlm_lock_resource *res)
|
|
{
|
|
- assert_spin_locked(&res->spinlock);
|
|
-
|
|
res->inflight_locks++;
|
|
|
|
mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
|
|
@@ -663,6 +660,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
|
|
__builtin_return_address(0));
|
|
}
|
|
|
|
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
|
|
+ struct dlm_lock_resource *res)
|
|
+{
|
|
+ assert_spin_locked(&res->spinlock);
|
|
+ __dlm_lockres_grab_inflight_ref(dlm, res);
|
|
+}
|
|
+
|
|
void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
|
|
struct dlm_lock_resource *res)
|
|
{
|
|
@@ -722,6 +726,19 @@ lookup:
|
|
if (tmpres) {
|
|
spin_unlock(&dlm->spinlock);
|
|
spin_lock(&tmpres->spinlock);
|
|
+
|
|
+ /*
|
|
+ * Right after dlm spinlock was released, dlm_thread could have
|
|
+ * purged the lockres. Check if lockres got unhashed. If so
|
|
+ * start over.
|
|
+ */
|
|
+ if (hlist_unhashed(&tmpres->hash_node)) {
|
|
+ spin_unlock(&tmpres->spinlock);
|
|
+ dlm_lockres_put(tmpres);
|
|
+ tmpres = NULL;
|
|
+ goto lookup;
|
|
+ }
|
|
+
|
|
/* Wait on the thread that is mastering the resource */
|
|
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
|
|
__dlm_wait_on_lockres(tmpres);
|
|
@@ -852,10 +869,8 @@ lookup:
|
|
/* finally add the lockres to its hash bucket */
|
|
__dlm_insert_lockres(dlm, res);
|
|
|
|
- /* Grab inflight ref to pin the resource */
|
|
- spin_lock(&res->spinlock);
|
|
- dlm_lockres_grab_inflight_ref(dlm, res);
|
|
- spin_unlock(&res->spinlock);
|
|
+ /* since this lockres is new it doesn't not require the spinlock */
|
|
+ __dlm_lockres_grab_inflight_ref(dlm, res);
|
|
|
|
/* get an extra ref on the mle in case this is a BLOCK
|
|
* if so, the creator of the BLOCK may try to put the last
|
|
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
|
|
index 1998695..fa74259 100644
|
|
--- a/fs/ocfs2/dlmglue.c
|
|
+++ b/fs/ocfs2/dlmglue.c
|
|
@@ -3973,9 +3973,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
|
|
osb->dc_work_sequence = osb->dc_wake_sequence;
|
|
|
|
processed = osb->blocked_lock_count;
|
|
- while (processed) {
|
|
- BUG_ON(list_empty(&osb->blocked_lock_list));
|
|
-
|
|
+ /*
|
|
+ * blocked lock processing in this loop might call iput which can
|
|
+ * remove items off osb->blocked_lock_list. Downconvert up to
|
|
+ * 'processed' number of locks, but stop short if we had some
|
|
+ * removed in ocfs2_mark_lockres_freeing when downconverting.
|
|
+ */
|
|
+ while (processed && !list_empty(&osb->blocked_lock_list)) {
|
|
lockres = list_entry(osb->blocked_lock_list.next,
|
|
struct ocfs2_lock_res, l_blocked_list);
|
|
list_del_init(&lockres->l_blocked_list);
|
|
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
|
|
index 51632c4..35f54bc 100644
|
|
--- a/fs/ocfs2/file.c
|
|
+++ b/fs/ocfs2/file.c
|
|
@@ -2391,10 +2391,14 @@ out_dio:
|
|
/* buffered aio wouldn't have proper lock coverage today */
|
|
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
|
|
|
|
+ if (unlikely(written <= 0))
|
|
+ goto no_sync;
|
|
+
|
|
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
|
|
((file->f_flags & O_DIRECT) && !direct_io)) {
|
|
- ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
|
|
- *ppos + count - 1);
|
|
+ ret = filemap_fdatawrite_range(file->f_mapping,
|
|
+ iocb->ki_pos - written,
|
|
+ iocb->ki_pos - 1);
|
|
if (ret < 0)
|
|
written = ret;
|
|
|
|
@@ -2407,10 +2411,12 @@ out_dio:
|
|
}
|
|
|
|
if (!ret)
|
|
- ret = filemap_fdatawait_range(file->f_mapping, *ppos,
|
|
- *ppos + count - 1);
|
|
+ ret = filemap_fdatawait_range(file->f_mapping,
|
|
+ iocb->ki_pos - written,
|
|
+ iocb->ki_pos - 1);
|
|
}
|
|
|
|
+no_sync:
|
|
/*
|
|
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
|
|
* function pointer which is called when o_direct io completes so that
|
|
@@ -2472,9 +2478,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
|
|
struct address_space *mapping = out->f_mapping;
|
|
struct inode *inode = mapping->host;
|
|
struct splice_desc sd = {
|
|
- .total_len = len,
|
|
.flags = flags,
|
|
- .pos = *ppos,
|
|
.u.file = out,
|
|
};
|
|
|
|
@@ -2484,6 +2488,12 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
|
|
out->f_path.dentry->d_name.len,
|
|
out->f_path.dentry->d_name.name, len);
|
|
|
|
+ ret = generic_write_checks(out, ppos, &len, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ sd.total_len = len;
|
|
+ sd.pos = *ppos;
|
|
+
|
|
pipe_lock(pipe);
|
|
|
|
splice_from_pipe_begin(&sd);
|
|
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
|
|
index feed025f..b242762 100644
|
|
--- a/fs/ocfs2/namei.c
|
|
+++ b/fs/ocfs2/namei.c
|
|
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
|
|
struct inode *inode,
|
|
const char *symname);
|
|
|
|
+static int ocfs2_double_lock(struct ocfs2_super *osb,
|
|
+ struct buffer_head **bh1,
|
|
+ struct inode *inode1,
|
|
+ struct buffer_head **bh2,
|
|
+ struct inode *inode2,
|
|
+ int rename);
|
|
+
|
|
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
|
|
/* An orphan dir name is an 8 byte value, printed as a hex string */
|
|
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
|
|
|
|
@@ -656,8 +664,10 @@ static int ocfs2_link(struct dentry *old_dentry,
|
|
{
|
|
handle_t *handle;
|
|
struct inode *inode = old_dentry->d_inode;
|
|
+ struct inode *old_dir = old_dentry->d_parent->d_inode;
|
|
int err;
|
|
struct buffer_head *fe_bh = NULL;
|
|
+ struct buffer_head *old_dir_bh = NULL;
|
|
struct buffer_head *parent_fe_bh = NULL;
|
|
struct ocfs2_dinode *fe = NULL;
|
|
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
|
|
@@ -674,19 +684,33 @@ static int ocfs2_link(struct dentry *old_dentry,
|
|
|
|
dquot_initialize(dir);
|
|
|
|
- err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
|
|
+ err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
|
|
+ &parent_fe_bh, dir, 0);
|
|
if (err < 0) {
|
|
if (err != -ENOENT)
|
|
mlog_errno(err);
|
|
return err;
|
|
}
|
|
|
|
+ /* make sure both dirs have bhs
|
|
+ * get an extra ref on old_dir_bh if old==new */
|
|
+ if (!parent_fe_bh) {
|
|
+ if (old_dir_bh) {
|
|
+ parent_fe_bh = old_dir_bh;
|
|
+ get_bh(parent_fe_bh);
|
|
+ } else {
|
|
+ mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
|
|
+ err = -EIO;
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
if (!dir->i_nlink) {
|
|
err = -ENOENT;
|
|
goto out;
|
|
}
|
|
|
|
- err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
|
|
+ err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
|
|
old_dentry->d_name.len, &old_de_ino);
|
|
if (err) {
|
|
err = -ENOENT;
|
|
@@ -779,10 +803,11 @@ out_unlock_inode:
|
|
ocfs2_inode_unlock(inode, 1);
|
|
|
|
out:
|
|
- ocfs2_inode_unlock(dir, 1);
|
|
+ ocfs2_double_unlock(old_dir, dir);
|
|
|
|
brelse(fe_bh);
|
|
brelse(parent_fe_bh);
|
|
+ brelse(old_dir_bh);
|
|
|
|
ocfs2_free_dir_lookup_result(&lookup);
|
|
|
|
@@ -991,14 +1016,15 @@ leave:
|
|
}
|
|
|
|
/*
|
|
- * The only place this should be used is rename!
|
|
+ * The only place this should be used is rename and link!
|
|
* if they have the same id, then the 1st one is the only one locked.
|
|
*/
|
|
static int ocfs2_double_lock(struct ocfs2_super *osb,
|
|
struct buffer_head **bh1,
|
|
struct inode *inode1,
|
|
struct buffer_head **bh2,
|
|
- struct inode *inode2)
|
|
+ struct inode *inode2,
|
|
+ int rename)
|
|
{
|
|
int status;
|
|
struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
|
|
@@ -1028,7 +1054,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
|
|
}
|
|
/* lock id2 */
|
|
status = ocfs2_inode_lock_nested(inode2, bh2, 1,
|
|
- OI_LS_RENAME1);
|
|
+ rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
|
|
if (status < 0) {
|
|
if (status != -ENOENT)
|
|
mlog_errno(status);
|
|
@@ -1037,7 +1063,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
|
|
}
|
|
|
|
/* lock id1 */
|
|
- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
|
|
+ status = ocfs2_inode_lock_nested(inode1, bh1, 1,
|
|
+ rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
|
|
if (status < 0) {
|
|
/*
|
|
* An error return must mean that no cluster locks
|
|
@@ -1137,7 +1164,7 @@ static int ocfs2_rename(struct inode *old_dir,
|
|
|
|
/* if old and new are the same, this'll just do one lock. */
|
|
status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
|
|
- &new_dir_bh, new_dir);
|
|
+ &new_dir_bh, new_dir, 1);
|
|
if (status < 0) {
|
|
mlog_errno(status);
|
|
goto bail;
|
|
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
|
|
index d8b0afd..2dba0ca 100644
|
|
--- a/fs/omfs/inode.c
|
|
+++ b/fs/omfs/inode.c
|
|
@@ -361,7 +361,7 @@ nomem:
|
|
}
|
|
|
|
enum {
|
|
- Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
|
|
+ Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
|
|
};
|
|
|
|
static const match_table_t tokens = {
|
|
@@ -370,6 +370,7 @@ static const match_table_t tokens = {
|
|
{Opt_umask, "umask=%o"},
|
|
{Opt_dmask, "dmask=%o"},
|
|
{Opt_fmask, "fmask=%o"},
|
|
+ {Opt_err, NULL},
|
|
};
|
|
|
|
static int parse_options(char *options, struct omfs_sb_info *sbi)
|
|
diff --git a/fs/open.c b/fs/open.c
|
|
index 2ed7325..17679f2 100644
|
|
--- a/fs/open.c
|
|
+++ b/fs/open.c
|
|
@@ -539,6 +539,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
|
|
uid = make_kuid(current_user_ns(), user);
|
|
gid = make_kgid(current_user_ns(), group);
|
|
|
|
+retry_deleg:
|
|
newattrs.ia_valid = ATTR_CTIME;
|
|
if (user != (uid_t) -1) {
|
|
if (!uid_valid(uid))
|
|
@@ -555,7 +556,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
|
|
if (!S_ISDIR(inode->i_mode))
|
|
newattrs.ia_valid |=
|
|
ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
|
|
-retry_deleg:
|
|
mutex_lock(&inode->i_mutex);
|
|
error = security_path_chown(path, uid, gid);
|
|
if (!error)
|
|
diff --git a/fs/pipe.c b/fs/pipe.c
|
|
index 78fd0d0..46f1ab2 100644
|
|
--- a/fs/pipe.c
|
|
+++ b/fs/pipe.c
|
|
@@ -117,25 +117,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
|
|
}
|
|
|
|
static int
|
|
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
|
|
- int atomic)
|
|
+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
|
|
+ size_t *remaining, int atomic)
|
|
{
|
|
unsigned long copy;
|
|
|
|
- while (len > 0) {
|
|
+ while (*remaining > 0) {
|
|
while (!iov->iov_len)
|
|
iov++;
|
|
- copy = min_t(unsigned long, len, iov->iov_len);
|
|
+ copy = min_t(unsigned long, *remaining, iov->iov_len);
|
|
|
|
if (atomic) {
|
|
- if (__copy_from_user_inatomic(to, iov->iov_base, copy))
|
|
+ if (__copy_from_user_inatomic(addr + *offset,
|
|
+ iov->iov_base, copy))
|
|
return -EFAULT;
|
|
} else {
|
|
- if (copy_from_user(to, iov->iov_base, copy))
|
|
+ if (copy_from_user(addr + *offset,
|
|
+ iov->iov_base, copy))
|
|
return -EFAULT;
|
|
}
|
|
- to += copy;
|
|
- len -= copy;
|
|
+ *offset += copy;
|
|
+ *remaining -= copy;
|
|
iov->iov_base += copy;
|
|
iov->iov_len -= copy;
|
|
}
|
|
@@ -143,25 +145,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
|
|
}
|
|
|
|
static int
|
|
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
|
|
- int atomic)
|
|
+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
|
|
+ size_t *remaining, int atomic)
|
|
{
|
|
unsigned long copy;
|
|
|
|
- while (len > 0) {
|
|
+ while (*remaining > 0) {
|
|
while (!iov->iov_len)
|
|
iov++;
|
|
- copy = min_t(unsigned long, len, iov->iov_len);
|
|
+ copy = min_t(unsigned long, *remaining, iov->iov_len);
|
|
|
|
if (atomic) {
|
|
- if (__copy_to_user_inatomic(iov->iov_base, from, copy))
|
|
+ if (__copy_to_user_inatomic(iov->iov_base,
|
|
+ addr + *offset, copy))
|
|
return -EFAULT;
|
|
} else {
|
|
- if (copy_to_user(iov->iov_base, from, copy))
|
|
+ if (copy_to_user(iov->iov_base,
|
|
+ addr + *offset, copy))
|
|
return -EFAULT;
|
|
}
|
|
- from += copy;
|
|
- len -= copy;
|
|
+ *offset += copy;
|
|
+ *remaining -= copy;
|
|
iov->iov_base += copy;
|
|
iov->iov_len -= copy;
|
|
}
|
|
@@ -395,7 +399,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
|
|
struct pipe_buffer *buf = pipe->bufs + curbuf;
|
|
const struct pipe_buf_operations *ops = buf->ops;
|
|
void *addr;
|
|
- size_t chars = buf->len;
|
|
+ size_t chars = buf->len, remaining;
|
|
int error, atomic;
|
|
|
|
if (chars > total_len)
|
|
@@ -409,9 +413,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
|
|
}
|
|
|
|
atomic = !iov_fault_in_pages_write(iov, chars);
|
|
+ remaining = chars;
|
|
redo:
|
|
addr = ops->map(pipe, buf, atomic);
|
|
- error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
|
|
+ error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
|
|
+ &remaining, atomic);
|
|
ops->unmap(pipe, buf, addr);
|
|
if (unlikely(error)) {
|
|
/*
|
|
@@ -426,7 +432,6 @@ redo:
|
|
break;
|
|
}
|
|
ret += chars;
|
|
- buf->offset += chars;
|
|
buf->len -= chars;
|
|
|
|
/* Was it a packet buffer? Clean up and exit */
|
|
@@ -531,6 +536,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
|
|
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
|
|
int error, atomic = 1;
|
|
void *addr;
|
|
+ size_t remaining = chars;
|
|
|
|
error = ops->confirm(pipe, buf);
|
|
if (error)
|
|
@@ -539,8 +545,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
|
|
iov_fault_in_pages_read(iov, chars);
|
|
redo1:
|
|
addr = ops->map(pipe, buf, atomic);
|
|
- error = pipe_iov_copy_from_user(offset + addr, iov,
|
|
- chars, atomic);
|
|
+ error = pipe_iov_copy_from_user(addr, &offset, iov,
|
|
+ &remaining, atomic);
|
|
ops->unmap(pipe, buf, addr);
|
|
ret = error;
|
|
do_wakeup = 1;
|
|
@@ -575,6 +581,8 @@ redo1:
|
|
struct page *page = pipe->tmp_page;
|
|
char *src;
|
|
int error, atomic = 1;
|
|
+ int offset = 0;
|
|
+ size_t remaining;
|
|
|
|
if (!page) {
|
|
page = alloc_page(GFP_HIGHUSER);
|
|
@@ -595,14 +603,15 @@ redo1:
|
|
chars = total_len;
|
|
|
|
iov_fault_in_pages_read(iov, chars);
|
|
+ remaining = chars;
|
|
redo2:
|
|
if (atomic)
|
|
src = kmap_atomic(page);
|
|
else
|
|
src = kmap(page);
|
|
|
|
- error = pipe_iov_copy_from_user(src, iov, chars,
|
|
- atomic);
|
|
+ error = pipe_iov_copy_from_user(src, &offset, iov,
|
|
+ &remaining, atomic);
|
|
if (atomic)
|
|
kunmap_atomic(src);
|
|
else
|
|
diff --git a/fs/pnode.c b/fs/pnode.c
|
|
index a364a70..b7f8310 100644
|
|
--- a/fs/pnode.c
|
|
+++ b/fs/pnode.c
|
|
@@ -381,6 +381,7 @@ static void __propagate_umount(struct mount *mnt)
|
|
* other children
|
|
*/
|
|
if (child && list_empty(&child->mnt_mounts)) {
|
|
+ list_del_init(&child->mnt_child);
|
|
hlist_del_init_rcu(&child->mnt_hash);
|
|
hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
|
|
}
|
|
diff --git a/fs/proc/array.c b/fs/proc/array.c
|
|
index 656e401..baf3464 100644
|
|
--- a/fs/proc/array.c
|
|
+++ b/fs/proc/array.c
|
|
@@ -297,15 +297,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
|
|
seq_puts(m, header);
|
|
CAP_FOR_EACH_U32(__capi) {
|
|
seq_printf(m, "%08x",
|
|
- a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
|
|
+ a->cap[CAP_LAST_U32 - __capi]);
|
|
}
|
|
seq_putc(m, '\n');
|
|
}
|
|
|
|
-/* Remove non-existent capabilities */
|
|
-#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
|
|
- CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
|
|
-
|
|
static inline void task_cap(struct seq_file *m, struct task_struct *p)
|
|
{
|
|
const struct cred *cred;
|
|
@@ -319,11 +315,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
|
|
cap_bset = cred->cap_bset;
|
|
rcu_read_unlock();
|
|
|
|
- NORM_CAPS(cap_inheritable);
|
|
- NORM_CAPS(cap_permitted);
|
|
- NORM_CAPS(cap_effective);
|
|
- NORM_CAPS(cap_bset);
|
|
-
|
|
render_cap_t(m, "CapInh:\t", &cap_inheritable);
|
|
render_cap_t(m, "CapPrm:\t", &cap_permitted);
|
|
render_cap_t(m, "CapEff:\t", &cap_effective);
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index b976062..489ba8c 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -2555,6 +2555,57 @@ static const struct file_operations proc_projid_map_operations = {
|
|
.llseek = seq_lseek,
|
|
.release = proc_id_map_release,
|
|
};
|
|
+
|
|
+static int proc_setgroups_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ struct user_namespace *ns = NULL;
|
|
+ struct task_struct *task;
|
|
+ int ret;
|
|
+
|
|
+ ret = -ESRCH;
|
|
+ task = get_proc_task(inode);
|
|
+ if (task) {
|
|
+ rcu_read_lock();
|
|
+ ns = get_user_ns(task_cred_xxx(task, user_ns));
|
|
+ rcu_read_unlock();
|
|
+ put_task_struct(task);
|
|
+ }
|
|
+ if (!ns)
|
|
+ goto err;
|
|
+
|
|
+ if (file->f_mode & FMODE_WRITE) {
|
|
+ ret = -EACCES;
|
|
+ if (!ns_capable(ns, CAP_SYS_ADMIN))
|
|
+ goto err_put_ns;
|
|
+ }
|
|
+
|
|
+ ret = single_open(file, &proc_setgroups_show, ns);
|
|
+ if (ret)
|
|
+ goto err_put_ns;
|
|
+
|
|
+ return 0;
|
|
+err_put_ns:
|
|
+ put_user_ns(ns);
|
|
+err:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int proc_setgroups_release(struct inode *inode, struct file *file)
|
|
+{
|
|
+ struct seq_file *seq = file->private_data;
|
|
+ struct user_namespace *ns = seq->private;
|
|
+ int ret = single_release(inode, file);
|
|
+ put_user_ns(ns);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static const struct file_operations proc_setgroups_operations = {
|
|
+ .open = proc_setgroups_open,
|
|
+ .write = proc_setgroups_write,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = proc_setgroups_release,
|
|
+};
|
|
#endif /* CONFIG_USER_NS */
|
|
|
|
static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
|
|
@@ -2663,6 +2714,7 @@ static const struct pid_entry tgid_base_stuff[] = {
|
|
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
|
|
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
|
|
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
|
|
+ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
|
|
#endif
|
|
#ifdef CONFIG_CHECKPOINT_RESTORE
|
|
REG("timers", S_IRUGO, proc_timers_operations),
|
|
@@ -2998,6 +3050,7 @@ static const struct pid_entry tid_base_stuff[] = {
|
|
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
|
|
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
|
|
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
|
|
+ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
|
|
#endif
|
|
};
|
|
|
|
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
|
|
index b7f268e..2e2d9d5 100644
|
|
--- a/fs/proc/generic.c
|
|
+++ b/fs/proc/generic.c
|
|
@@ -19,7 +19,6 @@
|
|
#include <linux/mount.h>
|
|
#include <linux/init.h>
|
|
#include <linux/idr.h>
|
|
-#include <linux/namei.h>
|
|
#include <linux/bitops.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/completion.h>
|
|
@@ -162,17 +161,6 @@ void proc_free_inum(unsigned int inum)
|
|
spin_unlock_irqrestore(&proc_inum_lock, flags);
|
|
}
|
|
|
|
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
-{
|
|
- nd_set_link(nd, __PDE_DATA(dentry->d_inode));
|
|
- return NULL;
|
|
-}
|
|
-
|
|
-static const struct inode_operations proc_link_inode_operations = {
|
|
- .readlink = generic_readlink,
|
|
- .follow_link = proc_follow_link,
|
|
-};
|
|
-
|
|
/*
|
|
* Don't create negative dentries here, return -ENOENT by hand
|
|
* instead.
|
|
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
|
|
index 124fc43..2f2815f 100644
|
|
--- a/fs/proc/inode.c
|
|
+++ b/fs/proc/inode.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <linux/slab.h>
|
|
#include <linux/mount.h>
|
|
#include <linux/magic.h>
|
|
+#include <linux/namei.h>
|
|
|
|
#include <asm/uaccess.h>
|
|
|
|
@@ -401,6 +402,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
|
|
};
|
|
#endif
|
|
|
|
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
+{
|
|
+ struct proc_dir_entry *pde = PDE(dentry->d_inode);
|
|
+ if (unlikely(!use_pde(pde)))
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ nd_set_link(nd, pde->data);
|
|
+ return pde;
|
|
+}
|
|
+
|
|
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
|
|
+{
|
|
+ unuse_pde(p);
|
|
+}
|
|
+
|
|
+const struct inode_operations proc_link_inode_operations = {
|
|
+ .readlink = generic_readlink,
|
|
+ .follow_link = proc_follow_link,
|
|
+ .put_link = proc_put_link,
|
|
+};
|
|
+
|
|
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
|
|
{
|
|
struct inode *inode = new_inode_pseudo(sb);
|
|
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
|
|
index 651d09a..8b8ca1d 100644
|
|
--- a/fs/proc/internal.h
|
|
+++ b/fs/proc/internal.h
|
|
@@ -202,6 +202,7 @@ struct pde_opener {
|
|
int closing;
|
|
struct completion *c;
|
|
};
|
|
+extern const struct inode_operations proc_link_inode_operations;
|
|
|
|
extern const struct inode_operations proc_pid_link_inode_operations;
|
|
|
|
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
|
|
index 6f599c6..dbd0272 100644
|
|
--- a/fs/proc/stat.c
|
|
+++ b/fs/proc/stat.c
|
|
@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
|
|
|
|
/* sum again ? it could be updated? */
|
|
for_each_irq_nr(j)
|
|
- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
|
|
+ seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
|
|
|
|
seq_printf(p,
|
|
"\nctxt %llu\n"
|
|
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
|
|
index 8f78819..eaa7374 100644
|
|
--- a/fs/proc/task_mmu.c
|
|
+++ b/fs/proc/task_mmu.c
|
|
@@ -1,4 +1,5 @@
|
|
#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/hugetlb.h>
|
|
#include <linux/huge_mm.h>
|
|
#include <linux/mount.h>
|
|
@@ -152,7 +153,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
|
|
|
|
/*
|
|
* We remember last_addr rather than next_addr to hit with
|
|
- * mmap_cache most of the time. We have zero last_addr at
|
|
+ * vmacache most of the time. We have zero last_addr at
|
|
* the beginning and also after lseek. We will have -1 last_addr
|
|
* after the end of the vmas.
|
|
*/
|
|
@@ -992,9 +993,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|
struct vm_area_struct *vma;
|
|
struct pagemapread *pm = walk->private;
|
|
spinlock_t *ptl;
|
|
- pte_t *pte;
|
|
+ pte_t *pte, *orig_pte;
|
|
int err = 0;
|
|
- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
|
|
|
|
/* find the first VMA at or above 'addr' */
|
|
vma = find_vma(walk->mm, addr);
|
|
@@ -1008,6 +1008,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|
|
|
for (; addr != end; addr += PAGE_SIZE) {
|
|
unsigned long offset;
|
|
+ pagemap_entry_t pme;
|
|
|
|
offset = (addr & ~PAGEMAP_WALK_MASK) >>
|
|
PAGE_SHIFT;
|
|
@@ -1022,32 +1023,55 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|
|
|
if (pmd_trans_unstable(pmd))
|
|
return 0;
|
|
- for (; addr != end; addr += PAGE_SIZE) {
|
|
- int flags2;
|
|
-
|
|
- /* check to see if we've left 'vma' behind
|
|
- * and need a new, higher one */
|
|
- if (vma && (addr >= vma->vm_end)) {
|
|
- vma = find_vma(walk->mm, addr);
|
|
- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
|
|
- flags2 = __PM_SOFT_DIRTY;
|
|
- else
|
|
- flags2 = 0;
|
|
- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
|
|
+
|
|
+ while (1) {
|
|
+ /* End of address space hole, which we mark as non-present. */
|
|
+ unsigned long hole_end;
|
|
+
|
|
+ if (vma)
|
|
+ hole_end = min(end, vma->vm_start);
|
|
+ else
|
|
+ hole_end = end;
|
|
+
|
|
+ for (; addr < hole_end; addr += PAGE_SIZE) {
|
|
+ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
|
|
+
|
|
+ err = add_to_pagemap(addr, &pme, pm);
|
|
+ if (err)
|
|
+ return err;
|
|
}
|
|
|
|
- /* check that 'vma' actually covers this address,
|
|
- * and that it isn't a huge page vma */
|
|
- if (vma && (vma->vm_start <= addr) &&
|
|
- !is_vm_hugetlb_page(vma)) {
|
|
- pte = pte_offset_map(pmd, addr);
|
|
+ if (!vma || vma->vm_start >= end)
|
|
+ break;
|
|
+ /*
|
|
+ * We can't possibly be in a hugetlb VMA. In general,
|
|
+ * for a mm_walk with a pmd_entry and a hugetlb_entry,
|
|
+ * the pmd_entry can only be called on addresses in a
|
|
+ * hugetlb if the walk starts in a non-hugetlb VMA and
|
|
+ * spans a hugepage VMA. Since pagemap_read walks are
|
|
+ * PMD-sized and PMD-aligned, this will never be true.
|
|
+ */
|
|
+ BUG_ON(is_vm_hugetlb_page(vma));
|
|
+
|
|
+ /* Addresses in the VMA. */
|
|
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
|
|
+ for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
|
|
+ pagemap_entry_t pme;
|
|
+
|
|
pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
|
|
- /* unmap before userspace copy */
|
|
- pte_unmap(pte);
|
|
+ err = add_to_pagemap(addr, &pme, pm);
|
|
+ if (err)
|
|
+ break;
|
|
}
|
|
- err = add_to_pagemap(addr, &pme, pm);
|
|
+ pte_unmap_unlock(orig_pte, ptl);
|
|
+
|
|
if (err)
|
|
return err;
|
|
+
|
|
+ if (addr == end)
|
|
+ break;
|
|
+
|
|
+ vma = find_vma(walk->mm, addr);
|
|
}
|
|
|
|
cond_resched();
|
|
@@ -1226,6 +1250,9 @@ out:
|
|
|
|
static int pagemap_open(struct inode *inode, struct file *file)
|
|
{
|
|
+ /* do not disclose physical addresses: attack vector */
|
|
+ if (!capable(CAP_SYS_ADMIN))
|
|
+ return -EPERM;
|
|
pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
|
|
"to stop being page-shift some time soon. See the "
|
|
"linux/Documentation/vm/pagemap.txt for details.\n");
|
|
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
|
|
index 1282384..14120a3 100644
|
|
--- a/fs/pstore/inode.c
|
|
+++ b/fs/pstore/inode.c
|
|
@@ -319,10 +319,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
|
|
compressed ? ".enc.z" : "");
|
|
break;
|
|
case PSTORE_TYPE_CONSOLE:
|
|
- sprintf(name, "console-%s", psname);
|
|
+ sprintf(name, "console-%s-%lld", psname, id);
|
|
break;
|
|
case PSTORE_TYPE_FTRACE:
|
|
- sprintf(name, "ftrace-%s", psname);
|
|
+ sprintf(name, "ftrace-%s-%lld", psname, id);
|
|
break;
|
|
case PSTORE_TYPE_MCE:
|
|
sprintf(name, "mce-%s-%lld", psname, id);
|
|
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
|
|
index fa8cef2..fe68d8a 100644
|
|
--- a/fs/pstore/ram.c
|
|
+++ b/fs/pstore/ram.c
|
|
@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
|
|
MODULE_PARM_DESC(mem_size,
|
|
"size of reserved RAM used to store oops/panic logs");
|
|
|
|
+static unsigned int mem_type;
|
|
+module_param(mem_type, uint, 0600);
|
|
+MODULE_PARM_DESC(mem_type,
|
|
+ "set to 1 to try to use unbuffered memory (default 0)");
|
|
+
|
|
static int dump_oops = 1;
|
|
module_param(dump_oops, int, 0600);
|
|
MODULE_PARM_DESC(dump_oops,
|
|
@@ -79,6 +84,7 @@ struct ramoops_context {
|
|
struct persistent_ram_zone *fprz;
|
|
phys_addr_t phys_addr;
|
|
unsigned long size;
|
|
+ unsigned int memtype;
|
|
size_t record_size;
|
|
size_t console_size;
|
|
size_t ftrace_size;
|
|
@@ -86,6 +92,7 @@ struct ramoops_context {
|
|
struct persistent_ram_ecc_info ecc_info;
|
|
unsigned int max_dump_cnt;
|
|
unsigned int dump_write_cnt;
|
|
+ /* _read_cnt need clear on ramoops_pstore_open */
|
|
unsigned int dump_read_cnt;
|
|
unsigned int console_read_cnt;
|
|
unsigned int ftrace_read_cnt;
|
|
@@ -101,6 +108,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
|
|
|
|
cxt->dump_read_cnt = 0;
|
|
cxt->console_read_cnt = 0;
|
|
+ cxt->ftrace_read_cnt = 0;
|
|
return 0;
|
|
}
|
|
|
|
@@ -117,13 +125,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
|
|
return NULL;
|
|
|
|
prz = przs[i];
|
|
+ if (!prz)
|
|
+ return NULL;
|
|
|
|
- if (update) {
|
|
- /* Update old/shadowed buffer. */
|
|
+ /* Update old/shadowed buffer. */
|
|
+ if (update)
|
|
persistent_ram_save_old(prz);
|
|
- if (!persistent_ram_old_size(prz))
|
|
- return NULL;
|
|
- }
|
|
+
|
|
+ if (!persistent_ram_old_size(prz))
|
|
+ return NULL;
|
|
|
|
*typep = type;
|
|
*id = i;
|
|
@@ -353,7 +363,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
|
|
size_t sz = cxt->record_size;
|
|
|
|
cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
|
|
- &cxt->ecc_info);
|
|
+ &cxt->ecc_info,
|
|
+ cxt->memtype);
|
|
if (IS_ERR(cxt->przs[i])) {
|
|
err = PTR_ERR(cxt->przs[i]);
|
|
dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
|
|
@@ -383,7 +394,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
|
|
+ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
|
|
if (IS_ERR(*prz)) {
|
|
int err = PTR_ERR(*prz);
|
|
|
|
@@ -428,9 +439,9 @@ static int ramoops_probe(struct platform_device *pdev)
|
|
if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
|
|
pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
|
|
|
|
- cxt->dump_read_cnt = 0;
|
|
cxt->size = pdata->mem_size;
|
|
cxt->phys_addr = pdata->mem_address;
|
|
+ cxt->memtype = pdata->mem_type;
|
|
cxt->record_size = pdata->record_size;
|
|
cxt->console_size = pdata->console_size;
|
|
cxt->ftrace_size = pdata->ftrace_size;
|
|
@@ -561,6 +572,7 @@ static void ramoops_register_dummy(void)
|
|
|
|
dummy_data->mem_size = mem_size;
|
|
dummy_data->mem_address = mem_address;
|
|
+ dummy_data->mem_type = 0;
|
|
dummy_data->record_size = record_size;
|
|
dummy_data->console_size = ramoops_console_size;
|
|
dummy_data->ftrace_size = ramoops_ftrace_size;
|
|
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
|
|
index de272d4..bda61a7 100644
|
|
--- a/fs/pstore/ram_core.c
|
|
+++ b/fs/pstore/ram_core.c
|
|
@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
|
|
persistent_ram_update_header_ecc(prz);
|
|
}
|
|
|
|
-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
|
|
+static void *persistent_ram_vmap(phys_addr_t start, size_t size,
|
|
+ unsigned int memtype)
|
|
{
|
|
struct page **pages;
|
|
phys_addr_t page_start;
|
|
@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
|
|
page_start = start - offset_in_page(start);
|
|
page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
|
|
|
|
- prot = pgprot_noncached(PAGE_KERNEL);
|
|
+ if (memtype)
|
|
+ prot = pgprot_noncached(PAGE_KERNEL);
|
|
+ else
|
|
+ prot = pgprot_writecombine(PAGE_KERNEL);
|
|
|
|
pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
|
|
if (!pages) {
|
|
@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
|
|
return vaddr;
|
|
}
|
|
|
|
-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
|
|
+static void *persistent_ram_iomap(phys_addr_t start, size_t size,
|
|
+ unsigned int memtype)
|
|
{
|
|
+ void *va;
|
|
+
|
|
if (!request_mem_region(start, size, "persistent_ram")) {
|
|
pr_err("request mem region (0x%llx@0x%llx) failed\n",
|
|
(unsigned long long)size, (unsigned long long)start);
|
|
@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
|
|
buffer_start_add = buffer_start_add_locked;
|
|
buffer_size_add = buffer_size_add_locked;
|
|
|
|
- return ioremap(start, size);
|
|
+ if (memtype)
|
|
+ va = ioremap(start, size);
|
|
+ else
|
|
+ va = ioremap_wc(start, size);
|
|
+
|
|
+ return va;
|
|
}
|
|
|
|
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
|
|
- struct persistent_ram_zone *prz)
|
|
+ struct persistent_ram_zone *prz, int memtype)
|
|
{
|
|
prz->paddr = start;
|
|
prz->size = size;
|
|
|
|
if (pfn_valid(start >> PAGE_SHIFT))
|
|
- prz->vaddr = persistent_ram_vmap(start, size);
|
|
+ prz->vaddr = persistent_ram_vmap(start, size, memtype);
|
|
else
|
|
- prz->vaddr = persistent_ram_iomap(start, size);
|
|
+ prz->vaddr = persistent_ram_iomap(start, size, memtype);
|
|
|
|
if (!prz->vaddr) {
|
|
pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
|
|
@@ -502,7 +514,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
|
|
}
|
|
|
|
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
|
|
- u32 sig, struct persistent_ram_ecc_info *ecc_info)
|
|
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
|
|
+ unsigned int memtype)
|
|
{
|
|
struct persistent_ram_zone *prz;
|
|
int ret = -ENOMEM;
|
|
@@ -513,7 +526,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
|
|
goto err;
|
|
}
|
|
|
|
- ret = persistent_ram_buffer_map(start, size, prz);
|
|
+ ret = persistent_ram_buffer_map(start, size, prz, memtype);
|
|
if (ret)
|
|
goto err;
|
|
|
|
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
|
|
index ce87c90..89da957 100644
|
|
--- a/fs/quota/dquot.c
|
|
+++ b/fs/quota/dquot.c
|
|
@@ -637,7 +637,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
|
|
dqstats_inc(DQST_LOOKUPS);
|
|
err = sb->dq_op->write_dquot(dquot);
|
|
if (!ret && err)
|
|
- err = ret;
|
|
+ ret = err;
|
|
dqput(dquot);
|
|
spin_lock(&dq_list_lock);
|
|
}
|
|
diff --git a/fs/signalfd.c b/fs/signalfd.c
|
|
index 424b7b6..148f8e7 100644
|
|
--- a/fs/signalfd.c
|
|
+++ b/fs/signalfd.c
|
|
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
|
|
* Other callers might not initialize the si_lsb field,
|
|
* so check explicitly for the right codes here.
|
|
*/
|
|
- if (kinfo->si_code == BUS_MCEERR_AR ||
|
|
- kinfo->si_code == BUS_MCEERR_AO)
|
|
+ if (kinfo->si_signo == SIGBUS &&
|
|
+ (kinfo->si_code == BUS_MCEERR_AR ||
|
|
+ kinfo->si_code == BUS_MCEERR_AO))
|
|
err |= __put_user((short) kinfo->si_addr_lsb,
|
|
&uinfo->ssi_addr_lsb);
|
|
#endif
|
|
diff --git a/fs/splice.c b/fs/splice.c
|
|
index 12028fa..f345d53 100644
|
|
--- a/fs/splice.c
|
|
+++ b/fs/splice.c
|
|
@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
|
|
struct address_space *mapping = out->f_mapping;
|
|
struct inode *inode = mapping->host;
|
|
struct splice_desc sd = {
|
|
- .total_len = len,
|
|
.flags = flags,
|
|
- .pos = *ppos,
|
|
.u.file = out,
|
|
};
|
|
ssize_t ret;
|
|
|
|
+ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ sd.total_len = len;
|
|
+ sd.pos = *ppos;
|
|
+
|
|
pipe_lock(pipe);
|
|
|
|
splice_from_pipe_begin(&sd);
|
|
diff --git a/fs/super.c b/fs/super.c
|
|
index 7624267..440ef51 100644
|
|
--- a/fs/super.c
|
|
+++ b/fs/super.c
|
|
@@ -81,6 +81,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
|
|
inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
|
|
dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
|
|
total_objects = dentries + inodes + fs_objects + 1;
|
|
+ if (!total_objects)
|
|
+ total_objects = 1;
|
|
|
|
/* proportion the scan between the caches */
|
|
dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
|
|
@@ -112,9 +114,14 @@ static unsigned long super_cache_count(struct shrinker *shrink,
|
|
|
|
sb = container_of(shrink, struct super_block, s_shrink);
|
|
|
|
- if (!grab_super_passive(sb))
|
|
- return 0;
|
|
-
|
|
+ /*
|
|
+ * Don't call grab_super_passive as it is a potential
|
|
+ * scalability bottleneck. The counts could get updated
|
|
+ * between super_cache_count and super_cache_scan anyway.
|
|
+ * Call to super_cache_count with shrinker_rwsem held
|
|
+ * ensures the safety of call to list_lru_count_node() and
|
|
+ * s_op->nr_cached_objects().
|
|
+ */
|
|
if (sb->s_op && sb->s_op->nr_cached_objects)
|
|
total_objects = sb->s_op->nr_cached_objects(sb,
|
|
sc->nid);
|
|
@@ -125,7 +132,6 @@ static unsigned long super_cache_count(struct shrinker *shrink,
|
|
sc->nid);
|
|
|
|
total_objects = vfs_pressure_ratio(total_objects);
|
|
- drop_super(sb);
|
|
return total_objects;
|
|
}
|
|
|
|
@@ -276,10 +282,8 @@ void deactivate_locked_super(struct super_block *s)
|
|
struct file_system_type *fs = s->s_type;
|
|
if (atomic_dec_and_test(&s->s_active)) {
|
|
cleancache_invalidate_fs(s);
|
|
- fs->kill_sb(s);
|
|
-
|
|
- /* caches are now gone, we can safely kill the shrinker now */
|
|
unregister_shrinker(&s->s_shrink);
|
|
+ fs->kill_sb(s);
|
|
|
|
put_filesystem(fs);
|
|
put_super(s);
|
|
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
|
|
index ff82293..26b69b2 100644
|
|
--- a/fs/ubifs/commit.c
|
|
+++ b/fs/ubifs/commit.c
|
|
@@ -166,15 +166,10 @@ static int do_commit(struct ubifs_info *c)
|
|
err = ubifs_orphan_end_commit(c);
|
|
if (err)
|
|
goto out;
|
|
- old_ltail_lnum = c->ltail_lnum;
|
|
- err = ubifs_log_end_commit(c, new_ltail_lnum);
|
|
- if (err)
|
|
- goto out;
|
|
err = dbg_check_old_index(c, &zroot);
|
|
if (err)
|
|
goto out;
|
|
|
|
- mutex_lock(&c->mst_mutex);
|
|
c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
|
|
c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
|
|
c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
|
|
@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
|
|
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
|
|
else
|
|
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
|
|
- err = ubifs_write_master(c);
|
|
- mutex_unlock(&c->mst_mutex);
|
|
+
|
|
+ old_ltail_lnum = c->ltail_lnum;
|
|
+ err = ubifs_log_end_commit(c, new_ltail_lnum);
|
|
if (err)
|
|
goto out;
|
|
|
|
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
|
|
index a902c59..8d59de8 100644
|
|
--- a/fs/ubifs/log.c
|
|
+++ b/fs/ubifs/log.c
|
|
@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
|
|
h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
|
|
t = (long long)c->ltail_lnum * c->leb_size;
|
|
|
|
- if (h >= t)
|
|
+ if (h > t)
|
|
return c->log_bytes - h + t;
|
|
- else
|
|
+ else if (h != t)
|
|
return t - h;
|
|
+ else if (c->lhead_lnum != c->ltail_lnum)
|
|
+ return 0;
|
|
+ else
|
|
+ return c->log_bytes;
|
|
}
|
|
|
|
/**
|
|
@@ -447,9 +451,9 @@ out:
|
|
* @ltail_lnum: new log tail LEB number
|
|
*
|
|
* This function is called on when the commit operation was finished. It
|
|
- * moves log tail to new position and unmaps LEBs which contain obsolete data.
|
|
- * Returns zero in case of success and a negative error code in case of
|
|
- * failure.
|
|
+ * moves log tail to new position and updates the master node so that it stores
|
|
+ * the new log tail LEB number. Returns zero in case of success and a negative
|
|
+ * error code in case of failure.
|
|
*/
|
|
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
|
|
{
|
|
@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
|
|
spin_unlock(&c->buds_lock);
|
|
|
|
err = dbg_check_bud_bytes(c);
|
|
+ if (err)
|
|
+ goto out;
|
|
|
|
+ err = ubifs_write_master(c);
|
|
+
|
|
+out:
|
|
mutex_unlock(&c->log_mutex);
|
|
return err;
|
|
}
|
|
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
|
|
index ab83ace..1a4bb9e 100644
|
|
--- a/fs/ubifs/master.c
|
|
+++ b/fs/ubifs/master.c
|
|
@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
|
|
* ubifs_write_master - write master node.
|
|
* @c: UBIFS file-system description object
|
|
*
|
|
- * This function writes the master node. The caller has to take the
|
|
- * @c->mst_mutex lock before calling this function. Returns zero in case of
|
|
- * success and a negative error code in case of failure. The master node is
|
|
- * written twice to enable recovery.
|
|
+ * This function writes the master node. Returns zero in case of success and a
|
|
+ * negative error code in case of failure. The master node is written twice to
|
|
+ * enable recovery.
|
|
*/
|
|
int ubifs_write_master(struct ubifs_info *c)
|
|
{
|
|
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
|
|
index 5ded849..94d9a64 100644
|
|
--- a/fs/ubifs/super.c
|
|
+++ b/fs/ubifs/super.c
|
|
@@ -1957,7 +1957,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
|
|
mutex_init(&c->lp_mutex);
|
|
mutex_init(&c->tnc_mutex);
|
|
mutex_init(&c->log_mutex);
|
|
- mutex_init(&c->mst_mutex);
|
|
mutex_init(&c->umount_mutex);
|
|
mutex_init(&c->bu_mutex);
|
|
mutex_init(&c->write_reserve_mutex);
|
|
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
|
|
index e8c8cfe..7ab9c71 100644
|
|
--- a/fs/ubifs/ubifs.h
|
|
+++ b/fs/ubifs/ubifs.h
|
|
@@ -1042,7 +1042,6 @@ struct ubifs_debug_info;
|
|
*
|
|
* @mst_node: master node
|
|
* @mst_offs: offset of valid master node
|
|
- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
|
|
*
|
|
* @max_bu_buf_len: maximum bulk-read buffer length
|
|
* @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
|
|
@@ -1282,7 +1281,6 @@ struct ubifs_info {
|
|
|
|
struct ubifs_mst_node *mst_node;
|
|
int mst_offs;
|
|
- struct mutex mst_mutex;
|
|
|
|
int max_bu_buf_len;
|
|
struct mutex bu_mutex;
|
|
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
|
|
index 982ce05..287cd5f 100644
|
|
--- a/fs/udf/inode.c
|
|
+++ b/fs/udf/inode.c
|
|
@@ -1271,13 +1271,22 @@ update_time:
|
|
return 0;
|
|
}
|
|
|
|
+/*
|
|
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
|
|
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
|
|
+ * inode on write-once media but avoid looping for too long on corrupted media.
|
|
+ */
|
|
+#define UDF_MAX_ICB_NESTING 1024
|
|
+
|
|
static void __udf_read_inode(struct inode *inode)
|
|
{
|
|
struct buffer_head *bh = NULL;
|
|
struct fileEntry *fe;
|
|
uint16_t ident;
|
|
struct udf_inode_info *iinfo = UDF_I(inode);
|
|
+ unsigned int indirections = 0;
|
|
|
|
+reread:
|
|
/*
|
|
* Set defaults, but the inode is still incomplete!
|
|
* Note: get_new_inode() sets the following on a new inode:
|
|
@@ -1314,28 +1323,26 @@ static void __udf_read_inode(struct inode *inode)
|
|
ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
|
|
&ident);
|
|
if (ident == TAG_IDENT_IE && ibh) {
|
|
- struct buffer_head *nbh = NULL;
|
|
struct kernel_lb_addr loc;
|
|
struct indirectEntry *ie;
|
|
|
|
ie = (struct indirectEntry *)ibh->b_data;
|
|
loc = lelb_to_cpu(ie->indirectICB.extLocation);
|
|
|
|
- if (ie->indirectICB.extLength &&
|
|
- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
|
|
- &ident))) {
|
|
- if (ident == TAG_IDENT_FE ||
|
|
- ident == TAG_IDENT_EFE) {
|
|
- memcpy(&iinfo->i_location,
|
|
- &loc,
|
|
- sizeof(struct kernel_lb_addr));
|
|
- brelse(bh);
|
|
- brelse(ibh);
|
|
- brelse(nbh);
|
|
- __udf_read_inode(inode);
|
|
+ if (ie->indirectICB.extLength) {
|
|
+ brelse(bh);
|
|
+ brelse(ibh);
|
|
+ memcpy(&iinfo->i_location, &loc,
|
|
+ sizeof(struct kernel_lb_addr));
|
|
+ if (++indirections > UDF_MAX_ICB_NESTING) {
|
|
+ udf_err(inode->i_sb,
|
|
+ "too many ICBs in ICB hierarchy"
|
|
+ " (max %d supported)\n",
|
|
+ UDF_MAX_ICB_NESTING);
|
|
+ make_bad_inode(inode);
|
|
return;
|
|
}
|
|
- brelse(nbh);
|
|
+ goto reread;
|
|
}
|
|
}
|
|
brelse(ibh);
|
|
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
|
|
index d7c6dbe..d89f324 100644
|
|
--- a/fs/udf/symlink.c
|
|
+++ b/fs/udf/symlink.c
|
|
@@ -80,11 +80,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
|
|
struct inode *inode = page->mapping->host;
|
|
struct buffer_head *bh = NULL;
|
|
unsigned char *symlink;
|
|
- int err = -EIO;
|
|
+ int err;
|
|
unsigned char *p = kmap(page);
|
|
struct udf_inode_info *iinfo;
|
|
uint32_t pos;
|
|
|
|
+ /* We don't support symlinks longer than one block */
|
|
+ if (inode->i_size > inode->i_sb->s_blocksize) {
|
|
+ err = -ENAMETOOLONG;
|
|
+ goto out_unmap;
|
|
+ }
|
|
+
|
|
iinfo = UDF_I(inode);
|
|
pos = udf_block_map(inode, 0);
|
|
|
|
@@ -94,8 +100,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
|
|
} else {
|
|
bh = sb_bread(inode->i_sb, pos);
|
|
|
|
- if (!bh)
|
|
- goto out;
|
|
+ if (!bh) {
|
|
+ err = -EIO;
|
|
+ goto out_unlock_inode;
|
|
+ }
|
|
|
|
symlink = bh->b_data;
|
|
}
|
|
@@ -109,9 +117,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
|
|
unlock_page(page);
|
|
return 0;
|
|
|
|
-out:
|
|
+out_unlock_inode:
|
|
up_read(&iinfo->i_data_sem);
|
|
SetPageError(page);
|
|
+out_unmap:
|
|
kunmap(page);
|
|
unlock_page(page);
|
|
return err;
|
|
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
|
|
index db2cfb0..0461fbe 100644
|
|
--- a/fs/xfs/xfs_aops.c
|
|
+++ b/fs/xfs/xfs_aops.c
|
|
@@ -434,10 +434,22 @@ xfs_start_page_writeback(
|
|
{
|
|
ASSERT(PageLocked(page));
|
|
ASSERT(!PageWriteback(page));
|
|
- if (clear_dirty)
|
|
+
|
|
+ /*
|
|
+ * if the page was not fully cleaned, we need to ensure that the higher
|
|
+ * layers come back to it correctly. That means we need to keep the page
|
|
+ * dirty, and for WB_SYNC_ALL writeback we need to ensure the
|
|
+ * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
|
|
+ * write this page in this writeback sweep will be made.
|
|
+ */
|
|
+ if (clear_dirty) {
|
|
clear_page_dirty_for_io(page);
|
|
- set_page_writeback(page);
|
|
+ set_page_writeback(page);
|
|
+ } else
|
|
+ set_page_writeback_keepwrite(page);
|
|
+
|
|
unlock_page(page);
|
|
+
|
|
/* If no buffers on the page are to be written, finish it here */
|
|
if (!buffers)
|
|
end_page_writeback(page);
|
|
@@ -1660,11 +1672,72 @@ xfs_vm_readpages(
|
|
return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
|
|
}
|
|
|
|
+/*
|
|
+ * This is basically a copy of __set_page_dirty_buffers() with one
|
|
+ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
|
|
+ * dirty, we'll never be able to clean them because we don't write buffers
|
|
+ * beyond EOF, and that means we can't invalidate pages that span EOF
|
|
+ * that have been marked dirty. Further, the dirty state can leak into
|
|
+ * the file interior if the file is extended, resulting in all sorts of
|
|
+ * bad things happening as the state does not match the underlying data.
|
|
+ *
|
|
+ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
|
|
+ * this only exist because of bufferheads and how the generic code manages them.
|
|
+ */
|
|
+STATIC int
|
|
+xfs_vm_set_page_dirty(
|
|
+ struct page *page)
|
|
+{
|
|
+ struct address_space *mapping = page->mapping;
|
|
+ struct inode *inode = mapping->host;
|
|
+ loff_t end_offset;
|
|
+ loff_t offset;
|
|
+ int newly_dirty;
|
|
+
|
|
+ if (unlikely(!mapping))
|
|
+ return !TestSetPageDirty(page);
|
|
+
|
|
+ end_offset = i_size_read(inode);
|
|
+ offset = page_offset(page);
|
|
+
|
|
+ spin_lock(&mapping->private_lock);
|
|
+ if (page_has_buffers(page)) {
|
|
+ struct buffer_head *head = page_buffers(page);
|
|
+ struct buffer_head *bh = head;
|
|
+
|
|
+ do {
|
|
+ if (offset < end_offset)
|
|
+ set_buffer_dirty(bh);
|
|
+ bh = bh->b_this_page;
|
|
+ offset += 1 << inode->i_blkbits;
|
|
+ } while (bh != head);
|
|
+ }
|
|
+ newly_dirty = !TestSetPageDirty(page);
|
|
+ spin_unlock(&mapping->private_lock);
|
|
+
|
|
+ if (newly_dirty) {
|
|
+ /* sigh - __set_page_dirty() is static, so copy it here, too */
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&mapping->tree_lock, flags);
|
|
+ if (page->mapping) { /* Race with truncate? */
|
|
+ WARN_ON_ONCE(!PageUptodate(page));
|
|
+ account_page_dirtied(page, mapping);
|
|
+ radix_tree_tag_set(&mapping->page_tree,
|
|
+ page_index(page), PAGECACHE_TAG_DIRTY);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
|
|
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
|
|
+ }
|
|
+ return newly_dirty;
|
|
+}
|
|
+
|
|
const struct address_space_operations xfs_address_space_operations = {
|
|
.readpage = xfs_vm_readpage,
|
|
.readpages = xfs_vm_readpages,
|
|
.writepage = xfs_vm_writepage,
|
|
.writepages = xfs_vm_writepages,
|
|
+ .set_page_dirty = xfs_vm_set_page_dirty,
|
|
.releasepage = xfs_vm_releasepage,
|
|
.invalidatepage = xfs_vm_invalidatepage,
|
|
.write_begin = xfs_vm_write_begin,
|
|
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
|
|
index 3314911..645f180 100644
|
|
--- a/fs/xfs/xfs_buf_item.c
|
|
+++ b/fs/xfs/xfs_buf_item.c
|
|
@@ -319,6 +319,10 @@ xfs_buf_item_format(
|
|
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
|
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
|
|
(bip->bli_flags & XFS_BLI_STALE));
|
|
+ ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
|
|
+ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
|
|
+ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
|
|
+
|
|
|
|
/*
|
|
* If it is an inode buffer, transfer the in-memory state to the
|
|
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
|
|
index 7aeb4c8..95f9448 100644
|
|
--- a/fs/xfs/xfs_dquot.c
|
|
+++ b/fs/xfs/xfs_dquot.c
|
|
@@ -1011,7 +1011,8 @@ xfs_qm_dqflush(
|
|
* Get the buffer containing the on-disk dquot
|
|
*/
|
|
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
|
|
- mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
|
|
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp,
|
|
+ &xfs_dquot_buf_ops);
|
|
if (error)
|
|
goto out_unlock;
|
|
|
|
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
|
|
index 64b48ea..f50def6 100644
|
|
--- a/fs/xfs/xfs_file.c
|
|
+++ b/fs/xfs/xfs_file.c
|
|
@@ -302,7 +302,16 @@ xfs_file_aio_read(
|
|
xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
|
|
return ret;
|
|
}
|
|
- truncate_pagecache_range(VFS_I(ip), pos, -1);
|
|
+
|
|
+ /*
|
|
+ * Invalidate whole pages. This can return an error if
|
|
+ * we fail to invalidate a page, but this should never
|
|
+ * happen on XFS. Warn if it does fail.
|
|
+ */
|
|
+ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
|
|
+ pos >> PAGE_CACHE_SHIFT, -1);
|
|
+ WARN_ON_ONCE(ret);
|
|
+ ret = 0;
|
|
}
|
|
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
|
|
}
|
|
@@ -683,7 +692,15 @@ xfs_file_dio_aio_write(
|
|
pos, -1);
|
|
if (ret)
|
|
goto out;
|
|
- truncate_pagecache_range(VFS_I(ip), pos, -1);
|
|
+ /*
|
|
+ * Invalidate whole pages. This can return an error if
|
|
+ * we fail to invalidate a page, but this should never
|
|
+ * happen on XFS. Warn if it does fail.
|
|
+ */
|
|
+ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
|
|
+ pos >> PAGE_CACHE_SHIFT, -1);
|
|
+ WARN_ON_ONCE(ret);
|
|
+ ret = 0;
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
|
|
index 3a137e9..5d90b8d 100644
|
|
--- a/fs/xfs/xfs_inode.c
|
|
+++ b/fs/xfs/xfs_inode.c
|
|
@@ -1946,6 +1946,7 @@ xfs_iunlink(
|
|
agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
|
|
offset = offsetof(xfs_agi_t, agi_unlinked) +
|
|
(sizeof(xfs_agino_t) * bucket_index);
|
|
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
|
|
xfs_trans_log_buf(tp, agibp, offset,
|
|
(offset + sizeof(xfs_agino_t) - 1));
|
|
return 0;
|
|
@@ -2037,6 +2038,7 @@ xfs_iunlink_remove(
|
|
agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
|
|
offset = offsetof(xfs_agi_t, agi_unlinked) +
|
|
(sizeof(xfs_agino_t) * bucket_index);
|
|
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
|
|
xfs_trans_log_buf(tp, agibp, offset,
|
|
(offset + sizeof(xfs_agino_t) - 1));
|
|
} else {
|
|
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
|
|
index b0f4ef7..bf9781e 100644
|
|
--- a/fs/xfs/xfs_log.h
|
|
+++ b/fs/xfs/xfs_log.h
|
|
@@ -24,7 +24,8 @@ struct xfs_log_vec {
|
|
struct xfs_log_iovec *lv_iovecp; /* iovec array */
|
|
struct xfs_log_item *lv_item; /* owner */
|
|
char *lv_buf; /* formatted buffer */
|
|
- int lv_buf_len; /* size of formatted buffer */
|
|
+ int lv_bytes; /* accounted space in buffer */
|
|
+ int lv_buf_len; /* aligned size of buffer */
|
|
int lv_size; /* size of allocated lv */
|
|
};
|
|
|
|
@@ -52,15 +53,21 @@ xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
|
|
return vec->i_addr;
|
|
}
|
|
|
|
+/*
|
|
+ * We need to make sure the next buffer is naturally aligned for the biggest
|
|
+ * basic data type we put into it. We already accounted for this padding when
|
|
+ * sizing the buffer.
|
|
+ *
|
|
+ * However, this padding does not get written into the log, and hence we have to
|
|
+ * track the space used by the log vectors separately to prevent log space hangs
|
|
+ * due to inaccurate accounting (i.e. a leak) of the used log space through the
|
|
+ * CIL context ticket.
|
|
+ */
|
|
static inline void
|
|
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
|
|
{
|
|
- /*
|
|
- * We need to make sure the next buffer is naturally aligned for the
|
|
- * biggest basic data type we put into it. We already accounted for
|
|
- * this when sizing the buffer.
|
|
- */
|
|
lv->lv_buf_len += round_up(len, sizeof(uint64_t));
|
|
+ lv->lv_bytes += len;
|
|
vec->i_len = len;
|
|
}
|
|
|
|
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
|
|
index 4ef6fdb..bcfbaae 100644
|
|
--- a/fs/xfs/xfs_log_cil.c
|
|
+++ b/fs/xfs/xfs_log_cil.c
|
|
@@ -97,7 +97,7 @@ xfs_cil_prepare_item(
|
|
{
|
|
/* Account for the new LV being passed in */
|
|
if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
|
|
- *diff_len += lv->lv_buf_len;
|
|
+ *diff_len += lv->lv_bytes;
|
|
*diff_iovecs += lv->lv_niovecs;
|
|
}
|
|
|
|
@@ -111,7 +111,7 @@ xfs_cil_prepare_item(
|
|
else if (old_lv != lv) {
|
|
ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
|
|
|
|
- *diff_len -= old_lv->lv_buf_len;
|
|
+ *diff_len -= old_lv->lv_bytes;
|
|
*diff_iovecs -= old_lv->lv_niovecs;
|
|
kmem_free(old_lv);
|
|
}
|
|
@@ -239,7 +239,7 @@ xlog_cil_insert_format_items(
|
|
* that the space reservation accounting is correct.
|
|
*/
|
|
*diff_iovecs -= lv->lv_niovecs;
|
|
- *diff_len -= lv->lv_buf_len;
|
|
+ *diff_len -= lv->lv_bytes;
|
|
} else {
|
|
/* allocate new data chunk */
|
|
lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
|
|
@@ -259,6 +259,7 @@ xlog_cil_insert_format_items(
|
|
|
|
/* The allocated data region lies beyond the iovec region */
|
|
lv->lv_buf_len = 0;
|
|
+ lv->lv_bytes = 0;
|
|
lv->lv_buf = (char *)lv + buf_size - nbytes;
|
|
ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
|
|
|
|
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
|
|
index bce53ac..eb26418 100644
|
|
--- a/fs/xfs/xfs_log_recover.c
|
|
+++ b/fs/xfs/xfs_log_recover.c
|
|
@@ -2125,6 +2125,17 @@ xlog_recover_validate_buf_type(
|
|
__uint16_t magic16;
|
|
__uint16_t magicda;
|
|
|
|
+ /*
|
|
+ * We can only do post recovery validation on items on CRC enabled
|
|
+ * fielsystems as we need to know when the buffer was written to be able
|
|
+ * to determine if we should have replayed the item. If we replay old
|
|
+ * metadata over a newer buffer, then it will enter a temporarily
|
|
+ * inconsistent state resulting in verification failures. Hence for now
|
|
+ * just avoid the verification stage for non-crc filesystems
|
|
+ */
|
|
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
|
|
+ return;
|
|
+
|
|
magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
|
|
magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
|
|
magicda = be16_to_cpu(info->magic);
|
|
@@ -2160,8 +2171,6 @@ xlog_recover_validate_buf_type(
|
|
bp->b_ops = &xfs_agf_buf_ops;
|
|
break;
|
|
case XFS_BLFT_AGFL_BUF:
|
|
- if (!xfs_sb_version_hascrc(&mp->m_sb))
|
|
- break;
|
|
if (magic32 != XFS_AGFL_MAGIC) {
|
|
xfs_warn(mp, "Bad AGFL block magic!");
|
|
ASSERT(0);
|
|
@@ -2194,10 +2203,6 @@ xlog_recover_validate_buf_type(
|
|
#endif
|
|
break;
|
|
case XFS_BLFT_DINO_BUF:
|
|
- /*
|
|
- * we get here with inode allocation buffers, not buffers that
|
|
- * track unlinked list changes.
|
|
- */
|
|
if (magic16 != XFS_DINODE_MAGIC) {
|
|
xfs_warn(mp, "Bad INODE block magic!");
|
|
ASSERT(0);
|
|
@@ -2277,8 +2282,6 @@ xlog_recover_validate_buf_type(
|
|
bp->b_ops = &xfs_attr3_leaf_buf_ops;
|
|
break;
|
|
case XFS_BLFT_ATTR_RMT_BUF:
|
|
- if (!xfs_sb_version_hascrc(&mp->m_sb))
|
|
- break;
|
|
if (magic32 != XFS_ATTR3_RMT_MAGIC) {
|
|
xfs_warn(mp, "Bad attr remote magic!");
|
|
ASSERT(0);
|
|
@@ -2385,16 +2388,7 @@ xlog_recover_do_reg_buffer(
|
|
/* Shouldn't be any more regions */
|
|
ASSERT(i == item->ri_total);
|
|
|
|
- /*
|
|
- * We can only do post recovery validation on items on CRC enabled
|
|
- * fielsystems as we need to know when the buffer was written to be able
|
|
- * to determine if we should have replayed the item. If we replay old
|
|
- * metadata over a newer buffer, then it will enter a temporarily
|
|
- * inconsistent state resulting in verification failures. Hence for now
|
|
- * just avoid the verification stage for non-crc filesystems
|
|
- */
|
|
- if (xfs_sb_version_hascrc(&mp->m_sb))
|
|
- xlog_recover_validate_buf_type(mp, bp, buf_f);
|
|
+ xlog_recover_validate_buf_type(mp, bp, buf_f);
|
|
}
|
|
|
|
/*
|
|
@@ -2502,12 +2496,29 @@ xlog_recover_buffer_pass2(
|
|
}
|
|
|
|
/*
|
|
- * recover the buffer only if we get an LSN from it and it's less than
|
|
+ * Recover the buffer only if we get an LSN from it and it's less than
|
|
* the lsn of the transaction we are replaying.
|
|
+ *
|
|
+ * Note that we have to be extremely careful of readahead here.
|
|
+ * Readahead does not attach verfiers to the buffers so if we don't
|
|
+ * actually do any replay after readahead because of the LSN we found
|
|
+ * in the buffer if more recent than that current transaction then we
|
|
+ * need to attach the verifier directly. Failure to do so can lead to
|
|
+ * future recovery actions (e.g. EFI and unlinked list recovery) can
|
|
+ * operate on the buffers and they won't get the verifier attached. This
|
|
+ * can lead to blocks on disk having the correct content but a stale
|
|
+ * CRC.
|
|
+ *
|
|
+ * It is safe to assume these clean buffers are currently up to date.
|
|
+ * If the buffer is dirtied by a later transaction being replayed, then
|
|
+ * the verifier will be reset to match whatever recover turns that
|
|
+ * buffer into.
|
|
*/
|
|
lsn = xlog_recover_get_buf_lsn(mp, bp);
|
|
- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
|
|
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
|
|
+ xlog_recover_validate_buf_type(mp, bp, buf_f);
|
|
goto out_release;
|
|
+ }
|
|
|
|
if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
|
|
error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
|
|
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
|
|
index c6ff3cf5..0eaaa2d 100644
|
|
--- a/fs/xfs/xfs_mount.c
|
|
+++ b/fs/xfs/xfs_mount.c
|
|
@@ -321,7 +321,6 @@ reread:
|
|
* Initialize the mount structure from the superblock.
|
|
*/
|
|
xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
|
|
- xfs_sb_quota_from_disk(sbp);
|
|
|
|
/*
|
|
* If we haven't validated the superblock, do so now before we try
|
|
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
|
|
index 348e4d2..1b271f5 100644
|
|
--- a/fs/xfs/xfs_qm.c
|
|
+++ b/fs/xfs/xfs_qm.c
|
|
@@ -1108,6 +1108,11 @@ xfs_qm_reset_dqcounts(
|
|
*/
|
|
xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
|
|
"xfs_quotacheck");
|
|
+ /*
|
|
+ * Reset type in case we are reusing group quota file for
|
|
+ * project quotas or vice versa
|
|
+ */
|
|
+ ddq->d_flags = type;
|
|
ddq->d_bcount = 0;
|
|
ddq->d_icount = 0;
|
|
ddq->d_rtbcount = 0;
|
|
@@ -1176,6 +1181,12 @@ xfs_qm_dqiter_bufs(
|
|
if (error)
|
|
break;
|
|
|
|
+ /*
|
|
+ * A corrupt buffer might not have a verifier attached, so
|
|
+ * make sure we have the correct one attached before writeback
|
|
+ * occurs.
|
|
+ */
|
|
+ bp->b_ops = &xfs_dquot_buf_ops;
|
|
xfs_qm_reset_dqcounts(mp, bp, firstid, type);
|
|
xfs_buf_delwri_queue(bp, buffer_list);
|
|
xfs_buf_relse(bp);
|
|
@@ -1261,7 +1272,7 @@ xfs_qm_dqiterate(
|
|
xfs_buf_readahead(mp->m_ddev_targp,
|
|
XFS_FSB_TO_DADDR(mp, rablkno),
|
|
mp->m_quotainfo->qi_dqchunklen,
|
|
- NULL);
|
|
+ &xfs_dquot_buf_ops);
|
|
rablkno++;
|
|
}
|
|
}
|
|
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
|
|
index 1e11679..4afd393 100644
|
|
--- a/fs/xfs/xfs_sb.c
|
|
+++ b/fs/xfs/xfs_sb.c
|
|
@@ -397,10 +397,11 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp)
|
|
}
|
|
}
|
|
|
|
-void
|
|
-xfs_sb_from_disk(
|
|
+static void
|
|
+__xfs_sb_from_disk(
|
|
struct xfs_sb *to,
|
|
- xfs_dsb_t *from)
|
|
+ xfs_dsb_t *from,
|
|
+ bool convert_xquota)
|
|
{
|
|
to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
|
|
to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
|
|
@@ -456,6 +457,17 @@ xfs_sb_from_disk(
|
|
to->sb_pad = 0;
|
|
to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
|
|
to->sb_lsn = be64_to_cpu(from->sb_lsn);
|
|
+ /* Convert on-disk flags to in-memory flags? */
|
|
+ if (convert_xquota)
|
|
+ xfs_sb_quota_from_disk(to);
|
|
+}
|
|
+
|
|
+void
|
|
+xfs_sb_from_disk(
|
|
+ struct xfs_sb *to,
|
|
+ xfs_dsb_t *from)
|
|
+{
|
|
+ __xfs_sb_from_disk(to, from, true);
|
|
}
|
|
|
|
static inline void
|
|
@@ -571,7 +583,11 @@ xfs_sb_verify(
|
|
struct xfs_mount *mp = bp->b_target->bt_mount;
|
|
struct xfs_sb sb;
|
|
|
|
- xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
|
|
+ /*
|
|
+ * Use call variant which doesn't convert quota flags from disk
|
|
+ * format, because xfs_mount_validate_sb checks the on-disk flags.
|
|
+ */
|
|
+ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
|
|
|
|
/*
|
|
* Only check the in progress field for the primary superblock as
|
|
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
|
|
index 14e58f2..98d4948 100644
|
|
--- a/fs/xfs/xfs_symlink.c
|
|
+++ b/fs/xfs/xfs_symlink.c
|
|
@@ -102,7 +102,7 @@ xfs_readlink_bmap(
|
|
cur_chunk += sizeof(struct xfs_dsymlink_hdr);
|
|
}
|
|
|
|
- memcpy(link + offset, bp->b_addr, byte_cnt);
|
|
+ memcpy(link + offset, cur_chunk, byte_cnt);
|
|
|
|
pathlen -= byte_cnt;
|
|
offset += byte_cnt;
|
|
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
|
|
index c812c5c..b626f3d 100644
|
|
--- a/fs/xfs/xfs_trans.c
|
|
+++ b/fs/xfs/xfs_trans.c
|
|
@@ -474,6 +474,7 @@ xfs_trans_apply_sb_deltas(
|
|
whole = 1;
|
|
}
|
|
|
|
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
|
|
if (whole)
|
|
/*
|
|
* Log the whole thing, the fields are noncontiguous.
|
|
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
|
|
index 8256eb4..ac46782 100644
|
|
--- a/include/acpi/acpi_bus.h
|
|
+++ b/include/acpi/acpi_bus.h
|
|
@@ -118,6 +118,7 @@ struct acpi_device;
|
|
struct acpi_hotplug_profile {
|
|
struct kobject kobj;
|
|
int (*scan_dependent)(struct acpi_device *adev);
|
|
+ void (*notify_online)(struct acpi_device *adev);
|
|
bool enabled:1;
|
|
bool demand_offline:1;
|
|
};
|
|
@@ -228,7 +229,6 @@ struct acpi_device_pnp {
|
|
acpi_device_name device_name; /* Driver-determined */
|
|
acpi_device_class device_class; /* " */
|
|
union acpi_object *str_obj; /* unicode string for _STR method */
|
|
- unsigned long sun; /* _SUN */
|
|
};
|
|
|
|
#define acpi_device_bid(d) ((d)->pnp.bus_id)
|
|
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
|
|
index fea6773..5d51f27 100644
|
|
--- a/include/acpi/acpixf.h
|
|
+++ b/include/acpi/acpixf.h
|
|
@@ -175,7 +175,7 @@ acpi_status __init acpi_load_tables(void);
|
|
*/
|
|
acpi_status __init acpi_reallocate_root_table(void);
|
|
|
|
-acpi_status __init acpi_find_root_pointer(acpi_size *rsdp_address);
|
|
+acpi_status __init acpi_find_root_pointer(acpi_physical_address * rsdp_address);
|
|
|
|
acpi_status acpi_unload_table_id(acpi_owner_id id);
|
|
|
|
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
|
|
index 68a3ada..69afb57 100644
|
|
--- a/include/acpi/actypes.h
|
|
+++ b/include/acpi/actypes.h
|
|
@@ -198,9 +198,29 @@ typedef int INT32;
|
|
typedef s32 acpi_native_int;
|
|
|
|
typedef u32 acpi_size;
|
|
+
|
|
+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
|
|
+
|
|
+/*
|
|
+ * OSPMs can define this to shrink the size of the structures for 32-bit
|
|
+ * none PAE environment. ASL compiler may always define this to generate
|
|
+ * 32-bit OSPM compliant tables.
|
|
+ */
|
|
typedef u32 acpi_io_address;
|
|
typedef u32 acpi_physical_address;
|
|
|
|
+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
|
|
+
|
|
+/*
|
|
+ * It is reported that, after some calculations, the physical addresses can
|
|
+ * wrap over the 32-bit boundary on 32-bit PAE environment.
|
|
+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
|
|
+ */
|
|
+typedef u64 acpi_io_address;
|
|
+typedef u64 acpi_physical_address;
|
|
+
|
|
+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
|
|
+
|
|
#define ACPI_MAX_PTR ACPI_UINT32_MAX
|
|
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
|
|
|
|
@@ -541,6 +561,7 @@ typedef u64 acpi_integer;
|
|
#define ACPI_NO_ACPI_ENABLE 0x10
|
|
#define ACPI_NO_DEVICE_INIT 0x20
|
|
#define ACPI_NO_OBJECT_INIT 0x40
|
|
+#define ACPI_NO_FACS_INIT 0x80
|
|
|
|
/*
|
|
* Initialization state
|
|
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
|
|
index b402eb6..579912c 100644
|
|
--- a/include/acpi/platform/acenv.h
|
|
+++ b/include/acpi/platform/acenv.h
|
|
@@ -76,6 +76,7 @@
|
|
#define ACPI_LARGE_NAMESPACE_NODE
|
|
#define ACPI_DATA_TABLE_DISASSEMBLY
|
|
#define ACPI_SINGLE_THREADED
|
|
+#define ACPI_32BIT_PHYSICAL_ADDRESS
|
|
#endif
|
|
|
|
/* acpi_exec configuration. Multithreaded with full AML debugger */
|
|
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
|
|
index f1a24b5..b58fd66 100644
|
|
--- a/include/asm-generic/sections.h
|
|
+++ b/include/asm-generic/sections.h
|
|
@@ -3,6 +3,8 @@
|
|
|
|
/* References to section boundaries */
|
|
|
|
+#include <linux/compiler.h>
|
|
+
|
|
/*
|
|
* Usage guidelines:
|
|
* _text, _data: architecture specific, don't use them in arch-independent code
|
|
@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
|
|
/* Start and end of .ctors section - used for constructor calls. */
|
|
extern char __ctors_start[], __ctors_end[];
|
|
|
|
+extern __visible const void __nosave_begin, __nosave_end;
|
|
+
|
|
/* function descriptor handling (if any). Override
|
|
* in asm/sections.h */
|
|
#ifndef dereference_function_descriptor
|
|
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
|
|
index 49376ae..7e9a0a6 100644
|
|
--- a/include/drm/drm_pciids.h
|
|
+++ b/include/drm/drm_pciids.h
|
|
@@ -17,6 +17,7 @@
|
|
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
+ {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
|
|
@@ -73,7 +74,6 @@
|
|
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
|
|
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
|
|
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
|
|
- {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
|
|
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
|
|
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
|
|
{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
|
|
@@ -164,23 +164,30 @@
|
|
{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
@@ -297,6 +304,7 @@
|
|
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
+ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
|
|
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
|
|
index 002a285..7448edf 100644
|
|
--- a/include/dt-bindings/pinctrl/dra.h
|
|
+++ b/include/dt-bindings/pinctrl/dra.h
|
|
@@ -30,7 +30,8 @@
|
|
#define MUX_MODE14 0xe
|
|
#define MUX_MODE15 0xf
|
|
|
|
-#define PULL_ENA (1 << 16)
|
|
+#define PULL_ENA (0 << 16)
|
|
+#define PULL_DIS (1 << 16)
|
|
#define PULL_UP (1 << 17)
|
|
#define INPUT_EN (1 << 18)
|
|
#define SLEWCONTROL (1 << 19)
|
|
@@ -38,10 +39,10 @@
|
|
#define WAKEUP_EVENT (1 << 25)
|
|
|
|
/* Active pin states */
|
|
-#define PIN_OUTPUT 0
|
|
-#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
|
|
-#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
|
|
-#define PIN_INPUT INPUT_EN
|
|
+#define PIN_OUTPUT (0 | PULL_DIS)
|
|
+#define PIN_OUTPUT_PULLUP (PULL_UP)
|
|
+#define PIN_OUTPUT_PULLDOWN (0)
|
|
+#define PIN_INPUT (INPUT_EN | PULL_DIS)
|
|
#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
|
|
#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
|
|
#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
|
|
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
|
|
index 6d9aedd..327b155 100644
|
|
--- a/include/kvm/arm_arch_timer.h
|
|
+++ b/include/kvm/arm_arch_timer.h
|
|
@@ -60,7 +60,8 @@ struct arch_timer_cpu {
|
|
|
|
#ifdef CONFIG_KVM_ARM_TIMER
|
|
int kvm_timer_hyp_init(void);
|
|
-int kvm_timer_init(struct kvm *kvm);
|
|
+void kvm_timer_enable(struct kvm *kvm);
|
|
+void kvm_timer_init(struct kvm *kvm);
|
|
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
|
|
const struct kvm_irq_level *irq);
|
|
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
|
|
@@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(void)
|
|
return 0;
|
|
};
|
|
|
|
-static inline int kvm_timer_init(struct kvm *kvm)
|
|
-{
|
|
- return 0;
|
|
-}
|
|
-
|
|
+static inline void kvm_timer_enable(struct kvm *kvm) {}
|
|
+static inline void kvm_timer_init(struct kvm *kvm) {}
|
|
static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
|
|
const struct kvm_irq_level *irq) {}
|
|
static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
|
|
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
|
|
index cd80aa8..77af621 100644
|
|
--- a/include/linux/acpi.h
|
|
+++ b/include/linux/acpi.h
|
|
@@ -402,6 +402,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
|
|
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
|
|
|
|
extern void acpi_early_init(void);
|
|
+extern void acpi_subsystem_init(void);
|
|
|
|
extern int acpi_nvs_register(__u64 start, __u64 size);
|
|
|
|
@@ -436,6 +437,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
|
|
}
|
|
|
|
static inline void acpi_early_init(void) { }
|
|
+static inline void acpi_subsystem_init(void) { }
|
|
|
|
static inline int early_acpi_boot_init(void)
|
|
{
|
|
diff --git a/include/linux/audit.h b/include/linux/audit.h
|
|
index ec1464d..419b7d7 100644
|
|
--- a/include/linux/audit.h
|
|
+++ b/include/linux/audit.h
|
|
@@ -47,6 +47,7 @@ struct sk_buff;
|
|
|
|
struct audit_krule {
|
|
int vers_ops;
|
|
+ u32 pflags;
|
|
u32 flags;
|
|
u32 listnr;
|
|
u32 action;
|
|
@@ -64,6 +65,9 @@ struct audit_krule {
|
|
u64 prio;
|
|
};
|
|
|
|
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
|
|
+#define AUDIT_LOGINUID_LEGACY 0x1
|
|
+
|
|
struct audit_field {
|
|
u32 type;
|
|
u32 val;
|
|
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
|
|
index be5fd38..5d858e0 100644
|
|
--- a/include/linux/bitops.h
|
|
+++ b/include/linux/bitops.h
|
|
@@ -18,8 +18,11 @@
|
|
* position @h. For example
|
|
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
|
|
*/
|
|
-#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
|
|
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
|
|
+#define GENMASK(h, l) \
|
|
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
|
|
+
|
|
+#define GENMASK_ULL(h, l) \
|
|
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
|
|
|
|
extern unsigned int __sw_hweight8(unsigned int w);
|
|
extern unsigned int __sw_hweight16(unsigned int w);
|
|
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
|
|
index bbc3a6c..33fd7ff 100644
|
|
--- a/include/linux/blk_types.h
|
|
+++ b/include/linux/blk_types.h
|
|
@@ -180,7 +180,9 @@ enum rq_flag_bits {
|
|
__REQ_ELVPRIV, /* elevator private data attached */
|
|
__REQ_FAILED, /* set if the request failed */
|
|
__REQ_QUIET, /* don't worry about errors */
|
|
- __REQ_PREEMPT, /* set for "ide_preempt" requests */
|
|
+ __REQ_PREEMPT, /* set for "ide_preempt" requests and also
|
|
+ for requests for which the SCSI "quiesce"
|
|
+ state must be ignored. */
|
|
__REQ_ALLOCED, /* request came from our alloc pool */
|
|
__REQ_COPY_USER, /* contains copies of user pages */
|
|
__REQ_FLUSH_SEQ, /* request for flush sequence */
|
|
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
|
|
index 4afa4f8..a693c6d 100644
|
|
--- a/include/linux/blkdev.h
|
|
+++ b/include/linux/blkdev.h
|
|
@@ -1232,10 +1232,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
|
|
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
|
|
{
|
|
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
|
|
- unsigned int alignment = (sector << 9) & (granularity - 1);
|
|
+ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
|
|
|
|
- return (granularity + lim->alignment_offset - alignment)
|
|
- & (granularity - 1);
|
|
+ return (granularity + lim->alignment_offset - alignment) % granularity;
|
|
}
|
|
|
|
static inline int bdev_alignment_offset(struct block_device *bdev)
|
|
diff --git a/include/linux/capability.h b/include/linux/capability.h
|
|
index 84b13ad..aa93e5e 100644
|
|
--- a/include/linux/capability.h
|
|
+++ b/include/linux/capability.h
|
|
@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
|
|
# error Fix up hand-coded capability macro initializers
|
|
#else /* HAND-CODED capability initializers */
|
|
|
|
+#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
|
|
+#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
|
|
+
|
|
# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
|
|
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
|
|
+# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
|
|
# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
|
|
| CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
|
|
CAP_FS_MASK_B1 } })
|
|
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
|
|
index 67301a4..879065d 100644
|
|
--- a/include/linux/clocksource.h
|
|
+++ b/include/linux/clocksource.h
|
|
@@ -289,7 +289,7 @@ extern struct clocksource* clocksource_get_next(void);
|
|
extern void clocksource_change_rating(struct clocksource *cs, int rating);
|
|
extern void clocksource_suspend(void);
|
|
extern void clocksource_resume(void);
|
|
-extern struct clocksource * __init __weak clocksource_default_clock(void);
|
|
+extern struct clocksource * __init clocksource_default_clock(void);
|
|
extern void clocksource_mark_unstable(struct clocksource *cs);
|
|
|
|
extern u64
|
|
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
|
|
index 7e1c76e..01e3132 100644
|
|
--- a/include/linux/compaction.h
|
|
+++ b/include/linux/compaction.h
|
|
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
|
|
extern int fragmentation_index(struct zone *zone, unsigned int order);
|
|
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
|
|
int order, gfp_t gfp_mask, nodemask_t *mask,
|
|
- bool sync, bool *contended);
|
|
+ enum migrate_mode mode, bool *contended);
|
|
extern void compact_pgdat(pg_data_t *pgdat, int order);
|
|
extern void reset_isolation_suitable(pg_data_t *pgdat);
|
|
extern unsigned long compaction_suitable(struct zone *zone, int order);
|
|
@@ -91,7 +91,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
|
|
#else
|
|
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
|
|
int order, gfp_t gfp_mask, nodemask_t *nodemask,
|
|
- bool sync, bool *contended)
|
|
+ enum migrate_mode mode, bool *contended)
|
|
{
|
|
return COMPACT_CONTINUE;
|
|
}
|
|
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
|
|
index 50fcbb0..d133817 100644
|
|
--- a/include/linux/cpuidle.h
|
|
+++ b/include/linux/cpuidle.h
|
|
@@ -69,7 +69,6 @@ struct cpuidle_device {
|
|
unsigned int cpu;
|
|
|
|
int last_residency;
|
|
- int state_count;
|
|
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
|
|
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
|
|
struct cpuidle_driver_kobj *kobj_driver;
|
|
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
|
|
index 3fe661f..ade2390 100644
|
|
--- a/include/linux/cpuset.h
|
|
+++ b/include/linux/cpuset.h
|
|
@@ -12,10 +12,31 @@
|
|
#include <linux/cpumask.h>
|
|
#include <linux/nodemask.h>
|
|
#include <linux/mm.h>
|
|
+#include <linux/jump_label.h>
|
|
|
|
#ifdef CONFIG_CPUSETS
|
|
|
|
-extern int number_of_cpusets; /* How many cpusets are defined in system? */
|
|
+extern struct static_key cpusets_enabled_key;
|
|
+static inline bool cpusets_enabled(void)
|
|
+{
|
|
+ return static_key_false(&cpusets_enabled_key);
|
|
+}
|
|
+
|
|
+static inline int nr_cpusets(void)
|
|
+{
|
|
+ /* jump label reference count + the top-level cpuset */
|
|
+ return static_key_count(&cpusets_enabled_key) + 1;
|
|
+}
|
|
+
|
|
+static inline void cpuset_inc(void)
|
|
+{
|
|
+ static_key_slow_inc(&cpusets_enabled_key);
|
|
+}
|
|
+
|
|
+static inline void cpuset_dec(void)
|
|
+{
|
|
+ static_key_slow_dec(&cpusets_enabled_key);
|
|
+}
|
|
|
|
extern int cpuset_init(void);
|
|
extern void cpuset_init_smp(void);
|
|
@@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
|
|
|
|
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
|
|
{
|
|
- return number_of_cpusets <= 1 ||
|
|
+ return nr_cpusets() <= 1 ||
|
|
__cpuset_node_allowed_softwall(node, gfp_mask);
|
|
}
|
|
|
|
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
|
|
{
|
|
- return number_of_cpusets <= 1 ||
|
|
+ return nr_cpusets() <= 1 ||
|
|
__cpuset_node_allowed_hardwall(node, gfp_mask);
|
|
}
|
|
|
|
@@ -87,25 +108,26 @@ extern void rebuild_sched_domains(void);
|
|
extern void cpuset_print_task_mems_allowed(struct task_struct *p);
|
|
|
|
/*
|
|
- * get_mems_allowed is required when making decisions involving mems_allowed
|
|
- * such as during page allocation. mems_allowed can be updated in parallel
|
|
- * and depending on the new value an operation can fail potentially causing
|
|
- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
|
|
- * prevents these artificial failures.
|
|
+ * read_mems_allowed_begin is required when making decisions involving
|
|
+ * mems_allowed such as during page allocation. mems_allowed can be updated in
|
|
+ * parallel and depending on the new value an operation can fail potentially
|
|
+ * causing process failure. A retry loop with read_mems_allowed_begin and
|
|
+ * read_mems_allowed_retry prevents these artificial failures.
|
|
*/
|
|
-static inline unsigned int get_mems_allowed(void)
|
|
+static inline unsigned int read_mems_allowed_begin(void)
|
|
{
|
|
return read_seqcount_begin(¤t->mems_allowed_seq);
|
|
}
|
|
|
|
/*
|
|
- * If this returns false, the operation that took place after get_mems_allowed
|
|
- * may have failed. It is up to the caller to retry the operation if
|
|
+ * If this returns true, the operation that took place after
|
|
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
|
|
+ * update of mems_allowed. It is up to the caller to retry the operation if
|
|
* appropriate.
|
|
*/
|
|
-static inline bool put_mems_allowed(unsigned int seq)
|
|
+static inline bool read_mems_allowed_retry(unsigned int seq)
|
|
{
|
|
- return !read_seqcount_retry(¤t->mems_allowed_seq, seq);
|
|
+ return read_seqcount_retry(¤t->mems_allowed_seq, seq);
|
|
}
|
|
|
|
static inline void set_mems_allowed(nodemask_t nodemask)
|
|
@@ -123,6 +145,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
|
|
|
|
#else /* !CONFIG_CPUSETS */
|
|
|
|
+static inline bool cpusets_enabled(void) { return false; }
|
|
+
|
|
static inline int cpuset_init(void) { return 0; }
|
|
static inline void cpuset_init_smp(void) {}
|
|
|
|
@@ -225,14 +249,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
|
|
{
|
|
}
|
|
|
|
-static inline unsigned int get_mems_allowed(void)
|
|
+static inline unsigned int read_mems_allowed_begin(void)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
-static inline bool put_mems_allowed(unsigned int seq)
|
|
+static inline bool read_mems_allowed_retry(unsigned int seq)
|
|
{
|
|
- return true;
|
|
+ return false;
|
|
}
|
|
|
|
#endif /* !CONFIG_CPUSETS */
|
|
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
|
|
index 7032518..60023e5 100644
|
|
--- a/include/linux/crash_dump.h
|
|
+++ b/include/linux/crash_dump.h
|
|
@@ -14,14 +14,13 @@
|
|
extern unsigned long long elfcorehdr_addr;
|
|
extern unsigned long long elfcorehdr_size;
|
|
|
|
-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
|
|
- unsigned long long *size);
|
|
-extern void __weak elfcorehdr_free(unsigned long long addr);
|
|
-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
|
|
-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
|
|
-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
|
|
- unsigned long from, unsigned long pfn,
|
|
- unsigned long size, pgprot_t prot);
|
|
+extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
|
|
+extern void elfcorehdr_free(unsigned long long addr);
|
|
+extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
|
|
+extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
|
|
+extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
|
|
+ unsigned long from, unsigned long pfn,
|
|
+ unsigned long size, pgprot_t prot);
|
|
|
|
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
|
|
unsigned long, int);
|
|
diff --git a/include/linux/cred.h b/include/linux/cred.h
|
|
index 04421e8..6c58dd7 100644
|
|
--- a/include/linux/cred.h
|
|
+++ b/include/linux/cred.h
|
|
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
|
|
extern int set_current_groups(struct group_info *);
|
|
extern int set_groups(struct cred *, struct group_info *);
|
|
extern int groups_search(const struct group_info *, kgid_t);
|
|
+extern bool may_setgroups(void);
|
|
|
|
/* access the groups "array" with this macro */
|
|
#define GROUP_AT(gi, i) \
|
|
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
|
|
index b92eadf..2b00d92 100644
|
|
--- a/include/linux/crypto.h
|
|
+++ b/include/linux/crypto.h
|
|
@@ -26,6 +26,19 @@
|
|
#include <linux/uaccess.h>
|
|
|
|
/*
|
|
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
|
|
+ * arbitrary modules to be loaded. Loading from userspace may still need the
|
|
+ * unprefixed names, so retains those aliases as well.
|
|
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
|
|
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
|
|
+ * expands twice on the same line. Instead, use a separate base name for the
|
|
+ * alias.
|
|
+ */
|
|
+#define MODULE_ALIAS_CRYPTO(name) \
|
|
+ __MODULE_INFO(alias, alias_userspace, name); \
|
|
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
|
|
+
|
|
+/*
|
|
* Algorithm masks and types.
|
|
*/
|
|
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
|
|
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
|
|
index bf72e9a..2a23ecb 100644
|
|
--- a/include/linux/dcache.h
|
|
+++ b/include/linux/dcache.h
|
|
@@ -124,15 +124,15 @@ struct dentry {
|
|
void *d_fsdata; /* fs-specific data */
|
|
|
|
struct list_head d_lru; /* LRU list */
|
|
+ struct list_head d_child; /* child of parent list */
|
|
+ struct list_head d_subdirs; /* our children */
|
|
/*
|
|
- * d_child and d_rcu can share memory
|
|
+ * d_alias and d_rcu can share memory
|
|
*/
|
|
union {
|
|
- struct list_head d_child; /* child of parent list */
|
|
+ struct hlist_node d_alias; /* inode alias list */
|
|
struct rcu_head d_rcu;
|
|
} d_u;
|
|
- struct list_head d_subdirs; /* our children */
|
|
- struct hlist_node d_alias; /* inode alias list */
|
|
};
|
|
|
|
/*
|
|
@@ -221,6 +221,8 @@ struct dentry_operations {
|
|
#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */
|
|
#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
|
|
|
|
+#define DCACHE_MAY_FREE 0x00800000
|
|
+
|
|
extern seqlock_t rename_lock;
|
|
|
|
static inline int dname_external(const struct dentry *dentry)
|
|
@@ -429,7 +431,7 @@ static inline unsigned __d_entry_type(const struct dentry *dentry)
|
|
return dentry->d_flags & DCACHE_ENTRY_TYPE;
|
|
}
|
|
|
|
-static inline bool d_is_directory(const struct dentry *dentry)
|
|
+static inline bool d_can_lookup(const struct dentry *dentry)
|
|
{
|
|
return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
|
|
}
|
|
@@ -439,6 +441,11 @@ static inline bool d_is_autodir(const struct dentry *dentry)
|
|
return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
|
|
}
|
|
|
|
+static inline bool d_is_dir(const struct dentry *dentry)
|
|
+{
|
|
+ return d_can_lookup(dentry) || d_is_autodir(dentry);
|
|
+}
|
|
+
|
|
static inline bool d_is_symlink(const struct dentry *dentry)
|
|
{
|
|
return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
|
|
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
|
|
index 1c804b0..7ee1774 100644
|
|
--- a/include/linux/fsnotify.h
|
|
+++ b/include/linux/fsnotify.h
|
|
@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
|
|
new_dir_mask |= FS_ISDIR;
|
|
}
|
|
|
|
- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
|
|
- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
|
|
+ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
|
|
+ fs_cookie);
|
|
+ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
|
|
+ fs_cookie);
|
|
|
|
if (target)
|
|
fsnotify_link_count(target);
|
|
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
|
|
index 39b81dc..3824ac6 100644
|
|
--- a/include/linux/gfp.h
|
|
+++ b/include/linux/gfp.h
|
|
@@ -369,8 +369,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
|
|
|
|
extern void __free_pages(struct page *page, unsigned int order);
|
|
extern void free_pages(unsigned long addr, unsigned int order);
|
|
-extern void free_hot_cold_page(struct page *page, int cold);
|
|
-extern void free_hot_cold_page_list(struct list_head *list, int cold);
|
|
+extern void free_hot_cold_page(struct page *page, bool cold);
|
|
+extern void free_hot_cold_page_list(struct list_head *list, bool cold);
|
|
|
|
extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
|
|
extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
|
|
diff --git a/include/linux/hid.h b/include/linux/hid.h
|
|
index 31b9d29..00c88fc 100644
|
|
--- a/include/linux/hid.h
|
|
+++ b/include/linux/hid.h
|
|
@@ -286,6 +286,7 @@ struct hid_item {
|
|
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
|
|
#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
|
|
#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
|
|
+#define HID_QUIRK_ALWAYS_POLL 0x00000400
|
|
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
|
|
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
|
|
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
|
|
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
|
|
index b826239..63579cb 100644
|
|
--- a/include/linux/huge_mm.h
|
|
+++ b/include/linux/huge_mm.h
|
|
@@ -93,10 +93,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
|
|
#endif /* CONFIG_DEBUG_VM */
|
|
|
|
extern unsigned long transparent_hugepage_flags;
|
|
-extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
- pmd_t *dst_pmd, pmd_t *src_pmd,
|
|
- struct vm_area_struct *vma,
|
|
- unsigned long addr, unsigned long end);
|
|
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
|
|
static inline int split_huge_page(struct page *page)
|
|
{
|
|
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
|
|
index bd1e9bc..42b05c4 100644
|
|
--- a/include/linux/hugetlb.h
|
|
+++ b/include/linux/hugetlb.h
|
|
@@ -400,6 +400,16 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
|
|
return &mm->page_table_lock;
|
|
}
|
|
|
|
+static inline bool hugepages_supported(void)
|
|
+{
|
|
+ /*
|
|
+ * Some platform decide whether they support huge pages at boot
|
|
+ * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
|
|
+ * there is no such support
|
|
+ */
|
|
+ return HPAGE_SHIFT != 0;
|
|
+}
|
|
+
|
|
#else /* CONFIG_HUGETLB_PAGE */
|
|
struct hstate {};
|
|
#define alloc_huge_page_node(h, nid) NULL
|
|
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
|
|
index 72ba6f5..2abe67b 100644
|
|
--- a/include/linux/if_vlan.h
|
|
+++ b/include/linux/if_vlan.h
|
|
@@ -186,7 +186,6 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
|
|
}
|
|
|
|
extern bool vlan_do_receive(struct sk_buff **skb);
|
|
-extern struct sk_buff *vlan_untag(struct sk_buff *skb);
|
|
|
|
extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
|
|
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
|
|
@@ -228,11 +227,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb)
|
|
return false;
|
|
}
|
|
|
|
-static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
|
|
-{
|
|
- return skb;
|
|
-}
|
|
-
|
|
static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
|
|
{
|
|
return 0;
|
|
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
|
|
index 8bbd7bc..03fa332 100644
|
|
--- a/include/linux/iio/events.h
|
|
+++ b/include/linux/iio/events.h
|
|
@@ -72,7 +72,7 @@ struct iio_event_data {
|
|
|
|
#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
|
|
|
|
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
|
|
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
|
|
|
|
#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
|
|
|
|
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
|
|
index 75a8a20..c2fb6f3 100644
|
|
--- a/include/linux/iio/iio.h
|
|
+++ b/include/linux/iio/iio.h
|
|
@@ -593,6 +593,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
|
|
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
|
|
|
|
/**
|
|
+ * IIO_RAD_TO_DEGREE() - Convert rad to degree
|
|
+ * @rad: A value in rad
|
|
+ *
|
|
+ * Returns the given value converted from rad to degree
|
|
+ */
|
|
+#define IIO_RAD_TO_DEGREE(rad) \
|
|
+ (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
|
|
+
|
|
+/**
|
|
* IIO_G_TO_M_S_2() - Convert g to meter / second**2
|
|
* @g: A value in g
|
|
*
|
|
@@ -600,4 +609,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
|
|
*/
|
|
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
|
|
|
|
+/**
|
|
+ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
|
|
+ * @ms2: A value in meter / second**2
|
|
+ *
|
|
+ * Returns the given value converted from meter / second**2 to g
|
|
+ */
|
|
+#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
|
|
+
|
|
#endif /* _INDUSTRIAL_IO_H_ */
|
|
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
|
|
index 369cf2c..68f46cd 100644
|
|
--- a/include/linux/iio/trigger.h
|
|
+++ b/include/linux/iio/trigger.h
|
|
@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
|
|
put_device(&trig->dev);
|
|
}
|
|
|
|
-static inline void iio_trigger_get(struct iio_trigger *trig)
|
|
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
|
|
{
|
|
get_device(&trig->dev);
|
|
__module_get(trig->ops->owner);
|
|
+
|
|
+ return trig;
|
|
}
|
|
|
|
/**
|
|
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
|
|
index 0068708..0a21fbe 100644
|
|
--- a/include/linux/inetdevice.h
|
|
+++ b/include/linux/inetdevice.h
|
|
@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
|
|
static __inline__ __be32 inet_make_mask(int logmask)
|
|
{
|
|
if (logmask)
|
|
- return htonl(~((1<<(32-logmask))-1));
|
|
+ return htonl(~((1U<<(32-logmask))-1));
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
|
|
index d5b50a1..e1fb0f6 100644
|
|
--- a/include/linux/jbd2.h
|
|
+++ b/include/linux/jbd2.h
|
|
@@ -159,7 +159,11 @@ typedef struct journal_header_s
|
|
* journal_block_tag (in the descriptor). The other h_chksum* fields are
|
|
* not used.
|
|
*
|
|
- * Checksum v1 and v2 are mutually exclusive features.
|
|
+ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
|
|
+ * journal_block_tag3_t to store a full 32-bit checksum. Everything else
|
|
+ * is the same as v2.
|
|
+ *
|
|
+ * Checksum v1, v2, and v3 are mutually exclusive features.
|
|
*/
|
|
struct commit_header {
|
|
__be32 h_magic;
|
|
@@ -179,6 +183,14 @@ struct commit_header {
|
|
* raw struct shouldn't be used for pointer math or sizeof() - use
|
|
* journal_tag_bytes(journal) instead to compute this.
|
|
*/
|
|
+typedef struct journal_block_tag3_s
|
|
+{
|
|
+ __be32 t_blocknr; /* The on-disk block number */
|
|
+ __be32 t_flags; /* See below */
|
|
+ __be32 t_blocknr_high; /* most-significant high 32bits. */
|
|
+ __be32 t_checksum; /* crc32c(uuid+seq+block) */
|
|
+} journal_block_tag3_t;
|
|
+
|
|
typedef struct journal_block_tag_s
|
|
{
|
|
__be32 t_blocknr; /* The on-disk block number */
|
|
@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
|
|
__be32 t_blocknr_high; /* most-significant high 32bits. */
|
|
} journal_block_tag_t;
|
|
|
|
-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
|
|
-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
|
|
-
|
|
/* Tail of descriptor block, for checksumming */
|
|
struct jbd2_journal_block_tail {
|
|
__be32 t_checksum; /* crc32c(uuid+descr_block) */
|
|
@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
|
|
#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
|
|
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
|
|
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
|
|
+#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
|
|
|
|
/* Features known to this kernel version: */
|
|
#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
|
|
@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
|
|
#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
|
|
JBD2_FEATURE_INCOMPAT_64BIT | \
|
|
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
|
|
- JBD2_FEATURE_INCOMPAT_CSUM_V2)
|
|
+ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
|
|
+ JBD2_FEATURE_INCOMPAT_CSUM_V3)
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
@@ -1024,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
|
|
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
|
|
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
|
|
unsigned long *block);
|
|
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
|
|
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
|
|
void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
|
|
|
|
/* Commit management */
|
|
@@ -1146,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal);
|
|
extern int jbd2_journal_wipe (journal_t *, int);
|
|
extern int jbd2_journal_skip_recovery (journal_t *);
|
|
extern void jbd2_journal_update_sb_errno(journal_t *);
|
|
-extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
|
|
+extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
|
|
unsigned long, int);
|
|
extern void __jbd2_journal_abort_hard (journal_t *);
|
|
extern void jbd2_journal_abort (journal_t *, int);
|
|
@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
|
|
extern int jbd2_journal_blocks_per_page(struct inode *inode);
|
|
extern size_t journal_tag_bytes(journal_t *journal);
|
|
|
|
+static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
|
|
+{
|
|
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
|
|
+ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
|
|
+ return 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
|
|
* We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
|
|
* transaction control blocks.
|
|
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
|
|
index 1f44466..c367cbd 100644
|
|
--- a/include/linux/jiffies.h
|
|
+++ b/include/linux/jiffies.h
|
|
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
|
|
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
|
|
#endif
|
|
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
|
|
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
|
|
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
|
|
TICK_NSEC -1) / (u64)TICK_NSEC))
|
|
|
|
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
|
|
TICK_NSEC -1) / (u64)TICK_NSEC))
|
|
-#define USEC_CONVERSION \
|
|
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
|
|
- TICK_NSEC -1) / (u64)TICK_NSEC))
|
|
-/*
|
|
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
|
|
- * for more details. It is the scaled resolution rounding value. Note
|
|
- * that it is a 64-bit value. Since, when it is applied, we are already
|
|
- * in jiffies (albit scaled), it is nothing but the bits we will shift
|
|
- * off.
|
|
- */
|
|
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
|
|
/*
|
|
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
|
|
* into seconds. The 64-bit case will overflow if we are not careful,
|
|
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
|
|
index 5c1dfb2..784304b 100644
|
|
--- a/include/linux/jump_label.h
|
|
+++ b/include/linux/jump_label.h
|
|
@@ -69,6 +69,10 @@ struct static_key {
|
|
|
|
# include <asm/jump_label.h>
|
|
# define HAVE_JUMP_LABEL
|
|
+#else
|
|
+struct static_key {
|
|
+ atomic_t enabled;
|
|
+};
|
|
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
|
|
|
|
enum jump_label_type {
|
|
@@ -79,6 +83,12 @@ enum jump_label_type {
|
|
struct module;
|
|
|
|
#include <linux/atomic.h>
|
|
+
|
|
+static inline int static_key_count(struct static_key *key)
|
|
+{
|
|
+ return atomic_read(&key->enabled);
|
|
+}
|
|
+
|
|
#ifdef HAVE_JUMP_LABEL
|
|
|
|
#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
|
|
@@ -134,10 +144,6 @@ extern void jump_label_apply_nops(struct module *mod);
|
|
|
|
#else /* !HAVE_JUMP_LABEL */
|
|
|
|
-struct static_key {
|
|
- atomic_t enabled;
|
|
-};
|
|
-
|
|
static __always_inline void jump_label_init(void)
|
|
{
|
|
static_key_initialized = true;
|
|
@@ -145,14 +151,14 @@ static __always_inline void jump_label_init(void)
|
|
|
|
static __always_inline bool static_key_false(struct static_key *key)
|
|
{
|
|
- if (unlikely(atomic_read(&key->enabled) > 0))
|
|
+ if (unlikely(static_key_count(key) > 0))
|
|
return true;
|
|
return false;
|
|
}
|
|
|
|
static __always_inline bool static_key_true(struct static_key *key)
|
|
{
|
|
- if (likely(atomic_read(&key->enabled) > 0))
|
|
+ if (likely(static_key_count(key) > 0))
|
|
return true;
|
|
return false;
|
|
}
|
|
@@ -194,7 +200,7 @@ static inline int jump_label_apply_nops(struct module *mod)
|
|
|
|
static inline bool static_key_enabled(struct static_key *key)
|
|
{
|
|
- return (atomic_read(&key->enabled) > 0);
|
|
+ return static_key_count(key) > 0;
|
|
}
|
|
|
|
#endif /* _LINUX_JUMP_LABEL_H */
|
|
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
|
|
index 51c72be..4b2053a 100644
|
|
--- a/include/linux/kernel_stat.h
|
|
+++ b/include/linux/kernel_stat.h
|
|
@@ -74,6 +74,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
|
|
* Number of interrupts per specific IRQ source, since bootup
|
|
*/
|
|
extern unsigned int kstat_irqs(unsigned int irq);
|
|
+extern unsigned int kstat_irqs_usr(unsigned int irq);
|
|
|
|
/*
|
|
* Number of interrupts per cpu, since bootup
|
|
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
|
|
index 6b06d37..e465bb1 100644
|
|
--- a/include/linux/kgdb.h
|
|
+++ b/include/linux/kgdb.h
|
|
@@ -283,7 +283,7 @@ struct kgdb_io {
|
|
|
|
extern struct kgdb_arch arch_kgdb_ops;
|
|
|
|
-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
|
|
+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
|
|
|
|
#ifdef CONFIG_SERIAL_KGDB_NMI
|
|
extern int kgdb_register_nmi_console(void);
|
|
diff --git a/include/linux/libata.h b/include/linux/libata.h
|
|
index 3fee55e..189c9ff 100644
|
|
--- a/include/linux/libata.h
|
|
+++ b/include/linux/libata.h
|
|
@@ -204,6 +204,7 @@ enum {
|
|
ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
|
|
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
|
|
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
|
|
+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
|
|
|
|
/* struct ata_port flags */
|
|
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
|
|
@@ -307,6 +308,12 @@ enum {
|
|
*/
|
|
ATA_TMOUT_PMP_SRST_WAIT = 5000,
|
|
|
|
+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
|
|
+ * be a spurious PHY event, so ignore the first PHY event that
|
|
+ * occurs within 10s after the policy change.
|
|
+ */
|
|
+ ATA_TMOUT_SPURIOUS_PHY = 10000,
|
|
+
|
|
/* ATA bus states */
|
|
BUS_UNKNOWN = 0,
|
|
BUS_DMA = 1,
|
|
@@ -421,6 +428,7 @@ enum {
|
|
ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
|
|
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
|
|
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
|
|
+ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
|
|
|
|
/* DMA mask for user DMA control: User visible values; DO NOT
|
|
renumber */
|
|
@@ -593,6 +601,7 @@ struct ata_host {
|
|
struct device *dev;
|
|
void __iomem * const *iomap;
|
|
unsigned int n_ports;
|
|
+ unsigned int n_tags; /* nr of NCQ tags */
|
|
void *private_data;
|
|
struct ata_port_operations *ops;
|
|
unsigned long flags;
|
|
@@ -784,6 +793,8 @@ struct ata_link {
|
|
struct ata_eh_context eh_context;
|
|
|
|
struct ata_device device[ATA_MAX_DEVICES];
|
|
+
|
|
+ unsigned long last_lpm_change; /* when last LPM change happened */
|
|
};
|
|
#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
|
|
#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
|
|
@@ -1200,6 +1211,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
|
|
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
|
|
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
|
|
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
|
|
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
|
|
|
|
extern int ata_cable_40wire(struct ata_port *ap);
|
|
extern int ata_cable_80wire(struct ata_port *ap);
|
|
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
|
|
index 345b8c5..550c88f 100644
|
|
--- a/include/linux/mbus.h
|
|
+++ b/include/linux/mbus.h
|
|
@@ -73,6 +73,6 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size);
|
|
int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
|
|
size_t mbus_size, phys_addr_t sdram_phys_base,
|
|
size_t sdram_size);
|
|
-int mvebu_mbus_dt_init(void);
|
|
+int mvebu_mbus_dt_init(bool is_coherent);
|
|
|
|
#endif /* __LINUX_MBUS_H */
|
|
diff --git a/include/linux/memory.h b/include/linux/memory.h
|
|
index bb7384e..8b8d8d1 100644
|
|
--- a/include/linux/memory.h
|
|
+++ b/include/linux/memory.h
|
|
@@ -35,7 +35,7 @@ struct memory_block {
|
|
};
|
|
|
|
int arch_get_memory_phys_device(unsigned long start_pfn);
|
|
-unsigned long __weak memory_block_size_bytes(void);
|
|
+unsigned long memory_block_size_bytes(void);
|
|
|
|
/* These states are exposed to userspace as text strings in sysfs */
|
|
#define MEM_ONLINE (1<<0) /* exposed to userspace */
|
|
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
|
|
index 84a31ad..a2901c4 100644
|
|
--- a/include/linux/migrate.h
|
|
+++ b/include/linux/migrate.h
|
|
@@ -5,7 +5,9 @@
|
|
#include <linux/mempolicy.h>
|
|
#include <linux/migrate_mode.h>
|
|
|
|
-typedef struct page *new_page_t(struct page *, unsigned long private, int **);
|
|
+typedef struct page *new_page_t(struct page *page, unsigned long private,
|
|
+ int **reason);
|
|
+typedef void free_page_t(struct page *page, unsigned long private);
|
|
|
|
/*
|
|
* Return values from addresss_space_operations.migratepage():
|
|
@@ -38,7 +40,7 @@ enum migrate_reason {
|
|
extern void putback_movable_pages(struct list_head *l);
|
|
extern int migrate_page(struct address_space *,
|
|
struct page *, struct page *, enum migrate_mode);
|
|
-extern int migrate_pages(struct list_head *l, new_page_t x,
|
|
+extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
|
|
unsigned long private, enum migrate_mode mode, int reason);
|
|
|
|
extern int migrate_prep(void);
|
|
@@ -56,8 +58,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping,
|
|
#else
|
|
|
|
static inline void putback_movable_pages(struct list_head *l) {}
|
|
-static inline int migrate_pages(struct list_head *l, new_page_t x,
|
|
- unsigned long private, enum migrate_mode mode, int reason)
|
|
+static inline int migrate_pages(struct list_head *l, new_page_t new,
|
|
+ free_page_t free, unsigned long private, enum migrate_mode mode,
|
|
+ int reason)
|
|
{ return -ENOSYS; }
|
|
|
|
static inline int migrate_prep(void) { return -ENOSYS; }
|
|
diff --git a/include/linux/mm.h b/include/linux/mm.h
|
|
index c1b7414..a7b311d 100644
|
|
--- a/include/linux/mm.h
|
|
+++ b/include/linux/mm.h
|
|
@@ -1009,6 +1009,7 @@ static inline int page_mapped(struct page *page)
|
|
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
|
|
#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
|
|
#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
|
|
+#define VM_FAULT_SIGSEGV 0x0040
|
|
|
|
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
|
|
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
|
|
@@ -1017,8 +1018,9 @@ static inline int page_mapped(struct page *page)
|
|
|
|
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
|
|
|
|
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
|
|
- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
|
|
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
|
|
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
|
|
+ VM_FAULT_FALLBACK)
|
|
|
|
/* Encode hstate index for a hwpoisoned large page */
|
|
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
|
|
@@ -1041,6 +1043,14 @@ extern void show_free_areas(unsigned int flags);
|
|
extern bool skip_free_areas_node(unsigned int flags, int nid);
|
|
|
|
int shmem_zero_setup(struct vm_area_struct *);
|
|
+#ifdef CONFIG_SHMEM
|
|
+bool shmem_mapping(struct address_space *mapping);
|
|
+#else
|
|
+static inline bool shmem_mapping(struct address_space *mapping)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+#endif
|
|
|
|
extern int can_do_mlock(void);
|
|
extern int user_shm_lock(size_t, struct user_struct *);
|
|
@@ -1123,6 +1133,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
|
|
|
|
extern void truncate_pagecache(struct inode *inode, loff_t new);
|
|
extern void truncate_setsize(struct inode *inode, loff_t newsize);
|
|
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
|
|
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
|
|
int truncate_inode_page(struct address_space *mapping, struct page *page);
|
|
int generic_error_remove_page(struct address_space *mapping, struct page *page);
|
|
@@ -1847,9 +1858,6 @@ void page_cache_async_readahead(struct address_space *mapping,
|
|
unsigned long size);
|
|
|
|
unsigned long max_sane_readahead(unsigned long nr);
|
|
-unsigned long ra_submit(struct file_ra_state *ra,
|
|
- struct address_space *mapping,
|
|
- struct file *filp);
|
|
|
|
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
|
|
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
|
|
@@ -1860,7 +1868,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
|
|
#if VM_GROWSUP
|
|
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
|
|
#else
|
|
- #define expand_upwards(vma, address) do { } while (0)
|
|
+ #define expand_upwards(vma, address) (0)
|
|
#endif
|
|
|
|
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
|
|
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
|
|
index 290901a..2b58d19 100644
|
|
--- a/include/linux/mm_types.h
|
|
+++ b/include/linux/mm_types.h
|
|
@@ -342,9 +342,9 @@ struct mm_rss_stat {
|
|
|
|
struct kioctx_table;
|
|
struct mm_struct {
|
|
- struct vm_area_struct * mmap; /* list of VMAs */
|
|
+ struct vm_area_struct *mmap; /* list of VMAs */
|
|
struct rb_root mm_rb;
|
|
- struct vm_area_struct * mmap_cache; /* last find_vma result */
|
|
+ u32 vmacache_seqnum; /* per-thread vmacache */
|
|
#ifdef CONFIG_MMU
|
|
unsigned long (*get_unmapped_area) (struct file *filp,
|
|
unsigned long addr, unsigned long len,
|
|
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
|
|
index e6800f0..ac819bf 100644
|
|
--- a/include/linux/mmzone.h
|
|
+++ b/include/linux/mmzone.h
|
|
@@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabled;
|
|
#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
|
|
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
|
|
|
|
-static inline int get_pageblock_migratetype(struct page *page)
|
|
+#define get_pageblock_migratetype(page) \
|
|
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
|
|
+ PB_migrate_end, MIGRATETYPE_MASK)
|
|
+
|
|
+static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
|
|
{
|
|
BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
|
|
- return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
|
|
+ return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
|
|
+ MIGRATETYPE_MASK);
|
|
}
|
|
|
|
struct free_area {
|
|
@@ -138,6 +143,7 @@ enum zone_stat_item {
|
|
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
|
|
NR_DIRTIED, /* page dirtyings since bootup */
|
|
NR_WRITTEN, /* page writings since bootup */
|
|
+ NR_PAGES_SCANNED, /* pages scanned since last reclaim */
|
|
#ifdef CONFIG_NUMA
|
|
NUMA_HIT, /* allocated in intended node */
|
|
NUMA_MISS, /* allocated in non intended node */
|
|
@@ -316,19 +322,12 @@ enum zone_type {
|
|
#ifndef __GENERATING_BOUNDS_H
|
|
|
|
struct zone {
|
|
- /* Fields commonly accessed by the page allocator */
|
|
+ /* Read-mostly fields */
|
|
|
|
/* zone watermarks, access with *_wmark_pages(zone) macros */
|
|
unsigned long watermark[NR_WMARK];
|
|
|
|
/*
|
|
- * When free pages are below this point, additional steps are taken
|
|
- * when reading the number of free pages to avoid per-cpu counter
|
|
- * drift allowing watermarks to be breached
|
|
- */
|
|
- unsigned long percpu_drift_mark;
|
|
-
|
|
- /*
|
|
* We don't know if the memory that we're going to allocate will be freeable
|
|
* or/and it will be released eventually, so to avoid totally wasting several
|
|
* GB of ram we must reserve some of the lower zone memory (otherwise we risk
|
|
@@ -336,40 +335,26 @@ struct zone {
|
|
* on the higher zones). This array is recalculated at runtime if the
|
|
* sysctl_lowmem_reserve_ratio sysctl changes.
|
|
*/
|
|
- unsigned long lowmem_reserve[MAX_NR_ZONES];
|
|
-
|
|
- /*
|
|
- * This is a per-zone reserve of pages that should not be
|
|
- * considered dirtyable memory.
|
|
- */
|
|
- unsigned long dirty_balance_reserve;
|
|
+ long lowmem_reserve[MAX_NR_ZONES];
|
|
|
|
#ifdef CONFIG_NUMA
|
|
int node;
|
|
+#endif
|
|
+
|
|
/*
|
|
- * zone reclaim becomes active if more unmapped pages exist.
|
|
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
|
|
+ * this zone's LRU. Maintained by the pageout code.
|
|
*/
|
|
- unsigned long min_unmapped_pages;
|
|
- unsigned long min_slab_pages;
|
|
-#endif
|
|
+ unsigned int inactive_ratio;
|
|
+
|
|
+ struct pglist_data *zone_pgdat;
|
|
struct per_cpu_pageset __percpu *pageset;
|
|
+
|
|
/*
|
|
- * free areas of different sizes
|
|
+ * This is a per-zone reserve of pages that should not be
|
|
+ * considered dirtyable memory.
|
|
*/
|
|
- spinlock_t lock;
|
|
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
|
- /* Set to true when the PG_migrate_skip bits should be cleared */
|
|
- bool compact_blockskip_flush;
|
|
-
|
|
- /* pfns where compaction scanners should start */
|
|
- unsigned long compact_cached_free_pfn;
|
|
- unsigned long compact_cached_migrate_pfn;
|
|
-#endif
|
|
-#ifdef CONFIG_MEMORY_HOTPLUG
|
|
- /* see spanned/present_pages for more description */
|
|
- seqlock_t span_seqlock;
|
|
-#endif
|
|
- struct free_area free_area[MAX_ORDER];
|
|
+ unsigned long dirty_balance_reserve;
|
|
|
|
#ifndef CONFIG_SPARSEMEM
|
|
/*
|
|
@@ -379,71 +364,14 @@ struct zone {
|
|
unsigned long *pageblock_flags;
|
|
#endif /* CONFIG_SPARSEMEM */
|
|
|
|
-#ifdef CONFIG_COMPACTION
|
|
- /*
|
|
- * On compaction failure, 1<<compact_defer_shift compactions
|
|
- * are skipped before trying again. The number attempted since
|
|
- * last failure is tracked with compact_considered.
|
|
- */
|
|
- unsigned int compact_considered;
|
|
- unsigned int compact_defer_shift;
|
|
- int compact_order_failed;
|
|
-#endif
|
|
-
|
|
- ZONE_PADDING(_pad1_)
|
|
-
|
|
- /* Fields commonly accessed by the page reclaim scanner */
|
|
- spinlock_t lru_lock;
|
|
- struct lruvec lruvec;
|
|
-
|
|
- unsigned long pages_scanned; /* since last reclaim */
|
|
- unsigned long flags; /* zone flags, see below */
|
|
-
|
|
- /* Zone statistics */
|
|
- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
-
|
|
- /*
|
|
- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
|
|
- * this zone's LRU. Maintained by the pageout code.
|
|
- */
|
|
- unsigned int inactive_ratio;
|
|
-
|
|
-
|
|
- ZONE_PADDING(_pad2_)
|
|
- /* Rarely used or read-mostly fields */
|
|
-
|
|
+#ifdef CONFIG_NUMA
|
|
/*
|
|
- * wait_table -- the array holding the hash table
|
|
- * wait_table_hash_nr_entries -- the size of the hash table array
|
|
- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
|
|
- *
|
|
- * The purpose of all these is to keep track of the people
|
|
- * waiting for a page to become available and make them
|
|
- * runnable again when possible. The trouble is that this
|
|
- * consumes a lot of space, especially when so few things
|
|
- * wait on pages at a given time. So instead of using
|
|
- * per-page waitqueues, we use a waitqueue hash table.
|
|
- *
|
|
- * The bucket discipline is to sleep on the same queue when
|
|
- * colliding and wake all in that wait queue when removing.
|
|
- * When something wakes, it must check to be sure its page is
|
|
- * truly available, a la thundering herd. The cost of a
|
|
- * collision is great, but given the expected load of the
|
|
- * table, they should be so rare as to be outweighed by the
|
|
- * benefits from the saved space.
|
|
- *
|
|
- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
|
|
- * primary users of these fields, and in mm/page_alloc.c
|
|
- * free_area_init_core() performs the initialization of them.
|
|
+ * zone reclaim becomes active if more unmapped pages exist.
|
|
*/
|
|
- wait_queue_head_t * wait_table;
|
|
- unsigned long wait_table_hash_nr_entries;
|
|
- unsigned long wait_table_bits;
|
|
+ unsigned long min_unmapped_pages;
|
|
+ unsigned long min_slab_pages;
|
|
+#endif /* CONFIG_NUMA */
|
|
|
|
- /*
|
|
- * Discontig memory support fields.
|
|
- */
|
|
- struct pglist_data *zone_pgdat;
|
|
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
|
|
unsigned long zone_start_pfn;
|
|
|
|
@@ -489,9 +417,11 @@ struct zone {
|
|
* adjust_managed_page_count() should be used instead of directly
|
|
* touching zone->managed_pages and totalram_pages.
|
|
*/
|
|
+ unsigned long managed_pages;
|
|
unsigned long spanned_pages;
|
|
unsigned long present_pages;
|
|
- unsigned long managed_pages;
|
|
+
|
|
+ const char *name;
|
|
|
|
/*
|
|
* Number of MIGRATE_RESEVE page block. To maintain for just
|
|
@@ -499,10 +429,91 @@ struct zone {
|
|
*/
|
|
int nr_migrate_reserve_block;
|
|
|
|
+#ifdef CONFIG_MEMORY_HOTPLUG
|
|
+ /* see spanned/present_pages for more description */
|
|
+ seqlock_t span_seqlock;
|
|
+#endif
|
|
+
|
|
/*
|
|
- * rarely used fields:
|
|
+ * wait_table -- the array holding the hash table
|
|
+ * wait_table_hash_nr_entries -- the size of the hash table array
|
|
+ * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
|
|
+ *
|
|
+ * The purpose of all these is to keep track of the people
|
|
+ * waiting for a page to become available and make them
|
|
+ * runnable again when possible. The trouble is that this
|
|
+ * consumes a lot of space, especially when so few things
|
|
+ * wait on pages at a given time. So instead of using
|
|
+ * per-page waitqueues, we use a waitqueue hash table.
|
|
+ *
|
|
+ * The bucket discipline is to sleep on the same queue when
|
|
+ * colliding and wake all in that wait queue when removing.
|
|
+ * When something wakes, it must check to be sure its page is
|
|
+ * truly available, a la thundering herd. The cost of a
|
|
+ * collision is great, but given the expected load of the
|
|
+ * table, they should be so rare as to be outweighed by the
|
|
+ * benefits from the saved space.
|
|
+ *
|
|
+ * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
|
|
+ * primary users of these fields, and in mm/page_alloc.c
|
|
+ * free_area_init_core() performs the initialization of them.
|
|
*/
|
|
- const char *name;
|
|
+ wait_queue_head_t *wait_table;
|
|
+ unsigned long wait_table_hash_nr_entries;
|
|
+ unsigned long wait_table_bits;
|
|
+
|
|
+ ZONE_PADDING(_pad1_)
|
|
+
|
|
+ /* Write-intensive fields used from the page allocator */
|
|
+ spinlock_t lock;
|
|
+
|
|
+ /* free areas of different sizes */
|
|
+ struct free_area free_area[MAX_ORDER];
|
|
+
|
|
+ /* zone flags, see below */
|
|
+ unsigned long flags;
|
|
+
|
|
+ ZONE_PADDING(_pad2_)
|
|
+
|
|
+ /* Write-intensive fields used by page reclaim */
|
|
+
|
|
+ /* Fields commonly accessed by the page reclaim scanner */
|
|
+ spinlock_t lru_lock;
|
|
+ struct lruvec lruvec;
|
|
+
|
|
+ /*
|
|
+ * When free pages are below this point, additional steps are taken
|
|
+ * when reading the number of free pages to avoid per-cpu counter
|
|
+ * drift allowing watermarks to be breached
|
|
+ */
|
|
+ unsigned long percpu_drift_mark;
|
|
+
|
|
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
|
+ /* pfn where compaction free scanner should start */
|
|
+ unsigned long compact_cached_free_pfn;
|
|
+ /* pfn where async and sync compaction migration scanner should start */
|
|
+ unsigned long compact_cached_migrate_pfn[2];
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_COMPACTION
|
|
+ /*
|
|
+ * On compaction failure, 1<<compact_defer_shift compactions
|
|
+ * are skipped before trying again. The number attempted since
|
|
+ * last failure is tracked with compact_considered.
|
|
+ */
|
|
+ unsigned int compact_considered;
|
|
+ unsigned int compact_defer_shift;
|
|
+ int compact_order_failed;
|
|
+#endif
|
|
+
|
|
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
|
+ /* Set to true when the PG_migrate_skip bits should be cleared */
|
|
+ bool compact_blockskip_flush;
|
|
+#endif
|
|
+
|
|
+ ZONE_PADDING(_pad3_)
|
|
+ /* Zone statistics */
|
|
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
} ____cacheline_internodealigned_in_smp;
|
|
|
|
typedef enum {
|
|
@@ -518,6 +529,7 @@ typedef enum {
|
|
ZONE_WRITEBACK, /* reclaim scanning has recently found
|
|
* many pages under writeback
|
|
*/
|
|
+ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
|
|
} zone_flags_t;
|
|
|
|
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
|
|
@@ -555,6 +567,11 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
|
|
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
|
|
}
|
|
|
|
+static inline int zone_is_fair_depleted(const struct zone *zone)
|
|
+{
|
|
+ return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
|
|
+}
|
|
+
|
|
static inline int zone_is_oom_locked(const struct zone *zone)
|
|
{
|
|
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
|
|
@@ -806,10 +823,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
|
|
extern struct mutex zonelists_mutex;
|
|
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
|
|
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
|
|
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
|
- int classzone_idx, int alloc_flags);
|
|
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
|
|
- int classzone_idx, int alloc_flags);
|
|
+bool zone_watermark_ok(struct zone *z, unsigned int order,
|
|
+ unsigned long mark, int classzone_idx, int alloc_flags);
|
|
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
|
|
+ unsigned long mark, int classzone_idx, int alloc_flags);
|
|
enum memmap_context {
|
|
MEMMAP_EARLY,
|
|
MEMMAP_HOTPLUG,
|
|
diff --git a/include/linux/mount.h b/include/linux/mount.h
|
|
index 839bac2..b0c1e65 100644
|
|
--- a/include/linux/mount.h
|
|
+++ b/include/linux/mount.h
|
|
@@ -42,13 +42,20 @@ struct mnt_namespace;
|
|
* flag, consider how it interacts with shared mounts.
|
|
*/
|
|
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
|
|
-#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
|
|
+#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
|
|
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
|
|
+ | MNT_READONLY)
|
|
+#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
|
|
|
|
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
|
|
MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
|
|
|
|
#define MNT_INTERNAL 0x4000
|
|
|
|
+#define MNT_LOCK_ATIME 0x040000
|
|
+#define MNT_LOCK_NOEXEC 0x080000
|
|
+#define MNT_LOCK_NOSUID 0x100000
|
|
+#define MNT_LOCK_NODEV 0x200000
|
|
#define MNT_LOCK_READONLY 0x400000
|
|
#define MNT_LOCKED 0x800000
|
|
#define MNT_DOOMED 0x1000000
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 911718f..bf46cc8 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -1880,6 +1880,12 @@ void netdev_freemem(struct net_device *dev);
|
|
void synchronize_net(void);
|
|
int init_dummy_netdev(struct net_device *dev);
|
|
|
|
+DECLARE_PER_CPU(int, xmit_recursion);
|
|
+static inline int dev_recursion_level(void)
|
|
+{
|
|
+ return this_cpu_read(xmit_recursion);
|
|
+}
|
|
+
|
|
struct net_device *dev_get_by_index(struct net *net, int ifindex);
|
|
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
|
|
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
|
|
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
|
|
index 5624e4e..3a36a2c 100644
|
|
--- a/include/linux/nfs_xdr.h
|
|
+++ b/include/linux/nfs_xdr.h
|
|
@@ -1155,7 +1155,7 @@ struct nfs41_state_protection {
|
|
struct nfs4_op_map allow;
|
|
};
|
|
|
|
-#define NFS4_EXCHANGE_ID_LEN (48)
|
|
+#define NFS4_EXCHANGE_ID_LEN (127)
|
|
struct nfs41_exchange_id_args {
|
|
struct nfs_client *client;
|
|
nfs4_verifier *verifier;
|
|
@@ -1247,11 +1247,22 @@ struct nfs41_free_stateid_res {
|
|
unsigned int status;
|
|
};
|
|
|
|
+static inline void
|
|
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
|
|
+{
|
|
+ kfree(cinfo->buckets);
|
|
+}
|
|
+
|
|
#else
|
|
|
|
struct pnfs_ds_commit_info {
|
|
};
|
|
|
|
+static inline void
|
|
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
|
|
+{
|
|
+}
|
|
+
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
struct nfs_page;
|
|
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
|
|
index 9875576..1108aca 100644
|
|
--- a/include/linux/nilfs2_fs.h
|
|
+++ b/include/linux/nilfs2_fs.h
|
|
@@ -458,7 +458,7 @@ struct nilfs_btree_node {
|
|
/* level */
|
|
#define NILFS_BTREE_LEVEL_DATA 0
|
|
#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
|
|
-#define NILFS_BTREE_LEVEL_MAX 14
|
|
+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
|
|
|
|
/**
|
|
* struct nilfs_palloc_group_desc - block group descriptor
|
|
diff --git a/include/linux/of.h b/include/linux/of.h
|
|
index 435cb99..9f2698d 100644
|
|
--- a/include/linux/of.h
|
|
+++ b/include/linux/of.h
|
|
@@ -215,14 +215,12 @@ extern int of_property_read_u64(const struct device_node *np,
|
|
extern int of_property_read_string(struct device_node *np,
|
|
const char *propname,
|
|
const char **out_string);
|
|
-extern int of_property_read_string_index(struct device_node *np,
|
|
- const char *propname,
|
|
- int index, const char **output);
|
|
extern int of_property_match_string(struct device_node *np,
|
|
const char *propname,
|
|
const char *string);
|
|
-extern int of_property_count_strings(struct device_node *np,
|
|
- const char *propname);
|
|
+extern int of_property_read_string_helper(struct device_node *np,
|
|
+ const char *propname,
|
|
+ const char **out_strs, size_t sz, int index);
|
|
extern int of_device_is_compatible(const struct device_node *device,
|
|
const char *);
|
|
extern int of_device_is_available(const struct device_node *device);
|
|
@@ -422,15 +420,9 @@ static inline int of_property_read_string(struct device_node *np,
|
|
return -ENOSYS;
|
|
}
|
|
|
|
-static inline int of_property_read_string_index(struct device_node *np,
|
|
- const char *propname, int index,
|
|
- const char **out_string)
|
|
-{
|
|
- return -ENOSYS;
|
|
-}
|
|
-
|
|
-static inline int of_property_count_strings(struct device_node *np,
|
|
- const char *propname)
|
|
+static inline int of_property_read_string_helper(struct device_node *np,
|
|
+ const char *propname,
|
|
+ const char **out_strs, size_t sz, int index)
|
|
{
|
|
return -ENOSYS;
|
|
}
|
|
@@ -525,7 +517,10 @@ static inline const char *of_prop_next_string(struct property *prop,
|
|
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
|
|
extern int of_node_to_nid(struct device_node *np);
|
|
#else
|
|
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
|
|
+static inline int of_node_to_nid(struct device_node *device)
|
|
+{
|
|
+ return NUMA_NO_NODE;
|
|
+}
|
|
#endif
|
|
|
|
static inline struct device_node *of_find_matching_node(
|
|
@@ -536,6 +531,70 @@ static inline struct device_node *of_find_matching_node(
|
|
}
|
|
|
|
/**
|
|
+ * of_property_read_string_array() - Read an array of strings from a multiple
|
|
+ * strings property.
|
|
+ * @np: device node from which the property value is to be read.
|
|
+ * @propname: name of the property to be searched.
|
|
+ * @out_strs: output array of string pointers.
|
|
+ * @sz: number of array elements to read.
|
|
+ *
|
|
+ * Search for a property in a device tree node and retrieve a list of
|
|
+ * terminated string values (pointer to data, not a copy) in that property.
|
|
+ *
|
|
+ * If @out_strs is NULL, the number of strings in the property is returned.
|
|
+ */
|
|
+static inline int of_property_read_string_array(struct device_node *np,
|
|
+ const char *propname, const char **out_strs,
|
|
+ size_t sz)
|
|
+{
|
|
+ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * of_property_count_strings() - Find and return the number of strings from a
|
|
+ * multiple strings property.
|
|
+ * @np: device node from which the property value is to be read.
|
|
+ * @propname: name of the property to be searched.
|
|
+ *
|
|
+ * Search for a property in a device tree node and retrieve the number of null-
|
|
+ * terminated string contain in it. Returns the number of strings on
|
|
+ * success, -EINVAL if the property does not exist, -ENODATA if property
|
|
+ * does not have a value, and -EILSEQ if the string is not null-terminated
|
|
+ * within the length of the property data.
|
|
+ */
|
|
+static inline int of_property_count_strings(struct device_node *np,
|
|
+ const char *propname)
|
|
+{
|
|
+ return of_property_read_string_helper(np, propname, NULL, 0, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * of_property_read_string_index() - Find and read a string from a multiple
|
|
+ * strings property.
|
|
+ * @np: device node from which the property value is to be read.
|
|
+ * @propname: name of the property to be searched.
|
|
+ * @index: index of the string in the list of strings
|
|
+ * @out_string: pointer to null terminated return string, modified only if
|
|
+ * return value is 0.
|
|
+ *
|
|
+ * Search for a property in a device tree node and retrieve a null
|
|
+ * terminated string value (pointer to data, not a copy) in the list of strings
|
|
+ * contained in that property.
|
|
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
|
|
+ * property does not have a value, and -EILSEQ if the string is not
|
|
+ * null-terminated within the length of the property data.
|
|
+ *
|
|
+ * The out_string pointer is modified only if a valid string can be decoded.
|
|
+ */
|
|
+static inline int of_property_read_string_index(struct device_node *np,
|
|
+ const char *propname,
|
|
+ int index, const char **output)
|
|
+{
|
|
+ int rc = of_property_read_string_helper(np, propname, output, 1, index);
|
|
+ return rc < 0 ? rc : 0;
|
|
+}
|
|
+
|
|
+/**
|
|
 * of_property_read_bool - Find a property
|
|
* @np: device node from which the property value is to be read.
|
|
* @propname: name of the property to be searched.
|
|
diff --git a/include/linux/oom.h b/include/linux/oom.h
|
|
index 4cd6267..17f0949 100644
|
|
--- a/include/linux/oom.h
|
|
+++ b/include/linux/oom.h
|
|
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
|
|
extern unsigned long oom_badness(struct task_struct *p,
|
|
struct mem_cgroup *memcg, const nodemask_t *nodemask,
|
|
unsigned long totalpages);
|
|
+
|
|
+extern int oom_kills_count(void);
|
|
+extern void note_oom_kill(void);
|
|
extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
|
unsigned int points, unsigned long totalpages,
|
|
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
|
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
|
|
index ca71a1d..3c545b4 100644
|
|
--- a/include/linux/page-flags.h
|
|
+++ b/include/linux/page-flags.h
|
|
@@ -198,6 +198,7 @@ struct page; /* forward declaration */
|
|
TESTPAGEFLAG(Locked, locked)
|
|
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
|
|
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
|
|
+ __SETPAGEFLAG(Referenced, referenced)
|
|
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
|
|
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
|
|
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
|
|
@@ -208,6 +209,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
|
|
PAGEFLAG(SavePinned, savepinned); /* Xen */
|
|
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
|
|
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
|
|
+ __SETPAGEFLAG(SwapBacked, swapbacked)
|
|
|
|
__PAGEFLAG(SlobFree, slob_free)
|
|
|
|
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
|
|
index c08730c..2baeee1 100644
|
|
--- a/include/linux/pageblock-flags.h
|
|
+++ b/include/linux/pageblock-flags.h
|
|
@@ -65,33 +65,26 @@ extern int pageblock_order;
|
|
/* Forward declaration */
|
|
struct page;
|
|
|
|
-unsigned long get_pageblock_flags_mask(struct page *page,
|
|
+unsigned long get_pfnblock_flags_mask(struct page *page,
|
|
+ unsigned long pfn,
|
|
unsigned long end_bitidx,
|
|
unsigned long mask);
|
|
-void set_pageblock_flags_mask(struct page *page,
|
|
+
|
|
+void set_pfnblock_flags_mask(struct page *page,
|
|
unsigned long flags,
|
|
+ unsigned long pfn,
|
|
unsigned long end_bitidx,
|
|
unsigned long mask);
|
|
|
|
/* Declarations for getting and setting flags. See mm/page_alloc.c */
|
|
-static inline unsigned long get_pageblock_flags_group(struct page *page,
|
|
- int start_bitidx, int end_bitidx)
|
|
-{
|
|
- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
|
|
- unsigned long mask = (1 << nr_flag_bits) - 1;
|
|
-
|
|
- return get_pageblock_flags_mask(page, end_bitidx, mask);
|
|
-}
|
|
-
|
|
-static inline void set_pageblock_flags_group(struct page *page,
|
|
- unsigned long flags,
|
|
- int start_bitidx, int end_bitidx)
|
|
-{
|
|
- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
|
|
- unsigned long mask = (1 << nr_flag_bits) - 1;
|
|
-
|
|
- set_pageblock_flags_mask(page, flags, end_bitidx, mask);
|
|
-}
|
|
+#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
|
|
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
|
|
+ end_bitidx, \
|
|
+ (1 << (end_bitidx - start_bitidx + 1)) - 1)
|
|
+#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
|
|
+ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
|
|
+ end_bitidx, \
|
|
+ (1 << (end_bitidx - start_bitidx + 1)) - 1)
|
|
|
|
#ifdef CONFIG_COMPACTION
|
|
#define get_pageblock_skip(page) \
|
|
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
|
|
index 1710d1b..fcebdda 100644
|
|
--- a/include/linux/pagemap.h
|
|
+++ b/include/linux/pagemap.h
|
|
@@ -99,7 +99,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
|
|
|
|
#define page_cache_get(page) get_page(page)
|
|
#define page_cache_release(page) put_page(page)
|
|
-void release_pages(struct page **pages, int nr, int cold);
|
|
+void release_pages(struct page **pages, int nr, bool cold);
|
|
|
|
/*
|
|
* speculatively take a reference to a page.
|
|
@@ -243,12 +243,116 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x)
|
|
|
|
typedef int filler_t(void *, struct page *);
|
|
|
|
-extern struct page * find_get_page(struct address_space *mapping,
|
|
- pgoff_t index);
|
|
-extern struct page * find_lock_page(struct address_space *mapping,
|
|
- pgoff_t index);
|
|
-extern struct page * find_or_create_page(struct address_space *mapping,
|
|
- pgoff_t index, gfp_t gfp_mask);
|
|
+pgoff_t page_cache_next_hole(struct address_space *mapping,
|
|
+ pgoff_t index, unsigned long max_scan);
|
|
+pgoff_t page_cache_prev_hole(struct address_space *mapping,
|
|
+ pgoff_t index, unsigned long max_scan);
|
|
+
|
|
+#define FGP_ACCESSED 0x00000001
|
|
+#define FGP_LOCK 0x00000002
|
|
+#define FGP_CREAT 0x00000004
|
|
+#define FGP_WRITE 0x00000008
|
|
+#define FGP_NOFS 0x00000010
|
|
+#define FGP_NOWAIT 0x00000020
|
|
+
|
|
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
|
|
+ int fgp_flags, gfp_t cache_gfp_mask);
|
|
+
|
|
+/**
|
|
+ * find_get_page - find and get a page reference
|
|
+ * @mapping: the address_space to search
|
|
+ * @offset: the page index
|
|
+ *
|
|
+ * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
+ * page cache page, it is returned with an increased refcount.
|
|
+ *
|
|
+ * Otherwise, %NULL is returned.
|
|
+ */
|
|
+static inline struct page *find_get_page(struct address_space *mapping,
|
|
+ pgoff_t offset)
|
|
+{
|
|
+ return pagecache_get_page(mapping, offset, 0, 0);
|
|
+}
|
|
+
|
|
+static inline struct page *find_get_page_flags(struct address_space *mapping,
|
|
+ pgoff_t offset, int fgp_flags)
|
|
+{
|
|
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * find_lock_page - locate, pin and lock a pagecache page
|
|
+ * pagecache_get_page - find and get a page reference
|
|
+ * @mapping: the address_space to search
|
|
+ * @offset: the page index
|
|
+ *
|
|
+ * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
+ * page cache page, it is returned locked and with an increased
|
|
+ * refcount.
|
|
+ *
|
|
+ * Otherwise, %NULL is returned.
|
|
+ *
|
|
+ * find_lock_page() may sleep.
|
|
+ */
|
|
+static inline struct page *find_lock_page(struct address_space *mapping,
|
|
+ pgoff_t offset)
|
|
+{
|
|
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * find_or_create_page - locate or add a pagecache page
|
|
+ * @mapping: the page's address_space
|
|
+ * @index: the page's index into the mapping
|
|
+ * @gfp_mask: page allocation mode
|
|
+ *
|
|
+ * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
+ * page cache page, it is returned locked and with an increased
|
|
+ * refcount.
|
|
+ *
|
|
+ * If the page is not present, a new page is allocated using @gfp_mask
|
|
+ * and added to the page cache and the VM's LRU list. The page is
|
|
+ * returned locked and with an increased refcount.
|
|
+ *
|
|
+ * On memory exhaustion, %NULL is returned.
|
|
+ *
|
|
+ * find_or_create_page() may sleep, even if @gfp_flags specifies an
|
|
+ * atomic allocation!
|
|
+ */
|
|
+static inline struct page *find_or_create_page(struct address_space *mapping,
|
|
+ pgoff_t offset, gfp_t gfp_mask)
|
|
+{
|
|
+ return pagecache_get_page(mapping, offset,
|
|
+ FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
|
|
+ gfp_mask);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * grab_cache_page_nowait - returns locked page at given index in given cache
|
|
+ * @mapping: target address_space
|
|
+ * @index: the page index
|
|
+ *
|
|
+ * Same as grab_cache_page(), but do not wait if the page is unavailable.
|
|
+ * This is intended for speculative data generators, where the data can
|
|
+ * be regenerated if the page couldn't be grabbed. This routine should
|
|
+ * be safe to call while holding the lock for another page.
|
|
+ *
|
|
+ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
|
|
+ * and deadlock against the caller's locked page.
|
|
+ */
|
|
+static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
|
|
+ pgoff_t index)
|
|
+{
|
|
+ return pagecache_get_page(mapping, index,
|
|
+ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
|
|
+ mapping_gfp_mask(mapping));
|
|
+}
|
|
+
|
|
+struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
|
|
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
|
|
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
|
|
+ unsigned int nr_entries, struct page **entries,
|
|
+ pgoff_t *indices);
|
|
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
|
|
unsigned int nr_pages, struct page **pages);
|
|
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
|
|
@@ -268,10 +372,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
|
|
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
|
|
}
|
|
|
|
-extern struct page * grab_cache_page_nowait(struct address_space *mapping,
|
|
- pgoff_t index);
|
|
-extern struct page * read_cache_page_async(struct address_space *mapping,
|
|
- pgoff_t index, filler_t *filler, void *data);
|
|
extern struct page * read_cache_page(struct address_space *mapping,
|
|
pgoff_t index, filler_t *filler, void *data);
|
|
extern struct page * read_cache_page_gfp(struct address_space *mapping,
|
|
@@ -279,14 +379,6 @@ extern struct page * read_cache_page_gfp(struct address_space *mapping,
|
|
extern int read_cache_pages(struct address_space *mapping,
|
|
struct list_head *pages, filler_t *filler, void *data);
|
|
|
|
-static inline struct page *read_mapping_page_async(
|
|
- struct address_space *mapping,
|
|
- pgoff_t index, void *data)
|
|
-{
|
|
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
|
|
- return read_cache_page_async(mapping, index, filler, data);
|
|
-}
|
|
-
|
|
static inline struct page *read_mapping_page(struct address_space *mapping,
|
|
pgoff_t index, void *data)
|
|
{
|
|
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
|
|
index e4dbfab..b45d391 100644
|
|
--- a/include/linux/pagevec.h
|
|
+++ b/include/linux/pagevec.h
|
|
@@ -22,6 +22,11 @@ struct pagevec {
|
|
|
|
void __pagevec_release(struct pagevec *pvec);
|
|
void __pagevec_lru_add(struct pagevec *pvec);
|
|
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
|
|
+ struct address_space *mapping,
|
|
+ pgoff_t start, unsigned nr_entries,
|
|
+ pgoff_t *indices);
|
|
+void pagevec_remove_exceptionals(struct pagevec *pvec);
|
|
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
|
|
pgoff_t start, unsigned nr_pages);
|
|
unsigned pagevec_lookup_tag(struct pagevec *pvec,
|
|
diff --git a/include/linux/pci.h b/include/linux/pci.h
|
|
index 33aa2ca..d662546 100644
|
|
--- a/include/linux/pci.h
|
|
+++ b/include/linux/pci.h
|
|
@@ -170,6 +170,8 @@ enum pci_dev_flags {
|
|
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
|
|
/* Provide indication device is assigned by a Virtual Machine Manager */
|
|
PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
|
|
+ /* Do not use bus resets for device */
|
|
+ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
|
|
};
|
|
|
|
enum pci_irq_reroute_variant {
|
|
@@ -324,6 +326,7 @@ struct pci_dev {
|
|
unsigned int is_added:1;
|
|
unsigned int is_busmaster:1; /* device is busmaster */
|
|
unsigned int no_msi:1; /* device may not use msi */
|
|
+ unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
|
|
unsigned int block_cfg_access:1; /* config space access is blocked */
|
|
unsigned int broken_parity_status:1; /* Device generates false positive parity */
|
|
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
|
|
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
|
|
index 97fbecd..057c1d8 100644
|
|
--- a/include/linux/pci_ids.h
|
|
+++ b/include/linux/pci_ids.h
|
|
@@ -2551,6 +2551,7 @@
|
|
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
|
|
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
|
|
#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
|
|
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
|
|
#define PCI_DEVICE_ID_INTEL_I960 0x0960
|
|
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
|
|
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
|
|
diff --git a/include/linux/plist.h b/include/linux/plist.h
|
|
index aa0fb39..8b6c970 100644
|
|
--- a/include/linux/plist.h
|
|
+++ b/include/linux/plist.h
|
|
@@ -98,6 +98,13 @@ struct plist_node {
|
|
}
|
|
|
|
/**
|
|
+ * PLIST_HEAD - declare and init plist_head
|
|
+ * @head: name for struct plist_head variable
|
|
+ */
|
|
+#define PLIST_HEAD(head) \
|
|
+ struct plist_head head = PLIST_HEAD_INIT(head)
|
|
+
|
|
+/**
|
|
* PLIST_NODE_INIT - static struct plist_node initializer
|
|
* @node: struct plist_node variable name
|
|
* @__prio: initial node priority
|
|
@@ -134,6 +141,8 @@ static inline void plist_node_init(struct plist_node *node, int prio)
|
|
extern void plist_add(struct plist_node *node, struct plist_head *head);
|
|
extern void plist_del(struct plist_node *node, struct plist_head *head);
|
|
|
|
+extern void plist_requeue(struct plist_node *node, struct plist_head *head);
|
|
+
|
|
/**
|
|
* plist_for_each - iterate over the plist
|
|
* @pos: the type * to use as a loop counter
|
|
@@ -143,6 +152,16 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
|
|
list_for_each_entry(pos, &(head)->node_list, node_list)
|
|
|
|
/**
|
|
+ * plist_for_each_continue - continue iteration over the plist
|
|
+ * @pos: the type * to use as a loop cursor
|
|
+ * @head: the head for your list
|
|
+ *
|
|
+ * Continue to iterate over plist, continuing after the current position.
|
|
+ */
|
|
+#define plist_for_each_continue(pos, head) \
|
|
+ list_for_each_entry_continue(pos, &(head)->node_list, node_list)
|
|
+
|
|
+/**
|
|
* plist_for_each_safe - iterate safely over a plist of given type
|
|
* @pos: the type * to use as a loop counter
|
|
* @n: another type * to use as temporary storage
|
|
@@ -163,6 +182,18 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
|
|
list_for_each_entry(pos, &(head)->node_list, mem.node_list)
|
|
|
|
/**
|
|
+ * plist_for_each_entry_continue - continue iteration over list of given type
|
|
+ * @pos: the type * to use as a loop cursor
|
|
+ * @head: the head for your list
|
|
+ * @m: the name of the list_struct within the struct
|
|
+ *
|
|
+ * Continue to iterate over list of given type, continuing after
|
|
+ * the current position.
|
|
+ */
|
|
+#define plist_for_each_entry_continue(pos, head, m) \
|
|
+ list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
|
|
+
|
|
+/**
|
|
* plist_for_each_entry_safe - iterate safely over list of given type
|
|
* @pos: the type * to use as a loop counter
|
|
* @n: another type * to use as temporary storage
|
|
@@ -229,6 +260,20 @@ static inline int plist_node_empty(const struct plist_node *node)
|
|
#endif
|
|
|
|
/**
|
|
+ * plist_next - get the next entry in list
|
|
+ * @pos: the type * to cursor
|
|
+ */
|
|
+#define plist_next(pos) \
|
|
+ list_next_entry(pos, node_list)
|
|
+
|
|
+/**
|
|
+ * plist_prev - get the prev entry in list
|
|
+ * @pos: the type * to cursor
|
|
+ */
|
|
+#define plist_prev(pos) \
|
|
+ list_prev_entry(pos, node_list)
|
|
+
|
|
+/**
|
|
* plist_first - return the first node (and thus, highest priority)
|
|
* @head: the &struct plist_head pointer
|
|
*
|
|
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
|
|
index 07e7945..e97fc65 100644
|
|
--- a/include/linux/power/charger-manager.h
|
|
+++ b/include/linux/power/charger-manager.h
|
|
@@ -253,9 +253,6 @@ struct charger_manager {
|
|
struct device *dev;
|
|
struct charger_desc *desc;
|
|
|
|
- struct power_supply *fuel_gauge;
|
|
- struct power_supply **charger_stat;
|
|
-
|
|
#ifdef CONFIG_THERMAL
|
|
struct thermal_zone_device *tzd_batt;
|
|
#endif
|
|
diff --git a/include/linux/printk.h b/include/linux/printk.h
|
|
index fa47e27..cbf094f 100644
|
|
--- a/include/linux/printk.h
|
|
+++ b/include/linux/printk.h
|
|
@@ -132,9 +132,9 @@ asmlinkage __printf(1, 2) __cold
|
|
int printk(const char *fmt, ...);
|
|
|
|
/*
|
|
- * Special printk facility for scheduler use only, _DO_NOT_USE_ !
|
|
+ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
|
|
*/
|
|
-__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
|
|
+__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
|
|
|
|
/*
|
|
* Please don't use printk_ratelimit(), because it shares ratelimiting state
|
|
@@ -169,7 +169,7 @@ int printk(const char *s, ...)
|
|
return 0;
|
|
}
|
|
static inline __printf(1, 2) __cold
|
|
-int printk_sched(const char *s, ...)
|
|
+int printk_deferred(const char *s, ...)
|
|
{
|
|
return 0;
|
|
}
|
|
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
|
|
index 9974975..4af3fdc 100644
|
|
--- a/include/linux/pstore_ram.h
|
|
+++ b/include/linux/pstore_ram.h
|
|
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
|
|
};
|
|
|
|
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
|
|
- u32 sig, struct persistent_ram_ecc_info *ecc_info);
|
|
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
|
|
+ unsigned int memtype);
|
|
void persistent_ram_free(struct persistent_ram_zone *prz);
|
|
void persistent_ram_zap(struct persistent_ram_zone *prz);
|
|
|
|
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
|
|
struct ramoops_platform_data {
|
|
unsigned long mem_size;
|
|
unsigned long mem_address;
|
|
+ unsigned int mem_type;
|
|
unsigned long record_size;
|
|
unsigned long console_size;
|
|
unsigned long ftrace_size;
|
|
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
|
|
index 4039407..e8be53e 100644
|
|
--- a/include/linux/radix-tree.h
|
|
+++ b/include/linux/radix-tree.h
|
|
@@ -219,6 +219,7 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
|
|
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
|
|
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
|
|
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
|
|
+void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
|
|
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
|
|
unsigned int
|
|
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
|
|
@@ -226,10 +227,6 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
|
|
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
|
|
void ***results, unsigned long *indices,
|
|
unsigned long first_index, unsigned int max_items);
|
|
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
|
|
- unsigned long index, unsigned long max_scan);
|
|
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
|
|
- unsigned long index, unsigned long max_scan);
|
|
int radix_tree_preload(gfp_t gfp_mask);
|
|
int radix_tree_maybe_preload(gfp_t gfp_mask);
|
|
void radix_tree_init(void);
|
|
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
|
index ccd0c6f..91fe6a3 100644
|
|
--- a/include/linux/sched.h
|
|
+++ b/include/linux/sched.h
|
|
@@ -59,6 +59,10 @@ struct sched_param {
|
|
|
|
#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
|
|
|
|
+#define VMACACHE_BITS 2
|
|
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
|
|
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
|
|
+
|
|
/*
|
|
* Extended scheduling parameters data structure.
|
|
*
|
|
@@ -1228,6 +1232,9 @@ struct task_struct {
|
|
#ifdef CONFIG_COMPAT_BRK
|
|
unsigned brk_randomized:1;
|
|
#endif
|
|
+ /* per-thread vma caching */
|
|
+ u32 vmacache_seqnum;
|
|
+ struct vm_area_struct *vmacache[VMACACHE_SIZE];
|
|
#if defined(SPLIT_RSS_COUNTING)
|
|
struct task_rss_stat rss_stat;
|
|
#endif
|
|
@@ -1688,7 +1695,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
|
|
}
|
|
|
|
|
|
-static int pid_alive(const struct task_struct *p);
|
|
+static inline int pid_alive(const struct task_struct *p);
|
|
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
|
|
{
|
|
pid_t pid = 0;
|
|
@@ -1869,11 +1876,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
|
|
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
|
|
#define used_math() tsk_used_math(current)
|
|
|
|
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
|
|
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
|
|
+ * __GFP_FS is also cleared as it implies __GFP_IO.
|
|
+ */
|
|
static inline gfp_t memalloc_noio_flags(gfp_t flags)
|
|
{
|
|
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
|
|
- flags &= ~__GFP_IO;
|
|
+ flags &= ~(__GFP_IO | __GFP_FS);
|
|
return flags;
|
|
}
|
|
|
|
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
|
|
index 535f158..8cf3503 100644
|
|
--- a/include/linux/seqlock.h
|
|
+++ b/include/linux/seqlock.h
|
|
@@ -164,8 +164,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
|
|
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
|
|
{
|
|
unsigned ret = ACCESS_ONCE(s->sequence);
|
|
-
|
|
- seqcount_lockdep_reader_access(s);
|
|
smp_rmb();
|
|
return ret & ~1;
|
|
}
|
|
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
|
|
index 9d55438..4d1771c 100644
|
|
--- a/include/linux/shmem_fs.h
|
|
+++ b/include/linux/shmem_fs.h
|
|
@@ -51,6 +51,7 @@ extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
|
|
unsigned long flags);
|
|
extern int shmem_zero_setup(struct vm_area_struct *);
|
|
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
|
|
+extern bool shmem_mapping(struct address_space *mapping);
|
|
extern void shmem_unlock_mapping(struct address_space *mapping);
|
|
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
|
|
pgoff_t index, gfp_t gfp_mask);
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index 6b84663..5ab5f97 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -661,6 +661,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
|
|
|
|
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
|
|
int node);
|
|
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
|
|
struct sk_buff *build_skb(void *data, unsigned int frag_size);
|
|
static inline struct sk_buff *alloc_skb(unsigned int size,
|
|
gfp_t priority)
|
|
@@ -2458,6 +2459,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
|
|
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
|
|
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
|
|
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
|
|
+struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
|
|
|
|
struct skb_checksum_ops {
|
|
__wsum (*update)(const void *mem, int len, __wsum wsum);
|
|
diff --git a/include/linux/string.h b/include/linux/string.h
|
|
index ac889c5..0ed878d 100644
|
|
--- a/include/linux/string.h
|
|
+++ b/include/linux/string.h
|
|
@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
|
|
#endif
|
|
|
|
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
|
|
- const void *from, size_t available);
|
|
+ const void *from, size_t available);
|
|
|
|
/**
|
|
* strstarts - does @str start with @prefix?
|
|
@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
|
|
return strncmp(str, prefix, strlen(prefix)) == 0;
|
|
}
|
|
|
|
-extern size_t memweight(const void *ptr, size_t bytes);
|
|
+size_t memweight(const void *ptr, size_t bytes);
|
|
+void memzero_explicit(void *s, size_t count);
|
|
|
|
/**
|
|
* kbasename - return the last part of a pathname.
|
|
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
|
|
index b05963f..f5bfb1a 100644
|
|
--- a/include/linux/sunrpc/svc_xprt.h
|
|
+++ b/include/linux/sunrpc/svc_xprt.h
|
|
@@ -32,6 +32,7 @@ struct svc_xprt_class {
|
|
struct svc_xprt_ops *xcl_ops;
|
|
struct list_head xcl_list;
|
|
u32 xcl_max_payload;
|
|
+ int xcl_ident;
|
|
};
|
|
|
|
/*
|
|
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
|
|
index 8097b9d..51009d2 100644
|
|
--- a/include/linux/sunrpc/xprt.h
|
|
+++ b/include/linux/sunrpc/xprt.h
|
|
@@ -340,6 +340,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
|
|
#define XPRT_CONNECTION_ABORT (7)
|
|
#define XPRT_CONNECTION_CLOSE (8)
|
|
#define XPRT_CONGESTED (9)
|
|
+#define XPRT_CONNECTION_REUSE (10)
|
|
|
|
static inline void xprt_set_connected(struct rpc_xprt *xprt)
|
|
{
|
|
diff --git a/include/linux/swap.h b/include/linux/swap.h
|
|
index 46ba0c6..241bf09 100644
|
|
--- a/include/linux/swap.h
|
|
+++ b/include/linux/swap.h
|
|
@@ -214,8 +214,9 @@ struct percpu_cluster {
|
|
struct swap_info_struct {
|
|
unsigned long flags; /* SWP_USED etc: see above */
|
|
signed short prio; /* swap priority of this type */
|
|
+ struct plist_node list; /* entry in swap_active_head */
|
|
+ struct plist_node avail_list; /* entry in swap_avail_head */
|
|
signed char type; /* strange name for an index */
|
|
- signed char next; /* next type on the swap list */
|
|
unsigned int max; /* extent of the swap_map */
|
|
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
|
|
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
|
|
@@ -255,11 +256,6 @@ struct swap_info_struct {
|
|
struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
|
|
};
|
|
|
|
-struct swap_list_t {
|
|
- int head; /* head of priority-ordered swapfile list */
|
|
- int next; /* swapfile to be used next */
|
|
-};
|
|
-
|
|
/* linux/mm/page_alloc.c */
|
|
extern unsigned long totalram_pages;
|
|
extern unsigned long totalreserve_pages;
|
|
@@ -272,12 +268,14 @@ extern unsigned long nr_free_pagecache_pages(void);
|
|
|
|
|
|
/* linux/mm/swap.c */
|
|
-extern void __lru_cache_add(struct page *);
|
|
extern void lru_cache_add(struct page *);
|
|
+extern void lru_cache_add_anon(struct page *page);
|
|
+extern void lru_cache_add_file(struct page *page);
|
|
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
|
|
struct lruvec *lruvec, struct list_head *head);
|
|
extern void activate_page(struct page *);
|
|
extern void mark_page_accessed(struct page *);
|
|
+extern void init_page_accessed(struct page *page);
|
|
extern void lru_add_drain(void);
|
|
extern void lru_add_drain_cpu(int cpu);
|
|
extern void lru_add_drain_all(void);
|
|
@@ -287,22 +285,6 @@ extern void swap_setup(void);
|
|
|
|
extern void add_page_to_unevictable_list(struct page *page);
|
|
|
|
-/**
|
|
- * lru_cache_add: add a page to the page lists
|
|
- * @page: the page to add
|
|
- */
|
|
-static inline void lru_cache_add_anon(struct page *page)
|
|
-{
|
|
- ClearPageActive(page);
|
|
- __lru_cache_add(page);
|
|
-}
|
|
-
|
|
-static inline void lru_cache_add_file(struct page *page)
|
|
-{
|
|
- ClearPageActive(page);
|
|
- __lru_cache_add(page);
|
|
-}
|
|
-
|
|
/* linux/mm/vmscan.c */
|
|
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
|
|
gfp_t gfp_mask, nodemask_t *mask);
|
|
@@ -460,7 +442,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
|
|
#define free_page_and_swap_cache(page) \
|
|
page_cache_release(page)
|
|
#define free_pages_and_swap_cache(pages, nr) \
|
|
- release_pages((pages), (nr), 0);
|
|
+ release_pages((pages), (nr), false);
|
|
|
|
static inline void show_swap_cache_info(void)
|
|
{
|
|
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
|
|
index e282624..388293a 100644
|
|
--- a/include/linux/swapfile.h
|
|
+++ b/include/linux/swapfile.h
|
|
@@ -6,7 +6,7 @@
|
|
* want to expose them to the dozens of source files that include swap.h
|
|
*/
|
|
extern spinlock_t swap_lock;
|
|
-extern struct swap_list_t swap_list;
|
|
+extern struct plist_head swap_active_head;
|
|
extern struct swap_info_struct *swap_info[];
|
|
extern int try_to_unuse(unsigned int, bool, unsigned long);
|
|
|
|
diff --git a/include/linux/time.h b/include/linux/time.h
|
|
index d5d229b..7d532a3 100644
|
|
--- a/include/linux/time.h
|
|
+++ b/include/linux/time.h
|
|
@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts);
|
|
extern void monotonic_to_bootbased(struct timespec *ts);
|
|
extern void get_monotonic_boottime(struct timespec *ts);
|
|
|
|
+static inline bool timeval_valid(const struct timeval *tv)
|
|
+{
|
|
+ /* Dates before 1970 are bogus */
|
|
+ if (tv->tv_sec < 0)
|
|
+ return false;
|
|
+
|
|
+ /* Can't have more microseconds then a second */
|
|
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
|
|
extern int timekeeping_valid_for_hres(void);
|
|
extern u64 timekeeping_max_deferment(void);
|
|
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
|
|
index fff1d09..8350c53 100644
|
|
--- a/include/linux/tpm.h
|
|
+++ b/include/linux/tpm.h
|
|
@@ -39,6 +39,9 @@ struct tpm_class_ops {
|
|
int (*send) (struct tpm_chip *chip, u8 *buf, size_t len);
|
|
void (*cancel) (struct tpm_chip *chip);
|
|
u8 (*status) (struct tpm_chip *chip);
|
|
+ bool (*update_timeouts)(struct tpm_chip *chip,
|
|
+ unsigned long *timeout_cap);
|
|
+
|
|
};
|
|
|
|
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
|
|
diff --git a/include/linux/usb.h b/include/linux/usb.h
|
|
index 7f6eb85..49466be 100644
|
|
--- a/include/linux/usb.h
|
|
+++ b/include/linux/usb.h
|
|
@@ -206,6 +206,32 @@ void usb_put_intf(struct usb_interface *intf);
|
|
#define USB_MAXINTERFACES 32
|
|
#define USB_MAXIADS (USB_MAXINTERFACES/2)
|
|
|
|
+/*
|
|
+ * USB Resume Timer: Every Host controller driver should drive the resume
|
|
+ * signalling on the bus for the amount of time defined by this macro.
|
|
+ *
|
|
+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
|
|
+ *
|
|
+ * Note that the USB Specification states we should drive resume for *at least*
|
|
+ * 20 ms, but it doesn't give an upper bound. This creates two possible
|
|
+ * situations which we want to avoid:
|
|
+ *
|
|
+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
|
|
+ * us to fail USB Electrical Tests, thus failing Certification
|
|
+ *
|
|
+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
|
|
+ * and while we can argue that's against the USB Specification, we don't have
|
|
+ * control over which devices a certification laboratory will be using for
|
|
+ * certification. If CertLab uses a device which was tested against Windows and
|
|
+ * that happens to have relaxed resume signalling rules, we might fall into
|
|
+ * situations where we fail interoperability and electrical tests.
|
|
+ *
|
|
+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
|
|
+ * should cope with both LPJ calibration errors and devices not following every
|
|
+ * detail of the USB Specification.
|
|
+ */
|
|
+#define USB_RESUME_TIMEOUT 40 /* ms */
|
|
+
|
|
/**
|
|
* struct usb_interface_cache - long-term representation of a device interface
|
|
* @num_altsetting: number of altsettings defined.
|
|
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
|
|
index efe8d8a..e34bce3 100644
|
|
--- a/include/linux/usb/hcd.h
|
|
+++ b/include/linux/usb/hcd.h
|
|
@@ -447,6 +447,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
|
|
#endif /* CONFIG_PCI */
|
|
|
|
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
|
|
+void usb_init_pool_max(void);
|
|
int hcd_buffer_create(struct usb_hcd *hcd);
|
|
void hcd_buffer_destroy(struct usb_hcd *hcd);
|
|
|
|
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
|
|
index 52f944d..8b96ae2 100644
|
|
--- a/include/linux/usb/quirks.h
|
|
+++ b/include/linux/usb/quirks.h
|
|
@@ -30,4 +30,10 @@
|
|
descriptor */
|
|
#define USB_QUIRK_DELAY_INIT 0x00000040
|
|
|
|
+/* device generates spurious wakeup, ignore remote wakeup capability */
|
|
+#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
|
|
+
|
|
+/* device can't handle device_qualifier descriptor requests */
|
|
+#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
|
|
+
|
|
#endif /* __LINUX_USB_QUIRKS_H */
|
|
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
|
|
index 4836ba3..e92abf9 100644
|
|
--- a/include/linux/user_namespace.h
|
|
+++ b/include/linux/user_namespace.h
|
|
@@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
|
|
} extent[UID_GID_MAP_MAX_EXTENTS];
|
|
};
|
|
|
|
+#define USERNS_SETGROUPS_ALLOWED 1UL
|
|
+
|
|
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
|
|
+
|
|
struct user_namespace {
|
|
struct uid_gid_map uid_map;
|
|
struct uid_gid_map gid_map;
|
|
@@ -27,6 +31,7 @@ struct user_namespace {
|
|
kuid_t owner;
|
|
kgid_t group;
|
|
unsigned int proc_inum;
|
|
+ unsigned long flags;
|
|
|
|
/* Register of per-UID persistent keyrings for this namespace */
|
|
#ifdef CONFIG_PERSISTENT_KEYRINGS
|
|
@@ -63,6 +68,9 @@ extern struct seq_operations proc_projid_seq_operations;
|
|
extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
|
|
extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
|
|
extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
|
|
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
|
|
+extern int proc_setgroups_show(struct seq_file *m, void *v);
|
|
+extern bool userns_may_setgroups(const struct user_namespace *ns);
|
|
#else
|
|
|
|
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
|
|
@@ -87,6 +95,10 @@ static inline void put_user_ns(struct user_namespace *ns)
|
|
{
|
|
}
|
|
|
|
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
|
|
+{
|
|
+ return true;
|
|
+}
|
|
#endif
|
|
|
|
#endif /* _LINUX_USER_H */
|
|
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
|
|
index 502073a..b483abd 100644
|
|
--- a/include/linux/vga_switcheroo.h
|
|
+++ b/include/linux/vga_switcheroo.h
|
|
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
|
|
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
|
|
|
|
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
|
|
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
|
|
int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
|
|
#else
|
|
|
|
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
|
|
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
|
|
|
|
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
|
|
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
|
|
static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
|
|
|
|
#endif
|
|
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
|
|
index 704f4f6..bc9d2c2 100644
|
|
--- a/include/linux/workqueue.h
|
|
+++ b/include/linux/workqueue.h
|
|
@@ -71,7 +71,8 @@ enum {
|
|
/* data contains off-queue information when !WORK_STRUCT_PWQ */
|
|
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
|
|
|
|
- WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
|
|
+ __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
|
|
+ WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
|
|
|
|
/*
|
|
* When a work item is off queue, its high bits point to the last
|
|
@@ -452,7 +453,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
|
|
alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
|
|
1, (name))
|
|
#define create_singlethread_workqueue(name) \
|
|
- alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
|
|
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
|
|
|
|
extern void destroy_workqueue(struct workqueue_struct *wq);
|
|
|
|
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
|
|
index bef53ce..b10682c 100644
|
|
--- a/include/media/videobuf2-core.h
|
|
+++ b/include/media/videobuf2-core.h
|
|
@@ -329,6 +329,9 @@ struct v4l2_fh;
|
|
* @retry_start_streaming: start_streaming() was called, but there were not enough
|
|
* buffers queued. If set, then retry calling start_streaming when
|
|
* queuing a new buffer.
|
|
+ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
|
|
+ * buffers. Only set for capture queues if qbuf has not yet been
|
|
+ * called since poll() needs to return POLLERR in that situation.
|
|
* @fileio: file io emulator internal data, used only if emulator is active
|
|
*/
|
|
struct vb2_queue {
|
|
@@ -362,6 +365,7 @@ struct vb2_queue {
|
|
|
|
unsigned int streaming:1;
|
|
unsigned int retry_start_streaming:1;
|
|
+ unsigned int waiting_for_buffers:1;
|
|
|
|
struct vb2_fileio_data *fileio;
|
|
};
|
|
diff --git a/include/net/dst.h b/include/net/dst.h
|
|
index 77eb53f..9090328 100644
|
|
--- a/include/net/dst.h
|
|
+++ b/include/net/dst.h
|
|
@@ -466,6 +466,7 @@ void dst_init(void);
|
|
/* Flags for xfrm_lookup flags argument. */
|
|
enum {
|
|
XFRM_LOOKUP_ICMP = 1 << 0,
|
|
+ XFRM_LOOKUP_QUEUE = 1 << 1,
|
|
};
|
|
|
|
struct flowi;
|
|
@@ -476,7 +477,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
|
|
int flags)
|
|
{
|
|
return dst_orig;
|
|
-}
|
|
+}
|
|
+
|
|
+static inline struct dst_entry *xfrm_lookup_route(struct net *net,
|
|
+ struct dst_entry *dst_orig,
|
|
+ const struct flowi *fl,
|
|
+ struct sock *sk,
|
|
+ int flags)
|
|
+{
|
|
+ return dst_orig;
|
|
+}
|
|
|
|
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
|
|
{
|
|
@@ -488,6 +498,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
|
|
const struct flowi *fl, struct sock *sk,
|
|
int flags);
|
|
|
|
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
|
|
+ const struct flowi *fl, struct sock *sk,
|
|
+ int flags);
|
|
+
|
|
/* skb attached with this dst needs transformation if dst->xfrm is valid */
|
|
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
|
|
{
|
|
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
|
|
index c55aeed..cf92728 100644
|
|
--- a/include/net/inet_connection_sock.h
|
|
+++ b/include/net/inet_connection_sock.h
|
|
@@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops {
|
|
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
|
|
int (*bind_conflict)(const struct sock *sk,
|
|
const struct inet_bind_bucket *tb, bool relax);
|
|
+ void (*mtu_reduced)(struct sock *sk);
|
|
};
|
|
|
|
/** inet_connection_sock - INET connection oriented sock
|
|
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
|
|
index 058271b..823ec7b 100644
|
|
--- a/include/net/inetpeer.h
|
|
+++ b/include/net/inetpeer.h
|
|
@@ -41,14 +41,13 @@ struct inet_peer {
|
|
struct rcu_head gc_rcu;
|
|
};
|
|
/*
|
|
- * Once inet_peer is queued for deletion (refcnt == -1), following fields
|
|
- * are not available: rid, ip_id_count
|
|
+ * Once inet_peer is queued for deletion (refcnt == -1), following field
|
|
+ * is not available: rid
|
|
* We can share memory with rcu_head to help keep inet_peer small.
|
|
*/
|
|
union {
|
|
struct {
|
|
atomic_t rid; /* Frag reception counter */
|
|
- atomic_t ip_id_count; /* IP ID for the next packet */
|
|
};
|
|
struct rcu_head rcu;
|
|
struct inet_peer *gc_next;
|
|
@@ -165,7 +164,7 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
|
|
void inetpeer_invalidate_tree(struct inet_peer_base *);
|
|
|
|
/*
|
|
- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
|
|
+ * temporary check to make sure we dont access rid, tcp_ts,
|
|
* tcp_ts_stamp if no refcount is taken on inet_peer
|
|
*/
|
|
static inline void inet_peer_refcheck(const struct inet_peer *p)
|
|
@@ -173,13 +172,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
|
|
WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
|
|
}
|
|
|
|
-
|
|
-/* can be called with or without local BH being disabled */
|
|
-static inline int inet_getid(struct inet_peer *p, int more)
|
|
-{
|
|
- more++;
|
|
- inet_peer_refcheck(p);
|
|
- return atomic_add_return(more, &p->ip_id_count) - more;
|
|
-}
|
|
-
|
|
#endif /* _NET_INETPEER_H */
|
|
diff --git a/include/net/ip.h b/include/net/ip.h
|
|
index 23be0fd..5128fa7 100644
|
|
--- a/include/net/ip.h
|
|
+++ b/include/net/ip.h
|
|
@@ -38,11 +38,12 @@ struct inet_skb_parm {
|
|
struct ip_options opt; /* Compiled IP options */
|
|
unsigned char flags;
|
|
|
|
-#define IPSKB_FORWARDED 1
|
|
-#define IPSKB_XFRM_TUNNEL_SIZE 2
|
|
-#define IPSKB_XFRM_TRANSFORMED 4
|
|
-#define IPSKB_FRAG_COMPLETE 8
|
|
-#define IPSKB_REROUTED 16
|
|
+#define IPSKB_FORWARDED BIT(0)
|
|
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
|
|
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
|
|
+#define IPSKB_FRAG_COMPLETE BIT(3)
|
|
+#define IPSKB_REROUTED BIT(4)
|
|
+#define IPSKB_DOREDIRECT BIT(5)
|
|
|
|
u16 frag_max_size;
|
|
};
|
|
@@ -174,7 +175,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
|
|
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
|
|
}
|
|
|
|
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
|
|
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
|
|
__be32 saddr, const struct ip_reply_arg *arg,
|
|
unsigned int len);
|
|
|
|
@@ -297,9 +298,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
|
|
}
|
|
}
|
|
|
|
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
|
|
+u32 ip_idents_reserve(u32 hash, int segs);
|
|
+void __ip_select_ident(struct iphdr *iph, int segs);
|
|
|
|
-static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
|
|
+static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
|
|
{
|
|
struct iphdr *iph = ip_hdr(skb);
|
|
|
|
@@ -309,24 +311,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
|
|
* does not change, they drop every other packet in
|
|
* a TCP stream using header compression.
|
|
*/
|
|
- iph->id = (sk && inet_sk(sk)->inet_daddr) ?
|
|
- htons(inet_sk(sk)->inet_id++) : 0;
|
|
- } else
|
|
- __ip_select_ident(iph, dst, 0);
|
|
-}
|
|
-
|
|
-static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
|
|
-{
|
|
- struct iphdr *iph = ip_hdr(skb);
|
|
-
|
|
- if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
|
|
if (sk && inet_sk(sk)->inet_daddr) {
|
|
iph->id = htons(inet_sk(sk)->inet_id);
|
|
- inet_sk(sk)->inet_id += 1 + more;
|
|
- } else
|
|
+ inet_sk(sk)->inet_id += segs;
|
|
+ } else {
|
|
iph->id = 0;
|
|
- } else
|
|
- __ip_select_ident(iph, dst, more);
|
|
+ }
|
|
+ } else {
|
|
+ __ip_select_ident(iph, segs);
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
|
|
+{
|
|
+ ip_select_ident_segs(skb, sk, 1);
|
|
}
|
|
|
|
/*
|
|
@@ -409,22 +407,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
|
|
|
|
#endif
|
|
|
|
-static inline int sk_mc_loop(struct sock *sk)
|
|
-{
|
|
- if (!sk)
|
|
- return 1;
|
|
- switch (sk->sk_family) {
|
|
- case AF_INET:
|
|
- return inet_sk(sk)->mc_loop;
|
|
-#if IS_ENABLED(CONFIG_IPV6)
|
|
- case AF_INET6:
|
|
- return inet6_sk(sk)->mc_loop;
|
|
-#endif
|
|
- }
|
|
- WARN_ON(1);
|
|
- return 1;
|
|
-}
|
|
-
|
|
bool ip_call_ra_chain(struct sk_buff *skb);
|
|
|
|
/*
|
|
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
|
|
index 2e74c6c..ee2d53a 100644
|
|
--- a/include/net/ip6_route.h
|
|
+++ b/include/net/ip6_route.h
|
|
@@ -168,7 +168,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
|
|
|
|
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
|
|
{
|
|
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
|
|
+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
|
|
+ inet6_sk(skb->sk) : NULL;
|
|
|
|
return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
|
|
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
|
|
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
|
|
index e77c104..7b9ec58 100644
|
|
--- a/include/net/ip_tunnels.h
|
|
+++ b/include/net/ip_tunnels.h
|
|
@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
|
|
|
|
struct ip_tunnel_dst {
|
|
struct dst_entry __rcu *dst;
|
|
+ __be32 saddr;
|
|
};
|
|
|
|
struct ip_tunnel {
|
|
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
|
|
index 4f541f1..a60948d 100644
|
|
--- a/include/net/ipv6.h
|
|
+++ b/include/net/ipv6.h
|
|
@@ -660,7 +660,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
|
|
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
|
|
}
|
|
|
|
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
|
|
+void ipv6_proxy_select_ident(struct sk_buff *skb);
|
|
|
|
int ip6_dst_hoplimit(struct dst_entry *dst);
|
|
|
|
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
|
|
index 80f500a..57c2da9 100644
|
|
--- a/include/net/netns/ipv4.h
|
|
+++ b/include/net/netns/ipv4.h
|
|
@@ -47,6 +47,7 @@ struct netns_ipv4 {
|
|
struct inet_peer_base *peers;
|
|
struct tcpm_hash_bucket *tcp_metrics_hash;
|
|
unsigned int tcp_metrics_hash_log;
|
|
+ struct sock * __percpu *tcp_sk;
|
|
struct netns_frags frags;
|
|
#ifdef CONFIG_NETFILTER
|
|
struct xt_table *iptable_filter;
|
|
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
|
|
index 3573a81..8ba379f 100644
|
|
--- a/include/net/netns/sctp.h
|
|
+++ b/include/net/netns/sctp.h
|
|
@@ -31,6 +31,7 @@ struct netns_sctp {
|
|
struct list_head addr_waitq;
|
|
struct timer_list addr_wq_timer;
|
|
struct list_head auto_asconf_splist;
|
|
+ /* Lock that protects both addr_waitq and auto_asconf_splist */
|
|
spinlock_t addr_wq_lock;
|
|
|
|
/* Lock that protects the local_addr_list writers */
|
|
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
|
|
index b07cdc9..f103f30 100644
|
|
--- a/include/net/regulatory.h
|
|
+++ b/include/net/regulatory.h
|
|
@@ -160,7 +160,7 @@ struct ieee80211_reg_rule {
|
|
struct ieee80211_regdomain {
|
|
struct rcu_head rcu_head;
|
|
u32 n_reg_rules;
|
|
- char alpha2[2];
|
|
+ char alpha2[3];
|
|
enum nl80211_dfs_regions dfs_region;
|
|
struct ieee80211_reg_rule reg_rules[];
|
|
};
|
|
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
|
|
index 4b7cd69..cfcbc3f 100644
|
|
--- a/include/net/sctp/command.h
|
|
+++ b/include/net/sctp/command.h
|
|
@@ -115,7 +115,7 @@ typedef enum {
|
|
* analysis of the state functions, but in reality just taken from
|
|
* thin air in the hopes othat we don't trigger a kernel panic.
|
|
*/
|
|
-#define SCTP_MAX_NUM_COMMANDS 14
|
|
+#define SCTP_MAX_NUM_COMMANDS 20
|
|
|
|
typedef union {
|
|
__s32 i32;
|
|
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
|
|
index a3353f4..ba41e01 100644
|
|
--- a/include/net/sctp/sctp.h
|
|
+++ b/include/net/sctp/sctp.h
|
|
@@ -433,6 +433,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
|
|
asoc->pmtu_pending = 0;
|
|
}
|
|
|
|
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
|
|
+{
|
|
+ return !list_empty(&chunk->list);
|
|
+}
|
|
+
|
|
/* Walk through a list of TLV parameters. Don't trust the
|
|
* individual parameter lengths and instead depend on
|
|
* the chunk length to indicate when to stop. Make sure
|
|
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
|
|
index 7f4eeb3..72a31db 100644
|
|
--- a/include/net/sctp/sm.h
|
|
+++ b/include/net/sctp/sm.h
|
|
@@ -248,9 +248,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
|
|
int, __be16);
|
|
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
|
|
union sctp_addr *addr);
|
|
-int sctp_verify_asconf(const struct sctp_association *asoc,
|
|
- struct sctp_paramhdr *param_hdr, void *chunk_end,
|
|
- struct sctp_paramhdr **errp);
|
|
+bool sctp_verify_asconf(const struct sctp_association *asoc,
|
|
+ struct sctp_chunk *chunk, bool addr_param_needed,
|
|
+ struct sctp_paramhdr **errp);
|
|
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
|
|
struct sctp_chunk *asconf);
|
|
int sctp_process_asconf_ack(struct sctp_association *asoc,
|
|
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
|
|
index 0dfcc92..2c2d388 100644
|
|
--- a/include/net/sctp/structs.h
|
|
+++ b/include/net/sctp/structs.h
|
|
@@ -219,6 +219,10 @@ struct sctp_sock {
|
|
atomic_t pd_mode;
|
|
/* Receive to here while partial delivery is in effect. */
|
|
struct sk_buff_head pd_lobby;
|
|
+
|
|
+ /* These must be the last fields, as they will skipped on copies,
|
|
+ * like on accept and peeloff operations
|
|
+ */
|
|
struct list_head auto_asconf_list;
|
|
int do_auto_asconf;
|
|
};
|
|
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
|
|
index f257486..3f36d45 100644
|
|
--- a/include/net/secure_seq.h
|
|
+++ b/include/net/secure_seq.h
|
|
@@ -3,8 +3,6 @@
|
|
|
|
#include <linux/types.h>
|
|
|
|
-__u32 secure_ip_id(__be32 daddr);
|
|
-__u32 secure_ipv6_id(const __be32 daddr[4]);
|
|
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
|
|
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
|
|
__be16 dport);
|
|
diff --git a/include/net/sock.h b/include/net/sock.h
|
|
index 2f7bc43..0c79a74 100644
|
|
--- a/include/net/sock.h
|
|
+++ b/include/net/sock.h
|
|
@@ -969,7 +969,6 @@ struct proto {
|
|
struct sk_buff *skb);
|
|
|
|
void (*release_cb)(struct sock *sk);
|
|
- void (*mtu_reduced)(struct sock *sk);
|
|
|
|
/* Keeping track of sk's, looking them up, and port selection methods. */
|
|
void (*hash)(struct sock *sk);
|
|
@@ -1816,6 +1815,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
|
|
|
|
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
|
|
|
|
+bool sk_mc_loop(struct sock *sk);
|
|
+
|
|
static inline bool sk_can_gso(const struct sock *sk)
|
|
{
|
|
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
|
|
diff --git a/include/net/tcp.h b/include/net/tcp.h
|
|
index 743acce..1f0d847 100644
|
|
--- a/include/net/tcp.h
|
|
+++ b/include/net/tcp.h
|
|
@@ -453,6 +453,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
|
|
*/
|
|
|
|
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
|
|
+void tcp_v4_mtu_reduced(struct sock *sk);
|
|
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
|
|
struct sock *tcp_create_openreq_child(struct sock *sk,
|
|
struct request_sock *req,
|
|
@@ -720,8 +721,10 @@ struct tcp_skb_cb {
|
|
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
|
|
#define TCPCB_LOST 0x04 /* SKB is lost */
|
|
#define TCPCB_TAGBITS 0x07 /* All tag bits */
|
|
+#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */
|
|
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
|
|
-#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
|
|
+#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
|
|
+ TCPCB_REPAIRED)
|
|
|
|
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
|
|
/* 1 byte hole */
|
|
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
|
|
index b4f1eff..409fafb 100644
|
|
--- a/include/scsi/scsi_device.h
|
|
+++ b/include/scsi/scsi_device.h
|
|
@@ -149,6 +149,7 @@ struct scsi_device {
|
|
unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
|
|
unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
|
|
unsigned skip_vpd_pages:1; /* do not read VPD pages */
|
|
+ unsigned try_vpd_pages:1; /* attempt to read VPD pages */
|
|
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
|
|
unsigned no_start_on_add:1; /* do not issue start on add */
|
|
unsigned allow_restart:1; /* issue START_UNIT in error handler */
|
|
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
|
|
index 447d2d7..183eaab 100644
|
|
--- a/include/scsi/scsi_devinfo.h
|
|
+++ b/include/scsi/scsi_devinfo.h
|
|
@@ -32,4 +32,9 @@
|
|
#define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
|
|
#define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
|
|
#define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
|
|
+#define BLIST_SCSI3LUN 0x8000000 /* Scan more than 256 LUNs
|
|
+ for sequential scan */
|
|
+#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
|
|
+#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
|
|
+
|
|
#endif
|
|
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
|
|
index 2609048..3a34f6e 100644
|
|
--- a/include/sound/ak4113.h
|
|
+++ b/include/sound/ak4113.h
|
|
@@ -286,7 +286,7 @@ struct ak4113 {
|
|
ak4113_write_t *write;
|
|
ak4113_read_t *read;
|
|
void *private_data;
|
|
- unsigned int init:1;
|
|
+ atomic_t wq_processing;
|
|
spinlock_t lock;
|
|
unsigned char regmap[AK4113_WRITABLE_REGS];
|
|
struct snd_kcontrol *kctls[AK4113_CONTROLS];
|
|
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
|
|
index 52f02a6..069299a 100644
|
|
--- a/include/sound/ak4114.h
|
|
+++ b/include/sound/ak4114.h
|
|
@@ -168,7 +168,7 @@ struct ak4114 {
|
|
ak4114_write_t * write;
|
|
ak4114_read_t * read;
|
|
void * private_data;
|
|
- unsigned int init: 1;
|
|
+ atomic_t wq_processing;
|
|
spinlock_t lock;
|
|
unsigned char regmap[6];
|
|
unsigned char txcsb[5];
|
|
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
|
|
index dfb42ca..8898cde 100644
|
|
--- a/include/sound/emu10k1.h
|
|
+++ b/include/sound/emu10k1.h
|
|
@@ -41,7 +41,8 @@
|
|
|
|
#define EMUPAGESIZE 4096
|
|
#define MAXREQVOICES 8
|
|
-#define MAXPAGES 8192
|
|
+#define MAXPAGES0 4096 /* 32 bit mode */
|
|
+#define MAXPAGES1 8192 /* 31 bit mode */
|
|
#define RESERVED 0
|
|
#define NUM_MIDI 16
|
|
#define NUM_G 64 /* use all channels */
|
|
@@ -50,8 +51,7 @@
|
|
|
|
/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
|
|
#define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
|
|
-#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
|
|
- /* See ALSA bug #1276 - rlrevell */
|
|
+#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
|
|
|
|
#define TMEMSIZE 256*1024
|
|
#define TMEMSIZEREG 4
|
|
@@ -468,8 +468,11 @@
|
|
|
|
#define MAPB 0x0d /* Cache map B */
|
|
|
|
-#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
|
|
-#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
|
|
+#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
|
|
+#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
|
|
+
|
|
+#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
|
|
+#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
|
|
|
|
/* 0x0e, 0x0f: Not used */
|
|
|
|
@@ -1706,6 +1709,7 @@ struct snd_emu10k1 {
|
|
unsigned short model; /* subsystem id */
|
|
unsigned int card_type; /* EMU10K1_CARD_* */
|
|
unsigned int ecard_ctrl; /* ecard control bits */
|
|
+ unsigned int address_mode; /* address mode */
|
|
unsigned long dma_mask; /* PCI DMA mask */
|
|
unsigned int delay_pcm_irq; /* in samples */
|
|
int max_cache_pages; /* max memory size / PAGE_SIZE */
|
|
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
|
|
index 6e89ef6..13412ab 100644
|
|
--- a/include/sound/soc-dapm.h
|
|
+++ b/include/sound/soc-dapm.h
|
|
@@ -302,7 +302,7 @@ struct device;
|
|
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
|
|
.tlv.p = (tlv_array), \
|
|
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
|
|
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
|
|
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
|
|
#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
|
|
SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
|
|
#define SOC_DAPM_ENUM(xname, xenum) \
|
|
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
|
|
index 2883a7a..98f2ade 100644
|
|
--- a/include/sound/soc-dpcm.h
|
|
+++ b/include/sound/soc-dpcm.h
|
|
@@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime {
|
|
/* state and update */
|
|
enum snd_soc_dpcm_update runtime_update;
|
|
enum snd_soc_dpcm_state state;
|
|
+
|
|
+ int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
|
|
};
|
|
|
|
/* can this BE stop and free */
|
|
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
|
|
index 1772fad..e4b9e01 100644
|
|
--- a/include/target/target_core_base.h
|
|
+++ b/include/target/target_core_base.h
|
|
@@ -407,7 +407,7 @@ struct t10_reservation {
|
|
/* Activate Persistence across Target Power Loss enabled
|
|
* for SCSI device */
|
|
int pr_aptpl_active;
|
|
-#define PR_APTPL_BUF_LEN 8192
|
|
+#define PR_APTPL_BUF_LEN 262144
|
|
u32 pr_generation;
|
|
spinlock_t registration_lock;
|
|
spinlock_t aptpl_reg_lock;
|
|
@@ -513,7 +513,7 @@ struct se_cmd {
|
|
sense_reason_t (*execute_cmd)(struct se_cmd *);
|
|
sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
|
|
u32, enum dma_data_direction);
|
|
- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
|
|
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
|
|
|
|
unsigned char *t_task_cdb;
|
|
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
|
|
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
|
|
index 06f544e..c6814b9 100644
|
|
--- a/include/trace/events/compaction.h
|
|
+++ b/include/trace/events/compaction.h
|
|
@@ -5,6 +5,7 @@
|
|
#define _TRACE_COMPACTION_H
|
|
|
|
#include <linux/types.h>
|
|
+#include <linux/list.h>
|
|
#include <linux/tracepoint.h>
|
|
#include <trace/events/gfpflags.h>
|
|
|
|
@@ -47,10 +48,11 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
|
|
|
|
TRACE_EVENT(mm_compaction_migratepages,
|
|
|
|
- TP_PROTO(unsigned long nr_migrated,
|
|
- unsigned long nr_failed),
|
|
+ TP_PROTO(unsigned long nr_all,
|
|
+ int migrate_rc,
|
|
+ struct list_head *migratepages),
|
|
|
|
- TP_ARGS(nr_migrated, nr_failed),
|
|
+ TP_ARGS(nr_all, migrate_rc, migratepages),
|
|
|
|
TP_STRUCT__entry(
|
|
__field(unsigned long, nr_migrated)
|
|
@@ -58,7 +60,22 @@ TRACE_EVENT(mm_compaction_migratepages,
|
|
),
|
|
|
|
TP_fast_assign(
|
|
- __entry->nr_migrated = nr_migrated;
|
|
+ unsigned long nr_failed = 0;
|
|
+ struct list_head *page_lru;
|
|
+
|
|
+ /*
|
|
+ * migrate_pages() returns either a non-negative number
|
|
+ * with the number of pages that failed migration, or an
|
|
+ * error code, in which case we need to count the remaining
|
|
+ * pages manually
|
|
+ */
|
|
+ if (migrate_rc >= 0)
|
|
+ nr_failed = migrate_rc;
|
|
+ else
|
|
+ list_for_each(page_lru, migratepages)
|
|
+ nr_failed++;
|
|
+
|
|
+ __entry->nr_migrated = nr_all - nr_failed;
|
|
__entry->nr_failed = nr_failed;
|
|
),
|
|
|
|
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
|
|
index aece134..4ad10ba 100644
|
|
--- a/include/trace/events/kmem.h
|
|
+++ b/include/trace/events/kmem.h
|
|
@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
|
|
|
|
TP_PROTO(struct page *page,
|
|
int alloc_order, int fallback_order,
|
|
- int alloc_migratetype, int fallback_migratetype, int new_migratetype),
|
|
+ int alloc_migratetype, int fallback_migratetype),
|
|
|
|
TP_ARGS(page,
|
|
alloc_order, fallback_order,
|
|
- alloc_migratetype, fallback_migratetype, new_migratetype),
|
|
+ alloc_migratetype, fallback_migratetype),
|
|
|
|
TP_STRUCT__entry(
|
|
__field( struct page *, page )
|
|
@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
|
|
__entry->fallback_order = fallback_order;
|
|
__entry->alloc_migratetype = alloc_migratetype;
|
|
__entry->fallback_migratetype = fallback_migratetype;
|
|
- __entry->change_ownership = (new_migratetype == alloc_migratetype);
|
|
+ __entry->change_ownership = (alloc_migratetype ==
|
|
+ get_pageblock_migratetype(page));
|
|
),
|
|
|
|
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
|
|
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
|
|
index 1c9fabd..ce0803b 100644
|
|
--- a/include/trace/events/pagemap.h
|
|
+++ b/include/trace/events/pagemap.h
|
|
@@ -28,12 +28,10 @@ TRACE_EVENT(mm_lru_insertion,
|
|
|
|
TP_PROTO(
|
|
struct page *page,
|
|
- unsigned long pfn,
|
|
- int lru,
|
|
- unsigned long flags
|
|
+ int lru
|
|
),
|
|
|
|
- TP_ARGS(page, pfn, lru, flags),
|
|
+ TP_ARGS(page, lru),
|
|
|
|
TP_STRUCT__entry(
|
|
__field(struct page *, page )
|
|
@@ -44,9 +42,9 @@ TRACE_EVENT(mm_lru_insertion,
|
|
|
|
TP_fast_assign(
|
|
__entry->page = page;
|
|
- __entry->pfn = pfn;
|
|
+ __entry->pfn = page_to_pfn(page);
|
|
__entry->lru = lru;
|
|
- __entry->flags = flags;
|
|
+ __entry->flags = trace_pagemap_flags(page);
|
|
),
|
|
|
|
/* Flag format is based on page-types.c formatting for pagemap */
|
|
@@ -64,9 +62,9 @@ TRACE_EVENT(mm_lru_insertion,
|
|
|
|
TRACE_EVENT(mm_lru_activate,
|
|
|
|
- TP_PROTO(struct page *page, unsigned long pfn),
|
|
+ TP_PROTO(struct page *page),
|
|
|
|
- TP_ARGS(page, pfn),
|
|
+ TP_ARGS(page),
|
|
|
|
TP_STRUCT__entry(
|
|
__field(struct page *, page )
|
|
@@ -75,7 +73,7 @@ TRACE_EVENT(mm_lru_activate,
|
|
|
|
TP_fast_assign(
|
|
__entry->page = page;
|
|
- __entry->pfn = pfn;
|
|
+ __entry->pfn = page_to_pfn(page);
|
|
),
|
|
|
|
/* Flag format is based on page-types.c formatting for pagemap */
|
|
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
|
|
index 67e1bbf..dc7bb01 100644
|
|
--- a/include/trace/events/sched.h
|
|
+++ b/include/trace/events/sched.h
|
|
@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
|
|
/*
|
|
* For all intents and purposes a preempted task is a running task.
|
|
*/
|
|
- if (task_preempt_count(p) & PREEMPT_ACTIVE)
|
|
+ if (preempt_count() & PREEMPT_ACTIVE)
|
|
state = TASK_RUNNING | TASK_STATE_MAX;
|
|
#endif
|
|
|
|
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
|
|
index 87792a5..33b7395 100644
|
|
--- a/include/uapi/drm/vmwgfx_drm.h
|
|
+++ b/include/uapi/drm/vmwgfx_drm.h
|
|
@@ -29,7 +29,7 @@
|
|
#define __VMWGFX_DRM_H__
|
|
|
|
#ifndef __KERNEL__
|
|
-#include <drm.h>
|
|
+#include <drm/drm.h>
|
|
#endif
|
|
|
|
#define DRM_VMW_MAX_SURFACE_FACES 6
|
|
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
|
|
index e9a1d2d..4c399ae 100644
|
|
--- a/include/uapi/linux/in6.h
|
|
+++ b/include/uapi/linux/in6.h
|
|
@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
|
|
/*
|
|
* IPV6 socket options
|
|
*/
|
|
-
|
|
+#if __UAPI_DEF_IPV6_OPTIONS
|
|
#define IPV6_ADDRFORM 1
|
|
#define IPV6_2292PKTINFO 2
|
|
#define IPV6_2292HOPOPTS 3
|
|
@@ -192,6 +192,7 @@ struct in6_flowlabel_req {
|
|
|
|
#define IPV6_IPSEC_POLICY 34
|
|
#define IPV6_XFRM_POLICY 35
|
|
+#endif
|
|
|
|
/*
|
|
* Multicast:
|
|
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
|
|
index c140620..e28807a 100644
|
|
--- a/include/uapi/linux/libc-compat.h
|
|
+++ b/include/uapi/linux/libc-compat.h
|
|
@@ -69,6 +69,7 @@
|
|
#define __UAPI_DEF_SOCKADDR_IN6 0
|
|
#define __UAPI_DEF_IPV6_MREQ 0
|
|
#define __UAPI_DEF_IPPROTO_V6 0
|
|
+#define __UAPI_DEF_IPV6_OPTIONS 0
|
|
|
|
#else
|
|
|
|
@@ -82,6 +83,7 @@
|
|
#define __UAPI_DEF_SOCKADDR_IN6 1
|
|
#define __UAPI_DEF_IPV6_MREQ 1
|
|
#define __UAPI_DEF_IPPROTO_V6 1
|
|
+#define __UAPI_DEF_IPV6_OPTIONS 1
|
|
|
|
#endif /* _NETINET_IN_H */
|
|
|
|
@@ -103,6 +105,7 @@
|
|
#define __UAPI_DEF_SOCKADDR_IN6 1
|
|
#define __UAPI_DEF_IPV6_MREQ 1
|
|
#define __UAPI_DEF_IPPROTO_V6 1
|
|
+#define __UAPI_DEF_IPV6_OPTIONS 1
|
|
|
|
/* Definitions for xattr.h */
|
|
#define __UAPI_DEF_XATTR 1
|
|
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
|
|
index 5dda450..2ec9fbc 100644
|
|
--- a/include/uapi/linux/netfilter/xt_bpf.h
|
|
+++ b/include/uapi/linux/netfilter/xt_bpf.h
|
|
@@ -6,6 +6,8 @@
|
|
|
|
#define XT_BPF_MAX_NUM_INSTR 64
|
|
|
|
+struct sk_filter;
|
|
+
|
|
struct xt_bpf_info {
|
|
__u16 bpf_program_num_elem;
|
|
struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
|
|
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
|
|
index 30db069..788c5aa 100644
|
|
--- a/include/uapi/linux/pci_regs.h
|
|
+++ b/include/uapi/linux/pci_regs.h
|
|
@@ -319,6 +319,7 @@
|
|
#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
|
|
#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
|
|
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
|
|
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
|
|
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
|
|
|
|
/* MSI-X Table entry format */
|
|
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
|
|
index 29e9c7a..f279394 100644
|
|
--- a/include/uapi/linux/usb/functionfs.h
|
|
+++ b/include/uapi/linux/usb/functionfs.h
|
|
@@ -27,24 +27,18 @@ struct usb_endpoint_descriptor_no_audio {
|
|
__u8 bInterval;
|
|
} __attribute__((packed));
|
|
|
|
-/* Legacy format, deprecated as of 3.14. */
|
|
-struct usb_functionfs_descs_head {
|
|
- __le32 magic;
|
|
- __le32 length;
|
|
- __le32 fs_count;
|
|
- __le32 hs_count;
|
|
-} __attribute__((packed, deprecated));
|
|
|
|
/*
|
|
* All numbers must be in little endian order.
|
|
*/
|
|
|
|
+/* Legacy format, deprecated as of 3.14. */
|
|
struct usb_functionfs_descs_head {
|
|
__le32 magic;
|
|
__le32 length;
|
|
__le32 fs_count;
|
|
__le32 hs_count;
|
|
-} __attribute__((packed));
|
|
+} __attribute__((packed, deprecated));
|
|
|
|
/*
|
|
* Descriptors format:
|
|
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
|
|
index 0c65e4b..ef29266 100644
|
|
--- a/include/uapi/linux/usbdevice_fs.h
|
|
+++ b/include/uapi/linux/usbdevice_fs.h
|
|
@@ -125,11 +125,12 @@ struct usbdevfs_hub_portinfo {
|
|
char port [127]; /* e.g. port 3 connects to device 27 */
|
|
};
|
|
|
|
-/* Device capability flags */
|
|
+/* System and bus capability flags */
|
|
#define USBDEVFS_CAP_ZERO_PACKET 0x01
|
|
#define USBDEVFS_CAP_BULK_CONTINUATION 0x02
|
|
#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04
|
|
#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08
|
|
+#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
|
|
|
|
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
|
|
|
|
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
|
|
index c38355c..1590c49 100644
|
|
--- a/include/uapi/linux/xattr.h
|
|
+++ b/include/uapi/linux/xattr.h
|
|
@@ -13,7 +13,7 @@
|
|
#ifndef _UAPI_LINUX_XATTR_H
|
|
#define _UAPI_LINUX_XATTR_H
|
|
|
|
-#ifdef __UAPI_DEF_XATTR
|
|
+#if __UAPI_DEF_XATTR
|
|
#define __USE_KERNEL_XATTR_DEFS
|
|
|
|
#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
|
|
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
|
|
index 99b80ab..3066718 100644
|
|
--- a/include/uapi/rdma/rdma_user_cm.h
|
|
+++ b/include/uapi/rdma/rdma_user_cm.h
|
|
@@ -34,6 +34,7 @@
|
|
#define RDMA_USER_CM_H
|
|
|
|
#include <linux/types.h>
|
|
+#include <linux/socket.h>
|
|
#include <linux/in6.h>
|
|
#include <rdma/ib_user_verbs.h>
|
|
#include <rdma/ib_user_sa.h>
|
|
diff --git a/include/xen/events.h b/include/xen/events.h
|
|
index c9c85cf..5d84cd0 100644
|
|
--- a/include/xen/events.h
|
|
+++ b/include/xen/events.h
|
|
@@ -14,7 +14,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
|
|
irq_handler_t handler,
|
|
unsigned long irqflags, const char *devname,
|
|
void *dev_id);
|
|
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
|
|
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
|
|
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
|
|
irq_handler_t handler,
|
|
unsigned long irqflags, const char *devname,
|
|
diff --git a/init/Kconfig b/init/Kconfig
|
|
index 93c5ef0..8b9521a 100644
|
|
--- a/init/Kconfig
|
|
+++ b/init/Kconfig
|
|
@@ -1389,6 +1389,7 @@ config FUTEX
|
|
|
|
config HAVE_FUTEX_CMPXCHG
|
|
bool
|
|
+ depends on FUTEX
|
|
help
|
|
Architectures should select this if futex_atomic_cmpxchg_inatomic()
|
|
is implemented and always working. This removes a couple of runtime
|
|
diff --git a/init/main.c b/init/main.c
|
|
index 9c7fd4c..008edce 100644
|
|
--- a/init/main.c
|
|
+++ b/init/main.c
|
|
@@ -617,6 +617,10 @@ asmlinkage void __init start_kernel(void)
|
|
if (efi_enabled(EFI_RUNTIME_SERVICES))
|
|
efi_enter_virtual_mode();
|
|
#endif
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
+ /* Should be run before the first non-init thread is created */
|
|
+ init_espfix_bsp();
|
|
+#endif
|
|
thread_info_cache_init();
|
|
cred_init();
|
|
fork_init(totalram_pages);
|
|
@@ -639,6 +643,7 @@ asmlinkage void __init start_kernel(void)
|
|
|
|
check_bugs();
|
|
|
|
+ acpi_subsystem_init();
|
|
sfi_init_late();
|
|
|
|
if (efi_enabled(EFI_RUNTIME_SERVICES)) {
|
|
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
|
|
index 1702864..cadddc8 100644
|
|
--- a/ipc/ipc_sysctl.c
|
|
+++ b/ipc/ipc_sysctl.c
|
|
@@ -123,7 +123,6 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
|
|
void __user *buffer, size_t *lenp, loff_t *ppos)
|
|
{
|
|
struct ctl_table ipc_table;
|
|
- size_t lenp_bef = *lenp;
|
|
int oldval;
|
|
int rc;
|
|
|
|
@@ -133,7 +132,7 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
|
|
|
|
rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
|
|
|
|
- if (write && !rc && lenp_bef == *lenp) {
|
|
+ if (write && !rc) {
|
|
int newval = *((int *)(ipc_table.data));
|
|
/*
|
|
* The file "auto_msgmni" has correctly been set.
|
|
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
|
|
index c3b3117..9699d3f 100644
|
|
--- a/ipc/mqueue.c
|
|
+++ b/ipc/mqueue.c
|
|
@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
|
|
if (!leaf)
|
|
return -ENOMEM;
|
|
INIT_LIST_HEAD(&leaf->msg_list);
|
|
- info->qsize += sizeof(*leaf);
|
|
}
|
|
leaf->priority = msg->m_type;
|
|
rb_link_node(&leaf->rb_node, parent, p);
|
|
@@ -188,7 +187,6 @@ try_again:
|
|
"lazy leaf delete!\n");
|
|
rb_erase(&leaf->rb_node, &info->msg_tree);
|
|
if (info->node_cache) {
|
|
- info->qsize -= sizeof(*leaf);
|
|
kfree(leaf);
|
|
} else {
|
|
info->node_cache = leaf;
|
|
@@ -201,7 +199,6 @@ try_again:
|
|
if (list_empty(&leaf->msg_list)) {
|
|
rb_erase(&leaf->rb_node, &info->msg_tree);
|
|
if (info->node_cache) {
|
|
- info->qsize -= sizeof(*leaf);
|
|
kfree(leaf);
|
|
} else {
|
|
info->node_cache = leaf;
|
|
@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
|
|
/* Save our speculative allocation into the cache */
|
|
INIT_LIST_HEAD(&new_leaf->msg_list);
|
|
info->node_cache = new_leaf;
|
|
- info->qsize += sizeof(*new_leaf);
|
|
new_leaf = NULL;
|
|
} else {
|
|
kfree(new_leaf);
|
|
@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
|
|
/* Save our speculative allocation into the cache */
|
|
INIT_LIST_HEAD(&new_leaf->msg_list);
|
|
info->node_cache = new_leaf;
|
|
- info->qsize += sizeof(*new_leaf);
|
|
} else {
|
|
kfree(new_leaf);
|
|
}
|
|
diff --git a/ipc/sem.c b/ipc/sem.c
|
|
index bee5554..e53c96f 100644
|
|
--- a/ipc/sem.c
|
|
+++ b/ipc/sem.c
|
|
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
|
|
}
|
|
|
|
/*
|
|
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
|
|
+ * are only control barriers.
|
|
+ * The code must pair with spin_unlock(&sem->lock) or
|
|
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
|
|
+ *
|
|
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
|
|
+ */
|
|
+#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
|
|
+
|
|
+/*
|
|
* Wait until all currently ongoing simple ops have completed.
|
|
* Caller must own sem_perm.lock.
|
|
* New simple ops cannot start, because simple ops first check
|
|
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
|
|
sem = sma->sem_base + i;
|
|
spin_unlock_wait(&sem->lock);
|
|
}
|
|
+ ipc_smp_acquire__after_spin_is_unlocked();
|
|
}
|
|
|
|
/*
|
|
@@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
|
|
|
|
/* Then check that the global lock is free */
|
|
if (!spin_is_locked(&sma->sem_perm.lock)) {
|
|
- /* spin_is_locked() is not a memory barrier */
|
|
- smp_mb();
|
|
+ /*
|
|
+ * We need a memory barrier with acquire semantics,
|
|
+ * otherwise we can race with another thread that does:
|
|
+ * complex_count++;
|
|
+ * spin_unlock(sem_perm.lock);
|
|
+ */
|
|
+ ipc_smp_acquire__after_spin_is_unlocked();
|
|
|
|
/* Now repeat the test of complex_count:
|
|
* It can't change anymore until we drop sem->lock.
|
|
@@ -2055,17 +2071,28 @@ void exit_sem(struct task_struct *tsk)
|
|
rcu_read_lock();
|
|
un = list_entry_rcu(ulp->list_proc.next,
|
|
struct sem_undo, list_proc);
|
|
- if (&un->list_proc == &ulp->list_proc)
|
|
- semid = -1;
|
|
- else
|
|
- semid = un->semid;
|
|
+ if (&un->list_proc == &ulp->list_proc) {
|
|
+ /*
|
|
+ * We must wait for freeary() before freeing this ulp,
|
|
+ * in case we raced with last sem_undo. There is a small
|
|
+ * possibility where we exit while freeary() didn't
|
|
+ * finish unlocking sem_undo_list.
|
|
+ */
|
|
+ spin_unlock_wait(&ulp->lock);
|
|
+ rcu_read_unlock();
|
|
+ break;
|
|
+ }
|
|
+ spin_lock(&ulp->lock);
|
|
+ semid = un->semid;
|
|
+ spin_unlock(&ulp->lock);
|
|
|
|
+ /* exit_sem raced with IPC_RMID, nothing to do */
|
|
if (semid == -1) {
|
|
rcu_read_unlock();
|
|
- break;
|
|
+ continue;
|
|
}
|
|
|
|
- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
|
|
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
|
|
/* exit_sem raced with IPC_RMID, nothing to do */
|
|
if (IS_ERR(sma)) {
|
|
rcu_read_unlock();
|
|
diff --git a/kernel/audit.c b/kernel/audit.c
|
|
index 0c9dc86..b45b2da 100644
|
|
--- a/kernel/audit.c
|
|
+++ b/kernel/audit.c
|
|
@@ -687,7 +687,7 @@ static int audit_get_feature(struct sk_buff *skb)
|
|
|
|
seq = nlmsg_hdr(skb)->nlmsg_seq;
|
|
|
|
- audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
|
|
+ audit_send_reply(skb, seq, AUDIT_GET_FEATURE, 0, 0, &af, sizeof(af));
|
|
|
|
return 0;
|
|
}
|
|
@@ -702,7 +702,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
|
|
|
|
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
|
|
audit_log_task_info(ab, current);
|
|
- audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
|
|
+ audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
|
|
audit_feature_names[which], !!old_feature, !!new_feature,
|
|
!!old_lock, !!new_lock, res);
|
|
audit_log_end(ab);
|
|
@@ -1628,7 +1628,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
|
|
audit_log_format(ab, " %s=", prefix);
|
|
CAP_FOR_EACH_U32(i) {
|
|
audit_log_format(ab, "%08x",
|
|
- cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
|
|
+ cap->cap[CAP_LAST_U32 - i]);
|
|
}
|
|
}
|
|
|
|
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
|
|
index 135944a..a79db03 100644
|
|
--- a/kernel/audit_tree.c
|
|
+++ b/kernel/audit_tree.c
|
|
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
|
|
chunk->owners[i].index = i;
|
|
}
|
|
fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
|
|
+ chunk->mark.mask = FS_IN_IGNORED;
|
|
return chunk;
|
|
}
|
|
|
|
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
|
|
index 92062fd..598c1dc 100644
|
|
--- a/kernel/auditfilter.c
|
|
+++ b/kernel/auditfilter.c
|
|
@@ -429,6 +429,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
|
|
if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
|
|
f->type = AUDIT_LOGINUID_SET;
|
|
f->val = 0;
|
|
+ entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
|
|
}
|
|
|
|
err = audit_field_valid(entry, f);
|
|
@@ -604,6 +605,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
|
|
data->buflen += data->values[i] =
|
|
audit_pack_string(&bufp, krule->filterkey);
|
|
break;
|
|
+ case AUDIT_LOGINUID_SET:
|
|
+ if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
|
|
+ data->fields[i] = AUDIT_LOGINUID;
|
|
+ data->values[i] = AUDIT_UID_UNSET;
|
|
+ break;
|
|
+ }
|
|
+ /* fallthrough if set */
|
|
default:
|
|
data->values[i] = f->val;
|
|
}
|
|
@@ -620,6 +628,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
|
|
int i;
|
|
|
|
if (a->flags != b->flags ||
|
|
+ a->pflags != b->pflags ||
|
|
a->listnr != b->listnr ||
|
|
a->action != b->action ||
|
|
a->field_count != b->field_count)
|
|
@@ -738,6 +747,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
|
|
new = &entry->rule;
|
|
new->vers_ops = old->vers_ops;
|
|
new->flags = old->flags;
|
|
+ new->pflags = old->pflags;
|
|
new->listnr = old->listnr;
|
|
new->action = old->action;
|
|
for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
|
|
diff --git a/kernel/capability.c b/kernel/capability.c
|
|
index 1191a44..00adb21 100644
|
|
--- a/kernel/capability.c
|
|
+++ b/kernel/capability.c
|
|
@@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
|
|
i++;
|
|
}
|
|
|
|
+ effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
|
|
+ permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
|
|
+ inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
|
|
+
|
|
new = prepare_creds();
|
|
if (!new)
|
|
return -ENOMEM;
|
|
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
|
|
index 0c753dd..18711f3 100644
|
|
--- a/kernel/cgroup.c
|
|
+++ b/kernel/cgroup.c
|
|
@@ -971,7 +971,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
|
|
parent = dentry->d_parent;
|
|
spin_lock(&parent->d_lock);
|
|
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
|
|
- list_del_init(&dentry->d_u.d_child);
|
|
+ list_del_init(&dentry->d_child);
|
|
spin_unlock(&dentry->d_lock);
|
|
spin_unlock(&parent->d_lock);
|
|
remove_dir(dentry);
|
|
@@ -3663,7 +3663,6 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
|
|
|
|
l = cgroup_pidlist_find_create(cgrp, type);
|
|
if (!l) {
|
|
- mutex_unlock(&cgrp->pidlist_mutex);
|
|
pidlist_free(array);
|
|
return -ENOMEM;
|
|
}
|
|
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
|
|
index 6b27e5c..7b4530b 100644
|
|
--- a/kernel/cpuset.c
|
|
+++ b/kernel/cpuset.c
|
|
@@ -61,12 +61,7 @@
|
|
#include <linux/cgroup.h>
|
|
#include <linux/wait.h>
|
|
|
|
-/*
|
|
- * Tracks how many cpusets are currently defined in system.
|
|
- * When there is only one cpuset (the root cpuset) we can
|
|
- * short circuit some hooks.
|
|
- */
|
|
-int number_of_cpusets __read_mostly;
|
|
+struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
|
|
|
|
/* See "Frequency meter" comments, below. */
|
|
|
|
@@ -508,9 +503,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
|
|
|
|
rcu_read_lock();
|
|
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
|
|
- if (cp == root_cs)
|
|
- continue;
|
|
-
|
|
/* skip the whole subtree if @cp doesn't have any CPU */
|
|
if (cpumask_empty(cp->cpus_allowed)) {
|
|
pos_css = css_rightmost_descendant(pos_css);
|
|
@@ -611,7 +603,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
|
|
goto done;
|
|
}
|
|
|
|
- csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
|
|
+ csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
|
|
if (!csa)
|
|
goto done;
|
|
csn = 0;
|
|
@@ -1022,7 +1014,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
|
|
task_lock(tsk);
|
|
/*
|
|
* Determine if a loop is necessary if another thread is doing
|
|
- * get_mems_allowed(). If at least one node remains unchanged and
|
|
+ * read_mems_allowed_begin(). If at least one node remains unchanged and
|
|
* tsk does not have a mempolicy, then an empty nodemask will not be
|
|
* possible when mems_allowed is larger than a word.
|
|
*/
|
|
@@ -1961,7 +1953,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
|
|
if (is_spread_slab(parent))
|
|
set_bit(CS_SPREAD_SLAB, &cs->flags);
|
|
|
|
- number_of_cpusets++;
|
|
+ cpuset_inc();
|
|
|
|
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
|
|
goto out_unlock;
|
|
@@ -2012,7 +2004,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
|
|
if (is_sched_load_balance(cs))
|
|
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
|
|
|
|
- number_of_cpusets--;
|
|
+ cpuset_dec();
|
|
clear_bit(CS_ONLINE, &cs->flags);
|
|
|
|
mutex_unlock(&cpuset_mutex);
|
|
@@ -2067,7 +2059,6 @@ int __init cpuset_init(void)
|
|
if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
|
|
BUG();
|
|
|
|
- number_of_cpusets = 1;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
|
|
index 334b398..8865cae 100644
|
|
--- a/kernel/debug/debug_core.c
|
|
+++ b/kernel/debug/debug_core.c
|
|
@@ -49,6 +49,7 @@
|
|
#include <linux/pid.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/rcupdate.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
@@ -224,10 +225,17 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
|
|
if (!CACHE_FLUSH_IS_SAFE)
|
|
return;
|
|
|
|
- if (current->mm && current->mm->mmap_cache) {
|
|
- flush_cache_range(current->mm->mmap_cache,
|
|
- addr, addr + BREAK_INSTR_SIZE);
|
|
+ if (current->mm) {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < VMACACHE_SIZE; i++) {
|
|
+ if (!current->vmacache[i])
|
|
+ continue;
|
|
+ flush_cache_range(current->vmacache[i],
|
|
+ addr, addr + BREAK_INSTR_SIZE);
|
|
+ }
|
|
}
|
|
+
|
|
/* Force flush instruction cache if it was outside the mm */
|
|
flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
|
|
}
|
|
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
|
|
index 0b097c8..449518e 100644
|
|
--- a/kernel/debug/kdb/kdb_main.c
|
|
+++ b/kernel/debug/kdb/kdb_main.c
|
|
@@ -2535,7 +2535,7 @@ static int kdb_summary(int argc, const char **argv)
|
|
#define K(x) ((x) << (PAGE_SHIFT - 10))
|
|
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
|
|
"Buffers: %8lu kB\n",
|
|
- val.totalram, val.freeram, val.bufferram);
|
|
+ K(val.totalram), K(val.freeram), K(val.bufferram));
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index f774e93..3bf20e3 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -39,6 +39,7 @@
|
|
#include <linux/hw_breakpoint.h>
|
|
#include <linux/mm_types.h>
|
|
#include <linux/cgroup.h>
|
|
+#include <linux/compat.h>
|
|
|
|
#include "internal.h"
|
|
|
|
@@ -1516,6 +1517,11 @@ retry:
|
|
*/
|
|
if (ctx->is_active) {
|
|
raw_spin_unlock_irq(&ctx->lock);
|
|
+ /*
|
|
+ * Reload the task pointer, it might have been changed by
|
|
+ * a concurrent perf_event_context_sched_out().
|
|
+ */
|
|
+ task = ctx->task;
|
|
goto retry;
|
|
}
|
|
|
|
@@ -1957,6 +1963,11 @@ retry:
|
|
*/
|
|
if (ctx->is_active) {
|
|
raw_spin_unlock_irq(&ctx->lock);
|
|
+ /*
|
|
+ * Reload the task pointer, it might have been changed by
|
|
+ * a concurrent perf_event_context_sched_out().
|
|
+ */
|
|
+ task = ctx->task;
|
|
goto retry;
|
|
}
|
|
|
|
@@ -3551,28 +3562,21 @@ static void perf_event_for_each(struct perf_event *event,
|
|
mutex_unlock(&ctx->mutex);
|
|
}
|
|
|
|
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
-{
|
|
- struct perf_event_context *ctx = event->ctx;
|
|
- int ret = 0, active;
|
|
+struct period_event {
|
|
+ struct perf_event *event;
|
|
u64 value;
|
|
+};
|
|
|
|
- if (!is_sampling_event(event))
|
|
- return -EINVAL;
|
|
-
|
|
- if (copy_from_user(&value, arg, sizeof(value)))
|
|
- return -EFAULT;
|
|
-
|
|
- if (!value)
|
|
- return -EINVAL;
|
|
+static int __perf_event_period(void *info)
|
|
+{
|
|
+ struct period_event *pe = info;
|
|
+ struct perf_event *event = pe->event;
|
|
+ struct perf_event_context *ctx = event->ctx;
|
|
+ u64 value = pe->value;
|
|
+ bool active;
|
|
|
|
- raw_spin_lock_irq(&ctx->lock);
|
|
+ raw_spin_lock(&ctx->lock);
|
|
if (event->attr.freq) {
|
|
- if (value > sysctl_perf_event_sample_rate) {
|
|
- ret = -EINVAL;
|
|
- goto unlock;
|
|
- }
|
|
-
|
|
event->attr.sample_freq = value;
|
|
} else {
|
|
event->attr.sample_period = value;
|
|
@@ -3591,11 +3595,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
event->pmu->start(event, PERF_EF_RELOAD);
|
|
perf_pmu_enable(ctx->pmu);
|
|
}
|
|
+ raw_spin_unlock(&ctx->lock);
|
|
|
|
-unlock:
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
+{
|
|
+ struct period_event pe = { .event = event, };
|
|
+ struct perf_event_context *ctx = event->ctx;
|
|
+ struct task_struct *task;
|
|
+ u64 value;
|
|
+
|
|
+ if (!is_sampling_event(event))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (copy_from_user(&value, arg, sizeof(value)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ if (!value)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (event->attr.freq && value > sysctl_perf_event_sample_rate)
|
|
+ return -EINVAL;
|
|
+
|
|
+ task = ctx->task;
|
|
+ pe.value = value;
|
|
+
|
|
+ if (!task) {
|
|
+ cpu_function_call(event->cpu, __perf_event_period, &pe);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+retry:
|
|
+ if (!task_function_call(task, __perf_event_period, &pe))
|
|
+ return 0;
|
|
+
|
|
+ raw_spin_lock_irq(&ctx->lock);
|
|
+ if (ctx->is_active) {
|
|
+ raw_spin_unlock_irq(&ctx->lock);
|
|
+ task = ctx->task;
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
+ __perf_event_period(&pe);
|
|
raw_spin_unlock_irq(&ctx->lock);
|
|
|
|
- return ret;
|
|
+ return 0;
|
|
}
|
|
|
|
static const struct file_operations perf_fops;
|
|
@@ -3683,6 +3729,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
return 0;
|
|
}
|
|
|
|
+#ifdef CONFIG_COMPAT
|
|
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ switch (_IOC_NR(cmd)) {
|
|
+ case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
|
|
+ case _IOC_NR(PERF_EVENT_IOC_ID):
|
|
+ /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
|
|
+ if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
|
|
+ cmd &= ~IOCSIZE_MASK;
|
|
+ cmd |= sizeof(void *) << IOCSIZE_SHIFT;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ return perf_ioctl(file, cmd, arg);
|
|
+}
|
|
+#else
|
|
+# define perf_compat_ioctl NULL
|
|
+#endif
|
|
+
|
|
int perf_event_task_enable(void)
|
|
{
|
|
struct perf_event *event;
|
|
@@ -4175,7 +4241,7 @@ static const struct file_operations perf_fops = {
|
|
.read = perf_read,
|
|
.poll = perf_poll,
|
|
.unlocked_ioctl = perf_ioctl,
|
|
- .compat_ioctl = perf_ioctl,
|
|
+ .compat_ioctl = perf_compat_ioctl,
|
|
.mmap = perf_mmap,
|
|
.fasync = perf_fasync,
|
|
};
|
|
@@ -4187,12 +4253,20 @@ static const struct file_operations perf_fops = {
|
|
* to user-space before waking everybody up.
|
|
*/
|
|
|
|
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
|
|
+{
|
|
+ /* only the parent has fasync state */
|
|
+ if (event->parent)
|
|
+ event = event->parent;
|
|
+ return &event->fasync;
|
|
+}
|
|
+
|
|
void perf_event_wakeup(struct perf_event *event)
|
|
{
|
|
ring_buffer_wakeup(event);
|
|
|
|
if (event->pending_kill) {
|
|
- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
|
|
+ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
|
|
event->pending_kill = 0;
|
|
}
|
|
}
|
|
@@ -4201,6 +4275,13 @@ static void perf_pending_event(struct irq_work *entry)
|
|
{
|
|
struct perf_event *event = container_of(entry,
|
|
struct perf_event, pending);
|
|
+ int rctx;
|
|
+
|
|
+ rctx = perf_swevent_get_recursion_context();
|
|
+ /*
|
|
+ * If we 'fail' here, that's OK, it means recursion is already disabled
|
|
+ * and we won't recurse 'further'.
|
|
+ */
|
|
|
|
if (event->pending_disable) {
|
|
event->pending_disable = 0;
|
|
@@ -4211,6 +4292,9 @@ static void perf_pending_event(struct irq_work *entry)
|
|
event->pending_wakeup = 0;
|
|
perf_event_wakeup(event);
|
|
}
|
|
+
|
|
+ if (rctx >= 0)
|
|
+ perf_swevent_put_recursion_context(rctx);
|
|
}
|
|
|
|
/*
|
|
@@ -5391,7 +5475,7 @@ static int __perf_event_overflow(struct perf_event *event,
|
|
else
|
|
perf_event_output(event, data, regs);
|
|
|
|
- if (event->fasync && event->pending_kill) {
|
|
+ if (*perf_event_fasync(event) && event->pending_kill) {
|
|
event->pending_wakeup = 1;
|
|
irq_work_queue(&event->pending);
|
|
}
|
|
@@ -7209,11 +7293,11 @@ SYSCALL_DEFINE5(perf_event_open,
|
|
|
|
if (move_group) {
|
|
synchronize_rcu();
|
|
- perf_install_in_context(ctx, group_leader, event->cpu);
|
|
+ perf_install_in_context(ctx, group_leader, group_leader->cpu);
|
|
get_ctx(ctx);
|
|
list_for_each_entry(sibling, &group_leader->sibling_list,
|
|
group_entry) {
|
|
- perf_install_in_context(ctx, sibling, event->cpu);
|
|
+ perf_install_in_context(ctx, sibling, sibling->cpu);
|
|
get_ctx(ctx);
|
|
}
|
|
}
|
|
@@ -7826,8 +7910,10 @@ int perf_event_init_task(struct task_struct *child)
|
|
|
|
for_each_task_context_nr(ctxn) {
|
|
ret = perf_event_init_context(child, ctxn);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ perf_event_free_task(child);
|
|
return ret;
|
|
+ }
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
|
|
index 307d87c..1139b22 100644
|
|
--- a/kernel/events/uprobes.c
|
|
+++ b/kernel/events/uprobes.c
|
|
@@ -1621,7 +1621,6 @@ bool uprobe_deny_signal(void)
|
|
if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
|
|
utask->state = UTASK_SSTEP_TRAPPED;
|
|
set_tsk_thread_flag(t, TIF_UPROBE);
|
|
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
|
|
}
|
|
}
|
|
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
index c44bff8..e2c6853 100644
|
|
--- a/kernel/fork.c
|
|
+++ b/kernel/fork.c
|
|
@@ -28,6 +28,8 @@
|
|
#include <linux/mman.h>
|
|
#include <linux/mmu_notifier.h>
|
|
#include <linux/fs.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/nsproxy.h>
|
|
#include <linux/capability.h>
|
|
#include <linux/cpu.h>
|
|
@@ -363,7 +365,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
|
|
mm->locked_vm = 0;
|
|
mm->mmap = NULL;
|
|
- mm->mmap_cache = NULL;
|
|
+ mm->vmacache_seqnum = 0;
|
|
mm->map_count = 0;
|
|
cpumask_clear(mm_cpumask(mm));
|
|
mm->mm_rb = RB_ROOT;
|
|
@@ -876,6 +878,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
|
|
if (!oldmm)
|
|
return 0;
|
|
|
|
+ /* initialize the new vmacache entries */
|
|
+ vmacache_flush(tsk);
|
|
+
|
|
if (clone_flags & CLONE_VM) {
|
|
atomic_inc(&oldmm->mm_users);
|
|
mm = oldmm;
|
|
@@ -1323,7 +1328,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
|
goto bad_fork_cleanup_policy;
|
|
retval = audit_alloc(p);
|
|
if (retval)
|
|
- goto bad_fork_cleanup_policy;
|
|
+ goto bad_fork_cleanup_perf;
|
|
/* copy all the process information */
|
|
retval = copy_semundo(clone_flags, p);
|
|
if (retval)
|
|
@@ -1522,8 +1527,9 @@ bad_fork_cleanup_semundo:
|
|
exit_sem(p);
|
|
bad_fork_cleanup_audit:
|
|
audit_free(p);
|
|
-bad_fork_cleanup_policy:
|
|
+bad_fork_cleanup_perf:
|
|
perf_event_free_task(p);
|
|
+bad_fork_cleanup_policy:
|
|
#ifdef CONFIG_NUMA
|
|
mpol_put(p->mempolicy);
|
|
bad_fork_cleanup_cgroup:
|
|
diff --git a/kernel/freezer.c b/kernel/freezer.c
|
|
index aa6a8aa..8f9279b 100644
|
|
--- a/kernel/freezer.c
|
|
+++ b/kernel/freezer.c
|
|
@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
|
|
if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
|
|
return false;
|
|
|
|
+ if (test_thread_flag(TIF_MEMDIE))
|
|
+ return false;
|
|
+
|
|
if (pm_nosig_freezing || cgroup_freezing(p))
|
|
return true;
|
|
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index e3087af..fda2950 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -329,6 +329,8 @@ static void get_futex_key_refs(union futex_key *key)
|
|
case FUT_OFF_MMSHARED:
|
|
futex_get_mm(key); /* implies MB (B) */
|
|
break;
|
|
+ default:
|
|
+ smp_mb(); /* explicit MB (B) */
|
|
}
|
|
}
|
|
|
|
@@ -2614,6 +2616,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
* shared futexes. We need to compare the keys:
|
|
*/
|
|
if (match_futex(&q.key, &key2)) {
|
|
+ queue_unlock(hb);
|
|
ret = -EINVAL;
|
|
goto out_put_keys;
|
|
}
|
|
diff --git a/kernel/groups.c b/kernel/groups.c
|
|
index 90cf1c3..67b4ba3 100644
|
|
--- a/kernel/groups.c
|
|
+++ b/kernel/groups.c
|
|
@@ -6,6 +6,7 @@
|
|
#include <linux/slab.h>
|
|
#include <linux/security.h>
|
|
#include <linux/syscalls.h>
|
|
+#include <linux/user_namespace.h>
|
|
#include <asm/uaccess.h>
|
|
|
|
/* init to 2 - one for init_task, one to ensure it is never freed */
|
|
@@ -223,6 +224,14 @@ out:
|
|
return i;
|
|
}
|
|
|
|
+bool may_setgroups(void)
|
|
+{
|
|
+ struct user_namespace *user_ns = current_user_ns();
|
|
+
|
|
+ return ns_capable(user_ns, CAP_SETGID) &&
|
|
+ userns_may_setgroups(user_ns);
|
|
+}
|
|
+
|
|
/*
|
|
* SMP: Our groups are copy-on-write. We can set them safely
|
|
* without another task interfering.
|
|
@@ -233,7 +242,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
|
|
struct group_info *group_info;
|
|
int retval;
|
|
|
|
- if (!ns_capable(current_user_ns(), CAP_SETGID))
|
|
+ if (!may_setgroups())
|
|
return -EPERM;
|
|
if ((unsigned)gidsetsize > NGROUPS_MAX)
|
|
return -EINVAL;
|
|
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
|
|
index 1ef0606..0296d6f 100644
|
|
--- a/kernel/irq/devres.c
|
|
+++ b/kernel/irq/devres.c
|
|
@@ -104,7 +104,7 @@ int devm_request_any_context_irq(struct device *dev, unsigned int irq,
|
|
return -ENOMEM;
|
|
|
|
rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
|
|
- if (rc) {
|
|
+ if (rc < 0) {
|
|
devres_free(dr);
|
|
return rc;
|
|
}
|
|
@@ -113,7 +113,7 @@ int devm_request_any_context_irq(struct device *dev, unsigned int irq,
|
|
dr->dev_id = dev_id;
|
|
devres_add(dev, dr);
|
|
|
|
- return 0;
|
|
+ return rc;
|
|
}
|
|
EXPORT_SYMBOL(devm_request_any_context_irq);
|
|
|
|
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
|
|
index 001fa5b..8a160e8 100644
|
|
--- a/kernel/irq/internals.h
|
|
+++ b/kernel/irq/internals.h
|
|
@@ -74,6 +74,14 @@ extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
|
|
extern void mask_irq(struct irq_desc *desc);
|
|
extern void unmask_irq(struct irq_desc *desc);
|
|
|
|
+#ifdef CONFIG_SPARSE_IRQ
|
|
+extern void irq_lock_sparse(void);
|
|
+extern void irq_unlock_sparse(void);
|
|
+#else
|
|
+static inline void irq_lock_sparse(void) { }
|
|
+static inline void irq_unlock_sparse(void) { }
|
|
+#endif
|
|
+
|
|
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
|
|
|
|
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
|
|
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
|
|
index 8ab8e93..07d4551 100644
|
|
--- a/kernel/irq/irqdesc.c
|
|
+++ b/kernel/irq/irqdesc.c
|
|
@@ -131,6 +131,16 @@ static void free_masks(struct irq_desc *desc)
|
|
static inline void free_masks(struct irq_desc *desc) { }
|
|
#endif
|
|
|
|
+void irq_lock_sparse(void)
|
|
+{
|
|
+ mutex_lock(&sparse_irq_lock);
|
|
+}
|
|
+
|
|
+void irq_unlock_sparse(void)
|
|
+{
|
|
+ mutex_unlock(&sparse_irq_lock);
|
|
+}
|
|
+
|
|
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
|
|
{
|
|
struct irq_desc *desc;
|
|
@@ -167,6 +177,12 @@ static void free_desc(unsigned int irq)
|
|
|
|
unregister_irq_proc(irq, desc);
|
|
|
|
+ /*
|
|
+ * sparse_irq_lock protects also show_interrupts() and
|
|
+ * kstat_irq_usr(). Once we deleted the descriptor from the
|
|
+ * sparse tree we can free it. Access in proc will fail to
|
|
+ * lookup the descriptor.
|
|
+ */
|
|
mutex_lock(&sparse_irq_lock);
|
|
delete_irq_desc(irq);
|
|
mutex_unlock(&sparse_irq_lock);
|
|
@@ -489,6 +505,15 @@ void dynamic_irq_cleanup(unsigned int irq)
|
|
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
|
}
|
|
|
|
+/**
|
|
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
|
|
+ * @irq: The interrupt number
|
|
+ * @cpu: The cpu number
|
|
+ *
|
|
+ * Returns the sum of interrupt counts on @cpu since boot for
|
|
+ * @irq. The caller must ensure that the interrupt is not removed
|
|
+ * concurrently.
|
|
+ */
|
|
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
|
|
{
|
|
struct irq_desc *desc = irq_to_desc(irq);
|
|
@@ -497,6 +522,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
|
|
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
|
|
}
|
|
|
|
+/**
|
|
+ * kstat_irqs - Get the statistics for an interrupt
|
|
+ * @irq: The interrupt number
|
|
+ *
|
|
+ * Returns the sum of interrupt counts on all cpus since boot for
|
|
+ * @irq. The caller must ensure that the interrupt is not removed
|
|
+ * concurrently.
|
|
+ */
|
|
unsigned int kstat_irqs(unsigned int irq)
|
|
{
|
|
struct irq_desc *desc = irq_to_desc(irq);
|
|
@@ -509,3 +542,22 @@ unsigned int kstat_irqs(unsigned int irq)
|
|
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
|
|
return sum;
|
|
}
|
|
+
|
|
+/**
|
|
+ * kstat_irqs_usr - Get the statistics for an interrupt
|
|
+ * @irq: The interrupt number
|
|
+ *
|
|
+ * Returns the sum of interrupt counts on all cpus since boot for
|
|
+ * @irq. Contrary to kstat_irqs() this can be called from any
|
|
+ * preemptible context. It's protected against concurrent removal of
|
|
+ * an interrupt descriptor when sparse irqs are enabled.
|
|
+ */
|
|
+unsigned int kstat_irqs_usr(unsigned int irq)
|
|
+{
|
|
+ int sum;
|
|
+
|
|
+ irq_lock_sparse();
|
|
+ sum = kstat_irqs(irq);
|
|
+ irq_unlock_sparse();
|
|
+ return sum;
|
|
+}
|
|
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
|
|
index 36f6ee1..095cd72 100644
|
|
--- a/kernel/irq/proc.c
|
|
+++ b/kernel/irq/proc.c
|
|
@@ -15,6 +15,23 @@
|
|
|
|
#include "internals.h"
|
|
|
|
+/*
|
|
+ * Access rules:
|
|
+ *
|
|
+ * procfs protects read/write of /proc/irq/N/ files against a
|
|
+ * concurrent free of the interrupt descriptor. remove_proc_entry()
|
|
+ * immediately prevents new read/writes to happen and waits for
|
|
+ * already running read/write functions to complete.
|
|
+ *
|
|
+ * We remove the proc entries first and then delete the interrupt
|
|
+ * descriptor from the radix tree and free it. So it is guaranteed
|
|
+ * that irq_to_desc(N) is valid as long as the read/writes are
|
|
+ * permitted by procfs.
|
|
+ *
|
|
+ * The read from /proc/interrupts is a different problem because there
|
|
+ * is no protection. So the lookup and the access to irqdesc
|
|
+ * information must be protected by sparse_irq_lock.
|
|
+ */
|
|
static struct proc_dir_entry *root_irq_dir;
|
|
|
|
#ifdef CONFIG_SMP
|
|
@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
|
|
seq_putc(p, '\n');
|
|
}
|
|
|
|
+ irq_lock_sparse();
|
|
desc = irq_to_desc(i);
|
|
if (!desc)
|
|
- return 0;
|
|
+ goto outsparse;
|
|
|
|
raw_spin_lock_irqsave(&desc->lock, flags);
|
|
for_each_online_cpu(j)
|
|
@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
|
|
seq_putc(p, '\n');
|
|
out:
|
|
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
|
+outsparse:
|
|
+ irq_unlock_sparse();
|
|
return 0;
|
|
}
|
|
#endif
|
|
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
|
|
index 9065107..7a5237a 100644
|
|
--- a/kernel/irq/resend.c
|
|
+++ b/kernel/irq/resend.c
|
|
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
|
|
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
|
|
#ifdef CONFIG_HARDIRQS_SW_RESEND
|
|
/*
|
|
- * If the interrupt has a parent irq and runs
|
|
- * in the thread context of the parent irq,
|
|
- * retrigger the parent.
|
|
+ * If the interrupt is running in the thread
|
|
+ * context of the parent irq we need to be
|
|
+ * careful, because we cannot trigger it
|
|
+ * directly.
|
|
*/
|
|
- if (desc->parent_irq &&
|
|
- irq_settings_is_nested_thread(desc))
|
|
+ if (irq_settings_is_nested_thread(desc)) {
|
|
+ /*
|
|
+ * If the parent_irq is valid, we
|
|
+ * retrigger the parent, otherwise we
|
|
+ * do nothing.
|
|
+ */
|
|
+ if (!desc->parent_irq)
|
|
+ return;
|
|
irq = desc->parent_irq;
|
|
+ }
|
|
/* Set it pending and activate the softirq: */
|
|
set_bit(irq, irqs_resend);
|
|
tasklet_schedule(&resend_tasklet);
|
|
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
|
|
index e30ac0f..0aa69ea 100644
|
|
--- a/kernel/kcmp.c
|
|
+++ b/kernel/kcmp.c
|
|
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
|
|
*/
|
|
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
|
|
{
|
|
- long ret;
|
|
+ long t1, t2;
|
|
|
|
- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
|
|
+ t1 = kptr_obfuscate((long)v1, type);
|
|
+ t2 = kptr_obfuscate((long)v2, type);
|
|
|
|
- return (ret < 0) | ((ret > 0) << 1);
|
|
+ return (t1 < t2) | ((t1 > t2) << 1);
|
|
}
|
|
|
|
/* The caller must have pinned the task */
|
|
diff --git a/kernel/module.c b/kernel/module.c
|
|
index 6716a1f..1d679a6 100644
|
|
--- a/kernel/module.c
|
|
+++ b/kernel/module.c
|
|
@@ -1841,7 +1841,9 @@ static void free_module(struct module *mod)
|
|
|
|
/* We leave it in list to prevent duplicate loads, but make sure
|
|
* that noone uses it while it's being deconstructed. */
|
|
+ mutex_lock(&module_mutex);
|
|
mod->state = MODULE_STATE_UNFORMED;
|
|
+ mutex_unlock(&module_mutex);
|
|
|
|
/* Remove dynamic debug info */
|
|
ddebug_remove_module(mod->name);
|
|
diff --git a/kernel/pid.c b/kernel/pid.c
|
|
index 9b9a266..82430c8 100644
|
|
--- a/kernel/pid.c
|
|
+++ b/kernel/pid.c
|
|
@@ -341,6 +341,8 @@ out:
|
|
|
|
out_unlock:
|
|
spin_unlock_irq(&pidmap_lock);
|
|
+ put_pid_ns(ns);
|
|
+
|
|
out_free:
|
|
while (++i <= ns->level)
|
|
free_pidmap(pid->numbers + i);
|
|
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
|
|
index 424c2d4..77e6b83 100644
|
|
--- a/kernel/posix-timers.c
|
|
+++ b/kernel/posix-timers.c
|
|
@@ -634,6 +634,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
|
|
goto out;
|
|
}
|
|
} else {
|
|
+ memset(&event.sigev_value, 0, sizeof(event.sigev_value));
|
|
event.sigev_notify = SIGEV_SIGNAL;
|
|
event.sigev_signo = SIGALRM;
|
|
event.sigev_value.sival_int = new_timer->it_id;
|
|
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
|
|
index 2fac9cc..9d18628 100644
|
|
--- a/kernel/power/Kconfig
|
|
+++ b/kernel/power/Kconfig
|
|
@@ -191,7 +191,7 @@ config DPM_WATCHDOG
|
|
config DPM_WATCHDOG_TIMEOUT
|
|
int "Watchdog timeout in seconds"
|
|
range 1 120
|
|
- default 12
|
|
+ default 60
|
|
depends on DPM_WATCHDOG
|
|
|
|
config PM_TRACE
|
|
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
|
|
index 37170d4..126586a 100644
|
|
--- a/kernel/power/hibernate.c
|
|
+++ b/kernel/power/hibernate.c
|
|
@@ -492,8 +492,14 @@ int hibernation_restore(int platform_mode)
|
|
error = dpm_suspend_start(PMSG_QUIESCE);
|
|
if (!error) {
|
|
error = resume_target_kernel(platform_mode);
|
|
- dpm_resume_end(PMSG_RECOVER);
|
|
+ /*
|
|
+ * The above should either succeed and jump to the new kernel,
|
|
+ * or return with an error. Otherwise things are just
|
|
+ * undefined, so let's be paranoid.
|
|
+ */
|
|
+ BUG_ON(!error);
|
|
}
|
|
+ dpm_resume_end(PMSG_RECOVER);
|
|
pm_restore_gfp_mask();
|
|
ftrace_start();
|
|
resume_console();
|
|
diff --git a/kernel/power/main.c b/kernel/power/main.c
|
|
index 1d1bf63..3ae41cd 100644
|
|
--- a/kernel/power/main.c
|
|
+++ b/kernel/power/main.c
|
|
@@ -293,12 +293,12 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
|
|
{
|
|
char *s = buf;
|
|
#ifdef CONFIG_SUSPEND
|
|
- int i;
|
|
+ suspend_state_t i;
|
|
+
|
|
+ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
|
|
+ if (pm_states[i].state)
|
|
+ s += sprintf(s,"%s ", pm_states[i].label);
|
|
|
|
- for (i = 0; i < PM_SUSPEND_MAX; i++) {
|
|
- if (pm_states[i] && valid_state(i))
|
|
- s += sprintf(s,"%s ", pm_states[i]);
|
|
- }
|
|
#endif
|
|
#ifdef CONFIG_HIBERNATION
|
|
s += sprintf(s, "%s\n", "disk");
|
|
@@ -314,7 +314,7 @@ static suspend_state_t decode_state(const char *buf, size_t n)
|
|
{
|
|
#ifdef CONFIG_SUSPEND
|
|
suspend_state_t state = PM_SUSPEND_MIN;
|
|
- const char * const *s;
|
|
+ struct pm_sleep_state *s;
|
|
#endif
|
|
char *p;
|
|
int len;
|
|
@@ -328,8 +328,9 @@ static suspend_state_t decode_state(const char *buf, size_t n)
|
|
|
|
#ifdef CONFIG_SUSPEND
|
|
for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
|
|
- if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
|
|
- return state;
|
|
+ if (s->state && len == strlen(s->label)
|
|
+ && !strncmp(buf, s->label, len))
|
|
+ return s->state;
|
|
#endif
|
|
|
|
return PM_SUSPEND_ON;
|
|
@@ -447,8 +448,8 @@ static ssize_t autosleep_show(struct kobject *kobj,
|
|
|
|
#ifdef CONFIG_SUSPEND
|
|
if (state < PM_SUSPEND_MAX)
|
|
- return sprintf(buf, "%s\n", valid_state(state) ?
|
|
- pm_states[state] : "error");
|
|
+ return sprintf(buf, "%s\n", pm_states[state].state ?
|
|
+ pm_states[state].label : "error");
|
|
#endif
|
|
#ifdef CONFIG_HIBERNATION
|
|
return sprintf(buf, "disk\n");
|
|
diff --git a/kernel/power/power.h b/kernel/power/power.h
|
|
index 7d4b7ff..f770cad3 100644
|
|
--- a/kernel/power/power.h
|
|
+++ b/kernel/power/power.h
|
|
@@ -175,17 +175,20 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
|
|
unsigned int, char *);
|
|
|
|
#ifdef CONFIG_SUSPEND
|
|
+struct pm_sleep_state {
|
|
+ const char *label;
|
|
+ suspend_state_t state;
|
|
+};
|
|
+
|
|
/* kernel/power/suspend.c */
|
|
-extern const char *const pm_states[];
|
|
+extern struct pm_sleep_state pm_states[];
|
|
|
|
-extern bool valid_state(suspend_state_t state);
|
|
extern int suspend_devices_and_enter(suspend_state_t state);
|
|
#else /* !CONFIG_SUSPEND */
|
|
static inline int suspend_devices_and_enter(suspend_state_t state)
|
|
{
|
|
return -ENOSYS;
|
|
}
|
|
-static inline bool valid_state(suspend_state_t state) { return false; }
|
|
#endif /* !CONFIG_SUSPEND */
|
|
|
|
#ifdef CONFIG_PM_TEST_SUSPEND
|
|
diff --git a/kernel/power/process.c b/kernel/power/process.c
|
|
index 14f9a8d..f1fe7ec 100644
|
|
--- a/kernel/power/process.c
|
|
+++ b/kernel/power/process.c
|
|
@@ -107,6 +107,28 @@ static int try_to_freeze_tasks(bool user_only)
|
|
return todo ? -EBUSY : 0;
|
|
}
|
|
|
|
+/*
|
|
+ * Returns true if all freezable tasks (except for current) are frozen already
|
|
+ */
|
|
+static bool check_frozen_processes(void)
|
|
+{
|
|
+ struct task_struct *g, *p;
|
|
+ bool ret = true;
|
|
+
|
|
+ read_lock(&tasklist_lock);
|
|
+ for_each_process_thread(g, p) {
|
|
+ if (p != current && !freezer_should_skip(p) &&
|
|
+ !frozen(p)) {
|
|
+ ret = false;
|
|
+ goto done;
|
|
+ }
|
|
+ }
|
|
+done:
|
|
+ read_unlock(&tasklist_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/**
|
|
* freeze_processes - Signal user space processes to enter the refrigerator.
|
|
* The current thread will not be frozen. The same process that calls
|
|
@@ -117,6 +139,7 @@ static int try_to_freeze_tasks(bool user_only)
|
|
int freeze_processes(void)
|
|
{
|
|
int error;
|
|
+ int oom_kills_saved;
|
|
|
|
error = __usermodehelper_disable(UMH_FREEZING);
|
|
if (error)
|
|
@@ -130,12 +153,27 @@ int freeze_processes(void)
|
|
|
|
printk("Freezing user space processes ... ");
|
|
pm_freezing = true;
|
|
+ oom_kills_saved = oom_kills_count();
|
|
error = try_to_freeze_tasks(true);
|
|
if (!error) {
|
|
- printk("done.");
|
|
__usermodehelper_set_disable_depth(UMH_DISABLED);
|
|
oom_killer_disable();
|
|
+
|
|
+ /*
|
|
+ * There might have been an OOM kill while we were
|
|
+ * freezing tasks and the killed task might be still
|
|
+ * on the way out so we have to double check for race.
|
|
+ */
|
|
+ if (oom_kills_count() != oom_kills_saved &&
|
|
+ !check_frozen_processes()) {
|
|
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
|
|
+ printk("OOM in progress.");
|
|
+ error = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
+ printk("done.");
|
|
}
|
|
+done:
|
|
printk("\n");
|
|
BUG_ON(in_atomic());
|
|
|
|
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
|
|
index 62ee437..5455d5c 100644
|
|
--- a/kernel/power/suspend.c
|
|
+++ b/kernel/power/suspend.c
|
|
@@ -29,10 +29,10 @@
|
|
|
|
#include "power.h"
|
|
|
|
-const char *const pm_states[PM_SUSPEND_MAX] = {
|
|
- [PM_SUSPEND_FREEZE] = "freeze",
|
|
- [PM_SUSPEND_STANDBY] = "standby",
|
|
- [PM_SUSPEND_MEM] = "mem",
|
|
+struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
|
|
+ [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
|
|
+ [PM_SUSPEND_STANDBY] = { .label = "standby", },
|
|
+ [PM_SUSPEND_MEM] = { .label = "mem", },
|
|
};
|
|
|
|
static const struct platform_suspend_ops *suspend_ops;
|
|
@@ -62,42 +62,34 @@ void freeze_wake(void)
|
|
}
|
|
EXPORT_SYMBOL_GPL(freeze_wake);
|
|
|
|
+static bool valid_state(suspend_state_t state)
|
|
+{
|
|
+ /*
|
|
+ * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
|
|
+ * support and need to be valid to the low level
|
|
+ * implementation, no valid callback implies that none are valid.
|
|
+ */
|
|
+ return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
|
|
+}
|
|
+
|
|
/**
|
|
* suspend_set_ops - Set the global suspend method table.
|
|
* @ops: Suspend operations to use.
|
|
*/
|
|
void suspend_set_ops(const struct platform_suspend_ops *ops)
|
|
{
|
|
+ suspend_state_t i;
|
|
+
|
|
lock_system_sleep();
|
|
+
|
|
suspend_ops = ops;
|
|
+ for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
|
|
+ pm_states[i].state = valid_state(i) ? i : 0;
|
|
+
|
|
unlock_system_sleep();
|
|
}
|
|
EXPORT_SYMBOL_GPL(suspend_set_ops);
|
|
|
|
-bool valid_state(suspend_state_t state)
|
|
-{
|
|
- if (state == PM_SUSPEND_FREEZE) {
|
|
-#ifdef CONFIG_PM_DEBUG
|
|
- if (pm_test_level != TEST_NONE &&
|
|
- pm_test_level != TEST_FREEZER &&
|
|
- pm_test_level != TEST_DEVICES &&
|
|
- pm_test_level != TEST_PLATFORM) {
|
|
- printk(KERN_WARNING "Unsupported pm_test mode for "
|
|
- "freeze state, please choose "
|
|
- "none/freezer/devices/platform.\n");
|
|
- return false;
|
|
- }
|
|
-#endif
|
|
- return true;
|
|
- }
|
|
- /*
|
|
- * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
|
|
- * support and need to be valid to the lowlevel
|
|
- * implementation, no valid callback implies that none are valid.
|
|
- */
|
|
- return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
|
|
-}
|
|
-
|
|
/**
|
|
* suspend_valid_only_mem - Generic memory-only valid callback.
|
|
*
|
|
@@ -324,9 +316,17 @@ static int enter_state(suspend_state_t state)
|
|
{
|
|
int error;
|
|
|
|
- if (!valid_state(state))
|
|
- return -ENODEV;
|
|
-
|
|
+ if (state == PM_SUSPEND_FREEZE) {
|
|
+#ifdef CONFIG_PM_DEBUG
|
|
+ if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
|
|
+ pr_warning("PM: Unsupported test mode for freeze state,"
|
|
+ "please choose none/freezer/devices/platform.\n");
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+#endif
|
|
+ } else if (!valid_state(state)) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
if (!mutex_trylock(&pm_mutex))
|
|
return -EBUSY;
|
|
|
|
@@ -337,7 +337,7 @@ static int enter_state(suspend_state_t state)
|
|
sys_sync();
|
|
printk("done.\n");
|
|
|
|
- pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
|
|
+ pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
|
|
error = suspend_prepare(state);
|
|
if (error)
|
|
goto Unlock;
|
|
@@ -345,7 +345,7 @@ static int enter_state(suspend_state_t state)
|
|
if (suspend_test(TEST_FREEZER))
|
|
goto Finish;
|
|
|
|
- pr_debug("PM: Entering %s sleep\n", pm_states[state]);
|
|
+ pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
|
|
pm_restrict_gfp_mask();
|
|
error = suspend_devices_and_enter(state);
|
|
pm_restore_gfp_mask();
|
|
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
|
|
index 9b2a1d5..269b097 100644
|
|
--- a/kernel/power/suspend_test.c
|
|
+++ b/kernel/power/suspend_test.c
|
|
@@ -92,13 +92,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
|
|
}
|
|
|
|
if (state == PM_SUSPEND_MEM) {
|
|
- printk(info_test, pm_states[state]);
|
|
+ printk(info_test, pm_states[state].label);
|
|
status = pm_suspend(state);
|
|
if (status == -ENODEV)
|
|
state = PM_SUSPEND_STANDBY;
|
|
}
|
|
if (state == PM_SUSPEND_STANDBY) {
|
|
- printk(info_test, pm_states[state]);
|
|
+ printk(info_test, pm_states[state].label);
|
|
status = pm_suspend(state);
|
|
}
|
|
if (status < 0)
|
|
@@ -136,18 +136,16 @@ static char warn_bad_state[] __initdata =
|
|
|
|
static int __init setup_test_suspend(char *value)
|
|
{
|
|
- unsigned i;
|
|
+ suspend_state_t i;
|
|
|
|
/* "=mem" ==> "mem" */
|
|
value++;
|
|
- for (i = 0; i < PM_SUSPEND_MAX; i++) {
|
|
- if (!pm_states[i])
|
|
- continue;
|
|
- if (strcmp(pm_states[i], value) != 0)
|
|
- continue;
|
|
- test_state = (__force suspend_state_t) i;
|
|
- return 0;
|
|
- }
|
|
+ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
|
|
+ if (!strcmp(pm_states[i].label, value)) {
|
|
+ test_state = pm_states[i].state;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
printk(warn_bad_state, value);
|
|
return 0;
|
|
}
|
|
@@ -164,8 +162,8 @@ static int __init test_suspend(void)
|
|
/* PM is initialized by now; is that state testable? */
|
|
if (test_state == PM_SUSPEND_ON)
|
|
goto done;
|
|
- if (!valid_state(test_state)) {
|
|
- printk(warn_bad_state, pm_states[test_state]);
|
|
+ if (!pm_states[test_state].state) {
|
|
+ printk(warn_bad_state, pm_states[test_state].label);
|
|
goto done;
|
|
}
|
|
|
|
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
|
|
index cbd69d8..2ca4a8b 100644
|
|
--- a/kernel/printk/console_cmdline.h
|
|
+++ b/kernel/printk/console_cmdline.h
|
|
@@ -3,7 +3,7 @@
|
|
|
|
struct console_cmdline
|
|
{
|
|
- char name[8]; /* Name of the driver */
|
|
+ char name[16]; /* Name of the driver */
|
|
int index; /* Minor dev. to use */
|
|
char *options; /* Options for the driver */
|
|
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
|
|
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
|
|
index 4dae9cb..02e7fb4 100644
|
|
--- a/kernel/printk/printk.c
|
|
+++ b/kernel/printk/printk.c
|
|
@@ -383,11 +383,11 @@ static int check_syslog_permissions(int type, bool from_file)
|
|
* already done the capabilities checks at open time.
|
|
*/
|
|
if (from_file && type != SYSLOG_ACTION_OPEN)
|
|
- return 0;
|
|
+ goto ok;
|
|
|
|
if (syslog_action_restricted(type)) {
|
|
if (capable(CAP_SYSLOG))
|
|
- return 0;
|
|
+ goto ok;
|
|
/*
|
|
* For historical reasons, accept CAP_SYS_ADMIN too, with
|
|
* a warning.
|
|
@@ -397,10 +397,11 @@ static int check_syslog_permissions(int type, bool from_file)
|
|
"CAP_SYS_ADMIN but no CAP_SYSLOG "
|
|
"(deprecated).\n",
|
|
current->comm, task_pid_nr(current));
|
|
- return 0;
|
|
+ goto ok;
|
|
}
|
|
return -EPERM;
|
|
}
|
|
+ok:
|
|
return security_syslog(type);
|
|
}
|
|
|
|
@@ -1126,10 +1127,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
|
|
if (error)
|
|
goto out;
|
|
|
|
- error = security_syslog(type);
|
|
- if (error)
|
|
- return error;
|
|
-
|
|
switch (type) {
|
|
case SYSLOG_ACTION_CLOSE: /* Close log */
|
|
break;
|
|
@@ -2280,6 +2277,7 @@ void register_console(struct console *newcon)
|
|
for (i = 0, c = console_cmdline;
|
|
i < MAX_CMDLINECONSOLES && c->name[0];
|
|
i++, c++) {
|
|
+ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
|
|
if (strcmp(c->name, newcon->name) != 0)
|
|
continue;
|
|
if (newcon->index >= 0 &&
|
|
@@ -2468,7 +2466,7 @@ void wake_up_klogd(void)
|
|
preempt_enable();
|
|
}
|
|
|
|
-int printk_sched(const char *fmt, ...)
|
|
+int printk_deferred(const char *fmt, ...)
|
|
{
|
|
unsigned long flags;
|
|
va_list args;
|
|
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
|
|
index 1f4bcb3..be9760f 100644
|
|
--- a/kernel/ptrace.c
|
|
+++ b/kernel/ptrace.c
|
|
@@ -720,6 +720,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
|
|
static int ptrace_resume(struct task_struct *child, long request,
|
|
unsigned long data)
|
|
{
|
|
+ bool need_siglock;
|
|
+
|
|
if (!valid_signal(data))
|
|
return -EIO;
|
|
|
|
@@ -747,8 +749,26 @@ static int ptrace_resume(struct task_struct *child, long request,
|
|
user_disable_single_step(child);
|
|
}
|
|
|
|
+ /*
|
|
+ * Change ->exit_code and ->state under siglock to avoid the race
|
|
+ * with wait_task_stopped() in between; a non-zero ->exit_code will
|
|
+ * wrongly look like another report from tracee.
|
|
+ *
|
|
+ * Note that we need siglock even if ->exit_code == data and/or this
|
|
+ * status was not reported yet, the new status must not be cleared by
|
|
+ * wait_task_stopped() after resume.
|
|
+ *
|
|
+ * If data == 0 we do not care if wait_task_stopped() reports the old
|
|
+ * status and clears the code too; this can't race with the tracee, it
|
|
+ * takes siglock after resume.
|
|
+ */
|
|
+ need_siglock = data && !thread_group_empty(current);
|
|
+ if (need_siglock)
|
|
+ spin_lock_irq(&child->sighand->siglock);
|
|
child->exit_code = data;
|
|
wake_up_state(child, __TASK_TRACED);
|
|
+ if (need_siglock)
|
|
+ spin_unlock_irq(&child->sighand->siglock);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
|
|
index 1254f31..ae359f0 100644
|
|
--- a/kernel/rcu/tiny.c
|
|
+++ b/kernel/rcu/tiny.c
|
|
@@ -284,6 +284,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
|
|
|
|
/* Move the ready-to-invoke callbacks to a local list. */
|
|
local_irq_save(flags);
|
|
+ if (rcp->donetail == &rcp->rcucblist) {
|
|
+ /* No callbacks ready, so just leave. */
|
|
+ local_irq_restore(flags);
|
|
+ return;
|
|
+ }
|
|
RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
|
|
list = rcp->rcucblist;
|
|
rcp->rcucblist = *rcp->donetail;
|
|
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
|
|
index b3d116c..6705d94 100644
|
|
--- a/kernel/rcu/tree.c
|
|
+++ b/kernel/rcu/tree.c
|
|
@@ -1228,6 +1228,22 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
|
|
}
|
|
|
|
/*
|
|
+ * Awaken the grace-period kthread for the specified flavor of RCU.
|
|
+ * Don't do a self-awaken, and don't bother awakening when there is
|
|
+ * nothing for the grace-period kthread to do (as in several CPUs
|
|
+ * raced to awaken, and we lost), and finally don't try to awaken
|
|
+ * a kthread that has not yet been created.
|
|
+ */
|
|
+static void rcu_gp_kthread_wake(struct rcu_state *rsp)
|
|
+{
|
|
+ if (current == rsp->gp_kthread ||
|
|
+ !ACCESS_ONCE(rsp->gp_flags) ||
|
|
+ !rsp->gp_kthread)
|
|
+ return;
|
|
+ wake_up(&rsp->gp_wq);
|
|
+}
|
|
+
|
|
+/*
|
|
* If there is room, assign a ->completed number to any callbacks on
|
|
* this CPU that have not already been assigned. Also accelerate any
|
|
* callbacks that were previously assigned a ->completed number that has
|
|
@@ -1670,7 +1686,7 @@ static void rsp_wakeup(struct irq_work *work)
|
|
struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
|
|
|
|
/* Wake up rcu_gp_kthread() to start the grace period. */
|
|
- wake_up(&rsp->gp_wq);
|
|
+ rcu_gp_kthread_wake(rsp);
|
|
}
|
|
|
|
/*
|
|
@@ -1746,7 +1762,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
|
|
{
|
|
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
|
|
raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
|
|
- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
|
|
+ rcu_gp_kthread_wake(rsp);
|
|
}
|
|
|
|
/*
|
|
@@ -2322,7 +2338,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
|
|
}
|
|
rsp->gp_flags |= RCU_GP_FLAG_FQS;
|
|
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
|
|
- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
|
|
+ rcu_gp_kthread_wake(rsp);
|
|
}
|
|
|
|
/*
|
|
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
|
index 0aae0fc..5e973ef 100644
|
|
--- a/kernel/sched/core.c
|
|
+++ b/kernel/sched/core.c
|
|
@@ -1322,7 +1322,7 @@ out:
|
|
* leave kernel.
|
|
*/
|
|
if (p->mm && printk_ratelimit()) {
|
|
- printk_sched("process %d (%s) no longer affine to cpu%d\n",
|
|
+ printk_deferred("process %d (%s) no longer affine to cpu%d\n",
|
|
task_pid_nr(p), p->comm, cpu);
|
|
}
|
|
}
|
|
@@ -1895,6 +1895,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
|
|
#ifdef CONFIG_SMP
|
|
inline struct dl_bw *dl_bw_of(int i)
|
|
{
|
|
+ rcu_lockdep_assert(rcu_read_lock_sched_held(),
|
|
+ "sched RCU must be held");
|
|
return &cpu_rq(i)->rd->dl_bw;
|
|
}
|
|
|
|
@@ -1903,6 +1905,8 @@ static inline int dl_bw_cpus(int i)
|
|
struct root_domain *rd = cpu_rq(i)->rd;
|
|
int cpus = 0;
|
|
|
|
+ rcu_lockdep_assert(rcu_read_lock_sched_held(),
|
|
+ "sched RCU must be held");
|
|
for_each_cpu_and(i, rd->span, cpu_active_mask)
|
|
cpus++;
|
|
|
|
@@ -2976,6 +2980,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
|
|
} else {
|
|
if (dl_prio(oldprio))
|
|
p->dl.dl_boosted = 0;
|
|
+ if (rt_prio(oldprio))
|
|
+ p->rt.timeout = 0;
|
|
p->sched_class = &fair_sched_class;
|
|
}
|
|
|
|
@@ -3511,9 +3517,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
|
|
};
|
|
|
|
/*
|
|
- * Fixup the legacy SCHED_RESET_ON_FORK hack
|
|
+ * Fixup the legacy SCHED_RESET_ON_FORK hack, except if
|
|
+ * the policy=-1 was passed by sched_setparam().
|
|
*/
|
|
- if (policy & SCHED_RESET_ON_FORK) {
|
|
+ if ((policy != -1) && (policy & SCHED_RESET_ON_FORK)) {
|
|
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
|
|
policy &= ~SCHED_RESET_ON_FORK;
|
|
attr.sched_policy = policy;
|
|
@@ -3936,13 +3943,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
|
|
* root_domain.
|
|
*/
|
|
#ifdef CONFIG_SMP
|
|
- if (task_has_dl_policy(p)) {
|
|
- const struct cpumask *span = task_rq(p)->rd->span;
|
|
-
|
|
- if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
|
|
+ if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
|
|
+ rcu_read_lock();
|
|
+ if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
|
|
retval = -EBUSY;
|
|
+ rcu_read_unlock();
|
|
goto out_unlock;
|
|
}
|
|
+ rcu_read_unlock();
|
|
}
|
|
#endif
|
|
again:
|
|
@@ -7457,6 +7465,8 @@ static int sched_dl_global_constraints(void)
|
|
int cpu, ret = 0;
|
|
unsigned long flags;
|
|
|
|
+ rcu_read_lock();
|
|
+
|
|
/*
|
|
* Here we want to check the bandwidth not being set to some
|
|
* value smaller than the currently allocated bandwidth in
|
|
@@ -7478,6 +7488,8 @@ static int sched_dl_global_constraints(void)
|
|
break;
|
|
}
|
|
|
|
+ rcu_read_unlock();
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -7493,6 +7505,7 @@ static void sched_dl_do_global(void)
|
|
if (global_rt_runtime() != RUNTIME_INF)
|
|
new_bw = to_ratio(global_rt_period(), global_rt_runtime());
|
|
|
|
+ rcu_read_lock();
|
|
/*
|
|
* FIXME: As above...
|
|
*/
|
|
@@ -7503,6 +7516,7 @@ static void sched_dl_do_global(void)
|
|
dl_b->bw = new_bw;
|
|
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
|
|
}
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
static int sched_rt_global_validate(void)
|
|
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
|
|
index ce85264..8d3c5dd 100644
|
|
--- a/kernel/sched/deadline.c
|
|
+++ b/kernel/sched/deadline.c
|
|
@@ -329,7 +329,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
|
|
|
|
if (!lag_once) {
|
|
lag_once = true;
|
|
- printk_sched("sched: DL replenish lagged to much\n");
|
|
+ printk_deferred("sched: DL replenish lagged to much\n");
|
|
}
|
|
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
|
|
dl_se->runtime = pi_se->dl_runtime;
|
|
@@ -550,24 +550,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
|
|
static
|
|
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
|
|
{
|
|
- int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
|
|
- int rorun = dl_se->runtime <= 0;
|
|
-
|
|
- if (!rorun && !dmiss)
|
|
- return 0;
|
|
-
|
|
- /*
|
|
- * If we are beyond our current deadline and we are still
|
|
- * executing, then we have already used some of the runtime of
|
|
- * the next instance. Thus, if we do not account that, we are
|
|
- * stealing bandwidth from the system at each deadline miss!
|
|
- */
|
|
- if (dmiss) {
|
|
- dl_se->runtime = rorun ? dl_se->runtime : 0;
|
|
- dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
|
|
- }
|
|
-
|
|
- return 1;
|
|
+ return (dl_se->runtime <= 0);
|
|
}
|
|
|
|
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
|
|
@@ -806,10 +789,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
|
|
* parameters of the task might need updating. Otherwise,
|
|
* we want a replenishment of its runtime.
|
|
*/
|
|
- if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
|
|
- replenish_dl_entity(dl_se, pi_se);
|
|
- else
|
|
+ if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
|
|
update_dl_entity(dl_se, pi_se);
|
|
+ else if (flags & ENQUEUE_REPLENISH)
|
|
+ replenish_dl_entity(dl_se, pi_se);
|
|
|
|
__enqueue_dl_entity(dl_se);
|
|
}
|
|
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
|
|
index 1999021..27b8e83 100644
|
|
--- a/kernel/sched/rt.c
|
|
+++ b/kernel/sched/rt.c
|
|
@@ -837,7 +837,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
|
|
|
|
if (!once) {
|
|
once = true;
|
|
- printk_sched("sched: RT throttling activated\n");
|
|
+ printk_deferred("sched: RT throttling activated\n");
|
|
}
|
|
} else {
|
|
/*
|
|
diff --git a/kernel/signal.c b/kernel/signal.c
|
|
index 5d4b05a..bd57d8d 100644
|
|
--- a/kernel/signal.c
|
|
+++ b/kernel/signal.c
|
|
@@ -2768,7 +2768,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
|
|
* Other callers might not initialize the si_lsb field,
|
|
* so check explicitly for the right codes here.
|
|
*/
|
|
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
|
|
+ if (from->si_signo == SIGBUS &&
|
|
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
|
|
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
|
|
#endif
|
|
break;
|
|
@@ -3035,7 +3036,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
|
|
int, sig,
|
|
struct compat_siginfo __user *, uinfo)
|
|
{
|
|
- siginfo_t info;
|
|
+ siginfo_t info = {};
|
|
int ret = copy_siginfo_from_user32(&info, uinfo);
|
|
if (unlikely(ret))
|
|
return ret;
|
|
@@ -3081,7 +3082,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
|
|
int, sig,
|
|
struct compat_siginfo __user *, uinfo)
|
|
{
|
|
- siginfo_t info;
|
|
+ siginfo_t info = {};
|
|
|
|
if (copy_siginfo_from_user32(&info, uinfo))
|
|
return -EFAULT;
|
|
diff --git a/kernel/smp.c b/kernel/smp.c
|
|
index ffee35b..ff87d44 100644
|
|
--- a/kernel/smp.c
|
|
+++ b/kernel/smp.c
|
|
@@ -617,7 +617,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
|
|
if (cond_func(cpu, info)) {
|
|
ret = smp_call_function_single(cpu, func,
|
|
info, wait);
|
|
- WARN_ON_ONCE(!ret);
|
|
+ WARN_ON_ONCE(ret);
|
|
}
|
|
preempt_enable();
|
|
}
|
|
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
|
|
index eb89e18..60d35ac5 100644
|
|
--- a/kernel/smpboot.c
|
|
+++ b/kernel/smpboot.c
|
|
@@ -279,6 +279,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
|
|
unsigned int cpu;
|
|
int ret = 0;
|
|
|
|
+ get_online_cpus();
|
|
mutex_lock(&smpboot_threads_lock);
|
|
for_each_online_cpu(cpu) {
|
|
ret = __smpboot_create_thread(plug_thread, cpu);
|
|
@@ -291,6 +292,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
|
|
list_add(&plug_thread->list, &hotplug_threads);
|
|
out:
|
|
mutex_unlock(&smpboot_threads_lock);
|
|
+ put_online_cpus();
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
|
|
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
|
index 490fcbb..93be750 100644
|
|
--- a/kernel/softirq.c
|
|
+++ b/kernel/softirq.c
|
|
@@ -657,9 +657,13 @@ static void run_ksoftirqd(unsigned int cpu)
|
|
* in the task stack here.
|
|
*/
|
|
__do_softirq();
|
|
- rcu_note_context_switch(cpu);
|
|
local_irq_enable();
|
|
cond_resched();
|
|
+
|
|
+ preempt_disable();
|
|
+ rcu_note_context_switch(cpu);
|
|
+ preempt_enable();
|
|
+
|
|
return;
|
|
}
|
|
local_irq_enable();
|
|
diff --git a/kernel/time.c b/kernel/time.c
|
|
index 7c7964c..3eb322e 100644
|
|
--- a/kernel/time.c
|
|
+++ b/kernel/time.c
|
|
@@ -195,6 +195,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
|
|
if (tv) {
|
|
if (copy_from_user(&user_tv, tv, sizeof(*tv)))
|
|
return -EFAULT;
|
|
+
|
|
+ if (!timeval_valid(&user_tv))
|
|
+ return -EINVAL;
|
|
+
|
|
new_ts.tv_sec = user_tv.tv_sec;
|
|
new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
|
|
}
|
|
@@ -496,17 +500,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
|
|
* that a remainder subtract here would not do the right thing as the
|
|
* resolution values don't fall on second boundries. I.e. the line:
|
|
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
|
|
+ * Note that due to the small error in the multiplier here, this
|
|
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
|
|
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
|
|
+ * OK.
|
|
*
|
|
* Rather, we just shift the bits off the right.
|
|
*
|
|
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
|
|
* value to a scaled second value.
|
|
*/
|
|
-unsigned long
|
|
-timespec_to_jiffies(const struct timespec *value)
|
|
+static unsigned long
|
|
+__timespec_to_jiffies(unsigned long sec, long nsec)
|
|
{
|
|
- unsigned long sec = value->tv_sec;
|
|
- long nsec = value->tv_nsec + TICK_NSEC - 1;
|
|
+ nsec = nsec + TICK_NSEC - 1;
|
|
|
|
if (sec >= MAX_SEC_IN_JIFFIES){
|
|
sec = MAX_SEC_IN_JIFFIES;
|
|
@@ -517,6 +524,13 @@ timespec_to_jiffies(const struct timespec *value)
|
|
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
|
|
|
|
}
|
|
+
|
|
+unsigned long
|
|
+timespec_to_jiffies(const struct timespec *value)
|
|
+{
|
|
+ return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
|
|
+}
|
|
+
|
|
EXPORT_SYMBOL(timespec_to_jiffies);
|
|
|
|
void
|
|
@@ -533,31 +547,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
|
|
}
|
|
EXPORT_SYMBOL(jiffies_to_timespec);
|
|
|
|
-/* Same for "timeval"
|
|
+/*
|
|
+ * We could use a similar algorithm to timespec_to_jiffies (with a
|
|
+ * different multiplier for usec instead of nsec). But this has a
|
|
+ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
|
|
+ * usec value, since it's not necessarily integral.
|
|
*
|
|
- * Well, almost. The problem here is that the real system resolution is
|
|
- * in nanoseconds and the value being converted is in micro seconds.
|
|
- * Also for some machines (those that use HZ = 1024, in-particular),
|
|
- * there is a LARGE error in the tick size in microseconds.
|
|
-
|
|
- * The solution we use is to do the rounding AFTER we convert the
|
|
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
|
|
- * Instruction wise, this should cost only an additional add with carry
|
|
- * instruction above the way it was done above.
|
|
+ * We could instead round in the intermediate scaled representation
|
|
+ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
|
|
+ * perilous: the scaling introduces a small positive error, which
|
|
+ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
|
|
+ * units to the intermediate before shifting) leads to accidental
|
|
+ * overflow and overestimates.
|
|
+ *
|
|
+ * At the cost of one additional multiplication by a constant, just
|
|
+ * use the timespec implementation.
|
|
*/
|
|
unsigned long
|
|
timeval_to_jiffies(const struct timeval *value)
|
|
{
|
|
- unsigned long sec = value->tv_sec;
|
|
- long usec = value->tv_usec;
|
|
-
|
|
- if (sec >= MAX_SEC_IN_JIFFIES){
|
|
- sec = MAX_SEC_IN_JIFFIES;
|
|
- usec = 0;
|
|
- }
|
|
- return (((u64)sec * SEC_CONVERSION) +
|
|
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
|
|
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
|
|
+ return __timespec_to_jiffies(value->tv_sec,
|
|
+ value->tv_usec * NSEC_PER_USEC);
|
|
}
|
|
EXPORT_SYMBOL(timeval_to_jiffies);
|
|
|
|
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
|
|
index fe75444..cd45a07 100644
|
|
--- a/kernel/time/alarmtimer.c
|
|
+++ b/kernel/time/alarmtimer.c
|
|
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
|
|
static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
|
|
ktime_t now)
|
|
{
|
|
+ unsigned long flags;
|
|
struct k_itimer *ptr = container_of(alarm, struct k_itimer,
|
|
it.alarm.alarmtimer);
|
|
- if (posix_timer_event(ptr, 0) != 0)
|
|
- ptr->it_overrun++;
|
|
+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
|
|
+
|
|
+ spin_lock_irqsave(&ptr->it_lock, flags);
|
|
+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
|
|
+ if (posix_timer_event(ptr, 0) != 0)
|
|
+ ptr->it_overrun++;
|
|
+ }
|
|
|
|
/* Re-add periodic timers */
|
|
if (ptr->it.alarm.interval.tv64) {
|
|
ptr->it_overrun += alarm_forward(alarm, now,
|
|
ptr->it.alarm.interval);
|
|
- return ALARMTIMER_RESTART;
|
|
+ result = ALARMTIMER_RESTART;
|
|
}
|
|
- return ALARMTIMER_NORESTART;
|
|
+ spin_unlock_irqrestore(&ptr->it_lock, flags);
|
|
+
|
|
+ return result;
|
|
}
|
|
|
|
/**
|
|
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
|
|
* @new_timer: k_itimer pointer
|
|
* @cur_setting: itimerspec data to fill
|
|
*
|
|
- * Copies the itimerspec data out from the k_itimer
|
|
+ * Copies out the current itimerspec data
|
|
*/
|
|
static void alarm_timer_get(struct k_itimer *timr,
|
|
struct itimerspec *cur_setting)
|
|
{
|
|
- memset(cur_setting, 0, sizeof(struct itimerspec));
|
|
+ ktime_t relative_expiry_time =
|
|
+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
|
|
+
|
|
+ if (ktime_to_ns(relative_expiry_time) > 0) {
|
|
+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
|
|
+ } else {
|
|
+ cur_setting->it_value.tv_sec = 0;
|
|
+ cur_setting->it_value.tv_nsec = 0;
|
|
+ }
|
|
|
|
- cur_setting->it_interval =
|
|
- ktime_to_timespec(timr->it.alarm.interval);
|
|
- cur_setting->it_value =
|
|
- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
|
|
- return;
|
|
+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
|
|
}
|
|
|
|
/**
|
|
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
|
|
index 086ad60..60ba1af 100644
|
|
--- a/kernel/time/clockevents.c
|
|
+++ b/kernel/time/clockevents.c
|
|
@@ -146,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
|
|
{
|
|
/* Nothing to do if we already reached the limit */
|
|
if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
|
|
- printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
|
|
+ printk_deferred(KERN_WARNING
|
|
+ "CE: Reprogramming failure. Giving up\n");
|
|
dev->next_event.tv64 = KTIME_MAX;
|
|
return -ETIME;
|
|
}
|
|
@@ -159,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
|
|
if (dev->min_delta_ns > MIN_DELTA_LIMIT)
|
|
dev->min_delta_ns = MIN_DELTA_LIMIT;
|
|
|
|
- printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
|
|
- dev->name ? dev->name : "?",
|
|
- (unsigned long long) dev->min_delta_ns);
|
|
+ printk_deferred(KERN_WARNING
|
|
+ "CE: %s increased min_delta_ns to %llu nsec\n",
|
|
+ dev->name ? dev->name : "?",
|
|
+ (unsigned long long) dev->min_delta_ns);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
|
|
index af8d1d4..6211d5d 100644
|
|
--- a/kernel/time/ntp.c
|
|
+++ b/kernel/time/ntp.c
|
|
@@ -631,6 +631,17 @@ int ntp_validate_timex(struct timex *txc)
|
|
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
|
|
return -EPERM;
|
|
|
|
+ /*
|
|
+ * Check for potential multiplication overflows that can
|
|
+ * only happen on 64-bit systems:
|
|
+ */
|
|
+ if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
|
|
+ if (LLONG_MIN / PPM_SCALE > txc->freq)
|
|
+ return -EINVAL;
|
|
+ if (LLONG_MAX / PPM_SCALE < txc->freq)
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
|
|
index 4d23dc4..313a662 100644
|
|
--- a/kernel/time/sched_clock.c
|
|
+++ b/kernel/time/sched_clock.c
|
|
@@ -204,7 +204,8 @@ void __init sched_clock_postinit(void)
|
|
|
|
static int sched_clock_suspend(void)
|
|
{
|
|
- sched_clock_poll(&sched_clock_timer);
|
|
+ update_sched_clock();
|
|
+ hrtimer_cancel(&sched_clock_timer);
|
|
cd.suspended = true;
|
|
return 0;
|
|
}
|
|
@@ -212,6 +213,7 @@ static int sched_clock_suspend(void)
|
|
static void sched_clock_resume(void)
|
|
{
|
|
cd.epoch_cyc = read_sched_clock();
|
|
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
|
|
cd.suspended = false;
|
|
}
|
|
|
|
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
|
|
index 6558b7a..8c08a6f 100644
|
|
--- a/kernel/time/tick-sched.c
|
|
+++ b/kernel/time/tick-sched.c
|
|
@@ -807,7 +807,6 @@ void tick_nohz_idle_enter(void)
|
|
|
|
local_irq_enable();
|
|
}
|
|
-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
|
|
|
|
/**
|
|
* tick_nohz_irq_exit - update next tick event from interrupt exit
|
|
@@ -934,7 +933,6 @@ void tick_nohz_idle_exit(void)
|
|
|
|
local_irq_enable();
|
|
}
|
|
-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
|
|
|
|
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
|
|
{
|
|
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
|
|
index 0954450..da41de9 100644
|
|
--- a/kernel/trace/ring_buffer.c
|
|
+++ b/kernel/trace/ring_buffer.c
|
|
@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
|
|
work = &cpu_buffer->irq_work;
|
|
}
|
|
|
|
- work->waiters_pending = true;
|
|
poll_wait(filp, &work->waiters, poll_table);
|
|
+ work->waiters_pending = true;
|
|
+ /*
|
|
+ * There's a tight race between setting the waiters_pending and
|
|
+ * checking if the ring buffer is empty. Once the waiters_pending bit
|
|
+ * is set, the next event will wake the task up, but we can get stuck
|
|
+ * if there's only a single event in.
|
|
+ *
|
|
+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
|
|
+ * but adding a memory barrier to all events will cause too much of a
|
|
+ * performance hit in the fast path. We only need a memory barrier when
|
|
+ * the buffer goes from empty to having content. But as this race is
|
|
+ * extremely small, and it's not a problem if another event comes in, we
|
|
+ * will fix it later.
|
|
+ */
|
|
+ smp_mb();
|
|
|
|
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
|
|
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
|
|
@@ -1981,7 +1995,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
|
|
|
|
/**
|
|
* rb_update_event - update event type and data
|
|
- * @event: the even to update
|
|
+ * @event: the event to update
|
|
* @type: the type of event
|
|
* @length: the size of the event field in the ring buffer
|
|
*
|
|
@@ -2637,7 +2651,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
|
|
|
|
static __always_inline int trace_recursive_lock(void)
|
|
{
|
|
- unsigned int val = this_cpu_read(current_context);
|
|
+ unsigned int val = __this_cpu_read(current_context);
|
|
int bit;
|
|
|
|
if (in_interrupt()) {
|
|
@@ -2654,18 +2668,17 @@ static __always_inline int trace_recursive_lock(void)
|
|
return 1;
|
|
|
|
val |= (1 << bit);
|
|
- this_cpu_write(current_context, val);
|
|
+ __this_cpu_write(current_context, val);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static __always_inline void trace_recursive_unlock(void)
|
|
{
|
|
- unsigned int val = this_cpu_read(current_context);
|
|
+ unsigned int val = __this_cpu_read(current_context);
|
|
|
|
- val--;
|
|
- val &= this_cpu_read(current_context);
|
|
- this_cpu_write(current_context, val);
|
|
+ val &= val & (val - 1);
|
|
+ __this_cpu_write(current_context, val);
|
|
}
|
|
|
|
#else
|
|
@@ -3354,21 +3367,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
|
|
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
|
|
|
|
/* Iterator usage is expected to have record disabled */
|
|
- if (list_empty(&cpu_buffer->reader_page->list)) {
|
|
- iter->head_page = rb_set_head_page(cpu_buffer);
|
|
- if (unlikely(!iter->head_page))
|
|
- return;
|
|
- iter->head = iter->head_page->read;
|
|
- } else {
|
|
- iter->head_page = cpu_buffer->reader_page;
|
|
- iter->head = cpu_buffer->reader_page->read;
|
|
- }
|
|
+ iter->head_page = cpu_buffer->reader_page;
|
|
+ iter->head = cpu_buffer->reader_page->read;
|
|
+
|
|
+ iter->cache_reader_page = iter->head_page;
|
|
+ iter->cache_read = cpu_buffer->read;
|
|
+
|
|
if (iter->head)
|
|
iter->read_stamp = cpu_buffer->read_stamp;
|
|
else
|
|
iter->read_stamp = iter->head_page->page->time_stamp;
|
|
- iter->cache_reader_page = cpu_buffer->reader_page;
|
|
- iter->cache_read = cpu_buffer->read;
|
|
}
|
|
|
|
/**
|
|
@@ -3761,12 +3769,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
|
|
return NULL;
|
|
|
|
/*
|
|
- * We repeat when a time extend is encountered.
|
|
- * Since the time extend is always attached to a data event,
|
|
- * we should never loop more than once.
|
|
- * (We never hit the following condition more than twice).
|
|
+ * We repeat when a time extend is encountered or we hit
|
|
+ * the end of the page. Since the time extend is always attached
|
|
+ * to a data event, we should never loop more than three times.
|
|
+ * Once for going to next page, once on time extend, and
|
|
+ * finally once to get the event.
|
|
+ * (We never hit the following condition more than thrice).
|
|
*/
|
|
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
|
|
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
|
|
return NULL;
|
|
|
|
if (rb_per_cpu_empty(cpu_buffer))
|
|
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
|
|
index a5457d5..6ad2e2d 100644
|
|
--- a/kernel/trace/ring_buffer_benchmark.c
|
|
+++ b/kernel/trace/ring_buffer_benchmark.c
|
|
@@ -455,7 +455,7 @@ static int __init ring_buffer_benchmark_init(void)
|
|
|
|
if (producer_fifo >= 0) {
|
|
struct sched_param param = {
|
|
- .sched_priority = consumer_fifo
|
|
+ .sched_priority = producer_fifo
|
|
};
|
|
sched_setscheduler(producer, SCHED_FIFO, ¶m);
|
|
} else
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index 7e259b2..a2d62b3 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -811,7 +811,7 @@ static struct {
|
|
{ trace_clock_local, "local", 1 },
|
|
{ trace_clock_global, "global", 1 },
|
|
{ trace_clock_counter, "counter", 0 },
|
|
- { trace_clock_jiffies, "uptime", 1 },
|
|
+ { trace_clock_jiffies, "uptime", 0 },
|
|
{ trace_clock, "perf", 1 },
|
|
ARCH_TRACE_CLOCKS
|
|
};
|
|
@@ -4694,7 +4694,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
|
|
*fpos += written;
|
|
|
|
out_unlock:
|
|
- for (i = 0; i < nr_pages; i++){
|
|
+ for (i = nr_pages - 1; i >= 0; i--) {
|
|
kunmap_atomic(map_page[i]);
|
|
put_page(pages[i]);
|
|
}
|
|
@@ -6158,7 +6158,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
|
|
int ret;
|
|
|
|
/* Paranoid: Make sure the parent is the "instances" directory */
|
|
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
|
|
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
|
|
if (WARN_ON_ONCE(parent != trace_instance_dir))
|
|
return -ENOENT;
|
|
|
|
@@ -6185,7 +6185,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
|
|
int ret;
|
|
|
|
/* Paranoid: Make sure the parent is the "instances" directory */
|
|
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
|
|
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
|
|
if (WARN_ON_ONCE(parent != trace_instance_dir))
|
|
return -ENOENT;
|
|
|
|
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
|
|
index c8bd809..c1be95c 100644
|
|
--- a/kernel/trace/trace.h
|
|
+++ b/kernel/trace/trace.h
|
|
@@ -422,6 +422,7 @@ enum {
|
|
|
|
TRACE_CONTROL_BIT,
|
|
|
|
+ TRACE_BRANCH_BIT,
|
|
/*
|
|
* Abuse of the trace_recursion.
|
|
* As we need a way to maintain state if we are tracing the function
|
|
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
|
|
index 697fb9b..60850b4 100644
|
|
--- a/kernel/trace/trace_branch.c
|
|
+++ b/kernel/trace/trace_branch.c
|
|
@@ -37,9 +37,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
|
|
struct trace_branch *entry;
|
|
struct ring_buffer *buffer;
|
|
unsigned long flags;
|
|
- int cpu, pc;
|
|
+ int pc;
|
|
const char *p;
|
|
|
|
+ if (current->trace_recursion & TRACE_BRANCH_BIT)
|
|
+ return;
|
|
+
|
|
/*
|
|
* I would love to save just the ftrace_likely_data pointer, but
|
|
* this code can also be used by modules. Ugly things can happen
|
|
@@ -50,10 +53,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
|
|
if (unlikely(!tr))
|
|
return;
|
|
|
|
- local_irq_save(flags);
|
|
- cpu = raw_smp_processor_id();
|
|
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
|
|
- if (atomic_inc_return(&data->disabled) != 1)
|
|
+ raw_local_irq_save(flags);
|
|
+ current->trace_recursion |= TRACE_BRANCH_BIT;
|
|
+ data = this_cpu_ptr(tr->trace_buffer.data);
|
|
+ if (atomic_read(&data->disabled))
|
|
goto out;
|
|
|
|
pc = preempt_count();
|
|
@@ -82,8 +85,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
|
|
__buffer_unlock_commit(buffer, event);
|
|
|
|
out:
|
|
- atomic_dec(&data->disabled);
|
|
- local_irq_restore(flags);
|
|
+ current->trace_recursion &= ~TRACE_BRANCH_BIT;
|
|
+ raw_local_irq_restore(flags);
|
|
}
|
|
|
|
static inline
|
|
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
|
|
index 26dc348..57b67b1 100644
|
|
--- a/kernel/trace/trace_clock.c
|
|
+++ b/kernel/trace/trace_clock.c
|
|
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
|
|
|
|
/*
|
|
* trace_jiffy_clock(): Simply use jiffies as a clock counter.
|
|
+ * Note that this use of jiffies_64 is not completely safe on
|
|
+ * 32-bit systems. But the window is tiny, and the effect if
|
|
+ * we are affected is that we will have an obviously bogus
|
|
+ * timestamp on a trace event - i.e. not life threatening.
|
|
*/
|
|
u64 notrace trace_clock_jiffies(void)
|
|
{
|
|
- u64 jiffy = jiffies - INITIAL_JIFFIES;
|
|
-
|
|
- /* Return nsecs */
|
|
- return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
|
|
+ return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
|
|
}
|
|
|
|
/*
|
|
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
|
|
index e4c4efc..c6646a5 100644
|
|
--- a/kernel/trace/trace_events.c
|
|
+++ b/kernel/trace/trace_events.c
|
|
@@ -428,7 +428,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
|
|
|
|
if (dir) {
|
|
spin_lock(&dir->d_lock); /* probably unneeded */
|
|
- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
|
|
+ list_for_each_entry(child, &dir->d_subdirs, d_child) {
|
|
if (child->d_inode) /* probably unneeded */
|
|
child->d_inode->i_private = NULL;
|
|
}
|
|
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
|
|
index 8a86319..7b244d0 100644
|
|
--- a/kernel/trace/trace_events_filter.c
|
|
+++ b/kernel/trace/trace_events_filter.c
|
|
@@ -1086,6 +1086,9 @@ static void parse_init(struct filter_parse_state *ps,
|
|
|
|
static char infix_next(struct filter_parse_state *ps)
|
|
{
|
|
+ if (!ps->infix.cnt)
|
|
+ return 0;
|
|
+
|
|
ps->infix.cnt--;
|
|
|
|
return ps->infix.string[ps->infix.tail++];
|
|
@@ -1101,6 +1104,9 @@ static char infix_peek(struct filter_parse_state *ps)
|
|
|
|
static void infix_advance(struct filter_parse_state *ps)
|
|
{
|
|
+ if (!ps->infix.cnt)
|
|
+ return;
|
|
+
|
|
ps->infix.cnt--;
|
|
ps->infix.tail++;
|
|
}
|
|
@@ -1399,19 +1405,26 @@ static int check_preds(struct filter_parse_state *ps)
|
|
{
|
|
int n_normal_preds = 0, n_logical_preds = 0;
|
|
struct postfix_elt *elt;
|
|
+ int cnt = 0;
|
|
|
|
list_for_each_entry(elt, &ps->postfix, list) {
|
|
- if (elt->op == OP_NONE)
|
|
+ if (elt->op == OP_NONE) {
|
|
+ cnt++;
|
|
continue;
|
|
+ }
|
|
|
|
+ cnt--;
|
|
if (elt->op == OP_AND || elt->op == OP_OR) {
|
|
n_logical_preds++;
|
|
continue;
|
|
}
|
|
n_normal_preds++;
|
|
+ /* all ops should have operands */
|
|
+ if (cnt < 0)
|
|
+ break;
|
|
}
|
|
|
|
- if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
|
|
+ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
|
|
parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
|
|
index 759d5e0..7e3cd7a 100644
|
|
--- a/kernel/trace/trace_syscalls.c
|
|
+++ b/kernel/trace/trace_syscalls.c
|
|
@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
|
|
int size;
|
|
|
|
syscall_nr = trace_get_syscall_nr(current, regs);
|
|
- if (syscall_nr < 0)
|
|
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
|
|
return;
|
|
|
|
/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
|
|
@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
|
|
int syscall_nr;
|
|
|
|
syscall_nr = trace_get_syscall_nr(current, regs);
|
|
- if (syscall_nr < 0)
|
|
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
|
|
return;
|
|
|
|
/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
|
|
@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
|
|
int size;
|
|
|
|
syscall_nr = trace_get_syscall_nr(current, regs);
|
|
- if (syscall_nr < 0)
|
|
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
|
|
return;
|
|
if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
|
|
return;
|
|
@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
|
|
int size;
|
|
|
|
syscall_nr = trace_get_syscall_nr(current, regs);
|
|
- if (syscall_nr < 0)
|
|
+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
|
|
return;
|
|
if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
|
|
return;
|
|
diff --git a/kernel/uid16.c b/kernel/uid16.c
|
|
index 602e5bb..d58cc4d 100644
|
|
--- a/kernel/uid16.c
|
|
+++ b/kernel/uid16.c
|
|
@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
|
|
struct group_info *group_info;
|
|
int retval;
|
|
|
|
- if (!ns_capable(current_user_ns(), CAP_SETGID))
|
|
+ if (!may_setgroups())
|
|
return -EPERM;
|
|
if ((unsigned)gidsetsize > NGROUPS_MAX)
|
|
return -EINVAL;
|
|
diff --git a/kernel/user.c b/kernel/user.c
|
|
index c006131..c2bbb50 100644
|
|
--- a/kernel/user.c
|
|
+++ b/kernel/user.c
|
|
@@ -51,6 +51,7 @@ struct user_namespace init_user_ns = {
|
|
.owner = GLOBAL_ROOT_UID,
|
|
.group = GLOBAL_ROOT_GID,
|
|
.proc_inum = PROC_USER_INIT_INO,
|
|
+ .flags = USERNS_INIT_FLAGS,
|
|
#ifdef CONFIG_PERSISTENT_KEYRINGS
|
|
.persistent_keyring_register_sem =
|
|
__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
|
|
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
|
|
index 80a57af..153971e 100644
|
|
--- a/kernel/user_namespace.c
|
|
+++ b/kernel/user_namespace.c
|
|
@@ -24,6 +24,7 @@
|
|
#include <linux/fs_struct.h>
|
|
|
|
static struct kmem_cache *user_ns_cachep __read_mostly;
|
|
+static DEFINE_MUTEX(userns_state_mutex);
|
|
|
|
static bool new_idmap_permitted(const struct file *file,
|
|
struct user_namespace *ns, int cap_setid,
|
|
@@ -99,6 +100,11 @@ int create_user_ns(struct cred *new)
|
|
ns->owner = owner;
|
|
ns->group = group;
|
|
|
|
+ /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
|
|
+ mutex_lock(&userns_state_mutex);
|
|
+ ns->flags = parent_ns->flags;
|
|
+ mutex_unlock(&userns_state_mutex);
|
|
+
|
|
set_cred_user_ns(new, ns);
|
|
|
|
#ifdef CONFIG_PERSISTENT_KEYRINGS
|
|
@@ -581,9 +587,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent
|
|
return false;
|
|
}
|
|
|
|
-
|
|
-static DEFINE_MUTEX(id_map_mutex);
|
|
-
|
|
static ssize_t map_write(struct file *file, const char __user *buf,
|
|
size_t count, loff_t *ppos,
|
|
int cap_setid,
|
|
@@ -600,7 +603,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
|
|
ssize_t ret = -EINVAL;
|
|
|
|
/*
|
|
- * The id_map_mutex serializes all writes to any given map.
|
|
+ * The userns_state_mutex serializes all writes to any given map.
|
|
*
|
|
* Any map is only ever written once.
|
|
*
|
|
@@ -618,7 +621,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
|
|
* order and smp_rmb() is guaranteed that we don't have crazy
|
|
* architectures returning stale data.
|
|
*/
|
|
- mutex_lock(&id_map_mutex);
|
|
+ mutex_lock(&userns_state_mutex);
|
|
|
|
ret = -EPERM;
|
|
/* Only allow one successful write to the map */
|
|
@@ -745,7 +748,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
|
|
*ppos = count;
|
|
ret = count;
|
|
out:
|
|
- mutex_unlock(&id_map_mutex);
|
|
+ mutex_unlock(&userns_state_mutex);
|
|
if (page)
|
|
free_page(page);
|
|
return ret;
|
|
@@ -804,17 +807,21 @@ static bool new_idmap_permitted(const struct file *file,
|
|
struct user_namespace *ns, int cap_setid,
|
|
struct uid_gid_map *new_map)
|
|
{
|
|
- /* Allow mapping to your own filesystem ids */
|
|
- if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
|
|
+ const struct cred *cred = file->f_cred;
|
|
+ /* Don't allow mappings that would allow anything that wouldn't
|
|
+ * be allowed without the establishment of unprivileged mappings.
|
|
+ */
|
|
+ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
|
|
+ uid_eq(ns->owner, cred->euid)) {
|
|
u32 id = new_map->extent[0].lower_first;
|
|
if (cap_setid == CAP_SETUID) {
|
|
kuid_t uid = make_kuid(ns->parent, id);
|
|
- if (uid_eq(uid, file->f_cred->fsuid))
|
|
+ if (uid_eq(uid, cred->euid))
|
|
return true;
|
|
- }
|
|
- else if (cap_setid == CAP_SETGID) {
|
|
+ } else if (cap_setid == CAP_SETGID) {
|
|
kgid_t gid = make_kgid(ns->parent, id);
|
|
- if (gid_eq(gid, file->f_cred->fsgid))
|
|
+ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
|
|
+ gid_eq(gid, cred->egid))
|
|
return true;
|
|
}
|
|
}
|
|
@@ -834,6 +841,100 @@ static bool new_idmap_permitted(const struct file *file,
|
|
return false;
|
|
}
|
|
|
|
+int proc_setgroups_show(struct seq_file *seq, void *v)
|
|
+{
|
|
+ struct user_namespace *ns = seq->private;
|
|
+ unsigned long userns_flags = ACCESS_ONCE(ns->flags);
|
|
+
|
|
+ seq_printf(seq, "%s\n",
|
|
+ (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
|
|
+ "allow" : "deny");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
|
|
+ size_t count, loff_t *ppos)
|
|
+{
|
|
+ struct seq_file *seq = file->private_data;
|
|
+ struct user_namespace *ns = seq->private;
|
|
+ char kbuf[8], *pos;
|
|
+ bool setgroups_allowed;
|
|
+ ssize_t ret;
|
|
+
|
|
+ /* Only allow a very narrow range of strings to be written */
|
|
+ ret = -EINVAL;
|
|
+ if ((*ppos != 0) || (count >= sizeof(kbuf)))
|
|
+ goto out;
|
|
+
|
|
+ /* What was written? */
|
|
+ ret = -EFAULT;
|
|
+ if (copy_from_user(kbuf, buf, count))
|
|
+ goto out;
|
|
+ kbuf[count] = '\0';
|
|
+ pos = kbuf;
|
|
+
|
|
+ /* What is being requested? */
|
|
+ ret = -EINVAL;
|
|
+ if (strncmp(pos, "allow", 5) == 0) {
|
|
+ pos += 5;
|
|
+ setgroups_allowed = true;
|
|
+ }
|
|
+ else if (strncmp(pos, "deny", 4) == 0) {
|
|
+ pos += 4;
|
|
+ setgroups_allowed = false;
|
|
+ }
|
|
+ else
|
|
+ goto out;
|
|
+
|
|
+ /* Verify there is not trailing junk on the line */
|
|
+ pos = skip_spaces(pos);
|
|
+ if (*pos != '\0')
|
|
+ goto out;
|
|
+
|
|
+ ret = -EPERM;
|
|
+ mutex_lock(&userns_state_mutex);
|
|
+ if (setgroups_allowed) {
|
|
+ /* Enabling setgroups after setgroups has been disabled
|
|
+ * is not allowed.
|
|
+ */
|
|
+ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
|
|
+ goto out_unlock;
|
|
+ } else {
|
|
+ /* Permanently disabling setgroups after setgroups has
|
|
+ * been enabled by writing the gid_map is not allowed.
|
|
+ */
|
|
+ if (ns->gid_map.nr_extents != 0)
|
|
+ goto out_unlock;
|
|
+ ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
|
|
+ }
|
|
+ mutex_unlock(&userns_state_mutex);
|
|
+
|
|
+ /* Report a successful write */
|
|
+ *ppos = count;
|
|
+ ret = count;
|
|
+out:
|
|
+ return ret;
|
|
+out_unlock:
|
|
+ mutex_unlock(&userns_state_mutex);
|
|
+ goto out;
|
|
+}
|
|
+
|
|
+bool userns_may_setgroups(const struct user_namespace *ns)
|
|
+{
|
|
+ bool allowed;
|
|
+
|
|
+ mutex_lock(&userns_state_mutex);
|
|
+ /* It is not safe to use setgroups until a gid mapping in
|
|
+ * the user namespace has been established.
|
|
+ */
|
|
+ allowed = ns->gid_map.nr_extents != 0;
|
|
+ /* Is setgroups allowed? */
|
|
+ allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
|
|
+ mutex_unlock(&userns_state_mutex);
|
|
+
|
|
+ return allowed;
|
|
+}
|
|
+
|
|
static void *userns_get(struct task_struct *task)
|
|
{
|
|
struct user_namespace *user_ns;
|
|
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
|
|
index b4defde..423c9e3 100644
|
|
--- a/kernel/workqueue.c
|
|
+++ b/kernel/workqueue.c
|
|
@@ -1962,17 +1962,13 @@ static void pool_mayday_timeout(unsigned long __pool)
|
|
* spin_lock_irq(pool->lock) which may be released and regrabbed
|
|
* multiple times. Does GFP_KERNEL allocations. Called only from
|
|
* manager.
|
|
- *
|
|
- * Return:
|
|
- * %false if no action was taken and pool->lock stayed locked, %true
|
|
- * otherwise.
|
|
*/
|
|
-static bool maybe_create_worker(struct worker_pool *pool)
|
|
+static void maybe_create_worker(struct worker_pool *pool)
|
|
__releases(&pool->lock)
|
|
__acquires(&pool->lock)
|
|
{
|
|
if (!need_to_create_worker(pool))
|
|
- return false;
|
|
+ return;
|
|
restart:
|
|
spin_unlock_irq(&pool->lock);
|
|
|
|
@@ -1989,7 +1985,7 @@ restart:
|
|
start_worker(worker);
|
|
if (WARN_ON_ONCE(need_to_create_worker(pool)))
|
|
goto restart;
|
|
- return true;
|
|
+ return;
|
|
}
|
|
|
|
if (!need_to_create_worker(pool))
|
|
@@ -2006,7 +2002,7 @@ restart:
|
|
spin_lock_irq(&pool->lock);
|
|
if (need_to_create_worker(pool))
|
|
goto restart;
|
|
- return true;
|
|
+ return;
|
|
}
|
|
|
|
/**
|
|
@@ -2019,15 +2015,9 @@ restart:
|
|
* LOCKING:
|
|
* spin_lock_irq(pool->lock) which may be released and regrabbed
|
|
* multiple times. Called only from manager.
|
|
- *
|
|
- * Return:
|
|
- * %false if no action was taken and pool->lock stayed locked, %true
|
|
- * otherwise.
|
|
*/
|
|
-static bool maybe_destroy_workers(struct worker_pool *pool)
|
|
+static void maybe_destroy_workers(struct worker_pool *pool)
|
|
{
|
|
- bool ret = false;
|
|
-
|
|
while (too_many_workers(pool)) {
|
|
struct worker *worker;
|
|
unsigned long expires;
|
|
@@ -2041,10 +2031,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
|
|
}
|
|
|
|
destroy_worker(worker);
|
|
- ret = true;
|
|
}
|
|
-
|
|
- return ret;
|
|
}
|
|
|
|
/**
|
|
@@ -2064,16 +2051,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
|
|
* multiple times. Does GFP_KERNEL allocations.
|
|
*
|
|
* Return:
|
|
- * %false if the pool don't need management and the caller can safely start
|
|
- * processing works, %true indicates that the function released pool->lock
|
|
- * and reacquired it to perform some management function and that the
|
|
- * conditions that the caller verified while holding the lock before
|
|
- * calling the function might no longer be true.
|
|
+ * %false if the pool doesn't need management and the caller can safely
|
|
+ * start processing works, %true if management function was performed and
|
|
+ * the conditions that the caller verified before calling the function may
|
|
+ * no longer be true.
|
|
*/
|
|
static bool manage_workers(struct worker *worker)
|
|
{
|
|
struct worker_pool *pool = worker->pool;
|
|
- bool ret = false;
|
|
|
|
/*
|
|
* Managership is governed by two mutexes - manager_arb and
|
|
@@ -2097,7 +2082,7 @@ static bool manage_workers(struct worker *worker)
|
|
* manager_mutex.
|
|
*/
|
|
if (!mutex_trylock(&pool->manager_arb))
|
|
- return ret;
|
|
+ return false;
|
|
|
|
/*
|
|
* With manager arbitration won, manager_mutex would be free in
|
|
@@ -2107,7 +2092,6 @@ static bool manage_workers(struct worker *worker)
|
|
spin_unlock_irq(&pool->lock);
|
|
mutex_lock(&pool->manager_mutex);
|
|
spin_lock_irq(&pool->lock);
|
|
- ret = true;
|
|
}
|
|
|
|
pool->flags &= ~POOL_MANAGE_WORKERS;
|
|
@@ -2116,12 +2100,12 @@ static bool manage_workers(struct worker *worker)
|
|
* Destroy and then create so that may_start_working() is true
|
|
* on return.
|
|
*/
|
|
- ret |= maybe_destroy_workers(pool);
|
|
- ret |= maybe_create_worker(pool);
|
|
+ maybe_destroy_workers(pool);
|
|
+ maybe_create_worker(pool);
|
|
|
|
mutex_unlock(&pool->manager_mutex);
|
|
mutex_unlock(&pool->manager_arb);
|
|
- return ret;
|
|
+ return true;
|
|
}
|
|
|
|
/**
|
|
@@ -2909,19 +2893,57 @@ bool flush_work(struct work_struct *work)
|
|
}
|
|
EXPORT_SYMBOL_GPL(flush_work);
|
|
|
|
+struct cwt_wait {
|
|
+ wait_queue_t wait;
|
|
+ struct work_struct *work;
|
|
+};
|
|
+
|
|
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
|
|
+{
|
|
+ struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
|
|
+
|
|
+ if (cwait->work != key)
|
|
+ return 0;
|
|
+ return autoremove_wake_function(wait, mode, sync, key);
|
|
+}
|
|
+
|
|
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
|
|
{
|
|
+ static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
|
|
unsigned long flags;
|
|
int ret;
|
|
|
|
do {
|
|
ret = try_to_grab_pending(work, is_dwork, &flags);
|
|
/*
|
|
- * If someone else is canceling, wait for the same event it
|
|
- * would be waiting for before retrying.
|
|
+ * If someone else is already canceling, wait for it to
|
|
+ * finish. flush_work() doesn't work for PREEMPT_NONE
|
|
+ * because we may get scheduled between @work's completion
|
|
+ * and the other canceling task resuming and clearing
|
|
+ * CANCELING - flush_work() will return false immediately
|
|
+ * as @work is no longer busy, try_to_grab_pending() will
|
|
+ * return -ENOENT as @work is still being canceled and the
|
|
+ * other canceling task won't be able to clear CANCELING as
|
|
+ * we're hogging the CPU.
|
|
+ *
|
|
+ * Let's wait for completion using a waitqueue. As this
|
|
+ * may lead to the thundering herd problem, use a custom
|
|
+ * wake function which matches @work along with exclusive
|
|
+ * wait and wakeup.
|
|
*/
|
|
- if (unlikely(ret == -ENOENT))
|
|
- flush_work(work);
|
|
+ if (unlikely(ret == -ENOENT)) {
|
|
+ struct cwt_wait cwait;
|
|
+
|
|
+ init_wait(&cwait.wait);
|
|
+ cwait.wait.func = cwt_wakefn;
|
|
+ cwait.work = work;
|
|
+
|
|
+ prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
|
|
+ TASK_UNINTERRUPTIBLE);
|
|
+ if (work_is_canceling(work))
|
|
+ schedule();
|
|
+ finish_wait(&cancel_waitq, &cwait.wait);
|
|
+ }
|
|
} while (unlikely(ret < 0));
|
|
|
|
/* tell other tasks trying to grab @work to back off */
|
|
@@ -2930,6 +2952,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
|
|
|
|
flush_work(work);
|
|
clear_work_data(work);
|
|
+
|
|
+ /*
|
|
+ * Paired with prepare_to_wait() above so that either
|
|
+ * waitqueue_active() is visible here or !work_is_canceling() is
|
|
+ * visible there.
|
|
+ */
|
|
+ smp_mb();
|
|
+ if (waitqueue_active(&cancel_waitq))
|
|
+ __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
|
|
index c0b1007..2404d03 100644
|
|
--- a/lib/assoc_array.c
|
|
+++ b/lib/assoc_array.c
|
|
@@ -1723,11 +1723,13 @@ ascend_old_tree:
|
|
shortcut = assoc_array_ptr_to_shortcut(ptr);
|
|
slot = shortcut->parent_slot;
|
|
cursor = shortcut->back_pointer;
|
|
+ if (!cursor)
|
|
+ goto gc_complete;
|
|
} else {
|
|
slot = node->parent_slot;
|
|
cursor = ptr;
|
|
}
|
|
- BUG_ON(!ptr);
|
|
+ BUG_ON(!cursor);
|
|
node = assoc_array_ptr_to_node(cursor);
|
|
slot++;
|
|
goto continue_node;
|
|
@@ -1735,7 +1737,7 @@ ascend_old_tree:
|
|
gc_complete:
|
|
edit->set[0].to = new_root;
|
|
assoc_array_apply_edit(edit);
|
|
- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
|
|
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
|
|
return 0;
|
|
|
|
enomem:
|
|
diff --git a/lib/bitmap.c b/lib/bitmap.c
|
|
index 06f7e4f..c0634aa 100644
|
|
--- a/lib/bitmap.c
|
|
+++ b/lib/bitmap.c
|
|
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
|
|
lower = src[off + k];
|
|
if (left && off + k == lim - 1)
|
|
lower &= mask;
|
|
- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
|
|
+ dst[k] = lower >> rem;
|
|
+ if (rem)
|
|
+ dst[k] |= upper << (BITS_PER_LONG - rem);
|
|
if (left && k == lim - 1)
|
|
dst[k] &= mask;
|
|
}
|
|
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
|
|
upper = src[k];
|
|
if (left && k == lim - 1)
|
|
upper &= (1UL << left) - 1;
|
|
- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
|
|
+ dst[k + off] = upper << rem;
|
|
+ if (rem)
|
|
+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
|
|
if (left && k + off == lim - 1)
|
|
dst[k + off] &= (1UL << left) - 1;
|
|
}
|
|
@@ -599,12 +603,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
|
|
unsigned a, b;
|
|
int c, old_c, totaldigits;
|
|
const char __user __force *ubuf = (const char __user __force *)buf;
|
|
- int exp_digit, in_range;
|
|
+ int at_start, in_range;
|
|
|
|
totaldigits = c = 0;
|
|
bitmap_zero(maskp, nmaskbits);
|
|
do {
|
|
- exp_digit = 1;
|
|
+ at_start = 1;
|
|
in_range = 0;
|
|
a = b = 0;
|
|
|
|
@@ -633,11 +637,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
|
|
break;
|
|
|
|
if (c == '-') {
|
|
- if (exp_digit || in_range)
|
|
+ if (at_start || in_range)
|
|
return -EINVAL;
|
|
b = 0;
|
|
in_range = 1;
|
|
- exp_digit = 1;
|
|
continue;
|
|
}
|
|
|
|
@@ -647,16 +650,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
|
|
b = b * 10 + (c - '0');
|
|
if (!in_range)
|
|
a = b;
|
|
- exp_digit = 0;
|
|
+ at_start = 0;
|
|
totaldigits++;
|
|
}
|
|
if (!(a <= b))
|
|
return -EINVAL;
|
|
if (b >= nmaskbits)
|
|
return -ERANGE;
|
|
- while (a <= b) {
|
|
- set_bit(a, maskp);
|
|
- a++;
|
|
+ if (!at_start) {
|
|
+ while (a <= b) {
|
|
+ set_bit(a, maskp);
|
|
+ a++;
|
|
+ }
|
|
}
|
|
} while (buflen && c == ',');
|
|
return 0;
|
|
diff --git a/lib/btree.c b/lib/btree.c
|
|
index f9a4846..4264871 100644
|
|
--- a/lib/btree.c
|
|
+++ b/lib/btree.c
|
|
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
|
|
|
|
void btree_destroy(struct btree_head *head)
|
|
{
|
|
+ mempool_free(head->node, head->mempool);
|
|
mempool_destroy(head->mempool);
|
|
head->mempool = NULL;
|
|
}
|
|
diff --git a/lib/checksum.c b/lib/checksum.c
|
|
index 129775e..8b39e86 100644
|
|
--- a/lib/checksum.c
|
|
+++ b/lib/checksum.c
|
|
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
|
|
EXPORT_SYMBOL(csum_partial_copy);
|
|
|
|
#ifndef csum_tcpudp_nofold
|
|
+static inline u32 from64to32(u64 x)
|
|
+{
|
|
+ /* add up 32-bit and 32-bit for 32+c bit */
|
|
+ x = (x & 0xffffffff) + (x >> 32);
|
|
+ /* add up carry.. */
|
|
+ x = (x & 0xffffffff) + (x >> 32);
|
|
+ return (u32)x;
|
|
+}
|
|
+
|
|
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
|
|
unsigned short len,
|
|
unsigned short proto,
|
|
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
|
|
#else
|
|
s += (proto + len) << 8;
|
|
#endif
|
|
- s += (s >> 32);
|
|
- return (__force __wsum)s;
|
|
+ return (__force __wsum)from64to32(s);
|
|
}
|
|
EXPORT_SYMBOL(csum_tcpudp_nofold);
|
|
#endif
|
|
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
|
|
index 31c5f76..f504027 100644
|
|
--- a/lib/decompress_bunzip2.c
|
|
+++ b/lib/decompress_bunzip2.c
|
|
@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
|
|
if (get_bits(bd, 1))
|
|
return RETVAL_OBSOLETE_INPUT;
|
|
origPtr = get_bits(bd, 24);
|
|
- if (origPtr > dbufSize)
|
|
+ if (origPtr >= dbufSize)
|
|
return RETVAL_DATA_ERROR;
|
|
/* mapping table: if some byte values are never used (encoding things
|
|
like ascii text), the compression code removes the gaps to have fewer
|
|
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
|
|
index 7a85967..f0f5c5c 100644
|
|
--- a/lib/lz4/lz4_decompress.c
|
|
+++ b/lib/lz4/lz4_decompress.c
|
|
@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
|
|
/* Error: request to write beyond destination buffer */
|
|
if (cpy > oend)
|
|
goto _output_error;
|
|
+ if ((ref + COPYLENGTH) > oend ||
|
|
+ (op + COPYLENGTH) > oend)
|
|
+ goto _output_error;
|
|
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
|
|
while (op < cpy)
|
|
*op++ = *ref++;
|
|
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
|
|
index 8563081..a1c387f 100644
|
|
--- a/lib/lzo/lzo1x_decompress_safe.c
|
|
+++ b/lib/lzo/lzo1x_decompress_safe.c
|
|
@@ -19,31 +19,21 @@
|
|
#include <linux/lzo.h>
|
|
#include "lzodefs.h"
|
|
|
|
-#define HAVE_IP(t, x) \
|
|
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
|
|
- (((t + x) >= t) && ((t + x) >= x)))
|
|
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
|
|
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
|
|
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
|
|
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
|
|
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
|
|
|
|
-#define HAVE_OP(t, x) \
|
|
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
|
|
- (((t + x) >= t) && ((t + x) >= x)))
|
|
-
|
|
-#define NEED_IP(t, x) \
|
|
- do { \
|
|
- if (!HAVE_IP(t, x)) \
|
|
- goto input_overrun; \
|
|
- } while (0)
|
|
-
|
|
-#define NEED_OP(t, x) \
|
|
- do { \
|
|
- if (!HAVE_OP(t, x)) \
|
|
- goto output_overrun; \
|
|
- } while (0)
|
|
-
|
|
-#define TEST_LB(m_pos) \
|
|
- do { \
|
|
- if ((m_pos) < out) \
|
|
- goto lookbehind_overrun; \
|
|
- } while (0)
|
|
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
|
|
+ * count without overflowing an integer. The multiply will overflow when
|
|
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
|
|
+ * depending on the base count. Since the base count is taken from a u8
|
|
+ * and a few bits, it is safe to assume that it will always be lower than
|
|
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
|
|
+ * two less 255 steps. See Documentation/lzo.txt for more information.
|
|
+ */
|
|
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
|
|
|
|
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
|
unsigned char *out, size_t *out_len)
|
|
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
|
if (t < 16) {
|
|
if (likely(state == 0)) {
|
|
if (unlikely(t == 0)) {
|
|
+ size_t offset;
|
|
+ const unsigned char *ip_last = ip;
|
|
+
|
|
while (unlikely(*ip == 0)) {
|
|
- t += 255;
|
|
ip++;
|
|
- NEED_IP(1, 0);
|
|
+ NEED_IP(1);
|
|
}
|
|
- t += 15 + *ip++;
|
|
+ offset = ip - ip_last;
|
|
+ if (unlikely(offset > MAX_255_COUNT))
|
|
+ return LZO_E_ERROR;
|
|
+
|
|
+ offset = (offset << 8) - offset;
|
|
+ t += offset + 15 + *ip++;
|
|
}
|
|
t += 3;
|
|
copy_literal_run:
|
|
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
|
|
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
|
|
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
|
|
const unsigned char *ie = ip + t;
|
|
unsigned char *oe = op + t;
|
|
do {
|
|
@@ -101,8 +98,8 @@ copy_literal_run:
|
|
} else
|
|
#endif
|
|
{
|
|
- NEED_OP(t, 0);
|
|
- NEED_IP(t, 3);
|
|
+ NEED_OP(t);
|
|
+ NEED_IP(t + 3);
|
|
do {
|
|
*op++ = *ip++;
|
|
} while (--t > 0);
|
|
@@ -115,7 +112,7 @@ copy_literal_run:
|
|
m_pos -= t >> 2;
|
|
m_pos -= *ip++ << 2;
|
|
TEST_LB(m_pos);
|
|
- NEED_OP(2, 0);
|
|
+ NEED_OP(2);
|
|
op[0] = m_pos[0];
|
|
op[1] = m_pos[1];
|
|
op += 2;
|
|
@@ -136,13 +133,20 @@ copy_literal_run:
|
|
} else if (t >= 32) {
|
|
t = (t & 31) + (3 - 1);
|
|
if (unlikely(t == 2)) {
|
|
+ size_t offset;
|
|
+ const unsigned char *ip_last = ip;
|
|
+
|
|
while (unlikely(*ip == 0)) {
|
|
- t += 255;
|
|
ip++;
|
|
- NEED_IP(1, 0);
|
|
+ NEED_IP(1);
|
|
}
|
|
- t += 31 + *ip++;
|
|
- NEED_IP(2, 0);
|
|
+ offset = ip - ip_last;
|
|
+ if (unlikely(offset > MAX_255_COUNT))
|
|
+ return LZO_E_ERROR;
|
|
+
|
|
+ offset = (offset << 8) - offset;
|
|
+ t += offset + 31 + *ip++;
|
|
+ NEED_IP(2);
|
|
}
|
|
m_pos = op - 1;
|
|
next = get_unaligned_le16(ip);
|
|
@@ -154,13 +158,20 @@ copy_literal_run:
|
|
m_pos -= (t & 8) << 11;
|
|
t = (t & 7) + (3 - 1);
|
|
if (unlikely(t == 2)) {
|
|
+ size_t offset;
|
|
+ const unsigned char *ip_last = ip;
|
|
+
|
|
while (unlikely(*ip == 0)) {
|
|
- t += 255;
|
|
ip++;
|
|
- NEED_IP(1, 0);
|
|
+ NEED_IP(1);
|
|
}
|
|
- t += 7 + *ip++;
|
|
- NEED_IP(2, 0);
|
|
+ offset = ip - ip_last;
|
|
+ if (unlikely(offset > MAX_255_COUNT))
|
|
+ return LZO_E_ERROR;
|
|
+
|
|
+ offset = (offset << 8) - offset;
|
|
+ t += offset + 7 + *ip++;
|
|
+ NEED_IP(2);
|
|
}
|
|
next = get_unaligned_le16(ip);
|
|
ip += 2;
|
|
@@ -174,7 +185,7 @@ copy_literal_run:
|
|
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
|
|
if (op - m_pos >= 8) {
|
|
unsigned char *oe = op + t;
|
|
- if (likely(HAVE_OP(t, 15))) {
|
|
+ if (likely(HAVE_OP(t + 15))) {
|
|
do {
|
|
COPY8(op, m_pos);
|
|
op += 8;
|
|
@@ -184,7 +195,7 @@ copy_literal_run:
|
|
m_pos += 8;
|
|
} while (op < oe);
|
|
op = oe;
|
|
- if (HAVE_IP(6, 0)) {
|
|
+ if (HAVE_IP(6)) {
|
|
state = next;
|
|
COPY4(op, ip);
|
|
op += next;
|
|
@@ -192,7 +203,7 @@ copy_literal_run:
|
|
continue;
|
|
}
|
|
} else {
|
|
- NEED_OP(t, 0);
|
|
+ NEED_OP(t);
|
|
do {
|
|
*op++ = *m_pos++;
|
|
} while (op < oe);
|
|
@@ -201,7 +212,7 @@ copy_literal_run:
|
|
#endif
|
|
{
|
|
unsigned char *oe = op + t;
|
|
- NEED_OP(t, 0);
|
|
+ NEED_OP(t);
|
|
op[0] = m_pos[0];
|
|
op[1] = m_pos[1];
|
|
op += 2;
|
|
@@ -214,15 +225,15 @@ match_next:
|
|
state = next;
|
|
t = next;
|
|
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
|
|
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
|
|
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
|
|
COPY4(op, ip);
|
|
op += t;
|
|
ip += t;
|
|
} else
|
|
#endif
|
|
{
|
|
- NEED_IP(t, 3);
|
|
- NEED_OP(t, 0);
|
|
+ NEED_IP(t + 3);
|
|
+ NEED_OP(t);
|
|
while (t > 0) {
|
|
*op++ = *ip++;
|
|
t--;
|
|
diff --git a/lib/plist.c b/lib/plist.c
|
|
index 1ebc95f..0f2084d 100644
|
|
--- a/lib/plist.c
|
|
+++ b/lib/plist.c
|
|
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
|
|
plist_check_head(head);
|
|
}
|
|
|
|
+/**
|
|
+ * plist_requeue - Requeue @node at end of same-prio entries.
|
|
+ *
|
|
+ * This is essentially an optimized plist_del() followed by
|
|
+ * plist_add(). It moves an entry already in the plist to
|
|
+ * after any other same-priority entries.
|
|
+ *
|
|
+ * @node: &struct plist_node pointer - entry to be moved
|
|
+ * @head: &struct plist_head pointer - list head
|
|
+ */
|
|
+void plist_requeue(struct plist_node *node, struct plist_head *head)
|
|
+{
|
|
+ struct plist_node *iter;
|
|
+ struct list_head *node_next = &head->node_list;
|
|
+
|
|
+ plist_check_head(head);
|
|
+ BUG_ON(plist_head_empty(head));
|
|
+ BUG_ON(plist_node_empty(node));
|
|
+
|
|
+ if (node == plist_last(head))
|
|
+ return;
|
|
+
|
|
+ iter = plist_next(node);
|
|
+
|
|
+ if (node->prio != iter->prio)
|
|
+ return;
|
|
+
|
|
+ plist_del(node, head);
|
|
+
|
|
+ plist_for_each_continue(iter, head) {
|
|
+ if (node->prio != iter->prio) {
|
|
+ node_next = &iter->node_list;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ list_add_tail(&node->node_list, node_next);
|
|
+
|
|
+ plist_check_head(head);
|
|
+}
|
|
+
|
|
#ifdef CONFIG_DEBUG_PI_LIST
|
|
#include <linux/sched.h>
|
|
#include <linux/module.h>
|
|
@@ -170,6 +210,14 @@ static void __init plist_test_check(int nr_expect)
|
|
BUG_ON(prio_pos->prio_list.next != &first->prio_list);
|
|
}
|
|
|
|
+static void __init plist_test_requeue(struct plist_node *node)
|
|
+{
|
|
+ plist_requeue(node, &test_head);
|
|
+
|
|
+ if (node != plist_last(&test_head))
|
|
+ BUG_ON(node->prio == plist_next(node)->prio);
|
|
+}
|
|
+
|
|
static int __init plist_test(void)
|
|
{
|
|
int nr_expect = 0, i, loop;
|
|
@@ -193,6 +241,10 @@ static int __init plist_test(void)
|
|
nr_expect--;
|
|
}
|
|
plist_test_check(nr_expect);
|
|
+ if (!plist_node_empty(test_node + i)) {
|
|
+ plist_test_requeue(test_node + i);
|
|
+ plist_test_check(nr_expect);
|
|
+ }
|
|
}
|
|
|
|
for (i = 0; i < ARRAY_SIZE(test_node); i++) {
|
|
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
|
|
index bd4a8df..7e30d2a 100644
|
|
--- a/lib/radix-tree.c
|
|
+++ b/lib/radix-tree.c
|
|
@@ -946,81 +946,6 @@ next:
|
|
}
|
|
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
|
|
|
|
-
|
|
-/**
|
|
- * radix_tree_next_hole - find the next hole (not-present entry)
|
|
- * @root: tree root
|
|
- * @index: index key
|
|
- * @max_scan: maximum range to search
|
|
- *
|
|
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
|
|
- * indexed hole.
|
|
- *
|
|
- * Returns: the index of the hole if found, otherwise returns an index
|
|
- * outside of the set specified (in which case 'return - index >= max_scan'
|
|
- * will be true). In rare cases of index wrap-around, 0 will be returned.
|
|
- *
|
|
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
|
|
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
|
|
- * the tree at a single point in time. For example, if a hole is created
|
|
- * at index 5, then subsequently a hole is created at index 10,
|
|
- * radix_tree_next_hole covering both indexes may return 10 if called
|
|
- * under rcu_read_lock.
|
|
- */
|
|
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
|
|
- unsigned long index, unsigned long max_scan)
|
|
-{
|
|
- unsigned long i;
|
|
-
|
|
- for (i = 0; i < max_scan; i++) {
|
|
- if (!radix_tree_lookup(root, index))
|
|
- break;
|
|
- index++;
|
|
- if (index == 0)
|
|
- break;
|
|
- }
|
|
-
|
|
- return index;
|
|
-}
|
|
-EXPORT_SYMBOL(radix_tree_next_hole);
|
|
-
|
|
-/**
|
|
- * radix_tree_prev_hole - find the prev hole (not-present entry)
|
|
- * @root: tree root
|
|
- * @index: index key
|
|
- * @max_scan: maximum range to search
|
|
- *
|
|
- * Search backwards in the range [max(index-max_scan+1, 0), index]
|
|
- * for the first hole.
|
|
- *
|
|
- * Returns: the index of the hole if found, otherwise returns an index
|
|
- * outside of the set specified (in which case 'index - return >= max_scan'
|
|
- * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
|
|
- *
|
|
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
|
|
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
|
|
- * the tree at a single point in time. For example, if a hole is created
|
|
- * at index 10, then subsequently a hole is created at index 5,
|
|
- * radix_tree_prev_hole covering both indexes may return 5 if called under
|
|
- * rcu_read_lock.
|
|
- */
|
|
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
|
|
- unsigned long index, unsigned long max_scan)
|
|
-{
|
|
- unsigned long i;
|
|
-
|
|
- for (i = 0; i < max_scan; i++) {
|
|
- if (!radix_tree_lookup(root, index))
|
|
- break;
|
|
- index--;
|
|
- if (index == ULONG_MAX)
|
|
- break;
|
|
- }
|
|
-
|
|
- return index;
|
|
-}
|
|
-EXPORT_SYMBOL(radix_tree_prev_hole);
|
|
-
|
|
/**
|
|
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
|
|
* @root: radix tree root
|
|
@@ -1337,15 +1262,18 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
|
|
}
|
|
|
|
/**
|
|
- * radix_tree_delete - delete an item from a radix tree
|
|
+ * radix_tree_delete_item - delete an item from a radix tree
|
|
* @root: radix tree root
|
|
* @index: index key
|
|
+ * @item: expected item
|
|
*
|
|
- * Remove the item at @index from the radix tree rooted at @root.
|
|
+ * Remove @item at @index from the radix tree rooted at @root.
|
|
*
|
|
- * Returns the address of the deleted item, or NULL if it was not present.
|
|
+ * Returns the address of the deleted item, or NULL if it was not present
|
|
+ * or the entry at the given @index was not @item.
|
|
*/
|
|
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
|
|
+void *radix_tree_delete_item(struct radix_tree_root *root,
|
|
+ unsigned long index, void *item)
|
|
{
|
|
struct radix_tree_node *node = NULL;
|
|
struct radix_tree_node *slot = NULL;
|
|
@@ -1380,6 +1308,11 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
|
|
if (slot == NULL)
|
|
goto out;
|
|
|
|
+ if (item && slot != item) {
|
|
+ slot = NULL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
/*
|
|
* Clear all tags associated with the item to be deleted.
|
|
* This way of doing it would be inefficient, but seldom is any set.
|
|
@@ -1424,6 +1357,21 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
|
|
out:
|
|
return slot;
|
|
}
|
|
+EXPORT_SYMBOL(radix_tree_delete_item);
|
|
+
|
|
+/**
|
|
+ * radix_tree_delete - delete an item from a radix tree
|
|
+ * @root: radix tree root
|
|
+ * @index: index key
|
|
+ *
|
|
+ * Remove the item at @index from the radix tree rooted at @root.
|
|
+ *
|
|
+ * Returns the address of the deleted item, or NULL if it was not present.
|
|
+ */
|
|
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
|
|
+{
|
|
+ return radix_tree_delete_item(root, index, NULL);
|
|
+}
|
|
EXPORT_SYMBOL(radix_tree_delete);
|
|
|
|
/**
|
|
diff --git a/lib/string.c b/lib/string.c
|
|
index e5878de..cb9ea21 100644
|
|
--- a/lib/string.c
|
|
+++ b/lib/string.c
|
|
@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
|
|
EXPORT_SYMBOL(memset);
|
|
#endif
|
|
|
|
+/**
|
|
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
|
|
+ * keying data) with 0s.
|
|
+ * @s: Pointer to the start of the area.
|
|
+ * @count: The size of the area.
|
|
+ *
|
|
+ * memzero_explicit() doesn't need an arch-specific version as
|
|
+ * it just invokes the one of memset() implicitly.
|
|
+ */
|
|
+void memzero_explicit(void *s, size_t count)
|
|
+{
|
|
+ memset(s, 0, count);
|
|
+ barrier();
|
|
+}
|
|
+EXPORT_SYMBOL(memzero_explicit);
|
|
+
|
|
#ifndef __HAVE_ARCH_MEMCPY
|
|
/**
|
|
* memcpy - Copy one area of memory to another
|
|
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
|
|
index a28df52..1164961 100644
|
|
--- a/lib/strnlen_user.c
|
|
+++ b/lib/strnlen_user.c
|
|
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
|
|
return res + find_zero(data) + 1 - align;
|
|
}
|
|
res += sizeof(unsigned long);
|
|
- if (unlikely(max < sizeof(unsigned long)))
|
|
+ /* We already handled 'unsigned long' bytes. Did we do it all ? */
|
|
+ if (unlikely(max <= sizeof(unsigned long)))
|
|
break;
|
|
max -= sizeof(unsigned long);
|
|
if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
|
|
diff --git a/mm/Makefile b/mm/Makefile
|
|
index b2aa3cc..2bad2e00 100644
|
|
--- a/mm/Makefile
|
|
+++ b/mm/Makefile
|
|
@@ -16,7 +16,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
|
|
readahead.o swap.o truncate.o vmscan.o shmem.o \
|
|
util.o mmzone.o vmstat.o backing-dev.o \
|
|
mm_init.o mmu_context.o percpu.o slab_common.o \
|
|
- compaction.o balloon_compaction.o \
|
|
+ compaction.o balloon_compaction.o vmacache.o \
|
|
interval_tree.o list_lru.o $(mmu-y)
|
|
|
|
obj-y += init-mm.o
|
|
diff --git a/mm/compaction.c b/mm/compaction.c
|
|
index 5f702ef..a522208 100644
|
|
--- a/mm/compaction.c
|
|
+++ b/mm/compaction.c
|
|
@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
|
|
unsigned long end_pfn = zone_end_pfn(zone);
|
|
unsigned long pfn;
|
|
|
|
- zone->compact_cached_migrate_pfn = start_pfn;
|
|
+ zone->compact_cached_migrate_pfn[0] = start_pfn;
|
|
+ zone->compact_cached_migrate_pfn[1] = start_pfn;
|
|
zone->compact_cached_free_pfn = end_pfn;
|
|
zone->compact_blockskip_flush = false;
|
|
|
|
@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
|
|
*/
|
|
static void update_pageblock_skip(struct compact_control *cc,
|
|
struct page *page, unsigned long nr_isolated,
|
|
- bool migrate_scanner)
|
|
+ bool set_unsuitable, bool migrate_scanner)
|
|
{
|
|
struct zone *zone = cc->zone;
|
|
+ unsigned long pfn;
|
|
|
|
if (cc->ignore_skip_hint)
|
|
return;
|
|
@@ -141,20 +143,32 @@ static void update_pageblock_skip(struct compact_control *cc,
|
|
if (!page)
|
|
return;
|
|
|
|
- if (!nr_isolated) {
|
|
- unsigned long pfn = page_to_pfn(page);
|
|
+ if (nr_isolated)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Only skip pageblocks when all forms of compaction will be known to
|
|
+ * fail in the near future.
|
|
+ */
|
|
+ if (set_unsuitable)
|
|
set_pageblock_skip(page);
|
|
|
|
- /* Update where compaction should restart */
|
|
- if (migrate_scanner) {
|
|
- if (!cc->finished_update_migrate &&
|
|
- pfn > zone->compact_cached_migrate_pfn)
|
|
- zone->compact_cached_migrate_pfn = pfn;
|
|
- } else {
|
|
- if (!cc->finished_update_free &&
|
|
- pfn < zone->compact_cached_free_pfn)
|
|
- zone->compact_cached_free_pfn = pfn;
|
|
- }
|
|
+ pfn = page_to_pfn(page);
|
|
+
|
|
+ /* Update where async and sync compaction should restart */
|
|
+ if (migrate_scanner) {
|
|
+ if (cc->finished_update_migrate)
|
|
+ return;
|
|
+ if (pfn > zone->compact_cached_migrate_pfn[0])
|
|
+ zone->compact_cached_migrate_pfn[0] = pfn;
|
|
+ if (cc->mode != MIGRATE_ASYNC &&
|
|
+ pfn > zone->compact_cached_migrate_pfn[1])
|
|
+ zone->compact_cached_migrate_pfn[1] = pfn;
|
|
+ } else {
|
|
+ if (cc->finished_update_free)
|
|
+ return;
|
|
+ if (pfn < zone->compact_cached_free_pfn)
|
|
+ zone->compact_cached_free_pfn = pfn;
|
|
}
|
|
}
|
|
#else
|
|
@@ -166,7 +180,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
|
|
|
|
static void update_pageblock_skip(struct compact_control *cc,
|
|
struct page *page, unsigned long nr_isolated,
|
|
- bool migrate_scanner)
|
|
+ bool set_unsuitable, bool migrate_scanner)
|
|
{
|
|
}
|
|
#endif /* CONFIG_COMPACTION */
|
|
@@ -195,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
|
|
}
|
|
|
|
/* async aborts if taking too long or contended */
|
|
- if (!cc->sync) {
|
|
+ if (cc->mode == MIGRATE_ASYNC) {
|
|
cc->contended = true;
|
|
return false;
|
|
}
|
|
@@ -208,30 +222,39 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
|
|
return true;
|
|
}
|
|
|
|
-static inline bool compact_trylock_irqsave(spinlock_t *lock,
|
|
- unsigned long *flags, struct compact_control *cc)
|
|
+/*
|
|
+ * Aside from avoiding lock contention, compaction also periodically checks
|
|
+ * need_resched() and either schedules in sync compaction or aborts async
|
|
+ * compaction. This is similar to what compact_checklock_irqsave() does, but
|
|
+ * is used where no lock is concerned.
|
|
+ *
|
|
+ * Returns false when no scheduling was needed, or sync compaction scheduled.
|
|
+ * Returns true when async compaction should abort.
|
|
+ */
|
|
+static inline bool compact_should_abort(struct compact_control *cc)
|
|
{
|
|
- return compact_checklock_irqsave(lock, flags, false, cc);
|
|
+ /* async compaction aborts if contended */
|
|
+ if (need_resched()) {
|
|
+ if (cc->mode == MIGRATE_ASYNC) {
|
|
+ cc->contended = true;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ cond_resched();
|
|
+ }
|
|
+
|
|
+ return false;
|
|
}
|
|
|
|
/* Returns true if the page is within a block suitable for migration to */
|
|
static bool suitable_migration_target(struct page *page)
|
|
{
|
|
- int migratetype = get_pageblock_migratetype(page);
|
|
-
|
|
- /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
|
|
- if (migratetype == MIGRATE_RESERVE)
|
|
- return false;
|
|
-
|
|
- if (is_migrate_isolate(migratetype))
|
|
- return false;
|
|
-
|
|
- /* If the page is a large free page, then allow migration */
|
|
+ /* If the page is a large free page, then disallow migration */
|
|
if (PageBuddy(page) && page_order(page) >= pageblock_order)
|
|
- return true;
|
|
+ return false;
|
|
|
|
/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
|
|
- if (migrate_async_suitable(migratetype))
|
|
+ if (migrate_async_suitable(get_pageblock_migratetype(page)))
|
|
return true;
|
|
|
|
/* Otherwise skip the block */
|
|
@@ -253,6 +276,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
|
|
struct page *cursor, *valid_page = NULL;
|
|
unsigned long flags;
|
|
bool locked = false;
|
|
+ bool checked_pageblock = false;
|
|
|
|
cursor = pfn_to_page(blockpfn);
|
|
|
|
@@ -284,8 +308,16 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
|
|
break;
|
|
|
|
/* Recheck this is a suitable migration target under lock */
|
|
- if (!strict && !suitable_migration_target(page))
|
|
- break;
|
|
+ if (!strict && !checked_pageblock) {
|
|
+ /*
|
|
+ * We need to check suitability of pageblock only once
|
|
+ * and this isolate_freepages_block() is called with
|
|
+ * pageblock range, so just check once is sufficient.
|
|
+ */
|
|
+ checked_pageblock = true;
|
|
+ if (!suitable_migration_target(page))
|
|
+ break;
|
|
+ }
|
|
|
|
/* Recheck this is a buddy page under lock */
|
|
if (!PageBuddy(page))
|
|
@@ -329,7 +361,8 @@ isolate_fail:
|
|
|
|
/* Update the pageblock-skip if the whole pageblock was scanned */
|
|
if (blockpfn == end_pfn)
|
|
- update_pageblock_skip(cc, valid_page, total_isolated, false);
|
|
+ update_pageblock_skip(cc, valid_page, total_isolated, true,
|
|
+ false);
|
|
|
|
count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
|
|
if (total_isolated)
|
|
@@ -460,12 +493,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
unsigned long last_pageblock_nr = 0, pageblock_nr;
|
|
unsigned long nr_scanned = 0, nr_isolated = 0;
|
|
struct list_head *migratelist = &cc->migratepages;
|
|
- isolate_mode_t mode = 0;
|
|
struct lruvec *lruvec;
|
|
unsigned long flags;
|
|
bool locked = false;
|
|
struct page *page = NULL, *valid_page = NULL;
|
|
- bool skipped_async_unsuitable = false;
|
|
+ bool set_unsuitable = true;
|
|
+ const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
|
|
+ ISOLATE_ASYNC_MIGRATE : 0) |
|
|
+ (unevictable ? ISOLATE_UNEVICTABLE : 0);
|
|
|
|
/*
|
|
* Ensure that there are not too many pages isolated from the LRU
|
|
@@ -474,7 +509,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
*/
|
|
while (unlikely(too_many_isolated(zone))) {
|
|
/* async migration should just abort */
|
|
- if (!cc->sync)
|
|
+ if (cc->mode == MIGRATE_ASYNC)
|
|
return 0;
|
|
|
|
congestion_wait(BLK_RW_ASYNC, HZ/10);
|
|
@@ -483,11 +518,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
return 0;
|
|
}
|
|
|
|
+ if (compact_should_abort(cc))
|
|
+ return 0;
|
|
+
|
|
/* Time to isolate some pages for migration */
|
|
- cond_resched();
|
|
for (; low_pfn < end_pfn; low_pfn++) {
|
|
/* give a chance to irqs before checking need_resched() */
|
|
- if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
|
|
+ if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
|
|
if (should_release_lock(&zone->lru_lock)) {
|
|
spin_unlock_irqrestore(&zone->lru_lock, flags);
|
|
locked = false;
|
|
@@ -526,8 +563,25 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
|
|
/* If isolation recently failed, do not retry */
|
|
pageblock_nr = low_pfn >> pageblock_order;
|
|
- if (!isolation_suitable(cc, page))
|
|
- goto next_pageblock;
|
|
+ if (last_pageblock_nr != pageblock_nr) {
|
|
+ int mt;
|
|
+
|
|
+ last_pageblock_nr = pageblock_nr;
|
|
+ if (!isolation_suitable(cc, page))
|
|
+ goto next_pageblock;
|
|
+
|
|
+ /*
|
|
+ * For async migration, also only scan in MOVABLE
|
|
+ * blocks. Async migration is optimistic to see if
|
|
+ * the minimum amount of work satisfies the allocation
|
|
+ */
|
|
+ mt = get_pageblock_migratetype(page);
|
|
+ if (cc->mode == MIGRATE_ASYNC &&
|
|
+ !migrate_async_suitable(mt)) {
|
|
+ set_unsuitable = false;
|
|
+ goto next_pageblock;
|
|
+ }
|
|
+ }
|
|
|
|
/*
|
|
* Skip if free. page_order cannot be used without zone->lock
|
|
@@ -537,18 +591,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
continue;
|
|
|
|
/*
|
|
- * For async migration, also only scan in MOVABLE blocks. Async
|
|
- * migration is optimistic to see if the minimum amount of work
|
|
- * satisfies the allocation
|
|
- */
|
|
- if (!cc->sync && last_pageblock_nr != pageblock_nr &&
|
|
- !migrate_async_suitable(get_pageblock_migratetype(page))) {
|
|
- cc->finished_update_migrate = true;
|
|
- skipped_async_unsuitable = true;
|
|
- goto next_pageblock;
|
|
- }
|
|
-
|
|
- /*
|
|
* Check may be lockless but that's ok as we recheck later.
|
|
* It's possible to migrate LRU pages and balloon pages
|
|
* Skip any other type of page
|
|
@@ -557,11 +599,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
if (unlikely(balloon_page_movable(page))) {
|
|
if (locked && balloon_page_isolate(page)) {
|
|
/* Successfully isolated */
|
|
- cc->finished_update_migrate = true;
|
|
- list_add(&page->lru, migratelist);
|
|
- cc->nr_migratepages++;
|
|
- nr_isolated++;
|
|
- goto check_compact_cluster;
|
|
+ goto isolate_success;
|
|
}
|
|
}
|
|
continue;
|
|
@@ -584,6 +622,15 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
continue;
|
|
}
|
|
|
|
+ /*
|
|
+ * Migration will fail if an anonymous page is pinned in memory,
|
|
+ * so avoid taking lru_lock and isolating it unnecessarily in an
|
|
+ * admittedly racy check.
|
|
+ */
|
|
+ if (!page_mapping(page) &&
|
|
+ page_count(page) > page_mapcount(page))
|
|
+ continue;
|
|
+
|
|
/* Check if it is ok to still hold the lock */
|
|
locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
|
|
locked, cc);
|
|
@@ -598,12 +645,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
continue;
|
|
}
|
|
|
|
- if (!cc->sync)
|
|
- mode |= ISOLATE_ASYNC_MIGRATE;
|
|
-
|
|
- if (unevictable)
|
|
- mode |= ISOLATE_UNEVICTABLE;
|
|
-
|
|
lruvec = mem_cgroup_page_lruvec(page, zone);
|
|
|
|
/* Try isolate the page */
|
|
@@ -613,13 +654,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
|
|
VM_BUG_ON_PAGE(PageTransCompound(page), page);
|
|
|
|
/* Successfully isolated */
|
|
- cc->finished_update_migrate = true;
|
|
del_page_from_lru_list(page, lruvec, page_lru(page));
|
|
+
|
|
+isolate_success:
|
|
+ cc->finished_update_migrate = true;
|
|
list_add(&page->lru, migratelist);
|
|
cc->nr_migratepages++;
|
|
nr_isolated++;
|
|
|
|
-check_compact_cluster:
|
|
/* Avoid isolating too much */
|
|
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
|
|
++low_pfn;
|
|
@@ -630,7 +672,6 @@ check_compact_cluster:
|
|
|
|
next_pageblock:
|
|
low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
|
|
- last_pageblock_nr = pageblock_nr;
|
|
}
|
|
|
|
acct_isolated(zone, locked, cc);
|
|
@@ -641,11 +682,10 @@ next_pageblock:
|
|
/*
|
|
* Update the pageblock-skip information and cached scanner pfn,
|
|
* if the whole pageblock was scanned without isolating any page.
|
|
- * This is not done when pageblock was skipped due to being unsuitable
|
|
- * for async compaction, so that eventual sync compaction can try.
|
|
*/
|
|
- if (low_pfn == end_pfn && !skipped_async_unsuitable)
|
|
- update_pageblock_skip(cc, valid_page, nr_isolated, true);
|
|
+ if (low_pfn == end_pfn)
|
|
+ update_pageblock_skip(cc, valid_page, nr_isolated,
|
|
+ set_unsuitable, true);
|
|
|
|
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
|
|
|
|
@@ -666,7 +706,9 @@ static void isolate_freepages(struct zone *zone,
|
|
struct compact_control *cc)
|
|
{
|
|
struct page *page;
|
|
- unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
|
|
+ unsigned long block_start_pfn; /* start of current pageblock */
|
|
+ unsigned long block_end_pfn; /* end of current pageblock */
|
|
+ unsigned long low_pfn; /* lowest pfn scanner is able to scan */
|
|
int nr_freepages = cc->nr_freepages;
|
|
struct list_head *freelist = &cc->freepages;
|
|
|
|
@@ -674,41 +716,38 @@ static void isolate_freepages(struct zone *zone,
|
|
* Initialise the free scanner. The starting point is where we last
|
|
* successfully isolated from, zone-cached value, or the end of the
|
|
* zone when isolating for the first time. We need this aligned to
|
|
- * the pageblock boundary, because we do pfn -= pageblock_nr_pages
|
|
- * in the for loop.
|
|
+ * the pageblock boundary, because we do
|
|
+ * block_start_pfn -= pageblock_nr_pages in the for loop.
|
|
+ * For ending point, take care when isolating in last pageblock of a
|
|
+ * a zone which ends in the middle of a pageblock.
|
|
* The low boundary is the end of the pageblock the migration scanner
|
|
* is using.
|
|
*/
|
|
- pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
|
|
+ block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
|
|
+ block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
|
|
+ zone_end_pfn(zone));
|
|
low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
|
|
|
|
/*
|
|
- * Take care that if the migration scanner is at the end of the zone
|
|
- * that the free scanner does not accidentally move to the next zone
|
|
- * in the next isolation cycle.
|
|
- */
|
|
- high_pfn = min(low_pfn, pfn);
|
|
-
|
|
- z_end_pfn = zone_end_pfn(zone);
|
|
-
|
|
- /*
|
|
* Isolate free pages until enough are available to migrate the
|
|
* pages on cc->migratepages. We stop searching if the migrate
|
|
* and free page scanners meet or enough free pages are isolated.
|
|
*/
|
|
- for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
|
|
- pfn -= pageblock_nr_pages) {
|
|
+ for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
|
|
+ block_end_pfn = block_start_pfn,
|
|
+ block_start_pfn -= pageblock_nr_pages) {
|
|
unsigned long isolated;
|
|
- unsigned long end_pfn;
|
|
|
|
/*
|
|
* This can iterate a massively long zone without finding any
|
|
* suitable migration targets, so periodically check if we need
|
|
- * to schedule.
|
|
+ * to schedule, or even abort async compaction.
|
|
*/
|
|
- cond_resched();
|
|
+ if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
|
|
+ && compact_should_abort(cc))
|
|
+ break;
|
|
|
|
- if (!pfn_valid(pfn))
|
|
+ if (!pfn_valid(block_start_pfn))
|
|
continue;
|
|
|
|
/*
|
|
@@ -718,7 +757,7 @@ static void isolate_freepages(struct zone *zone,
|
|
* i.e. it's possible that all pages within a zones range of
|
|
* pages do not belong to a single zone.
|
|
*/
|
|
- page = pfn_to_page(pfn);
|
|
+ page = pfn_to_page(block_start_pfn);
|
|
if (page_zone(page) != zone)
|
|
continue;
|
|
|
|
@@ -731,26 +770,26 @@ static void isolate_freepages(struct zone *zone,
|
|
continue;
|
|
|
|
/* Found a block suitable for isolating free pages from */
|
|
- isolated = 0;
|
|
+ cc->free_pfn = block_start_pfn;
|
|
+ isolated = isolate_freepages_block(cc, block_start_pfn,
|
|
+ block_end_pfn, freelist, false);
|
|
+ nr_freepages += isolated;
|
|
|
|
/*
|
|
- * Take care when isolating in last pageblock of a zone which
|
|
- * ends in the middle of a pageblock.
|
|
+ * Set a flag that we successfully isolated in this pageblock.
|
|
+ * In the next loop iteration, zone->compact_cached_free_pfn
|
|
+ * will not be updated and thus it will effectively contain the
|
|
+ * highest pageblock we isolated pages from.
|
|
*/
|
|
- end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
|
|
- isolated = isolate_freepages_block(cc, pfn, end_pfn,
|
|
- freelist, false);
|
|
- nr_freepages += isolated;
|
|
+ if (isolated)
|
|
+ cc->finished_update_free = true;
|
|
|
|
/*
|
|
- * Record the highest PFN we isolated pages from. When next
|
|
- * looking for free pages, the search will restart here as
|
|
- * page migration may have returned some pages to the allocator
|
|
+ * isolate_freepages_block() might have aborted due to async
|
|
+ * compaction being contended
|
|
*/
|
|
- if (isolated) {
|
|
- cc->finished_update_free = true;
|
|
- high_pfn = max(high_pfn, pfn);
|
|
- }
|
|
+ if (cc->contended)
|
|
+ break;
|
|
}
|
|
|
|
/* split_free_page does not map the pages */
|
|
@@ -760,10 +799,9 @@ static void isolate_freepages(struct zone *zone,
|
|
* If we crossed the migrate scanner, we want to keep it that way
|
|
* so that compact_finished() may detect this
|
|
*/
|
|
- if (pfn < low_pfn)
|
|
- cc->free_pfn = max(pfn, zone->zone_start_pfn);
|
|
- else
|
|
- cc->free_pfn = high_pfn;
|
|
+ if (block_start_pfn < low_pfn)
|
|
+ cc->free_pfn = cc->migrate_pfn;
|
|
+
|
|
cc->nr_freepages = nr_freepages;
|
|
}
|
|
|
|
@@ -778,9 +816,13 @@ static struct page *compaction_alloc(struct page *migratepage,
|
|
struct compact_control *cc = (struct compact_control *)data;
|
|
struct page *freepage;
|
|
|
|
- /* Isolate free pages if necessary */
|
|
+ /*
|
|
+ * Isolate free pages if necessary, and if we are not aborting due to
|
|
+ * contention.
|
|
+ */
|
|
if (list_empty(&cc->freepages)) {
|
|
- isolate_freepages(cc->zone, cc);
|
|
+ if (!cc->contended)
|
|
+ isolate_freepages(cc->zone, cc);
|
|
|
|
if (list_empty(&cc->freepages))
|
|
return NULL;
|
|
@@ -794,23 +836,16 @@ static struct page *compaction_alloc(struct page *migratepage,
|
|
}
|
|
|
|
/*
|
|
- * We cannot control nr_migratepages and nr_freepages fully when migration is
|
|
- * running as migrate_pages() has no knowledge of compact_control. When
|
|
- * migration is complete, we count the number of pages on the lists by hand.
|
|
+ * This is a migrate-callback that "frees" freepages back to the isolated
|
|
+ * freelist. All pages on the freelist are from the same zone, so there is no
|
|
+ * special handling needed for NUMA.
|
|
*/
|
|
-static void update_nr_listpages(struct compact_control *cc)
|
|
+static void compaction_free(struct page *page, unsigned long data)
|
|
{
|
|
- int nr_migratepages = 0;
|
|
- int nr_freepages = 0;
|
|
- struct page *page;
|
|
-
|
|
- list_for_each_entry(page, &cc->migratepages, lru)
|
|
- nr_migratepages++;
|
|
- list_for_each_entry(page, &cc->freepages, lru)
|
|
- nr_freepages++;
|
|
+ struct compact_control *cc = (struct compact_control *)data;
|
|
|
|
- cc->nr_migratepages = nr_migratepages;
|
|
- cc->nr_freepages = nr_freepages;
|
|
+ list_add(&page->lru, &cc->freepages);
|
|
+ cc->nr_freepages++;
|
|
}
|
|
|
|
/* possible outcome of isolate_migratepages */
|
|
@@ -857,13 +892,14 @@ static int compact_finished(struct zone *zone,
|
|
unsigned int order;
|
|
unsigned long watermark;
|
|
|
|
- if (fatal_signal_pending(current))
|
|
+ if (cc->contended || fatal_signal_pending(current))
|
|
return COMPACT_PARTIAL;
|
|
|
|
/* Compaction run completes if the migrate and free scanner meet */
|
|
if (cc->free_pfn <= cc->migrate_pfn) {
|
|
/* Let the next compaction start anew. */
|
|
- zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
|
|
+ zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
|
|
+ zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
|
|
zone->compact_cached_free_pfn = zone_end_pfn(zone);
|
|
|
|
/*
|
|
@@ -901,7 +937,7 @@ static int compact_finished(struct zone *zone,
|
|
return COMPACT_PARTIAL;
|
|
|
|
/* Job done if allocation would set block type */
|
|
- if (cc->order >= pageblock_order && area->nr_free)
|
|
+ if (order >= pageblock_order && area->nr_free)
|
|
return COMPACT_PARTIAL;
|
|
}
|
|
|
|
@@ -963,6 +999,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
|
|
int ret;
|
|
unsigned long start_pfn = zone->zone_start_pfn;
|
|
unsigned long end_pfn = zone_end_pfn(zone);
|
|
+ const bool sync = cc->mode != MIGRATE_ASYNC;
|
|
|
|
ret = compaction_suitable(zone, cc->order);
|
|
switch (ret) {
|
|
@@ -988,7 +1025,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
|
|
* information on where the scanners should start but check that it
|
|
* is initialised by ensuring the values are within zone boundaries.
|
|
*/
|
|
- cc->migrate_pfn = zone->compact_cached_migrate_pfn;
|
|
+ cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
|
|
cc->free_pfn = zone->compact_cached_free_pfn;
|
|
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
|
|
cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
|
|
@@ -996,7 +1033,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
|
|
}
|
|
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
|
|
cc->migrate_pfn = start_pfn;
|
|
- zone->compact_cached_migrate_pfn = cc->migrate_pfn;
|
|
+ zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
|
|
+ zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
|
|
}
|
|
|
|
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
|
|
@@ -1004,7 +1042,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
|
|
migrate_prep_local();
|
|
|
|
while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
|
|
- unsigned long nr_migrate, nr_remaining;
|
|
int err;
|
|
|
|
switch (isolate_migratepages(zone, cc)) {
|
|
@@ -1019,21 +1056,20 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
|
|
;
|
|
}
|
|
|
|
- nr_migrate = cc->nr_migratepages;
|
|
+ if (!cc->nr_migratepages)
|
|
+ continue;
|
|
+
|
|
err = migrate_pages(&cc->migratepages, compaction_alloc,
|
|
- (unsigned long)cc,
|
|
- cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
|
|
+ compaction_free, (unsigned long)cc, cc->mode,
|
|
MR_COMPACTION);
|
|
- update_nr_listpages(cc);
|
|
- nr_remaining = cc->nr_migratepages;
|
|
|
|
- trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
|
|
- nr_remaining);
|
|
+ trace_mm_compaction_migratepages(cc->nr_migratepages, err,
|
|
+ &cc->migratepages);
|
|
|
|
- /* Release isolated pages not migrated */
|
|
+ /* All pages were either migrated or will be released */
|
|
+ cc->nr_migratepages = 0;
|
|
if (err) {
|
|
putback_movable_pages(&cc->migratepages);
|
|
- cc->nr_migratepages = 0;
|
|
/*
|
|
* migrate_pages() may return -ENOMEM when scanners meet
|
|
* and we want compact_finished() to detect it
|
|
@@ -1055,9 +1091,8 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-static unsigned long compact_zone_order(struct zone *zone,
|
|
- int order, gfp_t gfp_mask,
|
|
- bool sync, bool *contended)
|
|
+static unsigned long compact_zone_order(struct zone *zone, int order,
|
|
+ gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
|
|
{
|
|
unsigned long ret;
|
|
struct compact_control cc = {
|
|
@@ -1066,7 +1101,7 @@ static unsigned long compact_zone_order(struct zone *zone,
|
|
.order = order,
|
|
.migratetype = allocflags_to_migratetype(gfp_mask),
|
|
.zone = zone,
|
|
- .sync = sync,
|
|
+ .mode = mode,
|
|
};
|
|
INIT_LIST_HEAD(&cc.freepages);
|
|
INIT_LIST_HEAD(&cc.migratepages);
|
|
@@ -1088,7 +1123,7 @@ int sysctl_extfrag_threshold = 500;
|
|
* @order: The order of the current allocation
|
|
* @gfp_mask: The GFP mask of the current allocation
|
|
* @nodemask: The allowed nodes to allocate from
|
|
- * @sync: Whether migration is synchronous or not
|
|
+ * @mode: The migration mode for async, sync light, or sync migration
|
|
* @contended: Return value that is true if compaction was aborted due to lock contention
|
|
* @page: Optionally capture a free page of the requested order during compaction
|
|
*
|
|
@@ -1096,7 +1131,7 @@ int sysctl_extfrag_threshold = 500;
|
|
*/
|
|
unsigned long try_to_compact_pages(struct zonelist *zonelist,
|
|
int order, gfp_t gfp_mask, nodemask_t *nodemask,
|
|
- bool sync, bool *contended)
|
|
+ enum migrate_mode mode, bool *contended)
|
|
{
|
|
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
|
|
int may_enter_fs = gfp_mask & __GFP_FS;
|
|
@@ -1121,7 +1156,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
|
|
nodemask) {
|
|
int status;
|
|
|
|
- status = compact_zone_order(zone, order, gfp_mask, sync,
|
|
+ status = compact_zone_order(zone, order, gfp_mask, mode,
|
|
contended);
|
|
rc = max(status, rc);
|
|
|
|
@@ -1160,9 +1195,6 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
|
|
if (zone_watermark_ok(zone, cc->order,
|
|
low_wmark_pages(zone), 0, 0))
|
|
compaction_defer_reset(zone, cc->order, false);
|
|
- /* Currently async compaction is never deferred. */
|
|
- else if (cc->sync)
|
|
- defer_compaction(zone, cc->order);
|
|
}
|
|
|
|
VM_BUG_ON(!list_empty(&cc->freepages));
|
|
@@ -1174,7 +1206,7 @@ void compact_pgdat(pg_data_t *pgdat, int order)
|
|
{
|
|
struct compact_control cc = {
|
|
.order = order,
|
|
- .sync = false,
|
|
+ .mode = MIGRATE_ASYNC,
|
|
};
|
|
|
|
if (!order)
|
|
@@ -1187,7 +1219,8 @@ static void compact_node(int nid)
|
|
{
|
|
struct compact_control cc = {
|
|
.order = -1,
|
|
- .sync = true,
|
|
+ .mode = MIGRATE_SYNC,
|
|
+ .ignore_skip_hint = true,
|
|
};
|
|
|
|
__compact_pgdat(NODE_DATA(nid), &cc);
|
|
diff --git a/mm/filemap.c b/mm/filemap.c
|
|
index 7a13f6a..217cfd3 100644
|
|
--- a/mm/filemap.c
|
|
+++ b/mm/filemap.c
|
|
@@ -192,9 +192,11 @@ static int filemap_check_errors(struct address_space *mapping)
|
|
{
|
|
int ret = 0;
|
|
/* Check for outstanding write errors */
|
|
- if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
|
|
+ if (test_bit(AS_ENOSPC, &mapping->flags) &&
|
|
+ test_and_clear_bit(AS_ENOSPC, &mapping->flags))
|
|
ret = -ENOSPC;
|
|
- if (test_and_clear_bit(AS_EIO, &mapping->flags))
|
|
+ if (test_bit(AS_EIO, &mapping->flags) &&
|
|
+ test_and_clear_bit(AS_EIO, &mapping->flags))
|
|
ret = -EIO;
|
|
return ret;
|
|
}
|
|
@@ -446,6 +448,29 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
|
|
}
|
|
EXPORT_SYMBOL_GPL(replace_page_cache_page);
|
|
|
|
+static int page_cache_tree_insert(struct address_space *mapping,
|
|
+ struct page *page)
|
|
+{
|
|
+ void **slot;
|
|
+ int error;
|
|
+
|
|
+ slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
|
|
+ if (slot) {
|
|
+ void *p;
|
|
+
|
|
+ p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
|
|
+ if (!radix_tree_exceptional_entry(p))
|
|
+ return -EEXIST;
|
|
+ radix_tree_replace_slot(slot, page);
|
|
+ mapping->nrpages++;
|
|
+ return 0;
|
|
+ }
|
|
+ error = radix_tree_insert(&mapping->page_tree, page->index, page);
|
|
+ if (!error)
|
|
+ mapping->nrpages++;
|
|
+ return error;
|
|
+}
|
|
+
|
|
/**
|
|
* add_to_page_cache_locked - add a locked page to the pagecache
|
|
* @page: page to add
|
|
@@ -480,11 +505,10 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
|
|
page->index = offset;
|
|
|
|
spin_lock_irq(&mapping->tree_lock);
|
|
- error = radix_tree_insert(&mapping->page_tree, offset, page);
|
|
+ error = page_cache_tree_insert(mapping, page);
|
|
radix_tree_preload_end();
|
|
if (unlikely(error))
|
|
goto err_insert;
|
|
- mapping->nrpages++;
|
|
__inc_zone_page_state(page, NR_FILE_PAGES);
|
|
spin_unlock_irq(&mapping->tree_lock);
|
|
trace_mm_filemap_add_to_page_cache(page);
|
|
@@ -520,10 +544,10 @@ struct page *__page_cache_alloc(gfp_t gfp)
|
|
if (cpuset_do_page_mem_spread()) {
|
|
unsigned int cpuset_mems_cookie;
|
|
do {
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
n = cpuset_mem_spread_node();
|
|
page = alloc_pages_exact_node(n, gfp, 0);
|
|
- } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
|
|
+ } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
|
|
|
|
return page;
|
|
}
|
|
@@ -620,8 +644,17 @@ EXPORT_SYMBOL(unlock_page);
|
|
*/
|
|
void end_page_writeback(struct page *page)
|
|
{
|
|
- if (TestClearPageReclaim(page))
|
|
+ /*
|
|
+ * TestClearPageReclaim could be used here but it is an atomic
|
|
+ * operation and overkill in this particular case. Failing to
|
|
+ * shuffle a page marked for immediate reclaim is too mild to
|
|
+ * justify taking an atomic operation penalty at the end of
|
|
+ * ever page writeback.
|
|
+ */
|
|
+ if (PageReclaim(page)) {
|
|
+ ClearPageReclaim(page);
|
|
rotate_reclaimable_page(page);
|
|
+ }
|
|
|
|
if (!test_clear_page_writeback(page))
|
|
BUG();
|
|
@@ -686,14 +719,101 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
|
|
}
|
|
|
|
/**
|
|
- * find_get_page - find and get a page reference
|
|
+ * page_cache_next_hole - find the next hole (not-present entry)
|
|
+ * @mapping: mapping
|
|
+ * @index: index
|
|
+ * @max_scan: maximum range to search
|
|
+ *
|
|
+ * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
|
|
+ * lowest indexed hole.
|
|
+ *
|
|
+ * Returns: the index of the hole if found, otherwise returns an index
|
|
+ * outside of the set specified (in which case 'return - index >=
|
|
+ * max_scan' will be true). In rare cases of index wrap-around, 0 will
|
|
+ * be returned.
|
|
+ *
|
|
+ * page_cache_next_hole may be called under rcu_read_lock. However,
|
|
+ * like radix_tree_gang_lookup, this will not atomically search a
|
|
+ * snapshot of the tree at a single point in time. For example, if a
|
|
+ * hole is created at index 5, then subsequently a hole is created at
|
|
+ * index 10, page_cache_next_hole covering both indexes may return 10
|
|
+ * if called under rcu_read_lock.
|
|
+ */
|
|
+pgoff_t page_cache_next_hole(struct address_space *mapping,
|
|
+ pgoff_t index, unsigned long max_scan)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ for (i = 0; i < max_scan; i++) {
|
|
+ struct page *page;
|
|
+
|
|
+ page = radix_tree_lookup(&mapping->page_tree, index);
|
|
+ if (!page || radix_tree_exceptional_entry(page))
|
|
+ break;
|
|
+ index++;
|
|
+ if (index == 0)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return index;
|
|
+}
|
|
+EXPORT_SYMBOL(page_cache_next_hole);
|
|
+
|
|
+/**
|
|
+ * page_cache_prev_hole - find the prev hole (not-present entry)
|
|
+ * @mapping: mapping
|
|
+ * @index: index
|
|
+ * @max_scan: maximum range to search
|
|
+ *
|
|
+ * Search backwards in the range [max(index-max_scan+1, 0), index] for
|
|
+ * the first hole.
|
|
+ *
|
|
+ * Returns: the index of the hole if found, otherwise returns an index
|
|
+ * outside of the set specified (in which case 'index - return >=
|
|
+ * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
|
|
+ * will be returned.
|
|
+ *
|
|
+ * page_cache_prev_hole may be called under rcu_read_lock. However,
|
|
+ * like radix_tree_gang_lookup, this will not atomically search a
|
|
+ * snapshot of the tree at a single point in time. For example, if a
|
|
+ * hole is created at index 10, then subsequently a hole is created at
|
|
+ * index 5, page_cache_prev_hole covering both indexes may return 5 if
|
|
+ * called under rcu_read_lock.
|
|
+ */
|
|
+pgoff_t page_cache_prev_hole(struct address_space *mapping,
|
|
+ pgoff_t index, unsigned long max_scan)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ for (i = 0; i < max_scan; i++) {
|
|
+ struct page *page;
|
|
+
|
|
+ page = radix_tree_lookup(&mapping->page_tree, index);
|
|
+ if (!page || radix_tree_exceptional_entry(page))
|
|
+ break;
|
|
+ index--;
|
|
+ if (index == ULONG_MAX)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return index;
|
|
+}
|
|
+EXPORT_SYMBOL(page_cache_prev_hole);
|
|
+
|
|
+/**
|
|
+ * find_get_entry - find and get a page cache entry
|
|
* @mapping: the address_space to search
|
|
- * @offset: the page index
|
|
+ * @offset: the page cache index
|
|
*
|
|
- * Is there a pagecache struct page at the given (mapping, offset) tuple?
|
|
- * If yes, increment its refcount and return it; if no, return NULL.
|
|
+ * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
+ * page cache page, it is returned with an increased refcount.
|
|
+ *
|
|
+ * If the slot holds a shadow entry of a previously evicted page, it
|
|
+ * is returned.
|
|
+ *
|
|
+ * Otherwise, %NULL is returned.
|
|
*/
|
|
-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
|
|
+struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
|
|
{
|
|
void **pagep;
|
|
struct page *page;
|
|
@@ -734,24 +854,30 @@ out:
|
|
|
|
return page;
|
|
}
|
|
-EXPORT_SYMBOL(find_get_page);
|
|
+EXPORT_SYMBOL(find_get_entry);
|
|
|
|
/**
|
|
- * find_lock_page - locate, pin and lock a pagecache page
|
|
+ * find_lock_entry - locate, pin and lock a page cache entry
|
|
* @mapping: the address_space to search
|
|
- * @offset: the page index
|
|
+ * @offset: the page cache index
|
|
*
|
|
- * Locates the desired pagecache page, locks it, increments its reference
|
|
- * count and returns its address.
|
|
+ * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
+ * page cache page, it is returned locked and with an increased
|
|
+ * refcount.
|
|
*
|
|
- * Returns zero if the page was not present. find_lock_page() may sleep.
|
|
+ * If the slot holds a shadow entry of a previously evicted page, it
|
|
+ * is returned.
|
|
+ *
|
|
+ * Otherwise, %NULL is returned.
|
|
+ *
|
|
+ * find_lock_entry() may sleep.
|
|
*/
|
|
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
|
|
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
|
|
{
|
|
struct page *page;
|
|
|
|
repeat:
|
|
- page = find_get_page(mapping, offset);
|
|
+ page = find_get_entry(mapping, offset);
|
|
if (page && !radix_tree_exception(page)) {
|
|
lock_page(page);
|
|
/* Has the page been truncated? */
|
|
@@ -764,44 +890,86 @@ repeat:
|
|
}
|
|
return page;
|
|
}
|
|
-EXPORT_SYMBOL(find_lock_page);
|
|
+EXPORT_SYMBOL(find_lock_entry);
|
|
|
|
/**
|
|
- * find_or_create_page - locate or add a pagecache page
|
|
- * @mapping: the page's address_space
|
|
- * @index: the page's index into the mapping
|
|
- * @gfp_mask: page allocation mode
|
|
+ * pagecache_get_page - find and get a page reference
|
|
+ * @mapping: the address_space to search
|
|
+ * @offset: the page index
|
|
+ * @fgp_flags: PCG flags
|
|
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
|
|
+ *
|
|
+ * Looks up the page cache slot at @mapping & @offset.
|
|
*
|
|
- * Locates a page in the pagecache. If the page is not present, a new page
|
|
- * is allocated using @gfp_mask and is added to the pagecache and to the VM's
|
|
- * LRU list. The returned page is locked and has its reference count
|
|
- * incremented.
|
|
+ * PCG flags modify how the page is returned
|
|
*
|
|
- * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
|
|
- * allocation!
|
|
+ * FGP_ACCESSED: the page will be marked accessed
|
|
+ * FGP_LOCK: Page is return locked
|
|
+ * FGP_CREAT: If page is not present then a new page is allocated using
|
|
+ * @gfp_mask and added to the page cache and the VM's LRU
|
|
+ * list. The page is returned locked and with an increased
|
|
+ * refcount. Otherwise, %NULL is returned.
|
|
*
|
|
- * find_or_create_page() returns the desired page's address, or zero on
|
|
- * memory exhaustion.
|
|
+ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
|
|
+ * if the GFP flags specified for FGP_CREAT are atomic.
|
|
+ *
|
|
+ * If there is a page cache page, it is returned with an increased refcount.
|
|
*/
|
|
-struct page *find_or_create_page(struct address_space *mapping,
|
|
- pgoff_t index, gfp_t gfp_mask)
|
|
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
|
|
+ int fgp_flags, gfp_t gfp_mask)
|
|
{
|
|
struct page *page;
|
|
- int err;
|
|
+
|
|
repeat:
|
|
- page = find_lock_page(mapping, index);
|
|
- if (!page) {
|
|
+ page = find_get_entry(mapping, offset);
|
|
+ if (radix_tree_exceptional_entry(page))
|
|
+ page = NULL;
|
|
+ if (!page)
|
|
+ goto no_page;
|
|
+
|
|
+ if (fgp_flags & FGP_LOCK) {
|
|
+ if (fgp_flags & FGP_NOWAIT) {
|
|
+ if (!trylock_page(page)) {
|
|
+ page_cache_release(page);
|
|
+ return NULL;
|
|
+ }
|
|
+ } else {
|
|
+ lock_page(page);
|
|
+ }
|
|
+
|
|
+ /* Has the page been truncated? */
|
|
+ if (unlikely(page->mapping != mapping)) {
|
|
+ unlock_page(page);
|
|
+ page_cache_release(page);
|
|
+ goto repeat;
|
|
+ }
|
|
+ VM_BUG_ON(page->index != offset);
|
|
+ }
|
|
+
|
|
+ if (page && (fgp_flags & FGP_ACCESSED))
|
|
+ mark_page_accessed(page);
|
|
+
|
|
+no_page:
|
|
+ if (!page && (fgp_flags & FGP_CREAT)) {
|
|
+ int err;
|
|
+ if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
|
|
+ gfp_mask |= __GFP_WRITE;
|
|
+ if (fgp_flags & FGP_NOFS)
|
|
+ gfp_mask &= ~__GFP_FS;
|
|
+
|
|
page = __page_cache_alloc(gfp_mask);
|
|
if (!page)
|
|
return NULL;
|
|
- /*
|
|
- * We want a regular kernel memory (not highmem or DMA etc)
|
|
- * allocation for the radix tree nodes, but we need to honour
|
|
- * the context-specific requirements the caller has asked for.
|
|
- * GFP_RECLAIM_MASK collects those requirements.
|
|
- */
|
|
- err = add_to_page_cache_lru(page, mapping, index,
|
|
- (gfp_mask & GFP_RECLAIM_MASK));
|
|
+
|
|
+ if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
|
|
+ fgp_flags |= FGP_LOCK;
|
|
+
|
|
+ /* Init accessed so avoit atomic mark_page_accessed later */
|
|
+ if (fgp_flags & FGP_ACCESSED)
|
|
+ init_page_accessed(page);
|
|
+
|
|
+ err = add_to_page_cache_lru(page, mapping, offset,
|
|
+ gfp_mask & GFP_RECLAIM_MASK);
|
|
if (unlikely(err)) {
|
|
page_cache_release(page);
|
|
page = NULL;
|
|
@@ -809,9 +977,80 @@ repeat:
|
|
goto repeat;
|
|
}
|
|
}
|
|
+
|
|
return page;
|
|
}
|
|
-EXPORT_SYMBOL(find_or_create_page);
|
|
+EXPORT_SYMBOL(pagecache_get_page);
|
|
+
|
|
+/**
|
|
+ * find_get_entries - gang pagecache lookup
|
|
+ * @mapping: The address_space to search
|
|
+ * @start: The starting page cache index
|
|
+ * @nr_entries: The maximum number of entries
|
|
+ * @entries: Where the resulting entries are placed
|
|
+ * @indices: The cache indices corresponding to the entries in @entries
|
|
+ *
|
|
+ * find_get_entries() will search for and return a group of up to
|
|
+ * @nr_entries entries in the mapping. The entries are placed at
|
|
+ * @entries. find_get_entries() takes a reference against any actual
|
|
+ * pages it returns.
|
|
+ *
|
|
+ * The search returns a group of mapping-contiguous page cache entries
|
|
+ * with ascending indexes. There may be holes in the indices due to
|
|
+ * not-present pages.
|
|
+ *
|
|
+ * Any shadow entries of evicted pages are included in the returned
|
|
+ * array.
|
|
+ *
|
|
+ * find_get_entries() returns the number of pages and shadow entries
|
|
+ * which were found.
|
|
+ */
|
|
+unsigned find_get_entries(struct address_space *mapping,
|
|
+ pgoff_t start, unsigned int nr_entries,
|
|
+ struct page **entries, pgoff_t *indices)
|
|
+{
|
|
+ void **slot;
|
|
+ unsigned int ret = 0;
|
|
+ struct radix_tree_iter iter;
|
|
+
|
|
+ if (!nr_entries)
|
|
+ return 0;
|
|
+
|
|
+ rcu_read_lock();
|
|
+restart:
|
|
+ radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
|
|
+ struct page *page;
|
|
+repeat:
|
|
+ page = radix_tree_deref_slot(slot);
|
|
+ if (unlikely(!page))
|
|
+ continue;
|
|
+ if (radix_tree_exception(page)) {
|
|
+ if (radix_tree_deref_retry(page))
|
|
+ goto restart;
|
|
+ /*
|
|
+ * Otherwise, we must be storing a swap entry
|
|
+ * here as an exceptional entry: so return it
|
|
+ * without attempting to raise page count.
|
|
+ */
|
|
+ goto export;
|
|
+ }
|
|
+ if (!page_cache_get_speculative(page))
|
|
+ goto repeat;
|
|
+
|
|
+ /* Has the page moved? */
|
|
+ if (unlikely(page != *slot)) {
|
|
+ page_cache_release(page);
|
|
+ goto repeat;
|
|
+ }
|
|
+export:
|
|
+ indices[ret] = iter.index;
|
|
+ entries[ret] = page;
|
|
+ if (++ret == nr_entries)
|
|
+ break;
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+ return ret;
|
|
+}
|
|
|
|
/**
|
|
* find_get_pages - gang pagecache lookup
|
|
@@ -1031,39 +1270,6 @@ repeat:
|
|
}
|
|
EXPORT_SYMBOL(find_get_pages_tag);
|
|
|
|
-/**
|
|
- * grab_cache_page_nowait - returns locked page at given index in given cache
|
|
- * @mapping: target address_space
|
|
- * @index: the page index
|
|
- *
|
|
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
|
|
- * This is intended for speculative data generators, where the data can
|
|
- * be regenerated if the page couldn't be grabbed. This routine should
|
|
- * be safe to call while holding the lock for another page.
|
|
- *
|
|
- * Clear __GFP_FS when allocating the page to avoid recursion into the fs
|
|
- * and deadlock against the caller's locked page.
|
|
- */
|
|
-struct page *
|
|
-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
|
|
-{
|
|
- struct page *page = find_get_page(mapping, index);
|
|
-
|
|
- if (page) {
|
|
- if (trylock_page(page))
|
|
- return page;
|
|
- page_cache_release(page);
|
|
- return NULL;
|
|
- }
|
|
- page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
|
|
- if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
|
|
- page_cache_release(page);
|
|
- page = NULL;
|
|
- }
|
|
- return page;
|
|
-}
|
|
-EXPORT_SYMBOL(grab_cache_page_nowait);
|
|
-
|
|
/*
|
|
* CD/DVDs are error prone. When a medium error occurs, the driver may fail
|
|
* a _large_ part of the i/o request. Imagine the worst scenario:
|
|
@@ -1795,6 +2001,18 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
|
|
EXPORT_SYMBOL(generic_file_mmap);
|
|
EXPORT_SYMBOL(generic_file_readonly_mmap);
|
|
|
|
+static struct page *wait_on_page_read(struct page *page)
|
|
+{
|
|
+ if (!IS_ERR(page)) {
|
|
+ wait_on_page_locked(page);
|
|
+ if (!PageUptodate(page)) {
|
|
+ page_cache_release(page);
|
|
+ page = ERR_PTR(-EIO);
|
|
+ }
|
|
+ }
|
|
+ return page;
|
|
+}
|
|
+
|
|
static struct page *__read_cache_page(struct address_space *mapping,
|
|
pgoff_t index,
|
|
int (*filler)(void *, struct page *),
|
|
@@ -1821,6 +2039,8 @@ repeat:
|
|
if (err < 0) {
|
|
page_cache_release(page);
|
|
page = ERR_PTR(err);
|
|
+ } else {
|
|
+ page = wait_on_page_read(page);
|
|
}
|
|
}
|
|
return page;
|
|
@@ -1857,6 +2077,10 @@ retry:
|
|
if (err < 0) {
|
|
page_cache_release(page);
|
|
return ERR_PTR(err);
|
|
+ } else {
|
|
+ page = wait_on_page_read(page);
|
|
+ if (IS_ERR(page))
|
|
+ return page;
|
|
}
|
|
out:
|
|
mark_page_accessed(page);
|
|
@@ -1864,40 +2088,25 @@ out:
|
|
}
|
|
|
|
/**
|
|
- * read_cache_page_async - read into page cache, fill it if needed
|
|
+ * read_cache_page - read into page cache, fill it if needed
|
|
* @mapping: the page's address_space
|
|
* @index: the page index
|
|
* @filler: function to perform the read
|
|
* @data: first arg to filler(data, page) function, often left as NULL
|
|
*
|
|
- * Same as read_cache_page, but don't wait for page to become unlocked
|
|
- * after submitting it to the filler.
|
|
- *
|
|
* Read into the page cache. If a page already exists, and PageUptodate() is
|
|
- * not set, try to fill the page but don't wait for it to become unlocked.
|
|
+ * not set, try to fill the page and wait for it to become unlocked.
|
|
*
|
|
* If the page does not get brought uptodate, return -EIO.
|
|
*/
|
|
-struct page *read_cache_page_async(struct address_space *mapping,
|
|
+struct page *read_cache_page(struct address_space *mapping,
|
|
pgoff_t index,
|
|
int (*filler)(void *, struct page *),
|
|
void *data)
|
|
{
|
|
return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
|
|
}
|
|
-EXPORT_SYMBOL(read_cache_page_async);
|
|
-
|
|
-static struct page *wait_on_page_read(struct page *page)
|
|
-{
|
|
- if (!IS_ERR(page)) {
|
|
- wait_on_page_locked(page);
|
|
- if (!PageUptodate(page)) {
|
|
- page_cache_release(page);
|
|
- page = ERR_PTR(-EIO);
|
|
- }
|
|
- }
|
|
- return page;
|
|
-}
|
|
+EXPORT_SYMBOL(read_cache_page);
|
|
|
|
/**
|
|
* read_cache_page_gfp - read into page cache, using specified page allocation flags.
|
|
@@ -1916,31 +2125,10 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
|
|
{
|
|
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
|
|
|
|
- return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
|
|
+ return do_read_cache_page(mapping, index, filler, NULL, gfp);
|
|
}
|
|
EXPORT_SYMBOL(read_cache_page_gfp);
|
|
|
|
-/**
|
|
- * read_cache_page - read into page cache, fill it if needed
|
|
- * @mapping: the page's address_space
|
|
- * @index: the page index
|
|
- * @filler: function to perform the read
|
|
- * @data: first arg to filler(data, page) function, often left as NULL
|
|
- *
|
|
- * Read into the page cache. If a page already exists, and PageUptodate() is
|
|
- * not set, try to fill the page then wait for it to become unlocked.
|
|
- *
|
|
- * If the page does not get brought uptodate, return -EIO.
|
|
- */
|
|
-struct page *read_cache_page(struct address_space *mapping,
|
|
- pgoff_t index,
|
|
- int (*filler)(void *, struct page *),
|
|
- void *data)
|
|
-{
|
|
- return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
|
|
-}
|
|
-EXPORT_SYMBOL(read_cache_page);
|
|
-
|
|
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
|
|
const struct iovec *iov, size_t base, size_t bytes)
|
|
{
|
|
@@ -1974,7 +2162,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
|
|
char *kaddr;
|
|
size_t copied;
|
|
|
|
- BUG_ON(!in_atomic());
|
|
kaddr = kmap_atomic(page);
|
|
if (likely(i->nr_segs == 1)) {
|
|
int left;
|
|
@@ -2184,7 +2371,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
|
|
{
|
|
const struct address_space_operations *aops = mapping->a_ops;
|
|
|
|
- mark_page_accessed(page);
|
|
return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
|
|
}
|
|
EXPORT_SYMBOL(pagecache_write_end);
|
|
@@ -2266,34 +2452,17 @@ EXPORT_SYMBOL(generic_file_direct_write);
|
|
struct page *grab_cache_page_write_begin(struct address_space *mapping,
|
|
pgoff_t index, unsigned flags)
|
|
{
|
|
- int status;
|
|
- gfp_t gfp_mask;
|
|
struct page *page;
|
|
- gfp_t gfp_notmask = 0;
|
|
+ int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
|
|
|
|
- gfp_mask = mapping_gfp_mask(mapping);
|
|
- if (mapping_cap_account_dirty(mapping))
|
|
- gfp_mask |= __GFP_WRITE;
|
|
if (flags & AOP_FLAG_NOFS)
|
|
- gfp_notmask = __GFP_FS;
|
|
-repeat:
|
|
- page = find_lock_page(mapping, index);
|
|
+ fgp_flags |= FGP_NOFS;
|
|
+
|
|
+ page = pagecache_get_page(mapping, index, fgp_flags,
|
|
+ mapping_gfp_mask(mapping));
|
|
if (page)
|
|
- goto found;
|
|
+ wait_for_stable_page(page);
|
|
|
|
- page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
|
|
- if (!page)
|
|
- return NULL;
|
|
- status = add_to_page_cache_lru(page, mapping, index,
|
|
- GFP_KERNEL & ~gfp_notmask);
|
|
- if (unlikely(status)) {
|
|
- page_cache_release(page);
|
|
- if (status == -EEXIST)
|
|
- goto repeat;
|
|
- return NULL;
|
|
- }
|
|
-found:
|
|
- wait_for_stable_page(page);
|
|
return page;
|
|
}
|
|
EXPORT_SYMBOL(grab_cache_page_write_begin);
|
|
@@ -2342,18 +2511,15 @@ again:
|
|
|
|
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
|
|
&page, &fsdata);
|
|
- if (unlikely(status))
|
|
+ if (unlikely(status < 0))
|
|
break;
|
|
|
|
if (mapping_writably_mapped(mapping))
|
|
flush_dcache_page(page);
|
|
|
|
- pagefault_disable();
|
|
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
|
|
- pagefault_enable();
|
|
flush_dcache_page(page);
|
|
|
|
- mark_page_accessed(page);
|
|
status = a_ops->write_end(file, mapping, pos, bytes, copied,
|
|
page, fsdata);
|
|
if (unlikely(status < 0))
|
|
diff --git a/mm/frontswap.c b/mm/frontswap.c
|
|
index 1b24bdc..f2a3571 100644
|
|
--- a/mm/frontswap.c
|
|
+++ b/mm/frontswap.c
|
|
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
|
|
the (older) page from frontswap
|
|
*/
|
|
inc_frontswap_failed_stores();
|
|
- if (dup)
|
|
+ if (dup) {
|
|
__frontswap_clear(sis, offset);
|
|
+ frontswap_ops->invalidate_page(type, offset);
|
|
+ }
|
|
}
|
|
if (frontswap_writethrough_enabled)
|
|
/* report failure so swap also writes to swap device */
|
|
@@ -327,15 +329,12 @@ EXPORT_SYMBOL(__frontswap_invalidate_area);
|
|
|
|
static unsigned long __frontswap_curr_pages(void)
|
|
{
|
|
- int type;
|
|
unsigned long totalpages = 0;
|
|
struct swap_info_struct *si = NULL;
|
|
|
|
assert_spin_locked(&swap_lock);
|
|
- for (type = swap_list.head; type >= 0; type = si->next) {
|
|
- si = swap_info[type];
|
|
+ plist_for_each_entry(si, &swap_active_head, list)
|
|
totalpages += atomic_read(&si->frontswap_pages);
|
|
- }
|
|
return totalpages;
|
|
}
|
|
|
|
@@ -347,11 +346,9 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
|
|
int si_frontswap_pages;
|
|
unsigned long total_pages_to_unuse = total;
|
|
unsigned long pages = 0, pages_to_unuse = 0;
|
|
- int type;
|
|
|
|
assert_spin_locked(&swap_lock);
|
|
- for (type = swap_list.head; type >= 0; type = si->next) {
|
|
- si = swap_info[type];
|
|
+ plist_for_each_entry(si, &swap_active_head, list) {
|
|
si_frontswap_pages = atomic_read(&si->frontswap_pages);
|
|
if (total_pages_to_unuse < si_frontswap_pages) {
|
|
pages = pages_to_unuse = total_pages_to_unuse;
|
|
@@ -366,7 +363,7 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
|
|
}
|
|
vm_unacct_memory(pages);
|
|
*unused = pages_to_unuse;
|
|
- *swapid = type;
|
|
+ *swapid = si->type;
|
|
ret = 0;
|
|
break;
|
|
}
|
|
@@ -413,7 +410,7 @@ void frontswap_shrink(unsigned long target_pages)
|
|
/*
|
|
* we don't want to hold swap_lock while doing a very
|
|
* lengthy try_to_unuse, but swap_list may change
|
|
- * so restart scan from swap_list.head each time
|
|
+ * so restart scan from swap_active_head each time
|
|
*/
|
|
spin_lock(&swap_lock);
|
|
ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
|
|
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
|
|
index 1c42d0c..adce656 100644
|
|
--- a/mm/huge_memory.c
|
|
+++ b/mm/huge_memory.c
|
|
@@ -199,7 +199,7 @@ retry:
|
|
preempt_disable();
|
|
if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
|
|
preempt_enable();
|
|
- __free_page(zero_page);
|
|
+ __free_pages(zero_page, compound_order(zero_page));
|
|
goto retry;
|
|
}
|
|
|
|
@@ -231,7 +231,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
|
|
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
|
|
struct page *zero_page = xchg(&huge_zero_page, NULL);
|
|
BUG_ON(zero_page == NULL);
|
|
- __free_page(zero_page);
|
|
+ __free_pages(zero_page, compound_order(zero_page));
|
|
return HPAGE_PMD_NR;
|
|
}
|
|
|
|
@@ -1819,21 +1819,24 @@ static int __split_huge_page_map(struct page *page,
|
|
if (pmd) {
|
|
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
|
|
pmd_populate(mm, &_pmd, pgtable);
|
|
+ if (pmd_write(*pmd))
|
|
+ BUG_ON(page_mapcount(page) != 1);
|
|
|
|
haddr = address;
|
|
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
|
|
pte_t *pte, entry;
|
|
BUG_ON(PageCompound(page+i));
|
|
+ /*
|
|
+ * Note that pmd_numa is not transferred deliberately
|
|
+ * to avoid any possibility that pte_numa leaks to
|
|
+ * a PROT_NONE VMA by accident.
|
|
+ */
|
|
entry = mk_pte(page + i, vma->vm_page_prot);
|
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
|
if (!pmd_write(*pmd))
|
|
entry = pte_wrprotect(entry);
|
|
- else
|
|
- BUG_ON(page_mapcount(page) != 1);
|
|
if (!pmd_young(*pmd))
|
|
entry = pte_mkold(entry);
|
|
- if (pmd_numa(*pmd))
|
|
- entry = pte_mknuma(entry);
|
|
pte = pte_offset_map(&_pmd, haddr);
|
|
BUG_ON(!pte_none(*pte));
|
|
set_pte_at(mm, haddr, pte, entry);
|
|
@@ -2270,6 +2273,30 @@ static void khugepaged_alloc_sleep(void)
|
|
|
|
static int khugepaged_node_load[MAX_NUMNODES];
|
|
|
|
+static bool khugepaged_scan_abort(int nid)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * If zone_reclaim_mode is disabled, then no extra effort is made to
|
|
+ * allocate memory locally.
|
|
+ */
|
|
+ if (!zone_reclaim_mode)
|
|
+ return false;
|
|
+
|
|
+ /* If there is a count for this node already, it must be acceptable */
|
|
+ if (khugepaged_node_load[nid])
|
|
+ return false;
|
|
+
|
|
+ for (i = 0; i < MAX_NUMNODES; i++) {
|
|
+ if (!khugepaged_node_load[i])
|
|
+ continue;
|
|
+ if (node_distance(nid, i) > RECLAIM_DISTANCE)
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
#ifdef CONFIG_NUMA
|
|
static int khugepaged_find_target_node(void)
|
|
{
|
|
@@ -2586,6 +2613,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
|
|
* hit record.
|
|
*/
|
|
node = page_to_nid(page);
|
|
+ if (khugepaged_scan_abort(node))
|
|
+ goto out_unmap;
|
|
khugepaged_node_load[node]++;
|
|
VM_BUG_ON_PAGE(PageCompound(page), page);
|
|
if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
|
|
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
|
|
index 30dd626..c3e8660 100644
|
|
--- a/mm/hugetlb.c
|
|
+++ b/mm/hugetlb.c
|
|
@@ -540,7 +540,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
|
|
goto err;
|
|
|
|
retry_cpuset:
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
zonelist = huge_zonelist(vma, address,
|
|
htlb_alloc_mask(h), &mpol, &nodemask);
|
|
|
|
@@ -562,7 +562,7 @@ retry_cpuset:
|
|
}
|
|
|
|
mpol_cond_put(mpol);
|
|
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
|
|
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
|
|
goto retry_cpuset;
|
|
return page;
|
|
|
|
@@ -2071,6 +2071,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
|
|
unsigned long tmp;
|
|
int ret;
|
|
|
|
+ if (!hugepages_supported())
|
|
+ return -ENOTSUPP;
|
|
+
|
|
tmp = h->max_huge_pages;
|
|
|
|
if (write && h->order >= MAX_ORDER)
|
|
@@ -2124,6 +2127,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
|
|
unsigned long tmp;
|
|
int ret;
|
|
|
|
+ if (!hugepages_supported())
|
|
+ return -ENOTSUPP;
|
|
+
|
|
tmp = h->nr_overcommit_huge_pages;
|
|
|
|
if (write && h->order >= MAX_ORDER)
|
|
@@ -2149,6 +2155,8 @@ out:
|
|
void hugetlb_report_meminfo(struct seq_file *m)
|
|
{
|
|
struct hstate *h = &default_hstate;
|
|
+ if (!hugepages_supported())
|
|
+ return;
|
|
seq_printf(m,
|
|
"HugePages_Total: %5lu\n"
|
|
"HugePages_Free: %5lu\n"
|
|
@@ -2165,6 +2173,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
|
|
int hugetlb_report_node_meminfo(int nid, char *buf)
|
|
{
|
|
struct hstate *h = &default_hstate;
|
|
+ if (!hugepages_supported())
|
|
+ return 0;
|
|
return sprintf(buf,
|
|
"Node %d HugePages_Total: %5u\n"
|
|
"Node %d HugePages_Free: %5u\n"
|
|
@@ -2179,6 +2189,9 @@ void hugetlb_show_meminfo(void)
|
|
struct hstate *h;
|
|
int nid;
|
|
|
|
+ if (!hugepages_supported())
|
|
+ return;
|
|
+
|
|
for_each_node_state(nid, N_MEMORY)
|
|
for_each_hstate(h)
|
|
pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
|
|
@@ -2422,6 +2435,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
|
|
} else {
|
|
if (cow)
|
|
huge_ptep_set_wrprotect(src, addr, src_pte);
|
|
+ entry = huge_ptep_get(src_pte);
|
|
ptepage = pte_page(entry);
|
|
get_page(ptepage);
|
|
page_dup_rmap(ptepage);
|
|
@@ -2474,9 +2488,10 @@ again:
|
|
goto unlock;
|
|
|
|
/*
|
|
- * HWPoisoned hugepage is already unmapped and dropped reference
|
|
+ * Migrating hugepage or HWPoisoned hugepage is already
|
|
+ * unmapped and its refcount is dropped, so just clear pte here.
|
|
*/
|
|
- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
|
|
+ if (unlikely(!pte_present(pte))) {
|
|
huge_pte_clear(mm, address, ptep);
|
|
goto unlock;
|
|
}
|
|
@@ -3149,7 +3164,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
|
|
spin_unlock(ptl);
|
|
continue;
|
|
}
|
|
- if (!huge_pte_none(huge_ptep_get(ptep))) {
|
|
+ pte = huge_ptep_get(ptep);
|
|
+ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
|
|
+ spin_unlock(ptl);
|
|
+ continue;
|
|
+ }
|
|
+ if (unlikely(is_hugetlb_entry_migration(pte))) {
|
|
+ swp_entry_t entry = pte_to_swp_entry(pte);
|
|
+
|
|
+ if (is_write_migration_entry(entry)) {
|
|
+ pte_t newpte;
|
|
+
|
|
+ make_migration_entry_read(&entry);
|
|
+ newpte = swp_entry_to_pte(entry);
|
|
+ set_huge_pte_at(mm, address, ptep, newpte);
|
|
+ pages++;
|
|
+ }
|
|
+ spin_unlock(ptl);
|
|
+ continue;
|
|
+ }
|
|
+ if (!huge_pte_none(pte)) {
|
|
pte = huge_ptep_get_and_clear(mm, address, ptep);
|
|
pte = pte_mkhuge(huge_pte_modify(pte, newprot));
|
|
pte = arch_make_huge_pte(pte, vma, NULL, 0);
|
|
@@ -3442,6 +3476,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
|
|
{
|
|
struct page *page;
|
|
|
|
+ if (!pmd_present(*pmd))
|
|
+ return NULL;
|
|
page = pte_page(*(pte_t *)pmd);
|
|
if (page)
|
|
page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
|
|
diff --git a/mm/internal.h b/mm/internal.h
|
|
index 3e91000..1a8a0d4 100644
|
|
--- a/mm/internal.h
|
|
+++ b/mm/internal.h
|
|
@@ -11,6 +11,7 @@
|
|
#ifndef __MM_INTERNAL_H
|
|
#define __MM_INTERNAL_H
|
|
|
|
+#include <linux/fs.h>
|
|
#include <linux/mm.h>
|
|
|
|
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
|
|
@@ -21,6 +22,20 @@ static inline void set_page_count(struct page *page, int v)
|
|
atomic_set(&page->_count, v);
|
|
}
|
|
|
|
+extern int __do_page_cache_readahead(struct address_space *mapping,
|
|
+ struct file *filp, pgoff_t offset, unsigned long nr_to_read,
|
|
+ unsigned long lookahead_size);
|
|
+
|
|
+/*
|
|
+ * Submit IO for the read-ahead request in file_ra_state.
|
|
+ */
|
|
+static inline unsigned long ra_submit(struct file_ra_state *ra,
|
|
+ struct address_space *mapping, struct file *filp)
|
|
+{
|
|
+ return __do_page_cache_readahead(mapping, filp,
|
|
+ ra->start, ra->size, ra->async_size);
|
|
+}
|
|
+
|
|
/*
|
|
* Turn a non-refcounted page (->_count == 0) into refcounted with
|
|
* a count of one.
|
|
@@ -119,7 +134,7 @@ struct compact_control {
|
|
unsigned long nr_migratepages; /* Number of pages to migrate */
|
|
unsigned long free_pfn; /* isolate_freepages search base */
|
|
unsigned long migrate_pfn; /* isolate_migratepages search base */
|
|
- bool sync; /* Synchronous migration */
|
|
+ enum migrate_mode mode; /* Async or sync migration mode */
|
|
bool ignore_skip_hint; /* Scan blocks even if marked skip */
|
|
bool finished_update_free; /* True when the zone cached pfns are
|
|
* no longer being updated
|
|
@@ -129,7 +144,10 @@ struct compact_control {
|
|
int order; /* order a direct compactor needs */
|
|
int migratetype; /* MOVABLE, RECLAIMABLE etc */
|
|
struct zone *zone;
|
|
- bool contended; /* True if a lock was contended */
|
|
+ bool contended; /* True if a lock was contended, or
|
|
+ * need_resched() true during async
|
|
+ * compaction
|
|
+ */
|
|
};
|
|
|
|
unsigned long
|
|
diff --git a/mm/ksm.c b/mm/ksm.c
|
|
index 68710e8..5e706e3 100644
|
|
--- a/mm/ksm.c
|
|
+++ b/mm/ksm.c
|
|
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
|
|
else
|
|
ret = VM_FAULT_WRITE;
|
|
put_page(page);
|
|
- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
|
|
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
|
|
/*
|
|
* We must loop because handle_mm_fault() may back out if there's
|
|
* any difficulty e.g. if pte accessed bit gets updated concurrently.
|
|
diff --git a/mm/madvise.c b/mm/madvise.c
|
|
index 539eeb9..a402f8f 100644
|
|
--- a/mm/madvise.c
|
|
+++ b/mm/madvise.c
|
|
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
|
|
for (; start < end; start += PAGE_SIZE) {
|
|
index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
|
|
|
|
- page = find_get_page(mapping, index);
|
|
+ page = find_get_entry(mapping, index);
|
|
if (!radix_tree_exceptional_entry(page)) {
|
|
if (page)
|
|
page_cache_release(page);
|
|
diff --git a/mm/memblock.c b/mm/memblock.c
|
|
index f3a07a4..dfee4aa 100644
|
|
--- a/mm/memblock.c
|
|
+++ b/mm/memblock.c
|
|
@@ -183,8 +183,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
|
|
phys_addr_t align, phys_addr_t start,
|
|
phys_addr_t end, int nid)
|
|
{
|
|
- int ret;
|
|
- phys_addr_t kernel_end;
|
|
+ phys_addr_t kernel_end, ret;
|
|
|
|
/* pump up @end */
|
|
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
|
|
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
|
|
index 5b6b003..b58d4fb 100644
|
|
--- a/mm/memcontrol.c
|
|
+++ b/mm/memcontrol.c
|
|
@@ -292,6 +292,9 @@ struct mem_cgroup {
|
|
/* vmpressure notifications */
|
|
struct vmpressure vmpressure;
|
|
|
|
+ /* css_online() has been completed */
|
|
+ int initialized;
|
|
+
|
|
/*
|
|
* the counter to account for mem+swap usage.
|
|
*/
|
|
@@ -1127,9 +1130,21 @@ skip_node:
|
|
* skipping css reference should be safe.
|
|
*/
|
|
if (next_css) {
|
|
- if ((next_css == &root->css) ||
|
|
- ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
|
|
- return mem_cgroup_from_css(next_css);
|
|
+ struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
|
|
+
|
|
+ if (next_css == &root->css)
|
|
+ return memcg;
|
|
+
|
|
+ if (css_tryget(next_css)) {
|
|
+ /*
|
|
+ * Make sure the memcg is initialized:
|
|
+ * mem_cgroup_css_online() orders the the
|
|
+ * initialization against setting the flag.
|
|
+ */
|
|
+ if (smp_load_acquire(&memcg->initialized))
|
|
+ return memcg;
|
|
+ css_put(next_css);
|
|
+ }
|
|
|
|
prev_css = next_css;
|
|
goto skip_node;
|
|
@@ -5670,8 +5685,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
|
|
{
|
|
struct mem_cgroup_eventfd_list *ev;
|
|
|
|
+ spin_lock(&memcg_oom_lock);
|
|
+
|
|
list_for_each_entry(ev, &memcg->oom_notify, list)
|
|
eventfd_signal(ev->eventfd, 1);
|
|
+
|
|
+ spin_unlock(&memcg_oom_lock);
|
|
return 0;
|
|
}
|
|
|
|
@@ -6534,6 +6553,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
|
|
{
|
|
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
|
|
struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
|
|
+ int ret;
|
|
|
|
if (css->cgroup->id > MEM_CGROUP_ID_MAX)
|
|
return -ENOSPC;
|
|
@@ -6570,7 +6590,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
|
|
}
|
|
mutex_unlock(&memcg_create_mutex);
|
|
|
|
- return memcg_init_kmem(memcg, &mem_cgroup_subsys);
|
|
+ ret = memcg_init_kmem(memcg, &mem_cgroup_subsys);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * Make sure the memcg is initialized: mem_cgroup_iter()
|
|
+ * orders reading memcg->initialized against its callers
|
|
+ * reading the memcg members.
|
|
+ */
|
|
+ smp_store_release(&memcg->initialized, 1);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
|
|
index 33365e9..42aeb84 100644
|
|
--- a/mm/memory-failure.c
|
|
+++ b/mm/memory-failure.c
|
|
@@ -1149,10 +1149,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
|
|
* The check (unnecessarily) ignores LRU pages being isolated and
|
|
* walked by the page reclaim code, however that's not a big loss.
|
|
*/
|
|
- if (!PageHuge(p) && !PageTransTail(p)) {
|
|
- if (!PageLRU(p))
|
|
- shake_page(p, 0);
|
|
- if (!PageLRU(p)) {
|
|
+ if (!PageHuge(p)) {
|
|
+ if (!PageLRU(hpage))
|
|
+ shake_page(hpage, 0);
|
|
+ if (!PageLRU(hpage)) {
|
|
/*
|
|
* shake_page could have turned it free.
|
|
*/
|
|
@@ -1510,6 +1510,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
|
|
*/
|
|
ret = __get_any_page(page, pfn, 0);
|
|
if (!PageLRU(page)) {
|
|
+ /* Drop page reference which is from __get_any_page() */
|
|
+ put_page(page);
|
|
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
|
|
pfn, page->flags);
|
|
return -EIO;
|
|
@@ -1540,7 +1542,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
|
|
|
|
/* Keep page count to indicate a given hugepage is isolated. */
|
|
list_move(&hpage->lru, &pagelist);
|
|
- ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
|
|
+ ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
|
|
MIGRATE_SYNC, MR_MEMORY_FAILURE);
|
|
if (ret) {
|
|
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
|
|
@@ -1621,7 +1623,7 @@ static int __soft_offline_page(struct page *page, int flags)
|
|
inc_zone_page_state(page, NR_ISOLATED_ANON +
|
|
page_is_file_cache(page));
|
|
list_add(&page->lru, &pagelist);
|
|
- ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
|
|
+ ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
|
|
MIGRATE_SYNC, MR_MEMORY_FAILURE);
|
|
if (ret) {
|
|
if (!list_empty(&pagelist)) {
|
|
@@ -1645,8 +1647,6 @@ static int __soft_offline_page(struct page *page, int flags)
|
|
* setting PG_hwpoison.
|
|
*/
|
|
if (!is_free_buddy_page(page))
|
|
- lru_add_drain_all();
|
|
- if (!is_free_buddy_page(page))
|
|
drain_all_pages();
|
|
SetPageHWPoison(page);
|
|
if (!is_free_buddy_page(page))
|
|
@@ -1725,12 +1725,12 @@ int soft_offline_page(struct page *page, int flags)
|
|
} else if (ret == 0) { /* for free pages */
|
|
if (PageHuge(page)) {
|
|
set_page_hwpoison_huge_page(hpage);
|
|
- dequeue_hwpoisoned_huge_page(hpage);
|
|
- atomic_long_add(1 << compound_order(hpage),
|
|
+ if (!dequeue_hwpoisoned_huge_page(hpage))
|
|
+ atomic_long_add(1 << compound_order(hpage),
|
|
&num_poisoned_pages);
|
|
} else {
|
|
- SetPageHWPoison(page);
|
|
- atomic_long_inc(&num_poisoned_pages);
|
|
+ if (!TestSetPageHWPoison(page))
|
|
+ atomic_long_inc(&num_poisoned_pages);
|
|
}
|
|
}
|
|
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
|
|
diff --git a/mm/memory.c b/mm/memory.c
|
|
index 2121d8b8..e9ddc7a 100644
|
|
--- a/mm/memory.c
|
|
+++ b/mm/memory.c
|
|
@@ -808,20 +808,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
if (!pte_file(pte)) {
|
|
swp_entry_t entry = pte_to_swp_entry(pte);
|
|
|
|
- if (swap_duplicate(entry) < 0)
|
|
- return entry.val;
|
|
-
|
|
- /* make sure dst_mm is on swapoff's mmlist. */
|
|
- if (unlikely(list_empty(&dst_mm->mmlist))) {
|
|
- spin_lock(&mmlist_lock);
|
|
- if (list_empty(&dst_mm->mmlist))
|
|
- list_add(&dst_mm->mmlist,
|
|
- &src_mm->mmlist);
|
|
- spin_unlock(&mmlist_lock);
|
|
- }
|
|
- if (likely(!non_swap_entry(entry)))
|
|
+ if (likely(!non_swap_entry(entry))) {
|
|
+ if (swap_duplicate(entry) < 0)
|
|
+ return entry.val;
|
|
+
|
|
+ /* make sure dst_mm is on swapoff's mmlist. */
|
|
+ if (unlikely(list_empty(&dst_mm->mmlist))) {
|
|
+ spin_lock(&mmlist_lock);
|
|
+ if (list_empty(&dst_mm->mmlist))
|
|
+ list_add(&dst_mm->mmlist,
|
|
+ &src_mm->mmlist);
|
|
+ spin_unlock(&mmlist_lock);
|
|
+ }
|
|
rss[MM_SWAPENTS]++;
|
|
- else if (is_migration_entry(entry)) {
|
|
+ } else if (is_migration_entry(entry)) {
|
|
page = migration_entry_to_page(entry);
|
|
|
|
if (PageAnon(page))
|
|
@@ -878,7 +878,7 @@ out_set_pte:
|
|
return 0;
|
|
}
|
|
|
|
-int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
+static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
|
|
unsigned long addr, unsigned long end)
|
|
{
|
|
@@ -1120,7 +1120,7 @@ again:
|
|
addr) != page->index) {
|
|
pte_t ptfile = pgoff_to_pte(page->index);
|
|
if (pte_soft_dirty(ptent))
|
|
- pte_file_mksoft_dirty(ptfile);
|
|
+ ptfile = pte_file_mksoft_dirty(ptfile);
|
|
set_pte_at(mm, addr, pte, ptfile);
|
|
}
|
|
if (PageAnon(page))
|
|
@@ -1836,7 +1836,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
else
|
|
return -EFAULT;
|
|
}
|
|
- if (ret & VM_FAULT_SIGBUS)
|
|
+ if (ret & (VM_FAULT_SIGBUS |
|
|
+ VM_FAULT_SIGSEGV))
|
|
return i ? i : -EFAULT;
|
|
BUG();
|
|
}
|
|
@@ -1946,7 +1947,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
|
|
return -ENOMEM;
|
|
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
|
|
return -EHWPOISON;
|
|
- if (ret & VM_FAULT_SIGBUS)
|
|
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
|
|
return -EFAULT;
|
|
BUG();
|
|
}
|
|
@@ -3204,7 +3205,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
|
|
if (prev && prev->vm_end == address)
|
|
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
|
|
|
|
- expand_downwards(vma, address - PAGE_SIZE);
|
|
+ return expand_downwards(vma, address - PAGE_SIZE);
|
|
}
|
|
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
|
|
struct vm_area_struct *next = vma->vm_next;
|
|
@@ -3213,7 +3214,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
|
|
if (next && next->vm_start == address + PAGE_SIZE)
|
|
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
|
|
|
|
- expand_upwards(vma, address + PAGE_SIZE);
|
|
+ return expand_upwards(vma, address + PAGE_SIZE);
|
|
}
|
|
return 0;
|
|
}
|
|
@@ -3233,9 +3234,13 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
pte_unmap(page_table);
|
|
|
|
+ /* File mapping without ->vm_ops ? */
|
|
+ if (vma->vm_flags & VM_SHARED)
|
|
+ return VM_FAULT_SIGBUS;
|
|
+
|
|
/* Check if we need to add a guard page to the stack */
|
|
if (check_stack_guard_page(vma, address) < 0)
|
|
- return VM_FAULT_SIGBUS;
|
|
+ return VM_FAULT_SIGSEGV;
|
|
|
|
/* Use the zero-page for reads */
|
|
if (!(flags & FAULT_FLAG_WRITE)) {
|
|
@@ -3501,6 +3506,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
|
|
|
|
pte_unmap(page_table);
|
|
+ /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
|
|
+ if (!vma->vm_ops->fault)
|
|
+ return VM_FAULT_SIGBUS;
|
|
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
|
|
}
|
|
|
|
@@ -3646,14 +3654,12 @@ static int handle_pte_fault(struct mm_struct *mm,
|
|
pte_t entry;
|
|
spinlock_t *ptl;
|
|
|
|
- entry = *pte;
|
|
+ entry = ACCESS_ONCE(*pte);
|
|
if (!pte_present(entry)) {
|
|
if (pte_none(entry)) {
|
|
- if (vma->vm_ops) {
|
|
- if (likely(vma->vm_ops->fault))
|
|
- return do_linear_fault(mm, vma, address,
|
|
+ if (vma->vm_ops)
|
|
+ return do_linear_fault(mm, vma, address,
|
|
pte, pmd, flags, entry);
|
|
- }
|
|
return do_anonymous_page(mm, vma, address,
|
|
pte, pmd, flags);
|
|
}
|
|
@@ -4024,7 +4030,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
|
|
if (follow_phys(vma, addr, write, &prot, &phys_addr))
|
|
return -EINVAL;
|
|
|
|
- maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
|
|
+ maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
|
|
if (write)
|
|
memcpy_toio(maddr + offset, buf, len);
|
|
else
|
|
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
|
|
index a650db2..5bba3b3 100644
|
|
--- a/mm/memory_hotplug.c
|
|
+++ b/mm/memory_hotplug.c
|
|
@@ -1016,6 +1016,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
|
|
return NULL;
|
|
|
|
arch_refresh_nodedata(nid, pgdat);
|
|
+ } else {
|
|
+ /* Reset the nr_zones and classzone_idx to 0 before reuse */
|
|
+ pgdat->nr_zones = 0;
|
|
+ pgdat->classzone_idx = 0;
|
|
}
|
|
|
|
/* we can use NODE_DATA(nid) from here */
|
|
@@ -1332,7 +1336,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
|
* alloc_migrate_target should be improooooved!!
|
|
* migrate_pages returns # of failed pages.
|
|
*/
|
|
- ret = migrate_pages(&source, alloc_migrate_target, 0,
|
|
+ ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
|
|
MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
|
|
if (ret)
|
|
putback_movable_pages(&source);
|
|
@@ -1860,18 +1864,11 @@ void try_offline_node(int nid)
|
|
* wait_table may be allocated from boot memory,
|
|
* here only free if it's allocated by vmalloc.
|
|
*/
|
|
- if (is_vmalloc_addr(zone->wait_table))
|
|
+ if (is_vmalloc_addr(zone->wait_table)) {
|
|
vfree(zone->wait_table);
|
|
+ zone->wait_table = NULL;
|
|
+ }
|
|
}
|
|
-
|
|
- /*
|
|
- * Since there is no way to guarentee the address of pgdat/zone is not
|
|
- * on stack of any kernel threads or used by other kernel objects
|
|
- * without reference counting or other symchronizing method, do not
|
|
- * reset node_data and free pgdat here. Just reset it to 0 and reuse
|
|
- * the memory when the node is online again.
|
|
- */
|
|
- memset(pgdat, 0, sizeof(*pgdat));
|
|
}
|
|
EXPORT_SYMBOL(try_offline_node);
|
|
|
|
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
|
|
index 15a8ea0..936866e 100644
|
|
--- a/mm/mempolicy.c
|
|
+++ b/mm/mempolicy.c
|
|
@@ -1060,7 +1060,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
|
|
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
|
|
|
|
if (!list_empty(&pagelist)) {
|
|
- err = migrate_pages(&pagelist, new_node_page, dest,
|
|
+ err = migrate_pages(&pagelist, new_node_page, NULL, dest,
|
|
MIGRATE_SYNC, MR_SYSCALL);
|
|
if (err)
|
|
putback_movable_pages(&pagelist);
|
|
@@ -1306,7 +1306,7 @@ static long do_mbind(unsigned long start, unsigned long len,
|
|
|
|
if (!list_empty(&pagelist)) {
|
|
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
|
|
- nr_failed = migrate_pages(&pagelist, new_page,
|
|
+ nr_failed = migrate_pages(&pagelist, new_page, NULL,
|
|
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
|
|
if (nr_failed)
|
|
putback_movable_pages(&pagelist);
|
|
@@ -1897,7 +1897,7 @@ int node_random(const nodemask_t *maskp)
|
|
* If the effective policy is 'BIND, returns a pointer to the mempolicy's
|
|
* @nodemask for filtering the zonelist.
|
|
*
|
|
- * Must be protected by get_mems_allowed()
|
|
+ * Must be protected by read_mems_allowed_begin()
|
|
*/
|
|
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
|
|
gfp_t gfp_flags, struct mempolicy **mpol,
|
|
@@ -2061,7 +2061,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
|
|
|
|
retry_cpuset:
|
|
pol = get_vma_policy(current, vma, addr);
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
|
|
if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
|
|
unsigned nid;
|
|
@@ -2069,7 +2069,7 @@ retry_cpuset:
|
|
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
|
|
mpol_cond_put(pol);
|
|
page = alloc_page_interleave(gfp, order, nid);
|
|
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
|
|
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
|
|
goto retry_cpuset;
|
|
|
|
return page;
|
|
@@ -2079,7 +2079,7 @@ retry_cpuset:
|
|
policy_nodemask(gfp, pol));
|
|
if (unlikely(mpol_needs_cond_ref(pol)))
|
|
__mpol_put(pol);
|
|
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
|
|
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
|
|
goto retry_cpuset;
|
|
return page;
|
|
}
|
|
@@ -2113,7 +2113,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
|
|
pol = &default_policy;
|
|
|
|
retry_cpuset:
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
|
|
/*
|
|
* No reference counting needed for current->mempolicy
|
|
@@ -2126,7 +2126,7 @@ retry_cpuset:
|
|
policy_zonelist(gfp, pol, numa_node_id()),
|
|
policy_nodemask(gfp, pol));
|
|
|
|
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
|
|
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
|
|
goto retry_cpuset;
|
|
|
|
return page;
|
|
@@ -2663,7 +2663,7 @@ static void __init check_numabalancing_enable(void)
|
|
if (numabalancing_override)
|
|
set_numabalancing_state(numabalancing_override == 1);
|
|
|
|
- if (nr_node_ids > 1 && !numabalancing_override) {
|
|
+ if (num_online_nodes() > 1 && !numabalancing_override) {
|
|
pr_info("%s automatic NUMA balancing. "
|
|
"Configure with numa_balancing= or the "
|
|
"kernel.numa_balancing sysctl",
|
|
diff --git a/mm/migrate.c b/mm/migrate.c
|
|
index bed4880..3acac4a 100644
|
|
--- a/mm/migrate.c
|
|
+++ b/mm/migrate.c
|
|
@@ -148,8 +148,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
|
|
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
|
|
if (pte_swp_soft_dirty(*ptep))
|
|
pte = pte_mksoft_dirty(pte);
|
|
+
|
|
+ /* Recheck VMA as permissions can change since migration started */
|
|
if (is_write_migration_entry(entry))
|
|
- pte = pte_mkwrite(pte);
|
|
+ pte = maybe_mkwrite(pte, vma);
|
|
+
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
if (PageHuge(new)) {
|
|
pte = pte_mkhuge(pte);
|
|
@@ -938,8 +941,9 @@ out:
|
|
* Obtain the lock on page, remove all ptes and migrate the page
|
|
* to the newly allocated page in newpage.
|
|
*/
|
|
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
|
|
- struct page *page, int force, enum migrate_mode mode)
|
|
+static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
|
|
+ unsigned long private, struct page *page, int force,
|
|
+ enum migrate_mode mode)
|
|
{
|
|
int rc = 0;
|
|
int *result = NULL;
|
|
@@ -983,11 +987,18 @@ out:
|
|
page_is_file_cache(page));
|
|
putback_lru_page(page);
|
|
}
|
|
+
|
|
/*
|
|
- * Move the new page to the LRU. If migration was not successful
|
|
- * then this will free the page.
|
|
+ * If migration was not successful and there's a freeing callback, use
|
|
+ * it. Otherwise, putback_lru_page() will drop the reference grabbed
|
|
+ * during isolation.
|
|
*/
|
|
- putback_lru_page(newpage);
|
|
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
|
|
+ ClearPageSwapBacked(newpage);
|
|
+ put_new_page(newpage, private);
|
|
+ } else
|
|
+ putback_lru_page(newpage);
|
|
+
|
|
if (result) {
|
|
if (rc)
|
|
*result = rc;
|
|
@@ -1016,8 +1027,9 @@ out:
|
|
* will wait in the page fault for migration to complete.
|
|
*/
|
|
static int unmap_and_move_huge_page(new_page_t get_new_page,
|
|
- unsigned long private, struct page *hpage,
|
|
- int force, enum migrate_mode mode)
|
|
+ free_page_t put_new_page, unsigned long private,
|
|
+ struct page *hpage, int force,
|
|
+ enum migrate_mode mode)
|
|
{
|
|
int rc = 0;
|
|
int *result = NULL;
|
|
@@ -1056,20 +1068,30 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
|
|
if (!page_mapped(hpage))
|
|
rc = move_to_new_page(new_hpage, hpage, 1, mode);
|
|
|
|
- if (rc)
|
|
+ if (rc != MIGRATEPAGE_SUCCESS)
|
|
remove_migration_ptes(hpage, hpage);
|
|
|
|
if (anon_vma)
|
|
put_anon_vma(anon_vma);
|
|
|
|
- if (!rc)
|
|
+ if (rc == MIGRATEPAGE_SUCCESS)
|
|
hugetlb_cgroup_migrate(hpage, new_hpage);
|
|
|
|
unlock_page(hpage);
|
|
out:
|
|
if (rc != -EAGAIN)
|
|
putback_active_hugepage(hpage);
|
|
- put_page(new_hpage);
|
|
+
|
|
+ /*
|
|
+ * If migration was not successful and there's a freeing callback, use
|
|
+ * it. Otherwise, put_page() will drop the reference grabbed during
|
|
+ * isolation.
|
|
+ */
|
|
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
|
|
+ put_new_page(new_hpage, private);
|
|
+ else
|
|
+ put_page(new_hpage);
|
|
+
|
|
if (result) {
|
|
if (rc)
|
|
*result = rc;
|
|
@@ -1086,6 +1108,8 @@ out:
|
|
* @from: The list of pages to be migrated.
|
|
* @get_new_page: The function used to allocate free pages to be used
|
|
* as the target of the page migration.
|
|
+ * @put_new_page: The function used to free target pages if migration
|
|
+ * fails, or NULL if no special handling is necessary.
|
|
* @private: Private data to be passed on to get_new_page()
|
|
* @mode: The migration mode that specifies the constraints for
|
|
* page migration, if any.
|
|
@@ -1099,7 +1123,8 @@ out:
|
|
* Returns the number of pages that were not migrated, or an error code.
|
|
*/
|
|
int migrate_pages(struct list_head *from, new_page_t get_new_page,
|
|
- unsigned long private, enum migrate_mode mode, int reason)
|
|
+ free_page_t put_new_page, unsigned long private,
|
|
+ enum migrate_mode mode, int reason)
|
|
{
|
|
int retry = 1;
|
|
int nr_failed = 0;
|
|
@@ -1121,10 +1146,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
|
|
|
|
if (PageHuge(page))
|
|
rc = unmap_and_move_huge_page(get_new_page,
|
|
- private, page, pass > 2, mode);
|
|
+ put_new_page, private, page,
|
|
+ pass > 2, mode);
|
|
else
|
|
- rc = unmap_and_move(get_new_page, private,
|
|
- page, pass > 2, mode);
|
|
+ rc = unmap_and_move(get_new_page, put_new_page,
|
|
+ private, page, pass > 2, mode);
|
|
|
|
switch(rc) {
|
|
case -ENOMEM:
|
|
@@ -1273,7 +1299,7 @@ set_status:
|
|
|
|
err = 0;
|
|
if (!list_empty(&pagelist)) {
|
|
- err = migrate_pages(&pagelist, new_page_node,
|
|
+ err = migrate_pages(&pagelist, new_page_node, NULL,
|
|
(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
|
|
if (err)
|
|
putback_movable_pages(&pagelist);
|
|
@@ -1729,7 +1755,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
|
|
|
|
list_add(&page->lru, &migratepages);
|
|
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
|
|
- node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
|
|
+ NULL, node, MIGRATE_ASYNC,
|
|
+ MR_NUMA_MISPLACED);
|
|
if (nr_remaining) {
|
|
if (!list_empty(&migratepages)) {
|
|
list_del(&page->lru);
|
|
diff --git a/mm/mincore.c b/mm/mincore.c
|
|
index 1016233..725c809 100644
|
|
--- a/mm/mincore.c
|
|
+++ b/mm/mincore.c
|
|
@@ -70,13 +70,21 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
|
|
* any other file mapping (ie. marked !present and faulted in with
|
|
* tmpfs's .fault). So swapped out tmpfs mappings are tested here.
|
|
*/
|
|
- page = find_get_page(mapping, pgoff);
|
|
#ifdef CONFIG_SWAP
|
|
- /* shmem/tmpfs may return swap: account for swapcache page too. */
|
|
- if (radix_tree_exceptional_entry(page)) {
|
|
- swp_entry_t swap = radix_to_swp_entry(page);
|
|
- page = find_get_page(swap_address_space(swap), swap.val);
|
|
- }
|
|
+ if (shmem_mapping(mapping)) {
|
|
+ page = find_get_entry(mapping, pgoff);
|
|
+ /*
|
|
+ * shmem/tmpfs may return swap: account for swapcache
|
|
+ * page too.
|
|
+ */
|
|
+ if (radix_tree_exceptional_entry(page)) {
|
|
+ swp_entry_t swp = radix_to_swp_entry(page);
|
|
+ page = find_get_page(swap_address_space(swp), swp.val);
|
|
+ }
|
|
+ } else
|
|
+ page = find_get_page(mapping, pgoff);
|
|
+#else
|
|
+ page = find_get_page(mapping, pgoff);
|
|
#endif
|
|
if (page) {
|
|
present = PageUptodate(page);
|
|
diff --git a/mm/mmap.c b/mm/mmap.c
|
|
index 20ff0c3..d4c97ba 100644
|
|
--- a/mm/mmap.c
|
|
+++ b/mm/mmap.c
|
|
@@ -10,6 +10,7 @@
|
|
#include <linux/slab.h>
|
|
#include <linux/backing-dev.h>
|
|
#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/shm.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/pagemap.h>
|
|
@@ -128,7 +129,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
|
|
*/
|
|
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
|
|
{
|
|
- unsigned long free, allowed, reserve;
|
|
+ long free, allowed, reserve;
|
|
|
|
vm_acct_memory(pages);
|
|
|
|
@@ -192,7 +193,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
|
|
*/
|
|
if (mm) {
|
|
reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
|
|
- allowed -= min(mm->total_vm / 32, reserve);
|
|
+ allowed -= min_t(long, mm->total_vm / 32, reserve);
|
|
}
|
|
|
|
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
|
|
@@ -681,8 +682,9 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
prev->vm_next = next = vma->vm_next;
|
|
if (next)
|
|
next->vm_prev = prev;
|
|
- if (mm->mmap_cache == vma)
|
|
- mm->mmap_cache = prev;
|
|
+
|
|
+ /* Kill the cache */
|
|
+ vmacache_invalidate(mm);
|
|
}
|
|
|
|
/*
|
|
@@ -743,8 +745,11 @@ again: remove_next = 1 + (end > next->vm_end);
|
|
* shrinking vma had, to cover any anon pages imported.
|
|
*/
|
|
if (exporter && exporter->anon_vma && !importer->anon_vma) {
|
|
- if (anon_vma_clone(importer, exporter))
|
|
- return -ENOMEM;
|
|
+ int error;
|
|
+
|
|
+ error = anon_vma_clone(importer, exporter);
|
|
+ if (error)
|
|
+ return error;
|
|
importer->anon_vma = exporter->anon_vma;
|
|
}
|
|
}
|
|
@@ -1989,34 +1994,33 @@ EXPORT_SYMBOL(get_unmapped_area);
|
|
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
|
|
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
- struct vm_area_struct *vma = NULL;
|
|
+ struct rb_node *rb_node;
|
|
+ struct vm_area_struct *vma;
|
|
|
|
/* Check the cache first. */
|
|
- /* (Cache hit rate is typically around 35%.) */
|
|
- vma = ACCESS_ONCE(mm->mmap_cache);
|
|
- if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
|
|
- struct rb_node *rb_node;
|
|
+ vma = vmacache_find(mm, addr);
|
|
+ if (likely(vma))
|
|
+ return vma;
|
|
|
|
- rb_node = mm->mm_rb.rb_node;
|
|
- vma = NULL;
|
|
+ rb_node = mm->mm_rb.rb_node;
|
|
+ vma = NULL;
|
|
|
|
- while (rb_node) {
|
|
- struct vm_area_struct *vma_tmp;
|
|
-
|
|
- vma_tmp = rb_entry(rb_node,
|
|
- struct vm_area_struct, vm_rb);
|
|
-
|
|
- if (vma_tmp->vm_end > addr) {
|
|
- vma = vma_tmp;
|
|
- if (vma_tmp->vm_start <= addr)
|
|
- break;
|
|
- rb_node = rb_node->rb_left;
|
|
- } else
|
|
- rb_node = rb_node->rb_right;
|
|
- }
|
|
- if (vma)
|
|
- mm->mmap_cache = vma;
|
|
+ while (rb_node) {
|
|
+ struct vm_area_struct *tmp;
|
|
+
|
|
+ tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
|
|
+
|
|
+ if (tmp->vm_end > addr) {
|
|
+ vma = tmp;
|
|
+ if (tmp->vm_start <= addr)
|
|
+ break;
|
|
+ rb_node = rb_node->rb_left;
|
|
+ } else
|
|
+ rb_node = rb_node->rb_right;
|
|
}
|
|
+
|
|
+ if (vma)
|
|
+ vmacache_update(addr, vma);
|
|
return vma;
|
|
}
|
|
|
|
@@ -2054,14 +2058,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
struct rlimit *rlim = current->signal->rlim;
|
|
- unsigned long new_start;
|
|
+ unsigned long new_start, actual_size;
|
|
|
|
/* address space limit tests */
|
|
if (!may_expand_vm(mm, grow))
|
|
return -ENOMEM;
|
|
|
|
/* Stack limit test */
|
|
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
|
|
+ actual_size = size;
|
|
+ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
|
|
+ actual_size -= PAGE_SIZE;
|
|
+ if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
|
|
return -ENOMEM;
|
|
|
|
/* mlock limit tests */
|
|
@@ -2388,7 +2395,9 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
} else
|
|
mm->highest_vm_end = prev ? prev->vm_end : 0;
|
|
tail_vma->vm_next = NULL;
|
|
- mm->mmap_cache = NULL; /* Kill the cache. */
|
|
+
|
|
+ /* Kill the cache */
|
|
+ vmacache_invalidate(mm);
|
|
}
|
|
|
|
/*
|
|
@@ -2425,7 +2434,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
|
|
if (err)
|
|
goto out_free_vma;
|
|
|
|
- if (anon_vma_clone(new, vma))
|
|
+ err = anon_vma_clone(new, vma);
|
|
+ if (err)
|
|
goto out_free_mpol;
|
|
|
|
if (new->vm_file)
|
|
diff --git a/mm/nommu.c b/mm/nommu.c
|
|
index 8740213..76b3f90 100644
|
|
--- a/mm/nommu.c
|
|
+++ b/mm/nommu.c
|
|
@@ -15,6 +15,7 @@
|
|
|
|
#include <linux/export.h>
|
|
#include <linux/mm.h>
|
|
+#include <linux/vmacache.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/file.h>
|
|
@@ -768,16 +769,23 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
|
|
*/
|
|
static void delete_vma_from_mm(struct vm_area_struct *vma)
|
|
{
|
|
+ int i;
|
|
struct address_space *mapping;
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
+ struct task_struct *curr = current;
|
|
|
|
kenter("%p", vma);
|
|
|
|
protect_vma(vma, 0);
|
|
|
|
mm->map_count--;
|
|
- if (mm->mmap_cache == vma)
|
|
- mm->mmap_cache = NULL;
|
|
+ for (i = 0; i < VMACACHE_SIZE; i++) {
|
|
+ /* if the vma is cached, invalidate the entire cache */
|
|
+ if (curr->vmacache[i] == vma) {
|
|
+ vmacache_invalidate(curr->mm);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
|
|
/* remove the VMA from the mapping */
|
|
if (vma->vm_file) {
|
|
@@ -825,8 +833,8 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
|
|
struct vm_area_struct *vma;
|
|
|
|
/* check the cache first */
|
|
- vma = ACCESS_ONCE(mm->mmap_cache);
|
|
- if (vma && vma->vm_start <= addr && vma->vm_end > addr)
|
|
+ vma = vmacache_find(mm, addr);
|
|
+ if (likely(vma))
|
|
return vma;
|
|
|
|
/* trawl the list (there may be multiple mappings in which addr
|
|
@@ -835,7 +843,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
|
|
if (vma->vm_start > addr)
|
|
return NULL;
|
|
if (vma->vm_end > addr) {
|
|
- mm->mmap_cache = vma;
|
|
+ vmacache_update(addr, vma);
|
|
return vma;
|
|
}
|
|
}
|
|
@@ -874,8 +882,8 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
|
|
unsigned long end = addr + len;
|
|
|
|
/* check the cache first */
|
|
- vma = mm->mmap_cache;
|
|
- if (vma && vma->vm_start == addr && vma->vm_end == end)
|
|
+ vma = vmacache_find_exact(mm, addr, end);
|
|
+ if (vma)
|
|
return vma;
|
|
|
|
/* trawl the list (there may be multiple mappings in which addr
|
|
@@ -886,7 +894,7 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
|
|
if (vma->vm_start > addr)
|
|
return NULL;
|
|
if (vma->vm_end == end) {
|
|
- mm->mmap_cache = vma;
|
|
+ vmacache_update(addr, vma);
|
|
return vma;
|
|
}
|
|
}
|
|
@@ -1897,7 +1905,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
|
|
*/
|
|
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
|
|
{
|
|
- unsigned long free, allowed, reserve;
|
|
+ long free, allowed, reserve;
|
|
|
|
vm_acct_memory(pages);
|
|
|
|
@@ -1961,7 +1969,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
|
|
*/
|
|
if (mm) {
|
|
reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
|
|
- allowed -= min(mm->total_vm / 32, reserve);
|
|
+ allowed -= min_t(long, mm->total_vm / 32, reserve);
|
|
}
|
|
|
|
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
|
|
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
|
|
index 3291e82..171c00f 100644
|
|
--- a/mm/oom_kill.c
|
|
+++ b/mm/oom_kill.c
|
|
@@ -406,6 +406,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
|
|
dump_tasks(memcg, nodemask);
|
|
}
|
|
|
|
+/*
|
|
+ * Number of OOM killer invocations (including memcg OOM killer).
|
|
+ * Primarily used by PM freezer to check for potential races with
|
|
+ * OOM killed frozen task.
|
|
+ */
|
|
+static atomic_t oom_kills = ATOMIC_INIT(0);
|
|
+
|
|
+int oom_kills_count(void)
|
|
+{
|
|
+ return atomic_read(&oom_kills);
|
|
+}
|
|
+
|
|
+void note_oom_kill(void)
|
|
+{
|
|
+ atomic_inc(&oom_kills);
|
|
+}
|
|
+
|
|
#define K(x) ((x) << (PAGE_SHIFT-10))
|
|
/*
|
|
* Must be called while holding a reference to p, which will be released upon
|
|
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
|
|
index d013dba..656a549 100644
|
|
--- a/mm/page-writeback.c
|
|
+++ b/mm/page-writeback.c
|
|
@@ -601,7 +601,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
|
|
long x;
|
|
|
|
x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
|
|
- limit - setpoint + 1);
|
|
+ (limit - setpoint) | 1);
|
|
pos_ratio = x;
|
|
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
|
|
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
|
|
@@ -828,7 +828,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
|
|
* scale global setpoint to bdi's:
|
|
* bdi_setpoint = setpoint * bdi_thresh / thresh
|
|
*/
|
|
- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
|
|
+ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
|
|
bdi_setpoint = setpoint * (u64)x >> 16;
|
|
/*
|
|
* Use span=(8*write_bw) in single bdi case as indicated by
|
|
@@ -843,7 +843,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
|
|
|
|
if (bdi_dirty < x_intercept - span / 4) {
|
|
pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
|
|
- x_intercept - bdi_setpoint + 1);
|
|
+ (x_intercept - bdi_setpoint) | 1);
|
|
} else
|
|
pos_ratio /= 4;
|
|
|
|
@@ -878,8 +878,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
|
|
* bw * elapsed + write_bandwidth * (period - elapsed)
|
|
* write_bandwidth = ---------------------------------------------------
|
|
* period
|
|
+ *
|
|
+ * @written may have decreased due to account_page_redirty().
|
|
+ * Avoid underflowing @bw calculation.
|
|
*/
|
|
- bw = written - bdi->written_stamp;
|
|
+ bw = written - min(written, bdi->written_stamp);
|
|
bw *= HZ;
|
|
if (unlikely(elapsed > period)) {
|
|
do_div(bw, elapsed);
|
|
@@ -943,7 +946,7 @@ static void global_update_bandwidth(unsigned long thresh,
|
|
unsigned long now)
|
|
{
|
|
static DEFINE_SPINLOCK(dirty_lock);
|
|
- static unsigned long update_time;
|
|
+ static unsigned long update_time = INITIAL_JIFFIES;
|
|
|
|
/*
|
|
* check locklessly first to optimize away locking for the most time
|
|
@@ -1324,9 +1327,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
|
|
*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
|
|
|
|
if (bdi_bg_thresh)
|
|
- *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
|
|
- background_thresh,
|
|
- dirty_thresh);
|
|
+ *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
|
|
+ background_thresh,
|
|
+ dirty_thresh) : 0;
|
|
|
|
/*
|
|
* In order to avoid the stacked BDI deadlock we need
|
|
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
|
index 7e7f947..0479732 100644
|
|
--- a/mm/page_alloc.c
|
|
+++ b/mm/page_alloc.c
|
|
@@ -408,7 +408,8 @@ static int destroy_compound_page(struct page *page, unsigned long order)
|
|
return bad;
|
|
}
|
|
|
|
-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
|
|
+static inline void prep_zero_page(struct page *page, unsigned int order,
|
|
+ gfp_t gfp_flags)
|
|
{
|
|
int i;
|
|
|
|
@@ -452,7 +453,7 @@ static inline void set_page_guard_flag(struct page *page) { }
|
|
static inline void clear_page_guard_flag(struct page *page) { }
|
|
#endif
|
|
|
|
-static inline void set_page_order(struct page *page, int order)
|
|
+static inline void set_page_order(struct page *page, unsigned int order)
|
|
{
|
|
set_page_private(page, order);
|
|
__SetPageBuddy(page);
|
|
@@ -503,21 +504,31 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
|
|
* For recording page's order, we use page_private(page).
|
|
*/
|
|
static inline int page_is_buddy(struct page *page, struct page *buddy,
|
|
- int order)
|
|
+ unsigned int order)
|
|
{
|
|
if (!pfn_valid_within(page_to_pfn(buddy)))
|
|
return 0;
|
|
|
|
- if (page_zone_id(page) != page_zone_id(buddy))
|
|
- return 0;
|
|
-
|
|
if (page_is_guard(buddy) && page_order(buddy) == order) {
|
|
VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
|
|
+
|
|
+ if (page_zone_id(page) != page_zone_id(buddy))
|
|
+ return 0;
|
|
+
|
|
return 1;
|
|
}
|
|
|
|
if (PageBuddy(buddy) && page_order(buddy) == order) {
|
|
VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
|
|
+
|
|
+ /*
|
|
+ * zone check is done late to avoid uselessly
|
|
+ * calculating zone/node ids for pages that could
|
|
+ * never merge.
|
|
+ */
|
|
+ if (page_zone_id(page) != page_zone_id(buddy))
|
|
+ return 0;
|
|
+
|
|
return 1;
|
|
}
|
|
return 0;
|
|
@@ -549,6 +560,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
|
|
*/
|
|
|
|
static inline void __free_one_page(struct page *page,
|
|
+ unsigned long pfn,
|
|
struct zone *zone, unsigned int order,
|
|
int migratetype)
|
|
{
|
|
@@ -565,7 +577,7 @@ static inline void __free_one_page(struct page *page,
|
|
|
|
VM_BUG_ON(migratetype == -1);
|
|
|
|
- page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
|
|
+ page_idx = pfn & ((1 << MAX_ORDER) - 1);
|
|
|
|
VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
|
|
VM_BUG_ON_PAGE(bad_range(zone, page), page);
|
|
@@ -666,9 +678,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
|
|
int migratetype = 0;
|
|
int batch_free = 0;
|
|
int to_free = count;
|
|
+ unsigned long nr_scanned;
|
|
|
|
spin_lock(&zone->lock);
|
|
- zone->pages_scanned = 0;
|
|
+ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
|
|
+ if (nr_scanned)
|
|
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
|
|
|
|
while (to_free) {
|
|
struct page *page;
|
|
@@ -700,7 +715,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
|
|
list_del(&page->lru);
|
|
mt = get_freepage_migratetype(page);
|
|
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
|
|
- __free_one_page(page, zone, 0, mt);
|
|
+ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
|
|
trace_mm_page_pcpu_drain(page, 0, mt);
|
|
if (likely(!is_migrate_isolate_page(page))) {
|
|
__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
|
|
@@ -712,13 +727,18 @@ static void free_pcppages_bulk(struct zone *zone, int count,
|
|
spin_unlock(&zone->lock);
|
|
}
|
|
|
|
-static void free_one_page(struct zone *zone, struct page *page, int order,
|
|
+static void free_one_page(struct zone *zone,
|
|
+ struct page *page, unsigned long pfn,
|
|
+ unsigned int order,
|
|
int migratetype)
|
|
{
|
|
+ unsigned long nr_scanned;
|
|
spin_lock(&zone->lock);
|
|
- zone->pages_scanned = 0;
|
|
+ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
|
|
+ if (nr_scanned)
|
|
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
|
|
|
|
- __free_one_page(page, zone, order, migratetype);
|
|
+ __free_one_page(page, pfn, zone, order, migratetype);
|
|
if (unlikely(!is_migrate_isolate(migratetype)))
|
|
__mod_zone_freepage_state(zone, 1 << order, migratetype);
|
|
spin_unlock(&zone->lock);
|
|
@@ -755,15 +775,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
|
|
{
|
|
unsigned long flags;
|
|
int migratetype;
|
|
+ unsigned long pfn = page_to_pfn(page);
|
|
|
|
if (!free_pages_prepare(page, order))
|
|
return;
|
|
|
|
+ migratetype = get_pfnblock_migratetype(page, pfn);
|
|
local_irq_save(flags);
|
|
__count_vm_events(PGFREE, 1 << order);
|
|
- migratetype = get_pageblock_migratetype(page);
|
|
set_freepage_migratetype(page, migratetype);
|
|
- free_one_page(page_zone(page), page, order, migratetype);
|
|
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
@@ -894,7 +915,7 @@ static inline int check_new_page(struct page *page)
|
|
return 0;
|
|
}
|
|
|
|
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
|
|
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
|
|
{
|
|
int i;
|
|
|
|
@@ -943,6 +964,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
|
|
rmv_page_order(page);
|
|
area->nr_free--;
|
|
expand(zone, page, order, current_order, area, migratetype);
|
|
+ set_freepage_migratetype(page, migratetype);
|
|
return page;
|
|
}
|
|
|
|
@@ -1059,8 +1081,8 @@ static void change_pageblock_range(struct page *pageblock_page,
|
|
* nor move CMA pages to different free lists. We don't want unmovable pages
|
|
* to be allocated from MIGRATE_CMA areas.
|
|
*
|
|
- * Returns the new migratetype of the pageblock (or the same old migratetype
|
|
- * if it was unchanged).
|
|
+ * Returns the allocation migratetype if free pages were stolen, or the
|
|
+ * fallback migratetype if it was decided not to steal.
|
|
*/
|
|
static int try_to_steal_freepages(struct zone *zone, struct page *page,
|
|
int start_type, int fallback_type)
|
|
@@ -1069,7 +1091,9 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
|
|
|
|
/*
|
|
* When borrowing from MIGRATE_CMA, we need to release the excess
|
|
- * buddy pages to CMA itself.
|
|
+ * buddy pages to CMA itself. We also ensure the freepage_migratetype
|
|
+ * is set to CMA so it is returned to the correct freelist in case
|
|
+ * the page ends up being not actually allocated from the pcp lists.
|
|
*/
|
|
if (is_migrate_cma(fallback_type))
|
|
return fallback_type;
|
|
@@ -1089,12 +1113,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
|
|
|
|
/* Claim the whole block if over half of it is free */
|
|
if (pages >= (1 << (pageblock_order-1)) ||
|
|
- page_group_by_mobility_disabled) {
|
|
-
|
|
+ page_group_by_mobility_disabled)
|
|
set_pageblock_migratetype(page, start_type);
|
|
- return start_type;
|
|
- }
|
|
|
|
+ return start_type;
|
|
}
|
|
|
|
return fallback_type;
|
|
@@ -1102,16 +1124,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
|
|
|
|
/* Remove an element from the buddy allocator from the fallback list */
|
|
static inline struct page *
|
|
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
|
|
+__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
|
|
{
|
|
struct free_area *area;
|
|
- int current_order;
|
|
+ unsigned int current_order;
|
|
struct page *page;
|
|
int migratetype, new_type, i;
|
|
|
|
/* Find the largest possible block of pages in the other list */
|
|
- for (current_order = MAX_ORDER-1; current_order >= order;
|
|
- --current_order) {
|
|
+ for (current_order = MAX_ORDER-1;
|
|
+ current_order >= order && current_order <= MAX_ORDER-1;
|
|
+ --current_order) {
|
|
for (i = 0;; i++) {
|
|
migratetype = fallbacks[start_migratetype][i];
|
|
|
|
@@ -1137,9 +1160,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
|
|
|
|
expand(zone, page, order, current_order, area,
|
|
new_type);
|
|
+ /* The freepage_migratetype may differ from pageblock's
|
|
+ * migratetype depending on the decisions in
|
|
+ * try_to_steal_freepages. This is OK as long as it does
|
|
+ * not differ for MIGRATE_CMA type.
|
|
+ */
|
|
+ set_freepage_migratetype(page, new_type);
|
|
|
|
trace_mm_page_alloc_extfrag(page, order, current_order,
|
|
- start_migratetype, migratetype, new_type);
|
|
+ start_migratetype, migratetype);
|
|
|
|
return page;
|
|
}
|
|
@@ -1185,9 +1214,9 @@ retry_reserve:
|
|
*/
|
|
static int rmqueue_bulk(struct zone *zone, unsigned int order,
|
|
unsigned long count, struct list_head *list,
|
|
- int migratetype, int cold)
|
|
+ int migratetype, bool cold)
|
|
{
|
|
- int mt = migratetype, i;
|
|
+ int i;
|
|
|
|
spin_lock(&zone->lock);
|
|
for (i = 0; i < count; ++i) {
|
|
@@ -1204,18 +1233,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
|
|
* merge IO requests if the physical pages are ordered
|
|
* properly.
|
|
*/
|
|
- if (likely(cold == 0))
|
|
+ if (likely(!cold))
|
|
list_add(&page->lru, list);
|
|
else
|
|
list_add_tail(&page->lru, list);
|
|
- if (IS_ENABLED(CONFIG_CMA)) {
|
|
- mt = get_pageblock_migratetype(page);
|
|
- if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
|
|
- mt = migratetype;
|
|
- }
|
|
- set_freepage_migratetype(page, mt);
|
|
list = &page->lru;
|
|
- if (is_migrate_cma(mt))
|
|
+ if (is_migrate_cma(get_freepage_migratetype(page)))
|
|
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
|
|
-(1 << order));
|
|
}
|
|
@@ -1339,7 +1362,7 @@ void mark_free_pages(struct zone *zone)
|
|
{
|
|
unsigned long pfn, max_zone_pfn;
|
|
unsigned long flags;
|
|
- int order, t;
|
|
+ unsigned int order, t;
|
|
struct list_head *curr;
|
|
|
|
if (zone_is_empty(zone))
|
|
@@ -1371,19 +1394,20 @@ void mark_free_pages(struct zone *zone)
|
|
|
|
/*
|
|
* Free a 0-order page
|
|
- * cold == 1 ? free a cold page : free a hot page
|
|
+ * cold == true ? free a cold page : free a hot page
|
|
*/
|
|
-void free_hot_cold_page(struct page *page, int cold)
|
|
+void free_hot_cold_page(struct page *page, bool cold)
|
|
{
|
|
struct zone *zone = page_zone(page);
|
|
struct per_cpu_pages *pcp;
|
|
unsigned long flags;
|
|
+ unsigned long pfn = page_to_pfn(page);
|
|
int migratetype;
|
|
|
|
if (!free_pages_prepare(page, 0))
|
|
return;
|
|
|
|
- migratetype = get_pageblock_migratetype(page);
|
|
+ migratetype = get_pfnblock_migratetype(page, pfn);
|
|
set_freepage_migratetype(page, migratetype);
|
|
local_irq_save(flags);
|
|
__count_vm_event(PGFREE);
|
|
@@ -1397,17 +1421,17 @@ void free_hot_cold_page(struct page *page, int cold)
|
|
*/
|
|
if (migratetype >= MIGRATE_PCPTYPES) {
|
|
if (unlikely(is_migrate_isolate(migratetype))) {
|
|
- free_one_page(zone, page, 0, migratetype);
|
|
+ free_one_page(zone, page, pfn, 0, migratetype);
|
|
goto out;
|
|
}
|
|
migratetype = MIGRATE_MOVABLE;
|
|
}
|
|
|
|
pcp = &this_cpu_ptr(zone->pageset)->pcp;
|
|
- if (cold)
|
|
- list_add_tail(&page->lru, &pcp->lists[migratetype]);
|
|
- else
|
|
+ if (!cold)
|
|
list_add(&page->lru, &pcp->lists[migratetype]);
|
|
+ else
|
|
+ list_add_tail(&page->lru, &pcp->lists[migratetype]);
|
|
pcp->count++;
|
|
if (pcp->count >= pcp->high) {
|
|
unsigned long batch = ACCESS_ONCE(pcp->batch);
|
|
@@ -1422,7 +1446,7 @@ out:
|
|
/*
|
|
* Free a list of 0-order pages
|
|
*/
|
|
-void free_hot_cold_page_list(struct list_head *list, int cold)
|
|
+void free_hot_cold_page_list(struct list_head *list, bool cold)
|
|
{
|
|
struct page *page, *next;
|
|
|
|
@@ -1534,12 +1558,12 @@ int split_free_page(struct page *page)
|
|
*/
|
|
static inline
|
|
struct page *buffered_rmqueue(struct zone *preferred_zone,
|
|
- struct zone *zone, int order, gfp_t gfp_flags,
|
|
- int migratetype)
|
|
+ struct zone *zone, unsigned int order,
|
|
+ gfp_t gfp_flags, int migratetype)
|
|
{
|
|
unsigned long flags;
|
|
struct page *page;
|
|
- int cold = !!(gfp_flags & __GFP_COLD);
|
|
+ bool cold = ((gfp_flags & __GFP_COLD) != 0);
|
|
|
|
again:
|
|
if (likely(order == 0)) {
|
|
@@ -1584,10 +1608,13 @@ again:
|
|
if (!page)
|
|
goto failed;
|
|
__mod_zone_freepage_state(zone, -(1 << order),
|
|
- get_pageblock_migratetype(page));
|
|
+ get_freepage_migratetype(page));
|
|
}
|
|
|
|
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
|
|
+ if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
|
|
+ !zone_is_fair_depleted(zone))
|
|
+ zone_set_flag(zone, ZONE_FAIR_DEPLETED);
|
|
|
|
__count_zone_vm_events(PGALLOC, zone, 1 << order);
|
|
zone_statistics(preferred_zone, zone, gfp_flags);
|
|
@@ -1684,12 +1711,12 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
|
|
* Return true if free pages are above 'mark'. This takes into account the order
|
|
* of the allocation.
|
|
*/
|
|
-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
|
- int classzone_idx, int alloc_flags, long free_pages)
|
|
+static bool __zone_watermark_ok(struct zone *z, unsigned int order,
|
|
+ unsigned long mark, int classzone_idx, int alloc_flags,
|
|
+ long free_pages)
|
|
{
|
|
/* free_pages my go negative - that's OK */
|
|
long min = mark;
|
|
- long lowmem_reserve = z->lowmem_reserve[classzone_idx];
|
|
int o;
|
|
long free_cma = 0;
|
|
|
|
@@ -1704,7 +1731,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
|
free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
|
|
#endif
|
|
|
|
- if (free_pages - free_cma <= min + lowmem_reserve)
|
|
+ if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
|
|
return false;
|
|
for (o = 0; o < order; o++) {
|
|
/* At the next order, this order's pages become unavailable */
|
|
@@ -1719,15 +1746,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
|
return true;
|
|
}
|
|
|
|
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
|
|
+bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
|
|
int classzone_idx, int alloc_flags)
|
|
{
|
|
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
|
|
zone_page_state(z, NR_FREE_PAGES));
|
|
}
|
|
|
|
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
|
|
- int classzone_idx, int alloc_flags)
|
|
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
|
|
+ unsigned long mark, int classzone_idx, int alloc_flags)
|
|
{
|
|
long free_pages = zone_page_state(z, NR_FREE_PAGES);
|
|
|
|
@@ -1869,7 +1896,7 @@ static void __paginginit init_zone_allows_reclaim(int nid)
|
|
{
|
|
int i;
|
|
|
|
- for_each_online_node(i)
|
|
+ for_each_node_state(i, N_MEMORY)
|
|
if (node_distance(nid, i) <= RECLAIM_DISTANCE)
|
|
node_set(i, NODE_DATA(nid)->reclaim_nodes);
|
|
else
|
|
@@ -1912,6 +1939,18 @@ static inline void init_zone_allows_reclaim(int nid)
|
|
}
|
|
#endif /* CONFIG_NUMA */
|
|
|
|
+static void reset_alloc_batches(struct zone *preferred_zone)
|
|
+{
|
|
+ struct zone *zone = preferred_zone->zone_pgdat->node_zones;
|
|
+
|
|
+ do {
|
|
+ mod_zone_page_state(zone, NR_ALLOC_BATCH,
|
|
+ high_wmark_pages(zone) - low_wmark_pages(zone) -
|
|
+ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
|
|
+ zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
|
|
+ } while (zone++ != preferred_zone);
|
|
+}
|
|
+
|
|
/*
|
|
* get_page_from_freelist goes through the zonelist trying to allocate
|
|
* a page.
|
|
@@ -1919,18 +1958,22 @@ static inline void init_zone_allows_reclaim(int nid)
|
|
static struct page *
|
|
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
|
|
struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
|
|
- struct zone *preferred_zone, int migratetype)
|
|
+ struct zone *preferred_zone, int classzone_idx, int migratetype)
|
|
{
|
|
struct zoneref *z;
|
|
struct page *page = NULL;
|
|
- int classzone_idx;
|
|
struct zone *zone;
|
|
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
|
|
int zlc_active = 0; /* set if using zonelist_cache */
|
|
int did_zlc_setup = 0; /* just call zlc_setup() one time */
|
|
+ bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
|
|
+ (gfp_mask & __GFP_WRITE);
|
|
+ int nr_fair_skipped = 0;
|
|
+ bool zonelist_rescan;
|
|
|
|
- classzone_idx = zone_idx(preferred_zone);
|
|
zonelist_scan:
|
|
+ zonelist_rescan = false;
|
|
+
|
|
/*
|
|
* Scan zonelist, looking for a zone with enough free.
|
|
* See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
|
|
@@ -1942,12 +1985,10 @@ zonelist_scan:
|
|
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
|
|
!zlc_zone_worth_trying(zonelist, z, allowednodes))
|
|
continue;
|
|
- if ((alloc_flags & ALLOC_CPUSET) &&
|
|
+ if (cpusets_enabled() &&
|
|
+ (alloc_flags & ALLOC_CPUSET) &&
|
|
!cpuset_zone_allowed_softwall(zone, gfp_mask))
|
|
continue;
|
|
- BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
|
|
- if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
|
|
- goto try_this_zone;
|
|
/*
|
|
* Distribute pages in proportion to the individual
|
|
* zone size to ensure fair page aging. The zone a
|
|
@@ -1956,9 +1997,11 @@ zonelist_scan:
|
|
*/
|
|
if (alloc_flags & ALLOC_FAIR) {
|
|
if (!zone_local(preferred_zone, zone))
|
|
+ break;
|
|
+ if (zone_is_fair_depleted(zone)) {
|
|
+ nr_fair_skipped++;
|
|
continue;
|
|
- if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
|
|
- continue;
|
|
+ }
|
|
}
|
|
/*
|
|
* When allocating a page cache page for writing, we
|
|
@@ -1986,15 +2029,19 @@ zonelist_scan:
|
|
* will require awareness of zones in the
|
|
* dirty-throttling and the flusher threads.
|
|
*/
|
|
- if ((alloc_flags & ALLOC_WMARK_LOW) &&
|
|
- (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
|
|
- goto this_zone_full;
|
|
+ if (consider_zone_dirty && !zone_dirty_ok(zone))
|
|
+ continue;
|
|
|
|
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
|
|
if (!zone_watermark_ok(zone, order, mark,
|
|
classzone_idx, alloc_flags)) {
|
|
int ret;
|
|
|
|
+ /* Checked here to keep the fast path fast */
|
|
+ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
|
|
+ if (alloc_flags & ALLOC_NO_WATERMARKS)
|
|
+ goto try_this_zone;
|
|
+
|
|
if (IS_ENABLED(CONFIG_NUMA) &&
|
|
!did_zlc_setup && nr_online_nodes > 1) {
|
|
/*
|
|
@@ -2056,17 +2103,11 @@ try_this_zone:
|
|
if (page)
|
|
break;
|
|
this_zone_full:
|
|
- if (IS_ENABLED(CONFIG_NUMA))
|
|
+ if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
|
|
zlc_mark_zone_full(zonelist, z);
|
|
}
|
|
|
|
- if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
|
|
- /* Disable zlc cache for second zonelist scan */
|
|
- zlc_active = 0;
|
|
- goto zonelist_scan;
|
|
- }
|
|
-
|
|
- if (page)
|
|
+ if (page) {
|
|
/*
|
|
* page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
|
|
* necessary to allocate the page. The expectation is
|
|
@@ -2075,8 +2116,37 @@ this_zone_full:
|
|
* for !PFMEMALLOC purposes.
|
|
*/
|
|
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
|
|
+ return page;
|
|
+ }
|
|
|
|
- return page;
|
|
+ /*
|
|
+ * The first pass makes sure allocations are spread fairly within the
|
|
+ * local node. However, the local node might have free pages left
|
|
+ * after the fairness batches are exhausted, and remote zones haven't
|
|
+ * even been considered yet. Try once more without fairness, and
|
|
+ * include remote zones now, before entering the slowpath and waking
|
|
+ * kswapd: prefer spilling to a remote zone over swapping locally.
|
|
+ */
|
|
+ if (alloc_flags & ALLOC_FAIR) {
|
|
+ alloc_flags &= ~ALLOC_FAIR;
|
|
+ if (nr_fair_skipped) {
|
|
+ zonelist_rescan = true;
|
|
+ reset_alloc_batches(preferred_zone);
|
|
+ }
|
|
+ if (nr_online_nodes > 1)
|
|
+ zonelist_rescan = true;
|
|
+ }
|
|
+
|
|
+ if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
|
|
+ /* Disable zlc cache for second zonelist scan */
|
|
+ zlc_active = 0;
|
|
+ zonelist_rescan = true;
|
|
+ }
|
|
+
|
|
+ if (zonelist_rescan)
|
|
+ goto zonelist_scan;
|
|
+
|
|
+ return NULL;
|
|
}
|
|
|
|
/*
|
|
@@ -2185,7 +2255,7 @@ static inline struct page *
|
|
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
nodemask_t *nodemask, struct zone *preferred_zone,
|
|
- int migratetype)
|
|
+ int classzone_idx, int migratetype)
|
|
{
|
|
struct page *page;
|
|
|
|
@@ -2196,6 +2266,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
|
|
}
|
|
|
|
/*
|
|
+ * PM-freezer should be notified that there might be an OOM killer on
|
|
+ * its way to kill and wake somebody up. This is too early and we might
|
|
+ * end up not killing anything but false positives are acceptable.
|
|
+ * See freeze_processes.
|
|
+ */
|
|
+ note_oom_kill();
|
|
+
|
|
+ /*
|
|
* Go through the zonelist yet one more time, keep very high watermark
|
|
* here, this is only to catch a parallel oom killing, we must fail if
|
|
* we're still under heavy pressure.
|
|
@@ -2203,7 +2281,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
|
|
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
|
|
order, zonelist, high_zoneidx,
|
|
ALLOC_WMARK_HIGH|ALLOC_CPUSET,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
if (page)
|
|
goto out;
|
|
|
|
@@ -2238,7 +2316,7 @@ static struct page *
|
|
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
|
|
- int migratetype, bool sync_migration,
|
|
+ int classzone_idx, int migratetype, enum migrate_mode mode,
|
|
bool *contended_compaction, bool *deferred_compaction,
|
|
unsigned long *did_some_progress)
|
|
{
|
|
@@ -2252,7 +2330,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
|
|
current->flags |= PF_MEMALLOC;
|
|
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
|
|
- nodemask, sync_migration,
|
|
+ nodemask, mode,
|
|
contended_compaction);
|
|
current->flags &= ~PF_MEMALLOC;
|
|
|
|
@@ -2266,7 +2344,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
page = get_page_from_freelist(gfp_mask, nodemask,
|
|
order, zonelist, high_zoneidx,
|
|
alloc_flags & ~ALLOC_NO_WATERMARKS,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
if (page) {
|
|
preferred_zone->compact_blockskip_flush = false;
|
|
compaction_defer_reset(preferred_zone, order, true);
|
|
@@ -2285,7 +2363,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
* As async compaction considers a subset of pageblocks, only
|
|
* defer if the failure was a sync compaction failure.
|
|
*/
|
|
- if (sync_migration)
|
|
+ if (mode != MIGRATE_ASYNC)
|
|
defer_compaction(preferred_zone, order);
|
|
|
|
cond_resched();
|
|
@@ -2298,9 +2376,9 @@ static inline struct page *
|
|
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
|
|
- int migratetype, bool sync_migration,
|
|
- bool *contended_compaction, bool *deferred_compaction,
|
|
- unsigned long *did_some_progress)
|
|
+ int classzone_idx, int migratetype,
|
|
+ enum migrate_mode mode, bool *contended_compaction,
|
|
+ bool *deferred_compaction, unsigned long *did_some_progress)
|
|
{
|
|
return NULL;
|
|
}
|
|
@@ -2339,7 +2417,7 @@ static inline struct page *
|
|
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
|
|
- int migratetype, unsigned long *did_some_progress)
|
|
+ int classzone_idx, int migratetype, unsigned long *did_some_progress)
|
|
{
|
|
struct page *page = NULL;
|
|
bool drained = false;
|
|
@@ -2357,7 +2435,8 @@ retry:
|
|
page = get_page_from_freelist(gfp_mask, nodemask, order,
|
|
zonelist, high_zoneidx,
|
|
alloc_flags & ~ALLOC_NO_WATERMARKS,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx,
|
|
+ migratetype);
|
|
|
|
/*
|
|
* If an allocation failed after direct reclaim, it could be because
|
|
@@ -2380,14 +2459,14 @@ static inline struct page *
|
|
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
nodemask_t *nodemask, struct zone *preferred_zone,
|
|
- int migratetype)
|
|
+ int classzone_idx, int migratetype)
|
|
{
|
|
struct page *page;
|
|
|
|
do {
|
|
page = get_page_from_freelist(gfp_mask, nodemask, order,
|
|
zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
|
|
if (!page && gfp_mask & __GFP_NOFAIL)
|
|
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
|
|
@@ -2396,28 +2475,6 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
|
|
return page;
|
|
}
|
|
|
|
-static void reset_alloc_batches(struct zonelist *zonelist,
|
|
- enum zone_type high_zoneidx,
|
|
- struct zone *preferred_zone)
|
|
-{
|
|
- struct zoneref *z;
|
|
- struct zone *zone;
|
|
-
|
|
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
|
|
- /*
|
|
- * Only reset the batches of zones that were actually
|
|
- * considered in the fairness pass, we don't want to
|
|
- * trash fairness information for zones that are not
|
|
- * actually part of this zonelist's round-robin cycle.
|
|
- */
|
|
- if (!zone_local(preferred_zone, zone))
|
|
- continue;
|
|
- mod_zone_page_state(zone, NR_ALLOC_BATCH,
|
|
- high_wmark_pages(zone) - low_wmark_pages(zone) -
|
|
- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
|
|
- }
|
|
-}
|
|
-
|
|
static void wake_all_kswapds(unsigned int order,
|
|
struct zonelist *zonelist,
|
|
enum zone_type high_zoneidx,
|
|
@@ -2434,7 +2491,7 @@ static inline int
|
|
gfp_to_alloc_flags(gfp_t gfp_mask)
|
|
{
|
|
int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
|
|
- const gfp_t wait = gfp_mask & __GFP_WAIT;
|
|
+ const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
|
|
|
|
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
|
|
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
|
|
@@ -2443,20 +2500,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
|
|
* The caller may dip into page reserves a bit more if the caller
|
|
* cannot run direct reclaim, or if the caller has realtime scheduling
|
|
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
|
|
- * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
|
|
+ * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
|
|
*/
|
|
alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
|
|
|
|
- if (!wait) {
|
|
+ if (atomic) {
|
|
/*
|
|
- * Not worth trying to allocate harder for
|
|
- * __GFP_NOMEMALLOC even if it can't schedule.
|
|
+ * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
|
|
+ * if it can't schedule.
|
|
*/
|
|
- if (!(gfp_mask & __GFP_NOMEMALLOC))
|
|
+ if (!(gfp_mask & __GFP_NOMEMALLOC))
|
|
alloc_flags |= ALLOC_HARDER;
|
|
/*
|
|
- * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
|
|
- * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
|
|
+ * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
|
|
+ * comment for __cpuset_node_allowed_softwall().
|
|
*/
|
|
alloc_flags &= ~ALLOC_CPUSET;
|
|
} else if (unlikely(rt_task(current)) && !in_interrupt())
|
|
@@ -2488,14 +2545,14 @@ static inline struct page *
|
|
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
|
|
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
|
nodemask_t *nodemask, struct zone *preferred_zone,
|
|
- int migratetype)
|
|
+ int classzone_idx, int migratetype)
|
|
{
|
|
const gfp_t wait = gfp_mask & __GFP_WAIT;
|
|
struct page *page = NULL;
|
|
int alloc_flags;
|
|
unsigned long pages_reclaimed = 0;
|
|
unsigned long did_some_progress;
|
|
- bool sync_migration = false;
|
|
+ enum migrate_mode migration_mode = MIGRATE_ASYNC;
|
|
bool deferred_compaction = false;
|
|
bool contended_compaction = false;
|
|
|
|
@@ -2537,15 +2594,19 @@ restart:
|
|
* Find the true preferred zone if the allocation is unconstrained by
|
|
* cpusets.
|
|
*/
|
|
- if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
|
|
- first_zones_zonelist(zonelist, high_zoneidx, NULL,
|
|
- &preferred_zone);
|
|
+ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
|
|
+ struct zoneref *preferred_zoneref;
|
|
+ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
|
|
+ NULL,
|
|
+ &preferred_zone);
|
|
+ classzone_idx = zonelist_zone_idx(preferred_zoneref);
|
|
+ }
|
|
|
|
rebalance:
|
|
/* This is the last chance, in general, before the goto nopage. */
|
|
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
|
|
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
if (page)
|
|
goto got_pg;
|
|
|
|
@@ -2560,7 +2621,7 @@ rebalance:
|
|
|
|
page = __alloc_pages_high_priority(gfp_mask, order,
|
|
zonelist, high_zoneidx, nodemask,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
if (page) {
|
|
goto got_pg;
|
|
}
|
|
@@ -2589,17 +2650,16 @@ rebalance:
|
|
* Try direct compaction. The first pass is asynchronous. Subsequent
|
|
* attempts after direct reclaim are synchronous
|
|
*/
|
|
- page = __alloc_pages_direct_compact(gfp_mask, order,
|
|
- zonelist, high_zoneidx,
|
|
- nodemask,
|
|
- alloc_flags, preferred_zone,
|
|
- migratetype, sync_migration,
|
|
- &contended_compaction,
|
|
+ page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
|
|
+ high_zoneidx, nodemask, alloc_flags,
|
|
+ preferred_zone,
|
|
+ classzone_idx, migratetype,
|
|
+ migration_mode, &contended_compaction,
|
|
&deferred_compaction,
|
|
&did_some_progress);
|
|
if (page)
|
|
goto got_pg;
|
|
- sync_migration = true;
|
|
+ migration_mode = MIGRATE_SYNC_LIGHT;
|
|
|
|
/*
|
|
* If compaction is deferred for high-order allocations, it is because
|
|
@@ -2616,7 +2676,8 @@ rebalance:
|
|
zonelist, high_zoneidx,
|
|
nodemask,
|
|
alloc_flags, preferred_zone,
|
|
- migratetype, &did_some_progress);
|
|
+ classzone_idx, migratetype,
|
|
+ &did_some_progress);
|
|
if (page)
|
|
goto got_pg;
|
|
|
|
@@ -2635,7 +2696,7 @@ rebalance:
|
|
page = __alloc_pages_may_oom(gfp_mask, order,
|
|
zonelist, high_zoneidx,
|
|
nodemask, preferred_zone,
|
|
- migratetype);
|
|
+ classzone_idx, migratetype);
|
|
if (page)
|
|
goto got_pg;
|
|
|
|
@@ -2674,12 +2735,11 @@ rebalance:
|
|
* direct reclaim and reclaim/compaction depends on compaction
|
|
* being called after reclaim so call directly if necessary
|
|
*/
|
|
- page = __alloc_pages_direct_compact(gfp_mask, order,
|
|
- zonelist, high_zoneidx,
|
|
- nodemask,
|
|
- alloc_flags, preferred_zone,
|
|
- migratetype, sync_migration,
|
|
- &contended_compaction,
|
|
+ page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
|
|
+ high_zoneidx, nodemask, alloc_flags,
|
|
+ preferred_zone,
|
|
+ classzone_idx, migratetype,
|
|
+ migration_mode, &contended_compaction,
|
|
&deferred_compaction,
|
|
&did_some_progress);
|
|
if (page)
|
|
@@ -2705,11 +2765,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
|
|
{
|
|
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
|
|
struct zone *preferred_zone;
|
|
+ struct zoneref *preferred_zoneref;
|
|
struct page *page = NULL;
|
|
int migratetype = allocflags_to_migratetype(gfp_mask);
|
|
unsigned int cpuset_mems_cookie;
|
|
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
|
|
struct mem_cgroup *memcg = NULL;
|
|
+ int classzone_idx;
|
|
|
|
gfp_mask &= gfp_allowed_mask;
|
|
|
|
@@ -2736,42 +2798,26 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
|
|
return NULL;
|
|
|
|
retry_cpuset:
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
|
|
/* The preferred zone is used for statistics later */
|
|
- first_zones_zonelist(zonelist, high_zoneidx,
|
|
+ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
|
|
nodemask ? : &cpuset_current_mems_allowed,
|
|
&preferred_zone);
|
|
if (!preferred_zone)
|
|
goto out;
|
|
+ classzone_idx = zonelist_zone_idx(preferred_zoneref);
|
|
|
|
#ifdef CONFIG_CMA
|
|
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
|
|
alloc_flags |= ALLOC_CMA;
|
|
#endif
|
|
-retry:
|
|
/* First allocation attempt */
|
|
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
|
|
zonelist, high_zoneidx, alloc_flags,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
if (unlikely(!page)) {
|
|
/*
|
|
- * The first pass makes sure allocations are spread
|
|
- * fairly within the local node. However, the local
|
|
- * node might have free pages left after the fairness
|
|
- * batches are exhausted, and remote zones haven't
|
|
- * even been considered yet. Try once more without
|
|
- * fairness, and include remote zones now, before
|
|
- * entering the slowpath and waking kswapd: prefer
|
|
- * spilling to a remote zone over swapping locally.
|
|
- */
|
|
- if (alloc_flags & ALLOC_FAIR) {
|
|
- reset_alloc_batches(zonelist, high_zoneidx,
|
|
- preferred_zone);
|
|
- alloc_flags &= ~ALLOC_FAIR;
|
|
- goto retry;
|
|
- }
|
|
- /*
|
|
* Runtime PM, block IO and its error handling path
|
|
* can deadlock because I/O on the device might not
|
|
* complete.
|
|
@@ -2779,7 +2825,7 @@ retry:
|
|
gfp_mask = memalloc_noio_flags(gfp_mask);
|
|
page = __alloc_pages_slowpath(gfp_mask, order,
|
|
zonelist, high_zoneidx, nodemask,
|
|
- preferred_zone, migratetype);
|
|
+ preferred_zone, classzone_idx, migratetype);
|
|
}
|
|
|
|
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
|
|
@@ -2791,7 +2837,7 @@ out:
|
|
* the mask is being updated. If a page allocation is about to fail,
|
|
* check if the cpuset changed during allocation and if so, retry.
|
|
*/
|
|
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
|
|
+ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
|
|
goto retry_cpuset;
|
|
|
|
memcg_kmem_commit_charge(page, memcg, order);
|
|
@@ -2830,7 +2876,7 @@ void __free_pages(struct page *page, unsigned int order)
|
|
{
|
|
if (put_page_testzero(page)) {
|
|
if (order == 0)
|
|
- free_hot_cold_page(page, 0);
|
|
+ free_hot_cold_page(page, false);
|
|
else
|
|
__free_pages_ok(page, order);
|
|
}
|
|
@@ -3059,9 +3105,9 @@ bool skip_free_areas_node(unsigned int flags, int nid)
|
|
goto out;
|
|
|
|
do {
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
ret = !node_isset(nid, cpuset_current_mems_allowed);
|
|
- } while (!put_mems_allowed(cpuset_mems_cookie));
|
|
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
|
|
out:
|
|
return ret;
|
|
}
|
|
@@ -3214,12 +3260,12 @@ void show_free_areas(unsigned int filter)
|
|
K(zone_page_state(zone, NR_BOUNCE)),
|
|
K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
|
|
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
|
|
- zone->pages_scanned,
|
|
+ K(zone_page_state(zone, NR_PAGES_SCANNED)),
|
|
(!zone_reclaimable(zone) ? "yes" : "no")
|
|
);
|
|
printk("lowmem_reserve[]:");
|
|
for (i = 0; i < MAX_NR_ZONES; i++)
|
|
- printk(" %lu", zone->lowmem_reserve[i]);
|
|
+ printk(" %ld", zone->lowmem_reserve[i]);
|
|
printk("\n");
|
|
}
|
|
|
|
@@ -4107,7 +4153,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
|
|
|
static void __meminit zone_init_free_lists(struct zone *zone)
|
|
{
|
|
- int order, t;
|
|
+ unsigned int order, t;
|
|
for_each_migratetype_order(order, t) {
|
|
INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
|
|
zone->free_area[order].nr_free = 0;
|
|
@@ -4933,7 +4979,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
|
|
|
|
pgdat->node_id = nid;
|
|
pgdat->node_start_pfn = node_start_pfn;
|
|
- init_zone_allows_reclaim(nid);
|
|
+ if (node_state(nid, N_MEMORY))
|
|
+ init_zone_allows_reclaim(nid);
|
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
|
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
|
|
#endif
|
|
@@ -5546,7 +5593,7 @@ static void calculate_totalreserve_pages(void)
|
|
for_each_online_pgdat(pgdat) {
|
|
for (i = 0; i < MAX_NR_ZONES; i++) {
|
|
struct zone *zone = pgdat->node_zones + i;
|
|
- unsigned long max = 0;
|
|
+ long max = 0;
|
|
|
|
/* Find valid and maximum lowmem_reserve in the zone */
|
|
for (j = i; j < MAX_NR_ZONES; j++) {
|
|
@@ -5661,9 +5708,8 @@ static void __setup_per_zone_wmarks(void)
|
|
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
|
|
|
|
__mod_zone_page_state(zone, NR_ALLOC_BATCH,
|
|
- high_wmark_pages(zone) -
|
|
- low_wmark_pages(zone) -
|
|
- zone_page_state(zone, NR_ALLOC_BATCH));
|
|
+ high_wmark_pages(zone) - low_wmark_pages(zone) -
|
|
+ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
|
|
|
|
setup_zone_migrate_reserve(zone);
|
|
spin_unlock_irqrestore(&zone->lock, flags);
|
|
@@ -6035,17 +6081,16 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
|
|
* @end_bitidx: The last bit of interest
|
|
* returns pageblock_bits flags
|
|
*/
|
|
-unsigned long get_pageblock_flags_mask(struct page *page,
|
|
+unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
|
|
unsigned long end_bitidx,
|
|
unsigned long mask)
|
|
{
|
|
struct zone *zone;
|
|
unsigned long *bitmap;
|
|
- unsigned long pfn, bitidx, word_bitidx;
|
|
+ unsigned long bitidx, word_bitidx;
|
|
unsigned long word;
|
|
|
|
zone = page_zone(page);
|
|
- pfn = page_to_pfn(page);
|
|
bitmap = get_pageblock_bitmap(zone, pfn);
|
|
bitidx = pfn_to_bitidx(zone, pfn);
|
|
word_bitidx = bitidx / BITS_PER_LONG;
|
|
@@ -6057,25 +6102,25 @@ unsigned long get_pageblock_flags_mask(struct page *page,
|
|
}
|
|
|
|
/**
|
|
- * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
|
|
+ * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
|
|
* @page: The page within the block of interest
|
|
* @start_bitidx: The first bit of interest
|
|
* @end_bitidx: The last bit of interest
|
|
* @flags: The flags to set
|
|
*/
|
|
-void set_pageblock_flags_mask(struct page *page, unsigned long flags,
|
|
+void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
|
|
+ unsigned long pfn,
|
|
unsigned long end_bitidx,
|
|
unsigned long mask)
|
|
{
|
|
struct zone *zone;
|
|
unsigned long *bitmap;
|
|
- unsigned long pfn, bitidx, word_bitidx;
|
|
+ unsigned long bitidx, word_bitidx;
|
|
unsigned long old_word, word;
|
|
|
|
BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
|
|
|
|
zone = page_zone(page);
|
|
- pfn = page_to_pfn(page);
|
|
bitmap = get_pageblock_bitmap(zone, pfn);
|
|
bitidx = pfn_to_bitidx(zone, pfn);
|
|
word_bitidx = bitidx / BITS_PER_LONG;
|
|
@@ -6253,7 +6298,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
|
|
cc->nr_migratepages -= nr_reclaimed;
|
|
|
|
ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
|
|
- 0, MIGRATE_SYNC, MR_CMA);
|
|
+ NULL, 0, cc->mode, MR_CMA);
|
|
}
|
|
if (ret < 0) {
|
|
putback_movable_pages(&cc->migratepages);
|
|
@@ -6292,7 +6337,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
|
|
.nr_migratepages = 0,
|
|
.order = -1,
|
|
.zone = page_zone(pfn_to_page(start)),
|
|
- .sync = true,
|
|
+ .mode = MIGRATE_SYNC,
|
|
.ignore_skip_hint = true,
|
|
};
|
|
INIT_LIST_HEAD(&cc.migratepages);
|
|
@@ -6447,7 +6492,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
|
|
{
|
|
struct page *page;
|
|
struct zone *zone;
|
|
- int order, i;
|
|
+ unsigned int order, i;
|
|
unsigned long pfn;
|
|
unsigned long flags;
|
|
/* find the first valid pfn */
|
|
@@ -6499,7 +6544,7 @@ bool is_free_buddy_page(struct page *page)
|
|
struct zone *zone = page_zone(page);
|
|
unsigned long pfn = page_to_pfn(page);
|
|
unsigned long flags;
|
|
- int order;
|
|
+ unsigned int order;
|
|
|
|
spin_lock_irqsave(&zone->lock, flags);
|
|
for (order = 0; order < MAX_ORDER; order++) {
|
|
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
|
|
index cfd1628..0e9a319 100644
|
|
--- a/mm/page_cgroup.c
|
|
+++ b/mm/page_cgroup.c
|
|
@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
|
|
sizeof(struct page_cgroup) * PAGES_PER_SECTION;
|
|
|
|
BUG_ON(PageReserved(page));
|
|
+ kmemleak_free(addr);
|
|
free_pages_exact(addr, table_size);
|
|
}
|
|
}
|
|
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
|
|
index 2beeabf..9056d22 100644
|
|
--- a/mm/pagewalk.c
|
|
+++ b/mm/pagewalk.c
|
|
@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end,
|
|
*/
|
|
if ((vma->vm_start <= addr) &&
|
|
(vma->vm_flags & VM_PFNMAP)) {
|
|
- next = vma->vm_end;
|
|
+ if (walk->pte_hole)
|
|
+ err = walk->pte_hole(addr, next, walk);
|
|
+ if (err)
|
|
+ break;
|
|
pgd = pgd_offset(walk->mm, next);
|
|
continue;
|
|
}
|
|
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
|
|
index 3707c71..5110816 100644
|
|
--- a/mm/percpu-vm.c
|
|
+++ b/mm/percpu-vm.c
|
|
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
|
|
int page_start, int page_end)
|
|
{
|
|
const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
|
|
- unsigned int cpu;
|
|
+ unsigned int cpu, tcpu;
|
|
int i;
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
|
|
struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
|
|
|
|
*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
|
|
- if (!*pagep) {
|
|
- pcpu_free_pages(chunk, pages, populated,
|
|
- page_start, page_end);
|
|
- return -ENOMEM;
|
|
- }
|
|
+ if (!*pagep)
|
|
+ goto err;
|
|
}
|
|
}
|
|
return 0;
|
|
+
|
|
+err:
|
|
+ while (--i >= page_start)
|
|
+ __free_page(pages[pcpu_page_idx(cpu, i)]);
|
|
+
|
|
+ for_each_possible_cpu(tcpu) {
|
|
+ if (tcpu == cpu)
|
|
+ break;
|
|
+ for (i = page_start; i < page_end; i++)
|
|
+ __free_page(pages[pcpu_page_idx(tcpu, i)]);
|
|
+ }
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
/**
|
|
@@ -263,6 +272,7 @@ err:
|
|
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
|
|
page_end - page_start);
|
|
}
|
|
+ pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/mm/readahead.c b/mm/readahead.c
|
|
index 0de2360..0ca36a7 100644
|
|
--- a/mm/readahead.c
|
|
+++ b/mm/readahead.c
|
|
@@ -8,9 +8,7 @@
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
-#include <linux/fs.h>
|
|
#include <linux/gfp.h>
|
|
-#include <linux/mm.h>
|
|
#include <linux/export.h>
|
|
#include <linux/blkdev.h>
|
|
#include <linux/backing-dev.h>
|
|
@@ -20,6 +18,8 @@
|
|
#include <linux/syscalls.h>
|
|
#include <linux/file.h>
|
|
|
|
+#include "internal.h"
|
|
+
|
|
/*
|
|
* Initialise a struct file's readahead state. Assumes that the caller has
|
|
* memset *ra to zero.
|
|
@@ -149,8 +149,7 @@ out:
|
|
*
|
|
* Returns the number of pages requested, or the maximum amount of I/O allowed.
|
|
*/
|
|
-static int
|
|
-__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
|
|
+int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
|
|
pgoff_t offset, unsigned long nr_to_read,
|
|
unsigned long lookahead_size)
|
|
{
|
|
@@ -179,7 +178,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
|
|
rcu_read_lock();
|
|
page = radix_tree_lookup(&mapping->page_tree, page_offset);
|
|
rcu_read_unlock();
|
|
- if (page)
|
|
+ if (page && !radix_tree_exceptional_entry(page))
|
|
continue;
|
|
|
|
page = page_cache_alloc_readahead(mapping);
|
|
@@ -233,28 +232,14 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
|
|
return 0;
|
|
}
|
|
|
|
+#define MAX_READAHEAD ((512*4096)/PAGE_CACHE_SIZE)
|
|
/*
|
|
* Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
|
|
* sensible upper limit.
|
|
*/
|
|
unsigned long max_sane_readahead(unsigned long nr)
|
|
{
|
|
- return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
|
|
- + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
|
|
-}
|
|
-
|
|
-/*
|
|
- * Submit IO for the read-ahead request in file_ra_state.
|
|
- */
|
|
-unsigned long ra_submit(struct file_ra_state *ra,
|
|
- struct address_space *mapping, struct file *filp)
|
|
-{
|
|
- int actual;
|
|
-
|
|
- actual = __do_page_cache_readahead(mapping, filp,
|
|
- ra->start, ra->size, ra->async_size);
|
|
-
|
|
- return actual;
|
|
+ return min(nr, MAX_READAHEAD);
|
|
}
|
|
|
|
/*
|
|
@@ -347,7 +332,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
|
|
pgoff_t head;
|
|
|
|
rcu_read_lock();
|
|
- head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
|
|
+ head = page_cache_prev_hole(mapping, offset - 1, max);
|
|
rcu_read_unlock();
|
|
|
|
return offset - 1 - head;
|
|
@@ -427,7 +412,7 @@ ondemand_readahead(struct address_space *mapping,
|
|
pgoff_t start;
|
|
|
|
rcu_read_lock();
|
|
- start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
|
|
+ start = page_cache_next_hole(mapping, offset + 1, max);
|
|
rcu_read_unlock();
|
|
|
|
if (!start || start - offset > max)
|
|
diff --git a/mm/rmap.c b/mm/rmap.c
|
|
index cdbd312..cab9820 100644
|
|
--- a/mm/rmap.c
|
|
+++ b/mm/rmap.c
|
|
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
|
|
{
|
|
struct anon_vma_chain *avc;
|
|
struct anon_vma *anon_vma;
|
|
+ int error;
|
|
|
|
/* Don't bother if the parent process has no anon_vma here. */
|
|
if (!pvma->anon_vma)
|
|
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
|
|
* First, attach the new VMA to the parent VMA's anon_vmas,
|
|
* so rmap can find non-COWed pages in child processes.
|
|
*/
|
|
- if (anon_vma_clone(vma, pvma))
|
|
- return -ENOMEM;
|
|
+ error = anon_vma_clone(vma, pvma);
|
|
+ if (error)
|
|
+ return error;
|
|
|
|
/* Then add our own anon_vma. */
|
|
anon_vma = anon_vma_alloc();
|
|
diff --git a/mm/shmem.c b/mm/shmem.c
|
|
index ff85863..85d8a1a 100644
|
|
--- a/mm/shmem.c
|
|
+++ b/mm/shmem.c
|
|
@@ -243,19 +243,17 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
|
|
pgoff_t index, void *expected, void *replacement)
|
|
{
|
|
void **pslot;
|
|
- void *item = NULL;
|
|
+ void *item;
|
|
|
|
VM_BUG_ON(!expected);
|
|
+ VM_BUG_ON(!replacement);
|
|
pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
|
|
- if (pslot)
|
|
- item = radix_tree_deref_slot_protected(pslot,
|
|
- &mapping->tree_lock);
|
|
+ if (!pslot)
|
|
+ return -ENOENT;
|
|
+ item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
|
|
if (item != expected)
|
|
return -ENOENT;
|
|
- if (replacement)
|
|
- radix_tree_replace_slot(pslot, replacement);
|
|
- else
|
|
- radix_tree_delete(&mapping->page_tree, index);
|
|
+ radix_tree_replace_slot(pslot, replacement);
|
|
return 0;
|
|
}
|
|
|
|
@@ -332,84 +330,20 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
|
|
}
|
|
|
|
/*
|
|
- * Like find_get_pages, but collecting swap entries as well as pages.
|
|
- */
|
|
-static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
|
|
- pgoff_t start, unsigned int nr_pages,
|
|
- struct page **pages, pgoff_t *indices)
|
|
-{
|
|
- void **slot;
|
|
- unsigned int ret = 0;
|
|
- struct radix_tree_iter iter;
|
|
-
|
|
- if (!nr_pages)
|
|
- return 0;
|
|
-
|
|
- rcu_read_lock();
|
|
-restart:
|
|
- radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
|
|
- struct page *page;
|
|
-repeat:
|
|
- page = radix_tree_deref_slot(slot);
|
|
- if (unlikely(!page))
|
|
- continue;
|
|
- if (radix_tree_exception(page)) {
|
|
- if (radix_tree_deref_retry(page))
|
|
- goto restart;
|
|
- /*
|
|
- * Otherwise, we must be storing a swap entry
|
|
- * here as an exceptional entry: so return it
|
|
- * without attempting to raise page count.
|
|
- */
|
|
- goto export;
|
|
- }
|
|
- if (!page_cache_get_speculative(page))
|
|
- goto repeat;
|
|
-
|
|
- /* Has the page moved? */
|
|
- if (unlikely(page != *slot)) {
|
|
- page_cache_release(page);
|
|
- goto repeat;
|
|
- }
|
|
-export:
|
|
- indices[ret] = iter.index;
|
|
- pages[ret] = page;
|
|
- if (++ret == nr_pages)
|
|
- break;
|
|
- }
|
|
- rcu_read_unlock();
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/*
|
|
* Remove swap entry from radix tree, free the swap and its page cache.
|
|
*/
|
|
static int shmem_free_swap(struct address_space *mapping,
|
|
pgoff_t index, void *radswap)
|
|
{
|
|
- int error;
|
|
+ void *old;
|
|
|
|
spin_lock_irq(&mapping->tree_lock);
|
|
- error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
|
|
+ old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
|
|
spin_unlock_irq(&mapping->tree_lock);
|
|
- if (!error)
|
|
- free_swap_and_cache(radix_to_swp_entry(radswap));
|
|
- return error;
|
|
-}
|
|
-
|
|
-/*
|
|
- * Pagevec may contain swap entries, so shuffle up pages before releasing.
|
|
- */
|
|
-static void shmem_deswap_pagevec(struct pagevec *pvec)
|
|
-{
|
|
- int i, j;
|
|
-
|
|
- for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
|
|
- struct page *page = pvec->pages[i];
|
|
- if (!radix_tree_exceptional_entry(page))
|
|
- pvec->pages[j++] = page;
|
|
- }
|
|
- pvec->nr = j;
|
|
+ if (old != radswap)
|
|
+ return -ENOENT;
|
|
+ free_swap_and_cache(radix_to_swp_entry(radswap));
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
@@ -430,12 +364,12 @@ void shmem_unlock_mapping(struct address_space *mapping)
|
|
* Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
|
|
* has finished, if it hits a row of PAGEVEC_SIZE swap entries.
|
|
*/
|
|
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
|
|
- PAGEVEC_SIZE, pvec.pages, indices);
|
|
+ pvec.nr = find_get_entries(mapping, index,
|
|
+ PAGEVEC_SIZE, pvec.pages, indices);
|
|
if (!pvec.nr)
|
|
break;
|
|
index = indices[pvec.nr - 1] + 1;
|
|
- shmem_deswap_pagevec(&pvec);
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
check_move_unevictable_pages(pvec.pages, pvec.nr);
|
|
pagevec_release(&pvec);
|
|
cond_resched();
|
|
@@ -467,9 +401,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
|
|
pagevec_init(&pvec, 0);
|
|
index = start;
|
|
while (index < end) {
|
|
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
|
|
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
|
|
- pvec.pages, indices);
|
|
+ pvec.nr = find_get_entries(mapping, index,
|
|
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
|
|
+ pvec.pages, indices);
|
|
if (!pvec.nr)
|
|
break;
|
|
mem_cgroup_uncharge_start();
|
|
@@ -498,7 +432,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
|
|
}
|
|
unlock_page(page);
|
|
}
|
|
- shmem_deswap_pagevec(&pvec);
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
mem_cgroup_uncharge_end();
|
|
cond_resched();
|
|
@@ -536,9 +470,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
|
|
index = start;
|
|
while (index < end) {
|
|
cond_resched();
|
|
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
|
|
+
|
|
+ pvec.nr = find_get_entries(mapping, index,
|
|
min(end - index, (pgoff_t)PAGEVEC_SIZE),
|
|
- pvec.pages, indices);
|
|
+ pvec.pages, indices);
|
|
if (!pvec.nr) {
|
|
/* If all gone or hole-punch or unfalloc, we're done */
|
|
if (index == start || end != -1)
|
|
@@ -581,7 +516,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
|
|
}
|
|
unlock_page(page);
|
|
}
|
|
- shmem_deswap_pagevec(&pvec);
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
mem_cgroup_uncharge_end();
|
|
index++;
|
|
@@ -1088,7 +1023,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
|
|
return -EFBIG;
|
|
repeat:
|
|
swap.val = 0;
|
|
- page = find_lock_page(mapping, index);
|
|
+ page = find_lock_entry(mapping, index);
|
|
if (radix_tree_exceptional_entry(page)) {
|
|
swap = radix_to_swp_entry(page);
|
|
page = NULL;
|
|
@@ -1100,6 +1035,9 @@ repeat:
|
|
goto failed;
|
|
}
|
|
|
|
+ if (page && sgp == SGP_WRITE)
|
|
+ mark_page_accessed(page);
|
|
+
|
|
/* fallocated page? */
|
|
if (page && !PageUptodate(page)) {
|
|
if (sgp != SGP_READ)
|
|
@@ -1181,6 +1119,9 @@ repeat:
|
|
shmem_recalc_inode(inode);
|
|
spin_unlock(&info->lock);
|
|
|
|
+ if (sgp == SGP_WRITE)
|
|
+ mark_page_accessed(page);
|
|
+
|
|
delete_from_swap_cache(page);
|
|
set_page_dirty(page);
|
|
swap_free(swap);
|
|
@@ -1205,8 +1146,11 @@ repeat:
|
|
goto decused;
|
|
}
|
|
|
|
- SetPageSwapBacked(page);
|
|
+ __SetPageSwapBacked(page);
|
|
__set_page_locked(page);
|
|
+ if (sgp == SGP_WRITE)
|
|
+ init_page_accessed(page);
|
|
+
|
|
error = mem_cgroup_cache_charge(page, current->mm,
|
|
gfp & GFP_RECLAIM_MASK);
|
|
if (error)
|
|
@@ -1483,6 +1427,11 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
|
|
return inode;
|
|
}
|
|
|
|
+bool shmem_mapping(struct address_space *mapping)
|
|
+{
|
|
+ return mapping->backing_dev_info == &shmem_backing_dev_info;
|
|
+}
|
|
+
|
|
#ifdef CONFIG_TMPFS
|
|
static const struct inode_operations shmem_symlink_inode_operations;
|
|
static const struct inode_operations shmem_short_symlink_operations;
|
|
@@ -1795,7 +1744,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
|
|
pagevec_init(&pvec, 0);
|
|
pvec.nr = 1; /* start small: we may be there already */
|
|
while (!done) {
|
|
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
|
|
+ pvec.nr = find_get_entries(mapping, index,
|
|
pvec.nr, pvec.pages, indices);
|
|
if (!pvec.nr) {
|
|
if (whence == SEEK_DATA)
|
|
@@ -1822,7 +1771,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
|
|
break;
|
|
}
|
|
}
|
|
- shmem_deswap_pagevec(&pvec);
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
pvec.nr = PAGEVEC_SIZE;
|
|
cond_resched();
|
|
@@ -2143,8 +2092,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
|
|
|
|
if (new_dentry->d_inode) {
|
|
(void) shmem_unlink(new_dir, new_dentry);
|
|
- if (they_are_dirs)
|
|
+ if (they_are_dirs) {
|
|
+ drop_nlink(new_dentry->d_inode);
|
|
drop_nlink(old_dir);
|
|
+ }
|
|
} else if (they_are_dirs) {
|
|
drop_nlink(old_dir);
|
|
inc_nlink(new_dir);
|
|
diff --git a/mm/slab.c b/mm/slab.c
|
|
index 6dd8d5f..0b1c2a5 100644
|
|
--- a/mm/slab.c
|
|
+++ b/mm/slab.c
|
|
@@ -2189,7 +2189,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
|
|
int
|
|
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
|
|
{
|
|
- size_t left_over, freelist_size, ralign;
|
|
+ size_t left_over, freelist_size;
|
|
+ size_t ralign = BYTES_PER_WORD;
|
|
gfp_t gfp;
|
|
int err;
|
|
size_t size = cachep->size;
|
|
@@ -2222,14 +2223,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
|
|
size &= ~(BYTES_PER_WORD - 1);
|
|
}
|
|
|
|
- /*
|
|
- * Redzoning and user store require word alignment or possibly larger.
|
|
- * Note this will be overridden by architecture or caller mandated
|
|
- * alignment if either is greater than BYTES_PER_WORD.
|
|
- */
|
|
- if (flags & SLAB_STORE_USER)
|
|
- ralign = BYTES_PER_WORD;
|
|
-
|
|
if (flags & SLAB_RED_ZONE) {
|
|
ralign = REDZONE_ALIGN;
|
|
/* If redzoning, ensure that the second redzone is suitably
|
|
@@ -3129,7 +3122,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
|
|
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
|
|
|
|
retry_cpuset:
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
zonelist = node_zonelist(slab_node(), flags);
|
|
|
|
retry:
|
|
@@ -3187,7 +3180,7 @@ retry:
|
|
}
|
|
}
|
|
|
|
- if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
|
|
+ if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
|
|
goto retry_cpuset;
|
|
return obj;
|
|
}
|
|
diff --git a/mm/slab_common.c b/mm/slab_common.c
|
|
index 1ec3c61..f149e67 100644
|
|
--- a/mm/slab_common.c
|
|
+++ b/mm/slab_common.c
|
|
@@ -56,7 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
|
|
continue;
|
|
}
|
|
|
|
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
|
|
+#if !defined(CONFIG_SLUB)
|
|
/*
|
|
* For simplicity, we won't check this in the list of memcg
|
|
* caches. We have control over memcg naming, and if there
|
|
diff --git a/mm/slub.c b/mm/slub.c
|
|
index 25f14ad..7611f14 100644
|
|
--- a/mm/slub.c
|
|
+++ b/mm/slub.c
|
|
@@ -1684,7 +1684,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
|
|
return NULL;
|
|
|
|
do {
|
|
- cpuset_mems_cookie = get_mems_allowed();
|
|
+ cpuset_mems_cookie = read_mems_allowed_begin();
|
|
zonelist = node_zonelist(slab_node(), flags);
|
|
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
|
|
struct kmem_cache_node *n;
|
|
@@ -1696,19 +1696,17 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
|
|
object = get_partial_node(s, n, c, flags);
|
|
if (object) {
|
|
/*
|
|
- * Return the object even if
|
|
- * put_mems_allowed indicated that
|
|
- * the cpuset mems_allowed was
|
|
- * updated in parallel. It's a
|
|
- * harmless race between the alloc
|
|
- * and the cpuset update.
|
|
+ * Don't check read_mems_allowed_retry()
|
|
+ * here - if mems_allowed was updated in
|
|
+ * parallel, that was a harmless race
|
|
+ * between allocation and the cpuset
|
|
+ * update
|
|
*/
|
|
- put_mems_allowed(cpuset_mems_cookie);
|
|
return object;
|
|
}
|
|
}
|
|
}
|
|
- } while (!put_mems_allowed(cpuset_mems_cookie));
|
|
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
|
|
#endif
|
|
return NULL;
|
|
}
|
|
diff --git a/mm/swap.c b/mm/swap.c
|
|
index 0092097..d2ceddf 100644
|
|
--- a/mm/swap.c
|
|
+++ b/mm/swap.c
|
|
@@ -67,7 +67,7 @@ static void __page_cache_release(struct page *page)
|
|
static void __put_single_page(struct page *page)
|
|
{
|
|
__page_cache_release(page);
|
|
- free_hot_cold_page(page, 0);
|
|
+ free_hot_cold_page(page, false);
|
|
}
|
|
|
|
static void __put_compound_page(struct page *page)
|
|
@@ -469,7 +469,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
|
|
SetPageActive(page);
|
|
lru += LRU_ACTIVE;
|
|
add_page_to_lru_list(page, lruvec, lru);
|
|
- trace_mm_lru_activate(page, page_to_pfn(page));
|
|
+ trace_mm_lru_activate(page);
|
|
|
|
__count_vm_event(PGACTIVATE);
|
|
update_page_reclaim_stat(lruvec, file, 1);
|
|
@@ -581,12 +581,17 @@ void mark_page_accessed(struct page *page)
|
|
EXPORT_SYMBOL(mark_page_accessed);
|
|
|
|
/*
|
|
- * Queue the page for addition to the LRU via pagevec. The decision on whether
|
|
- * to add the page to the [in]active [file|anon] list is deferred until the
|
|
- * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
|
|
- * have the page added to the active list using mark_page_accessed().
|
|
+ * Used to mark_page_accessed(page) that is not visible yet and when it is
|
|
+ * still safe to use non-atomic ops
|
|
*/
|
|
-void __lru_cache_add(struct page *page)
|
|
+void init_page_accessed(struct page *page)
|
|
+{
|
|
+ if (!PageReferenced(page))
|
|
+ __SetPageReferenced(page);
|
|
+}
|
|
+EXPORT_SYMBOL(init_page_accessed);
|
|
+
|
|
+static void __lru_cache_add(struct page *page)
|
|
{
|
|
struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
|
|
|
|
@@ -596,11 +601,34 @@ void __lru_cache_add(struct page *page)
|
|
pagevec_add(pvec, page);
|
|
put_cpu_var(lru_add_pvec);
|
|
}
|
|
-EXPORT_SYMBOL(__lru_cache_add);
|
|
+
|
|
+/**
|
|
+ * lru_cache_add: add a page to the page lists
|
|
+ * @page: the page to add
|
|
+ */
|
|
+void lru_cache_add_anon(struct page *page)
|
|
+{
|
|
+ if (PageActive(page))
|
|
+ ClearPageActive(page);
|
|
+ __lru_cache_add(page);
|
|
+}
|
|
+
|
|
+void lru_cache_add_file(struct page *page)
|
|
+{
|
|
+ if (PageActive(page))
|
|
+ ClearPageActive(page);
|
|
+ __lru_cache_add(page);
|
|
+}
|
|
+EXPORT_SYMBOL(lru_cache_add_file);
|
|
|
|
/**
|
|
* lru_cache_add - add a page to a page list
|
|
* @page: the page to be added to the LRU.
|
|
+ *
|
|
+ * Queue the page for addition to the LRU via pagevec. The decision on whether
|
|
+ * to add the page to the [in]active [file|anon] list is deferred until the
|
|
+ * pagevec is drained. This gives a chance for the caller of lru_cache_add()
|
|
+ * have the page added to the active list using mark_page_accessed().
|
|
*/
|
|
void lru_cache_add(struct page *page)
|
|
{
|
|
@@ -811,7 +839,7 @@ void lru_add_drain_all(void)
|
|
* grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
|
|
* will free it.
|
|
*/
|
|
-void release_pages(struct page **pages, int nr, int cold)
|
|
+void release_pages(struct page **pages, int nr, bool cold)
|
|
{
|
|
int i;
|
|
LIST_HEAD(pages_to_free);
|
|
@@ -852,7 +880,7 @@ void release_pages(struct page **pages, int nr, int cold)
|
|
}
|
|
|
|
/* Clear Active bit in case of parallel mark_page_accessed */
|
|
- ClearPageActive(page);
|
|
+ __ClearPageActive(page);
|
|
|
|
list_add(&page->lru, &pages_to_free);
|
|
}
|
|
@@ -934,7 +962,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
|
|
SetPageLRU(page);
|
|
add_page_to_lru_list(page, lruvec, lru);
|
|
update_page_reclaim_stat(lruvec, file, active);
|
|
- trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
|
|
+ trace_mm_lru_insertion(page, lru);
|
|
}
|
|
|
|
/*
|
|
@@ -948,6 +976,57 @@ void __pagevec_lru_add(struct pagevec *pvec)
|
|
EXPORT_SYMBOL(__pagevec_lru_add);
|
|
|
|
/**
|
|
+ * pagevec_lookup_entries - gang pagecache lookup
|
|
+ * @pvec: Where the resulting entries are placed
|
|
+ * @mapping: The address_space to search
|
|
+ * @start: The starting entry index
|
|
+ * @nr_entries: The maximum number of entries
|
|
+ * @indices: The cache indices corresponding to the entries in @pvec
|
|
+ *
|
|
+ * pagevec_lookup_entries() will search for and return a group of up
|
|
+ * to @nr_entries pages and shadow entries in the mapping. All
|
|
+ * entries are placed in @pvec. pagevec_lookup_entries() takes a
|
|
+ * reference against actual pages in @pvec.
|
|
+ *
|
|
+ * The search returns a group of mapping-contiguous entries with
|
|
+ * ascending indexes. There may be holes in the indices due to
|
|
+ * not-present entries.
|
|
+ *
|
|
+ * pagevec_lookup_entries() returns the number of entries which were
|
|
+ * found.
|
|
+ */
|
|
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
|
|
+ struct address_space *mapping,
|
|
+ pgoff_t start, unsigned nr_pages,
|
|
+ pgoff_t *indices)
|
|
+{
|
|
+ pvec->nr = find_get_entries(mapping, start, nr_pages,
|
|
+ pvec->pages, indices);
|
|
+ return pagevec_count(pvec);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * pagevec_remove_exceptionals - pagevec exceptionals pruning
|
|
+ * @pvec: The pagevec to prune
|
|
+ *
|
|
+ * pagevec_lookup_entries() fills both pages and exceptional radix
|
|
+ * tree entries into the pagevec. This function prunes all
|
|
+ * exceptionals from @pvec without leaving holes, so that it can be
|
|
+ * passed on to page-only pagevec operations.
|
|
+ */
|
|
+void pagevec_remove_exceptionals(struct pagevec *pvec)
|
|
+{
|
|
+ int i, j;
|
|
+
|
|
+ for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
|
|
+ struct page *page = pvec->pages[i];
|
|
+ if (!radix_tree_exceptional_entry(page))
|
|
+ pvec->pages[j++] = page;
|
|
+ }
|
|
+ pvec->nr = j;
|
|
+}
|
|
+
|
|
+/**
|
|
* pagevec_lookup - gang pagecache lookup
|
|
* @pvec: Where the resulting pages are placed
|
|
* @mapping: The address_space to search
|
|
diff --git a/mm/swap_state.c b/mm/swap_state.c
|
|
index e76ace3..2972eee 100644
|
|
--- a/mm/swap_state.c
|
|
+++ b/mm/swap_state.c
|
|
@@ -270,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
|
|
|
|
for (i = 0; i < todo; i++)
|
|
free_swap_cache(pagep[i]);
|
|
- release_pages(pagep, todo, 0);
|
|
+ release_pages(pagep, todo, false);
|
|
pagep += todo;
|
|
nr -= todo;
|
|
}
|
|
diff --git a/mm/swapfile.c b/mm/swapfile.c
|
|
index 4a7f7e6..beeeef8 100644
|
|
--- a/mm/swapfile.c
|
|
+++ b/mm/swapfile.c
|
|
@@ -51,14 +51,32 @@ atomic_long_t nr_swap_pages;
|
|
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
|
|
long total_swap_pages;
|
|
static int least_priority;
|
|
-static atomic_t highest_priority_index = ATOMIC_INIT(-1);
|
|
|
|
static const char Bad_file[] = "Bad swap file entry ";
|
|
static const char Unused_file[] = "Unused swap file entry ";
|
|
static const char Bad_offset[] = "Bad swap offset entry ";
|
|
static const char Unused_offset[] = "Unused swap offset entry ";
|
|
|
|
-struct swap_list_t swap_list = {-1, -1};
|
|
+/*
|
|
+ * all active swap_info_structs
|
|
+ * protected with swap_lock, and ordered by priority.
|
|
+ */
|
|
+PLIST_HEAD(swap_active_head);
|
|
+
|
|
+/*
|
|
+ * all available (active, not full) swap_info_structs
|
|
+ * protected with swap_avail_lock, ordered by priority.
|
|
+ * This is used by get_swap_page() instead of swap_active_head
|
|
+ * because swap_active_head includes all swap_info_structs,
|
|
+ * but get_swap_page() doesn't need to look at full ones.
|
|
+ * This uses its own lock instead of swap_lock because when a
|
|
+ * swap_info_struct changes between not-full/full, it needs to
|
|
+ * add/remove itself to/from this list, but the swap_info_struct->lock
|
|
+ * is held and the locking order requires swap_lock to be taken
|
|
+ * before any swap_info_struct->lock.
|
|
+ */
|
|
+static PLIST_HEAD(swap_avail_head);
|
|
+static DEFINE_SPINLOCK(swap_avail_lock);
|
|
|
|
struct swap_info_struct *swap_info[MAX_SWAPFILES];
|
|
|
|
@@ -591,6 +609,9 @@ checks:
|
|
if (si->inuse_pages == si->pages) {
|
|
si->lowest_bit = si->max;
|
|
si->highest_bit = 0;
|
|
+ spin_lock(&swap_avail_lock);
|
|
+ plist_del(&si->avail_list, &swap_avail_head);
|
|
+ spin_unlock(&swap_avail_lock);
|
|
}
|
|
si->swap_map[offset] = usage;
|
|
inc_cluster_info_page(si, si->cluster_info, offset);
|
|
@@ -640,71 +661,65 @@ no_page:
|
|
|
|
swp_entry_t get_swap_page(void)
|
|
{
|
|
- struct swap_info_struct *si;
|
|
+ struct swap_info_struct *si, *next;
|
|
pgoff_t offset;
|
|
- int type, next;
|
|
- int wrapped = 0;
|
|
- int hp_index;
|
|
|
|
- spin_lock(&swap_lock);
|
|
if (atomic_long_read(&nr_swap_pages) <= 0)
|
|
goto noswap;
|
|
atomic_long_dec(&nr_swap_pages);
|
|
|
|
- for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
|
|
- hp_index = atomic_xchg(&highest_priority_index, -1);
|
|
- /*
|
|
- * highest_priority_index records current highest priority swap
|
|
- * type which just frees swap entries. If its priority is
|
|
- * higher than that of swap_list.next swap type, we use it. It
|
|
- * isn't protected by swap_lock, so it can be an invalid value
|
|
- * if the corresponding swap type is swapoff. We double check
|
|
- * the flags here. It's even possible the swap type is swapoff
|
|
- * and swapon again and its priority is changed. In such rare
|
|
- * case, low prority swap type might be used, but eventually
|
|
- * high priority swap will be used after several rounds of
|
|
- * swap.
|
|
- */
|
|
- if (hp_index != -1 && hp_index != type &&
|
|
- swap_info[type]->prio < swap_info[hp_index]->prio &&
|
|
- (swap_info[hp_index]->flags & SWP_WRITEOK)) {
|
|
- type = hp_index;
|
|
- swap_list.next = type;
|
|
- }
|
|
-
|
|
- si = swap_info[type];
|
|
- next = si->next;
|
|
- if (next < 0 ||
|
|
- (!wrapped && si->prio != swap_info[next]->prio)) {
|
|
- next = swap_list.head;
|
|
- wrapped++;
|
|
- }
|
|
+ spin_lock(&swap_avail_lock);
|
|
|
|
+start_over:
|
|
+ plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
|
|
+ /* requeue si to after same-priority siblings */
|
|
+ plist_requeue(&si->avail_list, &swap_avail_head);
|
|
+ spin_unlock(&swap_avail_lock);
|
|
spin_lock(&si->lock);
|
|
- if (!si->highest_bit) {
|
|
+ if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
|
|
+ spin_lock(&swap_avail_lock);
|
|
+ if (plist_node_empty(&si->avail_list)) {
|
|
+ spin_unlock(&si->lock);
|
|
+ goto nextsi;
|
|
+ }
|
|
+ WARN(!si->highest_bit,
|
|
+ "swap_info %d in list but !highest_bit\n",
|
|
+ si->type);
|
|
+ WARN(!(si->flags & SWP_WRITEOK),
|
|
+ "swap_info %d in list but !SWP_WRITEOK\n",
|
|
+ si->type);
|
|
+ plist_del(&si->avail_list, &swap_avail_head);
|
|
spin_unlock(&si->lock);
|
|
- continue;
|
|
+ goto nextsi;
|
|
}
|
|
- if (!(si->flags & SWP_WRITEOK)) {
|
|
- spin_unlock(&si->lock);
|
|
- continue;
|
|
- }
|
|
-
|
|
- swap_list.next = next;
|
|
|
|
- spin_unlock(&swap_lock);
|
|
/* This is called for allocating swap entry for cache */
|
|
offset = scan_swap_map(si, SWAP_HAS_CACHE);
|
|
spin_unlock(&si->lock);
|
|
if (offset)
|
|
- return swp_entry(type, offset);
|
|
- spin_lock(&swap_lock);
|
|
- next = swap_list.next;
|
|
+ return swp_entry(si->type, offset);
|
|
+ pr_debug("scan_swap_map of si %d failed to find offset\n",
|
|
+ si->type);
|
|
+ spin_lock(&swap_avail_lock);
|
|
+nextsi:
|
|
+ /*
|
|
+ * if we got here, it's likely that si was almost full before,
|
|
+ * and since scan_swap_map() can drop the si->lock, multiple
|
|
+ * callers probably all tried to get a page from the same si
|
|
+ * and it filled up before we could get one; or, the si filled
|
|
+ * up between us dropping swap_avail_lock and taking si->lock.
|
|
+ * Since we dropped the swap_avail_lock, the swap_avail_head
|
|
+ * list may have been modified; so if next is still in the
|
|
+ * swap_avail_head list then try it, otherwise start over.
|
|
+ */
|
|
+ if (plist_node_empty(&next->avail_list))
|
|
+ goto start_over;
|
|
}
|
|
|
|
+ spin_unlock(&swap_avail_lock);
|
|
+
|
|
atomic_long_inc(&nr_swap_pages);
|
|
noswap:
|
|
- spin_unlock(&swap_lock);
|
|
return (swp_entry_t) {0};
|
|
}
|
|
|
|
@@ -766,27 +781,6 @@ out:
|
|
return NULL;
|
|
}
|
|
|
|
-/*
|
|
- * This swap type frees swap entry, check if it is the highest priority swap
|
|
- * type which just frees swap entry. get_swap_page() uses
|
|
- * highest_priority_index to search highest priority swap type. The
|
|
- * swap_info_struct.lock can't protect us if there are multiple swap types
|
|
- * active, so we use atomic_cmpxchg.
|
|
- */
|
|
-static void set_highest_priority_index(int type)
|
|
-{
|
|
- int old_hp_index, new_hp_index;
|
|
-
|
|
- do {
|
|
- old_hp_index = atomic_read(&highest_priority_index);
|
|
- if (old_hp_index != -1 &&
|
|
- swap_info[old_hp_index]->prio >= swap_info[type]->prio)
|
|
- break;
|
|
- new_hp_index = type;
|
|
- } while (atomic_cmpxchg(&highest_priority_index,
|
|
- old_hp_index, new_hp_index) != old_hp_index);
|
|
-}
|
|
-
|
|
static unsigned char swap_entry_free(struct swap_info_struct *p,
|
|
swp_entry_t entry, unsigned char usage)
|
|
{
|
|
@@ -828,9 +822,18 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
|
|
dec_cluster_info_page(p, p->cluster_info, offset);
|
|
if (offset < p->lowest_bit)
|
|
p->lowest_bit = offset;
|
|
- if (offset > p->highest_bit)
|
|
+ if (offset > p->highest_bit) {
|
|
+ bool was_full = !p->highest_bit;
|
|
p->highest_bit = offset;
|
|
- set_highest_priority_index(p->type);
|
|
+ if (was_full && (p->flags & SWP_WRITEOK)) {
|
|
+ spin_lock(&swap_avail_lock);
|
|
+ WARN_ON(!plist_node_empty(&p->avail_list));
|
|
+ if (plist_node_empty(&p->avail_list))
|
|
+ plist_add(&p->avail_list,
|
|
+ &swap_avail_head);
|
|
+ spin_unlock(&swap_avail_lock);
|
|
+ }
|
|
+ }
|
|
atomic_long_inc(&nr_swap_pages);
|
|
p->inuse_pages--;
|
|
frontswap_invalidate_page(p->type, offset);
|
|
@@ -1765,30 +1768,37 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
|
|
unsigned char *swap_map,
|
|
struct swap_cluster_info *cluster_info)
|
|
{
|
|
- int i, prev;
|
|
-
|
|
if (prio >= 0)
|
|
p->prio = prio;
|
|
else
|
|
p->prio = --least_priority;
|
|
+ /*
|
|
+ * the plist prio is negated because plist ordering is
|
|
+ * low-to-high, while swap ordering is high-to-low
|
|
+ */
|
|
+ p->list.prio = -p->prio;
|
|
+ p->avail_list.prio = -p->prio;
|
|
p->swap_map = swap_map;
|
|
p->cluster_info = cluster_info;
|
|
p->flags |= SWP_WRITEOK;
|
|
atomic_long_add(p->pages, &nr_swap_pages);
|
|
total_swap_pages += p->pages;
|
|
|
|
- /* insert swap space into swap_list: */
|
|
- prev = -1;
|
|
- for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
|
|
- if (p->prio >= swap_info[i]->prio)
|
|
- break;
|
|
- prev = i;
|
|
- }
|
|
- p->next = i;
|
|
- if (prev < 0)
|
|
- swap_list.head = swap_list.next = p->type;
|
|
- else
|
|
- swap_info[prev]->next = p->type;
|
|
+ assert_spin_locked(&swap_lock);
|
|
+ /*
|
|
+ * both lists are plists, and thus priority ordered.
|
|
+ * swap_active_head needs to be priority ordered for swapoff(),
|
|
+ * which on removal of any swap_info_struct with an auto-assigned
|
|
+ * (i.e. negative) priority increments the auto-assigned priority
|
|
+ * of any lower-priority swap_info_structs.
|
|
+ * swap_avail_head needs to be priority ordered for get_swap_page(),
|
|
+ * which allocates swap pages from the highest available priority
|
|
+ * swap_info_struct.
|
|
+ */
|
|
+ plist_add(&p->list, &swap_active_head);
|
|
+ spin_lock(&swap_avail_lock);
|
|
+ plist_add(&p->avail_list, &swap_avail_head);
|
|
+ spin_unlock(&swap_avail_lock);
|
|
}
|
|
|
|
static void enable_swap_info(struct swap_info_struct *p, int prio,
|
|
@@ -1823,8 +1833,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
struct address_space *mapping;
|
|
struct inode *inode;
|
|
struct filename *pathname;
|
|
- int i, type, prev;
|
|
- int err;
|
|
+ int err, found = 0;
|
|
unsigned int old_block_size;
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
@@ -1842,17 +1851,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
goto out;
|
|
|
|
mapping = victim->f_mapping;
|
|
- prev = -1;
|
|
spin_lock(&swap_lock);
|
|
- for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
|
|
- p = swap_info[type];
|
|
+ plist_for_each_entry(p, &swap_active_head, list) {
|
|
if (p->flags & SWP_WRITEOK) {
|
|
- if (p->swap_file->f_mapping == mapping)
|
|
+ if (p->swap_file->f_mapping == mapping) {
|
|
+ found = 1;
|
|
break;
|
|
+ }
|
|
}
|
|
- prev = type;
|
|
}
|
|
- if (type < 0) {
|
|
+ if (!found) {
|
|
err = -EINVAL;
|
|
spin_unlock(&swap_lock);
|
|
goto out_dput;
|
|
@@ -1864,20 +1872,21 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
spin_unlock(&swap_lock);
|
|
goto out_dput;
|
|
}
|
|
- if (prev < 0)
|
|
- swap_list.head = p->next;
|
|
- else
|
|
- swap_info[prev]->next = p->next;
|
|
- if (type == swap_list.next) {
|
|
- /* just pick something that's safe... */
|
|
- swap_list.next = swap_list.head;
|
|
- }
|
|
+ spin_lock(&swap_avail_lock);
|
|
+ plist_del(&p->avail_list, &swap_avail_head);
|
|
+ spin_unlock(&swap_avail_lock);
|
|
spin_lock(&p->lock);
|
|
if (p->prio < 0) {
|
|
- for (i = p->next; i >= 0; i = swap_info[i]->next)
|
|
- swap_info[i]->prio = p->prio--;
|
|
+ struct swap_info_struct *si = p;
|
|
+
|
|
+ plist_for_each_entry_continue(si, &swap_active_head, list) {
|
|
+ si->prio++;
|
|
+ si->list.prio--;
|
|
+ si->avail_list.prio--;
|
|
+ }
|
|
least_priority++;
|
|
}
|
|
+ plist_del(&p->list, &swap_active_head);
|
|
atomic_long_sub(p->pages, &nr_swap_pages);
|
|
total_swap_pages -= p->pages;
|
|
p->flags &= ~SWP_WRITEOK;
|
|
@@ -1885,7 +1894,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
spin_unlock(&swap_lock);
|
|
|
|
set_current_oom_origin();
|
|
- err = try_to_unuse(type, false, 0); /* force all pages to be unused */
|
|
+ err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
|
|
clear_current_oom_origin();
|
|
|
|
if (err) {
|
|
@@ -1926,7 +1935,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
frontswap_map = frontswap_map_get(p);
|
|
spin_unlock(&p->lock);
|
|
spin_unlock(&swap_lock);
|
|
- frontswap_invalidate_area(type);
|
|
+ frontswap_invalidate_area(p->type);
|
|
frontswap_map_set(p, NULL);
|
|
mutex_unlock(&swapon_mutex);
|
|
free_percpu(p->percpu_cluster);
|
|
@@ -1935,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
vfree(cluster_info);
|
|
vfree(frontswap_map);
|
|
/* Destroy swap account information */
|
|
- swap_cgroup_swapoff(type);
|
|
+ swap_cgroup_swapoff(p->type);
|
|
|
|
inode = mapping->host;
|
|
if (S_ISBLK(inode->i_mode)) {
|
|
@@ -2142,8 +2151,9 @@ static struct swap_info_struct *alloc_swap_info(void)
|
|
*/
|
|
}
|
|
INIT_LIST_HEAD(&p->first_swap_extent.list);
|
|
+ plist_node_init(&p->list, 0);
|
|
+ plist_node_init(&p->avail_list, 0);
|
|
p->flags = SWP_USED;
|
|
- p->next = -1;
|
|
spin_unlock(&swap_lock);
|
|
spin_lock_init(&p->lock);
|
|
|
|
diff --git a/mm/truncate.c b/mm/truncate.c
|
|
index 353b683..827ad8d 100644
|
|
--- a/mm/truncate.c
|
|
+++ b/mm/truncate.c
|
|
@@ -20,8 +20,25 @@
|
|
#include <linux/buffer_head.h> /* grr. try_to_release_page,
|
|
do_invalidatepage */
|
|
#include <linux/cleancache.h>
|
|
+#include <linux/rmap.h>
|
|
#include "internal.h"
|
|
|
|
+static void clear_exceptional_entry(struct address_space *mapping,
|
|
+ pgoff_t index, void *entry)
|
|
+{
|
|
+ /* Handled by shmem itself */
|
|
+ if (shmem_mapping(mapping))
|
|
+ return;
|
|
+
|
|
+ spin_lock_irq(&mapping->tree_lock);
|
|
+ /*
|
|
+ * Regular page slots are stabilized by the page lock even
|
|
+ * without the tree itself locked. These unlocked entries
|
|
+ * need verification under the tree lock.
|
|
+ */
|
|
+ radix_tree_delete_item(&mapping->page_tree, index, entry);
|
|
+ spin_unlock_irq(&mapping->tree_lock);
|
|
+}
|
|
|
|
/**
|
|
* do_invalidatepage - invalidate part or all of a page
|
|
@@ -208,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
|
|
unsigned int partial_start; /* inclusive */
|
|
unsigned int partial_end; /* exclusive */
|
|
struct pagevec pvec;
|
|
+ pgoff_t indices[PAGEVEC_SIZE];
|
|
pgoff_t index;
|
|
int i;
|
|
|
|
@@ -238,17 +256,23 @@ void truncate_inode_pages_range(struct address_space *mapping,
|
|
|
|
pagevec_init(&pvec, 0);
|
|
index = start;
|
|
- while (index < end && pagevec_lookup(&pvec, mapping, index,
|
|
- min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
|
|
+ while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
|
|
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
|
|
+ indices)) {
|
|
mem_cgroup_uncharge_start();
|
|
for (i = 0; i < pagevec_count(&pvec); i++) {
|
|
struct page *page = pvec.pages[i];
|
|
|
|
/* We rely upon deletion not changing page->index */
|
|
- index = page->index;
|
|
+ index = indices[i];
|
|
if (index >= end)
|
|
break;
|
|
|
|
+ if (radix_tree_exceptional_entry(page)) {
|
|
+ clear_exceptional_entry(mapping, index, page);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
if (!trylock_page(page))
|
|
continue;
|
|
WARN_ON(page->index != index);
|
|
@@ -259,6 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
|
|
truncate_inode_page(mapping, page);
|
|
unlock_page(page);
|
|
}
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
mem_cgroup_uncharge_end();
|
|
cond_resched();
|
|
@@ -307,14 +332,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
|
|
index = start;
|
|
for ( ; ; ) {
|
|
cond_resched();
|
|
- if (!pagevec_lookup(&pvec, mapping, index,
|
|
- min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
|
|
+ if (!pagevec_lookup_entries(&pvec, mapping, index,
|
|
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
|
|
+ indices)) {
|
|
if (index == start)
|
|
break;
|
|
index = start;
|
|
continue;
|
|
}
|
|
- if (index == start && pvec.pages[0]->index >= end) {
|
|
+ if (index == start && indices[0] >= end) {
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
break;
|
|
}
|
|
@@ -323,16 +350,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
|
|
struct page *page = pvec.pages[i];
|
|
|
|
/* We rely upon deletion not changing page->index */
|
|
- index = page->index;
|
|
+ index = indices[i];
|
|
if (index >= end)
|
|
break;
|
|
|
|
+ if (radix_tree_exceptional_entry(page)) {
|
|
+ clear_exceptional_entry(mapping, index, page);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
lock_page(page);
|
|
WARN_ON(page->index != index);
|
|
wait_on_page_writeback(page);
|
|
truncate_inode_page(mapping, page);
|
|
unlock_page(page);
|
|
}
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
mem_cgroup_uncharge_end();
|
|
index++;
|
|
@@ -375,6 +408,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
|
|
unsigned long invalidate_mapping_pages(struct address_space *mapping,
|
|
pgoff_t start, pgoff_t end)
|
|
{
|
|
+ pgoff_t indices[PAGEVEC_SIZE];
|
|
struct pagevec pvec;
|
|
pgoff_t index = start;
|
|
unsigned long ret;
|
|
@@ -390,17 +424,23 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
|
|
*/
|
|
|
|
pagevec_init(&pvec, 0);
|
|
- while (index <= end && pagevec_lookup(&pvec, mapping, index,
|
|
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
|
|
+ while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
|
|
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
|
|
+ indices)) {
|
|
mem_cgroup_uncharge_start();
|
|
for (i = 0; i < pagevec_count(&pvec); i++) {
|
|
struct page *page = pvec.pages[i];
|
|
|
|
/* We rely upon deletion not changing page->index */
|
|
- index = page->index;
|
|
+ index = indices[i];
|
|
if (index > end)
|
|
break;
|
|
|
|
+ if (radix_tree_exceptional_entry(page)) {
|
|
+ clear_exceptional_entry(mapping, index, page);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
if (!trylock_page(page))
|
|
continue;
|
|
WARN_ON(page->index != index);
|
|
@@ -414,6 +454,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
|
|
deactivate_page(page);
|
|
count += ret;
|
|
}
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
mem_cgroup_uncharge_end();
|
|
cond_resched();
|
|
@@ -481,6 +522,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
|
|
int invalidate_inode_pages2_range(struct address_space *mapping,
|
|
pgoff_t start, pgoff_t end)
|
|
{
|
|
+ pgoff_t indices[PAGEVEC_SIZE];
|
|
struct pagevec pvec;
|
|
pgoff_t index;
|
|
int i;
|
|
@@ -491,17 +533,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
|
|
cleancache_invalidate_inode(mapping);
|
|
pagevec_init(&pvec, 0);
|
|
index = start;
|
|
- while (index <= end && pagevec_lookup(&pvec, mapping, index,
|
|
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
|
|
+ while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
|
|
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
|
|
+ indices)) {
|
|
mem_cgroup_uncharge_start();
|
|
for (i = 0; i < pagevec_count(&pvec); i++) {
|
|
struct page *page = pvec.pages[i];
|
|
|
|
/* We rely upon deletion not changing page->index */
|
|
- index = page->index;
|
|
+ index = indices[i];
|
|
if (index > end)
|
|
break;
|
|
|
|
+ if (radix_tree_exceptional_entry(page)) {
|
|
+ clear_exceptional_entry(mapping, index, page);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
lock_page(page);
|
|
WARN_ON(page->index != index);
|
|
if (page->mapping != mapping) {
|
|
@@ -539,6 +587,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
|
|
ret = ret2;
|
|
unlock_page(page);
|
|
}
|
|
+ pagevec_remove_exceptionals(&pvec);
|
|
pagevec_release(&pvec);
|
|
mem_cgroup_uncharge_end();
|
|
cond_resched();
|
|
@@ -613,12 +662,67 @@ EXPORT_SYMBOL(truncate_pagecache);
|
|
*/
|
|
void truncate_setsize(struct inode *inode, loff_t newsize)
|
|
{
|
|
+ loff_t oldsize = inode->i_size;
|
|
+
|
|
i_size_write(inode, newsize);
|
|
+ if (newsize > oldsize)
|
|
+ pagecache_isize_extended(inode, oldsize, newsize);
|
|
truncate_pagecache(inode, newsize);
|
|
}
|
|
EXPORT_SYMBOL(truncate_setsize);
|
|
|
|
/**
|
|
+ * pagecache_isize_extended - update pagecache after extension of i_size
|
|
+ * @inode: inode for which i_size was extended
|
|
+ * @from: original inode size
|
|
+ * @to: new inode size
|
|
+ *
|
|
+ * Handle extension of inode size either caused by extending truncate or by
|
|
+ * write starting after current i_size. We mark the page straddling current
|
|
+ * i_size RO so that page_mkwrite() is called on the nearest write access to
|
|
+ * the page. This way filesystem can be sure that page_mkwrite() is called on
|
|
+ * the page before user writes to the page via mmap after the i_size has been
|
|
+ * changed.
|
|
+ *
|
|
+ * The function must be called after i_size is updated so that page fault
|
|
+ * coming after we unlock the page will already see the new i_size.
|
|
+ * The function must be called while we still hold i_mutex - this not only
|
|
+ * makes sure i_size is stable but also that userspace cannot observe new
|
|
+ * i_size value before we are prepared to store mmap writes at new inode size.
|
|
+ */
|
|
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
|
|
+{
|
|
+ int bsize = 1 << inode->i_blkbits;
|
|
+ loff_t rounded_from;
|
|
+ struct page *page;
|
|
+ pgoff_t index;
|
|
+
|
|
+ WARN_ON(to > inode->i_size);
|
|
+
|
|
+ if (from >= to || bsize == PAGE_CACHE_SIZE)
|
|
+ return;
|
|
+ /* Page straddling @from will not have any hole block created? */
|
|
+ rounded_from = round_up(from, bsize);
|
|
+ if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
|
|
+ return;
|
|
+
|
|
+ index = from >> PAGE_CACHE_SHIFT;
|
|
+ page = find_lock_page(inode->i_mapping, index);
|
|
+ /* Page not cached? Nothing to do */
|
|
+ if (!page)
|
|
+ return;
|
|
+ /*
|
|
+ * See clear_page_dirty_for_io() for details why set_page_dirty()
|
|
+ * is needed.
|
|
+ */
|
|
+ if (page_mkclean(page))
|
|
+ set_page_dirty(page);
|
|
+ unlock_page(page);
|
|
+ page_cache_release(page);
|
|
+}
|
|
+EXPORT_SYMBOL(pagecache_isize_extended);
|
|
+
|
|
+/**
|
|
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
|
|
* @inode: inode
|
|
* @lstart: offset of beginning of hole
|
|
diff --git a/mm/util.c b/mm/util.c
|
|
index a24aa22..c1010cb 100644
|
|
--- a/mm/util.c
|
|
+++ b/mm/util.c
|
|
@@ -275,17 +275,14 @@ pid_t vm_is_stack(struct task_struct *task,
|
|
|
|
if (in_group) {
|
|
struct task_struct *t;
|
|
- rcu_read_lock();
|
|
- if (!pid_alive(task))
|
|
- goto done;
|
|
|
|
- t = task;
|
|
- do {
|
|
+ rcu_read_lock();
|
|
+ for_each_thread(task, t) {
|
|
if (vm_is_stack_for_task(t, vma)) {
|
|
ret = t->pid;
|
|
goto done;
|
|
}
|
|
- } while_each_thread(task, t);
|
|
+ }
|
|
done:
|
|
rcu_read_unlock();
|
|
}
|
|
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
|
|
index 0fdf968..aa3891e 100644
|
|
--- a/mm/vmalloc.c
|
|
+++ b/mm/vmalloc.c
|
|
@@ -2681,14 +2681,14 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
|
|
|
|
prev_end = VMALLOC_START;
|
|
|
|
- spin_lock(&vmap_area_lock);
|
|
+ rcu_read_lock();
|
|
|
|
if (list_empty(&vmap_area_list)) {
|
|
vmi->largest_chunk = VMALLOC_TOTAL;
|
|
goto out;
|
|
}
|
|
|
|
- list_for_each_entry(va, &vmap_area_list, list) {
|
|
+ list_for_each_entry_rcu(va, &vmap_area_list, list) {
|
|
unsigned long addr = va->va_start;
|
|
|
|
/*
|
|
@@ -2715,7 +2715,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
|
|
vmi->largest_chunk = VMALLOC_END - prev_end;
|
|
|
|
out:
|
|
- spin_unlock(&vmap_area_lock);
|
|
+ rcu_read_unlock();
|
|
}
|
|
#endif
|
|
|
|
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
|
|
index d4042e7..c5afd57 100644
|
|
--- a/mm/vmpressure.c
|
|
+++ b/mm/vmpressure.c
|
|
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
|
|
unsigned long scanned;
|
|
unsigned long reclaimed;
|
|
|
|
+ spin_lock(&vmpr->sr_lock);
|
|
/*
|
|
* Several contexts might be calling vmpressure(), so it is
|
|
* possible that the work was rescheduled again before the old
|
|
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
|
|
* here. No need for any locks here since we don't care if
|
|
* vmpr->reclaimed is in sync.
|
|
*/
|
|
- if (!vmpr->scanned)
|
|
+ scanned = vmpr->scanned;
|
|
+ if (!scanned) {
|
|
+ spin_unlock(&vmpr->sr_lock);
|
|
return;
|
|
+ }
|
|
|
|
- spin_lock(&vmpr->sr_lock);
|
|
- scanned = vmpr->scanned;
|
|
reclaimed = vmpr->reclaimed;
|
|
vmpr->scanned = 0;
|
|
vmpr->reclaimed = 0;
|
|
diff --git a/mm/vmscan.c b/mm/vmscan.c
|
|
index 6ef484f..88edf53 100644
|
|
--- a/mm/vmscan.c
|
|
+++ b/mm/vmscan.c
|
|
@@ -163,7 +163,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
|
|
|
|
bool zone_reclaimable(struct zone *zone)
|
|
{
|
|
- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
|
|
+ return zone_page_state(zone, NR_PAGES_SCANNED) <
|
|
+ zone_reclaimable_pages(zone) * 6;
|
|
}
|
|
|
|
static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
|
|
@@ -224,15 +225,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
|
|
unsigned long freed = 0;
|
|
unsigned long long delta;
|
|
long total_scan;
|
|
- long max_pass;
|
|
+ long freeable;
|
|
long nr;
|
|
long new_nr;
|
|
int nid = shrinkctl->nid;
|
|
long batch_size = shrinker->batch ? shrinker->batch
|
|
: SHRINK_BATCH;
|
|
|
|
- max_pass = shrinker->count_objects(shrinker, shrinkctl);
|
|
- if (max_pass == 0)
|
|
+ freeable = shrinker->count_objects(shrinker, shrinkctl);
|
|
+ if (freeable == 0)
|
|
return 0;
|
|
|
|
/*
|
|
@@ -244,14 +245,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
|
|
|
|
total_scan = nr;
|
|
delta = (4 * nr_pages_scanned) / shrinker->seeks;
|
|
- delta *= max_pass;
|
|
+ delta *= freeable;
|
|
do_div(delta, lru_pages + 1);
|
|
total_scan += delta;
|
|
if (total_scan < 0) {
|
|
printk(KERN_ERR
|
|
"shrink_slab: %pF negative objects to delete nr=%ld\n",
|
|
shrinker->scan_objects, total_scan);
|
|
- total_scan = max_pass;
|
|
+ total_scan = freeable;
|
|
}
|
|
|
|
/*
|
|
@@ -260,26 +261,26 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
|
|
* shrinkers to return -1 all the time. This results in a large
|
|
* nr being built up so when a shrink that can do some work
|
|
* comes along it empties the entire cache due to nr >>>
|
|
- * max_pass. This is bad for sustaining a working set in
|
|
+ * freeable. This is bad for sustaining a working set in
|
|
* memory.
|
|
*
|
|
* Hence only allow the shrinker to scan the entire cache when
|
|
* a large delta change is calculated directly.
|
|
*/
|
|
- if (delta < max_pass / 4)
|
|
- total_scan = min(total_scan, max_pass / 2);
|
|
+ if (delta < freeable / 4)
|
|
+ total_scan = min(total_scan, freeable / 2);
|
|
|
|
/*
|
|
* Avoid risking looping forever due to too large nr value:
|
|
* never try to free more than twice the estimate number of
|
|
* freeable entries.
|
|
*/
|
|
- if (total_scan > max_pass * 2)
|
|
- total_scan = max_pass * 2;
|
|
+ if (total_scan > freeable * 2)
|
|
+ total_scan = freeable * 2;
|
|
|
|
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
|
|
nr_pages_scanned, lru_pages,
|
|
- max_pass, delta, total_scan);
|
|
+ freeable, delta, total_scan);
|
|
|
|
/*
|
|
* Normally, we should not scan less than batch_size objects in one
|
|
@@ -292,12 +293,12 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
|
|
*
|
|
* We detect the "tight on memory" situations by looking at the total
|
|
* number of objects we want to scan (total_scan). If it is greater
|
|
- * than the total number of objects on slab (max_pass), we must be
|
|
+ * than the total number of objects on slab (freeable), we must be
|
|
* scanning at high prio and therefore should try to reclaim as much as
|
|
* possible.
|
|
*/
|
|
while (total_scan >= batch_size ||
|
|
- total_scan >= max_pass) {
|
|
+ total_scan >= freeable) {
|
|
unsigned long ret;
|
|
unsigned long nr_to_scan = min(batch_size, total_scan);
|
|
|
|
@@ -870,21 +871,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
|
|
*
|
|
* 2) Global reclaim encounters a page, memcg encounters a
|
|
* page that is not marked for immediate reclaim or
|
|
- * the caller does not have __GFP_IO. In this case mark
|
|
+ * the caller does not have __GFP_FS (or __GFP_IO if it's
|
|
+ * simply going to swap, not to fs). In this case mark
|
|
* the page for immediate reclaim and continue scanning.
|
|
*
|
|
- * __GFP_IO is checked because a loop driver thread might
|
|
+ * Require may_enter_fs because we would wait on fs, which
|
|
+ * may not have submitted IO yet. And the loop driver might
|
|
* enter reclaim, and deadlock if it waits on a page for
|
|
* which it is needed to do the write (loop masks off
|
|
* __GFP_IO|__GFP_FS for this reason); but more thought
|
|
* would probably show more reasons.
|
|
*
|
|
- * Don't require __GFP_FS, since we're not going into the
|
|
- * FS, just waiting on its writeback completion. Worryingly,
|
|
- * ext4 gfs2 and xfs allocate pages with
|
|
- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
|
|
- * may_enter_fs here is liable to OOM on them.
|
|
- *
|
|
* 3) memcg encounters a page that is not already marked
|
|
* PageReclaim. memcg does not have any dirty pages
|
|
* throttling so we could easily OOM just because too many
|
|
@@ -901,7 +898,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
|
|
|
|
/* Case 2 above */
|
|
} else if (global_reclaim(sc) ||
|
|
- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
|
|
+ !PageReclaim(page) || !may_enter_fs) {
|
|
/*
|
|
* This is slightly racy - end_page_writeback()
|
|
* might have just cleared PageReclaim, then
|
|
@@ -1107,7 +1104,7 @@ keep:
|
|
VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
|
|
}
|
|
|
|
- free_hot_cold_page_list(&free_pages, 1);
|
|
+ free_hot_cold_page_list(&free_pages, true);
|
|
|
|
list_splice(&ret_pages, page_list);
|
|
count_vm_events(PGACTIVATE, pgactivate);
|
|
@@ -1144,7 +1141,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
|
|
TTU_UNMAP|TTU_IGNORE_ACCESS,
|
|
&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
|
|
list_splice(&clean_pages, page_list);
|
|
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
|
|
+ mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1470,7 +1467,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
|
|
|
|
if (global_reclaim(sc)) {
|
|
- zone->pages_scanned += nr_scanned;
|
|
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
|
|
if (current_is_kswapd())
|
|
__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
|
|
else
|
|
@@ -1505,7 +1502,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
- free_hot_cold_page_list(&page_list, 1);
|
|
+ free_hot_cold_page_list(&page_list, true);
|
|
|
|
/*
|
|
* If reclaim is isolating dirty pages under writeback, it implies
|
|
@@ -1659,7 +1656,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
|
|
nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
|
|
&nr_scanned, sc, isolate_mode, lru);
|
|
if (global_reclaim(sc))
|
|
- zone->pages_scanned += nr_scanned;
|
|
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
|
|
|
|
reclaim_stat->recent_scanned[file] += nr_taken;
|
|
|
|
@@ -1725,7 +1722,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
- free_hot_cold_page_list(&l_hold, 1);
|
|
+ free_hot_cold_page_list(&l_hold, true);
|
|
}
|
|
|
|
#ifdef CONFIG_SWAP
|
|
@@ -1847,7 +1844,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
|
|
struct zone *zone = lruvec_zone(lruvec);
|
|
unsigned long anon_prio, file_prio;
|
|
enum scan_balance scan_balance;
|
|
- unsigned long anon, file, free;
|
|
+ unsigned long anon, file;
|
|
bool force_scan = false;
|
|
unsigned long ap, fp;
|
|
enum lru_list lru;
|
|
@@ -1895,11 +1892,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
|
|
goto out;
|
|
}
|
|
|
|
- anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
|
|
- get_lru_size(lruvec, LRU_INACTIVE_ANON);
|
|
- file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
|
|
- get_lru_size(lruvec, LRU_INACTIVE_FILE);
|
|
-
|
|
/*
|
|
* If it's foreseeable that reclaiming the file cache won't be
|
|
* enough to get the zone back into a desirable shape, we have
|
|
@@ -1907,8 +1899,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
|
|
* thrashing - remaining file pages alone.
|
|
*/
|
|
if (global_reclaim(sc)) {
|
|
- free = zone_page_state(zone, NR_FREE_PAGES);
|
|
- if (unlikely(file + free <= high_wmark_pages(zone))) {
|
|
+ unsigned long zonefile;
|
|
+ unsigned long zonefree;
|
|
+
|
|
+ zonefree = zone_page_state(zone, NR_FREE_PAGES);
|
|
+ zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
|
|
+ zone_page_state(zone, NR_INACTIVE_FILE);
|
|
+
|
|
+ if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
|
|
scan_balance = SCAN_ANON;
|
|
goto out;
|
|
}
|
|
@@ -1943,6 +1941,12 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
|
|
*
|
|
* anon in [0], file in [1]
|
|
*/
|
|
+
|
|
+ anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
|
|
+ get_lru_size(lruvec, LRU_INACTIVE_ANON);
|
|
+ file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
|
|
+ get_lru_size(lruvec, LRU_INACTIVE_FILE);
|
|
+
|
|
spin_lock_irq(&zone->lru_lock);
|
|
if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
|
|
reclaim_stat->recent_scanned[0] /= 2;
|
|
@@ -2018,13 +2022,27 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
|
|
unsigned long nr_reclaimed = 0;
|
|
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
|
|
struct blk_plug plug;
|
|
- bool scan_adjusted = false;
|
|
+ bool scan_adjusted;
|
|
|
|
get_scan_count(lruvec, sc, nr);
|
|
|
|
/* Record the original scan target for proportional adjustments later */
|
|
memcpy(targets, nr, sizeof(nr));
|
|
|
|
+ /*
|
|
+ * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
|
|
+ * event that can occur when there is little memory pressure e.g.
|
|
+ * multiple streaming readers/writers. Hence, we do not abort scanning
|
|
+ * when the requested number of pages are reclaimed when scanning at
|
|
+ * DEF_PRIORITY on the assumption that the fact we are direct
|
|
+ * reclaiming implies that kswapd is not keeping up and it is best to
|
|
+ * do a batch of work at once. For memcg reclaim one check is made to
|
|
+ * abort proportional reclaim if either the file or anon lru has already
|
|
+ * dropped to zero at the first pass.
|
|
+ */
|
|
+ scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
|
|
+ sc->priority == DEF_PRIORITY);
|
|
+
|
|
blk_start_plug(&plug);
|
|
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
|
|
nr[LRU_INACTIVE_FILE]) {
|
|
@@ -2045,17 +2063,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
|
|
continue;
|
|
|
|
/*
|
|
- * For global direct reclaim, reclaim only the number of pages
|
|
- * requested. Less care is taken to scan proportionally as it
|
|
- * is more important to minimise direct reclaim stall latency
|
|
- * than it is to properly age the LRU lists.
|
|
- */
|
|
- if (global_reclaim(sc) && !current_is_kswapd())
|
|
- break;
|
|
-
|
|
- /*
|
|
* For kswapd and memcg, reclaim at least the number of pages
|
|
- * requested. Ensure that the anon and file LRUs shrink
|
|
+ * requested. Ensure that the anon and file LRUs are scanned
|
|
* proportionally what was requested by get_scan_count(). We
|
|
* stop reclaiming one LRU and reduce the amount scanning
|
|
* proportional to the original scan target.
|
|
@@ -2063,6 +2072,15 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
|
|
nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
|
|
nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
|
|
|
|
+ /*
|
|
+ * It's just vindictive to attack the larger once the smaller
|
|
+ * has gone to zero. And given the way we stop scanning the
|
|
+ * smaller below, this makes sure that we only make one nudge
|
|
+ * towards proportionality once we've got nr_to_reclaim.
|
|
+ */
|
|
+ if (!nr_file || !nr_anon)
|
|
+ break;
|
|
+
|
|
if (nr_file > nr_anon) {
|
|
unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
|
|
targets[LRU_ACTIVE_ANON] + 1;
|
|
@@ -2424,8 +2442,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
|
|
unsigned long lru_pages = 0;
|
|
|
|
nodes_clear(shrink->nodes_to_scan);
|
|
- for_each_zone_zonelist(zone, z, zonelist,
|
|
- gfp_zone(sc->gfp_mask)) {
|
|
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
|
|
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
|
|
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
|
|
continue;
|
|
|
|
@@ -2846,18 +2864,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
|
|
return false;
|
|
|
|
/*
|
|
- * There is a potential race between when kswapd checks its watermarks
|
|
- * and a process gets throttled. There is also a potential race if
|
|
- * processes get throttled, kswapd wakes, a large process exits therby
|
|
- * balancing the zones that causes kswapd to miss a wakeup. If kswapd
|
|
- * is going to sleep, no process should be sleeping on pfmemalloc_wait
|
|
- * so wake them now if necessary. If necessary, processes will wake
|
|
- * kswapd and get throttled again
|
|
+ * The throttled processes are normally woken up in balance_pgdat() as
|
|
+ * soon as pfmemalloc_watermark_ok() is true. But there is a potential
|
|
+ * race between when kswapd checks the watermarks and a process gets
|
|
+ * throttled. There is also a potential race if processes get
|
|
+ * throttled, kswapd wakes, a large process exits thereby balancing the
|
|
+ * zones, which causes kswapd to exit balance_pgdat() before reaching
|
|
+ * the wake up checks. If kswapd is going to sleep, no process should
|
|
+ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
|
|
+ * the wake up is premature, processes will wake kswapd and get
|
|
+ * throttled again. The difference from wake ups in balance_pgdat() is
|
|
+ * that here we are under prepare_to_wait().
|
|
*/
|
|
- if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
|
|
- wake_up(&pgdat->pfmemalloc_wait);
|
|
- return false;
|
|
- }
|
|
+ if (waitqueue_active(&pgdat->pfmemalloc_wait))
|
|
+ wake_up_all(&pgdat->pfmemalloc_wait);
|
|
|
|
return pgdat_balanced(pgdat, order, classzone_idx);
|
|
}
|
|
diff --git a/mm/vmstat.c b/mm/vmstat.c
|
|
index def5dd2..eded190 100644
|
|
--- a/mm/vmstat.c
|
|
+++ b/mm/vmstat.c
|
|
@@ -200,7 +200,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
|
|
continue;
|
|
|
|
threshold = (*calculate_pressure)(zone);
|
|
- for_each_possible_cpu(cpu)
|
|
+ for_each_online_cpu(cpu)
|
|
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
|
|
= threshold;
|
|
}
|
|
@@ -761,6 +761,7 @@ const char * const vmstat_text[] = {
|
|
"nr_shmem",
|
|
"nr_dirtied",
|
|
"nr_written",
|
|
+ "nr_pages_scanned",
|
|
|
|
#ifdef CONFIG_NUMA
|
|
"numa_hit",
|
|
@@ -1055,7 +1056,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
|
|
min_wmark_pages(zone),
|
|
low_wmark_pages(zone),
|
|
high_wmark_pages(zone),
|
|
- zone->pages_scanned,
|
|
+ zone_page_state(zone, NR_PAGES_SCANNED),
|
|
zone->spanned_pages,
|
|
zone->present_pages,
|
|
zone->managed_pages);
|
|
@@ -1065,10 +1066,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
|
|
zone_page_state(zone, i));
|
|
|
|
seq_printf(m,
|
|
- "\n protection: (%lu",
|
|
+ "\n protection: (%ld",
|
|
zone->lowmem_reserve[0]);
|
|
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
|
|
- seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
|
|
+ seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
|
|
seq_printf(m,
|
|
")"
|
|
"\n pagesets");
|
|
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
|
|
index 7e57135..5d56e05 100644
|
|
--- a/net/8021q/vlan_core.c
|
|
+++ b/net/8021q/vlan_core.c
|
|
@@ -106,59 +106,6 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
|
|
}
|
|
EXPORT_SYMBOL(vlan_dev_vlan_id);
|
|
|
|
-static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
|
|
-{
|
|
- if (skb_cow(skb, skb_headroom(skb)) < 0) {
|
|
- kfree_skb(skb);
|
|
- return NULL;
|
|
- }
|
|
-
|
|
- memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
|
|
- skb->mac_header += VLAN_HLEN;
|
|
- return skb;
|
|
-}
|
|
-
|
|
-struct sk_buff *vlan_untag(struct sk_buff *skb)
|
|
-{
|
|
- struct vlan_hdr *vhdr;
|
|
- u16 vlan_tci;
|
|
-
|
|
- if (unlikely(vlan_tx_tag_present(skb))) {
|
|
- /* vlan_tci is already set-up so leave this for another time */
|
|
- return skb;
|
|
- }
|
|
-
|
|
- skb = skb_share_check(skb, GFP_ATOMIC);
|
|
- if (unlikely(!skb))
|
|
- goto err_free;
|
|
-
|
|
- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
|
|
- goto err_free;
|
|
-
|
|
- vhdr = (struct vlan_hdr *) skb->data;
|
|
- vlan_tci = ntohs(vhdr->h_vlan_TCI);
|
|
- __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
|
|
-
|
|
- skb_pull_rcsum(skb, VLAN_HLEN);
|
|
- vlan_set_encap_proto(skb, vhdr);
|
|
-
|
|
- skb = vlan_reorder_header(skb);
|
|
- if (unlikely(!skb))
|
|
- goto err_free;
|
|
-
|
|
- skb_reset_network_header(skb);
|
|
- skb_reset_transport_header(skb);
|
|
- skb_reset_mac_len(skb);
|
|
-
|
|
- return skb;
|
|
-
|
|
-err_free:
|
|
- kfree_skb(skb);
|
|
- return NULL;
|
|
-}
|
|
-EXPORT_SYMBOL(vlan_untag);
|
|
-
|
|
-
|
|
/*
|
|
* vlan info and vid list
|
|
*/
|
|
diff --git a/net/9p/client.c b/net/9p/client.c
|
|
index 9186550..08046f3 100644
|
|
--- a/net/9p/client.c
|
|
+++ b/net/9p/client.c
|
|
@@ -839,7 +839,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
|
|
if (err < 0) {
|
|
if (err == -EIO)
|
|
c->status = Disconnected;
|
|
- goto reterr;
|
|
+ if (err != -ERESTARTSYS)
|
|
+ goto reterr;
|
|
}
|
|
if (req->status == REQ_STATUS_ERROR) {
|
|
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
|
|
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
|
|
index cc1cfd6..e5c5f57 100644
|
|
--- a/net/batman-adv/fragmentation.c
|
|
+++ b/net/batman-adv/fragmentation.c
|
|
@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
|
|
{
|
|
struct batadv_frag_table_entry *chain;
|
|
struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
|
|
+ struct batadv_frag_list_entry *frag_entry_last = NULL;
|
|
struct batadv_frag_packet *frag_packet;
|
|
uint8_t bucket;
|
|
uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
|
|
@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
|
|
ret = true;
|
|
goto out;
|
|
}
|
|
+
|
|
+ /* store current entry because it could be the last in list */
|
|
+ frag_entry_last = frag_entry_curr;
|
|
}
|
|
|
|
- /* Reached the end of the list, so insert after 'frag_entry_curr'. */
|
|
- if (likely(frag_entry_curr)) {
|
|
- hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
|
|
+ /* Reached the end of the list, so insert after 'frag_entry_last'. */
|
|
+ if (likely(frag_entry_last)) {
|
|
+ hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
|
|
chain->size += skb->len - hdr_size;
|
|
chain->timestamp = jiffies;
|
|
ret = true;
|
|
@@ -247,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
|
|
kfree(entry);
|
|
|
|
/* Make room for the rest of the fragments. */
|
|
- if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
|
|
+ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
|
|
kfree_skb(skb_out);
|
|
skb_out = NULL;
|
|
goto free;
|
|
@@ -430,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
|
|
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
|
|
*/
|
|
mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
|
|
- max_fragment_size = (mtu - header_size - ETH_HLEN);
|
|
+ max_fragment_size = mtu - header_size;
|
|
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
|
|
|
|
/* Don't even try to fragment, if we need more than 16 fragments */
|
|
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
|
|
index 36b9ae6..2393ea7 100644
|
|
--- a/net/batman-adv/gateway_client.c
|
|
+++ b/net/batman-adv/gateway_client.c
|
|
@@ -812,7 +812,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
|
|
goto out;
|
|
|
|
gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
|
|
- if (!gw_node->bandwidth_down == 0)
|
|
+ if (!gw_node)
|
|
goto out;
|
|
|
|
switch (atomic_read(&bat_priv->gw_mode)) {
|
|
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
|
|
index b851cc5..fbda6b5 100644
|
|
--- a/net/batman-adv/hard-interface.c
|
|
+++ b/net/batman-adv/hard-interface.c
|
|
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
|
|
return true;
|
|
|
|
/* no more parents..stop recursion */
|
|
- if (net_dev->iflink == net_dev->ifindex)
|
|
+ if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
|
|
return false;
|
|
|
|
/* recurse over the parent device */
|
|
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
|
|
index 6afa3b4..0007c9e 100644
|
|
--- a/net/bluetooth/l2cap_core.c
|
|
+++ b/net/bluetooth/l2cap_core.c
|
|
@@ -2608,12 +2608,8 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
|
|
|
|
BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
|
|
|
|
- pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
|
|
-
|
|
- pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
|
|
-
|
|
sdu_len = len;
|
|
- pdu_len -= L2CAP_SDULEN_SIZE;
|
|
+ pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
|
|
|
|
while (len > 0) {
|
|
if (len <= pdu_len)
|
|
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
|
|
index 27ae841..06a7a76 100644
|
|
--- a/net/bluetooth/l2cap_sock.c
|
|
+++ b/net/bluetooth/l2cap_sock.c
|
|
@@ -1112,7 +1112,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
|
|
l2cap_chan_close(chan, 0);
|
|
lock_sock(sk);
|
|
|
|
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
|
|
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
|
|
+ !(current->flags & PF_EXITING))
|
|
err = bt_sock_wait_state(sk, BT_CLOSED,
|
|
sk->sk_lingertime);
|
|
}
|
|
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
|
|
index facd8a7..b088651 100644
|
|
--- a/net/bluetooth/rfcomm/core.c
|
|
+++ b/net/bluetooth/rfcomm/core.c
|
|
@@ -1859,10 +1859,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
|
|
/* Get data directly from socket receive queue without copying it. */
|
|
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
|
|
skb_orphan(skb);
|
|
- if (!skb_linearize(skb))
|
|
+ if (!skb_linearize(skb)) {
|
|
s = rfcomm_recv_frame(s, skb);
|
|
- else
|
|
+ if (!s)
|
|
+ break;
|
|
+ } else {
|
|
kfree_skb(skb);
|
|
+ }
|
|
}
|
|
|
|
if (s && (sk->sk_state == BT_CLOSED))
|
|
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
|
|
index 3c2d3e4..a0050de 100644
|
|
--- a/net/bluetooth/rfcomm/sock.c
|
|
+++ b/net/bluetooth/rfcomm/sock.c
|
|
@@ -898,7 +898,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
|
|
sk->sk_shutdown = SHUTDOWN_MASK;
|
|
__rfcomm_sock_close(sk);
|
|
|
|
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
|
|
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
|
|
+ !(current->flags & PF_EXITING))
|
|
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
|
|
}
|
|
release_sock(sk);
|
|
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
|
|
index 24fa396..316dd4e 100644
|
|
--- a/net/bluetooth/sco.c
|
|
+++ b/net/bluetooth/sco.c
|
|
@@ -909,7 +909,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
|
|
sco_sock_clear_timer(sk);
|
|
__sco_sock_close(sk);
|
|
|
|
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
|
|
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
|
|
+ !(current->flags & PF_EXITING))
|
|
err = bt_sock_wait_state(sk, BT_CLOSED,
|
|
sk->sk_lingertime);
|
|
}
|
|
@@ -929,7 +930,8 @@ static int sco_sock_release(struct socket *sock)
|
|
|
|
sco_sock_close(sk);
|
|
|
|
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
|
|
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
|
|
+ !(current->flags & PF_EXITING)) {
|
|
lock_sock(sk);
|
|
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
|
|
release_sock(sk);
|
|
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
|
|
index 9203d5a..09152d1 100644
|
|
--- a/net/bridge/br_fdb.c
|
|
+++ b/net/bridge/br_fdb.c
|
|
@@ -705,9 +705,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
|
|
int err = 0;
|
|
|
|
if (ndm->ndm_flags & NTF_USE) {
|
|
+ local_bh_disable();
|
|
rcu_read_lock();
|
|
br_fdb_update(p->br, p, addr, vid, true);
|
|
rcu_read_unlock();
|
|
+ local_bh_enable();
|
|
} else {
|
|
spin_lock_bh(&p->br->hash_lock);
|
|
err = fdb_add_entry(p, addr, ndm->ndm_state,
|
|
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
|
|
index a9a4a1b..8d423bc 100644
|
|
--- a/net/bridge/br_ioctl.c
|
|
+++ b/net/bridge/br_ioctl.c
|
|
@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
|
|
return -EPERM;
|
|
|
|
- spin_lock_bh(&br->lock);
|
|
br_stp_set_bridge_priority(br, args[1]);
|
|
- spin_unlock_bh(&br->lock);
|
|
return 0;
|
|
|
|
case BRCTL_SET_PORT_PRIORITY:
|
|
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
|
|
index 93067ec..7bbc8fe 100644
|
|
--- a/net/bridge/br_multicast.c
|
|
+++ b/net/bridge/br_multicast.c
|
|
@@ -1056,7 +1056,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
|
|
|
|
err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
|
|
vid);
|
|
- if (!err)
|
|
+ if (err)
|
|
break;
|
|
}
|
|
|
|
@@ -1086,6 +1086,9 @@ static void br_multicast_add_router(struct net_bridge *br,
|
|
struct net_bridge_port *p;
|
|
struct hlist_node *slot = NULL;
|
|
|
|
+ if (!hlist_unhashed(&port->rlist))
|
|
+ return;
|
|
+
|
|
hlist_for_each_entry(p, &br->router_list, rlist) {
|
|
if ((unsigned long) port >= (unsigned long) p)
|
|
break;
|
|
@@ -1113,12 +1116,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
|
|
if (port->multicast_router != 1)
|
|
return;
|
|
|
|
- if (!hlist_unhashed(&port->rlist))
|
|
- goto timer;
|
|
-
|
|
br_multicast_add_router(br, port);
|
|
|
|
-timer:
|
|
mod_timer(&port->multicast_router_timer,
|
|
now + br->multicast_querier_interval);
|
|
}
|
|
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
|
|
index f2d254b..4acfc3e 100644
|
|
--- a/net/bridge/br_private.h
|
|
+++ b/net/bridge/br_private.h
|
|
@@ -302,6 +302,9 @@ struct br_input_skb_cb {
|
|
int igmp;
|
|
int mrouters_only;
|
|
#endif
|
|
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
|
|
+ bool vlan_filtered;
|
|
+#endif
|
|
};
|
|
|
|
#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
|
|
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
|
|
index 189ba1e..9a0005a 100644
|
|
--- a/net/bridge/br_stp_if.c
|
|
+++ b/net/bridge/br_stp_if.c
|
|
@@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
|
|
return true;
|
|
}
|
|
|
|
-/* called under bridge lock */
|
|
+/* Acquires and releases bridge lock */
|
|
void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
|
|
{
|
|
struct net_bridge_port *p;
|
|
int wasroot;
|
|
|
|
+ spin_lock_bh(&br->lock);
|
|
wasroot = br_is_root_bridge(br);
|
|
|
|
list_for_each_entry(p, &br->port_list, list) {
|
|
@@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
|
|
br_port_state_selection(br);
|
|
if (br_is_root_bridge(br) && !wasroot)
|
|
br_become_root_bridge(br);
|
|
+ spin_unlock_bh(&br->lock);
|
|
}
|
|
|
|
/* called under bridge lock */
|
|
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
|
|
index b1c6372..e1bd253 100644
|
|
--- a/net/bridge/br_vlan.c
|
|
+++ b/net/bridge/br_vlan.c
|
|
@@ -125,7 +125,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
|
|
{
|
|
u16 vid;
|
|
|
|
- if (!br->vlan_enabled)
|
|
+ /* If this packet was not filtered at input, let it pass */
|
|
+ if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
|
|
goto out;
|
|
|
|
/* Vlan filter table must be configured at this point. The
|
|
@@ -163,8 +164,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
|
|
/* If VLAN filtering is disabled on the bridge, all packets are
|
|
* permitted.
|
|
*/
|
|
- if (!br->vlan_enabled)
|
|
+ if (!br->vlan_enabled) {
|
|
+ BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
|
|
return true;
|
|
+ }
|
|
|
|
/* If there are no vlan in the permitted list, all packets are
|
|
* rejected.
|
|
@@ -172,6 +175,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
|
|
if (!v)
|
|
goto drop;
|
|
|
|
+ BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
|
|
+
|
|
/* If vlan tx offload is disabled on bridge device and frame was
|
|
* sent from vlan device on the bridge device, it does not have
|
|
* HW accelerated vlan tag.
|
|
@@ -179,7 +184,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
|
|
if (unlikely(!vlan_tx_tag_present(skb) &&
|
|
(skb->protocol == htons(ETH_P_8021Q) ||
|
|
skb->protocol == htons(ETH_P_8021AD)))) {
|
|
- skb = vlan_untag(skb);
|
|
+ skb = skb_vlan_untag(skb);
|
|
if (unlikely(!skb))
|
|
return false;
|
|
}
|
|
@@ -228,7 +233,8 @@ bool br_allowed_egress(struct net_bridge *br,
|
|
{
|
|
u16 vid;
|
|
|
|
- if (!br->vlan_enabled)
|
|
+ /* If this packet was not filtered at input, let it pass */
|
|
+ if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
|
|
return true;
|
|
|
|
if (!v)
|
|
@@ -247,6 +253,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
|
|
struct net_bridge *br = p->br;
|
|
struct net_port_vlans *v;
|
|
|
|
+ /* If filtering was disabled at input, let it pass. */
|
|
if (!br->vlan_enabled)
|
|
return true;
|
|
|
|
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
|
|
index d6be3ed..afeb8e0 100644
|
|
--- a/net/caif/caif_socket.c
|
|
+++ b/net/caif/caif_socket.c
|
|
@@ -283,7 +283,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
int copylen;
|
|
|
|
ret = -EOPNOTSUPP;
|
|
- if (m->msg_flags&MSG_OOB)
|
|
+ if (flags & MSG_OOB)
|
|
goto read_error;
|
|
|
|
skb = skb_recv_datagram(sk, flags, 0 , &ret);
|
|
@@ -332,6 +332,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
|
|
release_sock(sk);
|
|
timeo = schedule_timeout(timeo);
|
|
lock_sock(sk);
|
|
+
|
|
+ if (sock_flag(sk, SOCK_DEAD))
|
|
+ break;
|
|
+
|
|
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
|
|
}
|
|
|
|
@@ -376,6 +380,10 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
struct sk_buff *skb;
|
|
|
|
lock_sock(sk);
|
|
+ if (sock_flag(sk, SOCK_DEAD)) {
|
|
+ err = -ECONNRESET;
|
|
+ goto unlock;
|
|
+ }
|
|
skb = skb_dequeue(&sk->sk_receive_queue);
|
|
caif_check_flow_release(sk);
|
|
|
|
diff --git a/net/can/af_can.c b/net/can/af_can.c
|
|
index a27f8aa..5e9a227 100644
|
|
--- a/net/can/af_can.c
|
|
+++ b/net/can/af_can.c
|
|
@@ -262,6 +262,9 @@ int can_send(struct sk_buff *skb, int loop)
|
|
goto inval_skb;
|
|
}
|
|
|
|
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
+
|
|
+ skb_reset_mac_header(skb);
|
|
skb_reset_network_header(skb);
|
|
skb_reset_transport_header(skb);
|
|
|
|
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
|
|
index 96238ba..de6662b 100644
|
|
--- a/net/ceph/auth_x.c
|
|
+++ b/net/ceph/auth_x.c
|
|
@@ -13,8 +13,6 @@
|
|
#include "auth_x.h"
|
|
#include "auth_x_protocol.h"
|
|
|
|
-#define TEMP_TICKET_BUF_LEN 256
|
|
-
|
|
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
|
|
|
|
static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
|
|
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
|
|
}
|
|
|
|
static int ceph_x_decrypt(struct ceph_crypto_key *secret,
|
|
- void **p, void *end, void *obuf, size_t olen)
|
|
+ void **p, void *end, void **obuf, size_t olen)
|
|
{
|
|
struct ceph_x_encrypt_header head;
|
|
size_t head_len = sizeof(head);
|
|
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
|
|
return -EINVAL;
|
|
|
|
dout("ceph_x_decrypt len %d\n", len);
|
|
- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
|
|
- *p, len);
|
|
+ if (*obuf == NULL) {
|
|
+ *obuf = kmalloc(len, GFP_NOFS);
|
|
+ if (!*obuf)
|
|
+ return -ENOMEM;
|
|
+ olen = len;
|
|
+ }
|
|
+
|
|
+ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
|
|
if (ret)
|
|
return ret;
|
|
if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
|
|
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
|
|
kfree(th);
|
|
}
|
|
|
|
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
|
|
- struct ceph_crypto_key *secret,
|
|
- void *buf, void *end)
|
|
+static int process_one_ticket(struct ceph_auth_client *ac,
|
|
+ struct ceph_crypto_key *secret,
|
|
+ void **p, void *end)
|
|
{
|
|
struct ceph_x_info *xi = ac->private;
|
|
- int num;
|
|
- void *p = buf;
|
|
+ int type;
|
|
+ u8 tkt_struct_v, blob_struct_v;
|
|
+ struct ceph_x_ticket_handler *th;
|
|
+ void *dbuf = NULL;
|
|
+ void *dp, *dend;
|
|
+ int dlen;
|
|
+ char is_enc;
|
|
+ struct timespec validity;
|
|
+ struct ceph_crypto_key old_key;
|
|
+ void *ticket_buf = NULL;
|
|
+ void *tp, *tpend;
|
|
+ struct ceph_timespec new_validity;
|
|
+ struct ceph_crypto_key new_session_key;
|
|
+ struct ceph_buffer *new_ticket_blob;
|
|
+ unsigned long new_expires, new_renew_after;
|
|
+ u64 new_secret_id;
|
|
int ret;
|
|
- char *dbuf;
|
|
- char *ticket_buf;
|
|
- u8 reply_struct_v;
|
|
|
|
- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
|
|
- if (!dbuf)
|
|
- return -ENOMEM;
|
|
+ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
|
|
|
|
- ret = -ENOMEM;
|
|
- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
|
|
- if (!ticket_buf)
|
|
- goto out_dbuf;
|
|
+ type = ceph_decode_32(p);
|
|
+ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
|
|
|
|
- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
|
|
- reply_struct_v = ceph_decode_8(&p);
|
|
- if (reply_struct_v != 1)
|
|
+ tkt_struct_v = ceph_decode_8(p);
|
|
+ if (tkt_struct_v != 1)
|
|
goto bad;
|
|
- num = ceph_decode_32(&p);
|
|
- dout("%d tickets\n", num);
|
|
- while (num--) {
|
|
- int type;
|
|
- u8 tkt_struct_v, blob_struct_v;
|
|
- struct ceph_x_ticket_handler *th;
|
|
- void *dp, *dend;
|
|
- int dlen;
|
|
- char is_enc;
|
|
- struct timespec validity;
|
|
- struct ceph_crypto_key old_key;
|
|
- void *tp, *tpend;
|
|
- struct ceph_timespec new_validity;
|
|
- struct ceph_crypto_key new_session_key;
|
|
- struct ceph_buffer *new_ticket_blob;
|
|
- unsigned long new_expires, new_renew_after;
|
|
- u64 new_secret_id;
|
|
-
|
|
- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
|
|
-
|
|
- type = ceph_decode_32(&p);
|
|
- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
|
|
-
|
|
- tkt_struct_v = ceph_decode_8(&p);
|
|
- if (tkt_struct_v != 1)
|
|
- goto bad;
|
|
-
|
|
- th = get_ticket_handler(ac, type);
|
|
- if (IS_ERR(th)) {
|
|
- ret = PTR_ERR(th);
|
|
- goto out;
|
|
- }
|
|
|
|
- /* blob for me */
|
|
- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
|
|
- TEMP_TICKET_BUF_LEN);
|
|
- if (dlen <= 0) {
|
|
- ret = dlen;
|
|
- goto out;
|
|
- }
|
|
- dout(" decrypted %d bytes\n", dlen);
|
|
- dend = dbuf + dlen;
|
|
- dp = dbuf;
|
|
+ th = get_ticket_handler(ac, type);
|
|
+ if (IS_ERR(th)) {
|
|
+ ret = PTR_ERR(th);
|
|
+ goto out;
|
|
+ }
|
|
|
|
- tkt_struct_v = ceph_decode_8(&dp);
|
|
- if (tkt_struct_v != 1)
|
|
- goto bad;
|
|
+ /* blob for me */
|
|
+ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
|
|
+ if (dlen <= 0) {
|
|
+ ret = dlen;
|
|
+ goto out;
|
|
+ }
|
|
+ dout(" decrypted %d bytes\n", dlen);
|
|
+ dp = dbuf;
|
|
+ dend = dp + dlen;
|
|
|
|
- memcpy(&old_key, &th->session_key, sizeof(old_key));
|
|
- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
|
|
- if (ret)
|
|
- goto out;
|
|
+ tkt_struct_v = ceph_decode_8(&dp);
|
|
+ if (tkt_struct_v != 1)
|
|
+ goto bad;
|
|
|
|
- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
|
|
- ceph_decode_timespec(&validity, &new_validity);
|
|
- new_expires = get_seconds() + validity.tv_sec;
|
|
- new_renew_after = new_expires - (validity.tv_sec / 4);
|
|
- dout(" expires=%lu renew_after=%lu\n", new_expires,
|
|
- new_renew_after);
|
|
+ memcpy(&old_key, &th->session_key, sizeof(old_key));
|
|
+ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
|
|
+ if (ret)
|
|
+ goto out;
|
|
|
|
- /* ticket blob for service */
|
|
- ceph_decode_8_safe(&p, end, is_enc, bad);
|
|
- tp = ticket_buf;
|
|
- if (is_enc) {
|
|
- /* encrypted */
|
|
- dout(" encrypted ticket\n");
|
|
- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
|
|
- TEMP_TICKET_BUF_LEN);
|
|
- if (dlen < 0) {
|
|
- ret = dlen;
|
|
- goto out;
|
|
- }
|
|
- dlen = ceph_decode_32(&tp);
|
|
- } else {
|
|
- /* unencrypted */
|
|
- ceph_decode_32_safe(&p, end, dlen, bad);
|
|
- ceph_decode_need(&p, end, dlen, bad);
|
|
- ceph_decode_copy(&p, ticket_buf, dlen);
|
|
+ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
|
|
+ ceph_decode_timespec(&validity, &new_validity);
|
|
+ new_expires = get_seconds() + validity.tv_sec;
|
|
+ new_renew_after = new_expires - (validity.tv_sec / 4);
|
|
+ dout(" expires=%lu renew_after=%lu\n", new_expires,
|
|
+ new_renew_after);
|
|
+
|
|
+ /* ticket blob for service */
|
|
+ ceph_decode_8_safe(p, end, is_enc, bad);
|
|
+ if (is_enc) {
|
|
+ /* encrypted */
|
|
+ dout(" encrypted ticket\n");
|
|
+ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
|
|
+ if (dlen < 0) {
|
|
+ ret = dlen;
|
|
+ goto out;
|
|
}
|
|
- tpend = tp + dlen;
|
|
- dout(" ticket blob is %d bytes\n", dlen);
|
|
- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
|
|
- blob_struct_v = ceph_decode_8(&tp);
|
|
- new_secret_id = ceph_decode_64(&tp);
|
|
- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
|
|
- if (ret)
|
|
+ tp = ticket_buf;
|
|
+ dlen = ceph_decode_32(&tp);
|
|
+ } else {
|
|
+ /* unencrypted */
|
|
+ ceph_decode_32_safe(p, end, dlen, bad);
|
|
+ ticket_buf = kmalloc(dlen, GFP_NOFS);
|
|
+ if (!ticket_buf) {
|
|
+ ret = -ENOMEM;
|
|
goto out;
|
|
-
|
|
- /* all is well, update our ticket */
|
|
- ceph_crypto_key_destroy(&th->session_key);
|
|
- if (th->ticket_blob)
|
|
- ceph_buffer_put(th->ticket_blob);
|
|
- th->session_key = new_session_key;
|
|
- th->ticket_blob = new_ticket_blob;
|
|
- th->validity = new_validity;
|
|
- th->secret_id = new_secret_id;
|
|
- th->expires = new_expires;
|
|
- th->renew_after = new_renew_after;
|
|
- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
|
|
- type, ceph_entity_type_name(type), th->secret_id,
|
|
- (int)th->ticket_blob->vec.iov_len);
|
|
- xi->have_keys |= th->service;
|
|
+ }
|
|
+ tp = ticket_buf;
|
|
+ ceph_decode_need(p, end, dlen, bad);
|
|
+ ceph_decode_copy(p, ticket_buf, dlen);
|
|
}
|
|
+ tpend = tp + dlen;
|
|
+ dout(" ticket blob is %d bytes\n", dlen);
|
|
+ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
|
|
+ blob_struct_v = ceph_decode_8(&tp);
|
|
+ new_secret_id = ceph_decode_64(&tp);
|
|
+ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ /* all is well, update our ticket */
|
|
+ ceph_crypto_key_destroy(&th->session_key);
|
|
+ if (th->ticket_blob)
|
|
+ ceph_buffer_put(th->ticket_blob);
|
|
+ th->session_key = new_session_key;
|
|
+ th->ticket_blob = new_ticket_blob;
|
|
+ th->validity = new_validity;
|
|
+ th->secret_id = new_secret_id;
|
|
+ th->expires = new_expires;
|
|
+ th->renew_after = new_renew_after;
|
|
+ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
|
|
+ type, ceph_entity_type_name(type), th->secret_id,
|
|
+ (int)th->ticket_blob->vec.iov_len);
|
|
+ xi->have_keys |= th->service;
|
|
|
|
- ret = 0;
|
|
out:
|
|
kfree(ticket_buf);
|
|
-out_dbuf:
|
|
kfree(dbuf);
|
|
return ret;
|
|
|
|
@@ -270,6 +255,34 @@ bad:
|
|
goto out;
|
|
}
|
|
|
|
+static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
|
|
+ struct ceph_crypto_key *secret,
|
|
+ void *buf, void *end)
|
|
+{
|
|
+ void *p = buf;
|
|
+ u8 reply_struct_v;
|
|
+ u32 num;
|
|
+ int ret;
|
|
+
|
|
+ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
|
|
+ if (reply_struct_v != 1)
|
|
+ return -EINVAL;
|
|
+
|
|
+ ceph_decode_32_safe(&p, end, num, bad);
|
|
+ dout("%d tickets\n", num);
|
|
+
|
|
+ while (num--) {
|
|
+ ret = process_one_ticket(ac, secret, &p, end);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+bad:
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
|
|
struct ceph_x_ticket_handler *th,
|
|
struct ceph_x_authorizer *au)
|
|
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
|
|
struct ceph_x_ticket_handler *th;
|
|
int ret = 0;
|
|
struct ceph_x_authorize_reply reply;
|
|
+ void *preply = &reply;
|
|
void *p = au->reply_buf;
|
|
void *end = p + sizeof(au->reply_buf);
|
|
|
|
th = get_ticket_handler(ac, au->service);
|
|
if (IS_ERR(th))
|
|
return PTR_ERR(th);
|
|
- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
|
|
+ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
|
|
if (ret < 0)
|
|
return ret;
|
|
if (ret != sizeof(reply))
|
|
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
|
|
index 074bb2a..0a08902 100644
|
|
--- a/net/ceph/crush/mapper.c
|
|
+++ b/net/ceph/crush/mapper.c
|
|
@@ -290,6 +290,7 @@ static int is_out(const struct crush_map *map,
|
|
* @type: the type of item to choose
|
|
* @out: pointer to output vector
|
|
* @outpos: our position in that vector
|
|
+ * @out_size: size of the out vector
|
|
* @tries: number of attempts to make
|
|
* @recurse_tries: number of attempts to have recursive chooseleaf make
|
|
* @local_retries: localized retries
|
|
@@ -302,6 +303,7 @@ static int crush_choose_firstn(const struct crush_map *map,
|
|
const __u32 *weight, int weight_max,
|
|
int x, int numrep, int type,
|
|
int *out, int outpos,
|
|
+ int out_size,
|
|
unsigned int tries,
|
|
unsigned int recurse_tries,
|
|
unsigned int local_retries,
|
|
@@ -318,11 +320,12 @@ static int crush_choose_firstn(const struct crush_map *map,
|
|
int item = 0;
|
|
int itemtype;
|
|
int collide, reject;
|
|
+ int count = out_size;
|
|
|
|
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
|
|
bucket->id, x, outpos, numrep);
|
|
|
|
- for (rep = outpos; rep < numrep; rep++) {
|
|
+ for (rep = outpos; rep < numrep && count > 0 ; rep++) {
|
|
/* keep trying until we get a non-out, non-colliding item */
|
|
ftotal = 0;
|
|
skip_rep = 0;
|
|
@@ -391,7 +394,7 @@ static int crush_choose_firstn(const struct crush_map *map,
|
|
map->buckets[-1-item],
|
|
weight, weight_max,
|
|
x, outpos+1, 0,
|
|
- out2, outpos,
|
|
+ out2, outpos, count,
|
|
recurse_tries, 0,
|
|
local_retries,
|
|
local_fallback_retries,
|
|
@@ -449,6 +452,7 @@ reject:
|
|
dprintk("CHOOSE got %d\n", item);
|
|
out[outpos] = item;
|
|
outpos++;
|
|
+ count--;
|
|
}
|
|
|
|
dprintk("CHOOSE returns %d\n", outpos);
|
|
@@ -640,6 +644,7 @@ int crush_do_rule(const struct crush_map *map,
|
|
__u32 step;
|
|
int i, j;
|
|
int numrep;
|
|
+ int out_size;
|
|
/*
|
|
* the original choose_total_tries value was off by one (it
|
|
* counted "retries" and not "tries"). add one.
|
|
@@ -740,6 +745,7 @@ int crush_do_rule(const struct crush_map *map,
|
|
x, numrep,
|
|
curstep->arg2,
|
|
o+osize, j,
|
|
+ result_max-osize,
|
|
choose_tries,
|
|
recurse_tries,
|
|
choose_local_retries,
|
|
@@ -747,11 +753,13 @@ int crush_do_rule(const struct crush_map *map,
|
|
recurse_to_leaf,
|
|
c+osize);
|
|
} else {
|
|
+ out_size = ((numrep < (result_max-osize)) ?
|
|
+ numrep : (result_max-osize));
|
|
crush_choose_indep(
|
|
map,
|
|
map->buckets[-1-w[i]],
|
|
weight, weight_max,
|
|
- x, numrep, numrep,
|
|
+ x, out_size, numrep,
|
|
curstep->arg2,
|
|
o+osize, j,
|
|
choose_tries,
|
|
@@ -760,7 +768,7 @@ int crush_do_rule(const struct crush_map *map,
|
|
recurse_to_leaf,
|
|
c+osize,
|
|
0);
|
|
- osize += numrep;
|
|
+ osize += out_size;
|
|
}
|
|
}
|
|
|
|
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
|
|
index 6e7a236..06f19b9 100644
|
|
--- a/net/ceph/crypto.c
|
|
+++ b/net/ceph/crypto.c
|
|
@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
|
|
|
|
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
|
|
|
|
+/*
|
|
+ * Should be used for buffers allocated with ceph_kvmalloc().
|
|
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
|
|
+ * in-buffer (msg front).
|
|
+ *
|
|
+ * Dispose of @sgt with teardown_sgtable().
|
|
+ *
|
|
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
|
|
+ * in cases where a single sg is sufficient. No attempt to reduce the
|
|
+ * number of sgs by squeezing physically contiguous pages together is
|
|
+ * made though, for simplicity.
|
|
+ */
|
|
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
|
|
+ const void *buf, unsigned int buf_len)
|
|
+{
|
|
+ struct scatterlist *sg;
|
|
+ const bool is_vmalloc = is_vmalloc_addr(buf);
|
|
+ unsigned int off = offset_in_page(buf);
|
|
+ unsigned int chunk_cnt = 1;
|
|
+ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ if (buf_len == 0) {
|
|
+ memset(sgt, 0, sizeof(*sgt));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (is_vmalloc) {
|
|
+ chunk_cnt = chunk_len >> PAGE_SHIFT;
|
|
+ chunk_len = PAGE_SIZE;
|
|
+ }
|
|
+
|
|
+ if (chunk_cnt > 1) {
|
|
+ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ } else {
|
|
+ WARN_ON(chunk_cnt != 1);
|
|
+ sg_init_table(prealloc_sg, 1);
|
|
+ sgt->sgl = prealloc_sg;
|
|
+ sgt->nents = sgt->orig_nents = 1;
|
|
+ }
|
|
+
|
|
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
|
|
+ struct page *page;
|
|
+ unsigned int len = min(chunk_len - off, buf_len);
|
|
+
|
|
+ if (is_vmalloc)
|
|
+ page = vmalloc_to_page(buf);
|
|
+ else
|
|
+ page = virt_to_page(buf);
|
|
+
|
|
+ sg_set_page(sg, page, len, off);
|
|
+
|
|
+ off = 0;
|
|
+ buf += len;
|
|
+ buf_len -= len;
|
|
+ }
|
|
+ WARN_ON(buf_len != 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void teardown_sgtable(struct sg_table *sgt)
|
|
+{
|
|
+ if (sgt->orig_nents > 1)
|
|
+ sg_free_table(sgt);
|
|
+}
|
|
+
|
|
static int ceph_aes_encrypt(const void *key, int key_len,
|
|
void *dst, size_t *dst_len,
|
|
const void *src, size_t src_len)
|
|
{
|
|
- struct scatterlist sg_in[2], sg_out[1];
|
|
+ struct scatterlist sg_in[2], prealloc_sg;
|
|
+ struct sg_table sg_out;
|
|
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
|
|
struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
|
|
int ret;
|
|
@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
|
|
|
|
*dst_len = src_len + zero_padding;
|
|
|
|
- crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
sg_init_table(sg_in, 2);
|
|
sg_set_buf(&sg_in[0], src, src_len);
|
|
sg_set_buf(&sg_in[1], pad, zero_padding);
|
|
- sg_init_table(sg_out, 1);
|
|
- sg_set_buf(sg_out, dst, *dst_len);
|
|
+ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
|
|
+ if (ret)
|
|
+ goto out_tfm;
|
|
+
|
|
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
iv = crypto_blkcipher_crt(tfm)->iv;
|
|
ivsize = crypto_blkcipher_ivsize(tfm);
|
|
-
|
|
memcpy(iv, aes_iv, ivsize);
|
|
+
|
|
/*
|
|
print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
|
|
key, key_len, 1);
|
|
@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
|
|
print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
|
|
pad, zero_padding, 1);
|
|
*/
|
|
- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
|
|
+ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
|
|
src_len + zero_padding);
|
|
- crypto_free_blkcipher(tfm);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
pr_err("ceph_aes_crypt failed %d\n", ret);
|
|
+ goto out_sg;
|
|
+ }
|
|
/*
|
|
print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
|
|
dst, *dst_len, 1);
|
|
*/
|
|
- return 0;
|
|
+
|
|
+out_sg:
|
|
+ teardown_sgtable(&sg_out);
|
|
+out_tfm:
|
|
+ crypto_free_blkcipher(tfm);
|
|
+ return ret;
|
|
}
|
|
|
|
static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
|
|
@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
|
|
const void *src1, size_t src1_len,
|
|
const void *src2, size_t src2_len)
|
|
{
|
|
- struct scatterlist sg_in[3], sg_out[1];
|
|
+ struct scatterlist sg_in[3], prealloc_sg;
|
|
+ struct sg_table sg_out;
|
|
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
|
|
struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
|
|
int ret;
|
|
@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
|
|
|
|
*dst_len = src1_len + src2_len + zero_padding;
|
|
|
|
- crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
sg_init_table(sg_in, 3);
|
|
sg_set_buf(&sg_in[0], src1, src1_len);
|
|
sg_set_buf(&sg_in[1], src2, src2_len);
|
|
sg_set_buf(&sg_in[2], pad, zero_padding);
|
|
- sg_init_table(sg_out, 1);
|
|
- sg_set_buf(sg_out, dst, *dst_len);
|
|
+ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
|
|
+ if (ret)
|
|
+ goto out_tfm;
|
|
+
|
|
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
iv = crypto_blkcipher_crt(tfm)->iv;
|
|
ivsize = crypto_blkcipher_ivsize(tfm);
|
|
-
|
|
memcpy(iv, aes_iv, ivsize);
|
|
+
|
|
/*
|
|
print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
|
|
key, key_len, 1);
|
|
@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
|
|
print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
|
|
pad, zero_padding, 1);
|
|
*/
|
|
- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
|
|
+ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
|
|
src1_len + src2_len + zero_padding);
|
|
- crypto_free_blkcipher(tfm);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
pr_err("ceph_aes_crypt2 failed %d\n", ret);
|
|
+ goto out_sg;
|
|
+ }
|
|
/*
|
|
print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
|
|
dst, *dst_len, 1);
|
|
*/
|
|
- return 0;
|
|
+
|
|
+out_sg:
|
|
+ teardown_sgtable(&sg_out);
|
|
+out_tfm:
|
|
+ crypto_free_blkcipher(tfm);
|
|
+ return ret;
|
|
}
|
|
|
|
static int ceph_aes_decrypt(const void *key, int key_len,
|
|
void *dst, size_t *dst_len,
|
|
const void *src, size_t src_len)
|
|
{
|
|
- struct scatterlist sg_in[1], sg_out[2];
|
|
+ struct sg_table sg_in;
|
|
+ struct scatterlist sg_out[2], prealloc_sg;
|
|
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
|
|
struct blkcipher_desc desc = { .tfm = tfm };
|
|
char pad[16];
|
|
@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
|
|
if (IS_ERR(tfm))
|
|
return PTR_ERR(tfm);
|
|
|
|
- crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
- sg_init_table(sg_in, 1);
|
|
sg_init_table(sg_out, 2);
|
|
- sg_set_buf(sg_in, src, src_len);
|
|
sg_set_buf(&sg_out[0], dst, *dst_len);
|
|
sg_set_buf(&sg_out[1], pad, sizeof(pad));
|
|
+ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
|
|
+ if (ret)
|
|
+ goto out_tfm;
|
|
|
|
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
iv = crypto_blkcipher_crt(tfm)->iv;
|
|
ivsize = crypto_blkcipher_ivsize(tfm);
|
|
-
|
|
memcpy(iv, aes_iv, ivsize);
|
|
|
|
/*
|
|
@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
|
|
print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
|
|
src, src_len, 1);
|
|
*/
|
|
-
|
|
- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
|
|
- crypto_free_blkcipher(tfm);
|
|
+ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
|
|
if (ret < 0) {
|
|
pr_err("ceph_aes_decrypt failed %d\n", ret);
|
|
- return ret;
|
|
+ goto out_sg;
|
|
}
|
|
|
|
if (src_len <= *dst_len)
|
|
@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
|
|
print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
|
|
dst, *dst_len, 1);
|
|
*/
|
|
- return 0;
|
|
+
|
|
+out_sg:
|
|
+ teardown_sgtable(&sg_in);
|
|
+out_tfm:
|
|
+ crypto_free_blkcipher(tfm);
|
|
+ return ret;
|
|
}
|
|
|
|
static int ceph_aes_decrypt2(const void *key, int key_len,
|
|
@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
|
|
void *dst2, size_t *dst2_len,
|
|
const void *src, size_t src_len)
|
|
{
|
|
- struct scatterlist sg_in[1], sg_out[3];
|
|
+ struct sg_table sg_in;
|
|
+ struct scatterlist sg_out[3], prealloc_sg;
|
|
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
|
|
struct blkcipher_desc desc = { .tfm = tfm };
|
|
char pad[16];
|
|
@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
|
|
if (IS_ERR(tfm))
|
|
return PTR_ERR(tfm);
|
|
|
|
- sg_init_table(sg_in, 1);
|
|
- sg_set_buf(sg_in, src, src_len);
|
|
sg_init_table(sg_out, 3);
|
|
sg_set_buf(&sg_out[0], dst1, *dst1_len);
|
|
sg_set_buf(&sg_out[1], dst2, *dst2_len);
|
|
sg_set_buf(&sg_out[2], pad, sizeof(pad));
|
|
+ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
|
|
+ if (ret)
|
|
+ goto out_tfm;
|
|
|
|
crypto_blkcipher_setkey((void *)tfm, key, key_len);
|
|
iv = crypto_blkcipher_crt(tfm)->iv;
|
|
ivsize = crypto_blkcipher_ivsize(tfm);
|
|
-
|
|
memcpy(iv, aes_iv, ivsize);
|
|
|
|
/*
|
|
@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
|
|
print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
|
|
src, src_len, 1);
|
|
*/
|
|
-
|
|
- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
|
|
- crypto_free_blkcipher(tfm);
|
|
+ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
|
|
if (ret < 0) {
|
|
pr_err("ceph_aes_decrypt failed %d\n", ret);
|
|
- return ret;
|
|
+ goto out_sg;
|
|
}
|
|
|
|
if (src_len <= *dst1_len)
|
|
@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
|
|
dst2, *dst2_len, 1);
|
|
*/
|
|
|
|
- return 0;
|
|
+out_sg:
|
|
+ teardown_sgtable(&sg_in);
|
|
+out_tfm:
|
|
+ crypto_free_blkcipher(tfm);
|
|
+ return ret;
|
|
}
|
|
|
|
|
|
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
|
|
index 988721a..2e87eec 100644
|
|
--- a/net/ceph/messenger.c
|
|
+++ b/net/ceph/messenger.c
|
|
@@ -291,7 +291,11 @@ int ceph_msgr_init(void)
|
|
if (ceph_msgr_slab_init())
|
|
return -ENOMEM;
|
|
|
|
- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
|
|
+ /*
|
|
+ * The number of active work items is limited by the number of
|
|
+ * connections, so leave @max_active at default.
|
|
+ */
|
|
+ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
|
|
if (ceph_msgr_wq)
|
|
return 0;
|
|
|
|
@@ -900,7 +904,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
|
|
BUG_ON(page_count > (int)USHRT_MAX);
|
|
cursor->page_count = (unsigned short)page_count;
|
|
BUG_ON(length > SIZE_MAX - cursor->page_offset);
|
|
- cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
|
|
+ cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
|
|
}
|
|
|
|
static struct page *
|
|
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
|
|
index 2ac9ef3..dbcbf5a 100644
|
|
--- a/net/ceph/mon_client.c
|
|
+++ b/net/ceph/mon_client.c
|
|
@@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
|
|
if (!m) {
|
|
pr_info("alloc_msg unknown type %d\n", type);
|
|
*skip = 1;
|
|
+ } else if (front_len > m->front_alloc_len) {
|
|
+ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
|
|
+ front_len, m->front_alloc_len,
|
|
+ (unsigned int)con->peer_name.type,
|
|
+ le64_to_cpu(con->peer_name.num));
|
|
+ ceph_msg_put(m);
|
|
+ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
|
|
}
|
|
+
|
|
return m;
|
|
}
|
|
|
|
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
|
|
index 0676f2b..5b7ef5b 100644
|
|
--- a/net/ceph/osd_client.c
|
|
+++ b/net/ceph/osd_client.c
|
|
@@ -977,12 +977,24 @@ static void put_osd(struct ceph_osd *osd)
|
|
*/
|
|
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
|
|
{
|
|
- dout("__remove_osd %p\n", osd);
|
|
- BUG_ON(!list_empty(&osd->o_requests));
|
|
- rb_erase(&osd->o_node, &osdc->osds);
|
|
+ dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
|
|
+ WARN_ON(!list_empty(&osd->o_requests));
|
|
+ WARN_ON(!list_empty(&osd->o_linger_requests));
|
|
+
|
|
list_del_init(&osd->o_osd_lru);
|
|
- ceph_con_close(&osd->o_con);
|
|
- put_osd(osd);
|
|
+ rb_erase(&osd->o_node, &osdc->osds);
|
|
+ RB_CLEAR_NODE(&osd->o_node);
|
|
+}
|
|
+
|
|
+static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
|
|
+{
|
|
+ dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
|
|
+
|
|
+ if (!RB_EMPTY_NODE(&osd->o_node)) {
|
|
+ ceph_con_close(&osd->o_con);
|
|
+ __remove_osd(osdc, osd);
|
|
+ put_osd(osd);
|
|
+ }
|
|
}
|
|
|
|
static void remove_all_osds(struct ceph_osd_client *osdc)
|
|
@@ -992,7 +1004,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
|
|
while (!RB_EMPTY_ROOT(&osdc->osds)) {
|
|
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
|
|
struct ceph_osd, o_node);
|
|
- __remove_osd(osdc, osd);
|
|
+ remove_osd(osdc, osd);
|
|
}
|
|
mutex_unlock(&osdc->request_mutex);
|
|
}
|
|
@@ -1022,7 +1034,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
|
|
list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
|
|
if (time_before(jiffies, osd->lru_ttl))
|
|
break;
|
|
- __remove_osd(osdc, osd);
|
|
+ remove_osd(osdc, osd);
|
|
}
|
|
mutex_unlock(&osdc->request_mutex);
|
|
}
|
|
@@ -1037,8 +1049,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
|
|
dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
|
|
if (list_empty(&osd->o_requests) &&
|
|
list_empty(&osd->o_linger_requests)) {
|
|
- __remove_osd(osdc, osd);
|
|
-
|
|
+ remove_osd(osdc, osd);
|
|
return -ENODEV;
|
|
}
|
|
|
|
@@ -1840,6 +1851,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
|
|
{
|
|
struct rb_node *p, *n;
|
|
|
|
+ dout("%s %p\n", __func__, osdc);
|
|
for (p = rb_first(&osdc->osds); p; p = n) {
|
|
struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
|
|
|
|
@@ -1920,20 +1932,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
|
|
err = __map_request(osdc, req,
|
|
force_resend || force_resend_writes);
|
|
dout("__map_request returned %d\n", err);
|
|
- if (err == 0)
|
|
- continue; /* no change and no osd was specified */
|
|
if (err < 0)
|
|
continue; /* hrm! */
|
|
- if (req->r_osd == NULL) {
|
|
- dout("tid %llu maps to no valid osd\n", req->r_tid);
|
|
- needmap++; /* request a newer map */
|
|
- continue;
|
|
- }
|
|
+ if (req->r_osd == NULL || err > 0) {
|
|
+ if (req->r_osd == NULL) {
|
|
+ dout("lingering %p tid %llu maps to no osd\n",
|
|
+ req, req->r_tid);
|
|
+ /*
|
|
+ * A homeless lingering request makes
|
|
+ * no sense, as it's job is to keep
|
|
+ * a particular OSD connection open.
|
|
+ * Request a newer map and kick the
|
|
+ * request, knowing that it won't be
|
|
+ * resent until we actually get a map
|
|
+ * that can tell us where to send it.
|
|
+ */
|
|
+ needmap++;
|
|
+ }
|
|
|
|
- dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
|
|
- req->r_osd ? req->r_osd->o_osd : -1);
|
|
- __register_request(osdc, req);
|
|
- __unregister_linger_request(osdc, req);
|
|
+ dout("kicking lingering %p tid %llu osd%d\n", req,
|
|
+ req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
|
|
+ __register_request(osdc, req);
|
|
+ __unregister_linger_request(osdc, req);
|
|
+ }
|
|
}
|
|
reset_changed_osds(osdc);
|
|
mutex_unlock(&osdc->request_mutex);
|
|
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
|
|
index aade4a5..bde94d8 100644
|
|
--- a/net/ceph/osdmap.c
|
|
+++ b/net/ceph/osdmap.c
|
|
@@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
|
|
{
|
|
int j;
|
|
dout("crush_decode_tree_bucket %p to %p\n", *p, end);
|
|
- ceph_decode_32_safe(p, end, b->num_nodes, bad);
|
|
+ ceph_decode_8_safe(p, end, b->num_nodes, bad);
|
|
b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
|
|
if (b->node_weights == NULL)
|
|
return -ENOMEM;
|
|
diff --git a/net/compat.c b/net/compat.c
|
|
index f50161f..d125290 100644
|
|
--- a/net/compat.c
|
|
+++ b/net/compat.c
|
|
@@ -71,6 +71,13 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
|
|
__get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
|
|
__get_user(kmsg->msg_flags, &umsg->msg_flags))
|
|
return -EFAULT;
|
|
+
|
|
+ if (!tmp1)
|
|
+ kmsg->msg_namelen = 0;
|
|
+
|
|
+ if (kmsg->msg_namelen < 0)
|
|
+ return -EINVAL;
|
|
+
|
|
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
|
|
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
|
|
kmsg->msg_name = compat_ptr(tmp1);
|
|
@@ -85,7 +92,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
|
|
{
|
|
int tot_len;
|
|
|
|
- if (kern_msg->msg_namelen) {
|
|
+ if (kern_msg->msg_name && kern_msg->msg_namelen) {
|
|
if (mode == VERIFY_READ) {
|
|
int err = move_addr_to_kernel(kern_msg->msg_name,
|
|
kern_msg->msg_namelen,
|
|
@@ -93,10 +100,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
|
|
if (err < 0)
|
|
return err;
|
|
}
|
|
- if (kern_msg->msg_name)
|
|
- kern_msg->msg_name = kern_address;
|
|
- } else
|
|
+ kern_msg->msg_name = kern_address;
|
|
+ } else {
|
|
kern_msg->msg_name = NULL;
|
|
+ kern_msg->msg_namelen = 0;
|
|
+ }
|
|
|
|
tot_len = iov_from_user_compat_to_kern(kern_iov,
|
|
(struct compat_iovec __user *)kern_msg->msg_iov,
|
|
@@ -737,24 +745,18 @@ static unsigned char nas[21] = {
|
|
|
|
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
|
|
{
|
|
- if (flags & MSG_CMSG_COMPAT)
|
|
- return -EINVAL;
|
|
return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
|
|
}
|
|
|
|
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
|
|
unsigned int vlen, unsigned int flags)
|
|
{
|
|
- if (flags & MSG_CMSG_COMPAT)
|
|
- return -EINVAL;
|
|
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
|
|
flags | MSG_CMSG_COMPAT);
|
|
}
|
|
|
|
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
|
|
{
|
|
- if (flags & MSG_CMSG_COMPAT)
|
|
- return -EINVAL;
|
|
return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
|
|
}
|
|
|
|
@@ -777,9 +779,6 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
|
|
int datagrams;
|
|
struct timespec ktspec;
|
|
|
|
- if (flags & MSG_CMSG_COMPAT)
|
|
- return -EINVAL;
|
|
-
|
|
if (timeout == NULL)
|
|
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
|
|
flags | MSG_CMSG_COMPAT, NULL);
|
|
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 62f13f3..47a95f1 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -940,7 +940,7 @@ bool dev_valid_name(const char *name)
|
|
return false;
|
|
|
|
while (*name) {
|
|
- if (*name == '/' || isspace(*name))
|
|
+ if (*name == '/' || *name == ':' || isspace(*name))
|
|
return false;
|
|
name++;
|
|
}
|
|
@@ -1709,6 +1709,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
|
|
|
|
skb_scrub_packet(skb, true);
|
|
skb->protocol = eth_type_trans(skb, dev);
|
|
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
|
|
|
|
return netif_rx_internal(skb);
|
|
}
|
|
@@ -2529,11 +2530,14 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
|
|
if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
|
|
features &= ~NETIF_F_GSO_MASK;
|
|
|
|
- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
|
|
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
|
|
- protocol = veh->h_vlan_encapsulated_proto;
|
|
- } else if (!vlan_tx_tag_present(skb)) {
|
|
- return harmonize_features(skb, dev, features);
|
|
+ if (!vlan_tx_tag_present(skb)) {
|
|
+ if (unlikely(protocol == htons(ETH_P_8021Q) ||
|
|
+ protocol == htons(ETH_P_8021AD))) {
|
|
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
|
|
+ protocol = veh->h_vlan_encapsulated_proto;
|
|
+ } else {
|
|
+ return harmonize_features(skb, dev, features);
|
|
+ }
|
|
}
|
|
|
|
features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
|
|
@@ -2771,7 +2775,9 @@ static void skb_update_prio(struct sk_buff *skb)
|
|
#define skb_update_prio(skb)
|
|
#endif
|
|
|
|
-static DEFINE_PER_CPU(int, xmit_recursion);
|
|
+DEFINE_PER_CPU(int, xmit_recursion);
|
|
+EXPORT_SYMBOL(xmit_recursion);
|
|
+
|
|
#define RECURSION_LIMIT 10
|
|
|
|
/**
|
|
@@ -3554,7 +3560,7 @@ another_round:
|
|
|
|
if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
|
|
skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
|
|
- skb = vlan_untag(skb);
|
|
+ skb = skb_vlan_untag(skb);
|
|
if (unlikely(!skb))
|
|
goto unlock;
|
|
}
|
|
@@ -4701,9 +4707,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
|
|
sysfs_remove_link(&(dev->dev.kobj), linkname);
|
|
}
|
|
|
|
-#define netdev_adjacent_is_neigh_list(dev, dev_list) \
|
|
- (dev_list == &dev->adj_list.upper || \
|
|
- dev_list == &dev->adj_list.lower)
|
|
+static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
|
|
+ struct net_device *adj_dev,
|
|
+ struct list_head *dev_list)
|
|
+{
|
|
+ return (dev_list == &dev->adj_list.upper ||
|
|
+ dev_list == &dev->adj_list.lower) &&
|
|
+ net_eq(dev_net(dev), dev_net(adj_dev));
|
|
+}
|
|
|
|
static int __netdev_adjacent_dev_insert(struct net_device *dev,
|
|
struct net_device *adj_dev,
|
|
@@ -4733,7 +4744,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
|
|
pr_debug("dev_hold for %s, because of link added from %s to %s\n",
|
|
adj_dev->name, dev->name, adj_dev->name);
|
|
|
|
- if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
|
|
+ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
|
|
ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
|
|
if (ret)
|
|
goto free_adj;
|
|
@@ -4754,7 +4765,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
|
|
return 0;
|
|
|
|
remove_symlinks:
|
|
- if (netdev_adjacent_is_neigh_list(dev, dev_list))
|
|
+ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
|
|
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
|
|
free_adj:
|
|
kfree(adj);
|
|
@@ -4787,7 +4798,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
|
|
if (adj->master)
|
|
sysfs_remove_link(&(dev->dev.kobj), "master");
|
|
|
|
- if (netdev_adjacent_is_neigh_list(dev, dev_list))
|
|
+ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
|
|
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
|
|
|
|
list_del_rcu(&adj->list);
|
|
@@ -4892,7 +4903,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
|
|
if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
|
|
return -EBUSY;
|
|
|
|
- if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
|
|
+ if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
|
|
return -EEXIST;
|
|
|
|
if (master && netdev_master_upper_dev_get(dev))
|
|
@@ -5057,11 +5068,65 @@ void netdev_upper_dev_unlink(struct net_device *dev,
|
|
}
|
|
EXPORT_SYMBOL(netdev_upper_dev_unlink);
|
|
|
|
+void netdev_adjacent_add_links(struct net_device *dev)
|
|
+{
|
|
+ struct netdev_adjacent *iter;
|
|
+
|
|
+ struct net *net = dev_net(dev);
|
|
+
|
|
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
|
|
+ if (!net_eq(net,dev_net(iter->dev)))
|
|
+ continue;
|
|
+ netdev_adjacent_sysfs_add(iter->dev, dev,
|
|
+ &iter->dev->adj_list.lower);
|
|
+ netdev_adjacent_sysfs_add(dev, iter->dev,
|
|
+ &dev->adj_list.upper);
|
|
+ }
|
|
+
|
|
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
|
|
+ if (!net_eq(net,dev_net(iter->dev)))
|
|
+ continue;
|
|
+ netdev_adjacent_sysfs_add(iter->dev, dev,
|
|
+ &iter->dev->adj_list.upper);
|
|
+ netdev_adjacent_sysfs_add(dev, iter->dev,
|
|
+ &dev->adj_list.lower);
|
|
+ }
|
|
+}
|
|
+
|
|
+void netdev_adjacent_del_links(struct net_device *dev)
|
|
+{
|
|
+ struct netdev_adjacent *iter;
|
|
+
|
|
+ struct net *net = dev_net(dev);
|
|
+
|
|
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
|
|
+ if (!net_eq(net,dev_net(iter->dev)))
|
|
+ continue;
|
|
+ netdev_adjacent_sysfs_del(iter->dev, dev->name,
|
|
+ &iter->dev->adj_list.lower);
|
|
+ netdev_adjacent_sysfs_del(dev, iter->dev->name,
|
|
+ &dev->adj_list.upper);
|
|
+ }
|
|
+
|
|
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
|
|
+ if (!net_eq(net,dev_net(iter->dev)))
|
|
+ continue;
|
|
+ netdev_adjacent_sysfs_del(iter->dev, dev->name,
|
|
+ &iter->dev->adj_list.upper);
|
|
+ netdev_adjacent_sysfs_del(dev, iter->dev->name,
|
|
+ &dev->adj_list.lower);
|
|
+ }
|
|
+}
|
|
+
|
|
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
|
|
{
|
|
struct netdev_adjacent *iter;
|
|
|
|
+ struct net *net = dev_net(dev);
|
|
+
|
|
list_for_each_entry(iter, &dev->adj_list.upper, list) {
|
|
+ if (!net_eq(net,dev_net(iter->dev)))
|
|
+ continue;
|
|
netdev_adjacent_sysfs_del(iter->dev, oldname,
|
|
&iter->dev->adj_list.lower);
|
|
netdev_adjacent_sysfs_add(iter->dev, dev,
|
|
@@ -5069,6 +5134,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
|
|
}
|
|
|
|
list_for_each_entry(iter, &dev->adj_list.lower, list) {
|
|
+ if (!net_eq(net,dev_net(iter->dev)))
|
|
+ continue;
|
|
netdev_adjacent_sysfs_del(iter->dev, oldname,
|
|
&iter->dev->adj_list.upper);
|
|
netdev_adjacent_sysfs_add(iter->dev, dev,
|
|
@@ -6675,6 +6742,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
|
|
|
|
/* Send a netdev-removed uevent to the old namespace */
|
|
kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
|
|
+ netdev_adjacent_del_links(dev);
|
|
|
|
/* Actually switch the network namespace */
|
|
dev_net_set(dev, net);
|
|
@@ -6689,6 +6757,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
|
|
|
|
/* Send a netdev-add uevent to the new namespace */
|
|
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
|
|
+ netdev_adjacent_add_links(dev);
|
|
|
|
/* Fixup kobjects */
|
|
err = device_rename(&dev->dev, dev->name);
|
|
@@ -6745,10 +6814,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
|
|
oldsd->output_queue = NULL;
|
|
oldsd->output_queue_tailp = &oldsd->output_queue;
|
|
}
|
|
- /* Append NAPI poll list from offline CPU. */
|
|
- if (!list_empty(&oldsd->poll_list)) {
|
|
- list_splice_init(&oldsd->poll_list, &sd->poll_list);
|
|
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
|
|
+ /* Append NAPI poll list from offline CPU, with one exception :
|
|
+ * process_backlog() must be called by cpu owning percpu backlog.
|
|
+ * We properly handle process_queue & input_pkt_queue later.
|
|
+ */
|
|
+ while (!list_empty(&oldsd->poll_list)) {
|
|
+ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
|
|
+ struct napi_struct,
|
|
+ poll_list);
|
|
+
|
|
+ list_del_init(&napi->poll_list);
|
|
+ if (napi->poll == process_backlog)
|
|
+ napi->state = 0;
|
|
+ else
|
|
+ ____napi_schedule(sd, napi);
|
|
}
|
|
|
|
raise_softirq_irqoff(NET_TX_SOFTIRQ);
|
|
@@ -6759,7 +6838,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
|
|
netif_rx_internal(skb);
|
|
input_queue_head_incr(oldsd);
|
|
}
|
|
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
|
|
+ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
|
|
netif_rx_internal(skb);
|
|
input_queue_head_incr(oldsd);
|
|
}
|
|
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
|
|
index 9d3d9e7..372ac66 100644
|
|
--- a/net/core/gen_stats.c
|
|
+++ b/net/core/gen_stats.c
|
|
@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
|
|
return 0;
|
|
|
|
nla_put_failure:
|
|
+ kfree(d->xstats);
|
|
+ d->xstats = NULL;
|
|
+ d->xstats_len = 0;
|
|
spin_unlock_bh(d->lock);
|
|
return -1;
|
|
}
|
|
@@ -217,7 +220,9 @@ int
|
|
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
|
|
{
|
|
if (d->compat_xstats) {
|
|
- d->xstats = st;
|
|
+ d->xstats = kmemdup(st, len, GFP_ATOMIC);
|
|
+ if (!d->xstats)
|
|
+ goto err_out;
|
|
d->xstats_len = len;
|
|
}
|
|
|
|
@@ -225,6 +230,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
|
|
return gnet_stats_copy(d, TCA_STATS_APP, st, len);
|
|
|
|
return 0;
|
|
+
|
|
+err_out:
|
|
+ d->xstats_len = 0;
|
|
+ spin_unlock_bh(d->lock);
|
|
+ return -1;
|
|
}
|
|
EXPORT_SYMBOL(gnet_stats_copy_app);
|
|
|
|
@@ -257,6 +267,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
|
|
return -1;
|
|
}
|
|
|
|
+ kfree(d->xstats);
|
|
+ d->xstats = NULL;
|
|
+ d->xstats_len = 0;
|
|
spin_unlock_bh(d->lock);
|
|
return 0;
|
|
}
|
|
diff --git a/net/core/iovec.c b/net/core/iovec.c
|
|
index b618694..26dc006 100644
|
|
--- a/net/core/iovec.c
|
|
+++ b/net/core/iovec.c
|
|
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
|
|
{
|
|
int size, ct, err;
|
|
|
|
- if (m->msg_namelen) {
|
|
+ if (m->msg_name && m->msg_namelen) {
|
|
if (mode == VERIFY_READ) {
|
|
void __user *namep;
|
|
namep = (void __user __force *) m->msg_name;
|
|
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
|
|
if (err < 0)
|
|
return err;
|
|
}
|
|
- if (m->msg_name)
|
|
- m->msg_name = address;
|
|
+ m->msg_name = address;
|
|
} else {
|
|
m->msg_name = NULL;
|
|
+ m->msg_namelen = 0;
|
|
}
|
|
|
|
size = m->msg_iovlen * sizeof(struct iovec);
|
|
@@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
|
|
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
|
|
int offset, int len)
|
|
{
|
|
+ /* No data? Done! */
|
|
+ if (len == 0)
|
|
+ return 0;
|
|
+
|
|
/* Skip over the finished iovecs */
|
|
while (offset >= iov->iov_len) {
|
|
offset -= iov->iov_len;
|
|
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
|
|
index 7d95f69..0f062c6 100644
|
|
--- a/net/core/neighbour.c
|
|
+++ b/net/core/neighbour.c
|
|
@@ -976,6 +976,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
|
|
rc = 0;
|
|
if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
|
|
goto out_unlock_bh;
|
|
+ if (neigh->dead)
|
|
+ goto out_dead;
|
|
|
|
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
|
|
if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
|
|
@@ -1032,6 +1034,13 @@ out_unlock_bh:
|
|
write_unlock(&neigh->lock);
|
|
local_bh_enable();
|
|
return rc;
|
|
+
|
|
+out_dead:
|
|
+ if (neigh->nud_state & NUD_STALE)
|
|
+ goto out_unlock_bh;
|
|
+ write_unlock_bh(&neigh->lock);
|
|
+ kfree_skb(skb);
|
|
+ return 1;
|
|
}
|
|
EXPORT_SYMBOL(__neigh_event_send);
|
|
|
|
@@ -1095,6 +1104,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
|
|
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
|
|
(old & (NUD_NOARP | NUD_PERMANENT)))
|
|
goto out;
|
|
+ if (neigh->dead)
|
|
+ goto out;
|
|
|
|
if (!(new & NUD_VALID)) {
|
|
neigh_del_timer(neigh);
|
|
@@ -1244,6 +1255,8 @@ EXPORT_SYMBOL(neigh_update);
|
|
*/
|
|
void __neigh_set_probe_once(struct neighbour *neigh)
|
|
{
|
|
+ if (neigh->dead)
|
|
+ return;
|
|
neigh->updated = jiffies;
|
|
if (!(neigh->nud_state & NUD_FAILED))
|
|
return;
|
|
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
|
|
index df9e6b1..723fa7d 100644
|
|
--- a/net/core/netpoll.c
|
|
+++ b/net/core/netpoll.c
|
|
@@ -788,7 +788,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
|
|
}
|
|
|
|
if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
|
|
- skb = vlan_untag(skb);
|
|
+ skb = skb_vlan_untag(skb);
|
|
if (unlikely(!skb))
|
|
goto out;
|
|
}
|
|
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
|
|
index fdac61c..ca68d32 100644
|
|
--- a/net/core/pktgen.c
|
|
+++ b/net/core/pktgen.c
|
|
@@ -2812,25 +2812,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
|
|
skb->dev = odev;
|
|
skb->pkt_type = PACKET_HOST;
|
|
|
|
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
|
|
+
|
|
if (!(pkt_dev->flags & F_UDPCSUM)) {
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
} else if (odev->features & NETIF_F_V4_CSUM) {
|
|
skb->ip_summed = CHECKSUM_PARTIAL;
|
|
skb->csum = 0;
|
|
- udp4_hwcsum(skb, udph->source, udph->dest);
|
|
+ udp4_hwcsum(skb, iph->saddr, iph->daddr);
|
|
} else {
|
|
- __wsum csum = udp_csum(skb);
|
|
+ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
|
|
|
|
/* add protocol-dependent pseudo-header */
|
|
- udph->check = csum_tcpudp_magic(udph->source, udph->dest,
|
|
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
|
|
datalen + 8, IPPROTO_UDP, csum);
|
|
|
|
if (udph->check == 0)
|
|
udph->check = CSUM_MANGLED_0;
|
|
}
|
|
|
|
- pktgen_finalize_skb(pkt_dev, skb, datalen);
|
|
-
|
|
#ifdef CONFIG_XFRM
|
|
if (!process_ipsec(pkt_dev, skb, protocol))
|
|
return NULL;
|
|
@@ -2946,6 +2946,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
|
|
skb->dev = odev;
|
|
skb->pkt_type = PACKET_HOST;
|
|
|
|
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
|
|
+
|
|
if (!(pkt_dev->flags & F_UDPCSUM)) {
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
} else if (odev->features & NETIF_F_V6_CSUM) {
|
|
@@ -2954,7 +2956,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
|
|
skb->csum_offset = offsetof(struct udphdr, check);
|
|
udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
|
|
} else {
|
|
- __wsum csum = udp_csum(skb);
|
|
+ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
|
|
|
|
/* add protocol-dependent pseudo-header */
|
|
udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
|
|
@@ -2963,8 +2965,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
|
|
udph->check = CSUM_MANGLED_0;
|
|
}
|
|
|
|
- pktgen_finalize_skb(pkt_dev, skb, datalen);
|
|
-
|
|
return skb;
|
|
}
|
|
|
|
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
|
|
index f19d2f2..c1bfd5a 100644
|
|
--- a/net/core/rtnetlink.c
|
|
+++ b/net/core/rtnetlink.c
|
|
@@ -799,7 +799,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
|
|
(nla_total_size(sizeof(struct ifla_vf_mac)) +
|
|
nla_total_size(sizeof(struct ifla_vf_vlan)) +
|
|
nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
|
|
- nla_total_size(sizeof(struct ifla_vf_spoofchk)));
|
|
+ nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
|
|
+ nla_total_size(sizeof(struct ifla_vf_link_state)));
|
|
return size;
|
|
} else
|
|
return 0;
|
|
@@ -1196,14 +1197,10 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
|
|
};
|
|
|
|
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
|
|
- [IFLA_VF_MAC] = { .type = NLA_BINARY,
|
|
- .len = sizeof(struct ifla_vf_mac) },
|
|
- [IFLA_VF_VLAN] = { .type = NLA_BINARY,
|
|
- .len = sizeof(struct ifla_vf_vlan) },
|
|
- [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
|
|
- .len = sizeof(struct ifla_vf_tx_rate) },
|
|
- [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
|
|
- .len = sizeof(struct ifla_vf_spoofchk) },
|
|
+ [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
|
|
+ [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
|
|
+ [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
|
|
+ [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
|
|
};
|
|
|
|
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
|
|
@@ -1440,6 +1437,7 @@ static int do_setlink(const struct sk_buff *skb,
|
|
goto errout;
|
|
}
|
|
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
|
|
+ put_net(net);
|
|
err = -EPERM;
|
|
goto errout;
|
|
}
|
|
@@ -2020,8 +2018,16 @@ replay:
|
|
}
|
|
}
|
|
err = rtnl_configure_link(dev, ifm);
|
|
- if (err < 0)
|
|
- unregister_netdevice(dev);
|
|
+ if (err < 0) {
|
|
+ if (ops->newlink) {
|
|
+ LIST_HEAD(list_kill);
|
|
+
|
|
+ ops->dellink(dev, &list_kill);
|
|
+ unregister_netdevice_many(&list_kill);
|
|
+ } else {
|
|
+ unregister_netdevice(dev);
|
|
+ }
|
|
+ }
|
|
out:
|
|
put_net(dest_net);
|
|
return err;
|
|
@@ -2635,12 +2641,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
|
|
goto errout;
|
|
}
|
|
|
|
+ if (!skb->len)
|
|
+ goto errout;
|
|
+
|
|
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
|
|
return 0;
|
|
errout:
|
|
WARN_ON(err == -EMSGSIZE);
|
|
kfree_skb(skb);
|
|
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
|
|
+ if (err)
|
|
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
|
|
index 897da56..ba71212 100644
|
|
--- a/net/core/secure_seq.c
|
|
+++ b/net/core/secure_seq.c
|
|
@@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
|
|
#endif
|
|
|
|
#ifdef CONFIG_INET
|
|
-__u32 secure_ip_id(__be32 daddr)
|
|
-{
|
|
- u32 hash[MD5_DIGEST_WORDS];
|
|
-
|
|
- net_secret_init();
|
|
- hash[0] = (__force __u32) daddr;
|
|
- hash[1] = net_secret[13];
|
|
- hash[2] = net_secret[14];
|
|
- hash[3] = net_secret[15];
|
|
-
|
|
- md5_transform(hash, net_secret);
|
|
-
|
|
- return hash[0];
|
|
-}
|
|
-
|
|
-__u32 secure_ipv6_id(const __be32 daddr[4])
|
|
-{
|
|
- __u32 hash[4];
|
|
-
|
|
- net_secret_init();
|
|
- memcpy(hash, daddr, 16);
|
|
- md5_transform(hash, net_secret);
|
|
-
|
|
- return hash[0];
|
|
-}
|
|
|
|
__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
|
|
__be16 sport, __be16 dport)
|
|
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
|
index 7f2e1fc..8207f8d 100644
|
|
--- a/net/core/skbuff.c
|
|
+++ b/net/core/skbuff.c
|
|
@@ -62,6 +62,7 @@
|
|
#include <linux/scatterlist.h>
|
|
#include <linux/errqueue.h>
|
|
#include <linux/prefetch.h>
|
|
+#include <linux/if_vlan.h>
|
|
|
|
#include <net/protocol.h>
|
|
#include <net/dst.h>
|
|
@@ -277,13 +278,14 @@ nodata:
|
|
EXPORT_SYMBOL(__alloc_skb);
|
|
|
|
/**
|
|
- * build_skb - build a network buffer
|
|
+ * __build_skb - build a network buffer
|
|
* @data: data buffer provided by caller
|
|
- * @frag_size: size of fragment, or 0 if head was kmalloced
|
|
+ * @frag_size: size of data, or 0 if head was kmalloced
|
|
*
|
|
* Allocate a new &sk_buff. Caller provides space holding head and
|
|
* skb_shared_info. @data must have been allocated by kmalloc() only if
|
|
- * @frag_size is 0, otherwise data should come from the page allocator.
|
|
+ * @frag_size is 0, otherwise data should come from the page allocator
|
|
+ * or vmalloc()
|
|
* The return is the new skb buffer.
|
|
* On a failure the return is %NULL, and @data is not freed.
|
|
* Notes :
|
|
@@ -294,7 +296,7 @@ EXPORT_SYMBOL(__alloc_skb);
|
|
* before giving packet to stack.
|
|
* RX rings only contains data buffers, not full skbs.
|
|
*/
|
|
-struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
|
+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
|
|
{
|
|
struct skb_shared_info *shinfo;
|
|
struct sk_buff *skb;
|
|
@@ -308,7 +310,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
|
|
|
memset(skb, 0, offsetof(struct sk_buff, tail));
|
|
skb->truesize = SKB_TRUESIZE(size);
|
|
- skb->head_frag = frag_size != 0;
|
|
atomic_set(&skb->users, 1);
|
|
skb->head = data;
|
|
skb->data = data;
|
|
@@ -325,6 +326,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
|
|
|
return skb;
|
|
}
|
|
+
|
|
+/* build_skb() is wrapper over __build_skb(), that specifically
|
|
+ * takes care of skb->head and skb->pfmemalloc
|
|
+ * This means that if @frag_size is not zero, then @data must be backed
|
|
+ * by a page fragment, not kmalloc() or vmalloc()
|
|
+ */
|
|
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
|
+{
|
|
+ struct sk_buff *skb = __build_skb(data, frag_size);
|
|
+
|
|
+ if (skb && frag_size) {
|
|
+ skb->head_frag = 1;
|
|
+ if (virt_to_head_page(data)->pfmemalloc)
|
|
+ skb->pfmemalloc = 1;
|
|
+ }
|
|
+ return skb;
|
|
+}
|
|
EXPORT_SYMBOL(build_skb);
|
|
|
|
struct netdev_alloc_cache {
|
|
@@ -350,8 +368,11 @@ refill:
|
|
for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
|
|
gfp_t gfp = gfp_mask;
|
|
|
|
- if (order)
|
|
- gfp |= __GFP_COMP | __GFP_NOWARN;
|
|
+ if (order) {
|
|
+ gfp |= __GFP_COMP | __GFP_NOWARN |
|
|
+ __GFP_NOMEMALLOC;
|
|
+ gfp &= ~__GFP_WAIT;
|
|
+ }
|
|
nc->frag.page = alloc_pages(gfp, order);
|
|
if (likely(nc->frag.page))
|
|
break;
|
|
@@ -2968,9 +2989,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
|
|
tail = nskb;
|
|
|
|
__copy_skb_header(nskb, head_skb);
|
|
- nskb->mac_len = head_skb->mac_len;
|
|
|
|
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
|
|
+ skb_reset_mac_len(nskb);
|
|
|
|
skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
|
|
nskb->data - tnl_hlen,
|
|
@@ -3139,6 +3160,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
|
|
NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
|
|
goto done;
|
|
}
|
|
+ /* switch back to head shinfo */
|
|
+ pinfo = skb_shinfo(p);
|
|
+
|
|
if (pinfo->frag_list)
|
|
goto merge;
|
|
if (skb_gro_len(p) != pinfo->gso_size)
|
|
@@ -3933,6 +3957,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
|
|
skb->local_df = 0;
|
|
skb_dst_drop(skb);
|
|
skb->mark = 0;
|
|
+ skb_init_secmark(skb);
|
|
secpath_reset(skb);
|
|
nf_reset(skb);
|
|
nf_reset_trace(skb);
|
|
@@ -3963,3 +3988,55 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
|
|
return shinfo->gso_size;
|
|
}
|
|
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
|
|
+
|
|
+static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
|
|
+{
|
|
+ if (skb_cow(skb, skb_headroom(skb)) < 0) {
|
|
+ kfree_skb(skb);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
|
|
+ skb->mac_header += VLAN_HLEN;
|
|
+ return skb;
|
|
+}
|
|
+
|
|
+struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
|
|
+{
|
|
+ struct vlan_hdr *vhdr;
|
|
+ u16 vlan_tci;
|
|
+
|
|
+ if (unlikely(vlan_tx_tag_present(skb))) {
|
|
+ /* vlan_tci is already set-up so leave this for another time */
|
|
+ return skb;
|
|
+ }
|
|
+
|
|
+ skb = skb_share_check(skb, GFP_ATOMIC);
|
|
+ if (unlikely(!skb))
|
|
+ goto err_free;
|
|
+
|
|
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
|
|
+ goto err_free;
|
|
+
|
|
+ vhdr = (struct vlan_hdr *)skb->data;
|
|
+ vlan_tci = ntohs(vhdr->h_vlan_TCI);
|
|
+ __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
|
|
+
|
|
+ skb_pull_rcsum(skb, VLAN_HLEN);
|
|
+ vlan_set_encap_proto(skb, vhdr);
|
|
+
|
|
+ skb = skb_reorder_vlan_header(skb);
|
|
+ if (unlikely(!skb))
|
|
+ goto err_free;
|
|
+
|
|
+ skb_reset_network_header(skb);
|
|
+ skb_reset_transport_header(skb);
|
|
+ skb_reset_mac_len(skb);
|
|
+
|
|
+ return skb;
|
|
+
|
|
+err_free:
|
|
+ kfree_skb(skb);
|
|
+ return NULL;
|
|
+}
|
|
+EXPORT_SYMBOL(skb_vlan_untag);
|
|
diff --git a/net/core/sock.c b/net/core/sock.c
|
|
index c806956..8ebfa52 100644
|
|
--- a/net/core/sock.c
|
|
+++ b/net/core/sock.c
|
|
@@ -659,6 +659,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
|
|
sock_reset_flag(sk, bit);
|
|
}
|
|
|
|
+bool sk_mc_loop(struct sock *sk)
|
|
+{
|
|
+ if (dev_recursion_level())
|
|
+ return false;
|
|
+ if (!sk)
|
|
+ return true;
|
|
+ switch (sk->sk_family) {
|
|
+ case AF_INET:
|
|
+ return inet_sk(sk)->mc_loop;
|
|
+#if IS_ENABLED(CONFIG_IPV6)
|
|
+ case AF_INET6:
|
|
+ return inet6_sk(sk)->mc_loop;
|
|
+#endif
|
|
+ }
|
|
+ WARN_ON(1);
|
|
+ return true;
|
|
+}
|
|
+EXPORT_SYMBOL(sk_mc_loop);
|
|
+
|
|
/*
|
|
* This is meant for all protocols to use and covers goings on
|
|
* at the socket level. Everything here is generic.
|
|
@@ -1895,8 +1914,10 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
|
|
do {
|
|
gfp_t gfp = prio;
|
|
|
|
- if (order)
|
|
+ if (order) {
|
|
gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
|
|
+ gfp &= ~__GFP_WAIT;
|
|
+ }
|
|
pfrag->page = alloc_pages(gfp, order);
|
|
if (likely(pfrag->page)) {
|
|
pfrag->offset = 0;
|
|
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
|
|
index cf9cd13..e731c96 100644
|
|
--- a/net/core/sysctl_net_core.c
|
|
+++ b/net/core/sysctl_net_core.c
|
|
@@ -25,6 +25,8 @@
|
|
static int zero = 0;
|
|
static int one = 1;
|
|
static int ushort_max = USHRT_MAX;
|
|
+static int min_sndbuf = SOCK_MIN_SNDBUF;
|
|
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
|
|
|
|
#ifdef CONFIG_RPS
|
|
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
|
|
@@ -223,7 +225,7 @@ static struct ctl_table net_core_table[] = {
|
|
.maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_minmax,
|
|
- .extra1 = &one,
|
|
+ .extra1 = &min_sndbuf,
|
|
},
|
|
{
|
|
.procname = "rmem_max",
|
|
@@ -231,7 +233,7 @@ static struct ctl_table net_core_table[] = {
|
|
.maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_minmax,
|
|
- .extra1 = &one,
|
|
+ .extra1 = &min_rcvbuf,
|
|
},
|
|
{
|
|
.procname = "wmem_default",
|
|
@@ -239,7 +241,7 @@ static struct ctl_table net_core_table[] = {
|
|
.maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_minmax,
|
|
- .extra1 = &one,
|
|
+ .extra1 = &min_sndbuf,
|
|
},
|
|
{
|
|
.procname = "rmem_default",
|
|
@@ -247,7 +249,7 @@ static struct ctl_table net_core_table[] = {
|
|
.maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_minmax,
|
|
- .extra1 = &one,
|
|
+ .extra1 = &min_rcvbuf,
|
|
},
|
|
{
|
|
.procname = "dev_weight",
|
|
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
|
|
index 07bd8ed..951fe55 100644
|
|
--- a/net/ipv4/af_inet.c
|
|
+++ b/net/ipv4/af_inet.c
|
|
@@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int backlog)
|
|
err = 0;
|
|
if (err)
|
|
goto out;
|
|
+
|
|
+ tcp_fastopen_init_key_once(true);
|
|
}
|
|
err = inet_csk_listen_start(sk, backlog);
|
|
if (err)
|
|
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
|
|
index f2e1573..8f7bd56 100644
|
|
--- a/net/ipv4/fib_rules.c
|
|
+++ b/net/ipv4/fib_rules.c
|
|
@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
|
|
else
|
|
res->tclassid = 0;
|
|
#endif
|
|
+
|
|
+ if (err == -ESRCH)
|
|
+ err = -ENETUNREACH;
|
|
+
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL_GPL(__fib_lookup);
|
|
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
|
|
index 9d43468..017fa5e 100644
|
|
--- a/net/ipv4/fib_semantics.c
|
|
+++ b/net/ipv4/fib_semantics.c
|
|
@@ -535,7 +535,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
|
|
return 1;
|
|
|
|
attrlen = rtnh_attrlen(rtnh);
|
|
- if (attrlen < 0) {
|
|
+ if (attrlen > 0) {
|
|
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
|
|
|
|
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
|
|
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
|
|
index 2d24f29..278836f 100644
|
|
--- a/net/ipv4/gre_offload.c
|
|
+++ b/net/ipv4/gre_offload.c
|
|
@@ -50,7 +50,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
|
|
|
|
greh = (struct gre_base_hdr *)skb_transport_header(skb);
|
|
|
|
- ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
|
|
+ ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
|
|
if (unlikely(ghl < sizeof(*greh)))
|
|
goto out;
|
|
|
|
@@ -271,6 +271,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
|
|
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
|
|
|
|
rcu_read_unlock();
|
|
+
|
|
+ skb_set_inner_mac_header(skb, nhoff + grehlen);
|
|
+
|
|
return err;
|
|
}
|
|
|
|
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
|
|
index 9db3b87..0ffcd4d 100644
|
|
--- a/net/ipv4/igmp.c
|
|
+++ b/net/ipv4/igmp.c
|
|
@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
|
|
pip->saddr = fl4.saddr;
|
|
pip->protocol = IPPROTO_IGMP;
|
|
pip->tot_len = 0; /* filled in later */
|
|
- ip_select_ident(skb, &rt->dst, NULL);
|
|
+ ip_select_ident(skb, NULL);
|
|
((u8 *)&pip[1])[0] = IPOPT_RA;
|
|
((u8 *)&pip[1])[1] = 4;
|
|
((u8 *)&pip[1])[2] = 0;
|
|
@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
|
|
iph->daddr = dst;
|
|
iph->saddr = fl4.saddr;
|
|
iph->protocol = IPPROTO_IGMP;
|
|
- ip_select_ident(skb, &rt->dst, NULL);
|
|
+ ip_select_ident(skb, NULL);
|
|
((u8 *)&iph[1])[0] = IPOPT_RA;
|
|
((u8 *)&iph[1])[1] = 4;
|
|
((u8 *)&iph[1])[2] = 0;
|
|
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
|
|
index e34dccb..4eeba4e 100644
|
|
--- a/net/ipv4/inet_diag.c
|
|
+++ b/net/ipv4/inet_diag.c
|
|
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
|
|
mutex_unlock(&inet_diag_table_mutex);
|
|
}
|
|
|
|
+static size_t inet_sk_attr_size(void)
|
|
+{
|
|
+ return nla_total_size(sizeof(struct tcp_info))
|
|
+ + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
|
|
+ + nla_total_size(1) /* INET_DIAG_TOS */
|
|
+ + nla_total_size(1) /* INET_DIAG_TCLASS */
|
|
+ + nla_total_size(sizeof(struct inet_diag_meminfo))
|
|
+ + nla_total_size(sizeof(struct inet_diag_msg))
|
|
+ + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
|
|
+ + nla_total_size(TCP_CA_NAME_MAX)
|
|
+ + nla_total_size(sizeof(struct tcpvegas_info))
|
|
+ + 64;
|
|
+}
|
|
+
|
|
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
|
struct sk_buff *skb, struct inet_diag_req_v2 *req,
|
|
struct user_namespace *user_ns,
|
|
@@ -324,9 +338,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
|
|
if (err)
|
|
goto out;
|
|
|
|
- rep = nlmsg_new(sizeof(struct inet_diag_msg) +
|
|
- sizeof(struct inet_diag_meminfo) +
|
|
- sizeof(struct tcp_info) + 64, GFP_KERNEL);
|
|
+ rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
|
|
if (!rep) {
|
|
err = -ENOMEM;
|
|
goto out;
|
|
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
|
|
index 48f4244..bf2cb4a 100644
|
|
--- a/net/ipv4/inetpeer.c
|
|
+++ b/net/ipv4/inetpeer.c
|
|
@@ -26,20 +26,7 @@
|
|
* Theory of operations.
|
|
* We keep one entry for each peer IP address. The nodes contains long-living
|
|
* information about the peer which doesn't depend on routes.
|
|
- * At this moment this information consists only of ID field for the next
|
|
- * outgoing IP packet. This field is incremented with each packet as encoded
|
|
- * in inet_getid() function (include/net/inetpeer.h).
|
|
- * At the moment of writing this notes identifier of IP packets is generated
|
|
- * to be unpredictable using this code only for packets subjected
|
|
- * (actually or potentially) to defragmentation. I.e. DF packets less than
|
|
- * PMTU in size when local fragmentation is disabled use a constant ID and do
|
|
- * not use this code (see ip_select_ident() in include/net/ip.h).
|
|
*
|
|
- * Route cache entries hold references to our nodes.
|
|
- * New cache entries get references via lookup by destination IP address in
|
|
- * the avl tree. The reference is grabbed only when it's needed i.e. only
|
|
- * when we try to output IP packet which needs an unpredictable ID (see
|
|
- * __ip_select_ident() in net/ipv4/route.c).
|
|
* Nodes are removed only when reference counter goes to 0.
|
|
* When it's happened the node may be removed when a sufficient amount of
|
|
* time has been passed since its last use. The less-recently-used entry can
|
|
@@ -62,7 +49,6 @@
|
|
* refcnt: atomically against modifications on other CPU;
|
|
* usually under some other lock to prevent node disappearing
|
|
* daddr: unchangeable
|
|
- * ip_id_count: atomic value (no lock needed)
|
|
*/
|
|
|
|
static struct kmem_cache *peer_cachep __read_mostly;
|
|
@@ -497,10 +483,6 @@ relookup:
|
|
p->daddr = *daddr;
|
|
atomic_set(&p->refcnt, 1);
|
|
atomic_set(&p->rid, 0);
|
|
- atomic_set(&p->ip_id_count,
|
|
- (daddr->family == AF_INET) ?
|
|
- secure_ip_id(daddr->addr.a4) :
|
|
- secure_ipv6_id(daddr->addr.a6));
|
|
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
|
|
p->rate_tokens = 0;
|
|
/* 60*HZ is arbitrary, but chosen enough high so that the first
|
|
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
|
|
index 1c6bd43..57075c4 100644
|
|
--- a/net/ipv4/ip_forward.c
|
|
+++ b/net/ipv4/ip_forward.c
|
|
@@ -127,6 +127,9 @@ int ip_forward(struct sk_buff *skb)
|
|
struct rtable *rt; /* Route we use */
|
|
struct ip_options *opt = &(IPCB(skb)->opt);
|
|
|
|
+ if (unlikely(skb->sk))
|
|
+ goto drop;
|
|
+
|
|
if (skb_warn_if_lro(skb))
|
|
goto drop;
|
|
|
|
@@ -178,7 +181,8 @@ int ip_forward(struct sk_buff *skb)
|
|
* We now generate an ICMP HOST REDIRECT giving the route
|
|
* we calculated.
|
|
*/
|
|
- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
|
|
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
|
|
+ !skb_sec_path(skb))
|
|
ip_rt_send_redirect(skb);
|
|
|
|
skb->priority = rt_tos2priority(iph->tos);
|
|
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
|
|
index c10a3ce..9ff497d 100644
|
|
--- a/net/ipv4/ip_fragment.c
|
|
+++ b/net/ipv4/ip_fragment.c
|
|
@@ -679,27 +679,30 @@ EXPORT_SYMBOL(ip_defrag);
|
|
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
|
|
{
|
|
struct iphdr iph;
|
|
+ int netoff;
|
|
u32 len;
|
|
|
|
if (skb->protocol != htons(ETH_P_IP))
|
|
return skb;
|
|
|
|
- if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
|
|
+ netoff = skb_network_offset(skb);
|
|
+
|
|
+ if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
|
|
return skb;
|
|
|
|
if (iph.ihl < 5 || iph.version != 4)
|
|
return skb;
|
|
|
|
len = ntohs(iph.tot_len);
|
|
- if (skb->len < len || len < (iph.ihl * 4))
|
|
+ if (skb->len < netoff + len || len < (iph.ihl * 4))
|
|
return skb;
|
|
|
|
if (ip_is_fragment(&iph)) {
|
|
skb = skb_share_check(skb, GFP_ATOMIC);
|
|
if (skb) {
|
|
- if (!pskb_may_pull(skb, iph.ihl*4))
|
|
+ if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
|
|
return skb;
|
|
- if (pskb_trim_rcsum(skb, len))
|
|
+ if (pskb_trim_rcsum(skb, netoff + len))
|
|
return skb;
|
|
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
|
|
if (ip_defrag(skb, user))
|
|
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
|
|
index 94213c8..b40b90d 100644
|
|
--- a/net/ipv4/ip_gre.c
|
|
+++ b/net/ipv4/ip_gre.c
|
|
@@ -250,10 +250,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
const struct iphdr *tnl_params;
|
|
|
|
- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
|
|
- if (IS_ERR(skb))
|
|
- goto out;
|
|
-
|
|
if (dev->header_ops) {
|
|
/* Need space for new headers */
|
|
if (skb_cow_head(skb, dev->needed_headroom -
|
|
@@ -266,6 +262,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
|
|
* to gre header.
|
|
*/
|
|
skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
|
|
+ skb_reset_mac_header(skb);
|
|
} else {
|
|
if (skb_cow_head(skb, dev->needed_headroom))
|
|
goto free_skb;
|
|
@@ -273,6 +270,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
|
|
tnl_params = &tunnel->parms.iph;
|
|
}
|
|
|
|
+ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
|
|
+ if (IS_ERR(skb))
|
|
+ goto out;
|
|
+
|
|
__gre_xmit(skb, dev, tnl_params, skb->protocol);
|
|
|
|
return NETDEV_TX_OK;
|
|
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
|
|
index 73c6b63..05686c4 100644
|
|
--- a/net/ipv4/ip_output.c
|
|
+++ b/net/ipv4/ip_output.c
|
|
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
|
|
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
|
|
iph->saddr = saddr;
|
|
iph->protocol = sk->sk_protocol;
|
|
- ip_select_ident(skb, &rt->dst, sk);
|
|
+ ip_select_ident(skb, sk);
|
|
|
|
if (opt && opt->opt.optlen) {
|
|
iph->ihl += opt->opt.optlen>>2;
|
|
@@ -386,8 +386,7 @@ packet_routed:
|
|
ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
|
|
}
|
|
|
|
- ip_select_ident_more(skb, &rt->dst, sk,
|
|
- (skb_shinfo(skb)->gso_segs ?: 1) - 1);
|
|
+ ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
|
|
|
|
skb->priority = sk->sk_priority;
|
|
skb->mark = sk->sk_mark;
|
|
@@ -844,7 +843,8 @@ static int __ip_append_data(struct sock *sk,
|
|
cork->length += length;
|
|
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
|
|
(sk->sk_protocol == IPPROTO_UDP) &&
|
|
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
|
|
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
|
|
+ (sk->sk_type == SOCK_DGRAM)) {
|
|
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
|
|
hh_len, fragheaderlen, transhdrlen,
|
|
maxfraglen, flags);
|
|
@@ -1338,7 +1338,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
|
|
iph->ttl = ttl;
|
|
iph->protocol = sk->sk_protocol;
|
|
ip_copy_addrs(iph, fl4);
|
|
- ip_select_ident(skb, &rt->dst, sk);
|
|
+ ip_select_ident(skb, sk);
|
|
|
|
if (opt) {
|
|
iph->ihl += opt->optlen>>2;
|
|
@@ -1461,23 +1461,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
|
|
/*
|
|
* Generic function to send a packet as reply to another packet.
|
|
* Used to send some TCP resets/acks so far.
|
|
- *
|
|
- * Use a fake percpu inet socket to avoid false sharing and contention.
|
|
*/
|
|
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
|
|
- .sk = {
|
|
- .__sk_common = {
|
|
- .skc_refcnt = ATOMIC_INIT(1),
|
|
- },
|
|
- .sk_wmem_alloc = ATOMIC_INIT(1),
|
|
- .sk_allocation = GFP_ATOMIC,
|
|
- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
|
|
- },
|
|
- .pmtudisc = IP_PMTUDISC_WANT,
|
|
- .uc_ttl = -1,
|
|
-};
|
|
-
|
|
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
|
|
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
|
|
__be32 saddr, const struct ip_reply_arg *arg,
|
|
unsigned int len)
|
|
{
|
|
@@ -1485,9 +1470,9 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
|
|
struct ipcm_cookie ipc;
|
|
struct flowi4 fl4;
|
|
struct rtable *rt = skb_rtable(skb);
|
|
+ struct net *net = sock_net(sk);
|
|
struct sk_buff *nskb;
|
|
- struct sock *sk;
|
|
- struct inet_sock *inet;
|
|
+ int err;
|
|
|
|
if (ip_options_echo(&replyopts.opt.opt, skb))
|
|
return;
|
|
@@ -1516,18 +1501,19 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
|
|
if (IS_ERR(rt))
|
|
return;
|
|
|
|
- inet = &get_cpu_var(unicast_sock);
|
|
+ inet_sk(sk)->tos = arg->tos;
|
|
|
|
- inet->tos = arg->tos;
|
|
- sk = &inet->sk;
|
|
sk->sk_priority = skb->priority;
|
|
sk->sk_protocol = ip_hdr(skb)->protocol;
|
|
sk->sk_bound_dev_if = arg->bound_dev_if;
|
|
- sock_net_set(sk, net);
|
|
- __skb_queue_head_init(&sk->sk_write_queue);
|
|
sk->sk_sndbuf = sysctl_wmem_default;
|
|
- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
|
|
- &ipc, &rt, MSG_DONTWAIT);
|
|
+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
|
|
+ len, 0, &ipc, &rt, MSG_DONTWAIT);
|
|
+ if (unlikely(err)) {
|
|
+ ip_flush_pending_frames(sk);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
nskb = skb_peek(&sk->sk_write_queue);
|
|
if (nskb) {
|
|
if (arg->csumoffset >= 0)
|
|
@@ -1535,13 +1521,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
|
|
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
|
|
arg->csum));
|
|
nskb->ip_summed = CHECKSUM_NONE;
|
|
- skb_orphan(nskb);
|
|
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
|
|
ip_push_pending_frames(sk, &fl4);
|
|
}
|
|
-
|
|
- put_cpu_var(unicast_sock);
|
|
-
|
|
+out:
|
|
ip_rt_put(rt);
|
|
}
|
|
|
|
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
|
|
index 580dd96..135045e 100644
|
|
--- a/net/ipv4/ip_sockglue.c
|
|
+++ b/net/ipv4/ip_sockglue.c
|
|
@@ -426,15 +426,11 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
|
|
|
|
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
|
|
sin = &errhdr.offender;
|
|
- sin->sin_family = AF_UNSPEC;
|
|
+ memset(sin, 0, sizeof(*sin));
|
|
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
|
|
- struct inet_sock *inet = inet_sk(sk);
|
|
-
|
|
sin->sin_family = AF_INET;
|
|
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
|
|
- sin->sin_port = 0;
|
|
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
|
|
- if (inet->cmsg_flags)
|
|
+ if (inet_sk(sk)->cmsg_flags)
|
|
ip_cmsg_recv(msg, skb);
|
|
}
|
|
|
|
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
|
|
index 62cd9e0..0a4af09 100644
|
|
--- a/net/ipv4/ip_tunnel.c
|
|
+++ b/net/ipv4/ip_tunnel.c
|
|
@@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
|
|
}
|
|
|
|
static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
|
|
- struct dst_entry *dst)
|
|
+ struct dst_entry *dst, __be32 saddr)
|
|
{
|
|
struct dst_entry *old_dst;
|
|
|
|
dst_clone(dst);
|
|
old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
|
|
dst_release(old_dst);
|
|
+ idst->saddr = saddr;
|
|
}
|
|
|
|
-static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
|
|
+static void tunnel_dst_set(struct ip_tunnel *t,
|
|
+ struct dst_entry *dst, __be32 saddr)
|
|
{
|
|
- __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
|
|
+ __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
|
|
}
|
|
|
|
static void tunnel_dst_reset(struct ip_tunnel *t)
|
|
{
|
|
- tunnel_dst_set(t, NULL);
|
|
+ tunnel_dst_set(t, NULL, 0);
|
|
}
|
|
|
|
void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
|
|
@@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
|
|
int i;
|
|
|
|
for_each_possible_cpu(i)
|
|
- __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
|
|
+ __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
|
|
}
|
|
EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
|
|
|
|
-static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
|
|
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
|
|
+ u32 cookie, __be32 *saddr)
|
|
{
|
|
+ struct ip_tunnel_dst *idst;
|
|
struct dst_entry *dst;
|
|
|
|
rcu_read_lock();
|
|
- dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
|
|
+ idst = this_cpu_ptr(t->dst_cache);
|
|
+ dst = rcu_dereference(idst->dst);
|
|
if (dst && !atomic_inc_not_zero(&dst->__refcnt))
|
|
dst = NULL;
|
|
if (dst) {
|
|
- if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
|
|
+ if (!dst->obsolete || dst->ops->check(dst, cookie)) {
|
|
+ *saddr = idst->saddr;
|
|
+ } else {
|
|
tunnel_dst_reset(t);
|
|
dst_release(dst);
|
|
dst = NULL;
|
|
@@ -362,7 +369,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
|
|
|
|
if (!IS_ERR(rt)) {
|
|
tdev = rt->dst.dev;
|
|
- tunnel_dst_set(tunnel, &rt->dst);
|
|
+ tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
|
|
ip_rt_put(rt);
|
|
}
|
|
if (dev->type != ARPHRD_ETHER)
|
|
@@ -606,7 +613,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
|
|
tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
|
|
|
|
- rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
|
|
+ rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
|
|
|
|
if (!rt) {
|
|
rt = ip_route_output_key(tunnel->net, &fl4);
|
|
@@ -616,7 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
goto tx_error;
|
|
}
|
|
if (connected)
|
|
- tunnel_dst_set(tunnel, &rt->dst);
|
|
+ tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
|
|
}
|
|
|
|
if (rt->dst.dev == dev) {
|
|
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
|
|
index 8d69626..791a419 100644
|
|
--- a/net/ipv4/ip_tunnel_core.c
|
|
+++ b/net/ipv4/ip_tunnel_core.c
|
|
@@ -74,7 +74,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
|
|
iph->daddr = dst;
|
|
iph->saddr = src;
|
|
iph->ttl = ttl;
|
|
- __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
|
|
+ __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
|
|
|
|
err = ip_local_out(skb);
|
|
if (unlikely(net_xmit_eval(err)))
|
|
@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
|
|
skb_pull_rcsum(skb, hdr_len);
|
|
|
|
if (inner_proto == htons(ETH_P_TEB)) {
|
|
- struct ethhdr *eh = (struct ethhdr *)skb->data;
|
|
+ struct ethhdr *eh;
|
|
|
|
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
|
|
return -ENOMEM;
|
|
|
|
+ eh = (struct ethhdr *)skb->data;
|
|
if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
|
|
skb->protocol = eh->h_proto;
|
|
else
|
|
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
|
|
index e4a8f76..b0a9cb4 100644
|
|
--- a/net/ipv4/ip_vti.c
|
|
+++ b/net/ipv4/ip_vti.c
|
|
@@ -369,6 +369,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
|
|
.validate = vti_tunnel_validate,
|
|
.newlink = vti_newlink,
|
|
.changelink = vti_changelink,
|
|
+ .dellink = ip_tunnel_dellink,
|
|
.get_size = vti_get_size,
|
|
.fill_info = vti_fill_info,
|
|
};
|
|
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
|
|
index 2886357..1149fc2 100644
|
|
--- a/net/ipv4/ipmr.c
|
|
+++ b/net/ipv4/ipmr.c
|
|
@@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
|
|
iph->protocol = IPPROTO_IPIP;
|
|
iph->ihl = 5;
|
|
iph->tot_len = htons(skb->len);
|
|
- ip_select_ident(skb, skb_dst(skb), NULL);
|
|
+ ip_select_ident(skb, NULL);
|
|
ip_send_check(iph);
|
|
|
|
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
|
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
|
|
index e21934b..1e2e9bf 100644
|
|
--- a/net/ipv4/ping.c
|
|
+++ b/net/ipv4/ping.c
|
|
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
|
|
if (sk_hashed(sk)) {
|
|
write_lock_bh(&ping_table.lock);
|
|
hlist_nulls_del(&sk->sk_nulls_node);
|
|
+ sk_nulls_node_init(&sk->sk_nulls_node);
|
|
sock_put(sk);
|
|
isk->inet_num = 0;
|
|
isk->inet_sport = 0;
|
|
@@ -217,6 +218,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
|
|
&ipv6_hdr(skb)->daddr))
|
|
continue;
|
|
#endif
|
|
+ } else {
|
|
+ continue;
|
|
}
|
|
|
|
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
|
|
@@ -257,6 +260,10 @@ int ping_init_sock(struct sock *sk)
|
|
kgid_t low, high;
|
|
int ret = 0;
|
|
|
|
+#if IS_ENABLED(CONFIG_IPV6)
|
|
+ if (sk->sk_family == AF_INET6)
|
|
+ inet6_sk(sk)->ipv6only = 1;
|
|
+#endif
|
|
inet_get_ping_group_range_net(net, &low, &high);
|
|
if (gid_lte(low, group) && gid_lte(group, high))
|
|
return 0;
|
|
@@ -303,6 +310,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
|
|
if (addr_len < sizeof(*addr))
|
|
return -EINVAL;
|
|
|
|
+ if (addr->sin_family != AF_INET &&
|
|
+ !(addr->sin_family == AF_UNSPEC &&
|
|
+ addr->sin_addr.s_addr == htonl(INADDR_ANY)))
|
|
+ return -EAFNOSUPPORT;
|
|
+
|
|
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
|
|
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
|
|
|
|
@@ -328,7 +340,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
|
|
return -EINVAL;
|
|
|
|
if (addr->sin6_family != AF_INET6)
|
|
- return -EINVAL;
|
|
+ return -EAFNOSUPPORT;
|
|
|
|
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
|
|
sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
|
|
@@ -714,7 +726,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
|
|
if (msg->msg_namelen < sizeof(*usin))
|
|
return -EINVAL;
|
|
if (usin->sin_family != AF_INET)
|
|
- return -EINVAL;
|
|
+ return -EAFNOSUPPORT;
|
|
daddr = usin->sin_addr.s_addr;
|
|
/* no remote port */
|
|
} else {
|
|
@@ -971,8 +983,11 @@ void ping_rcv(struct sk_buff *skb)
|
|
|
|
sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
|
|
if (sk != NULL) {
|
|
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
|
|
+
|
|
pr_debug("rcv on socket %p\n", sk);
|
|
- ping_queue_rcv_skb(sk, skb_get(skb));
|
|
+ if (skb2)
|
|
+ ping_queue_rcv_skb(sk, skb2);
|
|
sock_put(sk);
|
|
return;
|
|
}
|
|
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
|
|
index c04518f..11c8d81 100644
|
|
--- a/net/ipv4/raw.c
|
|
+++ b/net/ipv4/raw.c
|
|
@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
|
|
iph->check = 0;
|
|
iph->tot_len = htons(length);
|
|
if (!iph->id)
|
|
- ip_select_ident(skb, &rt->dst, NULL);
|
|
+ ip_select_ident(skb, NULL);
|
|
|
|
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
|
|
}
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index 031553f..625615c 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -89,6 +89,7 @@
|
|
#include <linux/rcupdate.h>
|
|
#include <linux/times.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/jhash.h>
|
|
#include <net/dst.h>
|
|
#include <net/net_namespace.h>
|
|
#include <net/protocol.h>
|
|
@@ -462,39 +463,45 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
|
|
return neigh_create(&arp_tbl, pkey, dev);
|
|
}
|
|
|
|
-/*
|
|
- * Peer allocation may fail only in serious out-of-memory conditions. However
|
|
- * we still can generate some output.
|
|
- * Random ID selection looks a bit dangerous because we have no chances to
|
|
- * select ID being unique in a reasonable period of time.
|
|
- * But broken packet identifier may be better than no packet at all.
|
|
+#define IP_IDENTS_SZ 2048u
|
|
+struct ip_ident_bucket {
|
|
+ atomic_t id;
|
|
+ u32 stamp32;
|
|
+};
|
|
+
|
|
+static struct ip_ident_bucket *ip_idents __read_mostly;
|
|
+
|
|
+/* In order to protect privacy, we add a perturbation to identifiers
|
|
+ * if one generator is seldom used. This makes hard for an attacker
|
|
+ * to infer how many packets were sent between two points in time.
|
|
*/
|
|
-static void ip_select_fb_ident(struct iphdr *iph)
|
|
+u32 ip_idents_reserve(u32 hash, int segs)
|
|
{
|
|
- static DEFINE_SPINLOCK(ip_fb_id_lock);
|
|
- static u32 ip_fallback_id;
|
|
- u32 salt;
|
|
+ struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
|
|
+ u32 old = ACCESS_ONCE(bucket->stamp32);
|
|
+ u32 now = (u32)jiffies;
|
|
+ u32 delta = 0;
|
|
+
|
|
+ if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
|
|
+ delta = prandom_u32_max(now - old);
|
|
|
|
- spin_lock_bh(&ip_fb_id_lock);
|
|
- salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
|
|
- iph->id = htons(salt & 0xFFFF);
|
|
- ip_fallback_id = salt;
|
|
- spin_unlock_bh(&ip_fb_id_lock);
|
|
+ return atomic_add_return(segs + delta, &bucket->id) - segs;
|
|
}
|
|
+EXPORT_SYMBOL(ip_idents_reserve);
|
|
|
|
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
|
|
+void __ip_select_ident(struct iphdr *iph, int segs)
|
|
{
|
|
- struct net *net = dev_net(dst->dev);
|
|
- struct inet_peer *peer;
|
|
+ static u32 ip_idents_hashrnd __read_mostly;
|
|
+ u32 hash, id;
|
|
|
|
- peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
|
|
- if (peer) {
|
|
- iph->id = htons(inet_getid(peer, more));
|
|
- inet_putpeer(peer);
|
|
- return;
|
|
- }
|
|
+ net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
|
|
|
|
- ip_select_fb_ident(iph);
|
|
+ hash = jhash_3words((__force u32)iph->daddr,
|
|
+ (__force u32)iph->saddr,
|
|
+ iph->protocol,
|
|
+ ip_idents_hashrnd);
|
|
+ id = ip_idents_reserve(hash, segs);
|
|
+ iph->id = htons(id);
|
|
}
|
|
EXPORT_SYMBOL(__ip_select_ident);
|
|
|
|
@@ -903,6 +910,10 @@ static int ip_error(struct sk_buff *skb)
|
|
bool send;
|
|
int code;
|
|
|
|
+ /* IP on this device is disabled. */
|
|
+ if (!in_dev)
|
|
+ goto out;
|
|
+
|
|
net = dev_net(rt->dst.dev);
|
|
if (!IN_DEV_FORWARD(in_dev)) {
|
|
switch (rt->dst.error) {
|
|
@@ -1547,11 +1558,10 @@ static int __mkroute_input(struct sk_buff *skb,
|
|
|
|
do_cache = res->fi && !itag;
|
|
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
|
|
+ skb->protocol == htons(ETH_P_IP) &&
|
|
(IN_DEV_SHARED_MEDIA(out_dev) ||
|
|
- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
|
|
- flags |= RTCF_DOREDIRECT;
|
|
- do_cache = false;
|
|
- }
|
|
+ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
|
|
+ IPCB(skb)->flags |= IPSKB_DOREDIRECT;
|
|
|
|
if (skb->protocol != htons(ETH_P_IP)) {
|
|
/* Not IP (i.e. ARP). Do not create route, if it is
|
|
@@ -2261,9 +2271,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
|
|
return rt;
|
|
|
|
if (flp4->flowi4_proto)
|
|
- rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
|
|
- flowi4_to_flowi(flp4),
|
|
- sk, 0);
|
|
+ rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
|
|
+ flowi4_to_flowi(flp4),
|
|
+ sk, 0);
|
|
|
|
return rt;
|
|
}
|
|
@@ -2298,6 +2308,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
|
|
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
|
|
if (rt->rt_flags & RTCF_NOTIFY)
|
|
r->rtm_flags |= RTM_F_NOTIFY;
|
|
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
|
|
+ r->rtm_flags |= RTCF_DOREDIRECT;
|
|
|
|
if (nla_put_be32(skb, RTA_DST, dst))
|
|
goto nla_put_failure;
|
|
@@ -2718,6 +2730,12 @@ int __init ip_rt_init(void)
|
|
{
|
|
int rc = 0;
|
|
|
|
+ ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
|
|
+ if (!ip_idents)
|
|
+ panic("IP: failed to allocate ip_idents\n");
|
|
+
|
|
+ prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
|
|
+
|
|
#ifdef CONFIG_IP_ROUTE_CLASSID
|
|
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
|
|
if (!ip_rt_acct)
|
|
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
|
|
index b48fba0..dc45221 100644
|
|
--- a/net/ipv4/tcp.c
|
|
+++ b/net/ipv4/tcp.c
|
|
@@ -1175,13 +1175,6 @@ new_segment:
|
|
goto wait_for_memory;
|
|
|
|
/*
|
|
- * All packets are restored as if they have
|
|
- * already been sent.
|
|
- */
|
|
- if (tp->repair)
|
|
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
|
-
|
|
- /*
|
|
* Check whether we can use HW checksum.
|
|
*/
|
|
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
|
|
@@ -1190,6 +1183,13 @@ new_segment:
|
|
skb_entail(sk, skb);
|
|
copy = size_goal;
|
|
max = size_goal;
|
|
+
|
|
+ /* All packets are restored as if they have
|
|
+ * already been sent. skb_mstamp isn't set to
|
|
+ * avoid wrong rtt estimation.
|
|
+ */
|
|
+ if (tp->repair)
|
|
+ TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
|
|
}
|
|
|
|
/* Try to append data to the end of skb. */
|
|
@@ -2684,10 +2684,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
|
|
|
|
case TCP_FASTOPEN:
|
|
if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
|
|
- TCPF_LISTEN)))
|
|
+ TCPF_LISTEN))) {
|
|
+ tcp_fastopen_init_key_once(true);
|
|
+
|
|
err = fastopen_init_queue(sk, val);
|
|
- else
|
|
+ } else {
|
|
err = -EINVAL;
|
|
+ }
|
|
break;
|
|
case TCP_TIMESTAMP:
|
|
if (!tp->repair)
|
|
@@ -2954,61 +2957,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
|
|
#endif
|
|
|
|
#ifdef CONFIG_TCP_MD5SIG
|
|
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
|
|
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
|
|
static DEFINE_MUTEX(tcp_md5sig_mutex);
|
|
-
|
|
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
|
|
-{
|
|
- int cpu;
|
|
-
|
|
- for_each_possible_cpu(cpu) {
|
|
- struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
|
|
-
|
|
- if (p->md5_desc.tfm)
|
|
- crypto_free_hash(p->md5_desc.tfm);
|
|
- }
|
|
- free_percpu(pool);
|
|
-}
|
|
+static bool tcp_md5sig_pool_populated = false;
|
|
|
|
static void __tcp_alloc_md5sig_pool(void)
|
|
{
|
|
int cpu;
|
|
- struct tcp_md5sig_pool __percpu *pool;
|
|
-
|
|
- pool = alloc_percpu(struct tcp_md5sig_pool);
|
|
- if (!pool)
|
|
- return;
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
- struct crypto_hash *hash;
|
|
-
|
|
- hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
|
|
- if (IS_ERR_OR_NULL(hash))
|
|
- goto out_free;
|
|
+ if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
|
|
+ struct crypto_hash *hash;
|
|
|
|
- per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
|
|
+ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
|
|
+ if (IS_ERR_OR_NULL(hash))
|
|
+ return;
|
|
+ per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
|
|
+ }
|
|
}
|
|
- /* before setting tcp_md5sig_pool, we must commit all writes
|
|
- * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
|
|
+ /* before setting tcp_md5sig_pool_populated, we must commit all writes
|
|
+ * to memory. See smp_rmb() in tcp_get_md5sig_pool()
|
|
*/
|
|
smp_wmb();
|
|
- tcp_md5sig_pool = pool;
|
|
- return;
|
|
-out_free:
|
|
- __tcp_free_md5sig_pool(pool);
|
|
+ tcp_md5sig_pool_populated = true;
|
|
}
|
|
|
|
bool tcp_alloc_md5sig_pool(void)
|
|
{
|
|
- if (unlikely(!tcp_md5sig_pool)) {
|
|
+ if (unlikely(!tcp_md5sig_pool_populated)) {
|
|
mutex_lock(&tcp_md5sig_mutex);
|
|
|
|
- if (!tcp_md5sig_pool)
|
|
+ if (!tcp_md5sig_pool_populated)
|
|
__tcp_alloc_md5sig_pool();
|
|
|
|
mutex_unlock(&tcp_md5sig_mutex);
|
|
}
|
|
- return tcp_md5sig_pool != NULL;
|
|
+ return tcp_md5sig_pool_populated;
|
|
}
|
|
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
|
|
|
|
@@ -3022,13 +3006,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
|
|
*/
|
|
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
|
|
{
|
|
- struct tcp_md5sig_pool __percpu *p;
|
|
-
|
|
local_bh_disable();
|
|
- p = ACCESS_ONCE(tcp_md5sig_pool);
|
|
- if (p)
|
|
- return __this_cpu_ptr(p);
|
|
|
|
+ if (tcp_md5sig_pool_populated) {
|
|
+ /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
|
|
+ smp_rmb();
|
|
+ return this_cpu_ptr(&tcp_md5sig_pool);
|
|
+ }
|
|
local_bh_enable();
|
|
return NULL;
|
|
}
|
|
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
|
|
index f195d93..ee6518d 100644
|
|
--- a/net/ipv4/tcp_fastopen.c
|
|
+++ b/net/ipv4/tcp_fastopen.c
|
|
@@ -84,8 +84,6 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
|
|
__be32 path[4] = { src, dst, 0, 0 };
|
|
struct tcp_fastopen_context *ctx;
|
|
|
|
- tcp_fastopen_init_key_once(true);
|
|
-
|
|
rcu_read_lock();
|
|
ctx = rcu_dereference(tcp_fastopen_ctx);
|
|
if (ctx) {
|
|
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
|
|
index 3898694..9fbd69e 100644
|
|
--- a/net/ipv4/tcp_input.c
|
|
+++ b/net/ipv4/tcp_input.c
|
|
@@ -2678,7 +2678,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
|
|
*/
|
|
static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
|
|
{
|
|
- struct inet_connection_sock *icsk = inet_csk(sk);
|
|
struct tcp_sock *tp = tcp_sk(sk);
|
|
bool recovered = !before(tp->snd_una, tp->high_seq);
|
|
|
|
@@ -2704,12 +2703,9 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
|
|
|
|
if (recovered) {
|
|
/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
|
|
- icsk->icsk_retransmits = 0;
|
|
tcp_try_undo_recovery(sk);
|
|
return;
|
|
}
|
|
- if (flag & FLAG_DATA_ACKED)
|
|
- icsk->icsk_retransmits = 0;
|
|
if (tcp_is_reno(tp)) {
|
|
/* A Reno DUPACK means new data in F-RTO step 2.b above are
|
|
* delivered. Lower inflight to clock out (re)tranmissions.
|
|
@@ -3068,10 +3064,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
|
|
if (seq_rtt < 0) {
|
|
seq_rtt = ca_seq_rtt;
|
|
}
|
|
- if (!(sacked & TCPCB_SACKED_ACKED))
|
|
+ if (!(sacked & TCPCB_SACKED_ACKED)) {
|
|
reord = min(pkts_acked, reord);
|
|
- if (!after(scb->end_seq, tp->high_seq))
|
|
- flag |= FLAG_ORIG_SACK_ACKED;
|
|
+ if (!after(scb->end_seq, tp->high_seq))
|
|
+ flag |= FLAG_ORIG_SACK_ACKED;
|
|
+ }
|
|
}
|
|
|
|
if (sacked & TCPCB_SACKED_ACKED)
|
|
@@ -3398,8 +3395,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
|
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
|
|
tcp_rearm_rto(sk);
|
|
|
|
- if (after(ack, prior_snd_una))
|
|
+ if (after(ack, prior_snd_una)) {
|
|
flag |= FLAG_SND_UNA_ADVANCED;
|
|
+ icsk->icsk_retransmits = 0;
|
|
+ }
|
|
|
|
prior_fackets = tp->fackets_out;
|
|
prior_in_flight = tcp_packets_in_flight(tp);
|
|
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
|
|
index 1e4eac7..e2f8bd0 100644
|
|
--- a/net/ipv4/tcp_ipv4.c
|
|
+++ b/net/ipv4/tcp_ipv4.c
|
|
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
|
|
* It can be called through tcp_release_cb() if socket was owned by user
|
|
* at the time tcp_v4_err() was called to handle ICMP message.
|
|
*/
|
|
-static void tcp_v4_mtu_reduced(struct sock *sk)
|
|
+void tcp_v4_mtu_reduced(struct sock *sk)
|
|
{
|
|
struct dst_entry *dst;
|
|
struct inet_sock *inet = inet_sk(sk);
|
|
@@ -300,6 +300,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
|
|
tcp_simple_retransmit(sk);
|
|
} /* else let the usual retransmit timer handle it */
|
|
}
|
|
+EXPORT_SYMBOL(tcp_v4_mtu_reduced);
|
|
|
|
static void do_redirect(struct sk_buff *skb, struct sock *sk)
|
|
{
|
|
@@ -690,7 +691,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
|
|
|
|
net = dev_net(skb_dst(skb)->dev);
|
|
arg.tos = ip_hdr(skb)->tos;
|
|
- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
|
|
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
|
|
+ skb, ip_hdr(skb)->saddr,
|
|
ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
|
|
|
|
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
|
|
@@ -773,7 +775,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
|
|
if (oif)
|
|
arg.bound_dev_if = oif;
|
|
arg.tos = tos;
|
|
- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
|
|
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
|
|
+ skb, ip_hdr(skb)->saddr,
|
|
ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
|
|
|
|
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
|
|
@@ -1872,7 +1875,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
|
|
skb->sk = sk;
|
|
skb->destructor = sock_edemux;
|
|
if (sk->sk_state != TCP_TIME_WAIT) {
|
|
- struct dst_entry *dst = sk->sk_rx_dst;
|
|
+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
|
|
|
|
if (dst)
|
|
dst = dst_check(dst, 0);
|
|
@@ -2117,6 +2120,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
|
|
.compat_setsockopt = compat_ip_setsockopt,
|
|
.compat_getsockopt = compat_ip_getsockopt,
|
|
#endif
|
|
+ .mtu_reduced = tcp_v4_mtu_reduced,
|
|
};
|
|
EXPORT_SYMBOL(ipv4_specific);
|
|
|
|
@@ -2736,7 +2740,6 @@ struct proto tcp_prot = {
|
|
.sendpage = tcp_sendpage,
|
|
.backlog_rcv = tcp_v4_do_rcv,
|
|
.release_cb = tcp_release_cb,
|
|
- .mtu_reduced = tcp_v4_mtu_reduced,
|
|
.hash = inet_hash,
|
|
.unhash = inet_unhash,
|
|
.get_port = inet_csk_get_port,
|
|
@@ -2768,14 +2771,39 @@ struct proto tcp_prot = {
|
|
};
|
|
EXPORT_SYMBOL(tcp_prot);
|
|
|
|
+static void __net_exit tcp_sk_exit(struct net *net)
|
|
+{
|
|
+ int cpu;
|
|
+
|
|
+ for_each_possible_cpu(cpu)
|
|
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
|
|
+ free_percpu(net->ipv4.tcp_sk);
|
|
+}
|
|
+
|
|
static int __net_init tcp_sk_init(struct net *net)
|
|
{
|
|
+ int res, cpu;
|
|
+
|
|
+ net->ipv4.tcp_sk = alloc_percpu(struct sock *);
|
|
+ if (!net->ipv4.tcp_sk)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ for_each_possible_cpu(cpu) {
|
|
+ struct sock *sk;
|
|
+
|
|
+ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
|
|
+ IPPROTO_TCP, net);
|
|
+ if (res)
|
|
+ goto fail;
|
|
+ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
|
|
+ }
|
|
net->ipv4.sysctl_tcp_ecn = 2;
|
|
return 0;
|
|
-}
|
|
|
|
-static void __net_exit tcp_sk_exit(struct net *net)
|
|
-{
|
|
+fail:
|
|
+ tcp_sk_exit(net);
|
|
+
|
|
+ return res;
|
|
}
|
|
|
|
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
|
|
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
|
|
index 7a436c5..9128d0a 100644
|
|
--- a/net/ipv4/tcp_minisocks.c
|
|
+++ b/net/ipv4/tcp_minisocks.c
|
|
@@ -297,7 +297,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
|
|
tw->tw_v6_daddr = sk->sk_v6_daddr;
|
|
tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
|
|
tw->tw_tclass = np->tclass;
|
|
- tw->tw_flowlabel = np->flow_label >> 12;
|
|
+ tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
|
|
tw->tw_ipv6only = np->ipv6only;
|
|
}
|
|
#endif
|
|
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
|
|
index b3d1add..a68cd71 100644
|
|
--- a/net/ipv4/tcp_output.c
|
|
+++ b/net/ipv4/tcp_output.c
|
|
@@ -787,7 +787,7 @@ void tcp_release_cb(struct sock *sk)
|
|
__sock_put(sk);
|
|
}
|
|
if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
|
|
- sk->sk_prot->mtu_reduced(sk);
|
|
+ inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
|
|
__sock_put(sk);
|
|
}
|
|
}
|
|
@@ -1876,8 +1876,11 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
|
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
|
|
BUG_ON(!tso_segs);
|
|
|
|
- if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
|
|
+ if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
|
|
+ /* "when" is used as a start point for the retransmit timer */
|
|
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
|
goto repair; /* Skip network transmission */
|
|
+ }
|
|
|
|
cwnd_quota = tcp_cwnd_test(tp, skb);
|
|
if (!cwnd_quota) {
|
|
@@ -1891,7 +1894,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
|
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
|
|
break;
|
|
|
|
- if (tso_segs == 1) {
|
|
+ if (tso_segs == 1 || !sk->sk_gso_max_segs) {
|
|
if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
|
|
(tcp_skb_is_last(sk, skb) ?
|
|
nonagle : TCP_NAGLE_PUSH))))
|
|
@@ -1928,7 +1931,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
|
}
|
|
|
|
limit = mss_now;
|
|
- if (tso_segs > 1 && !tcp_urg_mode(tp))
|
|
+ if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
|
|
limit = tcp_mss_split_point(sk, skb, mss_now,
|
|
min_t(unsigned int,
|
|
cwnd_quota,
|
|
@@ -2066,9 +2069,7 @@ void tcp_send_loss_probe(struct sock *sk)
|
|
if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
|
|
goto rearm_timer;
|
|
|
|
- /* Probe with zero data doesn't trigger fast recovery. */
|
|
- if (skb->len > 0)
|
|
- err = __tcp_retransmit_skb(sk, skb);
|
|
+ err = __tcp_retransmit_skb(sk, skb);
|
|
|
|
/* Record snd_nxt for loss detection. */
|
|
if (likely(!err))
|
|
@@ -2594,43 +2595,65 @@ begin_fwd:
|
|
}
|
|
}
|
|
|
|
-/* Send a fin. The caller locks the socket for us. This cannot be
|
|
- * allowed to fail queueing a FIN frame under any circumstances.
|
|
+/* We allow to exceed memory limits for FIN packets to expedite
|
|
+ * connection tear down and (memory) recovery.
|
|
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
|
|
+ * or even be forced to close flow without any FIN.
|
|
+ */
|
|
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
|
|
+{
|
|
+ int amt, status;
|
|
+
|
|
+ if (size <= sk->sk_forward_alloc)
|
|
+ return;
|
|
+ amt = sk_mem_pages(size);
|
|
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
|
|
+ sk_memory_allocated_add(sk, amt, &status);
|
|
+}
|
|
+
|
|
+/* Send a FIN. The caller locks the socket for us.
|
|
+ * We should try to send a FIN packet really hard, but eventually give up.
|
|
*/
|
|
void tcp_send_fin(struct sock *sk)
|
|
{
|
|
+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
|
|
struct tcp_sock *tp = tcp_sk(sk);
|
|
- struct sk_buff *skb = tcp_write_queue_tail(sk);
|
|
- int mss_now;
|
|
|
|
- /* Optimization, tack on the FIN if we have a queue of
|
|
- * unsent frames. But be careful about outgoing SACKS
|
|
- * and IP options.
|
|
+ /* Optimization, tack on the FIN if we have one skb in write queue and
|
|
+ * this skb was not yet sent, or we are under memory pressure.
|
|
+ * Note: in the latter case, FIN packet will be sent after a timeout,
|
|
+ * as TCP stack thinks it has already been transmitted.
|
|
*/
|
|
- mss_now = tcp_current_mss(sk);
|
|
-
|
|
- if (tcp_send_head(sk) != NULL) {
|
|
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
|
|
- TCP_SKB_CB(skb)->end_seq++;
|
|
+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
|
|
+coalesce:
|
|
+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
|
|
+ TCP_SKB_CB(tskb)->end_seq++;
|
|
tp->write_seq++;
|
|
+ if (!tcp_send_head(sk)) {
|
|
+ /* This means tskb was already sent.
|
|
+ * Pretend we included the FIN on previous transmit.
|
|
+ * We need to set tp->snd_nxt to the value it would have
|
|
+ * if FIN had been sent. This is because retransmit path
|
|
+ * does not change tp->snd_nxt.
|
|
+ */
|
|
+ tp->snd_nxt++;
|
|
+ return;
|
|
+ }
|
|
} else {
|
|
- /* Socket is locked, keep trying until memory is available. */
|
|
- for (;;) {
|
|
- skb = alloc_skb_fclone(MAX_TCP_HEADER,
|
|
- sk->sk_allocation);
|
|
- if (skb)
|
|
- break;
|
|
- yield();
|
|
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
|
|
+ if (unlikely(!skb)) {
|
|
+ if (tskb)
|
|
+ goto coalesce;
|
|
+ return;
|
|
}
|
|
-
|
|
- /* Reserve space for headers and prepare control bits. */
|
|
skb_reserve(skb, MAX_TCP_HEADER);
|
|
+ sk_forced_wmem_schedule(sk, skb->truesize);
|
|
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
|
|
tcp_init_nondata_skb(skb, tp->write_seq,
|
|
TCPHDR_ACK | TCPHDR_FIN);
|
|
tcp_queue_skb(sk, skb);
|
|
}
|
|
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
|
|
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
|
|
}
|
|
|
|
/* We get here when a process closes a file descriptor (either due to
|
|
@@ -2799,6 +2822,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
|
|
}
|
|
#endif
|
|
|
|
+ /* Do not fool tcpdump (if any), clean our debris */
|
|
+ skb->tstamp.tv64 = 0;
|
|
return skb;
|
|
}
|
|
EXPORT_SYMBOL(tcp_make_synack);
|
|
@@ -2898,9 +2923,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
|
|
{
|
|
struct tcp_sock *tp = tcp_sk(sk);
|
|
struct tcp_fastopen_request *fo = tp->fastopen_req;
|
|
- int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
|
|
- struct sk_buff *syn_data = NULL, *data;
|
|
+ int syn_loss = 0, space, err = 0;
|
|
unsigned long last_syn_loss = 0;
|
|
+ struct sk_buff *syn_data;
|
|
|
|
tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
|
|
tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
|
|
@@ -2931,42 +2956,39 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
|
|
/* limit to order-0 allocations */
|
|
space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
|
|
|
|
- syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
|
|
- sk->sk_allocation);
|
|
- if (syn_data == NULL)
|
|
+ syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
|
|
+ if (!syn_data)
|
|
+ goto fallback;
|
|
+ syn_data->ip_summed = CHECKSUM_PARTIAL;
|
|
+ memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
|
|
+ skb_shinfo(syn_data)->gso_segs = 1;
|
|
+ if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
|
|
+ fo->data->msg_iov, 0, space))) {
|
|
+ kfree_skb(syn_data);
|
|
goto fallback;
|
|
+ }
|
|
|
|
- for (i = 0; i < iovlen && syn_data->len < space; ++i) {
|
|
- struct iovec *iov = &fo->data->msg_iov[i];
|
|
- unsigned char __user *from = iov->iov_base;
|
|
- int len = iov->iov_len;
|
|
+ /* No more data pending in inet_wait_for_connect() */
|
|
+ if (space == fo->size)
|
|
+ fo->data = NULL;
|
|
+ fo->copied = space;
|
|
|
|
- if (syn_data->len + len > space)
|
|
- len = space - syn_data->len;
|
|
- else if (i + 1 == iovlen)
|
|
- /* No more data pending in inet_wait_for_connect() */
|
|
- fo->data = NULL;
|
|
+ tcp_connect_queue_skb(sk, syn_data);
|
|
|
|
- if (skb_add_data(syn_data, from, len))
|
|
- goto fallback;
|
|
- }
|
|
+ err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
|
|
|
|
- /* Queue a data-only packet after the regular SYN for retransmission */
|
|
- data = pskb_copy(syn_data, sk->sk_allocation);
|
|
- if (data == NULL)
|
|
- goto fallback;
|
|
- TCP_SKB_CB(data)->seq++;
|
|
- TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
|
|
- TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
|
|
- tcp_connect_queue_skb(sk, data);
|
|
- fo->copied = data->len;
|
|
-
|
|
- if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
|
|
+ /* Now full SYN+DATA was cloned and sent (or not),
|
|
+ * remove the SYN from the original skb (syn_data)
|
|
+ * we keep in write queue in case of a retransmit, as we
|
|
+ * also have the SYN packet (with no data) in the same queue.
|
|
+ */
|
|
+ TCP_SKB_CB(syn_data)->seq++;
|
|
+ TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
|
|
+ if (!err) {
|
|
tp->syn_data = (fo->copied > 0);
|
|
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
|
|
goto done;
|
|
}
|
|
- syn_data = NULL;
|
|
|
|
fallback:
|
|
/* Send a regular SYN with Fast Open cookie request option */
|
|
@@ -2975,7 +2997,6 @@ fallback:
|
|
err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
|
|
if (err)
|
|
tp->syn_fastopen = 0;
|
|
- kfree_skb(syn_data);
|
|
done:
|
|
fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
|
|
return err;
|
|
@@ -2995,13 +3016,10 @@ int tcp_connect(struct sock *sk)
|
|
return 0;
|
|
}
|
|
|
|
- buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
|
|
- if (unlikely(buff == NULL))
|
|
+ buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
|
|
+ if (unlikely(!buff))
|
|
return -ENOBUFS;
|
|
|
|
- /* Reserve space for headers. */
|
|
- skb_reserve(buff, MAX_TCP_HEADER);
|
|
-
|
|
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
|
|
tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
|
|
tcp_connect_queue_skb(sk, buff);
|
|
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
|
|
index 06cae62..6b1a5fd 100644
|
|
--- a/net/ipv4/tcp_vegas.c
|
|
+++ b/net/ipv4/tcp_vegas.c
|
|
@@ -219,7 +219,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
|
|
* This is:
|
|
* (actual rate in segments) * baseRTT
|
|
*/
|
|
- target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
|
|
+ target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
|
|
+ do_div(target_cwnd, rtt);
|
|
|
|
/* Calculate the difference between the window we had,
|
|
* and the window we would like to have. This quantity
|
|
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
|
|
index 326475a..603ad49 100644
|
|
--- a/net/ipv4/tcp_veno.c
|
|
+++ b/net/ipv4/tcp_veno.c
|
|
@@ -145,7 +145,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
|
|
|
|
rtt = veno->minrtt;
|
|
|
|
- target_cwnd = (tp->snd_cwnd * veno->basertt);
|
|
+ target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
|
|
target_cwnd <<= V_PARAM_SHIFT;
|
|
do_div(target_cwnd, rtt);
|
|
|
|
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
|
|
index b25e852..21a3a9e 100644
|
|
--- a/net/ipv4/udp.c
|
|
+++ b/net/ipv4/udp.c
|
|
@@ -90,6 +90,7 @@
|
|
#include <linux/socket.h>
|
|
#include <linux/sockios.h>
|
|
#include <linux/igmp.h>
|
|
+#include <linux/inetdevice.h>
|
|
#include <linux/in.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/timer.h>
|
|
@@ -1317,10 +1318,8 @@ csum_copy_err:
|
|
}
|
|
unlock_sock_fast(sk, slow);
|
|
|
|
- if (noblock)
|
|
- return -EAGAIN;
|
|
-
|
|
- /* starting over for a new packet */
|
|
+ /* starting over for a new packet, but check if we need to yield */
|
|
+ cond_resched();
|
|
msg->msg_flags &= ~MSG_TRUNC;
|
|
goto try_again;
|
|
}
|
|
@@ -1924,6 +1923,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
|
|
struct sock *sk;
|
|
struct dst_entry *dst;
|
|
int dif = skb->dev->ifindex;
|
|
+ int ours;
|
|
|
|
/* validate the packet */
|
|
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
|
|
@@ -1933,14 +1933,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
|
|
uh = udp_hdr(skb);
|
|
|
|
if (skb->pkt_type == PACKET_BROADCAST ||
|
|
- skb->pkt_type == PACKET_MULTICAST)
|
|
+ skb->pkt_type == PACKET_MULTICAST) {
|
|
+ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
|
|
+
|
|
+ if (!in_dev)
|
|
+ return;
|
|
+
|
|
+ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
|
|
+ iph->protocol);
|
|
+ if (!ours)
|
|
+ return;
|
|
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
|
|
uh->source, iph->saddr, dif);
|
|
- else if (skb->pkt_type == PACKET_HOST)
|
|
+ } else if (skb->pkt_type == PACKET_HOST) {
|
|
sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
|
|
uh->source, iph->saddr, dif);
|
|
- else
|
|
+ } else {
|
|
return;
|
|
+ }
|
|
|
|
if (!sk)
|
|
return;
|
|
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
|
|
index 7927db0..4a000f1 100644
|
|
--- a/net/ipv4/udp_diag.c
|
|
+++ b/net/ipv4/udp_diag.c
|
|
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
|
|
s_slot = cb->args[0];
|
|
num = s_num = cb->args[1];
|
|
|
|
- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
|
|
+ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
|
|
struct sock *sk;
|
|
struct hlist_nulls_node *node;
|
|
struct udp_hslot *hslot = &table->hash[slot];
|
|
|
|
+ num = 0;
|
|
+
|
|
if (hlist_nulls_empty(&hslot->head))
|
|
continue;
|
|
|
|
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
|
|
index 31b1815..1f564a1 100644
|
|
--- a/net/ipv4/xfrm4_mode_tunnel.c
|
|
+++ b/net/ipv4/xfrm4_mode_tunnel.c
|
|
@@ -117,12 +117,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
|
|
|
|
top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
|
|
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
|
|
- ip_select_ident(skb, dst->child, NULL);
|
|
|
|
top_iph->ttl = ip4_dst_hoplimit(dst->child);
|
|
|
|
top_iph->saddr = x->props.saddr.a4;
|
|
top_iph->daddr = x->id.daddr.a4;
|
|
+ ip_select_ident(skb, NULL);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
|
|
index 6c7fa08..3f0ec06 100644
|
|
--- a/net/ipv6/addrconf.c
|
|
+++ b/net/ipv6/addrconf.c
|
|
@@ -1684,14 +1684,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
|
|
addrconf_mod_dad_work(ifp, 0);
|
|
}
|
|
|
|
-/* Join to solicited addr multicast group. */
|
|
-
|
|
+/* Join to solicited addr multicast group.
|
|
+ * caller must hold RTNL */
|
|
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
|
|
{
|
|
struct in6_addr maddr;
|
|
|
|
- ASSERT_RTNL();
|
|
-
|
|
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
|
|
return;
|
|
|
|
@@ -1699,12 +1697,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
|
|
ipv6_dev_mc_inc(dev, &maddr);
|
|
}
|
|
|
|
+/* caller must hold RTNL */
|
|
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
|
|
{
|
|
struct in6_addr maddr;
|
|
|
|
- ASSERT_RTNL();
|
|
-
|
|
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
|
|
return;
|
|
|
|
@@ -1712,12 +1709,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
|
|
__ipv6_dev_mc_dec(idev, &maddr);
|
|
}
|
|
|
|
+/* caller must hold RTNL */
|
|
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
|
|
{
|
|
struct in6_addr addr;
|
|
|
|
- ASSERT_RTNL();
|
|
-
|
|
if (ifp->prefix_len >= 127) /* RFC 6164 */
|
|
return;
|
|
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
|
|
@@ -1726,12 +1722,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
|
|
ipv6_dev_ac_inc(ifp->idev->dev, &addr);
|
|
}
|
|
|
|
+/* caller must hold RTNL */
|
|
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
|
|
{
|
|
struct in6_addr addr;
|
|
|
|
- ASSERT_RTNL();
|
|
-
|
|
if (ifp->prefix_len >= 127) /* RFC 6164 */
|
|
return;
|
|
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
|
|
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
|
|
index 2101832..ff2de7d 100644
|
|
--- a/net/ipv6/anycast.c
|
|
+++ b/net/ipv6/anycast.c
|
|
@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
pac->acl_next = NULL;
|
|
pac->acl_addr = *addr;
|
|
|
|
+ rtnl_lock();
|
|
rcu_read_lock();
|
|
if (ifindex == 0) {
|
|
struct rt6_info *rt;
|
|
@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
|
|
error:
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
if (pac)
|
|
sock_kfree_s(sk, pac, sizeof(*pac));
|
|
return err;
|
|
@@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
|
|
spin_unlock_bh(&ipv6_sk_ac_lock);
|
|
|
|
+ rtnl_lock();
|
|
rcu_read_lock();
|
|
dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
|
|
if (dev)
|
|
ipv6_dev_ac_dec(dev, &pac->acl_addr);
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
|
|
sock_kfree_s(sk, pac, sizeof(*pac));
|
|
return 0;
|
|
@@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk)
|
|
spin_unlock_bh(&ipv6_sk_ac_lock);
|
|
|
|
prev_index = 0;
|
|
+ rtnl_lock();
|
|
rcu_read_lock();
|
|
while (pac) {
|
|
struct ipv6_ac_socklist *next = pac->acl_next;
|
|
@@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk)
|
|
pac = next;
|
|
}
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
}
|
|
|
|
static void aca_put(struct ifacaddr6 *ac)
|
|
@@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
|
|
struct rt6_info *rt;
|
|
int err;
|
|
|
|
+ ASSERT_RTNL();
|
|
+
|
|
idev = in6_dev_get(dev);
|
|
|
|
if (idev == NULL)
|
|
@@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
|
|
{
|
|
struct ifacaddr6 *aca, *prev_aca;
|
|
|
|
+ ASSERT_RTNL();
|
|
+
|
|
write_lock_bh(&idev->lock);
|
|
prev_aca = NULL;
|
|
for (aca = idev->ac_list; aca; aca = aca->aca_next) {
|
|
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
|
|
index c3bf2d2..841cfa2 100644
|
|
--- a/net/ipv6/datagram.c
|
|
+++ b/net/ipv6/datagram.c
|
|
@@ -382,11 +382,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
|
|
|
|
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
|
|
sin = &errhdr.offender;
|
|
- sin->sin6_family = AF_UNSPEC;
|
|
+ memset(sin, 0, sizeof(*sin));
|
|
+
|
|
if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
|
|
sin->sin6_family = AF_INET6;
|
|
- sin->sin6_flowinfo = 0;
|
|
- sin->sin6_port = 0;
|
|
if (np->rxopt.all)
|
|
ip6_datagram_recv_common_ctl(sk, msg, skb);
|
|
if (skb->protocol == htons(ETH_P_IPV6)) {
|
|
@@ -397,12 +396,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
|
|
ipv6_iface_scope_id(&sin->sin6_addr,
|
|
IP6CB(skb)->iif);
|
|
} else {
|
|
- struct inet_sock *inet = inet_sk(sk);
|
|
-
|
|
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
|
|
&sin->sin6_addr);
|
|
- sin->sin6_scope_id = 0;
|
|
- if (inet->cmsg_flags)
|
|
+ if (inet_sk(sk)->cmsg_flags)
|
|
ip_cmsg_recv(msg, skb);
|
|
}
|
|
}
|
|
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
|
|
index b4d5e1d..27ca796 100644
|
|
--- a/net/ipv6/fib6_rules.c
|
|
+++ b/net/ipv6/fib6_rules.c
|
|
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
|
|
goto again;
|
|
flp6->saddr = saddr;
|
|
}
|
|
+ err = rt->dst.error;
|
|
goto out;
|
|
}
|
|
again:
|
|
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
|
|
index 1e55f5e..7daaeaf 100644
|
|
--- a/net/ipv6/ip6_fib.c
|
|
+++ b/net/ipv6/ip6_fib.c
|
|
@@ -638,6 +638,29 @@ static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
|
|
RTF_GATEWAY;
|
|
}
|
|
|
|
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
|
|
+ struct net *net)
|
|
+{
|
|
+ if (atomic_read(&rt->rt6i_ref) != 1) {
|
|
+ /* This route is used as dummy address holder in some split
|
|
+ * nodes. It is not leaked, but it still holds other resources,
|
|
+ * which must be released in time. So, scan ascendant nodes
|
|
+ * and replace dummy references to this route with references
|
|
+ * to still alive ones.
|
|
+ */
|
|
+ while (fn) {
|
|
+ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
|
|
+ fn->leaf = fib6_find_prefix(net, fn);
|
|
+ atomic_inc(&fn->leaf->rt6i_ref);
|
|
+ rt6_release(rt);
|
|
+ }
|
|
+ fn = fn->parent;
|
|
+ }
|
|
+ /* No more references are possible at this point. */
|
|
+ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
|
|
+ }
|
|
+}
|
|
+
|
|
/*
|
|
* Insert routing information in a node.
|
|
*/
|
|
@@ -775,11 +798,12 @@ add:
|
|
rt->dst.rt6_next = iter->dst.rt6_next;
|
|
atomic_inc(&rt->rt6i_ref);
|
|
inet6_rt_notify(RTM_NEWROUTE, rt, info);
|
|
- rt6_release(iter);
|
|
if (!(fn->fn_flags & RTN_RTINFO)) {
|
|
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
|
|
fn->fn_flags |= RTN_RTINFO;
|
|
}
|
|
+ fib6_purge_rt(iter, fn, info->nl_net);
|
|
+ rt6_release(iter);
|
|
}
|
|
|
|
return 0;
|
|
@@ -1284,24 +1308,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
|
|
fn = fib6_repair_tree(net, fn);
|
|
}
|
|
|
|
- if (atomic_read(&rt->rt6i_ref) != 1) {
|
|
- /* This route is used as dummy address holder in some split
|
|
- * nodes. It is not leaked, but it still holds other resources,
|
|
- * which must be released in time. So, scan ascendant nodes
|
|
- * and replace dummy references to this route with references
|
|
- * to still alive ones.
|
|
- */
|
|
- while (fn) {
|
|
- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
|
|
- fn->leaf = fib6_find_prefix(net, fn);
|
|
- atomic_inc(&fn->leaf->rt6i_ref);
|
|
- rt6_release(rt);
|
|
- }
|
|
- fn = fn->parent;
|
|
- }
|
|
- /* No more references are possible at this point. */
|
|
- BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
|
|
- }
|
|
+ fib6_purge_rt(rt, fn, net);
|
|
|
|
inet6_rt_notify(RTM_DELROUTE, rt, info);
|
|
rt6_release(rt);
|
|
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
|
|
index 2465d18..4a230b1 100644
|
|
--- a/net/ipv6/ip6_gre.c
|
|
+++ b/net/ipv6/ip6_gre.c
|
|
@@ -508,11 +508,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
|
|
|
|
skb->protocol = gre_proto;
|
|
/* WCCP version 1 and 2 protocol decoding.
|
|
- * - Change protocol to IP
|
|
+ * - Change protocol to IPv6
|
|
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
|
|
*/
|
|
if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
|
|
- skb->protocol = htons(ETH_P_IP);
|
|
+ skb->protocol = htons(ETH_P_IPV6);
|
|
if ((*(h + offset) & 0xF0) != 0x40)
|
|
offset += 4;
|
|
}
|
|
@@ -787,7 +787,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
|
|
encap_limit = t->parms.encap_limit;
|
|
|
|
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
|
|
- fl6.flowi6_proto = IPPROTO_IPIP;
|
|
+ fl6.flowi6_proto = IPPROTO_GRE;
|
|
|
|
dsfield = ipv4_get_dsfield(iph);
|
|
|
|
@@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
|
|
encap_limit = t->parms.encap_limit;
|
|
|
|
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
|
|
- fl6.flowi6_proto = IPPROTO_IPV6;
|
|
+ fl6.flowi6_proto = IPPROTO_GRE;
|
|
|
|
dsfield = ipv6_get_dsfield(ipv6h);
|
|
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
|
|
@@ -962,8 +962,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
|
|
else
|
|
dev->flags &= ~IFF_POINTOPOINT;
|
|
|
|
- dev->iflink = p->link;
|
|
-
|
|
/* Precalculate GRE options length */
|
|
if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
|
|
if (t->parms.o_flags&GRE_CSUM)
|
|
@@ -1273,6 +1271,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
|
|
u64_stats_init(&ip6gre_tunnel_stats->syncp);
|
|
}
|
|
|
|
+ dev->iflink = tunnel->parms.link;
|
|
|
|
return 0;
|
|
}
|
|
@@ -1474,6 +1473,8 @@ static int ip6gre_tap_init(struct net_device *dev)
|
|
u64_stats_init(&ip6gre_tap_stats->syncp);
|
|
}
|
|
|
|
+ dev->iflink = tunnel->parms.link;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
|
|
index a62b610..066d0b0 100644
|
|
--- a/net/ipv6/ip6_output.c
|
|
+++ b/net/ipv6/ip6_output.c
|
|
@@ -537,11 +537,26 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
|
|
skb_copy_secmark(to, from);
|
|
}
|
|
|
|
+static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
|
|
+{
|
|
+ static u32 ip6_idents_hashrnd __read_mostly;
|
|
+ u32 hash, id;
|
|
+
|
|
+ net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
|
|
+
|
|
+ hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
|
|
+ hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
|
|
+
|
|
+ id = ip_idents_reserve(hash, 1);
|
|
+ fhdr->identification = htonl(id);
|
|
+}
|
|
+
|
|
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
|
{
|
|
struct sk_buff *frag;
|
|
struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
|
|
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
|
|
+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
|
|
+ inet6_sk(skb->sk) : NULL;
|
|
struct ipv6hdr *tmp_hdr;
|
|
struct frag_hdr *fh;
|
|
unsigned int mtu, hlen, left, len;
|
|
@@ -994,7 +1009,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
|
|
if (final_dst)
|
|
fl6->daddr = *final_dst;
|
|
|
|
- return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
|
|
+ return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
|
|
}
|
|
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
|
|
|
|
@@ -1026,7 +1041,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
|
|
if (final_dst)
|
|
fl6->daddr = *final_dst;
|
|
|
|
- return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
|
|
+ return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
|
|
}
|
|
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
|
|
|
|
@@ -1280,7 +1295,8 @@ emsgsize:
|
|
if (((length > mtu) ||
|
|
(skb && skb_is_gso(skb))) &&
|
|
(sk->sk_protocol == IPPROTO_UDP) &&
|
|
- (rt->dst.dev->features & NETIF_F_UFO)) {
|
|
+ (rt->dst.dev->features & NETIF_F_UFO) &&
|
|
+ (sk->sk_type == SOCK_DGRAM)) {
|
|
err = ip6_ufo_append_data(sk, getfrag, from, length,
|
|
hh_len, fragheaderlen,
|
|
transhdrlen, mtu, flags, rt);
|
|
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
|
|
index 9120339..657639d 100644
|
|
--- a/net/ipv6/ip6_tunnel.c
|
|
+++ b/net/ipv6/ip6_tunnel.c
|
|
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
|
|
int err;
|
|
|
|
t = netdev_priv(dev);
|
|
- err = ip6_tnl_dev_init(dev);
|
|
- if (err < 0)
|
|
- goto out;
|
|
|
|
err = register_netdevice(dev);
|
|
if (err < 0)
|
|
@@ -1456,6 +1453,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
|
|
|
|
|
|
static const struct net_device_ops ip6_tnl_netdev_ops = {
|
|
+ .ndo_init = ip6_tnl_dev_init,
|
|
.ndo_uninit = ip6_tnl_dev_uninit,
|
|
.ndo_start_xmit = ip6_tnl_xmit,
|
|
.ndo_do_ioctl = ip6_tnl_ioctl,
|
|
@@ -1547,16 +1545,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
|
|
struct ip6_tnl *t = netdev_priv(dev);
|
|
struct net *net = dev_net(dev);
|
|
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
|
|
- int err = ip6_tnl_dev_init_gen(dev);
|
|
-
|
|
- if (err)
|
|
- return err;
|
|
|
|
t->parms.proto = IPPROTO_IPV6;
|
|
dev_hold(dev);
|
|
|
|
- ip6_tnl_link_config(t);
|
|
-
|
|
rcu_assign_pointer(ip6n->tnls_wc[0], t);
|
|
return 0;
|
|
}
|
|
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
|
|
index 2d19272..28456c9 100644
|
|
--- a/net/ipv6/ip6_vti.c
|
|
+++ b/net/ipv6/ip6_vti.c
|
|
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
|
|
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
|
|
int err;
|
|
|
|
- err = vti6_dev_init(dev);
|
|
- if (err < 0)
|
|
- goto out;
|
|
-
|
|
err = register_netdevice(dev);
|
|
if (err < 0)
|
|
goto out;
|
|
@@ -693,6 +689,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
|
|
}
|
|
|
|
static const struct net_device_ops vti6_netdev_ops = {
|
|
+ .ndo_init = vti6_dev_init,
|
|
.ndo_uninit = vti6_dev_uninit,
|
|
.ndo_start_xmit = vti6_tnl_xmit,
|
|
.ndo_do_ioctl = vti6_ioctl,
|
|
@@ -772,16 +769,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
|
|
struct ip6_tnl *t = netdev_priv(dev);
|
|
struct net *net = dev_net(dev);
|
|
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
|
|
- int err = vti6_dev_init_gen(dev);
|
|
-
|
|
- if (err)
|
|
- return err;
|
|
|
|
t->parms.proto = IPPROTO_IPV6;
|
|
dev_hold(dev);
|
|
|
|
- vti6_link_config(t);
|
|
-
|
|
rcu_assign_pointer(ip6n->tnls_wc[0], t);
|
|
return 0;
|
|
}
|
|
@@ -834,6 +825,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
|
|
return vti6_tnl_create2(dev);
|
|
}
|
|
|
|
+static void vti6_dellink(struct net_device *dev, struct list_head *head)
|
|
+{
|
|
+ struct net *net = dev_net(dev);
|
|
+ struct vti6_net *ip6n = net_generic(net, vti6_net_id);
|
|
+
|
|
+ if (dev != ip6n->fb_tnl_dev)
|
|
+ unregister_netdevice_queue(dev, head);
|
|
+}
|
|
+
|
|
static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
|
|
struct nlattr *data[])
|
|
{
|
|
@@ -909,6 +909,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
|
|
.setup = vti6_dev_setup,
|
|
.validate = vti6_validate,
|
|
.newlink = vti6_newlink,
|
|
+ .dellink = vti6_dellink,
|
|
.changelink = vti6_changelink,
|
|
.get_size = vti6_get_size,
|
|
.fill_info = vti6_fill_info,
|
|
@@ -954,6 +955,7 @@ static int __net_init vti6_init_net(struct net *net)
|
|
if (!ip6n->fb_tnl_dev)
|
|
goto err_alloc_dev;
|
|
dev_net_set(ip6n->fb_tnl_dev, net);
|
|
+ ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
|
|
|
|
err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
|
|
if (err < 0)
|
|
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
|
|
index 08b367c..761e458 100644
|
|
--- a/net/ipv6/mcast.c
|
|
+++ b/net/ipv6/mcast.c
|
|
@@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
mc_lst->next = NULL;
|
|
mc_lst->addr = *addr;
|
|
|
|
+ rtnl_lock();
|
|
rcu_read_lock();
|
|
if (ifindex == 0) {
|
|
struct rt6_info *rt;
|
|
@@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
|
|
if (dev == NULL) {
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
|
|
return -ENODEV;
|
|
}
|
|
@@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
|
|
if (err) {
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
|
|
return err;
|
|
}
|
|
@@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
spin_unlock(&ipv6_sk_mc_lock);
|
|
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
|
|
return 0;
|
|
}
|
|
@@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
if (!ipv6_addr_is_multicast(addr))
|
|
return -EINVAL;
|
|
|
|
+ rtnl_lock();
|
|
spin_lock(&ipv6_sk_mc_lock);
|
|
for (lnk = &np->ipv6_mc_list;
|
|
(mc_lst = rcu_dereference_protected(*lnk,
|
|
@@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
|
|
} else
|
|
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
|
|
rcu_read_unlock();
|
|
+ rtnl_unlock();
|
|
+
|
|
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
|
|
kfree_rcu(mc_lst, rcu);
|
|
return 0;
|
|
}
|
|
}
|
|
spin_unlock(&ipv6_sk_mc_lock);
|
|
+ rtnl_unlock();
|
|
|
|
return -EADDRNOTAVAIL;
|
|
}
|
|
@@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk)
|
|
if (!rcu_access_pointer(np->ipv6_mc_list))
|
|
return;
|
|
|
|
+ rtnl_lock();
|
|
spin_lock(&ipv6_sk_mc_lock);
|
|
while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
|
|
lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
|
|
@@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk)
|
|
spin_lock(&ipv6_sk_mc_lock);
|
|
}
|
|
spin_unlock(&ipv6_sk_mc_lock);
|
|
+ rtnl_unlock();
|
|
}
|
|
|
|
int ip6_mc_source(int add, int omode, struct sock *sk,
|
|
@@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
|
|
struct ifmcaddr6 *mc;
|
|
struct inet6_dev *idev;
|
|
|
|
+ ASSERT_RTNL();
|
|
+
|
|
/* we need to take a reference on idev */
|
|
idev = in6_dev_get(dev);
|
|
|
|
@@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
|
|
{
|
|
struct ifmcaddr6 *ma, **map;
|
|
|
|
+ ASSERT_RTNL();
|
|
+
|
|
write_lock_bh(&idev->lock);
|
|
for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
|
|
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
|
|
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
|
|
index 09a22f4..bcd6518 100644
|
|
--- a/net/ipv6/ndisc.c
|
|
+++ b/net/ipv6/ndisc.c
|
|
@@ -1193,7 +1193,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
|
|
if (rt)
|
|
rt6_set_expires(rt, jiffies + (HZ * lifetime));
|
|
if (ra_msg->icmph.icmp6_hop_limit) {
|
|
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
|
|
+ /* Only set hop_limit on the interface if it is higher than
|
|
+ * the current hop_limit.
|
|
+ */
|
|
+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
|
|
+ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
|
|
+ } else {
|
|
+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
|
|
+ }
|
|
if (rt)
|
|
dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
|
|
ra_msg->icmph.icmp6_hop_limit);
|
|
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
|
|
index b31a012..ae4a06b 100644
|
|
--- a/net/ipv6/output_core.c
|
|
+++ b/net/ipv6/output_core.c
|
|
@@ -3,33 +3,43 @@
|
|
* not configured or static. These functions are needed by GSO/GRO implementation.
|
|
*/
|
|
#include <linux/export.h>
|
|
+#include <net/ip.h>
|
|
#include <net/ipv6.h>
|
|
#include <net/ip6_fib.h>
|
|
#include <net/addrconf.h>
|
|
|
|
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
|
|
+/* This function exists only for tap drivers that must support broken
|
|
+ * clients requesting UFO without specifying an IPv6 fragment ID.
|
|
+ *
|
|
+ * This is similar to ipv6_select_ident() but we use an independent hash
|
|
+ * seed to limit information leakage.
|
|
+ *
|
|
+ * The network header must be set before calling this.
|
|
+ */
|
|
+void ipv6_proxy_select_ident(struct sk_buff *skb)
|
|
{
|
|
- static atomic_t ipv6_fragmentation_id;
|
|
- int ident;
|
|
-
|
|
-#if IS_ENABLED(CONFIG_IPV6)
|
|
- if (rt && !(rt->dst.flags & DST_NOPEER)) {
|
|
- struct inet_peer *peer;
|
|
- struct net *net;
|
|
-
|
|
- net = dev_net(rt->dst.dev);
|
|
- peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
|
|
- if (peer) {
|
|
- fhdr->identification = htonl(inet_getid(peer, 0));
|
|
- inet_putpeer(peer);
|
|
- return;
|
|
- }
|
|
- }
|
|
-#endif
|
|
- ident = atomic_inc_return(&ipv6_fragmentation_id);
|
|
- fhdr->identification = htonl(ident);
|
|
+ static u32 ip6_proxy_idents_hashrnd __read_mostly;
|
|
+ struct in6_addr buf[2];
|
|
+ struct in6_addr *addrs;
|
|
+ u32 hash, id;
|
|
+
|
|
+ addrs = skb_header_pointer(skb,
|
|
+ skb_network_offset(skb) +
|
|
+ offsetof(struct ipv6hdr, saddr),
|
|
+ sizeof(buf), buf);
|
|
+ if (!addrs)
|
|
+ return;
|
|
+
|
|
+ net_get_random_once(&ip6_proxy_idents_hashrnd,
|
|
+ sizeof(ip6_proxy_idents_hashrnd));
|
|
+
|
|
+ hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
|
|
+ hash = __ipv6_addr_jhash(&addrs[0], hash);
|
|
+
|
|
+ id = ip_idents_reserve(hash, 1);
|
|
+ skb_shinfo(skb)->ip6_frag_id = htonl(id);
|
|
}
|
|
-EXPORT_SYMBOL(ipv6_select_ident);
|
|
+EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
|
|
|
|
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
|
|
{
|
|
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
|
|
index bda7429..4611995 100644
|
|
--- a/net/ipv6/ping.c
|
|
+++ b/net/ipv6/ping.c
|
|
@@ -103,9 +103,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
|
|
|
|
if (msg->msg_name) {
|
|
DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
|
|
- if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
|
|
- u->sin6_family != AF_INET6) {
|
|
+ if (msg->msg_namelen < sizeof(*u))
|
|
return -EINVAL;
|
|
+ if (u->sin6_family != AF_INET6) {
|
|
+ return -EAFNOSUPPORT;
|
|
}
|
|
if (sk->sk_bound_dev_if &&
|
|
sk->sk_bound_dev_if != u->sin6_scope_id) {
|
|
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
|
|
index 7cc1102..3809ca2 100644
|
|
--- a/net/ipv6/route.c
|
|
+++ b/net/ipv6/route.c
|
|
@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
|
|
u32 *p = NULL;
|
|
|
|
if (!(rt->dst.flags & DST_HOST))
|
|
- return NULL;
|
|
+ return dst_cow_metrics_generic(dst, old);
|
|
|
|
peer = rt6_get_peer_create(rt);
|
|
if (peer) {
|
|
@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
struct net *net = dev_net(dst->dev);
|
|
|
|
rt6->rt6i_flags |= RTF_MODIFIED;
|
|
- if (mtu < IPV6_MIN_MTU) {
|
|
- u32 features = dst_metric(dst, RTAX_FEATURES);
|
|
+ if (mtu < IPV6_MIN_MTU)
|
|
mtu = IPV6_MIN_MTU;
|
|
- features |= RTAX_FEATURE_ALLFRAG;
|
|
- dst_metric_set(dst, RTAX_FEATURES, features);
|
|
- }
|
|
+
|
|
dst_metric_set(dst, RTAX_MTU, mtu);
|
|
rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
|
|
}
|
|
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
|
|
index fe548ba..317b6db 100644
|
|
--- a/net/ipv6/sit.c
|
|
+++ b/net/ipv6/sit.c
|
|
@@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
|
|
for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
|
|
if (local == t->parms.iph.saddr &&
|
|
remote == t->parms.iph.daddr &&
|
|
- (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
|
|
+ (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
|
|
(t->dev->flags & IFF_UP))
|
|
return t;
|
|
}
|
|
for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
|
|
if (remote == t->parms.iph.daddr &&
|
|
- (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
|
|
+ (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
|
|
(t->dev->flags & IFF_UP))
|
|
return t;
|
|
}
|
|
for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
|
|
if (local == t->parms.iph.saddr &&
|
|
- (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
|
|
+ (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
|
|
(t->dev->flags & IFF_UP))
|
|
return t;
|
|
}
|
|
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
|
|
struct sit_net *sitn = net_generic(net, sit_net_id);
|
|
int err;
|
|
|
|
- err = ipip6_tunnel_init(dev);
|
|
- if (err < 0)
|
|
- goto out;
|
|
- ipip6_tunnel_clone_6rd(dev, sitn);
|
|
+ memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
|
|
+ memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
|
|
|
|
if ((__force u16)t->parms.i_flags & SIT_ISATAP)
|
|
dev->priv_flags |= IFF_ISATAP;
|
|
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
|
|
if (err < 0)
|
|
goto out;
|
|
|
|
- strcpy(t->parms.name, dev->name);
|
|
+ ipip6_tunnel_clone_6rd(dev, sitn);
|
|
+
|
|
dev->rtnl_link_ops = &sit_link_ops;
|
|
|
|
dev_hold(dev);
|
|
@@ -1321,6 +1320,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
|
|
}
|
|
|
|
static const struct net_device_ops ipip6_netdev_ops = {
|
|
+ .ndo_init = ipip6_tunnel_init,
|
|
.ndo_uninit = ipip6_tunnel_uninit,
|
|
.ndo_start_xmit = sit_tunnel_xmit,
|
|
.ndo_do_ioctl = ipip6_tunnel_ioctl,
|
|
@@ -1367,9 +1367,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
|
|
|
|
tunnel->dev = dev;
|
|
tunnel->net = dev_net(dev);
|
|
-
|
|
- memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
|
|
- memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
|
|
+ strcpy(tunnel->parms.name, dev->name);
|
|
|
|
ipip6_tunnel_bind_dev(dev);
|
|
dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
|
|
@@ -1401,7 +1399,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
|
|
|
|
tunnel->dev = dev;
|
|
tunnel->net = dev_net(dev);
|
|
- strcpy(tunnel->parms.name, dev->name);
|
|
|
|
iph->version = 4;
|
|
iph->protocol = IPPROTO_IPV6;
|
|
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
|
|
index 889079b..b50ae29 100644
|
|
--- a/net/ipv6/tcp_ipv6.c
|
|
+++ b/net/ipv6/tcp_ipv6.c
|
|
@@ -905,7 +905,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
|
|
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
|
|
tcp_time_stamp + tcptw->tw_ts_offset,
|
|
tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
|
|
- tw->tw_tclass, (tw->tw_flowlabel << 12));
|
|
+ tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
|
|
|
|
inet_twsk_put(tw);
|
|
}
|
|
@@ -1633,7 +1633,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
|
|
skb->sk = sk;
|
|
skb->destructor = sock_edemux;
|
|
if (sk->sk_state != TCP_TIME_WAIT) {
|
|
- struct dst_entry *dst = sk->sk_rx_dst;
|
|
+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
|
|
|
|
if (dst)
|
|
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
|
|
@@ -1668,6 +1668,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
|
|
.compat_setsockopt = compat_ipv6_setsockopt,
|
|
.compat_getsockopt = compat_ipv6_getsockopt,
|
|
#endif
|
|
+ .mtu_reduced = tcp_v6_mtu_reduced,
|
|
};
|
|
|
|
#ifdef CONFIG_TCP_MD5SIG
|
|
@@ -1699,6 +1700,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
|
|
.compat_setsockopt = compat_ipv6_setsockopt,
|
|
.compat_getsockopt = compat_ipv6_getsockopt,
|
|
#endif
|
|
+ .mtu_reduced = tcp_v4_mtu_reduced,
|
|
};
|
|
|
|
#ifdef CONFIG_TCP_MD5SIG
|
|
@@ -1935,7 +1937,6 @@ struct proto tcpv6_prot = {
|
|
.sendpage = tcp_sendpage,
|
|
.backlog_rcv = tcp_v6_do_rcv,
|
|
.release_cb = tcp_release_cb,
|
|
- .mtu_reduced = tcp_v6_mtu_reduced,
|
|
.hash = tcp_v6_hash,
|
|
.unhash = inet_unhash,
|
|
.get_port = inet_csk_get_port,
|
|
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
|
|
index 20b63d2..38625a9 100644
|
|
--- a/net/ipv6/udp.c
|
|
+++ b/net/ipv6/udp.c
|
|
@@ -515,10 +515,8 @@ csum_copy_err:
|
|
}
|
|
unlock_sock_fast(sk, slow);
|
|
|
|
- if (noblock)
|
|
- return -EAGAIN;
|
|
-
|
|
- /* starting over for a new packet */
|
|
+ /* starting over for a new packet, but check if we need to yield */
|
|
+ cond_resched();
|
|
msg->msg_flags &= ~MSG_TRUNC;
|
|
goto try_again;
|
|
}
|
|
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
|
|
index 00b2a6d..d65aea2 100644
|
|
--- a/net/ipx/af_ipx.c
|
|
+++ b/net/ipx/af_ipx.c
|
|
@@ -1763,6 +1763,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
struct ipxhdr *ipx = NULL;
|
|
struct sk_buff *skb;
|
|
int copied, rc;
|
|
+ bool locked = true;
|
|
|
|
lock_sock(sk);
|
|
/* put the autobinding in */
|
|
@@ -1789,6 +1790,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
if (sock_flag(sk, SOCK_ZAPPED))
|
|
goto out;
|
|
|
|
+ release_sock(sk);
|
|
+ locked = false;
|
|
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
|
|
flags & MSG_DONTWAIT, &rc);
|
|
if (!skb)
|
|
@@ -1822,7 +1825,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
out_free:
|
|
skb_free_datagram(sk, skb);
|
|
out:
|
|
- release_sock(sk);
|
|
+ if (locked)
|
|
+ release_sock(sk);
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
|
|
index 2ba8b97..fdcb968 100644
|
|
--- a/net/irda/ircomm/ircomm_tty.c
|
|
+++ b/net/irda/ircomm/ircomm_tty.c
|
|
@@ -818,7 +818,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
|
|
orig_jiffies = jiffies;
|
|
|
|
/* Set poll time to 200 ms */
|
|
- poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
|
|
+ poll_time = msecs_to_jiffies(200);
|
|
+ if (timeout)
|
|
+ poll_time = min_t(unsigned long, timeout, poll_time);
|
|
|
|
spin_lock_irqsave(&self->spinlock, flags);
|
|
while (self->tx_skb && self->tx_skb->len) {
|
|
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
|
|
index ec66063..da8d067 100644
|
|
--- a/net/l2tp/l2tp_ppp.c
|
|
+++ b/net/l2tp/l2tp_ppp.c
|
|
@@ -758,7 +758,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
|
|
/* If PMTU discovery was enabled, use the MTU that was discovered */
|
|
dst = sk_dst_get(tunnel->sock);
|
|
if (dst != NULL) {
|
|
- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
|
|
+ u32 pmtu = dst_mtu(dst);
|
|
+
|
|
if (pmtu != 0)
|
|
session->mtu = session->mru = pmtu -
|
|
PPPOL2TP_HEADER_OVERHEAD;
|
|
@@ -1368,7 +1369,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
|
|
int err;
|
|
|
|
if (level != SOL_PPPOL2TP)
|
|
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
|
|
+ return -EINVAL;
|
|
|
|
if (optlen < sizeof(int))
|
|
return -EINVAL;
|
|
@@ -1494,7 +1495,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
|
|
struct pppol2tp_session *ps;
|
|
|
|
if (level != SOL_PPPOL2TP)
|
|
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
|
|
+ return -EINVAL;
|
|
|
|
if (get_user(len, optlen))
|
|
return -EFAULT;
|
|
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
|
|
index 612a5dd..799bafc 100644
|
|
--- a/net/llc/sysctl_net_llc.c
|
|
+++ b/net/llc/sysctl_net_llc.c
|
|
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
|
|
{
|
|
.procname = "ack",
|
|
.data = &sysctl_llc2_ack_timeout,
|
|
- .maxlen = sizeof(long),
|
|
+ .maxlen = sizeof(sysctl_llc2_ack_timeout),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_jiffies,
|
|
},
|
|
{
|
|
.procname = "busy",
|
|
.data = &sysctl_llc2_busy_timeout,
|
|
- .maxlen = sizeof(long),
|
|
+ .maxlen = sizeof(sysctl_llc2_busy_timeout),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_jiffies,
|
|
},
|
|
{
|
|
.procname = "p",
|
|
.data = &sysctl_llc2_p_timeout,
|
|
- .maxlen = sizeof(long),
|
|
+ .maxlen = sizeof(sysctl_llc2_p_timeout),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_jiffies,
|
|
},
|
|
{
|
|
.procname = "rej",
|
|
.data = &sysctl_llc2_rej_timeout,
|
|
- .maxlen = sizeof(long),
|
|
+ .maxlen = sizeof(sysctl_llc2_rej_timeout),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec_jiffies,
|
|
},
|
|
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
|
|
index 7c7df47..f056f9ed 100644
|
|
--- a/net/mac80211/aes_ccm.c
|
|
+++ b/net/mac80211/aes_ccm.c
|
|
@@ -54,6 +54,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
|
|
|
|
memset(&aead_req, 0, sizeof(aead_req));
|
|
|
|
+ if (data_len == 0)
|
|
+ return -EINVAL;
|
|
+
|
|
sg_init_one(&pt, data, data_len);
|
|
sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
|
|
sg_init_table(ct, 2);
|
|
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
|
|
index 653ce5d..5d8bc1f 100644
|
|
--- a/net/mac80211/debugfs_netdev.c
|
|
+++ b/net/mac80211/debugfs_netdev.c
|
|
@@ -712,6 +712,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
|
|
|
|
debugfs_remove_recursive(sdata->vif.debugfs_dir);
|
|
sdata->vif.debugfs_dir = NULL;
|
|
+ sdata->debugfs.subdir_stations = NULL;
|
|
}
|
|
|
|
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
|
|
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
|
|
index e09f10a..82fbf32 100644
|
|
--- a/net/mac80211/ibss.c
|
|
+++ b/net/mac80211/ibss.c
|
|
@@ -815,7 +815,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|
|
|
memset(¶ms, 0, sizeof(params));
|
|
memset(&csa_ie, 0, sizeof(csa_ie));
|
|
- err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
|
|
+ err = ieee80211_parse_ch_switch_ie(sdata, elems,
|
|
ifibss->chandef.chan->band,
|
|
sta_flags, ifibss->bssid, &csa_ie);
|
|
/* can't switch to destination channel, fail */
|
|
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
|
|
index b127902..e278c64 100644
|
|
--- a/net/mac80211/ieee80211_i.h
|
|
+++ b/net/mac80211/ieee80211_i.h
|
|
@@ -57,13 +57,24 @@ struct ieee80211_local;
|
|
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
|
|
|
|
/*
|
|
- * Some APs experience problems when working with U-APSD. Decrease the
|
|
- * probability of that happening by using legacy mode for all ACs but VO.
|
|
- * The AP that caused us trouble was a Cisco 4410N. It ignores our
|
|
- * setting, and always treats non-VO ACs as legacy.
|
|
+ * Some APs experience problems when working with U-APSD. Decreasing the
|
|
+ * probability of that happening by using legacy mode for all ACs but VO isn't
|
|
+ * enough.
|
|
+ *
|
|
+ * Cisco 4410N originally forced us to enable VO by default only because it
|
|
+ * treated non-VO ACs as legacy.
|
|
+ *
|
|
+ * However some APs (notably Netgear R7000) silently reclassify packets to
|
|
+ * different ACs. Since u-APSD ACs require trigger frames for frame retrieval
|
|
+ * clients would never see some frames (e.g. ARP responses) or would fetch them
|
|
+ * accidentally after a long time.
|
|
+ *
|
|
+ * It makes little sense to enable u-APSD queues by default because it needs
|
|
+ * userspace applications to be aware of it to actually take advantage of the
|
|
+ * possible additional powersavings. Implicitly depending on driver autotrigger
|
|
+ * frame support doesn't make much sense.
|
|
*/
|
|
-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
|
|
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
|
|
+#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
|
|
|
|
#define IEEE80211_DEFAULT_MAX_SP_LEN \
|
|
IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
|
|
@@ -1569,7 +1580,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
|
|
* ieee80211_parse_ch_switch_ie - parses channel switch IEs
|
|
* @sdata: the sdata of the interface which has received the frame
|
|
* @elems: parsed 802.11 elements received with the frame
|
|
- * @beacon: indicates if the frame was a beacon or probe response
|
|
* @current_band: indicates the current band
|
|
* @sta_flags: contains information about own capabilities and restrictions
|
|
* to decide which channel switch announcements can be accepted. Only the
|
|
@@ -1583,7 +1593,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
|
|
* Return: 0 on success, <0 on error and >0 if there is nothing to parse.
|
|
*/
|
|
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|
- struct ieee802_11_elems *elems, bool beacon,
|
|
+ struct ieee802_11_elems *elems,
|
|
enum ieee80211_band current_band,
|
|
u32 sta_flags, u8 *bssid,
|
|
struct ieee80211_csa_ie *csa_ie);
|
|
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
|
|
index 8f7fabc..06f5de4 100644
|
|
--- a/net/mac80211/iface.c
|
|
+++ b/net/mac80211/iface.c
|
|
@@ -760,10 +760,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|
int i, flushed;
|
|
struct ps_data *ps;
|
|
struct cfg80211_chan_def chandef;
|
|
+ bool cancel_scan;
|
|
|
|
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
|
|
|
|
- if (rcu_access_pointer(local->scan_sdata) == sdata)
|
|
+ cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
|
|
+ if (cancel_scan)
|
|
ieee80211_scan_cancel(local);
|
|
|
|
/*
|
|
@@ -973,6 +975,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|
|
|
ieee80211_recalc_ps(local, -1);
|
|
|
|
+ if (cancel_scan)
|
|
+ flush_delayed_work(&local->scan_work);
|
|
+
|
|
if (local->open_count == 0) {
|
|
ieee80211_stop_device(local);
|
|
|
|
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
|
|
index 6ff65a1..d78b37a 100644
|
|
--- a/net/mac80211/key.c
|
|
+++ b/net/mac80211/key.c
|
|
@@ -652,7 +652,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
|
|
int i;
|
|
|
|
mutex_lock(&local->key_mtx);
|
|
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
|
|
+ for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
|
|
key = key_mtx_dereference(local, sta->gtk[i]);
|
|
if (!key)
|
|
continue;
|
|
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
|
|
index c7a7a86..9e58c99 100644
|
|
--- a/net/mac80211/main.c
|
|
+++ b/net/mac80211/main.c
|
|
@@ -248,6 +248,7 @@ static void ieee80211_restart_work(struct work_struct *work)
|
|
{
|
|
struct ieee80211_local *local =
|
|
container_of(work, struct ieee80211_local, restart_work);
|
|
+ struct ieee80211_sub_if_data *sdata;
|
|
|
|
/* wait for scan work complete */
|
|
flush_workqueue(local->workqueue);
|
|
@@ -256,6 +257,8 @@ static void ieee80211_restart_work(struct work_struct *work)
|
|
"%s called with hardware scan in progress\n", __func__);
|
|
|
|
rtnl_lock();
|
|
+ list_for_each_entry(sdata, &local->interfaces, list)
|
|
+ flush_delayed_work(&sdata->dec_tailroom_needed_wk);
|
|
ieee80211_scan_cancel(local);
|
|
ieee80211_reconfig(local);
|
|
rtnl_unlock();
|
|
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
|
|
index 5b919ca..3d52d1d 100644
|
|
--- a/net/mac80211/mesh.c
|
|
+++ b/net/mac80211/mesh.c
|
|
@@ -885,7 +885,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
|
|
|
|
memset(¶ms, 0, sizeof(params));
|
|
memset(&csa_ie, 0, sizeof(csa_ie));
|
|
- err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
|
|
+ err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
|
|
sta_flags, sdata->vif.addr,
|
|
&csa_ie);
|
|
if (err < 0)
|
|
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
|
|
index e6a84cb..c9535a9 100644
|
|
--- a/net/mac80211/mlme.c
|
|
+++ b/net/mac80211/mlme.c
|
|
@@ -1001,7 +1001,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|
|
|
current_band = cbss->channel->band;
|
|
memset(&csa_ie, 0, sizeof(csa_ie));
|
|
- res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
|
|
+ res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
|
|
ifmgd->flags,
|
|
ifmgd->associated->bssid, &csa_ie);
|
|
if (res < 0)
|
|
@@ -1086,7 +1086,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|
ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
|
|
else
|
|
mod_timer(&ifmgd->chswitch_timer,
|
|
- TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
|
|
+ TU_TO_EXP_TIME((csa_ie.count - 1) *
|
|
+ cbss->beacon_interval));
|
|
}
|
|
|
|
static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
|
|
@@ -4240,8 +4241,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
|
|
rcu_read_unlock();
|
|
|
|
if (bss->wmm_used && bss->uapsd_supported &&
|
|
- (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
|
|
- sdata->wmm_acm != 0xff) {
|
|
+ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
|
|
assoc_data->uapsd = true;
|
|
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
|
|
} else {
|
|
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
|
|
index 22b223f..74350c3 100644
|
|
--- a/net/mac80211/rate.c
|
|
+++ b/net/mac80211/rate.c
|
|
@@ -462,7 +462,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
|
|
*/
|
|
if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
|
|
u32 basic_rates = vif->bss_conf.basic_rates;
|
|
- s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
|
|
+ s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
|
|
|
|
rate = &sband->bitrates[rates[0].idx];
|
|
|
|
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
|
|
index 3e57f96..9abb445 100644
|
|
--- a/net/mac80211/rx.c
|
|
+++ b/net/mac80211/rx.c
|
|
@@ -261,7 +261,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
|
|
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
|
|
channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
|
|
else if (rate)
|
|
- channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
|
|
+ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
|
|
else
|
|
channel_flags |= IEEE80211_CHAN_2GHZ;
|
|
put_unaligned_le16(channel_flags, pos);
|
|
@@ -1679,11 +1679,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
|
|
sc = le16_to_cpu(hdr->seq_ctrl);
|
|
frag = sc & IEEE80211_SCTL_FRAG;
|
|
|
|
- if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
|
|
- is_multicast_ether_addr(hdr->addr1))) {
|
|
- /* not fragmented */
|
|
- goto out;
|
|
+ if (is_multicast_ether_addr(hdr->addr1)) {
|
|
+ rx->local->dot11MulticastReceivedFrameCount++;
|
|
+ goto out_no_led;
|
|
}
|
|
+
|
|
+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
|
|
+ goto out;
|
|
+
|
|
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
|
|
|
|
if (skb_linearize(rx->skb))
|
|
@@ -1774,12 +1777,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
|
|
status->rx_flags |= IEEE80211_RX_FRAGMENTED;
|
|
|
|
out:
|
|
+ ieee80211_led_rx(rx->local);
|
|
+ out_no_led:
|
|
if (rx->sta)
|
|
rx->sta->rx_packets++;
|
|
- if (is_multicast_ether_addr(hdr->addr1))
|
|
- rx->local->dot11MulticastReceivedFrameCount++;
|
|
- else
|
|
- ieee80211_led_rx(rx->local);
|
|
return RX_CONTINUE;
|
|
}
|
|
|
|
@@ -2106,6 +2107,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
|
|
hdr = (struct ieee80211_hdr *) skb->data;
|
|
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
|
|
|
|
+ if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
|
|
+ return RX_DROP_MONITOR;
|
|
+
|
|
/* frame is in RMC, don't forward */
|
|
if (ieee80211_is_data(hdr->frame_control) &&
|
|
is_multicast_ether_addr(hdr->addr1) &&
|
|
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
|
|
index 6ab0090..efeba56 100644
|
|
--- a/net/mac80211/spectmgmt.c
|
|
+++ b/net/mac80211/spectmgmt.c
|
|
@@ -22,7 +22,7 @@
|
|
#include "wme.h"
|
|
|
|
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|
- struct ieee802_11_elems *elems, bool beacon,
|
|
+ struct ieee802_11_elems *elems,
|
|
enum ieee80211_band current_band,
|
|
u32 sta_flags, u8 *bssid,
|
|
struct ieee80211_csa_ie *csa_ie)
|
|
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (!beacon && sec_chan_offs) {
|
|
+ if (sec_chan_offs) {
|
|
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
|
|
- } else if (beacon && ht_oper) {
|
|
- secondary_channel_offset =
|
|
- ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
|
|
} else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
|
|
- /* If it's not a beacon, HT is enabled and the IE not present,
|
|
- * it's 20 MHz, 802.11-2012 8.5.2.6:
|
|
- * This element [the Secondary Channel Offset Element] is
|
|
- * present when switching to a 40 MHz channel. It may be
|
|
- * present when switching to a 20 MHz channel (in which
|
|
- * case the secondary channel offset is set to SCN).
|
|
- */
|
|
+ /* If the secondary channel offset IE is not present,
|
|
+ * we can't know what's the post-CSA offset, so the
|
|
+ * best we can do is use 20MHz.
|
|
+ */
|
|
secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
|
|
}
|
|
|
|
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
|
|
index c14c16a..dca076f 100644
|
|
--- a/net/mac80211/tx.c
|
|
+++ b/net/mac80211/tx.c
|
|
@@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
|
|
if (ieee80211_has_order(hdr->frame_control))
|
|
return TX_CONTINUE;
|
|
|
|
+ if (ieee80211_is_probe_req(hdr->frame_control))
|
|
+ return TX_CONTINUE;
|
|
+
|
|
if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
|
|
info->hw_queue = tx->sdata->vif.cab_queue;
|
|
|
|
@@ -464,6 +467,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
|
|
{
|
|
struct sta_info *sta = tx->sta;
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
|
|
struct ieee80211_local *local = tx->local;
|
|
|
|
if (unlikely(!sta))
|
|
@@ -474,6 +478,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
|
|
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
|
|
int ac = skb_get_queue_mapping(tx->skb);
|
|
|
|
+ /* only deauth, disassoc and action are bufferable MMPDUs */
|
|
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
|
|
+ !ieee80211_is_deauth(hdr->frame_control) &&
|
|
+ !ieee80211_is_disassoc(hdr->frame_control) &&
|
|
+ !ieee80211_is_action(hdr->frame_control)) {
|
|
+ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
|
|
+ return TX_CONTINUE;
|
|
+ }
|
|
+
|
|
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
|
|
sta->sta.addr, sta->sta.aid, ac);
|
|
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
|
|
@@ -532,22 +545,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
|
|
static ieee80211_tx_result debug_noinline
|
|
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
|
|
{
|
|
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
|
|
-
|
|
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
|
|
return TX_CONTINUE;
|
|
-
|
|
- /* only deauth, disassoc and action are bufferable MMPDUs */
|
|
- if (ieee80211_is_mgmt(hdr->frame_control) &&
|
|
- !ieee80211_is_deauth(hdr->frame_control) &&
|
|
- !ieee80211_is_disassoc(hdr->frame_control) &&
|
|
- !ieee80211_is_action(hdr->frame_control)) {
|
|
- if (tx->flags & IEEE80211_TX_UNICAST)
|
|
- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
|
|
- return TX_CONTINUE;
|
|
- }
|
|
-
|
|
if (tx->flags & IEEE80211_TX_UNICAST)
|
|
return ieee80211_tx_h_unicast_ps_buf(tx);
|
|
else
|
|
@@ -563,6 +562,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
|
|
if (tx->sdata->control_port_no_encrypt)
|
|
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
|
|
info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
|
|
+ info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
|
|
}
|
|
|
|
return TX_CONTINUE;
|
|
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
|
|
index 6ee2b58..f21b142 100644
|
|
--- a/net/mac80211/wep.c
|
|
+++ b/net/mac80211/wep.c
|
|
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
|
|
|
|
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
|
|
|
|
- if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
|
|
- skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
|
|
+ if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
|
|
return NULL;
|
|
|
|
hdrlen = ieee80211_hdrlen(hdr->frame_control);
|
|
@@ -169,6 +168,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
|
|
size_t len;
|
|
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
|
|
|
|
+ if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
|
|
+ return -1;
|
|
+
|
|
iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
|
|
if (!iv)
|
|
return -1;
|
|
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
|
|
index de770ec..53ea164 100644
|
|
--- a/net/netfilter/ipset/ip_set_core.c
|
|
+++ b/net/netfilter/ipset/ip_set_core.c
|
|
@@ -636,7 +636,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
|
|
struct ip_set *set;
|
|
struct ip_set_net *inst = ip_set_pernet(net);
|
|
|
|
- if (index > inst->ip_set_max)
|
|
+ if (index >= inst->ip_set_max)
|
|
return IPSET_INVALID_ID;
|
|
|
|
nfnl_lock(NFNL_SUBSYS_IPSET);
|
|
@@ -1839,6 +1839,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
|
|
if (*op < IP_SET_OP_VERSION) {
|
|
/* Check the version at the beginning of operations */
|
|
struct ip_set_req_version *req_version = data;
|
|
+
|
|
+ if (*len < sizeof(struct ip_set_req_version)) {
|
|
+ ret = -EINVAL;
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
if (req_version->version != IPSET_PROTOCOL) {
|
|
ret = -EPROTO;
|
|
goto done;
|
|
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
|
|
index a8eb0a8..610e19c 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_conn.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_conn.c
|
|
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
|
|
ip_vs_control_del(cp);
|
|
|
|
if (cp->flags & IP_VS_CONN_F_NFCT) {
|
|
- ip_vs_conn_drop_conntrack(cp);
|
|
/* Do not access conntracks during subsys cleanup
|
|
* because nf_conntrack_find_get can not be used after
|
|
* conntrack cleanup for the net.
|
|
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
|
|
index 3d2d2c8..847d2a2 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_core.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_core.c
|
|
@@ -658,16 +658,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
|
|
return err;
|
|
}
|
|
|
|
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
|
|
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
|
|
+ unsigned int hooknum)
|
|
{
|
|
+ if (!sysctl_snat_reroute(skb))
|
|
+ return 0;
|
|
+ /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
|
|
+ if (NF_INET_LOCAL_IN == hooknum)
|
|
+ return 0;
|
|
#ifdef CONFIG_IP_VS_IPV6
|
|
if (af == AF_INET6) {
|
|
- if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
|
|
+ struct dst_entry *dst = skb_dst(skb);
|
|
+
|
|
+ if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
|
|
+ ip6_route_me_harder(skb) != 0)
|
|
return 1;
|
|
} else
|
|
#endif
|
|
- if ((sysctl_snat_reroute(skb) ||
|
|
- skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
|
|
+ if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
|
|
ip_route_me_harder(skb, RTN_LOCAL) != 0)
|
|
return 1;
|
|
|
|
@@ -790,7 +798,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
|
|
union nf_inet_addr *snet,
|
|
__u8 protocol, struct ip_vs_conn *cp,
|
|
struct ip_vs_protocol *pp,
|
|
- unsigned int offset, unsigned int ihl)
|
|
+ unsigned int offset, unsigned int ihl,
|
|
+ unsigned int hooknum)
|
|
{
|
|
unsigned int verdict = NF_DROP;
|
|
|
|
@@ -820,7 +829,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
|
|
#endif
|
|
ip_vs_nat_icmp(skb, pp, cp, 1);
|
|
|
|
- if (ip_vs_route_me_harder(af, skb))
|
|
+ if (ip_vs_route_me_harder(af, skb, hooknum))
|
|
goto out;
|
|
|
|
/* do the statistics and put it back */
|
|
@@ -915,7 +924,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
|
|
|
|
snet.ip = iph->saddr;
|
|
return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
|
|
- pp, ciph.len, ihl);
|
|
+ pp, ciph.len, ihl, hooknum);
|
|
}
|
|
|
|
#ifdef CONFIG_IP_VS_IPV6
|
|
@@ -980,7 +989,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
|
|
snet.in6 = ciph.saddr.in6;
|
|
writable = ciph.len;
|
|
return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
|
|
- pp, writable, sizeof(struct ipv6hdr));
|
|
+ pp, writable, sizeof(struct ipv6hdr),
|
|
+ hooknum);
|
|
}
|
|
#endif
|
|
|
|
@@ -1039,7 +1049,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
|
|
*/
|
|
static unsigned int
|
|
handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
|
|
- struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
|
|
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
|
|
+ unsigned int hooknum)
|
|
{
|
|
struct ip_vs_protocol *pp = pd->pp;
|
|
|
|
@@ -1077,7 +1088,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
|
|
* if it came from this machine itself. So re-compute
|
|
* the routing information.
|
|
*/
|
|
- if (ip_vs_route_me_harder(af, skb))
|
|
+ if (ip_vs_route_me_harder(af, skb, hooknum))
|
|
goto drop;
|
|
|
|
IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
|
|
@@ -1180,7 +1191,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
|
|
cp = pp->conn_out_get(af, skb, &iph, 0);
|
|
|
|
if (likely(cp))
|
|
- return handle_response(af, skb, pd, cp, &iph);
|
|
+ return handle_response(af, skb, pd, cp, &iph, hooknum);
|
|
if (sysctl_nat_icmp_send(net) &&
|
|
(pp->protocol == IPPROTO_TCP ||
|
|
pp->protocol == IPPROTO_UDP ||
|
|
@@ -1906,7 +1917,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
|
|
{
|
|
.hook = ip_vs_local_reply6,
|
|
.owner = THIS_MODULE,
|
|
- .pf = NFPROTO_IPV4,
|
|
+ .pf = NFPROTO_IPV6,
|
|
.hooknum = NF_INET_LOCAL_OUT,
|
|
.priority = NF_IP6_PRI_NAT_DST + 1,
|
|
},
|
|
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
|
|
index 77c1732..4a662f1 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_ftp.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
|
|
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
|
|
struct nf_conn *ct;
|
|
struct net *net;
|
|
|
|
+ *diff = 0;
|
|
+
|
|
#ifdef CONFIG_IP_VS_IPV6
|
|
/* This application helper doesn't work with IPv6 yet,
|
|
* so turn this into a no-op for IPv6 packets
|
|
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
|
|
return 1;
|
|
#endif
|
|
|
|
- *diff = 0;
|
|
-
|
|
/* Only useful for established sessions */
|
|
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
|
|
return 1;
|
|
@@ -321,6 +321,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
|
|
struct ip_vs_conn *n_cp;
|
|
struct net *net;
|
|
|
|
+ /* no diff required for incoming packets */
|
|
+ *diff = 0;
|
|
+
|
|
#ifdef CONFIG_IP_VS_IPV6
|
|
/* This application helper doesn't work with IPv6 yet,
|
|
* so turn this into a no-op for IPv6 packets
|
|
@@ -329,9 +332,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
|
|
return 1;
|
|
#endif
|
|
|
|
- /* no diff required for incoming packets */
|
|
- *diff = 0;
|
|
-
|
|
/* Only useful for established sessions */
|
|
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
|
|
return 1;
|
|
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
|
|
index db80126..a8027e7 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_sync.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_sync.c
|
|
@@ -891,6 +891,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
|
|
IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
|
|
return;
|
|
}
|
|
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
|
|
+ kfree(param->pe_data);
|
|
}
|
|
|
|
if (opt)
|
|
@@ -1164,6 +1166,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
|
|
(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
|
|
);
|
|
#endif
|
|
+ ip_vs_pe_put(param.pe);
|
|
return 0;
|
|
/* Error exit */
|
|
out:
|
|
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
index c47444e..1692e75 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_xmit.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
|
|
iph->daddr = cp->daddr.ip;
|
|
iph->saddr = saddr;
|
|
iph->ttl = old_iph->ttl;
|
|
- ip_select_ident(skb, &rt->dst, NULL);
|
|
+ ip_select_ident(skb, NULL);
|
|
|
|
/* Another hack: avoid icmp_send in ip_fragment */
|
|
skb->local_df = 1;
|
|
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
|
|
iph->nexthdr = IPPROTO_IPV6;
|
|
iph->payload_len = old_iph->payload_len;
|
|
be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
|
|
- iph->priority = old_iph->priority;
|
|
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
|
|
+ ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
|
|
iph->daddr = cp->daddr.in6;
|
|
iph->saddr = saddr;
|
|
iph->hop_limit = old_iph->hop_limit;
|
|
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
|
|
index d25f293..957c1db 100644
|
|
--- a/net/netfilter/nf_conntrack_proto_generic.c
|
|
+++ b/net/netfilter/nf_conntrack_proto_generic.c
|
|
@@ -14,6 +14,30 @@
|
|
|
|
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
|
|
|
|
+static bool nf_generic_should_process(u8 proto)
|
|
+{
|
|
+ switch (proto) {
|
|
+#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
|
|
+ case IPPROTO_SCTP:
|
|
+ return false;
|
|
+#endif
|
|
+#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
|
|
+ case IPPROTO_DCCP:
|
|
+ return false;
|
|
+#endif
|
|
+#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
|
|
+ case IPPROTO_GRE:
|
|
+ return false;
|
|
+#endif
|
|
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
|
|
+ case IPPROTO_UDPLITE:
|
|
+ return false;
|
|
+#endif
|
|
+ default:
|
|
+ return true;
|
|
+ }
|
|
+}
|
|
+
|
|
static inline struct nf_generic_net *generic_pernet(struct net *net)
|
|
{
|
|
return &net->ct.nf_ct_proto.generic;
|
|
@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
|
|
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
|
|
unsigned int dataoff, unsigned int *timeouts)
|
|
{
|
|
- return true;
|
|
+ return nf_generic_should_process(nf_ct_protonum(ct));
|
|
}
|
|
|
|
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
|
|
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index c68e5e0..99de240 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -855,7 +855,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
|
|
|
|
if (nla[NFTA_CHAIN_POLICY]) {
|
|
if ((chain != NULL &&
|
|
- !(chain->flags & NFT_BASE_CHAIN)) ||
|
|
+ !(chain->flags & NFT_BASE_CHAIN)))
|
|
+ return -EOPNOTSUPP;
|
|
+
|
|
+ if (chain == NULL &&
|
|
nla[NFTA_CHAIN_HOOK] == NULL)
|
|
return -EOPNOTSUPP;
|
|
|
|
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
|
|
index bf8a108..6cf2f07 100644
|
|
--- a/net/netfilter/nfnetlink.c
|
|
+++ b/net/netfilter/nfnetlink.c
|
|
@@ -265,7 +265,8 @@ replay:
|
|
nlh = nlmsg_hdr(skb);
|
|
err = 0;
|
|
|
|
- if (nlh->nlmsg_len < NLMSG_HDRLEN) {
|
|
+ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
|
|
+ skb->len < nlh->nlmsg_len) {
|
|
err = -EINVAL;
|
|
goto ack;
|
|
}
|
|
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
|
|
index 9e287cb..54330fb 100644
|
|
--- a/net/netfilter/nfnetlink_cthelper.c
|
|
+++ b/net/netfilter/nfnetlink_cthelper.c
|
|
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
|
|
if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
|
|
return -EINVAL;
|
|
|
|
+ /* Not all fields are initialized so first zero the tuple */
|
|
+ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
|
|
+
|
|
tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
|
|
tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
|
|
|
|
@@ -86,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
|
|
static int
|
|
nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
|
|
{
|
|
- const struct nf_conn_help *help = nfct_help(ct);
|
|
+ struct nf_conn_help *help = nfct_help(ct);
|
|
|
|
if (attr == NULL)
|
|
return -EINVAL;
|
|
@@ -94,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
|
|
if (help->helper->data_len == 0)
|
|
return -EINVAL;
|
|
|
|
- memcpy(&help->data, nla_data(attr), help->helper->data_len);
|
|
+ memcpy(help->data, nla_data(attr), help->helper->data_len);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
|
|
index a155d19..6ff12a1 100644
|
|
--- a/net/netfilter/nfnetlink_log.c
|
|
+++ b/net/netfilter/nfnetlink_log.c
|
|
@@ -45,7 +45,8 @@
|
|
#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
|
|
#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
|
|
#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
|
|
-#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */
|
|
+/* max packet size is limited by 16-bit struct nfattr nfa_len field */
|
|
+#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN)
|
|
|
|
#define PRINTR(x, args...) do { if (net_ratelimit()) \
|
|
printk(x, ## args); } while (0);
|
|
@@ -255,6 +256,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
|
|
|
|
case NFULNL_COPY_PACKET:
|
|
inst->copy_mode = mode;
|
|
+ if (range == 0)
|
|
+ range = NFULNL_COPY_RANGE_MAX;
|
|
inst->copy_range = min_t(unsigned int,
|
|
range, NFULNL_COPY_RANGE_MAX);
|
|
break;
|
|
@@ -346,26 +349,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
|
|
return skb;
|
|
}
|
|
|
|
-static int
|
|
+static void
|
|
__nfulnl_send(struct nfulnl_instance *inst)
|
|
{
|
|
- int status = -1;
|
|
-
|
|
if (inst->qlen > 1) {
|
|
struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
|
|
NLMSG_DONE,
|
|
sizeof(struct nfgenmsg),
|
|
0);
|
|
- if (!nlh)
|
|
+ if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
|
|
+ inst->skb->len, skb_tailroom(inst->skb))) {
|
|
+ kfree_skb(inst->skb);
|
|
goto out;
|
|
+ }
|
|
}
|
|
- status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
|
|
- MSG_DONTWAIT);
|
|
-
|
|
+ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
|
|
+ MSG_DONTWAIT);
|
|
+out:
|
|
inst->qlen = 0;
|
|
inst->skb = NULL;
|
|
-out:
|
|
- return status;
|
|
}
|
|
|
|
static void
|
|
@@ -652,7 +654,8 @@ nfulnl_log_packet(struct net *net,
|
|
+ nla_total_size(sizeof(u_int32_t)) /* gid */
|
|
+ nla_total_size(plen) /* prefix */
|
|
+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
|
|
- + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
|
|
+ + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
|
|
+ + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */
|
|
|
|
if (in && skb_mac_header_was_set(skb)) {
|
|
size += nla_total_size(skb->dev->hard_header_len)
|
|
@@ -681,8 +684,7 @@ nfulnl_log_packet(struct net *net,
|
|
break;
|
|
|
|
case NFULNL_COPY_PACKET:
|
|
- if (inst->copy_range == 0
|
|
- || inst->copy_range > skb->len)
|
|
+ if (inst->copy_range > skb->len)
|
|
data_len = skb->len;
|
|
else
|
|
data_len = inst->copy_range;
|
|
@@ -695,8 +697,7 @@ nfulnl_log_packet(struct net *net,
|
|
goto unlock_and_release;
|
|
}
|
|
|
|
- if (inst->skb &&
|
|
- size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
|
|
+ if (inst->skb && size > skb_tailroom(inst->skb)) {
|
|
/* either the queue len is too high or we don't have
|
|
* enough room in the skb left. flush to userspace. */
|
|
__nfulnl_flush(inst);
|
|
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
|
|
index 82cb823..9695895 100644
|
|
--- a/net/netfilter/nft_compat.c
|
|
+++ b/net/netfilter/nft_compat.c
|
|
@@ -82,6 +82,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
|
|
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
|
|
break;
|
|
case AF_INET6:
|
|
+ if (proto)
|
|
+ entry->e6.ipv6.flags |= IP6T_F_PROTO;
|
|
+
|
|
entry->e6.ipv6.proto = proto;
|
|
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
|
|
break;
|
|
@@ -313,6 +316,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
|
|
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
|
|
break;
|
|
case AF_INET6:
|
|
+ if (proto)
|
|
+ entry->e6.ipv6.flags |= IP6T_F_PROTO;
|
|
+
|
|
entry->e6.ipv6.proto = proto;
|
|
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
|
|
break;
|
|
@@ -611,8 +617,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
|
|
struct xt_match *match = nft_match->ops.data;
|
|
|
|
if (strcmp(match->name, mt_name) == 0 &&
|
|
- match->revision == rev && match->family == family)
|
|
+ match->revision == rev && match->family == family) {
|
|
+ if (!try_module_get(match->me))
|
|
+ return ERR_PTR(-ENOENT);
|
|
+
|
|
return &nft_match->ops;
|
|
+ }
|
|
}
|
|
|
|
match = xt_request_find_match(family, mt_name, rev);
|
|
@@ -678,12 +688,16 @@ nft_target_select_ops(const struct nft_ctx *ctx,
|
|
family = ctx->afi->family;
|
|
|
|
/* Re-use the existing target if it's already loaded. */
|
|
- list_for_each_entry(nft_target, &nft_match_list, head) {
|
|
+ list_for_each_entry(nft_target, &nft_target_list, head) {
|
|
struct xt_target *target = nft_target->ops.data;
|
|
|
|
if (strcmp(target->name, tg_name) == 0 &&
|
|
- target->revision == rev && target->family == family)
|
|
+ target->revision == rev && target->family == family) {
|
|
+ if (!try_module_get(target->me))
|
|
+ return ERR_PTR(-ENOENT);
|
|
+
|
|
return &nft_target->ops;
|
|
+ }
|
|
}
|
|
|
|
target = xt_request_find_target(family, tg_name, rev);
|
|
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
|
|
index 9a8e77e7..ef5c75a 100644
|
|
--- a/net/netfilter/xt_cgroup.c
|
|
+++ b/net/netfilter/xt_cgroup.c
|
|
@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct xt_mtchk_param *par)
|
|
if (info->invert & ~1)
|
|
return -EINVAL;
|
|
|
|
- return info->id ? 0 : -EINVAL;
|
|
+ return 0;
|
|
}
|
|
|
|
static bool
|
|
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
|
|
index a3910fc..47dc683 100644
|
|
--- a/net/netfilter/xt_hashlimit.c
|
|
+++ b/net/netfilter/xt_hashlimit.c
|
|
@@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
|
|
spinlock_t lock; /* lock for list_head */
|
|
u_int32_t rnd; /* random seed for hash */
|
|
unsigned int count; /* number entries in table */
|
|
- struct timer_list timer; /* timer for gc */
|
|
+ struct delayed_work gc_work;
|
|
|
|
/* seq_file stuff */
|
|
struct proc_dir_entry *pde;
|
|
@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
|
|
call_rcu_bh(&ent->rcu, dsthash_free_rcu);
|
|
ht->count--;
|
|
}
|
|
-static void htable_gc(unsigned long htlong);
|
|
+static void htable_gc(struct work_struct *work);
|
|
|
|
static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
|
|
u_int8_t family)
|
|
@@ -273,9 +273,9 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
|
|
}
|
|
hinfo->net = net;
|
|
|
|
- setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
|
|
- hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
|
|
- add_timer(&hinfo->timer);
|
|
+ INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
|
|
+ queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
|
|
+ msecs_to_jiffies(hinfo->cfg.gc_interval));
|
|
|
|
hlist_add_head(&hinfo->node, &hashlimit_net->htables);
|
|
|
|
@@ -300,29 +300,30 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
|
|
{
|
|
unsigned int i;
|
|
|
|
- /* lock hash table and iterate over it */
|
|
- spin_lock_bh(&ht->lock);
|
|
for (i = 0; i < ht->cfg.size; i++) {
|
|
struct dsthash_ent *dh;
|
|
struct hlist_node *n;
|
|
+
|
|
+ spin_lock_bh(&ht->lock);
|
|
hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
|
|
if ((*select)(ht, dh))
|
|
dsthash_free(ht, dh);
|
|
}
|
|
+ spin_unlock_bh(&ht->lock);
|
|
+ cond_resched();
|
|
}
|
|
- spin_unlock_bh(&ht->lock);
|
|
}
|
|
|
|
-/* hash table garbage collector, run by timer */
|
|
-static void htable_gc(unsigned long htlong)
|
|
+static void htable_gc(struct work_struct *work)
|
|
{
|
|
- struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
|
|
+ struct xt_hashlimit_htable *ht;
|
|
+
|
|
+ ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
|
|
|
|
htable_selective_cleanup(ht, select_gc);
|
|
|
|
- /* re-add the timer accordingly */
|
|
- ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
|
|
- add_timer(&ht->timer);
|
|
+ queue_delayed_work(system_power_efficient_wq,
|
|
+ &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
|
|
}
|
|
|
|
static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
|
|
@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
|
|
|
|
static void htable_destroy(struct xt_hashlimit_htable *hinfo)
|
|
{
|
|
- del_timer_sync(&hinfo->timer);
|
|
+ cancel_delayed_work_sync(&hinfo->gc_work);
|
|
htable_remove_proc_entry(hinfo);
|
|
htable_selective_cleanup(hinfo, select_all);
|
|
kfree(hinfo->name);
|
|
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
|
|
index 1ba6793..13332db 100644
|
|
--- a/net/netfilter/xt_socket.c
|
|
+++ b/net/netfilter/xt_socket.c
|
|
@@ -243,12 +243,13 @@ static int
|
|
extract_icmp6_fields(const struct sk_buff *skb,
|
|
unsigned int outside_hdrlen,
|
|
int *protocol,
|
|
- struct in6_addr **raddr,
|
|
- struct in6_addr **laddr,
|
|
+ const struct in6_addr **raddr,
|
|
+ const struct in6_addr **laddr,
|
|
__be16 *rport,
|
|
- __be16 *lport)
|
|
+ __be16 *lport,
|
|
+ struct ipv6hdr *ipv6_var)
|
|
{
|
|
- struct ipv6hdr *inside_iph, _inside_iph;
|
|
+ const struct ipv6hdr *inside_iph;
|
|
struct icmp6hdr *icmph, _icmph;
|
|
__be16 *ports, _ports[2];
|
|
u8 inside_nexthdr;
|
|
@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
|
|
if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
|
|
return 1;
|
|
|
|
- inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph);
|
|
+ inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
|
|
+ sizeof(*ipv6_var), ipv6_var);
|
|
if (inside_iph == NULL)
|
|
return 1;
|
|
inside_nexthdr = inside_iph->nexthdr;
|
|
|
|
- inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
|
|
+ inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
|
|
+ sizeof(*ipv6_var),
|
|
&inside_nexthdr, &inside_fragoff);
|
|
if (inside_hdrlen < 0)
|
|
return 1; /* hjm: Packet has no/incomplete transport layer headers. */
|
|
@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
|
|
static bool
|
|
socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
|
|
{
|
|
- struct ipv6hdr *iph = ipv6_hdr(skb);
|
|
+ struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
|
|
struct udphdr _hdr, *hp = NULL;
|
|
struct sock *sk = skb->sk;
|
|
- struct in6_addr *daddr = NULL, *saddr = NULL;
|
|
+ const struct in6_addr *daddr = NULL, *saddr = NULL;
|
|
__be16 uninitialized_var(dport), uninitialized_var(sport);
|
|
int thoff = 0, uninitialized_var(tproto);
|
|
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
|
|
@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
|
|
|
|
} else if (tproto == IPPROTO_ICMPV6) {
|
|
if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
|
|
- &sport, &dport))
|
|
+ &sport, &dport, &ipv6_var))
|
|
return false;
|
|
} else {
|
|
return false;
|
|
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
|
|
index 0dfe894..a0b0ea9 100644
|
|
--- a/net/netlink/af_netlink.c
|
|
+++ b/net/netlink/af_netlink.c
|
|
@@ -205,7 +205,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
|
|
nskb->protocol = htons((u16) sk->sk_protocol);
|
|
nskb->pkt_type = netlink_is_kernel(sk) ?
|
|
PACKET_KERNEL : PACKET_USER;
|
|
-
|
|
+ skb_reset_network_header(nskb);
|
|
ret = dev_queue_xmit(nskb);
|
|
if (unlikely(ret > 0))
|
|
ret = net_xmit_errno(ret);
|
|
@@ -510,14 +510,14 @@ out:
|
|
return err;
|
|
}
|
|
|
|
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
|
|
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
|
|
{
|
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
|
|
struct page *p_start, *p_end;
|
|
|
|
/* First page is flushed through netlink_{get,set}_status */
|
|
p_start = pgvec_to_page(hdr + PAGE_SIZE);
|
|
- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
|
|
+ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
|
|
while (p_start <= p_end) {
|
|
flush_dcache_page(p_start);
|
|
p_start++;
|
|
@@ -535,9 +535,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
|
|
static void netlink_set_status(struct nl_mmap_hdr *hdr,
|
|
enum nl_mmap_status status)
|
|
{
|
|
+ smp_mb();
|
|
hdr->nm_status = status;
|
|
flush_dcache_page(pgvec_to_page(hdr));
|
|
- smp_wmb();
|
|
}
|
|
|
|
static struct nl_mmap_hdr *
|
|
@@ -699,24 +699,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
|
|
struct nl_mmap_hdr *hdr;
|
|
struct sk_buff *skb;
|
|
unsigned int maxlen;
|
|
- bool excl = true;
|
|
int err = 0, len = 0;
|
|
|
|
- /* Netlink messages are validated by the receiver before processing.
|
|
- * In order to avoid userspace changing the contents of the message
|
|
- * after validation, the socket and the ring may only be used by a
|
|
- * single process, otherwise we fall back to copying.
|
|
- */
|
|
- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
|
|
- atomic_read(&nlk->mapped) > 1)
|
|
- excl = false;
|
|
-
|
|
mutex_lock(&nlk->pg_vec_lock);
|
|
|
|
ring = &nlk->tx_ring;
|
|
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
|
|
|
|
do {
|
|
+ unsigned int nm_len;
|
|
+
|
|
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
|
|
if (hdr == NULL) {
|
|
if (!(msg->msg_flags & MSG_DONTWAIT) &&
|
|
@@ -724,35 +716,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
|
|
schedule();
|
|
continue;
|
|
}
|
|
- if (hdr->nm_len > maxlen) {
|
|
+
|
|
+ nm_len = ACCESS_ONCE(hdr->nm_len);
|
|
+ if (nm_len > maxlen) {
|
|
err = -EINVAL;
|
|
goto out;
|
|
}
|
|
|
|
- netlink_frame_flush_dcache(hdr);
|
|
+ netlink_frame_flush_dcache(hdr, nm_len);
|
|
|
|
- if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
|
|
- skb = alloc_skb_head(GFP_KERNEL);
|
|
- if (skb == NULL) {
|
|
- err = -ENOBUFS;
|
|
- goto out;
|
|
- }
|
|
- sock_hold(sk);
|
|
- netlink_ring_setup_skb(skb, sk, ring, hdr);
|
|
- NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
|
|
- __skb_put(skb, hdr->nm_len);
|
|
- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
|
|
- atomic_inc(&ring->pending);
|
|
- } else {
|
|
- skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
|
|
- if (skb == NULL) {
|
|
- err = -ENOBUFS;
|
|
- goto out;
|
|
- }
|
|
- __skb_put(skb, hdr->nm_len);
|
|
- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
|
|
- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
|
|
+ skb = alloc_skb(nm_len, GFP_KERNEL);
|
|
+ if (skb == NULL) {
|
|
+ err = -ENOBUFS;
|
|
+ goto out;
|
|
}
|
|
+ __skb_put(skb, nm_len);
|
|
+ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
|
|
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
|
|
|
|
netlink_increment_head(ring);
|
|
|
|
@@ -798,7 +778,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
|
|
hdr->nm_pid = NETLINK_CB(skb).creds.pid;
|
|
hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
|
|
hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
|
|
- netlink_frame_flush_dcache(hdr);
|
|
+ netlink_frame_flush_dcache(hdr, hdr->nm_len);
|
|
netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
|
|
|
|
NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
|
|
@@ -1644,13 +1624,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
|
|
if (data == NULL)
|
|
return NULL;
|
|
|
|
- skb = build_skb(data, size);
|
|
+ skb = __build_skb(data, size);
|
|
if (skb == NULL)
|
|
vfree(data);
|
|
- else {
|
|
- skb->head_frag = 0;
|
|
+ else
|
|
skb->destructor = netlink_skb_destructor;
|
|
- }
|
|
|
|
return skb;
|
|
}
|
|
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
|
|
index 2c77e7b..600c764 100644
|
|
--- a/net/openvswitch/actions.c
|
|
+++ b/net/openvswitch/actions.c
|
|
@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
static int make_writable(struct sk_buff *skb, int write_len)
|
|
{
|
|
+ if (!pskb_may_pull(skb, write_len))
|
|
+ return -ENOMEM;
|
|
+
|
|
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
|
|
return 0;
|
|
|
|
@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
|
|
|
|
vlan_set_encap_proto(skb, vhdr);
|
|
skb->mac_header += VLAN_HLEN;
|
|
+ if (skb_network_offset(skb) < ETH_HLEN)
|
|
+ skb_set_network_header(skb, ETH_HLEN);
|
|
skb_reset_mac_len(skb);
|
|
|
|
return 0;
|
|
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
|
|
index 48a6a93..84a60b8 100644
|
|
--- a/net/packet/af_packet.c
|
|
+++ b/net/packet/af_packet.c
|
|
@@ -635,6 +635,7 @@ static void init_prb_bdqc(struct packet_sock *po,
|
|
p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
|
|
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
|
|
|
|
+ p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
|
|
prb_init_ft_ops(p1, req_u);
|
|
prb_setup_retire_blk_timer(po, tx_ring);
|
|
prb_open_block(p1, pbd);
|
|
@@ -1263,16 +1264,6 @@ static void packet_sock_destruct(struct sock *sk)
|
|
sk_refcnt_debug_dec(sk);
|
|
}
|
|
|
|
-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
|
|
-{
|
|
- int x = atomic_read(&f->rr_cur) + 1;
|
|
-
|
|
- if (x >= num)
|
|
- x = 0;
|
|
-
|
|
- return x;
|
|
-}
|
|
-
|
|
static unsigned int fanout_demux_hash(struct packet_fanout *f,
|
|
struct sk_buff *skb,
|
|
unsigned int num)
|
|
@@ -1284,13 +1275,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
|
|
struct sk_buff *skb,
|
|
unsigned int num)
|
|
{
|
|
- int cur, old;
|
|
+ unsigned int val = atomic_inc_return(&f->rr_cur);
|
|
|
|
- cur = atomic_read(&f->rr_cur);
|
|
- while ((old = atomic_cmpxchg(&f->rr_cur, cur,
|
|
- fanout_rr_next(f, num))) != cur)
|
|
- cur = old;
|
|
- return cur;
|
|
+ return val % num;
|
|
}
|
|
|
|
static unsigned int fanout_demux_cpu(struct packet_fanout *f,
|
|
@@ -1344,7 +1331,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
|
|
struct packet_type *pt, struct net_device *orig_dev)
|
|
{
|
|
struct packet_fanout *f = pt->af_packet_priv;
|
|
- unsigned int num = f->num_members;
|
|
+ unsigned int num = ACCESS_ONCE(f->num_members);
|
|
struct packet_sock *po;
|
|
unsigned int idx;
|
|
|
|
@@ -1946,6 +1933,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
if ((int)snaplen < 0)
|
|
snaplen = 0;
|
|
}
|
|
+ } else if (unlikely(macoff + snaplen >
|
|
+ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
|
|
+ u32 nval;
|
|
+
|
|
+ nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
|
|
+ pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
|
|
+ snaplen, nval, macoff);
|
|
+ snaplen = nval;
|
|
+ if (unlikely((int)snaplen < 0)) {
|
|
+ snaplen = 0;
|
|
+ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
|
|
+ }
|
|
}
|
|
spin_lock(&sk->sk_receive_queue.lock);
|
|
h.raw = packet_current_rx_frame(po, skb,
|
|
@@ -3779,6 +3778,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
|
|
goto out;
|
|
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
|
|
goto out;
|
|
+ if (po->tp_version >= TPACKET_V3 &&
|
|
+ (int)(req->tp_block_size -
|
|
+ BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
|
|
+ goto out;
|
|
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
|
|
po->tp_reserve))
|
|
goto out;
|
|
diff --git a/net/packet/internal.h b/net/packet/internal.h
|
|
index eb9580a..cdddf6a 100644
|
|
--- a/net/packet/internal.h
|
|
+++ b/net/packet/internal.h
|
|
@@ -29,6 +29,7 @@ struct tpacket_kbdq_core {
|
|
char *pkblk_start;
|
|
char *pkblk_end;
|
|
int kblk_size;
|
|
+ unsigned int max_frame_len;
|
|
unsigned int knum_blocks;
|
|
uint64_t knxt_seq_num;
|
|
char *prev;
|
|
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
|
|
index e8fdb17..a985158 100644
|
|
--- a/net/rds/ib_rdma.c
|
|
+++ b/net/rds/ib_rdma.c
|
|
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
|
|
}
|
|
|
|
ibmr = rds_ib_alloc_fmr(rds_ibdev);
|
|
- if (IS_ERR(ibmr))
|
|
+ if (IS_ERR(ibmr)) {
|
|
+ rds_ib_dev_put(rds_ibdev);
|
|
return ibmr;
|
|
+ }
|
|
|
|
ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
|
|
if (ret == 0)
|
|
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
|
|
index a817705..dba8d08 100644
|
|
--- a/net/rds/iw_rdma.c
|
|
+++ b/net/rds/iw_rdma.c
|
|
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
|
|
int *unpinned);
|
|
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
|
|
|
|
-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
|
|
+static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
|
|
+ struct rds_iw_device **rds_iwdev,
|
|
+ struct rdma_cm_id **cm_id)
|
|
{
|
|
struct rds_iw_device *iwdev;
|
|
struct rds_iw_cm_id *i_cm_id;
|
|
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
|
|
src_addr->sin_port,
|
|
dst_addr->sin_addr.s_addr,
|
|
dst_addr->sin_port,
|
|
- rs->rs_bound_addr,
|
|
- rs->rs_bound_port,
|
|
- rs->rs_conn_addr,
|
|
- rs->rs_conn_port);
|
|
+ src->sin_addr.s_addr,
|
|
+ src->sin_port,
|
|
+ dst->sin_addr.s_addr,
|
|
+ dst->sin_port);
|
|
#ifdef WORKING_TUPLE_DETECTION
|
|
- if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
|
|
- src_addr->sin_port == rs->rs_bound_port &&
|
|
- dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
|
|
- dst_addr->sin_port == rs->rs_conn_port) {
|
|
+ if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
|
|
+ src_addr->sin_port == src->sin_port &&
|
|
+ dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
|
|
+ dst_addr->sin_port == dst->sin_port) {
|
|
#else
|
|
/* FIXME - needs to compare the local and remote
|
|
* ipaddr/port tuple, but the ipaddr is the only
|
|
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
|
|
* zero'ed. It doesn't appear to be properly populated
|
|
* during connection setup...
|
|
*/
|
|
- if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
|
|
+ if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
|
|
#endif
|
|
spin_unlock_irq(&iwdev->spinlock);
|
|
*rds_iwdev = iwdev;
|
|
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
|
|
{
|
|
struct sockaddr_in *src_addr, *dst_addr;
|
|
struct rds_iw_device *rds_iwdev_old;
|
|
- struct rds_sock rs;
|
|
struct rdma_cm_id *pcm_id;
|
|
int rc;
|
|
|
|
src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
|
|
dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
|
|
|
|
- rs.rs_bound_addr = src_addr->sin_addr.s_addr;
|
|
- rs.rs_bound_port = src_addr->sin_port;
|
|
- rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
|
|
- rs.rs_conn_port = dst_addr->sin_port;
|
|
-
|
|
- rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
|
|
+ rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
|
|
if (rc)
|
|
rds_iw_remove_cm_id(rds_iwdev, cm_id);
|
|
|
|
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
|
|
struct rds_iw_device *rds_iwdev;
|
|
struct rds_iw_mr *ibmr = NULL;
|
|
struct rdma_cm_id *cm_id;
|
|
+ struct sockaddr_in src = {
|
|
+ .sin_addr.s_addr = rs->rs_bound_addr,
|
|
+ .sin_port = rs->rs_bound_port,
|
|
+ };
|
|
+ struct sockaddr_in dst = {
|
|
+ .sin_addr.s_addr = rs->rs_conn_addr,
|
|
+ .sin_port = rs->rs_conn_port,
|
|
+ };
|
|
int ret;
|
|
|
|
- ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
|
|
+ ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
|
|
if (ret || !cm_id) {
|
|
ret = -ENODEV;
|
|
goto out;
|
|
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
|
|
index b5cb2aa..35773ad 100644
|
|
--- a/net/rds/sysctl.c
|
|
+++ b/net/rds/sysctl.c
|
|
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
|
|
{
|
|
.procname = "max_unacked_packets",
|
|
.data = &rds_sysctl_max_unacked_packets,
|
|
- .maxlen = sizeof(unsigned long),
|
|
+ .maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec,
|
|
},
|
|
{
|
|
.procname = "max_unacked_bytes",
|
|
.data = &rds_sysctl_max_unacked_bytes,
|
|
- .maxlen = sizeof(unsigned long),
|
|
+ .maxlen = sizeof(int),
|
|
.mode = 0644,
|
|
.proc_handler = proc_dointvec,
|
|
},
|
|
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
|
|
index 34b5490..4949f75 100644
|
|
--- a/net/rxrpc/ar-recvmsg.c
|
|
+++ b/net/rxrpc/ar-recvmsg.c
|
|
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
if (!skb) {
|
|
/* nothing remains on the queue */
|
|
if (copied &&
|
|
- (msg->msg_flags & MSG_PEEK || timeo == 0))
|
|
+ (flags & MSG_PEEK || timeo == 0))
|
|
goto out;
|
|
|
|
/* wait for a message to turn up */
|
|
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
|
|
index 3a633de..a2abc44 100644
|
|
--- a/net/sched/ematch.c
|
|
+++ b/net/sched/ematch.c
|
|
@@ -227,6 +227,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
|
|
* to replay the request.
|
|
*/
|
|
module_put(em->ops->owner);
|
|
+ em->ops = NULL;
|
|
err = -EAGAIN;
|
|
}
|
|
#endif
|
|
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
|
|
index 98532cf..bdaed31 100644
|
|
--- a/net/sched/sch_api.c
|
|
+++ b/net/sched/sch_api.c
|
|
@@ -812,10 +812,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
|
|
if (dev->flags & IFF_UP)
|
|
dev_deactivate(dev);
|
|
|
|
- if (new && new->ops->attach) {
|
|
- new->ops->attach(new);
|
|
- num_q = 0;
|
|
- }
|
|
+ if (new && new->ops->attach)
|
|
+ goto skip;
|
|
|
|
for (i = 0; i < num_q; i++) {
|
|
struct netdev_queue *dev_queue = dev_ingress_queue(dev);
|
|
@@ -831,12 +829,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
|
|
qdisc_destroy(old);
|
|
}
|
|
|
|
+skip:
|
|
if (!ingress) {
|
|
notify_and_destroy(net, skb, n, classid,
|
|
dev->qdisc, new);
|
|
if (new && !new->ops->attach)
|
|
atomic_inc(&new->refcnt);
|
|
dev->qdisc = new ? : &noop_qdisc;
|
|
+
|
|
+ if (new && new->ops->attach)
|
|
+ new->ops->attach(new);
|
|
} else {
|
|
notify_and_destroy(net, skb, n, classid, old, new);
|
|
}
|
|
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
|
|
index a4d5701..abc0922 100644
|
|
--- a/net/sctp/associola.c
|
|
+++ b/net/sctp/associola.c
|
|
@@ -1151,6 +1151,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
|
|
asoc->c = new->c;
|
|
asoc->peer.rwnd = new->peer.rwnd;
|
|
asoc->peer.sack_needed = new->peer.sack_needed;
|
|
+ asoc->peer.auth_capable = new->peer.auth_capable;
|
|
asoc->peer.i = new->peer.i;
|
|
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
|
|
asoc->peer.i.initial_tsn, GFP_ATOMIC);
|
|
@@ -1234,7 +1235,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
|
|
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
|
|
new->peer.peer_hmacs = NULL;
|
|
|
|
- sctp_auth_key_put(asoc->asoc_shared_key);
|
|
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
|
|
}
|
|
|
|
@@ -1626,6 +1626,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
|
|
* ack chunk whose serial number matches that of the request.
|
|
*/
|
|
list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
|
|
+ if (sctp_chunk_pending(ack))
|
|
+ continue;
|
|
if (ack->subh.addip_hdr->serial == serial) {
|
|
sctp_chunk_hold(ack);
|
|
return ack;
|
|
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
|
|
index 0e85291..fb7976a 100644
|
|
--- a/net/sctp/auth.c
|
|
+++ b/net/sctp/auth.c
|
|
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
|
|
list_add(&cur_key->key_list, sh_keys);
|
|
|
|
cur_key->key = key;
|
|
- sctp_auth_key_hold(key);
|
|
-
|
|
return 0;
|
|
nomem:
|
|
if (!replace)
|
|
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
|
|
index 4de12af..7e8a16c 100644
|
|
--- a/net/sctp/inqueue.c
|
|
+++ b/net/sctp/inqueue.c
|
|
@@ -140,18 +140,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
|
|
} else {
|
|
/* Nothing to do. Next chunk in the packet, please. */
|
|
ch = (sctp_chunkhdr_t *) chunk->chunk_end;
|
|
-
|
|
/* Force chunk->skb->data to chunk->chunk_end. */
|
|
- skb_pull(chunk->skb,
|
|
- chunk->chunk_end - chunk->skb->data);
|
|
-
|
|
- /* Verify that we have at least chunk headers
|
|
- * worth of buffer left.
|
|
- */
|
|
- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
|
|
- sctp_chunk_free(chunk);
|
|
- chunk = queue->in_progress = NULL;
|
|
- }
|
|
+ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
|
|
+ /* We are guaranteed to pull a SCTP header. */
|
|
}
|
|
}
|
|
|
|
@@ -187,24 +178,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
|
|
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
|
|
chunk->subh.v = NULL; /* Subheader is no longer valid. */
|
|
|
|
- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
|
|
+ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
|
|
+ skb_tail_pointer(chunk->skb)) {
|
|
/* This is not a singleton */
|
|
chunk->singleton = 0;
|
|
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
|
|
- /* RFC 2960, Section 6.10 Bundling
|
|
- *
|
|
- * Partial chunks MUST NOT be placed in an SCTP packet.
|
|
- * If the receiver detects a partial chunk, it MUST drop
|
|
- * the chunk.
|
|
- *
|
|
- * Since the end of the chunk is past the end of our buffer
|
|
- * (which contains the whole packet, we can freely discard
|
|
- * the whole packet.
|
|
- */
|
|
- sctp_chunk_free(chunk);
|
|
- chunk = queue->in_progress = NULL;
|
|
-
|
|
- return NULL;
|
|
+ /* Discard inside state machine. */
|
|
+ chunk->pdiscard = 1;
|
|
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
|
|
} else {
|
|
/* We are at the end of the packet, so mark the chunk
|
|
* in case we need to send a SACK.
|
|
diff --git a/net/sctp/output.c b/net/sctp/output.c
|
|
index 0f4d15f..e39e6d5 100644
|
|
--- a/net/sctp/output.c
|
|
+++ b/net/sctp/output.c
|
|
@@ -401,12 +401,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
|
|
sk = chunk->skb->sk;
|
|
|
|
/* Allocate the new skb. */
|
|
- nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
|
|
+ nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
|
|
if (!nskb)
|
|
goto nomem;
|
|
|
|
/* Make sure the outbound skb has enough header room reserved. */
|
|
- skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
|
|
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
|
|
|
|
/* Set the owning socket so that we know where to get the
|
|
* destination IP address.
|
|
@@ -599,7 +599,9 @@ out:
|
|
return err;
|
|
no_route:
|
|
kfree_skb(nskb);
|
|
- IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
|
|
+
|
|
+ if (asoc)
|
|
+ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
|
|
|
|
/* FIXME: Returning the 'err' will effect all the associations
|
|
* associated with a socket, although only one of the paths of the
|
|
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
|
|
index fee5552..df06b13 100644
|
|
--- a/net/sctp/sm_make_chunk.c
|
|
+++ b/net/sctp/sm_make_chunk.c
|
|
@@ -2608,7 +2608,10 @@ do_addr_param:
|
|
|
|
addr_param = param.v + sizeof(sctp_addip_param_t);
|
|
|
|
- af = sctp_get_af_specific(param_type2af(param.p->type));
|
|
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
|
|
+ if (af == NULL)
|
|
+ break;
|
|
+
|
|
af->from_addr_param(&addr, addr_param,
|
|
htons(asoc->peer.port), 0);
|
|
|
|
@@ -3110,50 +3113,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
|
|
return SCTP_ERROR_NO_ERROR;
|
|
}
|
|
|
|
-/* Verify the ASCONF packet before we process it. */
|
|
-int sctp_verify_asconf(const struct sctp_association *asoc,
|
|
- struct sctp_paramhdr *param_hdr, void *chunk_end,
|
|
- struct sctp_paramhdr **errp) {
|
|
- sctp_addip_param_t *asconf_param;
|
|
+/* Verify the ASCONF packet before we process it. */
|
|
+bool sctp_verify_asconf(const struct sctp_association *asoc,
|
|
+ struct sctp_chunk *chunk, bool addr_param_needed,
|
|
+ struct sctp_paramhdr **errp)
|
|
+{
|
|
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
|
|
union sctp_params param;
|
|
- int length, plen;
|
|
+ bool addr_param_seen = false;
|
|
|
|
- param.v = (sctp_paramhdr_t *) param_hdr;
|
|
- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
|
|
- length = ntohs(param.p->length);
|
|
- *errp = param.p;
|
|
-
|
|
- if (param.v > chunk_end - length ||
|
|
- length < sizeof(sctp_paramhdr_t))
|
|
- return 0;
|
|
+ sctp_walk_params(param, addip, addip_hdr.params) {
|
|
+ size_t length = ntohs(param.p->length);
|
|
|
|
+ *errp = param.p;
|
|
switch (param.p->type) {
|
|
+ case SCTP_PARAM_ERR_CAUSE:
|
|
+ break;
|
|
+ case SCTP_PARAM_IPV4_ADDRESS:
|
|
+ if (length != sizeof(sctp_ipv4addr_param_t))
|
|
+ return false;
|
|
+ addr_param_seen = true;
|
|
+ break;
|
|
+ case SCTP_PARAM_IPV6_ADDRESS:
|
|
+ if (length != sizeof(sctp_ipv6addr_param_t))
|
|
+ return false;
|
|
+ addr_param_seen = true;
|
|
+ break;
|
|
case SCTP_PARAM_ADD_IP:
|
|
case SCTP_PARAM_DEL_IP:
|
|
case SCTP_PARAM_SET_PRIMARY:
|
|
- asconf_param = (sctp_addip_param_t *)param.v;
|
|
- plen = ntohs(asconf_param->param_hdr.length);
|
|
- if (plen < sizeof(sctp_addip_param_t) +
|
|
- sizeof(sctp_paramhdr_t))
|
|
- return 0;
|
|
+ /* In ASCONF chunks, these need to be first. */
|
|
+ if (addr_param_needed && !addr_param_seen)
|
|
+ return false;
|
|
+ length = ntohs(param.addip->param_hdr.length);
|
|
+ if (length < sizeof(sctp_addip_param_t) +
|
|
+ sizeof(sctp_paramhdr_t))
|
|
+ return false;
|
|
break;
|
|
case SCTP_PARAM_SUCCESS_REPORT:
|
|
case SCTP_PARAM_ADAPTATION_LAYER_IND:
|
|
if (length != sizeof(sctp_addip_param_t))
|
|
- return 0;
|
|
-
|
|
+ return false;
|
|
break;
|
|
default:
|
|
- break;
|
|
+ /* This is unkown to us, reject! */
|
|
+ return false;
|
|
}
|
|
-
|
|
- param.v += WORD_ROUND(length);
|
|
}
|
|
|
|
- if (param.v != chunk_end)
|
|
- return 0;
|
|
+ /* Remaining sanity checks. */
|
|
+ if (addr_param_needed && !addr_param_seen)
|
|
+ return false;
|
|
+ if (!addr_param_needed && addr_param_seen)
|
|
+ return false;
|
|
+ if (param.v != chunk->chunk_end)
|
|
+ return false;
|
|
|
|
- return 1;
|
|
+ return true;
|
|
}
|
|
|
|
/* Process an incoming ASCONF chunk with the next expected serial no. and
|
|
@@ -3162,16 +3178,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
|
|
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
|
|
struct sctp_chunk *asconf)
|
|
{
|
|
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
|
|
+ bool all_param_pass = true;
|
|
+ union sctp_params param;
|
|
sctp_addiphdr_t *hdr;
|
|
union sctp_addr_param *addr_param;
|
|
sctp_addip_param_t *asconf_param;
|
|
struct sctp_chunk *asconf_ack;
|
|
-
|
|
__be16 err_code;
|
|
int length = 0;
|
|
int chunk_len;
|
|
__u32 serial;
|
|
- int all_param_pass = 1;
|
|
|
|
chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
|
|
hdr = (sctp_addiphdr_t *)asconf->skb->data;
|
|
@@ -3199,9 +3216,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
|
|
goto done;
|
|
|
|
/* Process the TLVs contained within the ASCONF chunk. */
|
|
- while (chunk_len > 0) {
|
|
+ sctp_walk_params(param, addip, addip_hdr.params) {
|
|
+ /* Skip preceeding address parameters. */
|
|
+ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
|
|
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
|
|
+ continue;
|
|
+
|
|
err_code = sctp_process_asconf_param(asoc, asconf,
|
|
- asconf_param);
|
|
+ param.addip);
|
|
/* ADDIP 4.1 A7)
|
|
* If an error response is received for a TLV parameter,
|
|
* all TLVs with no response before the failed TLV are
|
|
@@ -3209,28 +3231,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
|
|
* the failed response are considered unsuccessful unless
|
|
* a specific success indication is present for the parameter.
|
|
*/
|
|
- if (SCTP_ERROR_NO_ERROR != err_code)
|
|
- all_param_pass = 0;
|
|
-
|
|
+ if (err_code != SCTP_ERROR_NO_ERROR)
|
|
+ all_param_pass = false;
|
|
if (!all_param_pass)
|
|
- sctp_add_asconf_response(asconf_ack,
|
|
- asconf_param->crr_id, err_code,
|
|
- asconf_param);
|
|
+ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
|
|
+ err_code, param.addip);
|
|
|
|
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
|
|
* an IP address sends an 'Out of Resource' in its response, it
|
|
* MUST also fail any subsequent add or delete requests bundled
|
|
* in the ASCONF.
|
|
*/
|
|
- if (SCTP_ERROR_RSRC_LOW == err_code)
|
|
+ if (err_code == SCTP_ERROR_RSRC_LOW)
|
|
goto done;
|
|
-
|
|
- /* Move to the next ASCONF param. */
|
|
- length = ntohs(asconf_param->param_hdr.length);
|
|
- asconf_param = (void *)asconf_param + length;
|
|
- chunk_len -= length;
|
|
}
|
|
-
|
|
done:
|
|
asoc->peer.addip_serial++;
|
|
|
|
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
|
|
index 5170a1f..3e287a3 100644
|
|
--- a/net/sctp/sm_statefuns.c
|
|
+++ b/net/sctp/sm_statefuns.c
|
|
@@ -170,6 +170,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
|
|
{
|
|
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
|
|
|
|
+ /* Previously already marked? */
|
|
+ if (unlikely(chunk->pdiscard))
|
|
+ return 0;
|
|
if (unlikely(chunk_length < required_length))
|
|
return 0;
|
|
|
|
@@ -1775,9 +1778,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
|
|
/* Update the content of current association. */
|
|
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
|
|
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
|
|
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
|
|
- SCTP_STATE(SCTP_STATE_ESTABLISHED));
|
|
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
|
|
+ if (sctp_state(asoc, SHUTDOWN_PENDING) &&
|
|
+ (sctp_sstate(asoc->base.sk, CLOSING) ||
|
|
+ sock_flag(asoc->base.sk, SOCK_DEAD))) {
|
|
+ /* if were currently in SHUTDOWN_PENDING, but the socket
|
|
+ * has been closed by user, don't transition to ESTABLISHED.
|
|
+ * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
|
|
+ */
|
|
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
|
|
+ return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
|
|
+ SCTP_ST_CHUNK(0), NULL,
|
|
+ commands);
|
|
+ } else {
|
|
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
|
|
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
|
|
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
|
|
+ }
|
|
return SCTP_DISPOSITION_CONSUME;
|
|
|
|
nomem_ev:
|
|
@@ -3578,9 +3594,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
|
|
struct sctp_chunk *asconf_ack = NULL;
|
|
struct sctp_paramhdr *err_param = NULL;
|
|
sctp_addiphdr_t *hdr;
|
|
- union sctp_addr_param *addr_param;
|
|
__u32 serial;
|
|
- int length;
|
|
|
|
if (!sctp_vtag_verify(chunk, asoc)) {
|
|
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
|
|
@@ -3605,17 +3619,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
|
|
hdr = (sctp_addiphdr_t *)chunk->skb->data;
|
|
serial = ntohl(hdr->serial);
|
|
|
|
- addr_param = (union sctp_addr_param *)hdr->params;
|
|
- length = ntohs(addr_param->p.length);
|
|
- if (length < sizeof(sctp_paramhdr_t))
|
|
- return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
|
|
- (void *)addr_param, commands);
|
|
-
|
|
/* Verify the ASCONF chunk before processing it. */
|
|
- if (!sctp_verify_asconf(asoc,
|
|
- (sctp_paramhdr_t *)((void *)addr_param + length),
|
|
- (void *)chunk->chunk_end,
|
|
- &err_param))
|
|
+ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
|
|
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
|
|
(void *)err_param, commands);
|
|
|
|
@@ -3732,10 +3737,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
|
|
rcvd_serial = ntohl(addip_hdr->serial);
|
|
|
|
/* Verify the ASCONF-ACK chunk before processing it. */
|
|
- if (!sctp_verify_asconf(asoc,
|
|
- (sctp_paramhdr_t *)addip_hdr->params,
|
|
- (void *)asconf_ack->chunk_end,
|
|
- &err_param))
|
|
+ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
|
|
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
|
|
(void *)err_param, commands);
|
|
|
|
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
|
|
index 604a6ac..f940fdc 100644
|
|
--- a/net/sctp/socket.c
|
|
+++ b/net/sctp/socket.c
|
|
@@ -1532,8 +1532,10 @@ static void sctp_close(struct sock *sk, long timeout)
|
|
|
|
/* Supposedly, no process has access to the socket, but
|
|
* the net layers still may.
|
|
+ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
|
|
+ * held and that should be grabbed before socket lock.
|
|
*/
|
|
- local_bh_disable();
|
|
+ spin_lock_bh(&net->sctp.addr_wq_lock);
|
|
bh_lock_sock(sk);
|
|
|
|
/* Hold the sock, since sk_common_release() will put sock_put()
|
|
@@ -1543,7 +1545,7 @@ static void sctp_close(struct sock *sk, long timeout)
|
|
sk_common_release(sk);
|
|
|
|
bh_unlock_sock(sk);
|
|
- local_bh_enable();
|
|
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
|
|
|
|
sock_put(sk);
|
|
|
|
@@ -3511,6 +3513,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
|
|
if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
|
|
return 0;
|
|
|
|
+ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
|
|
if (val == 0 && sp->do_auto_asconf) {
|
|
list_del(&sp->auto_asconf_list);
|
|
sp->do_auto_asconf = 0;
|
|
@@ -3519,6 +3522,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
|
|
&sock_net(sk)->sctp.auto_asconf_splist);
|
|
sp->do_auto_asconf = 1;
|
|
}
|
|
+ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
|
|
return 0;
|
|
}
|
|
|
|
@@ -4009,18 +4013,28 @@ static int sctp_init_sock(struct sock *sk)
|
|
local_bh_disable();
|
|
percpu_counter_inc(&sctp_sockets_allocated);
|
|
sock_prot_inuse_add(net, sk->sk_prot, 1);
|
|
+
|
|
+ /* Nothing can fail after this block, otherwise
|
|
+ * sctp_destroy_sock() will be called without addr_wq_lock held
|
|
+ */
|
|
if (net->sctp.default_auto_asconf) {
|
|
+ spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
|
|
list_add_tail(&sp->auto_asconf_list,
|
|
&net->sctp.auto_asconf_splist);
|
|
sp->do_auto_asconf = 1;
|
|
- } else
|
|
+ spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
|
|
+ } else {
|
|
sp->do_auto_asconf = 0;
|
|
+ }
|
|
+
|
|
local_bh_enable();
|
|
|
|
return 0;
|
|
}
|
|
|
|
-/* Cleanup any SCTP per socket resources. */
|
|
+/* Cleanup any SCTP per socket resources. Must be called with
|
|
+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
|
|
+ */
|
|
static void sctp_destroy_sock(struct sock *sk)
|
|
{
|
|
struct sctp_sock *sp;
|
|
@@ -6973,6 +6987,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
|
|
newinet->mc_list = NULL;
|
|
}
|
|
|
|
+static inline void sctp_copy_descendant(struct sock *sk_to,
|
|
+ const struct sock *sk_from)
|
|
+{
|
|
+ int ancestor_size = sizeof(struct inet_sock) +
|
|
+ sizeof(struct sctp_sock) -
|
|
+ offsetof(struct sctp_sock, auto_asconf_list);
|
|
+
|
|
+ if (sk_from->sk_family == PF_INET6)
|
|
+ ancestor_size += sizeof(struct ipv6_pinfo);
|
|
+
|
|
+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
|
|
+}
|
|
+
|
|
/* Populate the fields of the newsk from the oldsk and migrate the assoc
|
|
* and its messages to the newsk.
|
|
*/
|
|
@@ -6987,7 +7014,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
|
|
struct sk_buff *skb, *tmp;
|
|
struct sctp_ulpevent *event;
|
|
struct sctp_bind_hashbucket *head;
|
|
- struct list_head tmplist;
|
|
|
|
/* Migrate socket buffer sizes and all the socket level options to the
|
|
* new socket.
|
|
@@ -6995,12 +7021,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
|
|
newsk->sk_sndbuf = oldsk->sk_sndbuf;
|
|
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
|
|
/* Brute force copy old sctp opt. */
|
|
- if (oldsp->do_auto_asconf) {
|
|
- memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
|
|
- inet_sk_copy_descendant(newsk, oldsk);
|
|
- memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
|
|
- } else
|
|
- inet_sk_copy_descendant(newsk, oldsk);
|
|
+ sctp_copy_descendant(newsk, oldsk);
|
|
|
|
/* Restore the ep value that was overwritten with the above structure
|
|
* copy.
|
|
diff --git a/net/socket.c b/net/socket.c
|
|
index a19ae19..b72fc13 100644
|
|
--- a/net/socket.c
|
|
+++ b/net/socket.c
|
|
@@ -886,9 +886,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
|
|
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
|
|
struct sock_iocb *siocb)
|
|
{
|
|
- if (!is_sync_kiocb(iocb))
|
|
- BUG();
|
|
-
|
|
siocb->kiocb = iocb;
|
|
iocb->private = siocb;
|
|
return siocb;
|
|
@@ -2010,14 +2007,12 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
|
|
int err, ctl_len, total_len;
|
|
|
|
err = -EFAULT;
|
|
- if (MSG_CMSG_COMPAT & flags) {
|
|
- if (get_compat_msghdr(msg_sys, msg_compat))
|
|
- return -EFAULT;
|
|
- } else {
|
|
+ if (MSG_CMSG_COMPAT & flags)
|
|
+ err = get_compat_msghdr(msg_sys, msg_compat);
|
|
+ else
|
|
err = copy_msghdr_from_user(msg_sys, msg);
|
|
- if (err)
|
|
- return err;
|
|
- }
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
|
|
err = -EMSGSIZE;
|
|
@@ -2222,14 +2217,12 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
|
|
struct sockaddr __user *uaddr;
|
|
int __user *uaddr_len;
|
|
|
|
- if (MSG_CMSG_COMPAT & flags) {
|
|
- if (get_compat_msghdr(msg_sys, msg_compat))
|
|
- return -EFAULT;
|
|
- } else {
|
|
+ if (MSG_CMSG_COMPAT & flags)
|
|
+ err = get_compat_msghdr(msg_sys, msg_compat);
|
|
+ else
|
|
err = copy_msghdr_from_user(msg_sys, msg);
|
|
- if (err)
|
|
- return err;
|
|
- }
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
|
|
err = -EMSGSIZE;
|
|
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
|
|
index 1ec19f6..eeeba5a 100644
|
|
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
|
|
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
|
|
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
|
|
{
|
|
u32 value_follows;
|
|
int err;
|
|
+ struct page *scratch;
|
|
+
|
|
+ scratch = alloc_page(GFP_KERNEL);
|
|
+ if (!scratch)
|
|
+ return -ENOMEM;
|
|
+ xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
|
|
|
|
/* res->status */
|
|
err = gssx_dec_status(xdr, &res->status);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
|
|
/* res->context_handle */
|
|
err = gssx_dec_bool(xdr, &value_follows);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
if (value_follows) {
|
|
err = gssx_dec_ctx(xdr, res->context_handle);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
} else {
|
|
res->context_handle = NULL;
|
|
}
|
|
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
|
|
/* res->output_token */
|
|
err = gssx_dec_bool(xdr, &value_follows);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
if (value_follows) {
|
|
err = gssx_dec_buffer(xdr, res->output_token);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
} else {
|
|
res->output_token = NULL;
|
|
}
|
|
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
|
|
/* res->delegated_cred_handle */
|
|
err = gssx_dec_bool(xdr, &value_follows);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
if (value_follows) {
|
|
/* we do not support upcall servers sending this data. */
|
|
- return -EINVAL;
|
|
+ err = -EINVAL;
|
|
+ goto out_free;
|
|
}
|
|
|
|
/* res->options */
|
|
err = gssx_dec_option_array(xdr, &res->options);
|
|
|
|
+out_free:
|
|
+ __free_page(scratch);
|
|
return err;
|
|
}
|
|
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
|
|
index e860d4f..ab21968 100644
|
|
--- a/net/sunrpc/backchannel_rqst.c
|
|
+++ b/net/sunrpc/backchannel_rqst.c
|
|
@@ -60,7 +60,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
|
|
|
|
dprintk("RPC: free allocations for req= %p\n", req);
|
|
WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
|
|
- xbufp = &req->rq_private_buf;
|
|
+ xbufp = &req->rq_rcv_buf;
|
|
free_page((unsigned long)xbufp->head[0].iov_base);
|
|
xbufp = &req->rq_snd_buf;
|
|
free_page((unsigned long)xbufp->head[0].iov_base);
|
|
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
|
|
index ae333c1..0adc66c 100644
|
|
--- a/net/sunrpc/cache.c
|
|
+++ b/net/sunrpc/cache.c
|
|
@@ -920,7 +920,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
|
|
poll_wait(filp, &queue_wait, wait);
|
|
|
|
/* alway allow write */
|
|
- mask = POLL_OUT | POLLWRNORM;
|
|
+ mask = POLLOUT | POLLWRNORM;
|
|
|
|
if (!rp)
|
|
return mask;
|
|
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
|
|
index 3ea5cda..5ff8b87 100644
|
|
--- a/net/sunrpc/clnt.c
|
|
+++ b/net/sunrpc/clnt.c
|
|
@@ -533,6 +533,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
|
|
|
|
if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
|
|
clnt->cl_autobind = 1;
|
|
+ if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
|
|
+ clnt->cl_noretranstimeo = 1;
|
|
if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
|
|
clnt->cl_discrtry = 1;
|
|
if (!(args->flags & RPC_CLNT_CREATE_QUIET))
|
|
@@ -571,6 +573,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
|
|
/* Turn off autobind on clones */
|
|
new->cl_autobind = 0;
|
|
new->cl_softrtry = clnt->cl_softrtry;
|
|
+ new->cl_noretranstimeo = clnt->cl_noretranstimeo;
|
|
new->cl_discrtry = clnt->cl_discrtry;
|
|
new->cl_chatty = clnt->cl_chatty;
|
|
return new;
|
|
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
|
|
index d06cb87..5e8fe77 100644
|
|
--- a/net/sunrpc/svcsock.c
|
|
+++ b/net/sunrpc/svcsock.c
|
|
@@ -685,6 +685,7 @@ static struct svc_xprt_class svc_udp_class = {
|
|
.xcl_owner = THIS_MODULE,
|
|
.xcl_ops = &svc_udp_ops,
|
|
.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
|
|
+ .xcl_ident = XPRT_TRANSPORT_UDP,
|
|
};
|
|
|
|
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
|
|
@@ -1279,6 +1280,7 @@ static struct svc_xprt_class svc_tcp_class = {
|
|
.xcl_owner = THIS_MODULE,
|
|
.xcl_ops = &svc_tcp_ops,
|
|
.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
|
|
+ .xcl_ident = XPRT_TRANSPORT_TCP,
|
|
};
|
|
|
|
void svc_init_xprt_sock(void)
|
|
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
|
|
index 7d4df99..03ac884 100644
|
|
--- a/net/sunrpc/xprt.c
|
|
+++ b/net/sunrpc/xprt.c
|
|
@@ -1316,7 +1316,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
|
|
}
|
|
}
|
|
spin_unlock(&xprt_list_lock);
|
|
- printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
|
|
+ dprintk("RPC: transport (%d) not supported\n", args->ident);
|
|
return ERR_PTR(-EIO);
|
|
|
|
found:
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
index 62e4f9b..ed36cb5 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
@@ -89,6 +89,7 @@ struct svc_xprt_class svc_rdma_class = {
|
|
.xcl_owner = THIS_MODULE,
|
|
.xcl_ops = &svc_rdma_ops,
|
|
.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
|
|
+ .xcl_ident = XPRT_TRANSPORT_RDMA,
|
|
};
|
|
|
|
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
|
|
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
|
|
index 0addefc..41c2f9d 100644
|
|
--- a/net/sunrpc/xprtsock.c
|
|
+++ b/net/sunrpc/xprtsock.c
|
|
@@ -842,6 +842,8 @@ static void xs_error_report(struct sock *sk)
|
|
dprintk("RPC: xs_error_report client %p, error=%d...\n",
|
|
xprt, -err);
|
|
trace_rpc_socket_error(xprt, sk->sk_socket, err);
|
|
+ if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state))
|
|
+ goto out;
|
|
xprt_wake_pending_tasks(xprt, err);
|
|
out:
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
|
@@ -2251,7 +2253,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
|
|
abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
|
|
&xprt->state);
|
|
/* "close" the socket, preserving the local port */
|
|
+ set_bit(XPRT_CONNECTION_REUSE, &xprt->state);
|
|
xs_tcp_reuse_connection(transport);
|
|
+ clear_bit(XPRT_CONNECTION_REUSE, &xprt->state);
|
|
|
|
if (abort_and_exit)
|
|
goto out_eagain;
|
|
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
|
|
index 94404f1..4757f1c 100644
|
|
--- a/net/unix/af_unix.c
|
|
+++ b/net/unix/af_unix.c
|
|
@@ -1893,6 +1893,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
|
|
unix_state_unlock(sk);
|
|
timeo = freezable_schedule_timeout(timeo);
|
|
unix_state_lock(sk);
|
|
+
|
|
+ if (sock_flag(sk, SOCK_DEAD))
|
|
+ break;
|
|
+
|
|
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
|
|
}
|
|
|
|
@@ -1957,6 +1961,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|
struct sk_buff *skb, *last;
|
|
|
|
unix_state_lock(sk);
|
|
+ if (sock_flag(sk, SOCK_DEAD)) {
|
|
+ err = -ECONNRESET;
|
|
+ goto unlock;
|
|
+ }
|
|
last = skb = skb_peek(&sk->sk_receive_queue);
|
|
again:
|
|
if (skb == NULL) {
|
|
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
|
|
index 78559b5..27157a78 100644
|
|
--- a/net/wireless/chan.c
|
|
+++ b/net/wireless/chan.c
|
|
@@ -516,7 +516,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
|
|
{
|
|
struct ieee80211_sta_ht_cap *ht_cap;
|
|
struct ieee80211_sta_vht_cap *vht_cap;
|
|
- u32 width, control_freq;
|
|
+ u32 width, control_freq, cap;
|
|
|
|
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
|
|
return false;
|
|
@@ -554,7 +554,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
|
|
return false;
|
|
break;
|
|
case NL80211_CHAN_WIDTH_80P80:
|
|
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
|
|
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
|
|
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
|
|
return false;
|
|
case NL80211_CHAN_WIDTH_80:
|
|
if (!vht_cap->vht_supported)
|
|
@@ -565,7 +566,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
|
|
case NL80211_CHAN_WIDTH_160:
|
|
if (!vht_cap->vht_supported)
|
|
return false;
|
|
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
|
|
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
|
|
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
|
|
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
|
|
return false;
|
|
prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
|
|
width = 160;
|
|
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
|
|
index 4fe2e6e..c260243 100644
|
|
--- a/net/wireless/nl80211.c
|
|
+++ b/net/wireless/nl80211.c
|
|
@@ -1450,18 +1450,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
|
|
}
|
|
CMD(start_p2p_device, START_P2P_DEVICE);
|
|
CMD(set_mcast_rate, SET_MCAST_RATE);
|
|
+#ifdef CONFIG_NL80211_TESTMODE
|
|
+ CMD(testmode_cmd, TESTMODE);
|
|
+#endif
|
|
if (state->split) {
|
|
CMD(crit_proto_start, CRIT_PROTOCOL_START);
|
|
CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
|
|
if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
|
|
CMD(channel_switch, CHANNEL_SWITCH);
|
|
+ CMD(set_qos_map, SET_QOS_MAP);
|
|
}
|
|
- CMD(set_qos_map, SET_QOS_MAP);
|
|
-
|
|
-#ifdef CONFIG_NL80211_TESTMODE
|
|
- CMD(testmode_cmd, TESTMODE);
|
|
-#endif
|
|
-
|
|
+ /* add into the if now */
|
|
#undef CMD
|
|
|
|
if (dev->ops->connect || dev->ops->auth) {
|
|
@@ -2698,6 +2697,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
|
|
if (!rdev->ops->get_key)
|
|
return -EOPNOTSUPP;
|
|
|
|
+ if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
|
|
+ return -ENOENT;
|
|
+
|
|
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
|
if (!msg)
|
|
return -ENOMEM;
|
|
@@ -2717,10 +2719,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
|
|
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
|
|
goto nla_put_failure;
|
|
|
|
- if (pairwise && mac_addr &&
|
|
- !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
|
|
- return -ENOENT;
|
|
-
|
|
err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
|
|
get_key_callback);
|
|
|
|
@@ -2891,7 +2889,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
|
|
wdev_lock(dev->ieee80211_ptr);
|
|
err = nl80211_key_allowed(dev->ieee80211_ptr);
|
|
|
|
- if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
|
|
+ if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
|
|
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
|
|
err = -ENOENT;
|
|
|
|
@@ -4192,6 +4190,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
|
|
if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms))
|
|
return -EINVAL;
|
|
|
|
+ /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
|
|
+ * as userspace might just pass through the capabilities from the IEs
|
|
+ * directly, rather than enforcing this restriction and returning an
|
|
+ * error in this case.
|
|
+ */
|
|
+ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
|
|
+ params.ht_capa = NULL;
|
|
+ params.vht_capa = NULL;
|
|
+ }
|
|
+
|
|
/* When you run into this, adjust the code below for the new flag */
|
|
BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
|
|
|
|
@@ -6797,6 +6805,9 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
|
|
struct nlattr *data = ((void **)skb->cb)[2];
|
|
enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
|
|
|
|
+ /* clear CB data for netlink core to own from now on */
|
|
+ memset(skb->cb, 0, sizeof(skb->cb));
|
|
+
|
|
nla_nest_end(skb, data);
|
|
genlmsg_end(skb, hdr);
|
|
|
|
@@ -9076,6 +9087,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
|
|
void *hdr = ((void **)skb->cb)[1];
|
|
struct nlattr *data = ((void **)skb->cb)[2];
|
|
|
|
+ /* clear CB data for netlink core to own from now on */
|
|
+ memset(skb->cb, 0, sizeof(skb->cb));
|
|
+
|
|
if (WARN_ON(!rdev->cur_cmd_info)) {
|
|
kfree_skb(skb);
|
|
return -EINVAL;
|
|
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
|
|
index 338794e..04d5305 100644
|
|
--- a/net/wireless/reg.c
|
|
+++ b/net/wireless/reg.c
|
|
@@ -1547,7 +1547,7 @@ static enum reg_request_treatment
|
|
reg_process_hint_driver(struct wiphy *wiphy,
|
|
struct regulatory_request *driver_request)
|
|
{
|
|
- const struct ieee80211_regdomain *regd;
|
|
+ const struct ieee80211_regdomain *regd, *tmp;
|
|
enum reg_request_treatment treatment;
|
|
|
|
treatment = __reg_process_hint_driver(driver_request);
|
|
@@ -1566,7 +1566,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
|
|
kfree(driver_request);
|
|
return REG_REQ_IGNORE;
|
|
}
|
|
+
|
|
+ tmp = get_wiphy_regdom(wiphy);
|
|
rcu_assign_pointer(wiphy->regd, regd);
|
|
+ rcu_free_regdom(tmp);
|
|
}
|
|
|
|
|
|
@@ -1625,11 +1628,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
|
|
return REG_REQ_IGNORE;
|
|
return REG_REQ_ALREADY_SET;
|
|
}
|
|
- /*
|
|
- * Two consecutive Country IE hints on the same wiphy.
|
|
- * This should be picked up early by the driver/stack
|
|
- */
|
|
- if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
|
|
+
|
|
+ if (regdom_changes(country_ie_request->alpha2))
|
|
return REG_REQ_OK;
|
|
return REG_REQ_ALREADY_SET;
|
|
}
|
|
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
|
|
index 5eaeed5..815a3b7 100644
|
|
--- a/net/wireless/trace.h
|
|
+++ b/net/wireless/trace.h
|
|
@@ -2068,7 +2068,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
|
|
MAC_ASSIGN(addr, addr);
|
|
__entry->key_type = key_type;
|
|
__entry->key_id = key_id;
|
|
- memcpy(__entry->tsc, tsc, 6);
|
|
+ if (tsc)
|
|
+ memcpy(__entry->tsc, tsc, 6);
|
|
),
|
|
TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
|
|
NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
|
|
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
|
|
index 5661a54..ae1fe6f 100644
|
|
--- a/net/wireless/wext-compat.c
|
|
+++ b/net/wireless/wext-compat.c
|
|
@@ -1331,6 +1331,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
|
|
memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
|
|
wdev_unlock(wdev);
|
|
|
|
+ memset(&sinfo, 0, sizeof(sinfo));
|
|
+
|
|
if (rdev_get_station(rdev, dev, bssid, &sinfo))
|
|
return NULL;
|
|
|
|
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
|
|
index 1d5c7bf..59cf325 100644
|
|
--- a/net/xfrm/xfrm_policy.c
|
|
+++ b/net/xfrm/xfrm_policy.c
|
|
@@ -41,6 +41,11 @@
|
|
|
|
static struct dst_entry *xfrm_policy_sk_bundles;
|
|
|
|
+struct xfrm_flo {
|
|
+ struct dst_entry *dst_orig;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
|
|
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
|
|
__read_mostly;
|
|
@@ -1889,13 +1894,14 @@ static int xdst_queue_output(struct sk_buff *skb)
|
|
}
|
|
|
|
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
|
|
- struct dst_entry *dst,
|
|
+ struct xfrm_flo *xflo,
|
|
const struct flowi *fl,
|
|
int num_xfrms,
|
|
u16 family)
|
|
{
|
|
int err;
|
|
struct net_device *dev;
|
|
+ struct dst_entry *dst;
|
|
struct dst_entry *dst1;
|
|
struct xfrm_dst *xdst;
|
|
|
|
@@ -1903,9 +1909,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
|
|
if (IS_ERR(xdst))
|
|
return xdst;
|
|
|
|
- if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
|
|
+ if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
|
|
+ net->xfrm.sysctl_larval_drop ||
|
|
+ num_xfrms <= 0)
|
|
return xdst;
|
|
|
|
+ dst = xflo->dst_orig;
|
|
dst1 = &xdst->u.dst;
|
|
dst_hold(dst);
|
|
xdst->route = dst;
|
|
@@ -1947,7 +1956,7 @@ static struct flow_cache_object *
|
|
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
|
|
struct flow_cache_object *oldflo, void *ctx)
|
|
{
|
|
- struct dst_entry *dst_orig = (struct dst_entry *)ctx;
|
|
+ struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
|
|
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
|
|
struct xfrm_dst *xdst, *new_xdst;
|
|
int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
|
|
@@ -1988,7 +1997,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
|
|
goto make_dummy_bundle;
|
|
}
|
|
|
|
- new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
|
|
+ new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
|
|
+ xflo->dst_orig);
|
|
if (IS_ERR(new_xdst)) {
|
|
err = PTR_ERR(new_xdst);
|
|
if (err != -EAGAIN)
|
|
@@ -2022,7 +2032,7 @@ make_dummy_bundle:
|
|
/* We found policies, but there's no bundles to instantiate:
|
|
* either because the policy blocks, has no transformations or
|
|
* we could not build template (no xfrm_states).*/
|
|
- xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
|
|
+ xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
|
|
if (IS_ERR(xdst)) {
|
|
xfrm_pols_put(pols, num_pols);
|
|
return ERR_CAST(xdst);
|
|
@@ -2121,13 +2131,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
|
|
}
|
|
|
|
if (xdst == NULL) {
|
|
+ struct xfrm_flo xflo;
|
|
+
|
|
+ xflo.dst_orig = dst_orig;
|
|
+ xflo.flags = flags;
|
|
+
|
|
/* To accelerate a bit... */
|
|
if ((dst_orig->flags & DST_NOXFRM) ||
|
|
!net->xfrm.policy_count[XFRM_POLICY_OUT])
|
|
goto nopol;
|
|
|
|
flo = flow_cache_lookup(net, fl, family, dir,
|
|
- xfrm_bundle_lookup, dst_orig);
|
|
+ xfrm_bundle_lookup, &xflo);
|
|
if (flo == NULL)
|
|
goto nopol;
|
|
if (IS_ERR(flo)) {
|
|
@@ -2155,7 +2170,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
|
|
xfrm_pols_put(pols, drop_pols);
|
|
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
|
|
|
|
- return make_blackhole(net, family, dst_orig);
|
|
+ return ERR_PTR(-EREMOTE);
|
|
}
|
|
|
|
err = -EAGAIN;
|
|
@@ -2212,6 +2227,23 @@ dropdst:
|
|
}
|
|
EXPORT_SYMBOL(xfrm_lookup);
|
|
|
|
+/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
|
|
+ * Otherwise we may send out blackholed packets.
|
|
+ */
|
|
+struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
|
|
+ const struct flowi *fl,
|
|
+ struct sock *sk, int flags)
|
|
+{
|
|
+ struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
|
|
+ flags | XFRM_LOOKUP_QUEUE);
|
|
+
|
|
+ if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
|
|
+ return make_blackhole(net, dst_orig->ops->family, dst_orig);
|
|
+
|
|
+ return dst;
|
|
+}
|
|
+EXPORT_SYMBOL(xfrm_lookup_route);
|
|
+
|
|
static inline int
|
|
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
|
|
{
|
|
@@ -2477,7 +2509,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
|
|
|
|
skb_dst_force(skb);
|
|
|
|
- dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
|
|
+ dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
|
|
if (IS_ERR(dst)) {
|
|
res = 0;
|
|
dst = NULL;
|
|
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
|
|
index b10d04f..3bea4dd 100644
|
|
--- a/net/xfrm/xfrm_user.c
|
|
+++ b/net/xfrm/xfrm_user.c
|
|
@@ -176,9 +176,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
|
|
attrs[XFRMA_ALG_AEAD] ||
|
|
attrs[XFRMA_ALG_CRYPT] ||
|
|
attrs[XFRMA_ALG_COMP] ||
|
|
- attrs[XFRMA_TFCPAD] ||
|
|
- (ntohl(p->id.spi) >= 0x10000))
|
|
-
|
|
+ attrs[XFRMA_TFCPAD])
|
|
goto out;
|
|
break;
|
|
|
|
@@ -206,7 +204,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
|
|
attrs[XFRMA_ALG_AUTH] ||
|
|
attrs[XFRMA_ALG_AUTH_TRUNC] ||
|
|
attrs[XFRMA_ALG_CRYPT] ||
|
|
- attrs[XFRMA_TFCPAD])
|
|
+ attrs[XFRMA_TFCPAD] ||
|
|
+ (ntohl(p->id.spi) >= 0x10000))
|
|
goto out;
|
|
break;
|
|
|
|
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
|
|
index db1512a..f53542b 100644
|
|
--- a/scripts/kconfig/menu.c
|
|
+++ b/scripts/kconfig/menu.c
|
|
@@ -545,7 +545,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
|
|
{
|
|
int i, j;
|
|
struct menu *submenu[8], *menu, *location = NULL;
|
|
- struct jump_key *jump;
|
|
+ struct jump_key *jump = NULL;
|
|
|
|
str_printf(r, _("Prompt: %s\n"), _(prop->text));
|
|
menu = prop->menu->parent;
|
|
@@ -583,7 +583,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
|
|
str_printf(r, _(" Location:\n"));
|
|
for (j = 4; --i >= 0; j += 2) {
|
|
menu = submenu[i];
|
|
- if (head && location && menu == location)
|
|
+ if (jump && menu == location)
|
|
jump->offset = strlen(r->s);
|
|
str_printf(r, "%*c-> %s", j, ' ',
|
|
_(menu_get_prompt(menu)));
|
|
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
|
|
index 3133172..9969fee 100644
|
|
--- a/scripts/kconfig/streamline_config.pl
|
|
+++ b/scripts/kconfig/streamline_config.pl
|
|
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
|
|
my $kconfig = $ARGV[1];
|
|
my $lsmod_file = $ENV{'LSMOD'};
|
|
|
|
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
|
|
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
|
|
chomp @makefiles;
|
|
|
|
my %depends;
|
|
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
|
|
index da058da..2438cc3 100755
|
|
--- a/scripts/kernel-doc
|
|
+++ b/scripts/kernel-doc
|
|
@@ -1753,7 +1753,7 @@ sub dump_struct($$) {
|
|
# strip kmemcheck_bitfield_{begin,end}.*;
|
|
$members =~ s/kmemcheck_bitfield_.*?;//gos;
|
|
# strip attributes
|
|
- $members =~ s/__aligned\s*\(.+\)//gos;
|
|
+ $members =~ s/__aligned\s*\([^;]*\)//gos;
|
|
|
|
create_parameterlist($members, ';', $file);
|
|
check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
|
|
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
|
|
index 397b6b8..e28f38e 100755
|
|
--- a/scripts/recordmcount.pl
|
|
+++ b/scripts/recordmcount.pl
|
|
@@ -262,7 +262,6 @@ if ($arch eq "x86_64") {
|
|
# force flags for this arch
|
|
$ld .= " -m shlelf_linux";
|
|
$objcopy .= " -O elf32-sh-linux";
|
|
- $cc .= " -m32";
|
|
|
|
} elsif ($arch eq "powerpc") {
|
|
$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
|
|
diff --git a/security/commoncap.c b/security/commoncap.c
|
|
index b9d613e..963dc59 100644
|
|
--- a/security/commoncap.c
|
|
+++ b/security/commoncap.c
|
|
@@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
|
|
cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
|
|
}
|
|
|
|
+ cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
|
|
+ cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
|
|
index 3c5cbb9..048550a 100644
|
|
--- a/security/integrity/evm/evm_main.c
|
|
+++ b/security/integrity/evm/evm_main.c
|
|
@@ -20,6 +20,7 @@
|
|
#include <linux/xattr.h>
|
|
#include <linux/integrity.h>
|
|
#include <linux/evm.h>
|
|
+#include <linux/magic.h>
|
|
#include <crypto/hash.h>
|
|
#include "evm.h"
|
|
|
|
@@ -269,6 +270,24 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
|
|
goto out;
|
|
}
|
|
evm_status = evm_verify_current_integrity(dentry);
|
|
+ if (evm_status == INTEGRITY_NOXATTRS) {
|
|
+ struct integrity_iint_cache *iint;
|
|
+
|
|
+ iint = integrity_iint_find(dentry->d_inode);
|
|
+ if (iint && (iint->flags & IMA_NEW_FILE))
|
|
+ return 0;
|
|
+
|
|
+ /* exception for pseudo filesystems */
|
|
+ if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC
|
|
+ || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC)
|
|
+ return 0;
|
|
+
|
|
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
|
|
+ dentry->d_inode, dentry->d_name.name,
|
|
+ "update_metadata",
|
|
+ integrity_status_msg[evm_status],
|
|
+ -EPERM, 0);
|
|
+ }
|
|
out:
|
|
if (evm_status != INTEGRITY_PASS)
|
|
integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
|
|
@@ -296,9 +315,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
|
|
{
|
|
const struct evm_ima_xattr_data *xattr_data = xattr_value;
|
|
|
|
- if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
|
|
- && (xattr_data->type == EVM_XATTR_HMAC))
|
|
- return -EPERM;
|
|
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
|
|
+ if (!xattr_value_len)
|
|
+ return -EINVAL;
|
|
+ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
|
|
+ return -EPERM;
|
|
+ }
|
|
return evm_protect_xattr(dentry, xattr_name, xattr_value,
|
|
xattr_value_len);
|
|
}
|
|
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
|
|
index f79fa8b..5cb7de9 100644
|
|
--- a/security/integrity/ima/ima.h
|
|
+++ b/security/integrity/ima/ima.h
|
|
@@ -106,7 +106,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
|
|
const char *op, const char *cause);
|
|
int ima_init_crypto(void);
|
|
void ima_putc(struct seq_file *m, void *data, int datalen);
|
|
-void ima_print_digest(struct seq_file *m, u8 *digest, int size);
|
|
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
|
|
struct ima_template_desc *ima_template_desc_current(void);
|
|
int ima_init_template(void);
|
|
|
|
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
|
|
index 734e946..6df1b25 100644
|
|
--- a/security/integrity/ima/ima_appraise.c
|
|
+++ b/security/integrity/ima/ima_appraise.c
|
|
@@ -194,8 +194,11 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
|
|
goto out;
|
|
|
|
cause = "missing-hash";
|
|
- status =
|
|
- (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
|
|
+ status = INTEGRITY_NOLABEL;
|
|
+ if (inode->i_size == 0) {
|
|
+ iint->flags |= IMA_NEW_FILE;
|
|
+ status = INTEGRITY_PASS;
|
|
+ }
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
|
|
index 468a3ba..35f3c90 100644
|
|
--- a/security/integrity/ima/ima_fs.c
|
|
+++ b/security/integrity/ima/ima_fs.c
|
|
@@ -186,9 +186,9 @@ static const struct file_operations ima_measurements_ops = {
|
|
.release = seq_release,
|
|
};
|
|
|
|
-void ima_print_digest(struct seq_file *m, u8 *digest, int size)
|
|
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
|
|
{
|
|
- int i;
|
|
+ u32 i;
|
|
|
|
for (i = 0; i < size; i++)
|
|
seq_printf(m, "%02x", *(digest + i));
|
|
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
|
|
index 76d8aad..9f70efd 100644
|
|
--- a/security/integrity/ima/ima_main.c
|
|
+++ b/security/integrity/ima/ima_main.c
|
|
@@ -131,11 +131,13 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
|
|
return;
|
|
|
|
mutex_lock(&inode->i_mutex);
|
|
- if (atomic_read(&inode->i_writecount) == 1 &&
|
|
- iint->version != inode->i_version) {
|
|
- iint->flags &= ~IMA_DONE_MASK;
|
|
- if (iint->flags & IMA_APPRAISE)
|
|
- ima_update_xattr(iint, file);
|
|
+ if (atomic_read(&inode->i_writecount) == 1) {
|
|
+ if ((iint->version != inode->i_version) ||
|
|
+ (iint->flags & IMA_NEW_FILE)) {
|
|
+ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
|
|
+ if (iint->flags & IMA_APPRAISE)
|
|
+ ima_update_xattr(iint, file);
|
|
+ }
|
|
}
|
|
mutex_unlock(&inode->i_mutex);
|
|
}
|
|
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
|
|
index 085c496..9d8e420 100644
|
|
--- a/security/integrity/ima/ima_policy.c
|
|
+++ b/security/integrity/ima/ima_policy.c
|
|
@@ -27,6 +27,8 @@
|
|
#define IMA_UID 0x0008
|
|
#define IMA_FOWNER 0x0010
|
|
#define IMA_FSUUID 0x0020
|
|
+#define IMA_INMASK 0x0040
|
|
+#define IMA_EUID 0x0080
|
|
|
|
#define UNKNOWN 0
|
|
#define MEASURE 0x0001 /* same as IMA_MEASURE */
|
|
@@ -171,6 +173,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
|
|
return false;
|
|
if ((rule->flags & IMA_MASK) && rule->mask != mask)
|
|
return false;
|
|
+ if ((rule->flags & IMA_INMASK) &&
|
|
+ (!(rule->mask & mask) && func != POST_SETATTR))
|
|
+ return false;
|
|
if ((rule->flags & IMA_FSMAGIC)
|
|
&& rule->fsmagic != inode->i_sb->s_magic)
|
|
return false;
|
|
@@ -179,6 +184,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
|
|
return false;
|
|
if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
|
|
return false;
|
|
+ if (rule->flags & IMA_EUID) {
|
|
+ if (has_capability_noaudit(current, CAP_SETUID)) {
|
|
+ if (!uid_eq(rule->uid, cred->euid)
|
|
+ && !uid_eq(rule->uid, cred->suid)
|
|
+ && !uid_eq(rule->uid, cred->uid))
|
|
+ return false;
|
|
+ } else if (!uid_eq(rule->uid, cred->euid))
|
|
+ return false;
|
|
+ }
|
|
+
|
|
if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
|
|
return false;
|
|
for (i = 0; i < MAX_LSM_RULES; i++) {
|
|
@@ -350,7 +365,8 @@ enum {
|
|
Opt_audit,
|
|
Opt_obj_user, Opt_obj_role, Opt_obj_type,
|
|
Opt_subj_user, Opt_subj_role, Opt_subj_type,
|
|
- Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
|
|
+ Opt_func, Opt_mask, Opt_fsmagic,
|
|
+ Opt_uid, Opt_euid, Opt_fowner,
|
|
Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
|
|
};
|
|
|
|
@@ -371,6 +387,7 @@ static match_table_t policy_tokens = {
|
|
{Opt_fsmagic, "fsmagic=%s"},
|
|
{Opt_fsuuid, "fsuuid=%s"},
|
|
{Opt_uid, "uid=%s"},
|
|
+ {Opt_euid, "euid=%s"},
|
|
{Opt_fowner, "fowner=%s"},
|
|
{Opt_appraise_type, "appraise_type=%s"},
|
|
{Opt_permit_directio, "permit_directio"},
|
|
@@ -412,6 +429,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
|
|
static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
|
|
{
|
|
struct audit_buffer *ab;
|
|
+ char *from;
|
|
char *p;
|
|
int result = 0;
|
|
|
|
@@ -500,18 +518,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
|
|
if (entry->mask)
|
|
result = -EINVAL;
|
|
|
|
- if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
|
|
+ from = args[0].from;
|
|
+ if (*from == '^')
|
|
+ from++;
|
|
+
|
|
+ if ((strcmp(from, "MAY_EXEC")) == 0)
|
|
entry->mask = MAY_EXEC;
|
|
- else if (strcmp(args[0].from, "MAY_WRITE") == 0)
|
|
+ else if (strcmp(from, "MAY_WRITE") == 0)
|
|
entry->mask = MAY_WRITE;
|
|
- else if (strcmp(args[0].from, "MAY_READ") == 0)
|
|
+ else if (strcmp(from, "MAY_READ") == 0)
|
|
entry->mask = MAY_READ;
|
|
- else if (strcmp(args[0].from, "MAY_APPEND") == 0)
|
|
+ else if (strcmp(from, "MAY_APPEND") == 0)
|
|
entry->mask = MAY_APPEND;
|
|
else
|
|
result = -EINVAL;
|
|
if (!result)
|
|
- entry->flags |= IMA_MASK;
|
|
+ entry->flags |= (*args[0].from == '^')
|
|
+ ? IMA_INMASK : IMA_MASK;
|
|
break;
|
|
case Opt_fsmagic:
|
|
ima_log_string(ab, "fsmagic", args[0].from);
|
|
@@ -542,6 +565,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
|
|
break;
|
|
case Opt_uid:
|
|
ima_log_string(ab, "uid", args[0].from);
|
|
+ case Opt_euid:
|
|
+ if (token == Opt_euid)
|
|
+ ima_log_string(ab, "euid", args[0].from);
|
|
|
|
if (uid_valid(entry->uid)) {
|
|
result = -EINVAL;
|
|
@@ -550,11 +576,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
|
|
|
|
result = strict_strtoul(args[0].from, 10, &lnum);
|
|
if (!result) {
|
|
- entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
|
|
- if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
|
|
+ entry->uid = make_kuid(current_user_ns(),
|
|
+ (uid_t) lnum);
|
|
+ if (!uid_valid(entry->uid) ||
|
|
+ (uid_t)lnum != lnum)
|
|
result = -EINVAL;
|
|
else
|
|
- entry->flags |= IMA_UID;
|
|
+ entry->flags |= (token == Opt_uid)
|
|
+ ? IMA_UID : IMA_EUID;
|
|
}
|
|
break;
|
|
case Opt_fowner:
|
|
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
|
|
index e8592e7..dcf77b7 100644
|
|
--- a/security/integrity/ima/ima_template_lib.c
|
|
+++ b/security/integrity/ima/ima_template_lib.c
|
|
@@ -79,7 +79,8 @@ static void ima_show_template_data_ascii(struct seq_file *m,
|
|
enum data_formats datafmt,
|
|
struct ima_field_data *field_data)
|
|
{
|
|
- u8 *buf_ptr = field_data->data, buflen = field_data->len;
|
|
+ u8 *buf_ptr = field_data->data;
|
|
+ u32 buflen = field_data->len;
|
|
|
|
switch (datafmt) {
|
|
case DATA_FMT_DIGEST_WITH_ALGO:
|
|
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
|
|
index 33c0a70..2f8715d 100644
|
|
--- a/security/integrity/integrity.h
|
|
+++ b/security/integrity/integrity.h
|
|
@@ -31,6 +31,7 @@
|
|
#define IMA_DIGSIG 0x01000000
|
|
#define IMA_DIGSIG_REQUIRED 0x02000000
|
|
#define IMA_PERMIT_DIRECTIO 0x04000000
|
|
+#define IMA_NEW_FILE 0x08000000
|
|
|
|
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
|
|
IMA_APPRAISE_SUBMASK)
|
|
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
|
|
index 9e1e005..c4c8df4 100644
|
|
--- a/security/keys/encrypted-keys/encrypted.c
|
|
+++ b/security/keys/encrypted-keys/encrypted.c
|
|
@@ -1018,10 +1018,13 @@ static int __init init_encrypted(void)
|
|
ret = encrypted_shash_alloc();
|
|
if (ret < 0)
|
|
return ret;
|
|
+ ret = aes_get_sizes();
|
|
+ if (ret < 0)
|
|
+ goto out;
|
|
ret = register_key_type(&key_type_encrypted);
|
|
if (ret < 0)
|
|
goto out;
|
|
- return aes_get_sizes();
|
|
+ return 0;
|
|
out:
|
|
encrypted_shash_release();
|
|
return ret;
|
|
diff --git a/security/keys/gc.c b/security/keys/gc.c
|
|
index d3222b6..009d937 100644
|
|
--- a/security/keys/gc.c
|
|
+++ b/security/keys/gc.c
|
|
@@ -157,12 +157,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
|
|
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
|
|
atomic_dec(&key->user->nikeys);
|
|
|
|
- key_user_put(key->user);
|
|
-
|
|
/* now throw away the key memory */
|
|
if (key->type->destroy)
|
|
key->type->destroy(key);
|
|
|
|
+ key_user_put(key->user);
|
|
+
|
|
kfree(key->description);
|
|
|
|
#ifdef KEY_DEBUGGING
|
|
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
|
|
index 2fb2576..04d0d7c 100644
|
|
--- a/security/keys/keyring.c
|
|
+++ b/security/keys/keyring.c
|
|
@@ -1151,9 +1151,11 @@ void __key_link_end(struct key *keyring,
|
|
if (index_key->type == &key_type_keyring)
|
|
up_write(&keyring_serialise_link_sem);
|
|
|
|
- if (edit && !edit->dead_leaf) {
|
|
- key_payload_reserve(keyring,
|
|
- keyring->datalen - KEYQUOTA_LINK_BYTES);
|
|
+ if (edit) {
|
|
+ if (!edit->dead_leaf) {
|
|
+ key_payload_reserve(keyring,
|
|
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
|
|
+ }
|
|
assoc_array_cancel_edit(edit);
|
|
}
|
|
up_write(&keyring->sem);
|
|
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
|
|
index e294b86..47b5c69 100644
|
|
--- a/security/selinux/hooks.c
|
|
+++ b/security/selinux/hooks.c
|
|
@@ -470,6 +470,7 @@ next_inode:
|
|
list_entry(sbsec->isec_head.next,
|
|
struct inode_security_struct, list);
|
|
struct inode *inode = isec->inode;
|
|
+ list_del_init(&isec->list);
|
|
spin_unlock(&sbsec->isec_lock);
|
|
inode = igrab(inode);
|
|
if (inode) {
|
|
@@ -478,7 +479,6 @@ next_inode:
|
|
iput(inode);
|
|
}
|
|
spin_lock(&sbsec->isec_lock);
|
|
- list_del_init(&isec->list);
|
|
goto next_inode;
|
|
}
|
|
spin_unlock(&sbsec->isec_lock);
|
|
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
|
|
index d60c0ee..fc68bf6 100644
|
|
--- a/security/selinux/selinuxfs.c
|
|
+++ b/security/selinux/selinuxfs.c
|
|
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
|
|
goto out;
|
|
|
|
/* No partial writes. */
|
|
- length = EINVAL;
|
|
+ length = -EINVAL;
|
|
if (*ppos != 0)
|
|
goto out;
|
|
|
|
@@ -1200,7 +1200,7 @@ static void sel_remove_entries(struct dentry *de)
|
|
spin_lock(&de->d_lock);
|
|
node = de->d_subdirs.next;
|
|
while (node != &de->d_subdirs) {
|
|
- struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
|
|
+ struct dentry *d = list_entry(node, struct dentry, d_child);
|
|
|
|
spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
|
|
list_del_init(node);
|
|
@@ -1674,12 +1674,12 @@ static void sel_remove_classes(void)
|
|
|
|
list_for_each(class_node, &class_dir->d_subdirs) {
|
|
struct dentry *class_subdir = list_entry(class_node,
|
|
- struct dentry, d_u.d_child);
|
|
+ struct dentry, d_child);
|
|
struct list_head *class_subdir_node;
|
|
|
|
list_for_each(class_subdir_node, &class_subdir->d_subdirs) {
|
|
struct dentry *d = list_entry(class_subdir_node,
|
|
- struct dentry, d_u.d_child);
|
|
+ struct dentry, d_child);
|
|
|
|
if (d->d_inode)
|
|
if (d->d_inode->i_mode & S_IFDIR)
|
|
diff --git a/sound/core/control.c b/sound/core/control.c
|
|
index 98a29b2..f2082a3 100644
|
|
--- a/sound/core/control.c
|
|
+++ b/sound/core/control.c
|
|
@@ -1168,6 +1168,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
|
|
|
|
if (info->count < 1)
|
|
return -EINVAL;
|
|
+ if (!*info->id.name)
|
|
+ return -EINVAL;
|
|
+ if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
|
|
+ return -EINVAL;
|
|
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
|
|
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
|
|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
|
|
diff --git a/sound/core/info.c b/sound/core/info.c
|
|
index e79baa1..08070e1 100644
|
|
--- a/sound/core/info.c
|
|
+++ b/sound/core/info.c
|
|
@@ -679,7 +679,7 @@ int snd_info_card_free(struct snd_card *card)
|
|
* snd_info_get_line - read one line from the procfs buffer
|
|
* @buffer: the procfs buffer
|
|
* @line: the buffer to store
|
|
- * @len: the max. buffer size - 1
|
|
+ * @len: the max. buffer size
|
|
*
|
|
* Reads one line from the buffer and stores the string.
|
|
*
|
|
@@ -699,7 +699,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
|
|
buffer->stop = 1;
|
|
if (c == '\n')
|
|
break;
|
|
- if (len) {
|
|
+ if (len > 1) {
|
|
len--;
|
|
*line++ = c;
|
|
}
|
|
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
|
|
index af49721..c4ac3c1 100644
|
|
--- a/sound/core/pcm_compat.c
|
|
+++ b/sound/core/pcm_compat.c
|
|
@@ -206,6 +206,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
|
|
if (err < 0)
|
|
return err;
|
|
|
|
+ if (clear_user(src, sizeof(*src)))
|
|
+ return -EFAULT;
|
|
if (put_user(status.state, &src->state) ||
|
|
compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
|
|
compat_put_timespec(&status.tstamp, &src->tstamp) ||
|
|
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
|
|
index a210467..e1ef106 100644
|
|
--- a/sound/core/pcm_lib.c
|
|
+++ b/sound/core/pcm_lib.c
|
|
@@ -1783,14 +1783,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
|
|
{
|
|
struct snd_pcm_hw_params *params = arg;
|
|
snd_pcm_format_t format;
|
|
- int channels, width;
|
|
+ int channels;
|
|
+ ssize_t frame_size;
|
|
|
|
params->fifo_size = substream->runtime->hw.fifo_size;
|
|
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
|
|
format = params_format(params);
|
|
channels = params_channels(params);
|
|
- width = snd_pcm_format_physical_width(format);
|
|
- params->fifo_size /= width * channels;
|
|
+ frame_size = snd_pcm_format_size(format, channels);
|
|
+ if (frame_size > 0)
|
|
+ params->fifo_size /= (unsigned)frame_size;
|
|
}
|
|
return 0;
|
|
}
|
|
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
|
|
index 01a5e05..ee24057 100644
|
|
--- a/sound/core/pcm_native.c
|
|
+++ b/sound/core/pcm_native.c
|
|
@@ -1404,6 +1404,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
|
|
if (! snd_pcm_playback_empty(substream)) {
|
|
snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
|
|
snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
|
|
+ } else {
|
|
+ runtime->status->state = SNDRV_PCM_STATE_SETUP;
|
|
}
|
|
break;
|
|
case SNDRV_PCM_STATE_RUNNING:
|
|
@@ -3189,7 +3191,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
|
|
|
|
#ifndef ARCH_HAS_DMA_MMAP_COHERENT
|
|
/* This should be defined / handled globally! */
|
|
-#ifdef CONFIG_ARM
|
|
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
|
|
#define ARCH_HAS_DMA_MMAP_COHERENT
|
|
#endif
|
|
#endif
|
|
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
|
|
index dbc5507..f60d814 100644
|
|
--- a/sound/core/seq/seq_dummy.c
|
|
+++ b/sound/core/seq/seq_dummy.c
|
|
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
|
|
static int my_client = -1;
|
|
|
|
/*
|
|
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
|
|
- * to subscribers.
|
|
- * Note: this callback is called only after all subscribers are removed.
|
|
- */
|
|
-static int
|
|
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
|
|
-{
|
|
- struct snd_seq_dummy_port *p;
|
|
- int i;
|
|
- struct snd_seq_event ev;
|
|
-
|
|
- p = private_data;
|
|
- memset(&ev, 0, sizeof(ev));
|
|
- if (p->duplex)
|
|
- ev.source.port = p->connect;
|
|
- else
|
|
- ev.source.port = p->port;
|
|
- ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
|
|
- ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
|
|
- for (i = 0; i < 16; i++) {
|
|
- ev.data.control.channel = i;
|
|
- ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
|
|
- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
|
|
- ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
|
|
- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*
|
|
* event input callback - just redirect events to subscribers
|
|
*/
|
|
static int
|
|
@@ -175,7 +145,6 @@ create_port(int idx, int type)
|
|
| SNDRV_SEQ_PORT_TYPE_PORT;
|
|
memset(&pcb, 0, sizeof(pcb));
|
|
pcb.owner = THIS_MODULE;
|
|
- pcb.unuse = dummy_unuse;
|
|
pcb.event_input = dummy_input;
|
|
pcb.private_free = dummy_free;
|
|
pcb.private_data = rec;
|
|
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
|
|
index e04e750..7a9149b 100644
|
|
--- a/sound/i2c/other/ak4113.c
|
|
+++ b/sound/i2c/other/ak4113.c
|
|
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
|
|
|
|
static void snd_ak4113_free(struct ak4113 *chip)
|
|
{
|
|
- chip->init = 1; /* don't schedule new work */
|
|
- mb();
|
|
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
|
|
cancel_delayed_work_sync(&chip->work);
|
|
kfree(chip);
|
|
}
|
|
@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
|
|
chip->write = write;
|
|
chip->private_data = private_data;
|
|
INIT_DELAYED_WORK(&chip->work, ak4113_stats);
|
|
+ atomic_set(&chip->wq_processing, 0);
|
|
|
|
for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
|
|
chip->regmap[reg] = pgm[reg];
|
|
@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
|
|
|
|
void snd_ak4113_reinit(struct ak4113 *chip)
|
|
{
|
|
- chip->init = 1;
|
|
- mb();
|
|
- flush_delayed_work(&chip->work);
|
|
+ if (atomic_inc_return(&chip->wq_processing) == 1)
|
|
+ cancel_delayed_work_sync(&chip->work);
|
|
ak4113_init_regs(chip);
|
|
/* bring up statistics / event queing */
|
|
- chip->init = 0;
|
|
- if (chip->kctls[0])
|
|
+ if (atomic_dec_and_test(&chip->wq_processing))
|
|
schedule_delayed_work(&chip->work, HZ / 10);
|
|
}
|
|
EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
|
|
@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
|
|
{
|
|
struct ak4113 *chip = container_of(work, struct ak4113, work.work);
|
|
|
|
- if (!chip->init)
|
|
+ if (atomic_inc_return(&chip->wq_processing) == 1)
|
|
snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
|
|
|
|
- schedule_delayed_work(&chip->work, HZ / 10);
|
|
+ if (atomic_dec_and_test(&chip->wq_processing))
|
|
+ schedule_delayed_work(&chip->work, HZ / 10);
|
|
}
|
|
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
|
|
index 15ae025..bf515db 100644
|
|
--- a/sound/i2c/other/ak4114.c
|
|
+++ b/sound/i2c/other/ak4114.c
|
|
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
|
|
|
|
static void snd_ak4114_free(struct ak4114 *chip)
|
|
{
|
|
- chip->init = 1; /* don't schedule new work */
|
|
- mb();
|
|
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
|
|
cancel_delayed_work_sync(&chip->work);
|
|
kfree(chip);
|
|
}
|
|
@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
|
|
chip->write = write;
|
|
chip->private_data = private_data;
|
|
INIT_DELAYED_WORK(&chip->work, ak4114_stats);
|
|
+ atomic_set(&chip->wq_processing, 0);
|
|
|
|
for (reg = 0; reg < 6; reg++)
|
|
chip->regmap[reg] = pgm[reg];
|
|
@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
|
|
|
|
void snd_ak4114_reinit(struct ak4114 *chip)
|
|
{
|
|
- chip->init = 1;
|
|
- mb();
|
|
- flush_delayed_work(&chip->work);
|
|
+ if (atomic_inc_return(&chip->wq_processing) == 1)
|
|
+ cancel_delayed_work_sync(&chip->work);
|
|
ak4114_init_regs(chip);
|
|
/* bring up statistics / event queing */
|
|
- chip->init = 0;
|
|
- if (chip->kctls[0])
|
|
+ if (atomic_dec_and_test(&chip->wq_processing))
|
|
schedule_delayed_work(&chip->work, HZ / 10);
|
|
}
|
|
|
|
@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
|
|
{
|
|
struct ak4114 *chip = container_of(work, struct ak4114, work.work);
|
|
|
|
- if (!chip->init)
|
|
+ if (atomic_inc_return(&chip->wq_processing) == 1)
|
|
snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
|
|
-
|
|
- schedule_delayed_work(&chip->work, HZ / 10);
|
|
+ if (atomic_dec_and_test(&chip->wq_processing))
|
|
+ schedule_delayed_work(&chip->work, HZ / 10);
|
|
}
|
|
|
|
EXPORT_SYMBOL(snd_ak4114_create);
|
|
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
|
|
index 9b9f7d3..1010ca1 100644
|
|
--- a/sound/oss/sequencer.c
|
|
+++ b/sound/oss/sequencer.c
|
|
@@ -683,13 +683,8 @@ static int seq_timing_event(unsigned char *event_rec)
|
|
break;
|
|
|
|
case TMR_ECHO:
|
|
- if (seq_mode == SEQ_2)
|
|
- seq_copy_to_input(event_rec, 8);
|
|
- else
|
|
- {
|
|
- parm = (parm << 8 | SEQ_ECHO);
|
|
- seq_copy_to_input((unsigned char *) &parm, 4);
|
|
- }
|
|
+ parm = (parm << 8 | SEQ_ECHO);
|
|
+ seq_copy_to_input((unsigned char *) &parm, 4);
|
|
break;
|
|
|
|
default:;
|
|
@@ -1330,7 +1325,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
|
|
int mode = translate_mode(file);
|
|
struct synth_info inf;
|
|
struct seq_event_rec event_rec;
|
|
- unsigned long flags;
|
|
int __user *p = arg;
|
|
|
|
orig_dev = dev = dev >> 4;
|
|
@@ -1485,9 +1479,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
|
|
case SNDCTL_SEQ_OUTOFBAND:
|
|
if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
|
|
return -EFAULT;
|
|
- spin_lock_irqsave(&lock,flags);
|
|
play_event(event_rec.arr);
|
|
- spin_unlock_irqrestore(&lock,flags);
|
|
return 0;
|
|
|
|
case SNDCTL_MIDI_INFO:
|
|
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
|
|
index 8756c8e..46e5630 100644
|
|
--- a/sound/pci/Kconfig
|
|
+++ b/sound/pci/Kconfig
|
|
@@ -859,8 +859,8 @@ config SND_VIRTUOSO
|
|
select SND_JACK if INPUT=y || INPUT=SND
|
|
help
|
|
Say Y here to include support for sound cards based on the
|
|
- Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
|
|
- Essence ST (Deluxe), and Essence STX.
|
|
+ Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, DSX,
|
|
+ Essence ST (Deluxe), and Essence STX (II).
|
|
Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
|
|
for the Xense, missing.
|
|
|
|
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
|
|
index 9e1bd0c..6757458 100644
|
|
--- a/sound/pci/emu10k1/emu10k1.c
|
|
+++ b/sound/pci/emu10k1/emu10k1.c
|
|
@@ -181,8 +181,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
|
|
}
|
|
#endif
|
|
|
|
- strcpy(card->driver, emu->card_capabilities->driver);
|
|
- strcpy(card->shortname, emu->card_capabilities->name);
|
|
+ strlcpy(card->driver, emu->card_capabilities->driver,
|
|
+ sizeof(card->driver));
|
|
+ strlcpy(card->shortname, emu->card_capabilities->name,
|
|
+ sizeof(card->shortname));
|
|
snprintf(card->longname, sizeof(card->longname),
|
|
"%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
|
|
card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
|
|
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
|
|
index cae3659..f8a6549 100644
|
|
--- a/sound/pci/emu10k1/emu10k1_callback.c
|
|
+++ b/sound/pci/emu10k1/emu10k1_callback.c
|
|
@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
|
|
* get more voice for pcm
|
|
*
|
|
* terminate most inactive voice and give it as a pcm voice.
|
|
+ *
|
|
+ * voice_lock is already held.
|
|
*/
|
|
int
|
|
snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
|
|
@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
|
|
struct snd_emux *emu;
|
|
struct snd_emux_voice *vp;
|
|
struct best_voice best[V_END];
|
|
- unsigned long flags;
|
|
int i;
|
|
|
|
emu = hw->synth;
|
|
|
|
- spin_lock_irqsave(&emu->voice_lock, flags);
|
|
lookup_voices(emu, hw, best, 1); /* no OFF voices */
|
|
for (i = 0; i < V_END; i++) {
|
|
if (best[i].voice >= 0) {
|
|
@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
|
|
vp->emu->num_voices--;
|
|
vp->ch = -1;
|
|
vp->state = SNDRV_EMUX_ST_OFF;
|
|
- spin_unlock_irqrestore(&emu->voice_lock, flags);
|
|
return ch;
|
|
}
|
|
}
|
|
- spin_unlock_irqrestore(&emu->voice_lock, flags);
|
|
|
|
/* not found */
|
|
return -ENOMEM;
|
|
@@ -417,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
|
|
snd_emu10k1_ptr_write(hw, Z2, ch, 0);
|
|
|
|
/* invalidate maps */
|
|
- temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
|
|
+ temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
|
|
snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
|
|
snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
|
|
#if 0
|
|
@@ -438,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
|
|
snd_emu10k1_ptr_write(hw, CDF, ch, sample);
|
|
|
|
/* invalidate maps */
|
|
- temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
|
|
+ temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
|
|
snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
|
|
snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
|
|
|
|
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
|
|
index bdd888e..a131092 100644
|
|
--- a/sound/pci/emu10k1/emu10k1_main.c
|
|
+++ b/sound/pci/emu10k1/emu10k1_main.c
|
|
@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
|
|
snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
|
|
snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
|
|
|
|
- silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
|
|
+ silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
|
|
for (ch = 0; ch < NUM_G; ch++) {
|
|
snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
|
|
snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
|
|
@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
|
|
outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
|
|
}
|
|
|
|
+ if (emu->address_mode == 0) {
|
|
+ /* use 16M in 4G */
|
|
+ outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -1411,7 +1416,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
|
|
*
|
|
*/
|
|
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
|
|
- .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
|
|
+ .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
|
|
.id = "Audigy2",
|
|
.emu10k2_chip = 1,
|
|
.ca0108_chip = 1,
|
|
@@ -1561,7 +1566,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
|
|
.adc_1361t = 1, /* 24 bit capture instead of 16bit */
|
|
.ac97_chip = 1} ,
|
|
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
|
|
- .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
|
|
+ .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
|
|
.id = "Audigy2",
|
|
.emu10k2_chip = 1,
|
|
.ca0102_chip = 1,
|
|
@@ -1865,8 +1870,10 @@ int snd_emu10k1_create(struct snd_card *card,
|
|
|
|
is_audigy = emu->audigy = c->emu10k2_chip;
|
|
|
|
+ /* set addressing mode */
|
|
+ emu->address_mode = is_audigy ? 0 : 1;
|
|
/* set the DMA transfer mask */
|
|
- emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
|
|
+ emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
|
|
if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
|
|
pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
|
|
snd_printk(KERN_ERR "architecture does not support PCI busmaster DMA with mask 0x%lx\n", emu->dma_mask);
|
|
@@ -1889,7 +1896,7 @@ int snd_emu10k1_create(struct snd_card *card,
|
|
|
|
emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
|
|
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
|
|
- 32 * 1024, &emu->ptb_pages) < 0) {
|
|
+ (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
|
|
err = -ENOMEM;
|
|
goto error;
|
|
}
|
|
@@ -1988,8 +1995,8 @@ int snd_emu10k1_create(struct snd_card *card,
|
|
|
|
/* Clear silent pages and set up pointers */
|
|
memset(emu->silent_page.area, 0, PAGE_SIZE);
|
|
- silent_page = emu->silent_page.addr << 1;
|
|
- for (idx = 0; idx < MAXPAGES; idx++)
|
|
+ silent_page = emu->silent_page.addr << emu->address_mode;
|
|
+ for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
|
|
((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
|
|
|
|
/* set up voice indices */
|
|
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
|
|
index 5ae1d04..7581019 100644
|
|
--- a/sound/pci/emu10k1/emupcm.c
|
|
+++ b/sound/pci/emu10k1/emupcm.c
|
|
@@ -379,7 +379,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
|
|
snd_emu10k1_ptr_write(emu, Z1, voice, 0);
|
|
snd_emu10k1_ptr_write(emu, Z2, voice, 0);
|
|
/* invalidate maps */
|
|
- silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
|
|
+ silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
|
|
snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
|
|
snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
|
|
/* modulation envelope */
|
|
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
|
|
index 2ca9f2e..53745f4 100644
|
|
--- a/sound/pci/emu10k1/emuproc.c
|
|
+++ b/sound/pci/emu10k1/emuproc.c
|
|
@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
|
|
struct snd_emu10k1 *emu = entry->private_data;
|
|
u32 value;
|
|
u32 value2;
|
|
- unsigned long flags;
|
|
u32 rate;
|
|
|
|
if (emu->card_capabilities->emu_model) {
|
|
- spin_lock_irqsave(&emu->emu_lock, flags);
|
|
snd_emu1010_fpga_read(emu, 0x38, &value);
|
|
- spin_unlock_irqrestore(&emu->emu_lock, flags);
|
|
if ((value & 0x1) == 0) {
|
|
- spin_lock_irqsave(&emu->emu_lock, flags);
|
|
snd_emu1010_fpga_read(emu, 0x2a, &value);
|
|
snd_emu1010_fpga_read(emu, 0x2b, &value2);
|
|
- spin_unlock_irqrestore(&emu->emu_lock, flags);
|
|
rate = 0x1770000 / (((value << 5) | value2)+1);
|
|
snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
|
|
} else {
|
|
snd_iprintf(buffer, "ADAT Unlocked\n");
|
|
}
|
|
- spin_lock_irqsave(&emu->emu_lock, flags);
|
|
snd_emu1010_fpga_read(emu, 0x20, &value);
|
|
- spin_unlock_irqrestore(&emu->emu_lock, flags);
|
|
if ((value & 0x4) == 0) {
|
|
- spin_lock_irqsave(&emu->emu_lock, flags);
|
|
snd_emu1010_fpga_read(emu, 0x28, &value);
|
|
snd_emu1010_fpga_read(emu, 0x29, &value2);
|
|
- spin_unlock_irqrestore(&emu->emu_lock, flags);
|
|
rate = 0x1770000 / (((value << 5) | value2)+1);
|
|
snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
|
|
} else {
|
|
@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
|
|
{
|
|
struct snd_emu10k1 *emu = entry->private_data;
|
|
u32 value;
|
|
- unsigned long flags;
|
|
int i;
|
|
snd_iprintf(buffer, "EMU1010 Registers:\n\n");
|
|
|
|
for(i = 0; i < 0x40; i+=1) {
|
|
- spin_lock_irqsave(&emu->emu_lock, flags);
|
|
snd_emu1010_fpga_read(emu, i, &value);
|
|
- spin_unlock_irqrestore(&emu->emu_lock, flags);
|
|
snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
|
|
}
|
|
}
|
|
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
|
|
index ae709c1..d514458 100644
|
|
--- a/sound/pci/emu10k1/memory.c
|
|
+++ b/sound/pci/emu10k1/memory.c
|
|
@@ -34,10 +34,11 @@
|
|
* aligned pages in others
|
|
*/
|
|
#define __set_ptb_entry(emu,page,addr) \
|
|
- (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
|
|
+ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
|
|
|
|
#define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
|
|
-#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
|
|
+#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
|
|
+#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
|
|
/* get aligned page from offset address */
|
|
#define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
|
|
/* get offset address from aligned page */
|
|
@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
|
|
}
|
|
page = blk->mapped_page + blk->pages;
|
|
}
|
|
- size = MAX_ALIGN_PAGES - page;
|
|
+ size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
|
|
if (size >= max_size) {
|
|
*nextp = pos;
|
|
return page;
|
|
@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
|
|
q = get_emu10k1_memblk(p, mapped_link);
|
|
end_page = q->mapped_page;
|
|
} else
|
|
- end_page = MAX_ALIGN_PAGES;
|
|
+ end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
|
|
|
|
/* remove links */
|
|
list_del(&blk->mapped_link);
|
|
@@ -305,7 +306,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
|
|
if (snd_BUG_ON(!emu))
|
|
return NULL;
|
|
if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
|
|
- runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
|
|
+ runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
|
|
return NULL;
|
|
hdr = emu->memhdr;
|
|
if (snd_BUG_ON(!hdr))
|
|
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
|
|
index dafcf82..97ebc10 100644
|
|
--- a/sound/pci/hda/hda_codec.c
|
|
+++ b/sound/pci/hda/hda_codec.c
|
|
@@ -338,8 +338,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
|
|
unsigned int parm;
|
|
|
|
parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
|
|
- if (parm == -1)
|
|
+ if (parm == -1) {
|
|
+ *start_id = 0;
|
|
return 0;
|
|
+ }
|
|
*start_id = (parm >> 16) & 0x7fff;
|
|
return (int)(parm & 0x7fff);
|
|
}
|
|
@@ -2080,6 +2082,16 @@ static void put_vol_mute(struct hda_codec *codec, unsigned int amp_caps,
|
|
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, parm);
|
|
}
|
|
|
|
+/* meta hook to call each driver's vmaster hook */
|
|
+static void vmaster_hook(void *private_data, int enabled)
|
|
+{
|
|
+ struct hda_vmaster_mute_hook *hook = private_data;
|
|
+
|
|
+ if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
|
|
+ enabled = hook->mute_mode;
|
|
+ hook->hook(hook->codec, enabled);
|
|
+}
|
|
+
|
|
/**
|
|
* snd_hda_codec_amp_read - Read AMP value
|
|
* @codec: HD-audio codec
|
|
@@ -2915,9 +2927,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
|
|
|
|
if (!hook->hook || !hook->sw_kctl)
|
|
return 0;
|
|
- snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
|
|
hook->codec = codec;
|
|
hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
|
|
+ snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
|
|
if (!expose_enum_ctl)
|
|
return 0;
|
|
kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
|
|
@@ -2940,14 +2952,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
|
|
*/
|
|
if (hook->codec->bus->shutdown)
|
|
return;
|
|
- switch (hook->mute_mode) {
|
|
- case HDA_VMUTE_FOLLOW_MASTER:
|
|
- snd_ctl_sync_vmaster_hook(hook->sw_kctl);
|
|
- break;
|
|
- default:
|
|
- hook->hook(hook->codec, hook->mute_mode);
|
|
- break;
|
|
- }
|
|
+ snd_ctl_sync_vmaster_hook(hook->sw_kctl);
|
|
}
|
|
EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
|
|
|
|
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
|
|
index d9a09bd..9a23bde 100644
|
|
--- a/sound/pci/hda/hda_generic.c
|
|
+++ b/sound/pci/hda/hda_generic.c
|
|
@@ -653,12 +653,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
|
|
return val;
|
|
}
|
|
|
|
+/* is this a stereo widget or a stereo-to-mono mix? */
|
|
+static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
|
|
+{
|
|
+ unsigned int wcaps = get_wcaps(codec, nid);
|
|
+ hda_nid_t conn;
|
|
+
|
|
+ if (wcaps & AC_WCAP_STEREO)
|
|
+ return true;
|
|
+ if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
|
|
+ return false;
|
|
+ if (snd_hda_get_num_conns(codec, nid) != 1)
|
|
+ return false;
|
|
+ if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
|
|
+ return false;
|
|
+ return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
|
|
+}
|
|
+
|
|
/* initialize the amp value (only at the first time) */
|
|
static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
|
|
{
|
|
unsigned int caps = query_amp_caps(codec, nid, dir);
|
|
int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
|
|
- snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
|
|
+
|
|
+ if (is_stereo_amps(codec, nid, dir))
|
|
+ snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
|
|
+ else
|
|
+ snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
|
|
+}
|
|
+
|
|
+/* update the amp, doing in stereo or mono depending on NID */
|
|
+static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
|
|
+ unsigned int mask, unsigned int val)
|
|
+{
|
|
+ if (is_stereo_amps(codec, nid, dir))
|
|
+ return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
|
|
+ mask, val);
|
|
+ else
|
|
+ return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
|
|
+ mask, val);
|
|
}
|
|
|
|
/* calculate amp value mask we can modify;
|
|
@@ -698,7 +731,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
|
|
return;
|
|
|
|
val &= mask;
|
|
- snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
|
|
+ update_amp(codec, nid, dir, idx, mask, val);
|
|
}
|
|
|
|
static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
|
|
@@ -4337,13 +4370,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
|
|
has_amp = nid_has_mute(codec, mix, HDA_INPUT);
|
|
for (i = 0; i < nums; i++) {
|
|
if (has_amp)
|
|
- snd_hda_codec_amp_stereo(codec, mix,
|
|
- HDA_INPUT, i,
|
|
- 0xff, HDA_AMP_MUTE);
|
|
+ update_amp(codec, mix, HDA_INPUT, i,
|
|
+ 0xff, HDA_AMP_MUTE);
|
|
else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
|
|
- snd_hda_codec_amp_stereo(codec, conn[i],
|
|
- HDA_OUTPUT, 0,
|
|
- 0xff, HDA_AMP_MUTE);
|
|
+ update_amp(codec, conn[i], HDA_OUTPUT, 0,
|
|
+ 0xff, HDA_AMP_MUTE);
|
|
}
|
|
}
|
|
|
|
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
|
|
index 7ec9142..84e8879 100644
|
|
--- a/sound/pci/hda/hda_intel.c
|
|
+++ b/sound/pci/hda/hda_intel.c
|
|
@@ -959,7 +959,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
|
|
}
|
|
}
|
|
|
|
- if (!bus->no_response_fallback)
|
|
+ if (bus->no_response_fallback)
|
|
return -1;
|
|
|
|
if (!chip->polling_mode && chip->poll_count < 2) {
|
|
@@ -3984,7 +3984,7 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
|
|
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
|
|
/* Panther Point */
|
|
{ PCI_DEVICE(0x8086, 0x1e20),
|
|
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
|
|
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
|
|
/* Lynx Point */
|
|
{ PCI_DEVICE(0x8086, 0x8c20),
|
|
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
|
|
@@ -4027,6 +4027,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
|
|
/* BayTrail */
|
|
{ PCI_DEVICE(0x8086, 0x0f04),
|
|
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
|
|
+ /* Braswell */
|
|
+ { PCI_DEVICE(0x8086, 0x2284),
|
|
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
|
|
/* ICH */
|
|
{ PCI_DEVICE(0x8086, 0x2668),
|
|
.driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
|
|
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
|
|
index ce5a6da..05e19f7 100644
|
|
--- a/sound/pci/hda/hda_proc.c
|
|
+++ b/sound/pci/hda/hda_proc.c
|
|
@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
|
|
(caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
|
|
}
|
|
|
|
+/* is this a stereo widget or a stereo-to-mono mix? */
|
|
+static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
|
|
+ int dir, unsigned int wcaps, int indices)
|
|
+{
|
|
+ hda_nid_t conn;
|
|
+
|
|
+ if (wcaps & AC_WCAP_STEREO)
|
|
+ return true;
|
|
+ /* check for a stereo-to-mono mix; it must be:
|
|
+ * only a single connection, only for input, and only a mixer widget
|
|
+ */
|
|
+ if (indices != 1 || dir != HDA_INPUT ||
|
|
+ get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
|
|
+ return false;
|
|
+
|
|
+ if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
|
|
+ return false;
|
|
+ /* the connection source is a stereo? */
|
|
+ wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
|
|
+ return !!(wcaps & AC_WCAP_STEREO);
|
|
+}
|
|
+
|
|
static void print_amp_vals(struct snd_info_buffer *buffer,
|
|
struct hda_codec *codec, hda_nid_t nid,
|
|
- int dir, int stereo, int indices)
|
|
+ int dir, unsigned int wcaps, int indices)
|
|
{
|
|
unsigned int val;
|
|
+ bool stereo;
|
|
int i;
|
|
|
|
+ stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
|
|
+
|
|
dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
|
|
for (i = 0; i < indices; i++) {
|
|
snd_iprintf(buffer, " [");
|
|
@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry,
|
|
(codec->single_adc_amp &&
|
|
wid_type == AC_WID_AUD_IN))
|
|
print_amp_vals(buffer, codec, nid, HDA_INPUT,
|
|
- wid_caps & AC_WCAP_STEREO,
|
|
- 1);
|
|
+ wid_caps, 1);
|
|
else
|
|
print_amp_vals(buffer, codec, nid, HDA_INPUT,
|
|
- wid_caps & AC_WCAP_STEREO,
|
|
- conn_len);
|
|
+ wid_caps, conn_len);
|
|
}
|
|
if (wid_caps & AC_WCAP_OUT_AMP) {
|
|
snd_iprintf(buffer, " Amp-Out caps: ");
|
|
@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry,
|
|
if (wid_type == AC_WID_PIN &&
|
|
codec->pin_amp_workaround)
|
|
print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
|
|
- wid_caps & AC_WCAP_STEREO,
|
|
- conn_len);
|
|
+ wid_caps, conn_len);
|
|
else
|
|
print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
|
|
- wid_caps & AC_WCAP_STEREO, 1);
|
|
+ wid_caps, 1);
|
|
}
|
|
|
|
switch (wid_type) {
|
|
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
|
|
index eaf64ea..1a05efa 100644
|
|
--- a/sound/pci/hda/patch_analog.c
|
|
+++ b/sound/pci/hda/patch_analog.c
|
|
@@ -333,6 +333,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
|
|
|
|
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
|
|
+ SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
|
|
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
|
|
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
|
|
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
|
|
index 46ecdbb..d5843da 100644
|
|
--- a/sound/pci/hda/patch_ca0132.c
|
|
+++ b/sound/pci/hda/patch_ca0132.c
|
|
@@ -4379,6 +4379,9 @@ static void ca0132_download_dsp(struct hda_codec *codec)
|
|
return; /* NOP */
|
|
#endif
|
|
|
|
+ if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
|
|
+ return; /* don't retry failures */
|
|
+
|
|
chipio_enable_clocks(codec);
|
|
spec->dsp_state = DSP_DOWNLOADING;
|
|
if (!ca0132_download_dsp_images(codec))
|
|
@@ -4555,7 +4558,8 @@ static int ca0132_init(struct hda_codec *codec)
|
|
struct auto_pin_cfg *cfg = &spec->autocfg;
|
|
int i;
|
|
|
|
- spec->dsp_state = DSP_DOWNLOAD_INIT;
|
|
+ if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
|
|
+ spec->dsp_state = DSP_DOWNLOAD_INIT;
|
|
spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
|
|
|
|
snd_hda_power_up(codec);
|
|
@@ -4666,6 +4670,7 @@ static int patch_ca0132(struct hda_codec *codec)
|
|
codec->spec = spec;
|
|
spec->codec = codec;
|
|
|
|
+ spec->dsp_state = DSP_DOWNLOAD_INIT;
|
|
spec->num_mixers = 1;
|
|
spec->mixers[0] = ca0132_mixer;
|
|
|
|
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
|
|
index fc492ac..7b0aac9 100644
|
|
--- a/sound/pci/hda/patch_cirrus.c
|
|
+++ b/sound/pci/hda/patch_cirrus.c
|
|
@@ -396,6 +396,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
|
|
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
|
|
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
|
|
+ SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
|
|
SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
|
|
SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
|
|
{} /* terminator */
|
|
@@ -587,6 +588,7 @@ static int patch_cs420x(struct hda_codec *codec)
|
|
return -ENOMEM;
|
|
|
|
spec->gen.automute_hook = cs_automute;
|
|
+ codec->single_adc_amp = 1;
|
|
|
|
snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
|
|
cs420x_fixups);
|
|
@@ -1000,9 +1002,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
|
|
|
|
spec->spdif_present = spdif_present;
|
|
/* SPDIF TX on/off */
|
|
- if (spdif_present)
|
|
- snd_hda_set_pin_ctl(codec, spdif_pin,
|
|
- spdif_present ? PIN_OUT : 0);
|
|
+ snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
|
|
|
|
cs_automute(codec);
|
|
}
|
|
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
|
|
index bcf91be..50981b1 100644
|
|
--- a/sound/pci/hda/patch_conexant.c
|
|
+++ b/sound/pci/hda/patch_conexant.c
|
|
@@ -3232,11 +3232,13 @@ enum {
|
|
CXT_PINCFG_LENOVO_TP410,
|
|
CXT_PINCFG_LEMOTE_A1004,
|
|
CXT_PINCFG_LEMOTE_A1205,
|
|
+ CXT_PINCFG_COMPAQ_CQ60,
|
|
CXT_FIXUP_STEREO_DMIC,
|
|
CXT_FIXUP_INC_MIC_BOOST,
|
|
CXT_FIXUP_HEADPHONE_MIC_PIN,
|
|
CXT_FIXUP_HEADPHONE_MIC,
|
|
CXT_FIXUP_GPIO1,
|
|
+ CXT_FIXUP_ASPIRE_DMIC,
|
|
CXT_FIXUP_THINKPAD_ACPI,
|
|
};
|
|
|
|
@@ -3367,6 +3369,15 @@ static const struct hda_fixup cxt_fixups[] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
.v.pins = cxt_pincfg_lemote,
|
|
},
|
|
+ [CXT_PINCFG_COMPAQ_CQ60] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ /* 0x17 was falsely set up as a mic, it should 0x1d */
|
|
+ { 0x17, 0x400001f0 },
|
|
+ { 0x1d, 0x97a70120 },
|
|
+ { }
|
|
+ }
|
|
+ },
|
|
[CXT_FIXUP_STEREO_DMIC] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = cxt_fixup_stereo_dmic,
|
|
@@ -3397,6 +3408,12 @@ static const struct hda_fixup cxt_fixups[] = {
|
|
{ }
|
|
},
|
|
},
|
|
+ [CXT_FIXUP_ASPIRE_DMIC] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = cxt_fixup_stereo_dmic,
|
|
+ .chained = true,
|
|
+ .chain_id = CXT_FIXUP_GPIO1,
|
|
+ },
|
|
[CXT_FIXUP_THINKPAD_ACPI] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = hda_fixup_thinkpad_acpi,
|
|
@@ -3404,13 +3421,14 @@ static const struct hda_fixup cxt_fixups[] = {
|
|
};
|
|
|
|
static const struct snd_pci_quirk cxt5051_fixups[] = {
|
|
+ SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
|
|
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
|
|
{}
|
|
};
|
|
|
|
static const struct snd_pci_quirk cxt5066_fixups[] = {
|
|
SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
|
|
- SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
|
|
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
|
|
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
|
|
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
|
|
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
|
|
@@ -3573,6 +3591,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
|
|
.patch = patch_conexant_auto },
|
|
{ .id = 0x14f150b9, .name = "CX20665",
|
|
.patch = patch_conexant_auto },
|
|
+ { .id = 0x14f150f1, .name = "CX20721",
|
|
+ .patch = patch_conexant_auto },
|
|
+ { .id = 0x14f150f2, .name = "CX20722",
|
|
+ .patch = patch_conexant_auto },
|
|
+ { .id = 0x14f150f3, .name = "CX20723",
|
|
+ .patch = patch_conexant_auto },
|
|
+ { .id = 0x14f150f4, .name = "CX20724",
|
|
+ .patch = patch_conexant_auto },
|
|
{ .id = 0x14f1510f, .name = "CX20751/2",
|
|
.patch = patch_conexant_auto },
|
|
{ .id = 0x14f15110, .name = "CX20751/2",
|
|
@@ -3607,6 +3633,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
|
|
MODULE_ALIAS("snd-hda-codec-id:14f150ac");
|
|
MODULE_ALIAS("snd-hda-codec-id:14f150b8");
|
|
MODULE_ALIAS("snd-hda-codec-id:14f150b9");
|
|
+MODULE_ALIAS("snd-hda-codec-id:14f150f1");
|
|
+MODULE_ALIAS("snd-hda-codec-id:14f150f2");
|
|
+MODULE_ALIAS("snd-hda-codec-id:14f150f3");
|
|
+MODULE_ALIAS("snd-hda-codec-id:14f150f4");
|
|
MODULE_ALIAS("snd-hda-codec-id:14f1510f");
|
|
MODULE_ALIAS("snd-hda-codec-id:14f15110");
|
|
MODULE_ALIAS("snd-hda-codec-id:14f15111");
|
|
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
|
|
index d135c90..611110a 100644
|
|
--- a/sound/pci/hda/patch_hdmi.c
|
|
+++ b/sound/pci/hda/patch_hdmi.c
|
|
@@ -1557,19 +1557,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
|
|
}
|
|
}
|
|
|
|
- if (pin_eld->eld_valid && !eld->eld_valid) {
|
|
- update_eld = true;
|
|
+ if (pin_eld->eld_valid != eld->eld_valid)
|
|
eld_changed = true;
|
|
- }
|
|
+
|
|
+ if (pin_eld->eld_valid && !eld->eld_valid)
|
|
+ update_eld = true;
|
|
+
|
|
if (update_eld) {
|
|
bool old_eld_valid = pin_eld->eld_valid;
|
|
pin_eld->eld_valid = eld->eld_valid;
|
|
- eld_changed = pin_eld->eld_size != eld->eld_size ||
|
|
+ if (pin_eld->eld_size != eld->eld_size ||
|
|
memcmp(pin_eld->eld_buffer, eld->eld_buffer,
|
|
- eld->eld_size) != 0;
|
|
- if (eld_changed)
|
|
+ eld->eld_size) != 0) {
|
|
memcpy(pin_eld->eld_buffer, eld->eld_buffer,
|
|
eld->eld_size);
|
|
+ eld_changed = true;
|
|
+ }
|
|
pin_eld->eld_size = eld->eld_size;
|
|
pin_eld->info = eld->info;
|
|
|
|
@@ -3314,6 +3317,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
|
|
{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
|
|
{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
|
|
{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
|
|
+{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
|
|
{ .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
|
|
{} /* terminator */
|
|
};
|
|
@@ -3370,6 +3374,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862807");
|
|
MODULE_ALIAS("snd-hda-codec-id:80862808");
|
|
MODULE_ALIAS("snd-hda-codec-id:80862880");
|
|
MODULE_ALIAS("snd-hda-codec-id:80862882");
|
|
+MODULE_ALIAS("snd-hda-codec-id:80862883");
|
|
MODULE_ALIAS("snd-hda-codec-id:808629fb");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 2a16a90..907371d 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -180,6 +180,8 @@ static void alc_fix_pll(struct hda_codec *codec)
|
|
spec->pll_coef_idx);
|
|
val = snd_hda_codec_read(codec, spec->pll_nid, 0,
|
|
AC_VERB_GET_PROC_COEF, 0);
|
|
+ if (val == -1)
|
|
+ return;
|
|
snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
|
|
spec->pll_coef_idx);
|
|
snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
|
|
@@ -269,7 +271,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
|
|
{
|
|
/* We currently only handle front, HP */
|
|
static hda_nid_t pins[] = {
|
|
- 0x0f, 0x10, 0x14, 0x15, 0
|
|
+ 0x0f, 0x10, 0x14, 0x15, 0x17, 0
|
|
};
|
|
hda_nid_t *p;
|
|
for (p = pins; *p; p++)
|
|
@@ -325,6 +327,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
|
|
case 0x10ec0885:
|
|
case 0x10ec0887:
|
|
/*case 0x10ec0889:*/ /* this causes an SPDIF problem */
|
|
+ case 0x10ec0900:
|
|
alc889_coef_init(codec);
|
|
break;
|
|
case 0x10ec0888:
|
|
@@ -2223,6 +2226,7 @@ static const struct hda_fixup alc882_fixups[] = {
|
|
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
|
|
SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
|
|
+ SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
|
|
SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
|
|
SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
|
|
SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
|
|
@@ -2278,7 +2282,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
|
|
SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
|
|
SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
|
|
- SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
|
|
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
|
|
|
|
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
|
|
@@ -2328,6 +2332,7 @@ static int patch_alc882(struct hda_codec *codec)
|
|
switch (codec->vendor_id) {
|
|
case 0x10ec0882:
|
|
case 0x10ec0885:
|
|
+ case 0x10ec0900:
|
|
break;
|
|
default:
|
|
/* ALC883 and variants */
|
|
@@ -2765,6 +2770,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
|
|
static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
|
|
{
|
|
int val = alc_read_coef_idx(codec, 0x04);
|
|
+ if (val == -1)
|
|
+ return;
|
|
if (power_up)
|
|
val |= 1 << 11;
|
|
else
|
|
@@ -2879,6 +2886,8 @@ static void alc283_init(struct hda_codec *codec)
|
|
|
|
if (!hp_pin)
|
|
return;
|
|
+
|
|
+ msleep(30);
|
|
hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
|
|
|
|
/* Index 0x43 Direct Drive HP AMP LPM Control 1 */
|
|
@@ -2920,6 +2929,9 @@ static void alc283_shutup(struct hda_codec *codec)
|
|
|
|
alc_write_coef_idx(codec, 0x43, 0x9004);
|
|
|
|
+ /*depop hp during suspend*/
|
|
+ alc_write_coef_idx(codec, 0x06, 0x2100);
|
|
+
|
|
snd_hda_codec_write(codec, hp_pin, 0,
|
|
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
|
|
|
|
@@ -3064,6 +3076,15 @@ static int alc269_resume(struct hda_codec *codec)
|
|
snd_hda_codec_resume_cache(codec);
|
|
alc_inv_dmic_sync(codec, true);
|
|
hda_call_check_power_status(codec, 0x01);
|
|
+
|
|
+ /* on some machine, the BIOS will clear the codec gpio data when enter
|
|
+ * suspend, and won't restore the data after resume, so we restore it
|
|
+ * in the driver.
|
|
+ */
|
|
+ if (spec->gpio_led)
|
|
+ snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA,
|
|
+ spec->gpio_led);
|
|
+
|
|
if (spec->has_alc5505_dsp)
|
|
alc5505_dsp_resume(codec);
|
|
|
|
@@ -3933,6 +3954,8 @@ enum {
|
|
ALC269_FIXUP_QUANTA_MUTE,
|
|
ALC269_FIXUP_LIFEBOOK,
|
|
ALC269_FIXUP_LIFEBOOK_EXTMIC,
|
|
+ ALC269_FIXUP_LIFEBOOK_HP_PIN,
|
|
+ ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
|
|
ALC269_FIXUP_AMIC,
|
|
ALC269_FIXUP_DMIC,
|
|
ALC269VB_FIXUP_AMIC,
|
|
@@ -3951,6 +3974,7 @@ enum {
|
|
ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
|
|
ALC269_FIXUP_HEADSET_MODE,
|
|
ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
|
|
+ ALC269_FIXUP_ASPIRE_HEADSET_MIC,
|
|
ALC269_FIXUP_ASUS_X101_FUNC,
|
|
ALC269_FIXUP_ASUS_X101_VERB,
|
|
ALC269_FIXUP_ASUS_X101,
|
|
@@ -4067,6 +4091,17 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
{ }
|
|
},
|
|
},
|
|
+ [ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ { 0x21, 0x0221102f }, /* HP out */
|
|
+ { }
|
|
+ },
|
|
+ },
|
|
+ [ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
|
|
+ },
|
|
[ALC269_FIXUP_AMIC] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
.v.pins = (const struct hda_pintbl[]) {
|
|
@@ -4185,6 +4220,15 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc_fixup_headset_mode_no_hp_mic,
|
|
},
|
|
+ [ALC269_FIXUP_ASPIRE_HEADSET_MIC] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ { 0x19, 0x01a1913c }, /* headset mic w/o jack detect */
|
|
+ { }
|
|
+ },
|
|
+ .chained = true,
|
|
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
|
|
+ },
|
|
[ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
.v.pins = (const struct hda_pintbl[]) {
|
|
@@ -4368,6 +4412,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
|
|
SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
|
|
SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
|
|
+ SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
|
|
+ SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
|
|
SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
|
|
SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
|
|
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
|
|
@@ -4435,6 +4481,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
|
|
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
|
|
/* ALC282 */
|
|
+ SND_PCI_QUIRK(0x103c, 0x2191, "HP Touchsmart 14", ALC269_FIXUP_HP_MUTE_LED_MIC1),
|
|
+ SND_PCI_QUIRK(0x103c, 0x2192, "HP Touchsmart 15", ALC269_FIXUP_HP_MUTE_LED_MIC1),
|
|
SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
|
|
SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
|
|
SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
|
|
@@ -4518,6 +4566,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
|
|
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
|
|
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
|
|
+ SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
|
|
+ SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
|
|
+ SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
|
|
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
|
|
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
|
|
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
|
|
@@ -4534,6 +4585,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
|
|
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
|
|
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
|
|
@@ -4633,27 +4685,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
|
|
if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
|
|
val = alc_read_coef_idx(codec, 0x04);
|
|
/* Power up output pin */
|
|
- alc_write_coef_idx(codec, 0x04, val | (1<<11));
|
|
+ if (val != -1)
|
|
+ alc_write_coef_idx(codec, 0x04, val | (1<<11));
|
|
}
|
|
|
|
if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
|
|
val = alc_read_coef_idx(codec, 0xd);
|
|
- if ((val & 0x0c00) >> 10 != 0x1) {
|
|
+ if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
|
|
/* Capless ramp up clock control */
|
|
alc_write_coef_idx(codec, 0xd, val | (1<<10));
|
|
}
|
|
val = alc_read_coef_idx(codec, 0x17);
|
|
- if ((val & 0x01c0) >> 6 != 0x4) {
|
|
+ if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
|
|
/* Class D power on reset */
|
|
alc_write_coef_idx(codec, 0x17, val | (1<<7));
|
|
}
|
|
}
|
|
|
|
val = alc_read_coef_idx(codec, 0xd); /* Class D */
|
|
- alc_write_coef_idx(codec, 0xd, val | (1<<14));
|
|
+ if (val != -1)
|
|
+ alc_write_coef_idx(codec, 0xd, val | (1<<14));
|
|
|
|
val = alc_read_coef_idx(codec, 0x4); /* HP */
|
|
- alc_write_coef_idx(codec, 0x4, val | (1<<11));
|
|
+ if (val != -1)
|
|
+ alc_write_coef_idx(codec, 0x4, val | (1<<11));
|
|
}
|
|
|
|
/*
|
|
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
|
|
index 3bc29c9..231b264 100644
|
|
--- a/sound/pci/hda/patch_sigmatel.c
|
|
+++ b/sound/pci/hda/patch_sigmatel.c
|
|
@@ -84,6 +84,8 @@ enum {
|
|
STAC_DELL_EQ,
|
|
STAC_ALIENWARE_M17X,
|
|
STAC_92HD89XX_HP_FRONT_JACK,
|
|
+ STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
|
|
+ STAC_92HD73XX_ASUS_MOBO,
|
|
STAC_92HD73XX_MODELS
|
|
};
|
|
|
|
@@ -558,8 +560,8 @@ static void stac_init_power_map(struct hda_codec *codec)
|
|
if (snd_hda_jack_tbl_get(codec, nid))
|
|
continue;
|
|
if (def_conf == AC_JACK_PORT_COMPLEX &&
|
|
- !(spec->vref_mute_led_nid == nid ||
|
|
- is_jack_detectable(codec, nid))) {
|
|
+ spec->vref_mute_led_nid != nid &&
|
|
+ is_jack_detectable(codec, nid)) {
|
|
snd_hda_jack_detect_enable_callback(codec, nid,
|
|
STAC_PWR_EVENT,
|
|
jack_update_power);
|
|
@@ -592,9 +594,9 @@ static void stac_store_hints(struct hda_codec *codec)
|
|
spec->gpio_mask;
|
|
}
|
|
if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
|
|
- spec->gpio_mask &= spec->gpio_mask;
|
|
- if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
|
|
spec->gpio_dir &= spec->gpio_mask;
|
|
+ if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
|
|
+ spec->gpio_data &= spec->gpio_mask;
|
|
if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
|
|
spec->eapd_mask &= spec->gpio_mask;
|
|
if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
|
|
@@ -1803,6 +1805,11 @@ static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
|
|
{}
|
|
};
|
|
|
|
+static const struct hda_pintbl stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs[] = {
|
|
+ { 0x0e, 0x400000f0 },
|
|
+ {}
|
|
+};
|
|
+
|
|
static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
|
|
const struct hda_fixup *fix, int action)
|
|
{
|
|
@@ -1925,7 +1932,22 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
|
|
[STAC_92HD89XX_HP_FRONT_JACK] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
.v.pins = stac92hd89xx_hp_front_jack_pin_configs,
|
|
- }
|
|
+ },
|
|
+ [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
|
|
+ },
|
|
+ [STAC_92HD73XX_ASUS_MOBO] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ /* enable 5.1 and SPDIF out */
|
|
+ { 0x0c, 0x01014411 },
|
|
+ { 0x0d, 0x01014410 },
|
|
+ { 0x0e, 0x01014412 },
|
|
+ { 0x22, 0x014b1180 },
|
|
+ { }
|
|
+ }
|
|
+ },
|
|
};
|
|
|
|
static const struct hda_model_fixup stac92hd73xx_models[] = {
|
|
@@ -1937,6 +1959,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
|
|
{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
|
|
{ .id = STAC_DELL_EQ, .name = "dell-eq" },
|
|
{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
|
|
+ { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
|
|
{}
|
|
};
|
|
|
|
@@ -1985,8 +2008,12 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
|
|
"Alienware M17x", STAC_ALIENWARE_M17X),
|
|
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
|
|
"Alienware M17x R3", STAC_DELL_EQ),
|
|
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
|
|
+ "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
|
|
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
|
|
"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
|
|
+ SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
|
|
+ STAC_92HD73XX_ASUS_MOBO),
|
|
{} /* terminator */
|
|
};
|
|
|
|
@@ -4200,11 +4227,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
|
|
return err;
|
|
}
|
|
|
|
- stac_init_power_map(codec);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
+static int stac_build_controls(struct hda_codec *codec)
|
|
+{
|
|
+ int err = snd_hda_gen_build_controls(codec);
|
|
+
|
|
+ if (err < 0)
|
|
+ return err;
|
|
+ stac_init_power_map(codec);
|
|
+ return 0;
|
|
+}
|
|
|
|
static int stac_init(struct hda_codec *codec)
|
|
{
|
|
@@ -4316,7 +4350,7 @@ static int stac_suspend(struct hda_codec *codec)
|
|
#endif /* CONFIG_PM */
|
|
|
|
static const struct hda_codec_ops stac_patch_ops = {
|
|
- .build_controls = snd_hda_gen_build_controls,
|
|
+ .build_controls = stac_build_controls,
|
|
.build_pcms = snd_hda_gen_build_pcms,
|
|
.init = stac_init,
|
|
.free = stac_free,
|
|
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
|
|
index 64b9fda..dbbbacf 100644
|
|
--- a/sound/pci/oxygen/virtuoso.c
|
|
+++ b/sound/pci/oxygen/virtuoso.c
|
|
@@ -53,6 +53,7 @@ static DEFINE_PCI_DEVICE_TABLE(xonar_ids) = {
|
|
{ OXYGEN_PCI_SUBID(0x1043, 0x835e) },
|
|
{ OXYGEN_PCI_SUBID(0x1043, 0x838e) },
|
|
{ OXYGEN_PCI_SUBID(0x1043, 0x8522) },
|
|
+ { OXYGEN_PCI_SUBID(0x1043, 0x85f4) },
|
|
{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
|
|
{ }
|
|
};
|
|
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
|
|
index c8c7f2c..e026059 100644
|
|
--- a/sound/pci/oxygen/xonar_pcm179x.c
|
|
+++ b/sound/pci/oxygen/xonar_pcm179x.c
|
|
@@ -100,8 +100,8 @@
|
|
*/
|
|
|
|
/*
|
|
- * Xonar Essence ST (Deluxe)/STX
|
|
- * -----------------------------
|
|
+ * Xonar Essence ST (Deluxe)/STX (II)
|
|
+ * ----------------------------------
|
|
*
|
|
* CMI8788:
|
|
*
|
|
@@ -1138,6 +1138,14 @@ int get_xonar_pcm179x_model(struct oxygen *chip,
|
|
chip->model.resume = xonar_stx_resume;
|
|
chip->model.set_dac_params = set_pcm1796_params;
|
|
break;
|
|
+ case 0x85f4:
|
|
+ chip->model = model_xonar_st;
|
|
+ /* TODO: daughterboard support */
|
|
+ chip->model.shortname = "Xonar STX II";
|
|
+ chip->model.init = xonar_stx_init;
|
|
+ chip->model.resume = xonar_stx_resume;
|
|
+ chip->model.set_dac_params = set_pcm1796_params;
|
|
+ break;
|
|
default:
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
|
|
index 56cc891..d99c8d3 100644
|
|
--- a/sound/pci/riptide/riptide.c
|
|
+++ b/sound/pci/riptide/riptide.c
|
|
@@ -2032,32 +2032,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
|
|
{
|
|
static int dev;
|
|
struct gameport *gameport;
|
|
+ int ret;
|
|
|
|
if (dev >= SNDRV_CARDS)
|
|
return -ENODEV;
|
|
+
|
|
if (!enable[dev]) {
|
|
- dev++;
|
|
- return -ENOENT;
|
|
+ ret = -ENOENT;
|
|
+ goto inc_dev;
|
|
}
|
|
|
|
- if (!joystick_port[dev++])
|
|
- return 0;
|
|
+ if (!joystick_port[dev]) {
|
|
+ ret = 0;
|
|
+ goto inc_dev;
|
|
+ }
|
|
|
|
gameport = gameport_allocate_port();
|
|
- if (!gameport)
|
|
- return -ENOMEM;
|
|
+ if (!gameport) {
|
|
+ ret = -ENOMEM;
|
|
+ goto inc_dev;
|
|
+ }
|
|
if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
|
|
snd_printk(KERN_WARNING
|
|
"Riptide: cannot grab gameport 0x%x\n",
|
|
joystick_port[dev]);
|
|
gameport_free_port(gameport);
|
|
- return -EBUSY;
|
|
+ ret = -EBUSY;
|
|
+ goto inc_dev;
|
|
}
|
|
|
|
gameport->io = joystick_port[dev];
|
|
gameport_register_port(gameport);
|
|
pci_set_drvdata(pci, gameport);
|
|
- return 0;
|
|
+
|
|
+ ret = 0;
|
|
+inc_dev:
|
|
+ dev++;
|
|
+ return ret;
|
|
}
|
|
|
|
static void snd_riptide_joystick_remove(struct pci_dev *pci)
|
|
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
|
|
index e98dc00..2116750 100644
|
|
--- a/sound/pci/rme9652/hdspm.c
|
|
+++ b/sound/pci/rme9652/hdspm.c
|
|
@@ -6102,6 +6102,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
|
|
snd_pcm_hw_constraint_minmax(runtime,
|
|
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
|
|
64, 8192);
|
|
+ snd_pcm_hw_constraint_minmax(runtime,
|
|
+ SNDRV_PCM_HW_PARAM_PERIODS,
|
|
+ 2, 2);
|
|
break;
|
|
}
|
|
|
|
@@ -6176,6 +6179,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
|
|
snd_pcm_hw_constraint_minmax(runtime,
|
|
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
|
|
64, 8192);
|
|
+ snd_pcm_hw_constraint_minmax(runtime,
|
|
+ SNDRV_PCM_HW_PARAM_PERIODS,
|
|
+ 2, 2);
|
|
break;
|
|
}
|
|
|
|
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
|
|
index 1ead3c9..f20e703 100644
|
|
--- a/sound/soc/atmel/atmel_ssc_dai.c
|
|
+++ b/sound/soc/atmel/atmel_ssc_dai.c
|
|
@@ -344,7 +344,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
|
|
struct atmel_pcm_dma_params *dma_params;
|
|
int dir, channels, bits;
|
|
u32 tfmr, rfmr, tcmr, rcmr;
|
|
- int start_event;
|
|
int ret;
|
|
|
|
/*
|
|
@@ -451,19 +450,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
|
|
* The SSC transmit clock is obtained from the BCLK signal on
|
|
* on the TK line, and the SSC receive clock is
|
|
* generated from the transmit clock.
|
|
- *
|
|
- * For single channel data, one sample is transferred
|
|
- * on the falling edge of the LRC clock.
|
|
- * For two channel data, one sample is
|
|
- * transferred on both edges of the LRC clock.
|
|
*/
|
|
- start_event = ((channels == 1)
|
|
- ? SSC_START_FALLING_RF
|
|
- : SSC_START_EDGE_RF);
|
|
-
|
|
rcmr = SSC_BF(RCMR_PERIOD, 0)
|
|
| SSC_BF(RCMR_STTDLY, START_DELAY)
|
|
- | SSC_BF(RCMR_START, start_event)
|
|
+ | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
|
|
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
|
|
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
|
|
| SSC_BF(RCMR_CKS, SSC_CKS_CLOCK);
|
|
@@ -471,14 +461,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
|
|
rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
|
|
| SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
|
|
| SSC_BF(RFMR_FSLEN, 0)
|
|
- | SSC_BF(RFMR_DATNB, 0)
|
|
+ | SSC_BF(RFMR_DATNB, (channels - 1))
|
|
| SSC_BIT(RFMR_MSBF)
|
|
| SSC_BF(RFMR_LOOP, 0)
|
|
| SSC_BF(RFMR_DATLEN, (bits - 1));
|
|
|
|
tcmr = SSC_BF(TCMR_PERIOD, 0)
|
|
| SSC_BF(TCMR_STTDLY, START_DELAY)
|
|
- | SSC_BF(TCMR_START, start_event)
|
|
+ | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
|
|
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
|
|
| SSC_BF(TCMR_CKO, SSC_CKO_NONE)
|
|
| SSC_BF(TCMR_CKS, SSC_CKS_PIN);
|
|
@@ -487,7 +477,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
|
|
| SSC_BF(TFMR_FSDEN, 0)
|
|
| SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
|
|
| SSC_BF(TFMR_FSLEN, 0)
|
|
- | SSC_BF(TFMR_DATNB, 0)
|
|
+ | SSC_BF(TFMR_DATNB, (channels - 1))
|
|
| SSC_BIT(TFMR_MSBF)
|
|
| SSC_BF(TFMR_DATDEF, 0)
|
|
| SSC_BF(TFMR_DATLEN, (bits - 1));
|
|
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
|
|
index a3881c4..bcf5913 100644
|
|
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
|
|
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
|
|
@@ -290,19 +290,19 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
|
|
unsigned int sample_size = runtime->sample_bits / 8;
|
|
void *buf = runtime->dma_area;
|
|
struct bf5xx_i2s_pcm_data *dma_data;
|
|
- unsigned int offset, size;
|
|
+ unsigned int offset, samples;
|
|
|
|
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
|
|
|
|
if (dma_data->tdm_mode) {
|
|
offset = pos * 8 * sample_size;
|
|
- size = count * 8 * sample_size;
|
|
+ samples = count * 8;
|
|
} else {
|
|
offset = frames_to_bytes(runtime, pos);
|
|
- size = frames_to_bytes(runtime, count);
|
|
+ samples = count * runtime->channels;
|
|
}
|
|
|
|
- snd_pcm_format_set_silence(runtime->format, buf + offset, size);
|
|
+ snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
|
|
index d71c59c..370b742 100644
|
|
--- a/sound/soc/codecs/adau1701.c
|
|
+++ b/sound/soc/codecs/adau1701.c
|
|
@@ -230,8 +230,10 @@ static int adau1701_reg_read(void *context, unsigned int reg,
|
|
|
|
*value = 0;
|
|
|
|
- for (i = 0; i < size; i++)
|
|
- *value |= recv_buf[i] << (i * 8);
|
|
+ for (i = 0; i < size; i++) {
|
|
+ *value <<= 8;
|
|
+ *value |= recv_buf[i];
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
|
|
index f78b27a..23454e9 100644
|
|
--- a/sound/soc/codecs/adav80x.c
|
|
+++ b/sound/soc/codecs/adav80x.c
|
|
@@ -319,7 +319,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
|
|
- unsigned int deemph = ucontrol->value.enumerated.item[0];
|
|
+ unsigned int deemph = ucontrol->value.integer.value[0];
|
|
|
|
if (deemph > 1)
|
|
return -EINVAL;
|
|
@@ -335,7 +335,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = adav80x->deemph;
|
|
+ ucontrol->value.integer.value[0] = adav80x->deemph;
|
|
return 0;
|
|
};
|
|
|
|
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
|
|
index 94cbe50..d718472 100644
|
|
--- a/sound/soc/codecs/ak4641.c
|
|
+++ b/sound/soc/codecs/ak4641.c
|
|
@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
|
|
- int deemph = ucontrol->value.enumerated.item[0];
|
|
+ int deemph = ucontrol->value.integer.value[0];
|
|
|
|
if (deemph > 1)
|
|
return -EINVAL;
|
|
@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = ak4641->deemph;
|
|
+ ucontrol->value.integer.value[0] = ak4641->deemph;
|
|
return 0;
|
|
};
|
|
|
|
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
|
|
index ce05fd9..a0ad41a 100644
|
|
--- a/sound/soc/codecs/cs4271.c
|
|
+++ b/sound/soc/codecs/cs4271.c
|
|
@@ -288,7 +288,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = cs4271->deemph;
|
|
+ ucontrol->value.integer.value[0] = cs4271->deemph;
|
|
return 0;
|
|
}
|
|
|
|
@@ -298,7 +298,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- cs4271->deemph = ucontrol->value.enumerated.item[0];
|
|
+ cs4271->deemph = ucontrol->value.integer.value[0];
|
|
return cs4271_set_deemph(codec);
|
|
}
|
|
|
|
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
|
|
index b3f7c90..9dd260f 100644
|
|
--- a/sound/soc/codecs/max98090.c
|
|
+++ b/sound/soc/codecs/max98090.c
|
|
@@ -1378,8 +1378,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
|
|
{"STENL Mux", "Sidetone Left", "DMICL"},
|
|
{"STENR Mux", "Sidetone Right", "ADCR"},
|
|
{"STENR Mux", "Sidetone Right", "DMICR"},
|
|
- {"DACL", "NULL", "STENL Mux"},
|
|
- {"DACR", "NULL", "STENL Mux"},
|
|
+ {"DACL", NULL, "STENL Mux"},
|
|
+ {"DACR", NULL, "STENL Mux"},
|
|
|
|
{"AIFINL", NULL, "SHDN"},
|
|
{"AIFINR", NULL, "SHDN"},
|
|
@@ -2250,7 +2250,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
|
|
/* Register for interrupts */
|
|
dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
|
|
|
|
- ret = request_threaded_irq(max98090->irq, NULL,
|
|
+ ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
|
|
max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
|
|
"max98090_interrupt", codec);
|
|
if (ret < 0) {
|
|
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
|
|
index 582c2bb..b852293 100644
|
|
--- a/sound/soc/codecs/mc13783.c
|
|
+++ b/sound/soc/codecs/mc13783.c
|
|
@@ -634,14 +634,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
|
|
AUDIO_SSI_SEL, 0);
|
|
else
|
|
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
|
|
- 0, AUDIO_SSI_SEL);
|
|
+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
|
|
|
|
if (priv->dac_ssi_port == MC13783_SSI1_PORT)
|
|
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
|
|
AUDIO_SSI_SEL, 0);
|
|
else
|
|
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
|
|
- 0, AUDIO_SSI_SEL);
|
|
+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
|
|
index 73f9c36..dfa9755 100644
|
|
--- a/sound/soc/codecs/pcm1681.c
|
|
+++ b/sound/soc/codecs/pcm1681.c
|
|
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
|
|
|
|
if (val != -1) {
|
|
regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
|
|
- PCM1681_DEEMPH_RATE_MASK, val);
|
|
+ PCM1681_DEEMPH_RATE_MASK, val << 3);
|
|
enable = 1;
|
|
} else
|
|
enable = 0;
|
|
@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = priv->deemph;
|
|
+ ucontrol->value.integer.value[0] = priv->deemph;
|
|
|
|
return 0;
|
|
}
|
|
@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- priv->deemph = ucontrol->value.enumerated.item[0];
|
|
+ priv->deemph = ucontrol->value.integer.value[0];
|
|
|
|
return pcm1681_set_deemph(codec);
|
|
}
|
|
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
|
|
index 8869249..5cb515b 100644
|
|
--- a/sound/soc/codecs/rt5640.c
|
|
+++ b/sound/soc/codecs/rt5640.c
|
|
@@ -2071,6 +2071,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
|
|
static const struct regmap_config rt5640_regmap = {
|
|
.reg_bits = 8,
|
|
.val_bits = 16,
|
|
+ .use_single_rw = true,
|
|
|
|
.max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
|
|
RT5640_PR_SPACING),
|
|
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
|
|
index 893c95b..5ea3b4e 100644
|
|
--- a/sound/soc/codecs/sgtl5000.c
|
|
+++ b/sound/soc/codecs/sgtl5000.c
|
|
@@ -1379,8 +1379,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
|
|
|
|
/* enable small pop, introduce 400ms delay in turning off */
|
|
snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
|
|
- SGTL5000_SMALL_POP,
|
|
- SGTL5000_SMALL_POP);
|
|
+ SGTL5000_SMALL_POP, 1);
|
|
|
|
/* disable short cut detector */
|
|
snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
|
|
@@ -1534,6 +1533,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ /* Need 8 clocks before I2C accesses */
|
|
+ udelay(1);
|
|
+
|
|
/* read chip information */
|
|
ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, ®);
|
|
if (ret)
|
|
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
|
|
index 2f8c889..bd7a344 100644
|
|
--- a/sound/soc/codecs/sgtl5000.h
|
|
+++ b/sound/soc/codecs/sgtl5000.h
|
|
@@ -275,7 +275,7 @@
|
|
#define SGTL5000_BIAS_CTRL_MASK 0x000e
|
|
#define SGTL5000_BIAS_CTRL_SHIFT 1
|
|
#define SGTL5000_BIAS_CTRL_WIDTH 3
|
|
-#define SGTL5000_SMALL_POP 0x0001
|
|
+#define SGTL5000_SMALL_POP 0
|
|
|
|
/*
|
|
* SGTL5000_CHIP_MIC_CTRL
|
|
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
|
|
index 4068f24..bb3878c 100644
|
|
--- a/sound/soc/codecs/sigmadsp.c
|
|
+++ b/sound/soc/codecs/sigmadsp.c
|
|
@@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev,
|
|
goto done;
|
|
}
|
|
|
|
+ if (ssfw_head->version != 1) {
|
|
+ dev_err(dev,
|
|
+ "Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n",
|
|
+ ssfw_head->version);
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
crc = crc32(0, fw->data + sizeof(*ssfw_head),
|
|
fw->size - sizeof(*ssfw_head));
|
|
pr_debug("%s: crc=%x\n", __func__, crc);
|
|
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
|
|
index a895a5e..c6c6500 100644
|
|
--- a/sound/soc/codecs/tas5086.c
|
|
+++ b/sound/soc/codecs/tas5086.c
|
|
@@ -275,7 +275,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = priv->deemph;
|
|
+ ucontrol->value.integer.value[0] = priv->deemph;
|
|
|
|
return 0;
|
|
}
|
|
@@ -286,7 +286,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- priv->deemph = ucontrol->value.enumerated.item[0];
|
|
+ priv->deemph = ucontrol->value.integer.value[0];
|
|
|
|
return tas5086_set_deemph(codec);
|
|
}
|
|
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
|
|
index eb241c6..fd53d37 100644
|
|
--- a/sound/soc/codecs/tlv320aic3x.c
|
|
+++ b/sound/soc/codecs/tlv320aic3x.c
|
|
@@ -1121,6 +1121,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
|
|
static int aic3x_set_power(struct snd_soc_codec *codec, int power)
|
|
{
|
|
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
|
|
+ unsigned int pll_c, pll_d;
|
|
int ret;
|
|
|
|
if (power) {
|
|
@@ -1138,6 +1139,18 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
|
|
/* Sync reg_cache with the hardware */
|
|
regcache_cache_only(aic3x->regmap, false);
|
|
regcache_sync(aic3x->regmap);
|
|
+
|
|
+ /* Rewrite paired PLL D registers in case cached sync skipped
|
|
+ * writing one of them and thus caused other one also not
|
|
+ * being written
|
|
+ */
|
|
+ pll_c = snd_soc_read(codec, AIC3X_PLL_PROGC_REG);
|
|
+ pll_d = snd_soc_read(codec, AIC3X_PLL_PROGD_REG);
|
|
+ if (pll_c == aic3x_reg[AIC3X_PLL_PROGC_REG].def ||
|
|
+ pll_d == aic3x_reg[AIC3X_PLL_PROGD_REG].def) {
|
|
+ snd_soc_write(codec, AIC3X_PLL_PROGC_REG, pll_c);
|
|
+ snd_soc_write(codec, AIC3X_PLL_PROGD_REG, pll_d);
|
|
+ }
|
|
} else {
|
|
/*
|
|
* Do soft reset to this codec instance in order to clear
|
|
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
|
|
index 8ae5027..1a9f457 100644
|
|
--- a/sound/soc/codecs/wm2000.c
|
|
+++ b/sound/soc/codecs/wm2000.c
|
|
@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm2000->anc_active;
|
|
+ ucontrol->value.integer.value[0] = wm2000->anc_active;
|
|
|
|
return 0;
|
|
}
|
|
@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
|
|
- int anc_active = ucontrol->value.enumerated.item[0];
|
|
+ int anc_active = ucontrol->value.integer.value[0];
|
|
int ret;
|
|
|
|
if (anc_active > 1)
|
|
@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm2000->spk_ena;
|
|
+ ucontrol->value.integer.value[0] = wm2000->spk_ena;
|
|
|
|
return 0;
|
|
}
|
|
@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
|
|
- int val = ucontrol->value.enumerated.item[0];
|
|
+ int val = ucontrol->value.integer.value[0];
|
|
int ret;
|
|
|
|
if (val > 1)
|
|
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
|
|
index ce9c8e1..fbee45c 100644
|
|
--- a/sound/soc/codecs/wm5102.c
|
|
+++ b/sound/soc/codecs/wm5102.c
|
|
@@ -41,7 +41,7 @@ struct wm5102_priv {
|
|
static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
|
|
static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
|
|
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
|
|
-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
|
|
+static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
|
|
static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
|
|
|
|
static const struct wm_adsp_region wm5102_dsp1_regions[] = {
|
|
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
|
|
index 2c3c962..0fce853 100644
|
|
--- a/sound/soc/codecs/wm5110.c
|
|
+++ b/sound/soc/codecs/wm5110.c
|
|
@@ -167,7 +167,7 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
|
|
static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
|
|
static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
|
|
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
|
|
-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
|
|
+static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
|
|
static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
|
|
|
|
#define WM5110_NG_SRC(name, base) \
|
|
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
|
|
index 0297203..e593722 100644
|
|
--- a/sound/soc/codecs/wm8731.c
|
|
+++ b/sound/soc/codecs/wm8731.c
|
|
@@ -122,7 +122,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm8731->deemph;
|
|
+ ucontrol->value.integer.value[0] = wm8731->deemph;
|
|
|
|
return 0;
|
|
}
|
|
@@ -132,7 +132,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
|
|
- int deemph = ucontrol->value.enumerated.item[0];
|
|
+ int deemph = ucontrol->value.integer.value[0];
|
|
int ret = 0;
|
|
|
|
if (deemph > 1)
|
|
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
|
|
index 2f167a8..62bacb8 100644
|
|
--- a/sound/soc/codecs/wm8737.c
|
|
+++ b/sound/soc/codecs/wm8737.c
|
|
@@ -494,7 +494,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
|
|
|
|
/* Fast VMID ramp at 2*2.5k */
|
|
snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
|
|
- WM8737_VMIDSEL_MASK, 0x4);
|
|
+ WM8737_VMIDSEL_MASK,
|
|
+ 2 << WM8737_VMIDSEL_SHIFT);
|
|
|
|
/* Bring VMID up */
|
|
snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
|
|
@@ -508,7 +509,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
|
|
|
|
/* VMID at 2*300k */
|
|
snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
|
|
- WM8737_VMIDSEL_MASK, 2);
|
|
+ WM8737_VMIDSEL_MASK,
|
|
+ 1 << WM8737_VMIDSEL_SHIFT);
|
|
|
|
break;
|
|
|
|
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
|
|
index eebcb1d..ae7d76e 100644
|
|
--- a/sound/soc/codecs/wm8903.c
|
|
+++ b/sound/soc/codecs/wm8903.c
|
|
@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm8903->deemph;
|
|
+ ucontrol->value.integer.value[0] = wm8903->deemph;
|
|
|
|
return 0;
|
|
}
|
|
@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
|
|
- int deemph = ucontrol->value.enumerated.item[0];
|
|
+ int deemph = ucontrol->value.integer.value[0];
|
|
int ret = 0;
|
|
|
|
if (deemph > 1)
|
|
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
|
|
index db94931..0bb4a64 100644
|
|
--- a/sound/soc/codecs/wm8903.h
|
|
+++ b/sound/soc/codecs/wm8903.h
|
|
@@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
|
|
#define WM8903_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
|
|
|
|
#define WM8903_VMID_RES_50K 2
|
|
-#define WM8903_VMID_RES_250K 3
|
|
+#define WM8903_VMID_RES_250K 4
|
|
#define WM8903_VMID_RES_5K 6
|
|
|
|
/*
|
|
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
|
|
index 53bbfac..66cb9e9 100644
|
|
--- a/sound/soc/codecs/wm8904.c
|
|
+++ b/sound/soc/codecs/wm8904.c
|
|
@@ -523,7 +523,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm8904->deemph;
|
|
+ ucontrol->value.integer.value[0] = wm8904->deemph;
|
|
return 0;
|
|
}
|
|
|
|
@@ -532,7 +532,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
|
|
- int deemph = ucontrol->value.enumerated.item[0];
|
|
+ int deemph = ucontrol->value.integer.value[0];
|
|
|
|
if (deemph > 1)
|
|
return -EINVAL;
|
|
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
|
|
index 82c8ba9..475fc24 100644
|
|
--- a/sound/soc/codecs/wm8955.c
|
|
+++ b/sound/soc/codecs/wm8955.c
|
|
@@ -298,7 +298,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
|
|
snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
|
|
WM8955_K_17_9_MASK,
|
|
(pll.k >> 9) & WM8955_K_17_9_MASK);
|
|
- snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
|
|
+ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
|
|
WM8955_K_8_0_MASK,
|
|
pll.k & WM8955_K_8_0_MASK);
|
|
if (pll.k)
|
|
@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm8955->deemph;
|
|
+ ucontrol->value.integer.value[0] = wm8955->deemph;
|
|
return 0;
|
|
}
|
|
|
|
@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
|
|
- int deemph = ucontrol->value.enumerated.item[0];
|
|
+ int deemph = ucontrol->value.integer.value[0];
|
|
|
|
if (deemph > 1)
|
|
return -EINVAL;
|
|
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
|
|
index f156010..e04dbaa 100644
|
|
--- a/sound/soc/codecs/wm8960.c
|
|
+++ b/sound/soc/codecs/wm8960.c
|
|
@@ -181,7 +181,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
|
|
|
|
- ucontrol->value.enumerated.item[0] = wm8960->deemph;
|
|
+ ucontrol->value.integer.value[0] = wm8960->deemph;
|
|
return 0;
|
|
}
|
|
|
|
@@ -190,7 +190,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
|
|
{
|
|
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
|
|
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
|
|
- int deemph = ucontrol->value.enumerated.item[0];
|
|
+ int deemph = ucontrol->value.integer.value[0];
|
|
|
|
if (deemph > 1)
|
|
return -EINVAL;
|
|
@@ -242,7 +242,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
|
|
SOC_ENUM("ADC Polarity", wm8960_enum[0]),
|
|
SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
|
|
|
|
-SOC_ENUM("DAC Polarity", wm8960_enum[2]),
|
|
+SOC_ENUM("DAC Polarity", wm8960_enum[1]),
|
|
SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
|
|
wm8960_get_deemph, wm8960_put_deemph),
|
|
|
|
@@ -392,7 +392,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
|
|
{ "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
|
|
{ "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
|
|
{ "Right Input Mixer", NULL, "RINPUT2" },
|
|
- { "Right Input Mixer", NULL, "LINPUT3" },
|
|
+ { "Right Input Mixer", NULL, "RINPUT3" },
|
|
|
|
{ "Left ADC", NULL, "Left Input Mixer" },
|
|
{ "Right ADC", NULL, "Right Input Mixer" },
|
|
@@ -555,7 +555,7 @@ static struct {
|
|
{ 22050, 2 },
|
|
{ 24000, 2 },
|
|
{ 16000, 3 },
|
|
- { 11250, 4 },
|
|
+ { 11025, 4 },
|
|
{ 12000, 4 },
|
|
{ 8000, 5 },
|
|
};
|
|
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
|
|
index adb7206..66aec0c 100644
|
|
--- a/sound/soc/codecs/wm8994.c
|
|
+++ b/sound/soc/codecs/wm8994.c
|
|
@@ -2745,7 +2745,7 @@ static struct {
|
|
};
|
|
|
|
static int fs_ratios[] = {
|
|
- 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
|
|
+ 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
|
|
};
|
|
|
|
static int bclk_divs[] = {
|
|
@@ -3497,6 +3497,7 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
+/* Should be called with accdet_lock held */
|
|
static void wm1811_micd_stop(struct snd_soc_codec *codec)
|
|
{
|
|
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
|
|
@@ -3504,14 +3505,10 @@ static void wm1811_micd_stop(struct snd_soc_codec *codec)
|
|
if (!wm8994->jackdet)
|
|
return;
|
|
|
|
- mutex_lock(&wm8994->accdet_lock);
|
|
-
|
|
snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
|
|
|
|
wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
|
|
|
|
- mutex_unlock(&wm8994->accdet_lock);
|
|
-
|
|
if (wm8994->wm8994->pdata.jd_ext_cap)
|
|
snd_soc_dapm_disable_pin(&codec->dapm,
|
|
"MICBIAS2");
|
|
@@ -3552,10 +3549,10 @@ static void wm8958_open_circuit_work(struct work_struct *work)
|
|
open_circuit_work.work);
|
|
struct device *dev = wm8994->wm8994->dev;
|
|
|
|
- wm1811_micd_stop(wm8994->hubs.codec);
|
|
-
|
|
mutex_lock(&wm8994->accdet_lock);
|
|
|
|
+ wm1811_micd_stop(wm8994->hubs.codec);
|
|
+
|
|
dev_dbg(dev, "Reporting open circuit\n");
|
|
|
|
wm8994->jack_mic = false;
|
|
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
|
|
index 555115e..1461ae61 100644
|
|
--- a/sound/soc/codecs/wm8997.c
|
|
+++ b/sound/soc/codecs/wm8997.c
|
|
@@ -40,7 +40,7 @@ struct wm8997_priv {
|
|
static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
|
|
static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
|
|
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
|
|
-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
|
|
+static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
|
|
static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
|
|
|
|
static const struct reg_default wm8997_sysclk_reva_patch[] = {
|
|
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
|
|
index 444626f..0502e3f 100644
|
|
--- a/sound/soc/codecs/wm_adsp.c
|
|
+++ b/sound/soc/codecs/wm_adsp.c
|
|
@@ -1341,6 +1341,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
|
|
file, blocks, pos - firmware->size);
|
|
|
|
out_fw:
|
|
+ regmap_async_complete(regmap);
|
|
release_firmware(firmware);
|
|
wm_adsp_buf_free(&buf_list);
|
|
out:
|
|
@@ -1745,3 +1746,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(wm_adsp2_init);
|
|
+
|
|
+MODULE_LICENSE("GPL v2");
|
|
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
|
|
index 5e3bc3c..f40a7a4 100644
|
|
--- a/sound/soc/davinci/davinci-evm.c
|
|
+++ b/sound/soc/davinci/davinci-evm.c
|
|
@@ -384,18 +384,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
|
|
return ret;
|
|
}
|
|
|
|
-static int davinci_evm_remove(struct platform_device *pdev)
|
|
-{
|
|
- struct snd_soc_card *card = platform_get_drvdata(pdev);
|
|
-
|
|
- snd_soc_unregister_card(card);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static struct platform_driver davinci_evm_driver = {
|
|
.probe = davinci_evm_probe,
|
|
- .remove = davinci_evm_remove,
|
|
.driver = {
|
|
.name = "davinci_evm",
|
|
.owner = THIS_MODULE,
|
|
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
|
|
index 670afa2..7350ebb 100644
|
|
--- a/sound/soc/davinci/davinci-mcasp.c
|
|
+++ b/sound/soc/davinci/davinci-mcasp.c
|
|
@@ -418,8 +418,17 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
|
|
{
|
|
u32 fmt;
|
|
u32 tx_rotate = (word_length / 4) & 0x7;
|
|
- u32 rx_rotate = (32 - word_length) / 4;
|
|
u32 mask = (1ULL << word_length) - 1;
|
|
+ /*
|
|
+ * For captured data we should not rotate, inversion and masking is
|
|
+ * enoguh to get the data to the right position:
|
|
+ * Format data from bus after reverse (XRBUF)
|
|
+ * S16_LE: |LSB|MSB|xxx|xxx| |xxx|xxx|MSB|LSB|
|
|
+ * S24_3LE: |LSB|DAT|MSB|xxx| |xxx|MSB|DAT|LSB|
|
|
+ * S24_LE: |LSB|DAT|MSB|xxx| |xxx|MSB|DAT|LSB|
|
|
+ * S32_LE: |LSB|DAT|DAT|MSB| |MSB|DAT|DAT|LSB|
|
|
+ */
|
|
+ u32 rx_rotate = 0;
|
|
|
|
/*
|
|
* if s BCLK-to-LRCLK ratio has been configured via the set_clkdiv()
|
|
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
|
|
index 25c31f1..2f63575 100644
|
|
--- a/sound/soc/dwc/designware_i2s.c
|
|
+++ b/sound/soc/dwc/designware_i2s.c
|
|
@@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
|
|
snd_soc_dai_set_dma_data(dai, substream, NULL);
|
|
}
|
|
|
|
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
|
|
+ struct snd_soc_dai *dai)
|
|
+{
|
|
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
|
|
+
|
|
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
|
|
+ i2s_write_reg(dev->i2s_base, TXFFR, 1);
|
|
+ else
|
|
+ i2s_write_reg(dev->i2s_base, RXFFR, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int dw_i2s_trigger(struct snd_pcm_substream *substream,
|
|
int cmd, struct snd_soc_dai *dai)
|
|
{
|
|
@@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
|
|
.startup = dw_i2s_startup,
|
|
.shutdown = dw_i2s_shutdown,
|
|
.hw_params = dw_i2s_hw_params,
|
|
+ .prepare = dw_i2s_prepare,
|
|
.trigger = dw_i2s_trigger,
|
|
};
|
|
|
|
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
|
|
index 75e1403..dfdbaa0 100644
|
|
--- a/sound/soc/fsl/fsl_esai.h
|
|
+++ b/sound/soc/fsl/fsl_esai.h
|
|
@@ -302,7 +302,7 @@
|
|
#define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
|
|
#define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
|
|
#define ESAI_xCCR_xDC_SHIFT 9
|
|
-#define ESAI_xCCR_xDC_WIDTH 4
|
|
+#define ESAI_xCCR_xDC_WIDTH 5
|
|
#define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
|
|
#define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
|
|
#define ESAI_xCCR_xPSR_SHIFT 8
|
|
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
|
|
index fdd28a7..274c220 100644
|
|
--- a/sound/soc/fsl/imx-wm8962.c
|
|
+++ b/sound/soc/fsl/imx-wm8962.c
|
|
@@ -439,7 +439,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
|
|
dev_err(&pdev->dev, "audmux internal port setup failed\n");
|
|
return ret;
|
|
}
|
|
- imx_audmux_v2_configure_port(ext_port,
|
|
+ ret = imx_audmux_v2_configure_port(ext_port,
|
|
IMX_AUDMUX_V2_PTCR_SYN,
|
|
IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
|
|
if (ret) {
|
|
diff --git a/sound/soc/jz4740/Makefile b/sound/soc/jz4740/Makefile
|
|
index be873c1..d32c540 100644
|
|
--- a/sound/soc/jz4740/Makefile
|
|
+++ b/sound/soc/jz4740/Makefile
|
|
@@ -1,10 +1,8 @@
|
|
#
|
|
# Jz4740 Platform Support
|
|
#
|
|
-snd-soc-jz4740-objs := jz4740-pcm.o
|
|
snd-soc-jz4740-i2s-objs := jz4740-i2s.o
|
|
|
|
-obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o
|
|
obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o
|
|
|
|
# Jz4740 Machine Support
|
|
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
|
|
index 6c19bba..6a339fb 100644
|
|
--- a/sound/soc/omap/omap-mcbsp.c
|
|
+++ b/sound/soc/omap/omap-mcbsp.c
|
|
@@ -436,7 +436,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
|
|
case SND_SOC_DAIFMT_CBM_CFS:
|
|
/* McBSP slave. FS clock as output */
|
|
regs->srgr2 |= FSGM;
|
|
- regs->pcr0 |= FSXM;
|
|
+ regs->pcr0 |= FSXM | FSRM;
|
|
break;
|
|
case SND_SOC_DAIFMT_CBM_CFM:
|
|
/* McBSP slave */
|
|
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
|
|
index 07b8b7b..81f6a75 100644
|
|
--- a/sound/soc/omap/omap-pcm.c
|
|
+++ b/sound/soc/omap/omap-pcm.c
|
|
@@ -200,7 +200,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
|
|
struct snd_pcm *pcm = rtd->pcm;
|
|
int ret;
|
|
|
|
- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
|
|
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
|
|
if (ret)
|
|
return ret;
|
|
|
|
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
|
|
index a3119a0..6c6b35e 100644
|
|
--- a/sound/soc/pxa/pxa-ssp.c
|
|
+++ b/sound/soc/pxa/pxa-ssp.c
|
|
@@ -725,7 +725,8 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
|
|
ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
|
|
if (!ssp_handle) {
|
|
dev_err(dev, "unable to get 'port' phandle\n");
|
|
- return -ENODEV;
|
|
+ ret = -ENODEV;
|
|
+ goto err_priv;
|
|
}
|
|
|
|
priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
|
|
@@ -766,9 +767,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
|
|
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
|
|
SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
|
|
|
|
-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
|
|
- SNDRV_PCM_FMTBIT_S24_LE | \
|
|
- SNDRV_PCM_FMTBIT_S32_LE)
|
|
+#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
|
|
|
|
static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
|
|
.startup = pxa_ssp_startup,
|
|
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
|
|
index 0a9b44c..5dae660 100644
|
|
--- a/sound/soc/samsung/i2s.c
|
|
+++ b/sound/soc/samsung/i2s.c
|
|
@@ -915,11 +915,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
|
|
{
|
|
struct i2s_dai *i2s = to_info(dai);
|
|
|
|
- if (dai->active) {
|
|
- i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
|
|
- i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
|
|
- i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
|
|
- }
|
|
+ i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
|
|
+ i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
|
|
+ i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
|
|
|
|
return 0;
|
|
}
|
|
@@ -928,11 +926,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
|
|
{
|
|
struct i2s_dai *i2s = to_info(dai);
|
|
|
|
- if (dai->active) {
|
|
- writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
|
|
- writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
|
|
- writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
|
|
- }
|
|
+ writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
|
|
+ writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
|
|
+ writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
|
|
index 1967f44..9d0c59c 100644
|
|
--- a/sound/soc/sh/fsi.c
|
|
+++ b/sound/soc/sh/fsi.c
|
|
@@ -1785,8 +1785,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
|
|
static struct snd_pcm_hardware fsi_pcm_hardware = {
|
|
.info = SNDRV_PCM_INFO_INTERLEAVED |
|
|
SNDRV_PCM_INFO_MMAP |
|
|
- SNDRV_PCM_INFO_MMAP_VALID |
|
|
- SNDRV_PCM_INFO_PAUSE,
|
|
+ SNDRV_PCM_INFO_MMAP_VALID,
|
|
.buffer_bytes_max = 64 * 1024,
|
|
.period_bytes_min = 32,
|
|
.period_bytes_max = 8192,
|
|
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
|
|
index 743de5e..37fcd93 100644
|
|
--- a/sound/soc/sh/rcar/core.c
|
|
+++ b/sound/soc/sh/rcar/core.c
|
|
@@ -626,8 +626,7 @@ static void rsnd_dai_remove(struct platform_device *pdev,
|
|
static struct snd_pcm_hardware rsnd_pcm_hardware = {
|
|
.info = SNDRV_PCM_INFO_INTERLEAVED |
|
|
SNDRV_PCM_INFO_MMAP |
|
|
- SNDRV_PCM_INFO_MMAP_VALID |
|
|
- SNDRV_PCM_INFO_PAUSE,
|
|
+ SNDRV_PCM_INFO_MMAP_VALID,
|
|
.buffer_bytes_max = 64 * 1024,
|
|
.period_bytes_min = 32,
|
|
.period_bytes_max = 8192,
|
|
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
|
|
index 5e9690c..4f98ff1 100644
|
|
--- a/sound/soc/soc-compress.c
|
|
+++ b/sound/soc/soc-compress.c
|
|
@@ -696,7 +696,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
|
|
rtd->dai_link->stream_name);
|
|
|
|
ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
|
|
- 1, 0, &be_pcm);
|
|
+ rtd->dai_link->dpcm_playback,
|
|
+ rtd->dai_link->dpcm_capture, &be_pcm);
|
|
if (ret < 0) {
|
|
dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
|
|
rtd->dai_link->name);
|
|
@@ -705,8 +706,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
|
|
|
|
rtd->pcm = be_pcm;
|
|
rtd->fe_compr = 1;
|
|
- be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
|
|
- be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
|
|
+ if (rtd->dai_link->dpcm_playback)
|
|
+ be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
|
|
+ else if (rtd->dai_link->dpcm_capture)
|
|
+ be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
|
|
memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
|
|
} else
|
|
memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
|
|
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
|
|
index 731d47b..e4da224 100644
|
|
--- a/sound/soc/soc-dapm.c
|
|
+++ b/sound/soc/soc-dapm.c
|
|
@@ -689,9 +689,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
|
|
int shared;
|
|
struct snd_kcontrol *kcontrol;
|
|
bool wname_in_long_name, kcname_in_long_name;
|
|
- char *long_name;
|
|
+ char *long_name = NULL;
|
|
const char *name;
|
|
- int ret;
|
|
+ int ret = 0;
|
|
|
|
if (dapm->codec)
|
|
prefix = dapm->codec->name_prefix;
|
|
@@ -756,15 +756,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
|
|
|
|
kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
|
|
prefix);
|
|
- kfree(long_name);
|
|
- if (!kcontrol)
|
|
- return -ENOMEM;
|
|
+ if (!kcontrol) {
|
|
+ ret = -ENOMEM;
|
|
+ goto exit_free;
|
|
+ }
|
|
+
|
|
kcontrol->private_free = dapm_kcontrol_free;
|
|
|
|
ret = dapm_kcontrol_data_alloc(w, kcontrol);
|
|
if (ret) {
|
|
snd_ctl_free_one(kcontrol);
|
|
- return ret;
|
|
+ goto exit_free;
|
|
}
|
|
|
|
ret = snd_ctl_add(card, kcontrol);
|
|
@@ -772,17 +774,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
|
|
dev_err(dapm->dev,
|
|
"ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
|
|
w->name, name, ret);
|
|
- return ret;
|
|
+ goto exit_free;
|
|
}
|
|
}
|
|
|
|
ret = dapm_kcontrol_add_widget(kcontrol, w);
|
|
- if (ret)
|
|
- return ret;
|
|
+ if (ret == 0)
|
|
+ w->kcontrols[kci] = kcontrol;
|
|
|
|
- w->kcontrols[kci] = kcontrol;
|
|
+exit_free:
|
|
+ kfree(long_name);
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
/* create new dapm mixer control */
|
|
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
|
|
index 8fa7cd3..18984c4 100644
|
|
--- a/sound/soc/soc-pcm.c
|
|
+++ b/sound/soc/soc-pcm.c
|
|
@@ -1258,13 +1258,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
|
|
dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
|
|
}
|
|
|
|
+static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
|
|
+
|
|
+/* Set FE's runtime_update state; the state is protected via PCM stream lock
|
|
+ * for avoiding the race with trigger callback.
|
|
+ * If the state is unset and a trigger is pending while the previous operation,
|
|
+ * process the pending trigger action here.
|
|
+ */
|
|
+static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
|
|
+ int stream, enum snd_soc_dpcm_update state)
|
|
+{
|
|
+ struct snd_pcm_substream *substream =
|
|
+ snd_soc_dpcm_get_substream(fe, stream);
|
|
+
|
|
+ snd_pcm_stream_lock_irq(substream);
|
|
+ if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
|
|
+ dpcm_fe_dai_do_trigger(substream,
|
|
+ fe->dpcm[stream].trigger_pending - 1);
|
|
+ fe->dpcm[stream].trigger_pending = 0;
|
|
+ }
|
|
+ fe->dpcm[stream].runtime_update = state;
|
|
+ snd_pcm_stream_unlock_irq(substream);
|
|
+}
|
|
+
|
|
static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
|
|
{
|
|
struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
|
|
struct snd_pcm_runtime *runtime = fe_substream->runtime;
|
|
int stream = fe_substream->stream, ret = 0;
|
|
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
|
|
|
|
ret = dpcm_be_dai_startup(fe, fe_substream->stream);
|
|
if (ret < 0) {
|
|
@@ -1286,13 +1309,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
|
|
dpcm_set_fe_runtime(fe_substream);
|
|
snd_pcm_limit_hw_rates(runtime);
|
|
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
return 0;
|
|
|
|
unwind:
|
|
dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
|
|
be_err:
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1339,7 +1362,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
|
|
struct snd_soc_pcm_runtime *fe = substream->private_data;
|
|
int stream = substream->stream;
|
|
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
|
|
|
|
/* shutdown the BEs */
|
|
dpcm_be_dai_shutdown(fe, substream->stream);
|
|
@@ -1353,7 +1376,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
|
|
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
|
|
|
|
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
return 0;
|
|
}
|
|
|
|
@@ -1401,7 +1424,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
|
|
int err, stream = substream->stream;
|
|
|
|
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
|
|
|
|
dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
|
|
|
|
@@ -1416,7 +1439,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
|
|
err = dpcm_be_dai_hw_free(fe, stream);
|
|
|
|
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
|
|
mutex_unlock(&fe->card->mutex);
|
|
return 0;
|
|
@@ -1509,7 +1532,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
|
|
int ret, stream = substream->stream;
|
|
|
|
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
|
|
|
|
memcpy(&fe->dpcm[substream->stream].hw_params, params,
|
|
sizeof(struct snd_pcm_hw_params));
|
|
@@ -1532,7 +1555,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
|
|
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
|
|
|
|
out:
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
mutex_unlock(&fe->card->mutex);
|
|
return ret;
|
|
}
|
|
@@ -1646,7 +1669,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
|
|
}
|
|
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
|
|
|
|
-static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
|
|
+static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
|
|
{
|
|
struct snd_soc_pcm_runtime *fe = substream->private_data;
|
|
int stream = substream->stream, ret;
|
|
@@ -1720,6 +1743,23 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
+static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
|
|
+{
|
|
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
|
|
+ int stream = substream->stream;
|
|
+
|
|
+ /* if FE's runtime_update is already set, we're in race;
|
|
+ * process this trigger later at exit
|
|
+ */
|
|
+ if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
|
|
+ fe->dpcm[stream].trigger_pending = cmd + 1;
|
|
+ return 0; /* delayed, assuming it's successful */
|
|
+ }
|
|
+
|
|
+ /* we're alone, let's trigger */
|
|
+ return dpcm_fe_dai_do_trigger(substream, cmd);
|
|
+}
|
|
+
|
|
int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
|
|
{
|
|
struct snd_soc_dpcm *dpcm;
|
|
@@ -1763,7 +1803,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
|
|
|
|
dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
|
|
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
|
|
|
|
/* there is no point preparing this FE if there are no BEs */
|
|
if (list_empty(&fe->dpcm[stream].be_clients)) {
|
|
@@ -1790,7 +1830,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
|
|
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
|
|
|
|
out:
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
mutex_unlock(&fe->card->mutex);
|
|
|
|
return ret;
|
|
@@ -1937,11 +1977,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
|
|
{
|
|
int ret;
|
|
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
|
|
ret = dpcm_run_update_startup(fe, stream);
|
|
if (ret < 0)
|
|
dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
|
|
return ret;
|
|
}
|
|
@@ -1950,11 +1990,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
|
|
{
|
|
int ret;
|
|
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
|
|
ret = dpcm_run_update_shutdown(fe, stream);
|
|
if (ret < 0)
|
|
dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
|
|
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
|
|
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
|
|
|
|
return ret;
|
|
}
|
|
@@ -2011,6 +2051,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
|
|
dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
|
|
}
|
|
|
|
+ dpcm_path_put(&list);
|
|
capture:
|
|
/* skip if FE doesn't have capture capability */
|
|
if (!fe->cpu_dai->driver->capture.channels_min)
|
|
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
|
|
index 319754c..daf61ab 100644
|
|
--- a/sound/synth/emux/emux_oss.c
|
|
+++ b/sound/synth/emux/emux_oss.c
|
|
@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
|
|
if (snd_BUG_ON(!arg || !emu))
|
|
return -ENXIO;
|
|
|
|
- mutex_lock(&emu->register_mutex);
|
|
-
|
|
- if (!snd_emux_inc_count(emu)) {
|
|
- mutex_unlock(&emu->register_mutex);
|
|
+ if (!snd_emux_inc_count(emu))
|
|
return -EFAULT;
|
|
- }
|
|
|
|
memset(&callback, 0, sizeof(callback));
|
|
callback.owner = THIS_MODULE;
|
|
@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
|
|
if (p == NULL) {
|
|
snd_printk(KERN_ERR "can't create port\n");
|
|
snd_emux_dec_count(emu);
|
|
- mutex_unlock(&emu->register_mutex);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
|
|
reset_port_mode(p, arg->seq_mode);
|
|
|
|
snd_emux_reset_port(p);
|
|
-
|
|
- mutex_unlock(&emu->register_mutex);
|
|
return 0;
|
|
}
|
|
|
|
@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
|
|
if (snd_BUG_ON(!emu))
|
|
return -ENXIO;
|
|
|
|
- mutex_lock(&emu->register_mutex);
|
|
snd_emux_sounds_off_all(p);
|
|
snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
|
|
snd_seq_event_port_detach(p->chset.client, p->chset.port);
|
|
snd_emux_dec_count(emu);
|
|
|
|
- mutex_unlock(&emu->register_mutex);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
|
|
index 7778b8e..a020920 100644
|
|
--- a/sound/synth/emux/emux_seq.c
|
|
+++ b/sound/synth/emux/emux_seq.c
|
|
@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
|
|
if (emu->voices)
|
|
snd_emux_terminate_all(emu);
|
|
|
|
- mutex_lock(&emu->register_mutex);
|
|
if (emu->client >= 0) {
|
|
snd_seq_delete_kernel_client(emu->client);
|
|
emu->client = -1;
|
|
}
|
|
- mutex_unlock(&emu->register_mutex);
|
|
}
|
|
|
|
|
|
@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
|
|
/*
|
|
* increment usage count
|
|
*/
|
|
-int
|
|
-snd_emux_inc_count(struct snd_emux *emu)
|
|
+static int
|
|
+__snd_emux_inc_count(struct snd_emux *emu)
|
|
{
|
|
emu->used++;
|
|
if (!try_module_get(emu->ops.owner))
|
|
@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
|
|
return 1;
|
|
}
|
|
|
|
+int snd_emux_inc_count(struct snd_emux *emu)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ mutex_lock(&emu->register_mutex);
|
|
+ ret = __snd_emux_inc_count(emu);
|
|
+ mutex_unlock(&emu->register_mutex);
|
|
+ return ret;
|
|
+}
|
|
|
|
/*
|
|
* decrease usage count
|
|
*/
|
|
-void
|
|
-snd_emux_dec_count(struct snd_emux *emu)
|
|
+static void
|
|
+__snd_emux_dec_count(struct snd_emux *emu)
|
|
{
|
|
module_put(emu->card->module);
|
|
emu->used--;
|
|
@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
|
|
module_put(emu->ops.owner);
|
|
}
|
|
|
|
+void snd_emux_dec_count(struct snd_emux *emu)
|
|
+{
|
|
+ mutex_lock(&emu->register_mutex);
|
|
+ __snd_emux_dec_count(emu);
|
|
+ mutex_unlock(&emu->register_mutex);
|
|
+}
|
|
|
|
/*
|
|
* Routine that is called upon a first use of a particular port
|
|
@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
|
|
|
|
mutex_lock(&emu->register_mutex);
|
|
snd_emux_init_port(p);
|
|
- snd_emux_inc_count(emu);
|
|
+ __snd_emux_inc_count(emu);
|
|
mutex_unlock(&emu->register_mutex);
|
|
return 0;
|
|
}
|
|
@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
|
|
|
|
mutex_lock(&emu->register_mutex);
|
|
snd_emux_sounds_off_all(p);
|
|
- snd_emux_dec_count(emu);
|
|
+ __snd_emux_dec_count(emu);
|
|
mutex_unlock(&emu->register_mutex);
|
|
return 0;
|
|
}
|
|
diff --git a/sound/usb/card.c b/sound/usb/card.c
|
|
index af19560..ab433a0 100644
|
|
--- a/sound/usb/card.c
|
|
+++ b/sound/usb/card.c
|
|
@@ -586,18 +586,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
|
|
{
|
|
struct snd_card *card;
|
|
struct list_head *p;
|
|
+ bool was_shutdown;
|
|
|
|
if (chip == (void *)-1L)
|
|
return;
|
|
|
|
card = chip->card;
|
|
down_write(&chip->shutdown_rwsem);
|
|
+ was_shutdown = chip->shutdown;
|
|
chip->shutdown = 1;
|
|
up_write(&chip->shutdown_rwsem);
|
|
|
|
mutex_lock(®ister_mutex);
|
|
- chip->num_interfaces--;
|
|
- if (chip->num_interfaces <= 0) {
|
|
+ if (!was_shutdown) {
|
|
struct snd_usb_endpoint *ep;
|
|
|
|
snd_card_disconnect(card);
|
|
@@ -617,6 +618,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
|
|
list_for_each(p, &chip->mixer_list) {
|
|
snd_usb_mixer_disconnect(p);
|
|
}
|
|
+ }
|
|
+
|
|
+ chip->num_interfaces--;
|
|
+ if (chip->num_interfaces <= 0) {
|
|
usb_chip[chip->index] = NULL;
|
|
mutex_unlock(®ister_mutex);
|
|
snd_card_free_when_closed(card);
|
|
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
|
|
index b901f46..c7aa71e 100644
|
|
--- a/sound/usb/midi.c
|
|
+++ b/sound/usb/midi.c
|
|
@@ -364,6 +364,8 @@ static void snd_usbmidi_error_timer(unsigned long data)
|
|
if (in && in->error_resubmit) {
|
|
in->error_resubmit = 0;
|
|
for (j = 0; j < INPUT_URBS; ++j) {
|
|
+ if (atomic_read(&in->urbs[j]->use_count))
|
|
+ continue;
|
|
in->urbs[j]->dev = umidi->dev;
|
|
snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
|
|
}
|
|
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
|
|
index 1bed780..c601033 100644
|
|
--- a/sound/usb/mixer.c
|
|
+++ b/sound/usb/mixer.c
|
|
@@ -886,10 +886,12 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
|
|
case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
|
|
case USB_ID(0x046d, 0x0808):
|
|
case USB_ID(0x046d, 0x0809):
|
|
+ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
|
|
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
|
|
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
|
|
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
|
|
case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
|
|
+ case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
|
|
case USB_ID(0x046d, 0x0991):
|
|
/* Most audio usb devices lie about volume resolution.
|
|
* Most Logitech webcams have res = 384.
|
|
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
|
|
index d1d72ff..9a3e107 100644
|
|
--- a/sound/usb/mixer_maps.c
|
|
+++ b/sound/usb/mixer_maps.c
|
|
@@ -328,11 +328,28 @@ static struct usbmix_name_map gamecom780_map[] = {
|
|
{}
|
|
};
|
|
|
|
-static const struct usbmix_name_map kef_x300a_map[] = {
|
|
- { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
|
|
+/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
|
|
+ * when anything attempts to access FU 10 (control)
|
|
+ */
|
|
+static const struct usbmix_name_map scms_usb3318_map[] = {
|
|
+ { 10, NULL },
|
|
{ 0 }
|
|
};
|
|
|
|
+/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
|
|
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
|
|
+static struct usbmix_name_map bose_companion5_map[] = {
|
|
+ { 3, NULL, .dB = &bose_companion5_dB },
|
|
+ { 0 } /* terminator */
|
|
+};
|
|
+
|
|
+/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
|
|
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
|
|
+static struct usbmix_name_map dragonfly_1_2_map[] = {
|
|
+ { 7, NULL, .dB = &dragonfly_1_2_dB },
|
|
+ { 0 } /* terminator */
|
|
+};
|
|
+
|
|
/*
|
|
* Control map entries
|
|
*/
|
|
@@ -425,8 +442,29 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
|
|
.map = ebox44_map,
|
|
},
|
|
{
|
|
+ /* MAYA44 USB+ */
|
|
+ .id = USB_ID(0x2573, 0x0008),
|
|
+ .map = maya44_map,
|
|
+ },
|
|
+ {
|
|
+ /* KEF X300A */
|
|
.id = USB_ID(0x27ac, 0x1000),
|
|
- .map = kef_x300a_map,
|
|
+ .map = scms_usb3318_map,
|
|
+ },
|
|
+ {
|
|
+ /* Arcam rPAC */
|
|
+ .id = USB_ID(0x25c4, 0x0003),
|
|
+ .map = scms_usb3318_map,
|
|
+ },
|
|
+ {
|
|
+ /* Bose Companion 5 */
|
|
+ .id = USB_ID(0x05a7, 0x1020),
|
|
+ .map = bose_companion5_map,
|
|
+ },
|
|
+ {
|
|
+ /* Dragonfly DAC 1.2 */
|
|
+ .id = USB_ID(0x21b4, 0x0081),
|
|
+ .map = dragonfly_1_2_map,
|
|
},
|
|
{ 0 } /* terminator */
|
|
};
|
|
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
|
|
index f4b12c2..a82ec53 100644
|
|
--- a/sound/usb/mixer_quirks.c
|
|
+++ b/sound/usb/mixer_quirks.c
|
|
@@ -178,6 +178,7 @@ static const struct rc_config {
|
|
{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
|
|
{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
|
|
{ USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
|
|
+ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
|
|
{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
|
|
};
|
|
|
|
@@ -885,6 +886,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
|
|
return changed;
|
|
}
|
|
|
|
+static void kctl_private_value_free(struct snd_kcontrol *kctl)
|
|
+{
|
|
+ kfree((void *)kctl->private_value);
|
|
+}
|
|
+
|
|
static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
|
|
int validx, int bUnitID)
|
|
{
|
|
@@ -919,6 +925,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
+ kctl->private_free = kctl_private_value_free;
|
|
err = snd_ctl_add(mixer->chip->card, kctl);
|
|
if (err < 0)
|
|
return err;
|
|
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
|
|
index f652b10..7c24088 100644
|
|
--- a/sound/usb/quirks-table.h
|
|
+++ b/sound/usb/quirks-table.h
|
|
@@ -385,6 +385,36 @@ YAMAHA_DEVICE(0x105d, NULL),
|
|
}
|
|
},
|
|
{
|
|
+ USB_DEVICE(0x0499, 0x1509),
|
|
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
+ /* .vendor_name = "Yamaha", */
|
|
+ /* .product_name = "Steinberg UR22", */
|
|
+ .ifnum = QUIRK_ANY_INTERFACE,
|
|
+ .type = QUIRK_COMPOSITE,
|
|
+ .data = (const struct snd_usb_audio_quirk[]) {
|
|
+ {
|
|
+ .ifnum = 1,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 2,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 3,
|
|
+ .type = QUIRK_MIDI_YAMAHA
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 4,
|
|
+ .type = QUIRK_IGNORE_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+},
|
|
+{
|
|
USB_DEVICE(0x0499, 0x150a),
|
|
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
/* .vendor_name = "Yamaha", */
|
|
@@ -1581,6 +1611,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
|
|
}
|
|
},
|
|
{
|
|
+ /* BOSS ME-25 */
|
|
+ USB_DEVICE(0x0582, 0x0113),
|
|
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
+ .ifnum = QUIRK_ANY_INTERFACE,
|
|
+ .type = QUIRK_COMPOSITE,
|
|
+ .data = (const struct snd_usb_audio_quirk[]) {
|
|
+ {
|
|
+ .ifnum = 0,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 1,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 2,
|
|
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
|
|
+ .data = & (const struct snd_usb_midi_endpoint_info) {
|
|
+ .out_cables = 0x0001,
|
|
+ .in_cables = 0x0001
|
|
+ }
|
|
+ },
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+},
|
|
+{
|
|
/* only 44.1 kHz works at the moment */
|
|
USB_DEVICE(0x0582, 0x0120),
|
|
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
@@ -1714,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
|
|
}
|
|
}
|
|
},
|
|
+{
|
|
+ USB_DEVICE(0x0582, 0x0159),
|
|
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
+ /* .vendor_name = "Roland", */
|
|
+ /* .product_name = "UA-22", */
|
|
+ .ifnum = QUIRK_ANY_INTERFACE,
|
|
+ .type = QUIRK_COMPOSITE,
|
|
+ .data = (const struct snd_usb_audio_quirk[]) {
|
|
+ {
|
|
+ .ifnum = 0,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 1,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 2,
|
|
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
|
|
+ .data = & (const struct snd_usb_midi_endpoint_info) {
|
|
+ .out_cables = 0x0001,
|
|
+ .in_cables = 0x0001
|
|
+ }
|
|
+ },
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+},
|
|
/* this catches most recent vendor-specific Roland devices */
|
|
{
|
|
.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
|
|
@@ -2427,6 +2516,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
|
|
}
|
|
},
|
|
|
|
+/* Steinberg devices */
|
|
+{
|
|
+ /* Steinberg MI2 */
|
|
+ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
|
|
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
+ .ifnum = QUIRK_ANY_INTERFACE,
|
|
+ .type = QUIRK_COMPOSITE,
|
|
+ .data = & (const struct snd_usb_audio_quirk[]) {
|
|
+ {
|
|
+ .ifnum = 0,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 1,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 2,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 3,
|
|
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
|
|
+ .data = &(const struct snd_usb_midi_endpoint_info) {
|
|
+ .out_cables = 0x0001,
|
|
+ .in_cables = 0x0001
|
|
+ }
|
|
+ },
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+},
|
|
+{
|
|
+ /* Steinberg MI4 */
|
|
+ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
|
|
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
+ .ifnum = QUIRK_ANY_INTERFACE,
|
|
+ .type = QUIRK_COMPOSITE,
|
|
+ .data = & (const struct snd_usb_audio_quirk[]) {
|
|
+ {
|
|
+ .ifnum = 0,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 1,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 2,
|
|
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
|
|
+ },
|
|
+ {
|
|
+ .ifnum = 3,
|
|
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
|
|
+ .data = &(const struct snd_usb_midi_endpoint_info) {
|
|
+ .out_cables = 0x0001,
|
|
+ .in_cables = 0x0001
|
|
+ }
|
|
+ },
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+},
|
|
+
|
|
/* TerraTec devices */
|
|
{
|
|
USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
|
|
@@ -2745,133 +2902,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
|
|
}
|
|
},
|
|
|
|
-/* Hauppauge HVR-950Q and HVR-850 */
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-850",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
-{
|
|
- USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008),
|
|
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
|
|
- USB_DEVICE_ID_MATCH_INT_CLASS |
|
|
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
|
|
- .bInterfaceClass = USB_CLASS_AUDIO,
|
|
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
|
|
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
|
- .vendor_name = "Hauppauge",
|
|
- .product_name = "HVR-950Q",
|
|
- .ifnum = QUIRK_ANY_INTERFACE,
|
|
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
|
|
- }
|
|
-},
|
|
+/*
|
|
+ * Auvitek au0828 devices with audio interface.
|
|
+ * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c
|
|
+ * Please notice that some drivers are DVB only, and don't need to be
|
|
+ * here. That's the case, for example, of DVICO_FUSIONHDTV7.
|
|
+ */
|
|
+
|
|
+#define AU0828_DEVICE(vid, pid, vname, pname) { \
|
|
+ USB_DEVICE_VENDOR_SPEC(vid, pid), \
|
|
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
|
|
+ USB_DEVICE_ID_MATCH_INT_CLASS | \
|
|
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
|
|
+ .bInterfaceClass = USB_CLASS_AUDIO, \
|
|
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \
|
|
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \
|
|
+ .vendor_name = vname, \
|
|
+ .product_name = pname, \
|
|
+ .ifnum = QUIRK_ANY_INTERFACE, \
|
|
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
|
|
+ } \
|
|
+}
|
|
+
|
|
+AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"),
|
|
+AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"),
|
|
+AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"),
|
|
+AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"),
|
|
+AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
|
|
+AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"),
|
|
+AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
|
|
+AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
|
|
|
|
/* Digidesign Mbox */
|
|
{
|
|
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
|
|
index 8973070..827d404 100644
|
|
--- a/sound/usb/quirks.c
|
|
+++ b/sound/usb/quirks.c
|
|
@@ -666,7 +666,7 @@ static int snd_usb_gamecon780_boot_quirk(struct usb_device *dev)
|
|
/* set the initial volume and don't change; other values are either
|
|
* too loud or silent due to firmware bug (bko#65251)
|
|
*/
|
|
- u8 buf[2] = { 0x74, 0xdc };
|
|
+ u8 buf[2] = { 0x74, 0xe3 };
|
|
return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
|
|
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
|
|
UAC_FU_VOLUME << 8, 9 << 8, buf, 2);
|
|
@@ -1142,6 +1142,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
|
|
if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
|
|
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
|
|
mdelay(20);
|
|
+
|
|
+ /* Marantz/Denon devices with USB DAC functionality need a delay
|
|
+ * after each class compliant request
|
|
+ */
|
|
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
|
|
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
|
|
+
|
|
+ switch (le16_to_cpu(dev->descriptor.idProduct)) {
|
|
+ case 0x3005: /* Marantz HD-DAC1 */
|
|
+ case 0x3006: /* Marantz SA-14S1 */
|
|
+ mdelay(20);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
|
|
index dcc6652..deb3569 100644
|
|
--- a/tools/lib/traceevent/kbuffer-parse.c
|
|
+++ b/tools/lib/traceevent/kbuffer-parse.c
|
|
@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
|
|
switch (type_len) {
|
|
case KBUFFER_TYPE_PADDING:
|
|
*length = read_4(kbuf, data);
|
|
- data += *length;
|
|
break;
|
|
|
|
case KBUFFER_TYPE_TIME_EXTEND:
|
|
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
|
|
index a59743f..0001c9a 100644
|
|
--- a/tools/perf/util/hist.h
|
|
+++ b/tools/perf/util/hist.h
|
|
@@ -36,6 +36,7 @@ struct events_stats {
|
|
u32 nr_invalid_chains;
|
|
u32 nr_unknown_id;
|
|
u32 nr_unprocessable_samples;
|
|
+ u32 nr_unordered_events;
|
|
};
|
|
|
|
enum hist_column {
|
|
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
|
|
index 5da6ce7..c1f20e9 100644
|
|
--- a/tools/perf/util/session.c
|
|
+++ b/tools/perf/util/session.c
|
|
@@ -638,8 +638,7 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
|
|
return -ETIME;
|
|
|
|
if (timestamp < s->ordered_samples.last_flush) {
|
|
- printf("Warning: Timestamp below last timeslice flush\n");
|
|
- return -EINVAL;
|
|
+ s->stats.nr_unordered_events++;
|
|
}
|
|
|
|
if (!list_empty(sc)) {
|
|
@@ -1135,6 +1134,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
|
|
"Do you have a KVM guest running and not using 'perf kvm'?\n",
|
|
session->stats.nr_unprocessable_samples);
|
|
}
|
|
+ if (session->stats.nr_unordered_events != 0)
|
|
+ ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
|
|
}
|
|
|
|
volatile int session_done;
|
|
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
|
|
index d1b3a36..4039854 100644
|
|
--- a/tools/power/x86/turbostat/Makefile
|
|
+++ b/tools/power/x86/turbostat/Makefile
|
|
@@ -1,8 +1,12 @@
|
|
CC = $(CROSS_COMPILE)gcc
|
|
-BUILD_OUTPUT := $(PWD)
|
|
+BUILD_OUTPUT := $(CURDIR)
|
|
PREFIX := /usr
|
|
DESTDIR :=
|
|
|
|
+ifeq ("$(origin O)", "command line")
|
|
+ BUILD_OUTPUT := $(O)
|
|
+endif
|
|
+
|
|
turbostat : turbostat.c
|
|
CFLAGS += -Wall
|
|
CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
|
|
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
|
|
index 32487ed..3d5979b2 100644
|
|
--- a/tools/testing/selftests/Makefile
|
|
+++ b/tools/testing/selftests/Makefile
|
|
@@ -4,6 +4,7 @@ TARGETS += efivarfs
|
|
TARGETS += kcmp
|
|
TARGETS += memory-hotplug
|
|
TARGETS += mqueue
|
|
+TARGETS += mount
|
|
TARGETS += net
|
|
TARGETS += ptrace
|
|
TARGETS += timers
|
|
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
|
|
index 3d907da..c604f3e 100644
|
|
--- a/tools/vm/Makefile
|
|
+++ b/tools/vm/Makefile
|
|
@@ -3,7 +3,7 @@
|
|
TARGETS=page-types slabinfo
|
|
|
|
LIB_DIR = ../lib/api
|
|
-LIBS = $(LIB_DIR)/libapikfs.a
|
|
+LIBS = $(LIB_DIR)/libapi.a
|
|
|
|
CC = $(CROSS_COMPILE)gcc
|
|
CFLAGS = -Wall -Wextra -I../lib/
|
|
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
|
|
index 5081e80..c6fe405 100644
|
|
--- a/virt/kvm/arm/arch_timer.c
|
|
+++ b/virt/kvm/arm/arch_timer.c
|
|
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
|
|
|
|
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
|
|
{
|
|
+ int ret;
|
|
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
|
|
|
timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
|
|
- kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
|
|
- timer->irq->irq,
|
|
- timer->irq->level);
|
|
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
|
|
+ timer->irq->irq,
|
|
+ timer->irq->level);
|
|
+ WARN_ON(ret);
|
|
}
|
|
|
|
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
|
|
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
|
|
timer_disarm(timer);
|
|
}
|
|
|
|
-int kvm_timer_init(struct kvm *kvm)
|
|
+void kvm_timer_enable(struct kvm *kvm)
|
|
{
|
|
- if (timecounter && wqueue) {
|
|
- kvm->arch.timer.cntvoff = kvm_phys_timer_read();
|
|
+ if (kvm->arch.timer.enabled)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * There is a potential race here between VCPUs starting for the first
|
|
+ * time, which may be enabling the timer multiple times. That doesn't
|
|
+ * hurt though, because we're just setting a variable to the same
|
|
+ * variable that it already was. The important thing is that all
|
|
+ * VCPUs have the enabled variable set, before entering the guest, if
|
|
+ * the arch timers are enabled.
|
|
+ */
|
|
+ if (timecounter && wqueue)
|
|
kvm->arch.timer.enabled = 1;
|
|
- }
|
|
+}
|
|
|
|
- return 0;
|
|
+void kvm_timer_init(struct kvm *kvm)
|
|
+{
|
|
+ kvm->arch.timer.cntvoff = kvm_phys_timer_read();
|
|
}
|
|
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
|
|
index 26954a7..152ec76 100644
|
|
--- a/virt/kvm/arm/vgic.c
|
|
+++ b/virt/kvm/arm/vgic.c
|
|
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
|
|
u32 val;
|
|
u32 *reg;
|
|
|
|
- offset >>= 1;
|
|
reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
|
|
- vcpu->vcpu_id, offset);
|
|
+ vcpu->vcpu_id, offset >> 1);
|
|
|
|
- if (offset & 2)
|
|
+ if (offset & 4)
|
|
val = *reg >> 16;
|
|
else
|
|
val = *reg & 0xffff;
|
|
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
|
|
vgic_reg_access(mmio, &val, offset,
|
|
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
|
|
if (mmio->is_write) {
|
|
- if (offset < 4) {
|
|
+ if (offset < 8) {
|
|
*reg = ~0U; /* Force PPIs/SGIs to 1 */
|
|
return false;
|
|
}
|
|
|
|
val = vgic_cfg_compress(val);
|
|
- if (offset & 2) {
|
|
+ if (offset & 4) {
|
|
*reg &= 0xffff;
|
|
*reg |= val << 16;
|
|
} else {
|
|
@@ -675,7 +674,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
|
|
{
|
|
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
|
int sgi;
|
|
- int min_sgi = (offset & ~0x3) * 4;
|
|
+ int min_sgi = (offset & ~0x3);
|
|
int max_sgi = min_sgi + 3;
|
|
int vcpu_id = vcpu->vcpu_id;
|
|
u32 reg = 0;
|
|
@@ -696,7 +695,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
|
|
{
|
|
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
|
int sgi;
|
|
- int min_sgi = (offset & ~0x3) * 4;
|
|
+ int min_sgi = (offset & ~0x3);
|
|
int max_sgi = min_sgi + 3;
|
|
int vcpu_id = vcpu->vcpu_id;
|
|
u32 reg;
|
|
@@ -1043,6 +1042,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
|
|
lr, irq, vgic_cpu->vgic_lr[lr]);
|
|
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
|
|
vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
|
|
+ __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
|
|
return true;
|
|
}
|
|
|
|
@@ -1056,6 +1056,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
|
|
vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
|
|
vgic_cpu->vgic_irq_lr_map[irq] = lr;
|
|
set_bit(lr, vgic_cpu->lr_used);
|
|
+ __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
|
|
|
|
if (!vgic_irq_is_edge(vcpu, irq))
|
|
vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
|
|
@@ -1210,6 +1211,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
|
|
if (vgic_cpu->vgic_misr & GICH_MISR_U)
|
|
vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
|
|
|
|
+ /*
|
|
+ * In the next iterations of the vcpu loop, if we sync the vgic state
|
|
+ * after flushing it, but before entering the guest (this happens for
|
|
+ * pending signals and vmid rollovers), then make sure we don't pick
|
|
+ * up any old maintenance interrupts here.
|
|
+ */
|
|
+ memset(vgic_cpu->vgic_eisr, 0, sizeof(vgic_cpu->vgic_eisr[0]) * 2);
|
|
+
|
|
return level_pending;
|
|
}
|
|
|
|
@@ -1388,7 +1397,8 @@ out:
|
|
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
|
|
bool level)
|
|
{
|
|
- if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
|
|
+ if (likely(vgic_initialized(kvm)) &&
|
|
+ vgic_update_irq_state(kvm, cpuid, irq_num, level))
|
|
vgic_kick_vcpus(kvm);
|
|
|
|
return 0;
|
|
@@ -1527,17 +1537,33 @@ int kvm_vgic_hyp_init(void)
|
|
goto out_unmap;
|
|
}
|
|
|
|
- kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
|
|
- vctrl_res.start, vgic_maint_irq);
|
|
- on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
|
|
-
|
|
if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
|
|
kvm_err("Cannot obtain VCPU resource\n");
|
|
ret = -ENXIO;
|
|
goto out_unmap;
|
|
}
|
|
+
|
|
+ if (!PAGE_ALIGNED(vcpu_res.start)) {
|
|
+ kvm_err("GICV physical address 0x%llx not page aligned\n",
|
|
+ (unsigned long long)vcpu_res.start);
|
|
+ ret = -ENXIO;
|
|
+ goto out_unmap;
|
|
+ }
|
|
+
|
|
+ if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
|
|
+ kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
|
|
+ (unsigned long long)resource_size(&vcpu_res),
|
|
+ PAGE_SIZE);
|
|
+ ret = -ENXIO;
|
|
+ goto out_unmap;
|
|
+ }
|
|
+
|
|
vgic_vcpu_base = vcpu_res.start;
|
|
|
|
+ kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
|
|
+ vctrl_res.start, vgic_maint_irq);
|
|
+ on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
|
|
+
|
|
goto out;
|
|
|
|
out_unmap:
|
|
@@ -1595,7 +1621,7 @@ out:
|
|
|
|
int kvm_vgic_create(struct kvm *kvm)
|
|
{
|
|
- int i, vcpu_lock_idx = -1, ret = 0;
|
|
+ int i, vcpu_lock_idx = -1, ret;
|
|
struct kvm_vcpu *vcpu;
|
|
|
|
mutex_lock(&kvm->lock);
|
|
@@ -1610,6 +1636,7 @@ int kvm_vgic_create(struct kvm *kvm)
|
|
* vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
|
|
* that no other VCPUs are run while we create the vgic.
|
|
*/
|
|
+ ret = -EBUSY;
|
|
kvm_for_each_vcpu(i, vcpu, kvm) {
|
|
if (!mutex_trylock(&vcpu->mutex))
|
|
goto out_unlock;
|
|
@@ -1617,11 +1644,10 @@ int kvm_vgic_create(struct kvm *kvm)
|
|
}
|
|
|
|
kvm_for_each_vcpu(i, vcpu, kvm) {
|
|
- if (vcpu->arch.has_run_once) {
|
|
- ret = -EBUSY;
|
|
+ if (vcpu->arch.has_run_once)
|
|
goto out_unlock;
|
|
- }
|
|
}
|
|
+ ret = 0;
|
|
|
|
spin_lock_init(&kvm->arch.vgic.lock);
|
|
kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
|
|
@@ -1639,7 +1665,7 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-static bool vgic_ioaddr_overlap(struct kvm *kvm)
|
|
+static int vgic_ioaddr_overlap(struct kvm *kvm)
|
|
{
|
|
phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
|
|
phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
|
|
@@ -1668,10 +1694,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
|
|
if (addr + size < addr)
|
|
return -EINVAL;
|
|
|
|
+ *ioaddr = addr;
|
|
ret = vgic_ioaddr_overlap(kvm);
|
|
if (ret)
|
|
- return ret;
|
|
- *ioaddr = addr;
|
|
+ *ioaddr = VGIC_ADDR_UNDEF;
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
|
|
index 8c805a0..b47541d 100644
|
|
--- a/virt/kvm/ioapic.c
|
|
+++ b/virt/kvm/ioapic.c
|
|
@@ -203,10 +203,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
|
|
spin_lock(&ioapic->lock);
|
|
for (index = 0; index < IOAPIC_NUM_PINS; index++) {
|
|
e = &ioapic->redirtbl[index];
|
|
- if (!e->fields.mask &&
|
|
- (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
|
|
- kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
|
|
- index) || index == RTC_GSI)) {
|
|
+ if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
|
|
+ kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
|
|
+ index == RTC_GSI) {
|
|
if (kvm_apic_match_dest(vcpu, NULL, 0,
|
|
e->fields.dest_id, e->fields.dest_mode)) {
|
|
__set_bit(e->fields.vector,
|
|
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
|
|
index 0df7d4b..1f0dc1e 100644
|
|
--- a/virt/kvm/iommu.c
|
|
+++ b/virt/kvm/iommu.c
|
|
@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
|
|
gfn_t base_gfn, unsigned long npages);
|
|
|
|
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
|
|
- unsigned long size)
|
|
+ unsigned long npages)
|
|
{
|
|
gfn_t end_gfn;
|
|
pfn_t pfn;
|
|
|
|
pfn = gfn_to_pfn_memslot(slot, gfn);
|
|
- end_gfn = gfn + (size >> PAGE_SHIFT);
|
|
+ end_gfn = gfn + npages;
|
|
gfn += 1;
|
|
|
|
if (is_error_noslot_pfn(pfn))
|
|
@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
|
|
return pfn;
|
|
}
|
|
|
|
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ for (i = 0; i < npages; ++i)
|
|
+ kvm_release_pfn_clean(pfn + i);
|
|
+}
|
|
+
|
|
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
|
|
{
|
|
gfn_t gfn, end_gfn;
|
|
@@ -111,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
|
|
* Pin all pages we are about to map in memory. This is
|
|
* important because we unmap and unpin in 4kb steps later.
|
|
*/
|
|
- pfn = kvm_pin_pages(slot, gfn, page_size);
|
|
+ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
|
|
if (is_error_noslot_pfn(pfn)) {
|
|
gfn += 1;
|
|
continue;
|
|
@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
|
|
if (r) {
|
|
printk(KERN_ERR "kvm_iommu_map_address:"
|
|
"iommu failed to map pfn=%llx\n", pfn);
|
|
+ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
|
|
goto unmap_pages;
|
|
}
|
|
|
|
@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
|
|
return 0;
|
|
|
|
unmap_pages:
|
|
- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
|
|
+ kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
|
|
return r;
|
|
}
|
|
|
|
@@ -266,14 +275,6 @@ out_unlock:
|
|
return r;
|
|
}
|
|
|
|
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
|
|
-{
|
|
- unsigned long i;
|
|
-
|
|
- for (i = 0; i < npages; ++i)
|
|
- kvm_release_pfn_clean(pfn + i);
|
|
-}
|
|
-
|
|
static void kvm_iommu_put_pages(struct kvm *kvm,
|
|
gfn_t base_gfn, unsigned long npages)
|
|
{
|
|
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
|
|
index 03a0381..eed250e 100644
|
|
--- a/virt/kvm/kvm_main.c
|
|
+++ b/virt/kvm/kvm_main.c
|
|
@@ -52,6 +52,7 @@
|
|
|
|
#include <asm/processor.h>
|
|
#include <asm/io.h>
|
|
+#include <asm/ioctl.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/pgtable.h>
|
|
|
|
@@ -95,8 +96,6 @@ static int hardware_enable_all(void);
|
|
static void hardware_disable_all(void);
|
|
|
|
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
|
|
-static void update_memslots(struct kvm_memslots *slots,
|
|
- struct kvm_memory_slot *new, u64 last_generation);
|
|
|
|
static void kvm_release_pfn_dirty(pfn_t pfn);
|
|
static void mark_page_dirty_in_slot(struct kvm *kvm,
|
|
@@ -682,8 +681,7 @@ static void sort_memslots(struct kvm_memslots *slots)
|
|
}
|
|
|
|
static void update_memslots(struct kvm_memslots *slots,
|
|
- struct kvm_memory_slot *new,
|
|
- u64 last_generation)
|
|
+ struct kvm_memory_slot *new)
|
|
{
|
|
if (new) {
|
|
int id = new->id;
|
|
@@ -694,8 +692,6 @@ static void update_memslots(struct kvm_memslots *slots,
|
|
if (new->npages != npages)
|
|
sort_memslots(slots);
|
|
}
|
|
-
|
|
- slots->generation = last_generation + 1;
|
|
}
|
|
|
|
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
|
|
@@ -717,10 +713,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
|
|
{
|
|
struct kvm_memslots *old_memslots = kvm->memslots;
|
|
|
|
- update_memslots(slots, new, kvm->memslots->generation);
|
|
+ /*
|
|
+ * Set the low bit in the generation, which disables SPTE caching
|
|
+ * until the end of synchronize_srcu_expedited.
|
|
+ */
|
|
+ WARN_ON(old_memslots->generation & 1);
|
|
+ slots->generation = old_memslots->generation + 1;
|
|
+
|
|
+ update_memslots(slots, new);
|
|
rcu_assign_pointer(kvm->memslots, slots);
|
|
synchronize_srcu_expedited(&kvm->srcu);
|
|
|
|
+ /*
|
|
+ * Increment the new memslot generation a second time. This prevents
|
|
+ * vm exits that race with memslot updates from caching a memslot
|
|
+ * generation that will (potentially) be valid forever.
|
|
+ */
|
|
+ slots->generation++;
|
|
+
|
|
kvm_arch_memslots_updated(kvm);
|
|
|
|
return old_memslots;
|
|
@@ -1539,8 +1549,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|
ghc->generation = slots->generation;
|
|
ghc->len = len;
|
|
ghc->memslot = gfn_to_memslot(kvm, start_gfn);
|
|
- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
|
|
- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
|
|
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
|
|
+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
|
|
ghc->hva += offset;
|
|
} else {
|
|
/*
|
|
@@ -1970,6 +1980,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
|
|
if (vcpu->kvm->mm != current->mm)
|
|
return -EIO;
|
|
|
|
+ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
|
|
+ return -EINVAL;
|
|
+
|
|
#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
|
|
/*
|
|
* Special cases: vcpu ioctls that are asynchronous to vcpu execution,
|